Merge "Fix parsing of addresses. lp#1074039"
diff --git a/.gitignore b/.gitignore
index 55096ed..b4dca86 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,3 +8,6 @@
*.swo
*.egg-info
.tox
+.venv
+dist
+build
diff --git a/HACKING b/HACKING
deleted file mode 100644
index c2c403b..0000000
--- a/HACKING
+++ /dev/null
@@ -1,14 +0,0 @@
-Test Data/Configuration
------------------------
-- Assume nothing about existing test data
-- Tests should be self contained (provide their own data)
-- Clean up test data at the completion of each test
-- Use configuration files for values that will vary by environment
-
-General
--------
-- Put two newlines between top-level code (funcs, classes, etc)
-- Put one newline between methods in classes and anywhere else
-- Do not write "except:", use "except Exception:" at the very least
-- Include your name with TODOs as in "#TODO(termie)"
-- Do not name anything the same name as a built-in or reserved word
\ No newline at end of file
diff --git a/HACKING.rst b/HACKING.rst
new file mode 100644
index 0000000..103f8cd
--- /dev/null
+++ b/HACKING.rst
@@ -0,0 +1,195 @@
+Test Data/Configuration
+-----------------------
+- Assume nothing about existing test data
+- Tests should be self contained (provide their own data)
+- Clean up test data at the completion of each test
+- Use configuration files for values that will vary by environment
+
+
+General
+-------
+- Put two newlines between top-level code (funcs, classes, etc)
+- Put one newline between methods in classes and anywhere else
+- Long lines should be wrapped in parentheses in preference to using a
+  backslash for line continuation (see the wrapping example below).
+- Do not write "except:", use "except Exception:" at the very least
+- Include your name with TODOs as in "#TODO(termie)"
+- Do not name anything the same name as a built-in or reserved word. Example::
+
+ def list():
+ return [1, 2, 3]
+
+ mylist = list() # BAD, shadows `list` built-in
+
+ class Foo(object):
+ def list(self):
+ return [1, 2, 3]
+
+ mylist = Foo().list() # OKAY, does not shadow built-in
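+
+For example, a long string can be wrapped using implicit concatenation inside
+parentheses rather than continued with a backslash (illustrative sketch)::
+
+    long_message = ('This is a long message that would exceed the line '
+                    'length limit, so it is wrapped in parentheses '
+                    'instead of being continued with a backslash.')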
+
+Imports
+-------
+- Do not import objects, only modules (*)
+- Do not import more than one module per line (*)
+- Do not make relative imports
+- Order your imports by the full module path
+- Organize your imports according to the following template
+
+Example::
+
+ # vim: tabstop=4 shiftwidth=4 softtabstop=4
+ {{stdlib imports in human alphabetical order}}
+ \n
+ {{third-party lib imports in human alphabetical order}}
+ \n
+ {{tempest imports in human alphabetical order}}
+ \n
+ \n
+ {{begin your code}}
+
+
+Human Alphabetical Order Examples
+---------------------------------
+Example::
+
+ import httplib
+ import logging
+ import random
+ import StringIO
+ import time
+ import unittest
+
+ import eventlet
+ import webob.exc
+
+ import tempest.config
+ from tempest.services.compute.json.limits_client import LimitsClientJSON
+ from tempest.services.compute.xml.limits_client import LimitsClientXML
+ from tempest.services.volume.volumes_client import VolumesClientJSON
+ import tempest.test
+
+
+Docstrings
+----------
+Example::
+
+ """A one line docstring looks like this and ends in a period."""
+
+
+ """A multi line docstring has a one-line summary, less than 80 characters.
+
+ Then a new paragraph after a newline that explains in more detail any
+ general information about the function, class or method. Example usages
+    are also great to have here if it is a complex class or function.
+
+ When writing the docstring for a class, an extra line should be placed
+ after the closing quotations. For more in-depth explanations for these
+ decisions see http://www.python.org/dev/peps/pep-0257/
+
+    If you are going to describe parameters and return values, use Sphinx;
+    the appropriate syntax is as follows.
+
+ :param foo: the foo parameter
+ :param bar: the bar parameter
+ :returns: return_type -- description of the return value
+ :returns: description of the return value
+ :raises: AttributeError, KeyError
+ """
+
+
+Dictionaries/Lists
+------------------
+If a dictionary (dict) or list object is longer than 80 characters, its items
+should be split with newlines. Embedded iterables should have their items
+indented. Additionally, the last item in the dictionary should have a trailing
+comma. This increases readability and simplifies future diffs.
+
+Example::
+
+ my_dictionary = {
+ "image": {
+ "name": "Just a Snapshot",
+ "size": 2749573,
+ "properties": {
+ "user_id": 12,
+ "arch": "x86_64",
+ },
+ "things": [
+ "thing_one",
+ "thing_two",
+ ],
+ "status": "ACTIVE",
+ },
+ }
+
+
+Calling Methods
+---------------
+Calls to methods 80 characters or longer should format each argument with
+newlines. This is not a requirement, but a guideline::
+
+ unnecessarily_long_function_name('string one',
+ 'string two',
+ kwarg1=constants.ACTIVE,
+ kwarg2=['a', 'b', 'c'])
+
+
+Rather than constructing parameters inline, it is better to break things up::
+
+ list_of_strings = [
+ 'what_a_long_string',
+ 'not as long',
+ ]
+
+ dict_of_numbers = {
+ 'one': 1,
+ 'two': 2,
+ 'twenty four': 24,
+ }
+
+ object_one.call_a_method('string three',
+ 'string four',
+ kwarg1=list_of_strings,
+ kwarg2=dict_of_numbers)
+
+
+OpenStack Trademark
+-------------------
+
+OpenStack is a registered trademark of OpenStack, LLC, and uses the
+following capitalization:
+
+ OpenStack
+
+
+Commit Messages
+---------------
+Using a common format for commit messages will help keep our git history
+readable. Follow these guidelines:
+
+ First, provide a brief summary (it is recommended to keep the commit title
+ under 50 chars).
+
+ The first line of the commit message should provide an accurate
+ description of the change, not just a reference to a bug or
+ blueprint. It must be followed by a single blank line.
+
+ If the change relates to a specific driver (libvirt, xenapi, qpid, etc...),
+ begin the first line of the commit message with the driver name, lowercased,
+ followed by a colon.
+
+ Following your brief summary, provide a more detailed description of
+ the patch, manually wrapping the text at 72 characters. This
+ description should provide enough detail that one does not have to
+ refer to external resources to determine its high-level functionality.
+
+ Once you use 'git review', two lines will be appended to the commit
+ message: a blank line followed by a 'Change-Id'. This is important
+ to correlate this commit with a specific review in Gerrit, and it
+ should not be modified.
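+
+As an illustration only (not an actual change), a commit message that follows
+these guidelines might look like::
+
+    libvirt: Handle missing domain during hard reboot
+
+    Previously, hard-rebooting an instance whose libvirt domain had
+    already disappeared raised an unhandled exception. Catch the
+    expected error and recreate the domain so that the reboot request
+    succeeds instead of leaving the instance in an error state.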
+
+For further information on constructing high quality commit messages,
+and how to split up commits into a series of changes, consult the
+project wiki:
+
+ http://wiki.openstack.org/GitCommitMessages
diff --git a/bin/tempest b/bin/tempest
new file mode 100755
index 0000000..87ba6d5
--- /dev/null
+++ b/bin/tempest
@@ -0,0 +1,52 @@
+#!/usr/bin/env bash
+
+function usage {
+ echo "Usage: $0 [OPTION]..."
+ echo "Run Tempest test suite"
+ echo ""
+ echo " -s, --smoke Only run smoke tests"
+ echo " -w, --whitebox Only run whitebox tests"
+ echo " -h, --help Print this usage message"
+  echo "  -d, --debug              Debug this script -- set -o xtrace"
+ exit
+}
+
+function process_option {
+ case "$1" in
+ -h|--help) usage;;
+ -d|--debug) set -o xtrace;;
+ -s|--smoke) noseargs="$noseargs --attr=type=smoke";;
+ -w|--whitebox) noseargs="$noseargs --attr=type=whitebox";;
+ *) noseargs="$noseargs $1"
+ esac
+}
+
+noseargs=""
+
+export NOSE_WITH_OPENSTACK=1
+export NOSE_OPENSTACK_COLOR=1
+export NOSE_OPENSTACK_RED=15.00
+export NOSE_OPENSTACK_YELLOW=3.00
+export NOSE_OPENSTACK_SHOW_ELAPSED=1
+export NOSE_OPENSTACK_STDOUT=1
+
+for arg in "$@"; do
+ process_option $arg
+done
+
+
+# only add tempest default if we don't specify a test
+if [[ "x$noseargs" =~ "tempest" ]]; then
+ noseargs="$noseargs"
+else
+ noseargs="$noseargs tempest"
+fi
+
+
+function run_tests {
+ $NOSETESTS
+}
+
+NOSETESTS="nosetests $noseargs"
+
+run_tests || exit
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index ed3cf6c..3cbe1b5 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -22,6 +22,8 @@
# Should typically be left as keystone unless you have a non-Keystone
# authentication API service
strategy = keystone
+# The identity region
+region = RegionOne
[compute]
# This section contains configuration options used when executing tests
@@ -54,6 +56,9 @@
# The above non-administrative user's tenant name
alt_tenant_name = alt_demo
+# The compute region
+region = RegionOne
+
# Reference data for tests. The ref and ref_alt should be
# distinct images/flavors.
image_ref = {$IMAGE_ID}
@@ -134,6 +139,11 @@
# performed, which requires XenServer pools in case of using XS)
use_block_migration_for_live_migration = false
+# By default, rely on the status of the diskConfig extension to decide
+# whether to execute disk config tests. When set to false, these tests
+# are skipped regardless of the extension status.
+disk_config_enabled_override = true
+
[image]
# This section contains configuration options used when executing tests
# against the OpenStack Images API
@@ -180,6 +190,44 @@
# Catalog type of the Quantum Service
catalog_type = network
+# This should be the username of a user WITHOUT administrative privileges
+username = demo
+# The above non-administrative user's password
+password = pass
+# The above non-administrative user's tenant name
+tenant_name = demo
+
+# A large private cidr block from which to allocate smaller blocks for
+# tenant networks.
+tenant_network_cidr = 10.100.0.0/16
+
+# The mask bits used to partition the tenant block.
+tenant_network_mask_bits = 29
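+# (Illustration: with the defaults above, each tenant network would be a /29
+# subnet carved out of 10.100.0.0/16.)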
+
+# If tenant networks are reachable, connectivity checks will be
+# performed directly against addresses on those networks.
+tenant_networks_reachable = false
+
+# Id of the public network that provides external connectivity.
+public_network_id = {$PUBLIC_NETWORK_ID}
+
+# Id of a shared public router that provides external connectivity.
+# A shared public router would commonly be used where IP namespaces
+# were disabled. If namespaces are enabled, it would be preferable
+# for each tenant to have their own router.
+public_router_id = {$PUBLIC_ROUTER_ID}
+
+[network-admin]
+# This section contains configuration options for an administrative
+# user of the Network API.
+
+# This should be the username of a user WITH administrative privileges
+username = admin
+# The above administrative user's password
+password = pass
+# The above administrative user's tenant name
+tenant_name = admin
+
[identity-admin]
# This section contains configuration options for an administrative
# user of the Compute API. These options are used in tests that stress
@@ -210,18 +258,18 @@
[object-storage]
# This section contains configuration options used when executing tests
# against the OpenStack Object Storage API.
-# This should be the username of a user WITHOUT administrative privileges
-username = admin
-# The above non-administrative user's password
-password = password
-# The above non-administrative user's tenant name
-tenant_name = admin
+
+# You can configure the credentials in the compute section
# The type of endpoint for an Object Storage API service. Unless you have a
# custom Keystone service catalog implementation, you probably want to leave
# this value as "object-store"
catalog_type = object-store
+# The object-store region
+region = RegionOne
+
+
[boto]
# This section contains configuration options used when executing tests
# with boto.
@@ -261,6 +309,9 @@
#TCP/IP connection timeout
http_socket_timeout = 5
+#Number of times to retry an action on connection or 5xx errors
+num_retries = 1
+
# Status change wait timout
build_timeout = 120
diff --git a/etc/tempest.conf.tpl b/etc/tempest.conf.tpl
deleted file mode 100644
index 880a3c1..0000000
--- a/etc/tempest.conf.tpl
+++ /dev/null
@@ -1,238 +0,0 @@
-[identity]
-# This section contains configuration options that a variety of Tempest
-# test clients use when authenticating with different user/tenant
-# combinations
-
-# Set to True if your test environment's Keystone authentication service should
-# be accessed over HTTPS
-use_ssl = %IDENTITY_USE_SSL%
-# This is the main host address of the authentication service API
-host = %IDENTITY_HOST%
-# Port that the authentication service API is running on
-port = %IDENTITY_PORT%
-# Version of the authentication service API (a string)
-api_version = %IDENTITY_API_VERSION%
-# Path to the authentication service tokens resource (do not modify unless you
-# have a custom authentication API and are not using Keystone)
-path = %IDENTITY_PATH%
-# Should typically be left as keystone unless you have a non-Keystone
-# authentication API service
-strategy = %IDENTITY_STRATEGY%
-
-[compute]
-# This section contains configuration options used when executing tests
-# against the OpenStack Compute API.
-
-# Allows test cases to create/destroy tenants and users. This option
-# enables isolated test cases and better parallel execution,
-# but also requires that OpenStack Identity API admin credentials
-# are known.
-allow_tenant_isolation = %COMPUTE_ALLOW_TENANT_ISOLATION%
-
-# Allows test cases to create/destroy tenants and users. This option
-# enables isolated test cases and better parallel execution,
-# but also requires that OpenStack Identity API admin credentials
-# are known.
-allow_tenant_reuse = %COMPUTE_ALLOW_TENANT_REUSE%
-
-# This should be the username of a user WITHOUT administrative privileges
-username = %USERNAME%
-# The above non-administrative user's password
-password = %PASSWORD%
-# The above non-administrative user's tenant name
-tenant_name = %TENANT_NAME%
-
-# This should be the username of an alternate user WITHOUT
-# administrative privileges
-alt_username = %ALT_USERNAME%
-# The above non-administrative user's password
-alt_password = %ALT_PASSWORD%
-# The above non-administrative user's tenant name
-alt_tenant_name = %ALT_TENANT_NAME%
-
-# Reference data for tests. The ref and ref_alt should be
-# distinct images/flavors.
-image_ref = %IMAGE_ID%
-image_ref_alt = %IMAGE_ID_ALT%
-flavor_ref = %FLAVOR_REF%
-flavor_ref_alt = %FLAVOR_REF_ALT%
-
-# Number of seconds to wait while looping to check the status of an
-# instance that is building.
-build_interval = %COMPUTE_BUILD_INTERVAL%
-
-# Number of seconds to time out on waiting for an instance
-# to build or reach an expected status
-build_timeout = %COMPUTE_BUILD_TIMEOUT%
-
-# The type of endpoint for a Compute API service. Unless you have a
-# custom Keystone service catalog implementation, you probably want to leave
-# this value as "compute"
-catalog_type = %COMPUTE_CATALOG_TYPE%
-
-# Does the Compute API support creation of images?
-create_image_enabled = %COMPUTE_CREATE_IMAGE_ENABLED%
-
-# For resize to work with libvirt/kvm, one of the following must be true:
-# Single node: allow_resize_to_same_host=True must be set in nova.conf
-# Cluster: the 'nova' user must have scp access between cluster nodes
-resize_available = %COMPUTE_RESIZE_AVAILABLE%
-
-# Does the compute API support changing the admin password?
-change_password_available = %COMPUTE_CHANGE_PASSWORD_AVAILABLE%
-
-# Level to log Compute API request/response details.
-log_level = %COMPUTE_LOG_LEVEL%
-
-# Whitebox options for compute. Whitebox options enable the
-# whitebox test cases, which look at internal Nova database state,
-# SSH into VMs to check instance state, etc.
-
-# Should we run whitebox tests for Compute?
-whitebox_enabled = %COMPUTE_WHITEBOX_ENABLED%
-
-# Path of nova source directory
-source_dir = %COMPUTE_SOURCE_DIR%
-
-# Path of nova configuration file
-config_path = %COMPUTE_CONFIG_PATH%
-
-# Directory containing nova binaries such as nova-manage
-bin_dir = %COMPUTE_BIN_DIR%
-
-# Path to a private key file for SSH access to remote hosts
-path_to_private_key = %COMPUTE_PATH_TO_PRIVATE_KEY%
-
-# Connection string to the database of Compute service
-db_uri = %COMPUTE_DB_URI%
-
-# Run live migration tests (requires 2 hosts)
-live_migration_available = %LIVE_MIGRATION_AVAILABLE%
-
-# Use block live migration (Otherwise, non-block migration will be
-# performed, which requires XenServer pools in case of using XS)
-use_block_migration_for_live_migration = %USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION%
-
-[image]
-# This section contains configuration options used when executing tests
-# against the OpenStack Images API
-
-# The type of endpoint for an Image API service. Unless you have a
-# custom Keystone service catalog implementation, you probably want to leave
-# this value as "image"
-catalog_type = %IMAGE_CATALOG_TYPE%
-
-# The version of the OpenStack Images API to use
-api_version = %IMAGE_API_VERSION%
-
-# This is the main host address of the Image API
-host = %IMAGE_HOST%
-
-# Port that the Image API is running on
-port = %IMAGE_PORT%
-
-# This should be the username of a user WITHOUT administrative privileges
-username = %USERNAME%
-# The above non-administrative user's password
-password = %PASSWORD%
-# The above non-administrative user's tenant name
-tenant_name = %TENANT_NAME%
-
-[compute-admin]
-# This section contains configuration options for an administrative
-# user of the Compute API. These options are used in tests that stress
-# the admin-only parts of the Compute API
-
-# This should be the username of a user WITH administrative privileges
-username = %COMPUTE_ADMIN_USERNAME%
-# The above administrative user's password
-password = %COMPUTE_ADMIN_PASSWORD%
-# The above administrative user's tenant name
-tenant_name = %COMPUTE_ADMIN_TENANT_NAME%
-
-[identity-admin]
-# This section contains configuration options for an administrative
-# user of the Compute API. These options are used in tests that stress
-# the admin-only parts of the Compute API
-
-# This should be the username of a user WITH administrative privileges
-username = %IDENTITY_ADMIN_USERNAME%
-# The above administrative user's password
-password = %IDENTITY_ADMIN_PASSWORD%
-# The above administrative user's tenant name
-tenant_name = %IDENTITY_ADMIN_TENANT_NAME%
-
-[volume]
-# This section contains the configuration options used when executing tests
-# against the OpenStack Block Storage API service
-
-# The type of endpoint for a Cinder or Block Storage API service.
-# Unless you have a custom Keystone service catalog implementation, you
-# probably want to leave this value as "volume"
-catalog_type = %VOLUME_CATALOG_TYPE%
-# Number of seconds to wait while looping to check the status of a
-# volume that is being made available
-build_interval = %VOLUME_BUILD_INTERVAL%
-# Number of seconds to time out on waiting for a volume
-# to be available or reach an expected status
-build_timeout = %VOLUME_BUILD_TIMEOUT%
-
-[object-storage]
-# This section contains configuration options used when executing tests
-# against the OpenStack Object Storage API.
-# This should be the username of a user WITHOUT administrative privileges
-username = %USERNAME%
-# The above non-administrative user's password
-password = %PASSWORD%
-# The above non-administrative user's tenant name
-tenant_name = %TENANT_NAME%
-
-# The type of endpoint for an Object Storage API service. Unless you have a
-# custom Keystone service catalog implementation, you probably want to leave
-# this value as "object-store"
-catalog_type = %OBJECT_CATALOG_TYPE%
-
-[boto]
-# This section contains configuration options used when executing tests
-# with boto.
-
-# EC2 URL
-ec2_url = %BOTO_EC2_URL%
-# S3 URL
-s3_url = %BOTO_S3_URL%
-
-# Use keystone ec2-* command to get those values for your test user and tenant
-aws_access = %BOTO_AWS_ACCESS%
-aws_secret = %BOTO_AWS_SECRET%
-
-#Region
-aws_region = %BOTO_AWS_REGION%
-
-#Image materials for S3 upload
-# ALL content of the specified directory will be uploaded to S3
-s3_materials_path = %BOTO_S3_MATERIALS_PATH%
-
-# The manifest.xml files, must be in the s3_materials_path directory
-# Subdirectories not allowed!
-# The filenames will be used as a Keys in the S3 Buckets
-
-#ARI Ramdisk manifest. Must be in the above s3_materials_path directory
-ari_manifest = %BOTO_ARI_MANIFEST%
-
-#AMI Machine Image manifest. Must be in the above s3_materials_path directory
-ami_manifest = %BOTO_AMI_MANIFEST%
-
-#AKI Kernel Image manifest, Must be in the above s3_materials_path directory
-aki_manifest = %BOTO_AKI_MANIFEST%
-
-#Instance type
-instance_type = %BOTO_FLAVOR_NAME%
-
-#TCP/IP connection timeout
-http_socket_timeout = %BOTO_SOCKET_TIMEOUT%
-
-# Status change wait timout
-build_timeout = %BOTO_BUILD_TIMEOUT%
-
-# Status change wait interval
-build_interval = %BOTO_BUILD_INTERVAL%
diff --git a/openstack-common.conf b/openstack-common.conf
new file mode 100644
index 0000000..a75279f
--- /dev/null
+++ b/openstack-common.conf
@@ -0,0 +1,7 @@
+[DEFAULT]
+
+# The list of modules to copy from openstack-common
+modules=setup,cfg,iniparser
+
+# The base module to hold the copy of openstack.common
+base=tempest
diff --git a/run_tests.sh b/run_tests.sh
index e359caf..e350c13 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -1,30 +1,48 @@
-#!/bin/bash
+#!/usr/bin/env bash
function usage {
echo "Usage: $0 [OPTION]..."
echo "Run Tempest test suite"
echo ""
+ echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
+ echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
+ echo " -s, --no-site-packages Isolate the virtualenv from the global Python environment"
+ echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
echo " -s, --smoke Only run smoke tests"
echo " -w, --whitebox Only run whitebox tests"
echo " -p, --pep8 Just run pep8"
echo " -h, --help Print this usage message"
- echo " -d. --debug Debug this script -- set -o xtrace"
+ echo " -d, --debug Debug this script -- set -o xtrace"
+ echo " -S, --stdout Don't capture stdout"
exit
}
function process_option {
case "$1" in
-h|--help) usage;;
+ -V|--virtual-env) always_venv=1; never_venv=0;;
+ -N|--no-virtual-env) always_venv=0; never_venv=1;;
+ -s|--no-site-packages) no_site_packages=1;;
+ -f|--force) force=1;;
-d|--debug) set -o xtrace;;
-p|--pep8) let just_pep8=1;;
-s|--smoke) noseargs="$noseargs --attr=type=smoke";;
-w|--whitebox) noseargs="$noseargs --attr=type=whitebox";;
+ -S|--stdout) noseargs="$noseargs -s";;
*) noseargs="$noseargs $1"
esac
}
noseargs=""
just_pep8=0
+venv=.venv
+with_venv=tools/with_venv.sh
+always_venv=0
+never_venv=0
+no_site_packages=0
+force=0
+wrapper=""
+
export NOSE_WITH_OPENSTACK=1
export NOSE_OPENSTACK_COLOR=1
@@ -37,6 +55,9 @@
process_option $arg
done
+if [ $no_site_packages -eq 1 ]; then
+ installvenvopts="--no-site-packages"
+fi
# only add tempest default if we don't specify a test
if [[ "x$noseargs" =~ "tempest" ]]; then
@@ -47,19 +68,49 @@
function run_tests {
- $NOSETESTS
+ ${wrapper} $NOSETESTS
}
function run_pep8 {
echo "Running pep8 ..."
- PEP8_EXCLUDE="etc,include,tools,*venv"
- PEP8_OPTIONS="--exclude=$PEP8_EXCLUDE --repeat"
- PEP8_INCLUDE="."
- pep8 $PEP8_OPTIONS $PEP8_INCLUDE
+ srcfiles="`find tempest -type f -name "*.py"`"
+ srcfiles+=" `find tools -type f -name "*.py"`"
+ srcfiles+=" `find stress -type f -name "*.py"`"
+ srcfiles+=" setup.py"
+
+ ignore='--ignore=E121,E122,E125,E126'
+
+ ${wrapper} python tools/hacking.py ${ignore} ${srcfiles}
}
NOSETESTS="nosetests $noseargs"
+if [ $never_venv -eq 0 ]
+then
+ # Remove the virtual environment if --force used
+ if [ $force -eq 1 ]; then
+ echo "Cleaning virtualenv..."
+ rm -rf ${venv}
+ fi
+ if [ -e ${venv} ]; then
+ wrapper="${with_venv}"
+ else
+ if [ $always_venv -eq 1 ]; then
+ # Automatically install the virtualenv
+ python tools/install_venv.py $installvenvopts
+ wrapper="${with_venv}"
+ else
+ echo -e "No virtual environment found...create one? (Y/n) \c"
+ read use_ve
+ if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then
+ # Install the virtualenv and run the test suite in it
+ python tools/install_venv.py $installvenvopts
+ wrapper=${with_venv}
+ fi
+ fi
+ fi
+fi
+
if [ $just_pep8 -eq 1 ]; then
run_pep8
exit
diff --git a/setup.py b/setup.py
old mode 100644
new mode 100755
index fceadba..1f071bb
--- a/setup.py
+++ b/setup.py
@@ -1,34 +1,51 @@
-#!/usr/bin/python
-# Copyright (c) 2012 OpenStack, LLC.
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
import setuptools
-setuptools.setup(
- name='tempest',
- version="0.1",
- description='The OpenStack Integration Test Suite',
- license='Apache License (2.0)',
- author='OpenStack',
- author_email='openstack@lists.launchpad.net',
- url='http://github.com/openstack/tempest/',
- classifiers=[
- 'Development Status :: 4 - Beta',
- 'License :: OSI Approved :: Apache Software License',
- 'Operating System :: POSIX :: Linux',
- 'Programming Language :: Python :: 2.6',
- 'Environment :: No Input/Output (Daemon)',
- ],
- py_modules=[])
+from tempest.openstack.common import setup as common_setup
+
+requires = common_setup.parse_requirements()
+depend_links = common_setup.parse_dependency_links()
+
+setuptools.setup(name='tempest',
+ version="2012.2",
+ description='Integration test tools',
+ author='OpenStack',
+ author_email='openstack-qa@lists.launchpad.net',
+ url='http://www.openstack.org/',
+ classifiers=['Environment :: OpenStack',
+ 'Intended Audience :: Information Technology',
+ 'Intended Audience :: System Administrators',
+ 'Intended Audience :: Developers',
+ 'License :: OSI Approved :'
+ ': Apache Software License',
+ 'Operating System :: POSIX :: Linux',
+ 'Programming Language :: Python',
+ 'Programming Language :: Python :: 2',
+ 'Programming Language :: Python :: 2.7', ],
+ cmdclass=common_setup.get_cmdclass(),
+ packages=setuptools.find_packages(exclude=['bin']),
+ install_requires=requires,
+ dependency_links=depend_links,
+ include_package_data=True,
+ test_suite='nose.collector',
+ setup_requires=['setuptools_git>=0.4'],
+ scripts=['bin/tempest'],
+ py_modules=[])
diff --git a/stress/config.py b/stress/config.py
index 64091cd..ca86ce5 100755
--- a/stress/config.py
+++ b/stress/config.py
@@ -39,15 +39,15 @@
@property
def nova_logdir(self):
- """Directory containing log files on the compute nodes"""
+ """Directory containing log files on the compute nodes."""
return self.get("nova_logdir", None)
@property
def controller(self):
- """Controller host"""
+ """Controller host."""
return self.get("controller", None)
@property
def max_instances(self):
- """Maximum number of instances to create during test"""
+ """Maximum number of instances to create during test."""
return self.get("max_instances", 16)
diff --git a/stress/driver.py b/stress/driver.py
index 3032d58..c50e957 100644
--- a/stress/driver.py
+++ b/stress/driver.py
@@ -15,19 +15,18 @@
Users pass in a description of the workload and a nova manager object
to the bash_openstack function call"""
-
-import random
import datetime
+import random
import time
-
-# local imports
-from test_case import *
-import utils.util
from config import StressConfig
-from state import ClusterState, KeyPairState, FloatingIpState, VolumeState
+from state import ClusterState
+from state import FloatingIpState
+from state import KeyPairState
+from state import VolumeState
+from test_case import *
from tempest.common.utils.data_utils import rand_name
-
+import utils.util
# setup logging to file
logging.basicConfig(
diff --git a/stress/pending_action.py b/stress/pending_action.py
index a2d5a6b..abfa74d 100644
--- a/stress/pending_action.py
+++ b/stress/pending_action.py
@@ -14,9 +14,9 @@
"""Describe follow-up actions using `PendingAction` class to verify
that nova API calls such as create/delete are completed"""
-
import logging
import time
+
from tempest.exceptions import TimeoutException
@@ -46,7 +46,7 @@
return False
def check_timeout(self):
- """Check for timeouts of TestCase actions"""
+ """Check for timeouts of TestCase actions."""
time_diff = time.time() - self._start_time
if time_diff > self._timeout:
self._logger.error('%s exceeded timeout of %d' %
@@ -76,7 +76,7 @@
self._target = target_server
def _check_for_status(self, state_string):
- """Check to see if the machine has transitioned states"""
+ """Check to see if the machine has transitioned states."""
t = time.time() # for debugging
target = self._target
_resp, body = self._manager.servers_client.get_server(target['id'])
diff --git a/stress/test_case.py b/stress/test_case.py
index fe510d5..d04ace0 100644
--- a/stress/test_case.py
+++ b/stress/test_case.py
@@ -25,5 +25,5 @@
self._logger = logging.getLogger(self.__class__.__name__)
def run(self, nova_manager, state_obj, *pargs, **kargs):
- """Nova API methods to call that would modify state of the cluster"""
+ """Nova API methods to call that would modify state of the cluster."""
return
diff --git a/stress/test_floating_ips.py b/stress/test_floating_ips.py
index 302385a..97e4382 100755
--- a/stress/test_floating_ips.py
+++ b/stress/test_floating_ips.py
@@ -12,16 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
-# system imports
import random
-import time
import telnetlib
-import logging
+import time
-# local imports
-import test_case
import pending_action
+import test_case
class TestChangeFloatingIp(test_case.StressTestCase):
@@ -40,14 +36,13 @@
return None
floating_ip.change_pending = True
timeout = int(kwargs.get('timeout', 60))
+ cli = manager.floating_ips_client
if floating_ip.server_id is None:
server = random.choice(self.server_ids)
address = floating_ip.address
self._logger.info('Adding %s to server %s' % (address, server))
- resp, body =\
- manager.floating_ips_client.associate_floating_ip_to_server(
- address,
- server)
+ resp, body = cli.associate_floating_ip_to_server(address,
+ server)
if resp.status != 202:
raise Exception("response: %s body: %s" % (resp, body))
floating_ip.server_id = server
@@ -57,9 +52,8 @@
server = floating_ip.server_id
address = floating_ip.address
self._logger.info('Removing %s from server %s' % (address, server))
- resp, body =\
- manager.floating_ips_client.disassociate_floating_ip_from_server(
- address, server)
+ resp, body = cli.disassociate_floating_ip_from_server(address,
+ server)
if resp.status != 202:
raise Exception("response: %s body: %s" % (resp, body))
return VerifyChangeFloatingIp(manager, floating_ip,
@@ -67,7 +61,7 @@
class VerifyChangeFloatingIp(pending_action.PendingAction):
- """Verify that floating ip was changed"""
+ """Verify that floating ip was changed."""
def __init__(self, manager, floating_ip, timeout, add=None):
super(VerifyChangeFloatingIp, self).__init__(manager, timeout=timeout)
self.floating_ip = floating_ip
@@ -85,7 +79,7 @@
(self.floating_ip.address, self.elapsed()))
self.floating_ip.change_pending = False
return True
- except:
+ except Exception:
if not self.add:
self._logger.info('%s removed [%.1f secs elapsed]' %
(self.floating_ip.address, self.elapsed()))
diff --git a/stress/test_server_actions.py b/stress/test_server_actions.py
index 58350ac..a2032f0 100644
--- a/stress/test_server_actions.py
+++ b/stress/test_server_actions.py
@@ -17,20 +17,17 @@
sub-class will have a corresponding PendingServerAction. These pending
actions veriy that the API call was successful or not."""
-
-# system imports
import random
import time
-# local imports
-import test_case
import pending_action
from tempest.exceptions import Duplicate
+import test_case
from utils.util import *
class TestRebootVM(test_case.StressTestCase):
- """Reboot a server"""
+ """Reboot a server."""
def run(self, manager, state, *pargs, **kwargs):
"""
@@ -59,8 +56,7 @@
reboot_target = target[0]
# It seems that doing a reboot when in reboot is an error.
try:
- response, body = manager.servers_client.reboot(
- reboot_target['id'],
+ response, body = manager.servers_client.reboot(reboot_target['id'],
_reboot_arg)
except Duplicate:
return
@@ -136,7 +132,7 @@
# This code needs to be tested against a cluster that supports resize.
#class TestResizeVM(test_case.StressTestCase):
-# """Resize a server (change flavors)"""
+# """Resize a server (change flavors)."""
#
# def run(self, manager, state, *pargs, **kwargs):
# """
@@ -197,7 +193,7 @@
# timeout=_timeout)
#
#class VerifyResizeVM(pending_action.PendingServerAction):
-# """Verify that resizing of a VM was successful"""
+# """Verify that resizing of a VM was successful."""
# States = enum('VERIFY_RESIZE_CHECK', 'ACTIVE_CHECK')
#
# def __init__(self, manager, state, created_server,
diff --git a/stress/test_servers.py b/stress/test_servers.py
index 113e5cb..e1cb4d1 100644
--- a/stress/test_servers.py
+++ b/stress/test_servers.py
@@ -17,15 +17,11 @@
Each sub-class will have a corresponding PendingServerAction. These pending
actions veriy that the API call was successful or not."""
-
-# system imports
import random
import time
-
-# local imports
-import test_case
import pending_action
+import test_case
class TestCreateVM(test_case.StressTestCase):
@@ -100,7 +96,7 @@
class VerifyCreateVM(pending_action.PendingServerAction):
- """Verify that VM was built and is running"""
+ """Verify that VM was built and is running."""
def __init__(self, manager,
state,
created_server,
@@ -179,7 +175,7 @@
class VerifyKillActiveVM(pending_action.PendingServerAction):
- """Verify that server was destroyed"""
+ """Verify that server was destroyed."""
def retry(self):
"""
@@ -242,7 +238,7 @@
class TestUpdateVMName(test_case.StressTestCase):
- """Class to change the name of the active server"""
+ """Class to change the name of the active server."""
def run(self, manager, state, *pargs, **kwargs):
"""
Issue HTTP POST request to change the name of active server.
@@ -292,7 +288,7 @@
class VerifyUpdateVMName(pending_action.PendingServerAction):
- """Check that VM has new name"""
+ """Check that VM has new name."""
def retry(self):
"""
Check that VM has new name. Update local view of `state` to RUNNING.
diff --git a/stress/tests/create_kill.py b/stress/tests/create_kill.py
index 26600de..2565723 100644
--- a/stress/tests/create_kill.py
+++ b/stress/tests/create_kill.py
@@ -17,14 +17,14 @@
from stress.test_servers import *
from stress.basher import BasherAction
from stress.driver import *
-from tempest import openstack
+from tempest import clients
choice_spec = [
BasherAction(TestCreateVM(), 50),
BasherAction(TestKillActiveVM(), 50)
]
-nova = openstack.Manager()
+nova = clients.Manager()
bash_openstack(nova,
choice_spec,
diff --git a/stress/tests/floating_ips.py b/stress/tests/floating_ips.py
index 03bd509..6a4452c 100755
--- a/stress/tests/floating_ips.py
+++ b/stress/tests/floating_ips.py
@@ -11,19 +11,19 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-"""Stress test that associates/disasssociates floating ips"""
+"""Stress test that associates/disassociates floating ips."""
-# local imports
-from stress.test_floating_ips import TestChangeFloatingIp
from stress.basher import BasherAction
from stress.driver import *
-from tempest import openstack
+from stress.test_floating_ips import TestChangeFloatingIp
+from tempest import clients
+
choice_spec = [
BasherAction(TestChangeFloatingIp(), 100)
]
-nova = openstack.Manager()
+nova = clients.Manager()
bash_openstack(nova,
choice_spec,
diff --git a/stress/tests/hard_reboots.py b/stress/tests/hard_reboots.py
index 324b133..fe57be1 100644
--- a/stress/tests/hard_reboots.py
+++ b/stress/tests/hard_reboots.py
@@ -11,14 +11,14 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-"""Test that reboots random instances in a Nova cluster"""
+"""Test that reboots random instances in a Nova cluster."""
from stress.test_servers import *
from stress.test_server_actions import *
from stress.basher import BasherAction
from stress.driver import *
-from tempest import openstack
+from tempest import clients
choice_spec = [
BasherAction(TestCreateVM(), 50),
@@ -26,7 +26,7 @@
kargs={'type': 'HARD'})
]
-nova = openstack.Manager()
+nova = clients.Manager()
bash_openstack(nova,
choice_spec,
diff --git a/stress/tests/user_script_sample.py b/stress/tests/user_script_sample.py
index 51270a7..04163e3 100644
--- a/stress/tests/user_script_sample.py
+++ b/stress/tests/user_script_sample.py
@@ -18,7 +18,7 @@
from stress.test_servers import *
from stress.basher import BasherAction
from stress.driver import *
-from tempest import openstack
+from tempest import clients
choice_spec = [
BasherAction(TestCreateVM(), 50,
@@ -27,7 +27,7 @@
]
-nova = openstack.Manager()
+nova = clients.Manager()
bash_openstack(nova,
choice_spec,
diff --git a/stress/utils/util.py b/stress/utils/util.py
index 5870ca1..ec63b99 100644
--- a/stress/utils/util.py
+++ b/stress/utils/util.py
@@ -14,8 +14,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import subprocess
import shlex
+import subprocess
SSH_OPTIONS = (" -q" +
" -o UserKnownHostsFile=/dev/null" +
@@ -50,6 +50,6 @@
def enum(*sequential, **named):
- """Create auto-incremented enumerated types"""
+ """Create auto-incremented enumerated types."""
enums = dict(zip(sequential, range(len(sequential))), **named)
return type('Enum', (), enums)
diff --git a/tempest/openstack.py b/tempest/clients.py
similarity index 90%
rename from tempest/openstack.py
rename to tempest/clients.py
index fbd2f00..0bb1752 100644
--- a/tempest/openstack.py
+++ b/tempest/clients.py
@@ -19,47 +19,51 @@
from tempest import config
from tempest import exceptions
+from tempest.services.boto.clients import APIClientEC2
+from tempest.services.boto.clients import ObjectClientS3
+from tempest.services.compute.json.extensions_client import \
+ ExtensionsClientJSON
+from tempest.services.compute.json.flavors_client import FlavorsClientJSON
+from tempest.services.compute.json.floating_ips_client import \
+ FloatingIPsClientJSON
+from tempest.services.compute.json.images_client import ImagesClientJSON
+from tempest.services.compute.json.limits_client import LimitsClientJSON
+from tempest.services.compute.json.servers_client import ServersClientJSON
+from tempest.services.compute.json.security_groups_client import \
+ SecurityGroupsClientJSON
+from tempest.services.compute.json.keypairs_client import KeyPairsClientJSON
+from tempest.services.compute.json.quotas_client import QuotasClient
+from tempest.services.compute.json.volumes_extensions_client import \
+ VolumesExtensionsClientJSON
+from tempest.services.compute.json.console_output_client import \
+ ConsoleOutputsClientJSON
+from tempest.services.compute.xml.extensions_client import ExtensionsClientXML
+from tempest.services.compute.xml.flavors_client import FlavorsClientXML
+from tempest.services.compute.xml.floating_ips_client import \
+ FloatingIPsClientXML
+from tempest.services.compute.xml.images_client import ImagesClientXML
+from tempest.services.compute.xml.keypairs_client import KeyPairsClientXML
+from tempest.services.compute.xml.limits_client import LimitsClientXML
+from tempest.services.compute.xml.security_groups_client \
+ import SecurityGroupsClientXML
+from tempest.services.compute.xml.servers_client import ServersClientXML
+from tempest.services.compute.xml.volumes_extensions_client import \
+ VolumesExtensionsClientXML
+from tempest.services.compute.xml.console_output_client import \
+ ConsoleOutputsClientXML
from tempest.services.identity.json.admin_client import AdminClientJSON
from tempest.services.identity.json.admin_client import TokenClientJSON
from tempest.services.identity.xml.admin_client import AdminClientXML
from tempest.services.identity.xml.admin_client import TokenClientXML
from tempest.services.image import service as image_service
from tempest.services.network.json.network_client import NetworkClient
-from tempest.services.compute.json.extensions_client import \
-ExtensionsClientJSON
-from tempest.services.compute.json.flavors_client import FlavorsClientJSON
-from tempest.services.compute.json.floating_ips_client import \
-FloatingIPsClientJSON
-from tempest.services.compute.json.images_client import ImagesClientJSON
-from tempest.services.compute.json.limits_client import LimitsClientJSON
-from tempest.services.compute.json.servers_client import ServersClientJSON
-from tempest.services.compute.json.security_groups_client \
-import SecurityGroupsClientJSON
-from tempest.services.compute.json.keypairs_client import KeyPairsClientJSON
-from tempest.services.compute.json.volumes_extensions_client \
-import VolumesExtensionsClientJSON
-from tempest.services.compute.json.console_output_client \
-import ConsoleOutputsClient
-from tempest.services.compute.xml.extensions_client import ExtensionsClientXML
-from tempest.services.compute.xml.flavors_client import FlavorsClientXML
-from tempest.services.compute.xml.floating_ips_client import \
-FloatingIPsClientXML
-from tempest.services.compute.xml.images_client import ImagesClientXML
-from tempest.services.compute.xml.keypairs_client import KeyPairsClientXML
-from tempest.services.compute.xml.limits_client import LimitsClientXML
-from tempest.services.compute.xml.security_groups_client \
-import SecurityGroupsClientXML
-from tempest.services.compute.xml.servers_client import ServersClientXML
-from tempest.services.compute.xml.volumes_extensions_client \
-import VolumesExtensionsClientXML
-from tempest.services.volume.json.volumes_client import VolumesClientJSON
-from tempest.services.volume.xml.volumes_client import VolumesClientXML
from tempest.services.object_storage.account_client import AccountClient
from tempest.services.object_storage.container_client import ContainerClient
from tempest.services.object_storage.object_client import ObjectClient
-from tempest.services.boto.clients import APIClientEC2
-from tempest.services.boto.clients import ObjectClientS3
-from tempest.services.compute.json.quotas_client import QuotasClient
+from tempest.services.volume.json.volumes_client import VolumesClientJSON
+from tempest.services.volume.xml.volumes_client import VolumesClientXML
+from tempest.services.object_storage.object_client import \
+ ObjectClientCustomizedHeader
LOG = logging.getLogger(__name__)
@@ -123,6 +127,11 @@
"xml": SecurityGroupsClientXML,
}
+CONSOLE_OUTPUT_CLIENT = {
+ "json": ConsoleOutputsClientJSON,
+ "xml": ConsoleOutputsClientXML,
+}
+
class Manager(object):
@@ -180,10 +189,11 @@
self.token_client = TOKEN_CLIENT[interface](self.config)
self.security_groups_client = \
SECURITY_GROUPS_CLIENT[interface](*client_args)
+ self.console_outputs_client = \
+ CONSOLE_OUTPUT_CLIENT[interface](*client_args)
except KeyError:
msg = "Unsupported interface type `%s'" % interface
raise exceptions.InvalidConfiguration(msg)
- self.console_outputs_client = ConsoleOutputsClient(*client_args)
self.quotas_client = QuotasClient(*client_args)
self.network_client = NetworkClient(*client_args)
self.account_client = AccountClient(*client_args)
@@ -191,6 +201,7 @@
self.object_client = ObjectClient(*client_args)
self.ec2api_client = APIClientEC2(*client_args)
self.s3_client = ObjectClientS3(*client_args)
+ self.custom_object_client = ObjectClientCustomizedHeader(*client_args)
class AltManager(Manager):
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index 8311365..287ef56 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -15,8 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import httplib2
+import json
import logging
from lxml import etree
import time
@@ -44,13 +44,21 @@
self.token = None
self.base_url = None
self.config = config
- self.region = 0
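+    # Preferred region per service type; keystone_auth() falls back to the
+    # first endpoint in the catalog entry when no endpoint matches the
+    # configured region.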
+ self.region = {'compute': self.config.compute.region}
self.endpoint_url = 'publicURL'
self.strategy = self.config.identity.strategy
self.headers = {'Content-Type': 'application/%s' % self.TYPE,
'Accept': 'application/%s' % self.TYPE}
self.build_interval = config.compute.build_interval
self.build_timeout = config.compute.build_timeout
+ self.general_header_lc = set(('cache-control', 'connection',
+ 'date', 'pragma', 'trailer',
+ 'transfer-encoding', 'via',
+ 'warning'))
+ self.response_header_lc = set(('accept-ranges', 'age', 'etag',
+ 'location', 'proxy-authenticate',
+ 'retry-after', 'server',
+ 'vary', 'www-authenticate'))
def _set_auth(self):
"""
@@ -95,11 +103,11 @@
params['headers'] = {'User-Agent': 'Test-Client', 'X-Auth-User': user,
'X-Auth-Key': password}
- self.http_obj = httplib2.Http()
+ self.http_obj = httplib2.Http(disable_ssl_certificate_validation=True)
resp, body = self.http_obj.request(auth_url, 'GET', **params)
try:
return resp['x-auth-token'], resp['x-server-management-url']
- except:
+ except Exception:
raise
def keystone_auth(self, user, password, auth_url, service, tenant_name):
@@ -117,7 +125,7 @@
}
}
- self.http_obj = httplib2.Http()
+ self.http_obj = httplib2.Http(disable_ssl_certificate_validation=True)
headers = {'Content-Type': 'application/json'}
body = json.dumps(creds)
resp, body = self.http_obj.request(auth_url, 'POST',
@@ -133,14 +141,13 @@
mgmt_url = None
for ep in auth_data['serviceCatalog']:
- if ep["type"] == service and service != 'volume':
- mgmt_url = ep['endpoints'][self.region][self.endpoint_url]
- tenant_id = auth_data['token']['tenant']['id']
- break
-
- elif (ep["type"] == service and ep['name'] == 'cinder' and
- service == 'volume'):
- mgmt_url = ep['endpoints'][self.region][self.endpoint_url]
+ if ep["type"] == service:
+ for _ep in ep['endpoints']:
+ if service in self.region and \
+ _ep['region'] == self.region[service]:
+ mgmt_url = _ep[self.endpoint_url]
+ if not mgmt_url:
+ mgmt_url = ep['endpoints'][0][self.endpoint_url]
tenant_id = auth_data['token']['tenant']['id']
break
@@ -193,7 +200,7 @@
if (self.token is None) or (self.base_url is None):
self._set_auth()
- self.http_obj = httplib2.Http()
+ self.http_obj = httplib2.Http(disable_ssl_certificate_validation=True)
if headers is None:
headers = {}
headers['X-Auth-Token'] = self.token
@@ -201,6 +208,38 @@
req_url = "%s/%s" % (self.base_url, url)
resp, resp_body = self.http_obj.request(req_url, method,
headers=headers, body=body)
+
+    #TODO(afazekas): Make sure we can validate all responses, and that the
+    #http library does not perform any action automatically
+ if (resp.status in set((204, 205, 304)) or resp.status < 200 or
+ method.upper() == 'HEAD') and resp_body:
+ raise exceptions.ResponseWithNonEmptyBody(status=resp.status)
+
+    #NOTE(afazekas):
+    # If the HTTP status code is 205
+    #  'The response MUST NOT include an entity.'
+    # An HTTP entity has an entity-body and 'entity-header' fields.
+    # In the HTTP response specification (Section 6), 'entity-header',
+    # 'general-header' and 'response-header' are alternatives (an OR
+    # relation), so any header outside the latter two groups is
+    # considered an entity header.
+
+ if (resp.status == 205 and
+ 0 != len(set(resp.keys()) - set(('status',)) -
+ self.response_header_lc - self.general_header_lc)):
+ raise exceptions.ResponseWithEntity()
+
+ #NOTE(afazekas)
+    # Swift sometimes returns a non-JSON error response (for example when
+    # deleting a non-empty container); we may need a separate rest class
+    # for swift.
+    # RFC 2616 says error responses SHOULD contain an explanation, so the
+    # warning below is expected in the SHOULD/SHOULD NOT case.
+
+    # An empty error body is likely to cause an error for the caller
+ if not body and resp.status >= 400:
+ self.log.warning("status >= 400 response with empty body")
+
if resp.status == 401 or resp.status == 403:
self._log(req_url, body, resp, resp_body)
raise exceptions.Unauthorized()
@@ -225,7 +264,7 @@
self._log(req_url, body, resp, resp_body)
if 'overLimit' in resp_body:
raise exceptions.OverLimit(resp_body['overLimit']['message'])
- elif 'limit' in resp_body['message']:
+ elif 'exceeded' in resp_body['message']:
raise exceptions.OverLimit(resp_body['message'])
elif depth < MAX_RECURSION_DEPTH:
delay = resp['Retry-After'] if 'Retry-After' in resp else 60
@@ -264,7 +303,7 @@
return resp, resp_body
def wait_for_resource_deletion(self, id):
- """Waits for a resource to be deleted"""
+ """Waits for a resource to be deleted."""
start_time = int(time.time())
while True:
if self.is_resource_deleted(id):
diff --git a/tempest/common/ssh.py b/tempest/common/ssh.py
index c03a90c..151060f 100644
--- a/tempest/common/ssh.py
+++ b/tempest/common/ssh.py
@@ -15,12 +15,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-import time
-import socket
-import warnings
-import select
from cStringIO import StringIO
+import select
+import socket
+import time
+import warnings
+
from tempest import exceptions
@@ -47,7 +48,7 @@
self.buf_size = 1024
def _get_ssh_connection(self):
- """Returns an ssh connection to the specified host"""
+ """Returns an ssh connection to the specified host."""
_timeout = True
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(
@@ -78,7 +79,7 @@
return (time.time() - timeout) > start_time
def connect_until_closed(self):
- """Connect to the server and wait until connection is lost"""
+ """Connect to the server and wait until connection is lost."""
try:
ssh = self._get_ssh_connection()
_transport = ssh.get_transport()
@@ -136,7 +137,7 @@
return ''.join(out_data)
def test_connection_auth(self):
- """ Returns true if ssh can connect to server"""
+ """Returns true if ssh can connect to server."""
try:
connection = self._get_ssh_connection()
connection.close()
diff --git a/tempest/common/utils/data_utils.py b/tempest/common/utils/data_utils.py
index 15afd0a..3a7661c 100644
--- a/tempest/common/utils/data_utils.py
+++ b/tempest/common/utils/data_utils.py
@@ -15,9 +15,11 @@
# License for the specific language governing permissions and limitations
# under the License.
+import itertools
import random
import re
import urllib
+
from tempest import exceptions
@@ -27,7 +29,7 @@
def build_url(host, port, api_version=None, path=None,
params=None, use_ssl=False):
- """Build the request URL from given host, port, path and parameters"""
+ """Build the request URL from given host, port, path and parameters."""
pattern = 'v\d\.\d'
if re.match(pattern, path):
@@ -57,31 +59,15 @@
def parse_image_id(image_ref):
- """Return the image id from a given image ref"""
- temp = image_ref.rsplit('/')
- #Return the last item, which is the image id
- return temp[len(temp) - 1]
+ """Return the image id from a given image ref."""
+ return image_ref.rsplit('/')[-1]
def arbitrary_string(size=4, base_text=None):
- """Return exactly size bytes worth of base_text as a string"""
-
- if (base_text is None) or (base_text == ''):
+ """
+ Return size characters from base_text, repeating the base_text infinitely
+ if needed.
+ """
+ if not base_text:
base_text = 'test'
-
- if size <= 0:
- return ''
-
- extra = size % len(base_text)
- body = ''
-
- if extra == 0:
- body = base_text * size
-
- if extra == size:
- body = base_text[:size]
-
- if extra > 0 and extra < size:
- body = (size / len(base_text)) * base_text + base_text[:extra]
-
- return body
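+    # For example, arbitrary_string(10, 'ab') returns 'ababababab':
+    # itertools.cycle() repeats base_text endlessly and islice() takes the
+    # first `size` characters.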
+ return ''.join(itertools.islice(itertools.cycle(base_text), size))
diff --git a/tempest/common/utils/linux/remote_client.py b/tempest/common/utils/linux/remote_client.py
index ca1557f..b501df4 100644
--- a/tempest/common/utils/linux/remote_client.py
+++ b/tempest/common/utils/linux/remote_client.py
@@ -1,10 +1,11 @@
-from tempest.common.ssh import Client
-from tempest.config import TempestConfig
-from tempest.common import utils
-from tempest.exceptions import SSHTimeout, ServerUnreachable
-
-import time
import re
+import time
+
+from tempest.common.ssh import Client
+from tempest.common import utils
+from tempest.config import TempestConfig
+from tempest.exceptions import ServerUnreachable
+from tempest.exceptions import SSHTimeout
class RemoteClient():
diff --git a/tempest/config.py b/tempest/config.py
index 0ccd4b6..8233dd5 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -15,488 +15,450 @@
# License for the specific language governing permissions and limitations
# under the License.
-import ConfigParser
import logging
import os
+import sys
from tempest.common.utils import data_utils
+from tempest.openstack.common import cfg
LOG = logging.getLogger(__name__)
+identity_group = cfg.OptGroup(name='identity',
+ title="Keystone Configuration Options")
-class BaseConfig(object):
+IdentityGroup = [
+ cfg.StrOpt('catalog_type',
+ default='identity',
+ help="Catalog type of the Identity service."),
+ cfg.StrOpt('host',
+ default="127.0.0.1",
+ help="Host IP for making Identity API requests."),
+ cfg.IntOpt('port',
+ default=8773,
+ help="Port for the Identity service."),
+ cfg.StrOpt('api_version',
+ default="v1.1",
+ help="Version of the Identity API"),
+ cfg.StrOpt('path',
+ default='/',
+ help="Path of API request"),
+ cfg.BoolOpt('use_ssl',
+ default=False,
+ help="Specifies if we are using https."),
+ cfg.StrOpt('strategy',
+ default='keystone',
+ help="Which auth method does the environment use? "
+ "(basic|keystone)"),
+ cfg.StrOpt('region',
+ default=None,
+ help="The identity region name to use."),
+]
- SECTION_NAME = None
- def __init__(self, conf):
- self.conf = conf
+def register_identity_opts(conf):
+ conf.register_group(identity_group)
+ for opt in IdentityGroup:
+ conf.register_opt(opt, group='identity')
- def get(self, item_name, default_value=None):
- try:
- return self.conf.get(self.SECTION_NAME, item_name, raw=True)
- except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
- return default_value
+ authurl = data_utils.build_url(conf.identity.host,
+ str(conf.identity.port),
+ conf.identity.api_version,
+ conf.identity.path,
+ use_ssl=conf.identity.use_ssl)
- def getboolean(self, item_name, default_value=None):
- try:
- return self.conf.getboolean(self.SECTION_NAME, item_name)
- except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
- return default_value
+ auth_url = cfg.StrOpt('auth_url',
+ default=authurl,
+ help="The Identity URL (derived)")
+ conf.register_opt(auth_url, group="identity")
-class IdentityConfig(BaseConfig):
+identity_admin_group = cfg.OptGroup(name='identity-admin',
+ title="Identity Admin Options")
- """Provides configuration information for authenticating with Keystone."""
+IdentityAdminGroup = [
+ cfg.StrOpt('username',
+ default='admin',
+ help="Username to use for Identity Admin API requests"),
+ cfg.StrOpt('tenant_name',
+ default='admin',
+ help="Tenant name to use for Identity Admin API requests"),
+ cfg.StrOpt('password',
+ default='pass',
+ help="API key to use for Identity Admin API requests",
+ secret=True),
+]
- SECTION_NAME = "identity"
- @property
- def catalog_type(self):
- """Catalog type of the Identity service."""
- return self.get("catalog_type", 'identity')
+def register_identity_admin_opts(conf):
+ conf.register_group(identity_admin_group)
+ for opt in IdentityAdminGroup:
+ conf.register_opt(opt, group='identity-admin')
- @property
- def host(self):
- """Host IP for making Identity API requests."""
- return self.get("host", "127.0.0.1")
- @property
- def port(self):
- """Port for the Identity service."""
- return self.get("port", "8773")
+compute_group = cfg.OptGroup(name='compute',
+ title='Compute Service Options')
- @property
- def api_version(self):
- """Version of the Identity API"""
- return self.get("api_version", "v1.1")
+ComputeGroup = [
+ cfg.BoolOpt('allow_tenant_isolation',
+ default=False,
+ help="Allows test cases to create/destroy tenants and "
+ "users. This option enables isolated test cases and "
+ "better parallel execution, but also requires that "
+ "OpenStack Identity API admin credentials are known."),
+ cfg.BoolOpt('allow_tenant_reuse',
+ default=True,
+ help="If allow_tenant_isolation is True and a tenant that "
+ "would be created for a given test already exists (such "
+ "as from a previously-failed run), re-use that tenant "
+ "instead of failing because of the conflict. Note that "
+ "this would result in the tenant being deleted at the "
+ "end of a subsequent successful run."),
+ cfg.StrOpt('username',
+ default='demo',
+ help="Username to use for Nova API requests."),
+ cfg.StrOpt('tenant_name',
+ default='demo',
+ help="Tenant name to use for Nova API requests."),
+ cfg.StrOpt('password',
+ default='pass',
+ help="API key to use when authenticating.",
+ secret=True),
+ cfg.StrOpt('alt_username',
+ default=None,
+ help="Username of alternate user to use for Nova API "
+ "requests."),
+ cfg.StrOpt('alt_tenant_name',
+ default=None,
+ help="Alternate user's Tenant name to use for Nova API "
+ "requests."),
+ cfg.StrOpt('alt_password',
+ default=None,
+ help="API key to use when authenticating as alternate user.",
+ secret=True),
+ cfg.StrOpt('region',
+ default=None,
+ help="The compute region name to use."),
+ cfg.StrOpt('image_ref',
+ default="{$IMAGE_ID}",
+ help="Valid secondary image reference to be used in tests."),
+ cfg.StrOpt('image_ref_alt',
+ default="{$IMAGE_ID_ALT}",
+ help="Valid secondary image reference to be used in tests."),
+ cfg.IntOpt('flavor_ref',
+ default=1,
+ help="Valid primary flavor to use in tests."),
+ cfg.IntOpt('flavor_ref_alt',
+ default=2,
+ help='Valid secondary flavor to be used in tests.'),
+ cfg.BoolOpt('resize_available',
+ default=False,
+ help="Does the test environment support resizing?"),
+ cfg.BoolOpt('live_migration_available',
+ default=False,
+ help="Does the test environment support live migration "
+ "available?"),
+ cfg.BoolOpt('use_block_migration_for_live_migration',
+ default=False,
+ help="Does the test environment use block devices for live "
+ "migration"),
+ cfg.BoolOpt('change_password_available',
+ default=False,
+ help="Does the test environment support changing the admin "
+ "password?"),
+ cfg.BoolOpt('create_image_enabled',
+ default=False,
+ help="Does the test environment support snapshots?"),
+ cfg.IntOpt('build_interval',
+ default=10,
+ help="Time in seconds between build status checks."),
+ cfg.IntOpt('build_timeout',
+ default=300,
+ help="Timeout in seconds to wait for an instance to build."),
+ cfg.BoolOpt('run_ssh',
+ default=False,
+ help="Does the test environment support snapshots?"),
+ cfg.StrOpt('ssh_user',
+ default='root',
+ help="User name used to authenticate to an instance."),
+ cfg.IntOpt('ssh_timeout',
+ default=300,
+ help="Timeout in seconds to wait for authentcation to "
+ "succeed."),
+ cfg.StrOpt('network_for_ssh',
+ default='public',
+ help="Network used for SSH connections."),
+ cfg.IntOpt('ip_version_for_ssh',
+ default=4,
+ help="IP version used for SSH connections."),
+ cfg.StrOpt('catalog_type',
+ default='compute',
+ help="Catalog type of the Compute service."),
+ cfg.StrOpt('log_level',
+ default="ERROR",
+ help="Level for logging compute API calls."),
+ cfg.BoolOpt('whitebox_enabled',
+ default=False,
+ help="Does the test environment support whitebox tests for "
+ "Compute?"),
+ cfg.StrOpt('db_uri',
+ default=None,
+ help="Connection string to the database of Compute service"),
+ cfg.StrOpt('source_dir',
+ default="/opt/stack/nova",
+ help="Path of nova source directory"),
+ cfg.StrOpt('config_path',
+ default='/etc/nova/nova.conf',
+ help="Path of nova configuration file"),
+ cfg.StrOpt('bin_dir',
+ default="/usr/local/bin/",
+ help="Directory containing nova binaries such as nova-manage"),
+ cfg.StrOpt('path_to_private_key',
+ default=None,
+ help="Path to a private key file for SSH access to remote "
+ "hosts"),
+ cfg.BoolOpt('disk_config_enabled_override',
+ default=True,
+ help="If false, skip config tests regardless of the "
+ "extension status"),
+]
- @property
- def path(self):
- """Path of API request"""
- return self.get("path", "/")
- @property
- def auth_url(self):
- """The Identity URL (derived)"""
- auth_url = data_utils.build_url(self.host,
- self.port,
- self.api_version,
- self.path,
- use_ssl=self.use_ssl)
- return auth_url
+def register_compute_opts(conf):
+ conf.register_group(compute_group)
+ for opt in ComputeGroup:
+ conf.register_opt(opt, group='compute')
- @property
- def use_ssl(self):
- """Specifies if we are using https."""
- return self.get("use_ssl", 'false').lower() != 'false'
+compute_admin_group = cfg.OptGroup(name='compute-admin',
+ title="Compute Admin Options")
- @property
- def strategy(self):
- """Which auth method does the environment use? (basic|keystone)"""
- return self.get("strategy", 'keystone')
+ComputeAdminGroup = [
+ cfg.StrOpt('username',
+ default='admin',
+ help="Administrative Username to use for Nova API requests."),
+ cfg.StrOpt('tenant_name',
+ default='admin',
+ help="Administrative Tenant name to use for Nova API "
+ "requests."),
+ cfg.StrOpt('password',
+ default='pass',
+ help="API key to use when authenticating as admin.",
+ secret=True),
+]
-class IdentityAdminConfig(BaseConfig):
+def register_compute_admin_opts(conf):
+ conf.register_group(compute_admin_group)
+ for opt in ComputeAdminGroup:
+ conf.register_opt(opt, group='compute-admin')
- SECTION_NAME = "identity-admin"
- @property
- def username(self):
- """Username to use for Identity Admin API requests"""
- return self.get("username", "admin")
+image_group = cfg.OptGroup(name='image',
+ title="Image Service Options")
- @property
- def tenant_name(self):
- """Tenant name to use for Identity Admin API requests"""
- return self.get("tenant_name", "admin")
+ImageGroup = [
+ cfg.StrOpt('host',
+ default='127.0.0.1',
+ help="Host IP for making Images API requests. Defaults to "
+ "'127.0.0.1'."),
+ cfg.IntOpt('port',
+ default=9292,
+ help="Listen port of the Images service."),
+ cfg.StrOpt('api_version',
+ default='1',
+ help="Version of the API"),
+ cfg.StrOpt('username',
+ default='demo',
+ help="Username to use for Images API requests. Defaults to "
+ "'demo'."),
+ cfg.StrOpt('password',
+ default='pass',
+ help="Password for user",
+ secret=True),
+ cfg.StrOpt('tenant_name',
+ default="demo",
+ help="Tenant to use for Images API requests. Defaults to "
+ "'demo'."),
+]
- @property
- def password(self):
- """API key to use for Identity Admin API requests"""
- return self.get("password", "pass")
+def register_image_opts(conf):
+ conf.register_group(image_group)
+ for opt in ImageGroup:
+ conf.register_opt(opt, group='image')
-class ComputeConfig(BaseConfig):
- SECTION_NAME = "compute"
+network_group = cfg.OptGroup(name='network',
+ title='Network Service Options')
- @property
- def allow_tenant_isolation(self):
- """
- Allows test cases to create/destroy tenants and users. This option
- enables isolated test cases and better parallel execution,
- but also requires that OpenStack Identity API admin credentials
- are known.
- """
- return self.get("allow_tenant_isolation", 'false').lower() != 'false'
+NetworkGroup = [
+ cfg.StrOpt('catalog_type',
+ default='network',
+ help='Catalog type of the Quantum service.'),
+ cfg.StrOpt('api_version',
+ default="v1.1",
+ help="Version of Quantum API"),
+ cfg.StrOpt('username',
+ default="demo",
+ help="Username to use for Quantum API requests."),
+ cfg.StrOpt('tenant_name',
+ default="demo",
+ help="Tenant name to use for Quantum API requests."),
+ cfg.StrOpt('password',
+ default="pass",
+ help="API key to use when authenticating as admin.",
+ secret=True),
+ cfg.StrOpt('tenant_network_cidr',
+ default="10.100.0.0/16",
+ help="The cidr block to allocate tenant networks from"),
+ cfg.IntOpt('tenant_network_mask_bits',
+ default=29,
+ help="The mask bits for tenant networks"),
+ cfg.BoolOpt('tenant_networks_reachable',
+ default=False,
+ help="Whether tenant network connectivity should be "
+ "evaluated directly"),
+ cfg.StrOpt('public_network_id',
+ default="",
+ help="Id of the public network that provides external "
+ "connectivity"),
+ cfg.StrOpt('public_router_id',
+ default="",
+ help="Id of the public router that provides external "
+ "connectivity"),
+]
- @property
- def allow_tenant_reuse(self):
- """
- If allow_tenant_isolation is True and a tenant that would be created
- for a given test already exists (such as from a previously-failed run),
- re-use that tenant instead of failing because of the conflict. Note
- that this would result in the tenant being deleted at the end of a
- subsequent successful run.
- """
- return self.get("allow_tenant_reuse", 'true').lower() != 'false'
- @property
- def username(self):
- """Username to use for Nova API requests."""
- return self.get("username", "demo")
+def register_network_opts(conf):
+ conf.register_group(network_group)
+ for opt in NetworkGroup:
+ conf.register_opt(opt, group='network')
- @property
- def tenant_name(self):
- """Tenant name to use for Nova API requests."""
- return self.get("tenant_name", "demo")
+network_admin_group = cfg.OptGroup(name='network-admin',
+ title="Network Admin Options")
- @property
- def password(self):
- """API key to use when authenticating."""
- return self.get("password", "pass")
+NetworkAdminGroup = [
+ cfg.StrOpt('username',
+ default='admin',
+ help="Administrative Username to use for Quantum API "
+ "requests."),
+ cfg.StrOpt('tenant_name',
+ default='admin',
+ help="Administrative Tenant name to use for Quantum API "
+ "requests."),
+ cfg.StrOpt('password',
+ default='pass',
+ help="API key to use when authenticating as admin.",
+ secret=True),
+]
- @property
- def alt_username(self):
- """Username of alternate user to use for Nova API requests."""
- return self.get("alt_username")
- @property
- def alt_tenant_name(self):
- """Alternate user's Tenant name to use for Nova API requests."""
- return self.get("alt_tenant_name")
-
- @property
- def alt_password(self):
- """API key to use when authenticating as alternate user."""
- return self.get("alt_password")
-
- @property
- def image_ref(self):
- """Valid primary image to use in tests."""
- return self.get("image_ref", "{$IMAGE_ID}")
-
- @property
- def image_ref_alt(self):
- """Valid secondary image reference to be used in tests."""
- return self.get("image_ref_alt", "{$IMAGE_ID_ALT}")
-
- @property
- def flavor_ref(self):
- """Valid primary flavor to use in tests."""
- return self.get("flavor_ref", 1)
-
- @property
- def flavor_ref_alt(self):
- """Valid secondary flavor to be used in tests."""
- return self.get("flavor_ref_alt", 2)
-
- @property
- def resize_available(self):
- """Does the test environment support resizing?"""
- return self.get("resize_available", 'false').lower() != 'false'
-
- @property
- def live_migration_available(self):
- return self.get(
- "live_migration_available", 'false').lower() == 'true'
-
- @property
- def use_block_migration_for_live_migration(self):
- return self.get(
- "use_block_migration_for_live_migration", 'false'
- ).lower() == 'true'
-
- @property
- def change_password_available(self):
- """Does the test environment support changing the admin password?"""
- return self.get("change_password_available", 'false').lower() != \
- 'false'
-
- @property
- def create_image_enabled(self):
- """Does the test environment support snapshots?"""
- return self.get("create_image_enabled", 'false').lower() != 'false'
-
- @property
- def build_interval(self):
- """Time in seconds between build status checks."""
- return float(self.get("build_interval", 10))
-
- @property
- def build_timeout(self):
- """Timeout in seconds to wait for an instance to build."""
- return float(self.get("build_timeout", 300))
-
- @property
- def run_ssh(self):
- """Does the test environment support snapshots?"""
- return self.get("run_ssh", 'false').lower() != 'false'
-
- @property
- def ssh_user(self):
- """User name used to authenticate to an instance."""
- return self.get("ssh_user", "root")
-
- @property
- def ssh_timeout(self):
- """Timeout in seconds to wait for authentcation to succeed."""
- return float(self.get("ssh_timeout", 300))
-
- @property
- def network_for_ssh(self):
- """Network used for SSH connections."""
- return self.get("network_for_ssh", "public")
-
- @property
- def ip_version_for_ssh(self):
- """IP version used for SSH connections."""
- return int(self.get("ip_version_for_ssh", 4))
-
- @property
- def catalog_type(self):
- """Catalog type of the Compute service."""
- return self.get("catalog_type", 'compute')
-
- @property
- def log_level(self):
- """Level for logging compute API calls."""
- return self.get("log_level", 'ERROR')
-
- @property
- def whitebox_enabled(self):
- """Does the test environment support whitebox tests for Compute?"""
- return self.get("whitebox_enabled", 'false').lower() != 'false'
-
- @property
- def db_uri(self):
- """Connection string to the database of Compute service"""
- return self.get("db_uri", None)
-
- @property
- def source_dir(self):
- """Path of nova source directory"""
- return self.get("source_dir", "/opt/stack/nova")
-
- @property
- def config_path(self):
- """Path of nova configuration file"""
- return self.get("config_path", "/etc/nova/nova.conf")
-
- @property
- def bin_dir(self):
- """Directory containing nova binaries such as nova-manage"""
- return self.get("bin_dir", "/usr/local/bin/")
-
- @property
- def path_to_private_key(self):
- """Path to a private key file for SSH access to remote hosts"""
- return self.get("path_to_private_key")
-
-
-class ComputeAdminConfig(BaseConfig):
-
- SECTION_NAME = "compute-admin"
-
- @property
- def username(self):
- """Administrative Username to use for Nova API requests."""
- return self.get("username", "admin")
-
- @property
- def tenant_name(self):
- """Administrative Tenant name to use for Nova API requests."""
- return self.get("tenant_name", "admin")
-
- @property
- def password(self):
- """API key to use when authenticating as admin."""
- return self.get("password", "pass")
-
-
-class ImagesConfig(BaseConfig):
-
- """
- Provides configuration information for connecting to an
- OpenStack Images service.
- """
-
- SECTION_NAME = "image"
-
- @property
- def host(self):
- """Host IP for making Images API requests. Defaults to '127.0.0.1'."""
- return self.get("host", "127.0.0.1")
-
- @property
- def port(self):
- """Listen port of the Images service."""
- return int(self.get("port", "9292"))
-
- @property
- def api_version(self):
- """Version of the API"""
- return self.get("api_version", "1")
-
- @property
- def username(self):
- """Username to use for Images API requests. Defaults to 'demo'."""
- return self.get("username", "demo")
-
- @property
- def password(self):
- """Password for user"""
- return self.get("password", "pass")
-
- @property
- def tenant_name(self):
- """Tenant to use for Images API requests. Defaults to 'demo'."""
- return self.get("tenant_name", "demo")
-
-
-class NetworkConfig(BaseConfig):
- """Provides configuration information for connecting to an OpenStack
- Network Service.
- """
-
- SECTION_NAME = "network"
-
- @property
- def catalog_type(self):
- """Catalog type of the Quantum service."""
- return self.get("catalog_type", 'network')
-
- @property
- def api_version(self):
- """Version of Quantum API"""
- return self.get("api_version", "v1.1")
-
-
-class VolumeConfig(BaseConfig):
- """Provides configuration information for connecting to an OpenStack Block
- Storage Service.
- """
-
- SECTION_NAME = "volume"
-
- @property
- def build_interval(self):
- """Time in seconds between volume availability checks."""
- return float(self.get("build_interval", 10))
-
- @property
- def build_timeout(self):
- """Timeout in seconds to wait for a volume to become available."""
- return float(self.get("build_timeout", 300))
-
- @property
- def catalog_type(self):
- """Catalog type of the Volume Service"""
- return self.get("catalog_type", 'volume')
-
-
-class ObjectStorageConfig(BaseConfig):
-
- SECTION_NAME = "object-storage"
-
- @property
- def username(self):
- """Username to use for Object-Storage API requests."""
- return self.get("username", "admin")
-
- @property
- def tenant_name(self):
- """Tenant name to use for Object-Storage API requests."""
- return self.get("tenant_name", "admin")
-
- @property
- def password(self):
- """API key to use when authenticating."""
- return self.get("password", "password")
-
- @property
- def catalog_type(self):
- """Catalog type of the Object-Storage service."""
- return self.get("catalog_type", 'object-store')
-
-
-class BotoConfig(BaseConfig):
- """Provides configuration information for connecting to EC2/S3."""
- SECTION_NAME = "boto"
+def register_network_admin_opts(conf):
+ conf.register_group(network_admin_group)
+ for opt in NetworkAdminGroup:
+ conf.register_opt(opt, group='network-admin')
- @property
- def ec2_url(self):
- """EC2 URL"""
- return self.get("ec2_url", "http://localhost:8773/services/Cloud")
- @property
- def s3_url(self):
- """S3 URL"""
- return self.get("s3_url", "http://localhost:8080")
+volume_group = cfg.OptGroup(name='volume',
+ title='Block Storage Options')
- @property
- def aws_secret(self):
- """AWS Secret Key"""
- return self.get("aws_secret")
+VolumeGroup = [
+ cfg.IntOpt('build_interval',
+ default=10,
+ help='Time in seconds between volume availability checks.'),
+ cfg.IntOpt('build_timeout',
+ default=300,
+ help='Timeout in seconds to wait for a volume to become '
+ 'available.'),
+ cfg.StrOpt('catalog_type',
+ default='volume',
+ help="Catalog type of the Volume Service"),
+]
- @property
- def aws_access(self):
- """AWS Access Key"""
- return self.get("aws_access")
- @property
- def aws_region(self):
- """AWS Region"""
- return self.get("aws_region", "RegionOne")
+def register_volume_opts(conf):
+ conf.register_group(volume_group)
+ for opt in VolumeGroup:
+ conf.register_opt(opt, group='volume')
- @property
- def s3_materials_path(self):
- return self.get("s3_materials_path",
- "/opt/stack/devstack/files/images/"
- "s3-materials/cirros-0.3.0")
- @property
- def ari_manifest(self):
- """ARI Ramdisk Image manifest"""
- return self.get("ari_manifest",
- "cirros-0.3.0-x86_64-initrd.manifest.xml")
+object_storage_group = cfg.OptGroup(name='object-storage',
+ title='Object Storage Service Options')
- @property
- def ami_manifest(self):
- """AMI Machine Image manifest"""
- return self.get("ami_manifest",
- "cirros-0.3.0-x86_64-blank.img.manifest.xml")
+ObjectStoreConfig = [
+ cfg.StrOpt('catalog_type',
+ default='object-store',
+ help="Catalog type of the Object-Storage service."),
+ cfg.StrOpt('region',
+ default=None,
+ help='The object-store region name to use.'),
+]
- @property
- def aki_manifest(self):
- """AKI Kernel Image manifest"""
- return self.get("aki_manifest",
- "cirros-0.3.0-x86_64-vmlinuz.manifest.xml")
- @property
- def instance_type(self):
- """Instance type"""
- return self.get("Instance type", "m1.tiny")
+def register_object_storage_opts(conf):
+ conf.register_group(object_storage_group)
+ for opt in ObjectStoreConfig:
+ conf.register_opt(opt, group='object-storage')
- @property
- def http_socket_timeout(self):
- """boto Http socket timeout"""
- return self.get("http_socket_timeout", "3")
+boto_group = cfg.OptGroup(name='boto',
+ title='EC2/S3 options')
+BotoConfig = [
+ cfg.StrOpt('ec2_url',
+ default="http://localhost:8773/services/Cloud",
+ help="EC2 URL"),
+ cfg.StrOpt('s3_url',
+ default="http://localhost:8080",
+ help="S3 URL"),
+ cfg.StrOpt('aws_secret',
+ default=None,
+ help="AWS Secret Key",
+ secret=True),
+ cfg.StrOpt('aws_access',
+ default=None,
+ help="AWS Access Key"),
+ cfg.StrOpt('aws_region',
+ default=None,
+ help="AWS Region"),
+ cfg.StrOpt('s3_materials_path',
+ default="/opt/stack/devstack/files/images/"
+ "s3-materials/cirros-0.3.0",
+ help="S3 Materials Path"),
+ cfg.StrOpt('ari_manifest',
+ default="cirros-0.3.0-x86_64-initrd.manifest.xml",
+ help="ARI Ramdisk Image manifest"),
+ cfg.StrOpt('ami_manifest',
+ default="cirros-0.3.0-x86_64-blank.img.manifest.xml",
+ help="AMI Machine Image manifest"),
+ cfg.StrOpt('aki_manifest',
+ default="cirros-0.3.0-x86_64-vmlinuz.manifest.xml",
+ help="AKI Kernel Image manifest"),
+ cfg.StrOpt('instance_type',
+ default="m1.tiny",
+ help="Instance type"),
+ cfg.IntOpt('http_socket_timeout',
+ default=3,
+ help="boto Http socket timeout"),
+ cfg.IntOpt('num_retries',
+ default=1,
+ help="boto num_retries on error"),
+ cfg.IntOpt('build_timeout',
+ default=60,
+ help="Status Change Timeout"),
+ cfg.IntOpt('build_interval',
+ default=1,
+ help="Status Change Test Interval"),
+]
- @property
- def build_timeout(self):
- """status change timeout"""
- return float(self.get("build_timeout", "60"))
- @property
- def build_interval(self):
- """status change test interval"""
- return float(self.get("build_interval", 1))
+def register_boto_opts(conf):
+ conf.register_group(boto_group)
+ for opt in BotoConfig:
+ conf.register_opt(opt, group='boto')
# TODO(jaypipes): Move this to a common utils (not data_utils...)
def singleton(cls):
- """Simple wrapper for classes that should only have a single instance"""
+ """Simple wrapper for classes that should only have a single instance."""
instances = {}
def getinstance():
@@ -511,9 +473,7 @@
"""Provides OpenStack configuration information."""
DEFAULT_CONFIG_DIR = os.path.join(
- os.path.abspath(
- os.path.dirname(
- os.path.dirname(__file__))),
+ os.path.abspath(os.path.dirname(os.path.dirname(__file__))),
"etc")
DEFAULT_CONFIG_FILE = "tempest.conf"
@@ -528,25 +488,37 @@
path = os.path.join(conf_dir, conf_file)
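+ # Fall back to the system-wide /etc/tempest/ config only when the default
+ # path is missing and neither TEMPEST_CONFIG_DIR nor TEMPEST_CONFIG is set.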
+ if (not os.path.isfile(path) and
+ 'TEMPEST_CONFIG_DIR' not in os.environ and
+ 'TEMPEST_CONFIG' not in os.environ):
+ path = "/etc/tempest/" + self.DEFAULT_CONFIG_FILE
+
LOG.info("Using tempest config file %s" % path)
if not os.path.exists(path):
msg = "Config file %(path)s not found" % locals()
- raise RuntimeError(msg)
+ print >> sys.stderr, RuntimeError(msg)
+ sys.exit(os.EX_NOINPUT)
- self._conf = self.load_config(path)
- self.compute = ComputeConfig(self._conf)
- self.compute_admin = ComputeAdminConfig(self._conf)
- self.identity = IdentityConfig(self._conf)
- self.identity_admin = IdentityAdminConfig(self._conf)
- self.images = ImagesConfig(self._conf)
- self.network = NetworkConfig(self._conf)
- self.volume = VolumeConfig(self._conf)
- self.object_storage = ObjectStorageConfig(self._conf)
- self.boto = BotoConfig(self._conf)
+ cfg.CONF([], project='tempest', default_config_files=[path])
- def load_config(self, path):
- """Read configuration from given path and return a config object."""
- config = ConfigParser.SafeConfigParser()
- config.read(path)
- return config
+ register_compute_opts(cfg.CONF)
+ register_identity_opts(cfg.CONF)
+ register_identity_admin_opts(cfg.CONF)
+ register_compute_admin_opts(cfg.CONF)
+ register_image_opts(cfg.CONF)
+ register_network_opts(cfg.CONF)
+ register_network_admin_opts(cfg.CONF)
+ register_volume_opts(cfg.CONF)
+ register_object_storage_opts(cfg.CONF)
+ register_boto_opts(cfg.CONF)
+ self.compute = cfg.CONF.compute
+ self.compute_admin = cfg.CONF['compute-admin']
+ self.identity = cfg.CONF.identity
+ self.identity_admin = cfg.CONF['identity-admin']
+ self.images = cfg.CONF.image
+ self.network = cfg.CONF.network
+ self.network_admin = cfg.CONF['network-admin']
+ self.volume = cfg.CONF.volume
+ self.object_storage = cfg.CONF['object-storage']
+ self.boto = cfg.CONF.boto
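
For reference, a minimal sketch of how test code consumes the migrated options
once TempestConfig has run the registration functions above (attribute names
follow the assignments in this hunk)::

    from tempest import config

    CONF = config.TempestConfig()

    # Grouped options become plain attributes on the parsed groups.
    identity_url = CONF.identity.auth_url     # derived from host/port/version/path
    image = CONF.compute.image_ref            # "{$IMAGE_ID}" unless overridden
    admin_user = CONF.compute_admin.username  # the 'compute-admin' group
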
diff --git a/tempest/exceptions.py b/tempest/exceptions.py
index 016de69..178c2f2 100644
--- a/tempest/exceptions.py
+++ b/tempest/exceptions.py
@@ -66,11 +66,11 @@
class AddImageException(TempestException):
- message = "Image %(image_id) failed to become ACTIVE in the allotted time"
+ message = "Image %(image_id)s failed to become ACTIVE in the allotted time"
class EC2RegisterImageException(TempestException):
- message = ("Image %(image_id) failed to become 'available' "
+ message = ("Image %(image_id)s failed to become 'available' "
"in the allotted time")
@@ -118,7 +118,7 @@
class SSHExecCommandFailed(TempestException):
- ''' Raised when remotely executed command returns nonzero status. '''
+ """Raised when remotely executed command returns nonzero status."""
message = ("Command '%(command)s', exit status: %(exit_status)d, "
"Error:\n%(strerror)s")
@@ -133,3 +133,17 @@
class TearDownException(TempestException):
message = "%(num)d cleanUp operation failed"
+
+
+class RFCViolation(TempestException):
+ message = "RFC Violation"
+
+
+class ResponseWithNonEmptyBody(RFCViolation):
+ message = ("RFC Violation! Response with %(status)d HTTP Status Code "
+ "MUST NOT have a body")
+
+
+class ResponseWithEntity(RFCViolation):
+ message = ("RFC Violation! Response with 205 HTTP Status Code "
+ "MUST NOT have an entity")
diff --git a/tempest/manager.py b/tempest/manager.py
index 59743e5..513e5d9 100644
--- a/tempest/manager.py
+++ b/tempest/manager.py
@@ -29,19 +29,19 @@
import tempest.config
from tempest import exceptions
# Tempest REST Fuzz testing client libs
+from tempest.services.compute.json import console_output_client
+from tempest.services.compute.json import extensions_client
+from tempest.services.compute.json import flavors_client
+from tempest.services.compute.json import floating_ips_client
+from tempest.services.compute.json import images_client
+from tempest.services.compute.json import keypairs_client
+from tempest.services.compute.json import limits_client
+from tempest.services.compute.json import quotas_client
+from tempest.services.compute.json import security_groups_client
+from tempest.services.compute.json import servers_client
+from tempest.services.compute.json import volumes_extensions_client
from tempest.services.network.json import network_client
from tempest.services.volume.json import volumes_client
-from tempest.services.compute.json import images_client
-from tempest.services.compute.json import flavors_client
-from tempest.services.compute.json import servers_client
-from tempest.services.compute.json import limits_client
-from tempest.services.compute.json import extensions_client
-from tempest.services.compute.json import security_groups_client
-from tempest.services.compute.json import floating_ips_client
-from tempest.services.compute.json import keypairs_client
-from tempest.services.compute.json import volumes_extensions_client
-from tempest.services.compute.json import console_output_client
-from tempest.services.compute.json import quotas_client
NetworkClient = network_client.NetworkClient
ImagesClient = images_client.ImagesClientJSON
@@ -54,7 +54,7 @@
KeyPairsClient = keypairs_client.KeyPairsClientJSON
VolumesExtensionsClient = volumes_extensions_client.VolumesExtensionsClientJSON
VolumesClient = volumes_client.VolumesClientJSON
-ConsoleOutputsClient = console_output_client.ConsoleOutputsClient
+ConsoleOutputsClient = console_output_client.ConsoleOutputsClientJSON
QuotasClient = quotas_client.QuotasClient
LOG = logging.getLogger(__name__)
@@ -145,12 +145,16 @@
endpoint_type='publicURL')
return glanceclient.Client('1', endpoint=endpoint, token=token)
- def _get_identity_client(self):
+ def _get_identity_client(self, username=None, password=None,
+ tenant_name=None):
# This identity client is not intended to check the security
- # of the identity service, so use admin credentials.
- username = self.config.identity_admin.username
- password = self.config.identity_admin.password
- tenant_name = self.config.identity_admin.tenant_name
+ # of the identity service, so use admin credentials by default.
+ if not username:
+ username = self.config.identity_admin.username
+ if not password:
+ password = self.config.identity_admin.password
+ if not tenant_name:
+ tenant_name = self.config.identity_admin.tenant_name
if None in (username, password, tenant_name):
msg = ("Missing required credentials for identity client. "
@@ -166,10 +170,15 @@
auth_url=auth_url)
def _get_network_client(self):
- # TODO(mnewby) add network-specific auth configuration
- username = self.config.compute.username
- password = self.config.compute.password
- tenant_name = self.config.compute.tenant_name
+ # The intended configuration is for the network client to have
+ # admin privileges and indicate for whom resources are being
+ # created via a 'tenant_id' parameter. This will often be
+ # preferable to authenticating as a specific user because
+ # working with certain resources (public routers and networks)
+ # often requires admin privileges anyway.
+ username = self.config.network_admin.username
+ password = self.config.network_admin.password
+ tenant_name = self.config.network_admin.tenant_name
if None in (username, password, tenant_name):
msg = ("Missing required credentials for network client. "
@@ -249,9 +258,9 @@
def __init__(self):
conf = tempest.config.TempestConfig()
super(ComputeFuzzClientAltManager, self).__init__(
- conf.compute.alt_username,
- conf.compute.alt_password,
- conf.compute.alt_tenant_name)
+ conf.compute.alt_username,
+ conf.compute.alt_password,
+ conf.compute.alt_tenant_name)
class ComputeFuzzClientAdminManager(Manager):
@@ -264,6 +273,6 @@
def __init__(self):
conf = tempest.config.TempestConfig()
super(ComputeFuzzClientAdminManager, self).__init__(
- conf.compute_admin.username,
- conf.compute_admin.password,
- conf.compute_admin.tenant_name)
+ conf.compute_admin.username,
+ conf.compute_admin.password,
+ conf.compute_admin.tenant_name)
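
A short sketch of the managers touched above, assuming _get_identity_client is
defined on the shared Manager base (as the surrounding hunks suggest); the
'demo'/'pass' values are illustrative credentials only::

    import tempest.manager

    # Both managers pull their credentials from the config groups shown earlier.
    alt_mgr = tempest.manager.ComputeFuzzClientAltManager()
    admin_mgr = tempest.manager.ComputeFuzzClientAdminManager()

    # Omitting arguments keeps the old behaviour (identity_admin credentials).
    keystone = admin_mgr._get_identity_client()

    # Credentials may now be supplied explicitly per call.
    keystone = admin_mgr._get_identity_client(username='demo',
                                              password='pass',
                                              tenant_name='demo')
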
diff --git a/tempest/openstack/__init__.py b/tempest/openstack/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/openstack/__init__.py
diff --git a/tempest/openstack/common/__init__.py b/tempest/openstack/common/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/openstack/common/__init__.py
diff --git a/tempest/openstack/common/cfg.py b/tempest/openstack/common/cfg.py
new file mode 100644
index 0000000..1bbfe6a
--- /dev/null
+++ b/tempest/openstack/common/cfg.py
@@ -0,0 +1,1787 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+r"""
+Configuration options which may be set on the command line or in config files.
+
+The schema for each option is defined using the Opt sub-classes, e.g.:
+
+::
+
+ common_opts = [
+ cfg.StrOpt('bind_host',
+ default='0.0.0.0',
+ help='IP address to listen on'),
+ cfg.IntOpt('bind_port',
+ default=9292,
+ help='Port number to listen on')
+ ]
+
+Options can be strings, integers, floats, booleans, lists or 'multi strings'::
+
+ enabled_apis_opt = cfg.ListOpt('enabled_apis',
+ default=['ec2', 'osapi_compute'],
+ help='List of APIs to enable by default')
+
+ DEFAULT_EXTENSIONS = [
+ 'nova.api.openstack.compute.contrib.standard_extensions'
+ ]
+ osapi_compute_extension_opt = cfg.MultiStrOpt('osapi_compute_extension',
+ default=DEFAULT_EXTENSIONS)
+
+Option schemas are registered with the config manager at runtime, but before
+the option is referenced::
+
+ class ExtensionManager(object):
+
+ enabled_apis_opt = cfg.ListOpt(...)
+
+ def __init__(self, conf):
+ self.conf = conf
+ self.conf.register_opt(enabled_apis_opt)
+ ...
+
+ def _load_extensions(self):
+ for ext_factory in self.conf.osapi_compute_extension:
+ ....
+
+A common usage pattern is for each option schema to be defined in the module or
+class which uses the option::
+
+ opts = ...
+
+ def add_common_opts(conf):
+ conf.register_opts(opts)
+
+ def get_bind_host(conf):
+ return conf.bind_host
+
+ def get_bind_port(conf):
+ return conf.bind_port
+
+An option may optionally be made available via the command line. Such options
+must be registered with the config manager before the command line is parsed
+(for the purposes of --help and CLI arg validation)::
+
+ cli_opts = [
+ cfg.BoolOpt('verbose',
+ short='v',
+ default=False,
+ help='Print more verbose output'),
+ cfg.BoolOpt('debug',
+ short='d',
+ default=False,
+ help='Print debugging output'),
+ ]
+
+ def add_common_opts(conf):
+ conf.register_cli_opts(cli_opts)
+
+The config manager has two CLI options defined by default, --config-file
+and --config-dir::
+
+ class ConfigOpts(object):
+
+ def __call__(self, ...):
+
+ opts = [
+ MultiStrOpt('config-file',
+ ...),
+ StrOpt('config-dir',
+ ...),
+ ]
+
+ self.register_cli_opts(opts)
+
+Option values are parsed from any supplied config files using
+openstack.common.iniparser. If none are specified, a default set is used
+e.g. glance-api.conf and glance-common.conf::
+
+ glance-api.conf:
+ [DEFAULT]
+ bind_port = 9292
+
+ glance-common.conf:
+ [DEFAULT]
+ bind_host = 0.0.0.0
+
+Option values in config files override those on the command line. Config files
+are parsed in order, with values in later files overriding those in earlier
+files.
+
+The parsing of CLI args and config files is initiated by invoking the config
+manager e.g.::
+
+ conf = ConfigOpts()
+ conf.register_opt(BoolOpt('verbose', ...))
+ conf(sys.argv[1:])
+ if conf.verbose:
+ ...
+
+Options can be registered as belonging to a group::
+
+ rabbit_group = cfg.OptGroup(name='rabbit',
+ title='RabbitMQ options')
+
+ rabbit_host_opt = cfg.StrOpt('host',
+ default='localhost',
+ help='IP/hostname to listen on'),
+ rabbit_port_opt = cfg.IntOpt('port',
+ default=5672,
+ help='Port number to listen on')
+
+ def register_rabbit_opts(conf):
+ conf.register_group(rabbit_group)
+ # options can be registered under a group in either of these ways:
+ conf.register_opt(rabbit_host_opt, group=rabbit_group)
+ conf.register_opt(rabbit_port_opt, group='rabbit')
+
+If no group attributes are required other than the group name, the group
+need not be explicitly registered e.g.
+
+ def register_rabbit_opts(conf):
+ # The group will automatically be created, equivalent to calling::
+ # conf.register_group(OptGroup(name='rabbit'))
+ conf.register_opt(rabbit_port_opt, group='rabbit')
+
+If no group is specified, options belong to the 'DEFAULT' section of config
+files::
+
+ glance-api.conf:
+ [DEFAULT]
+ bind_port = 9292
+ ...
+
+ [rabbit]
+ host = localhost
+ port = 5672
+ use_ssl = False
+ userid = guest
+ password = guest
+ virtual_host = /
+
+Command-line options in a group are automatically prefixed with the
+group name::
+
+ --rabbit-host localhost --rabbit-port 9999
+
+Option values in the default group are referenced as attributes/properties on
+the config manager; groups are also attributes on the config manager, with
+attributes for each of the options associated with the group::
+
+ server.start(app, conf.bind_port, conf.bind_host, conf)
+
+ self.connection = kombu.connection.BrokerConnection(
+ hostname=conf.rabbit.host,
+ port=conf.rabbit.port,
+ ...)
+
+Option values may reference other values using PEP 292 string substitution::
+
+ opts = [
+ cfg.StrOpt('state_path',
+ default=os.path.join(os.path.dirname(__file__), '../'),
+ help='Top-level directory for maintaining nova state'),
+ cfg.StrOpt('sqlite_db',
+ default='nova.sqlite',
+ help='file name for sqlite'),
+ cfg.StrOpt('sql_connection',
+ default='sqlite:///$state_path/$sqlite_db',
+ help='connection string for sql database'),
+ ]
+
+Note that interpolation can be avoided by using '$$'.
+
+Options may be declared as required so that an error is raised if the user
+does not supply a value for the option.
+
+Options may be declared as secret so that their values are not leaked into
+log files::
+
+ opts = [
+ cfg.StrOpt('s3_store_access_key', secret=True),
+ cfg.StrOpt('s3_store_secret_key', secret=True),
+ ...
+ ]
+
+This module also contains a global instance of the CommonConfigOpts class
+in order to support a common usage pattern in OpenStack::
+
+ from tempest.openstack.common import cfg
+
+ opts = [
+ cfg.StrOpt('bind_host', default='0.0.0.0'),
+ cfg.IntOpt('bind_port', default=9292),
+ ]
+
+ CONF = cfg.CONF
+ CONF.register_opts(opts)
+
+ def start(server, app):
+ server.start(app, CONF.bind_port, CONF.bind_host)
+
+Positional command line arguments are supported via a 'positional' Opt
+constructor argument::
+
+ >>> CONF.register_cli_opt(MultiStrOpt('bar', positional=True))
+ True
+ >>> CONF(['a', 'b'])
+ >>> CONF.bar
+ ['a', 'b']
+
+It is also possible to use argparse "sub-parsers" to parse additional
+command line arguments using the SubCommandOpt class:
+
+ >>> def add_parsers(subparsers):
+ ... list_action = subparsers.add_parser('list')
+ ... list_action.add_argument('id')
+ ...
+ >>> CONF.register_cli_opt(SubCommandOpt('action', handler=add_parsers))
+ True
+ >>> CONF(['list', '10'])
+ >>> CONF.action.name, CONF.action.id
+ ('list', '10')
+
+"""
+
+import argparse
+import collections
+import copy
+import functools
+import glob
+import os
+import string
+import sys
+
+from tempest.openstack.common import iniparser
+
+
+class Error(Exception):
+ """Base class for cfg exceptions."""
+
+ def __init__(self, msg=None):
+ self.msg = msg
+
+ def __str__(self):
+ return self.msg
+
+
+class ArgsAlreadyParsedError(Error):
+ """Raised if a CLI opt is registered after parsing."""
+
+ def __str__(self):
+ ret = "arguments already parsed"
+ if self.msg:
+ ret += ": " + self.msg
+ return ret
+
+
+class NoSuchOptError(Error, AttributeError):
+ """Raised if an opt which doesn't exist is referenced."""
+
+ def __init__(self, opt_name, group=None):
+ self.opt_name = opt_name
+ self.group = group
+
+ def __str__(self):
+ if self.group is None:
+ return "no such option: %s" % self.opt_name
+ else:
+ return "no such option in group %s: %s" % (self.group.name,
+ self.opt_name)
+
+
+class NoSuchGroupError(Error):
+ """Raised if a group which doesn't exist is referenced."""
+
+ def __init__(self, group_name):
+ self.group_name = group_name
+
+ def __str__(self):
+ return "no such group: %s" % self.group_name
+
+
+class DuplicateOptError(Error):
+ """Raised if multiple opts with the same name are registered."""
+
+ def __init__(self, opt_name):
+ self.opt_name = opt_name
+
+ def __str__(self):
+ return "duplicate option: %s" % self.opt_name
+
+
+class RequiredOptError(Error):
+ """Raised if an option is required but no value is supplied by the user."""
+
+ def __init__(self, opt_name, group=None):
+ self.opt_name = opt_name
+ self.group = group
+
+ def __str__(self):
+ if self.group is None:
+ return "value required for option: %s" % self.opt_name
+ else:
+ return "value required for option: %s.%s" % (self.group.name,
+ self.opt_name)
+
+
+class TemplateSubstitutionError(Error):
+ """Raised if an error occurs substituting a variable in an opt value."""
+
+ def __str__(self):
+ return "template substitution error: %s" % self.msg
+
+
+class ConfigFilesNotFoundError(Error):
+ """Raised if one or more config files are not found."""
+
+ def __init__(self, config_files):
+ self.config_files = config_files
+
+ def __str__(self):
+ return ('Failed to read some config files: %s' %
+ string.join(self.config_files, ','))
+
+
+class ConfigFileParseError(Error):
+ """Raised if there is an error parsing a config file."""
+
+ def __init__(self, config_file, msg):
+ self.config_file = config_file
+ self.msg = msg
+
+ def __str__(self):
+ return 'Failed to parse %s: %s' % (self.config_file, self.msg)
+
+
+class ConfigFileValueError(Error):
+ """Raised if a config file value does not match its opt type."""
+ pass
+
+
+def _fixpath(p):
+ """Apply tilde expansion and absolutization to a path."""
+ return os.path.abspath(os.path.expanduser(p))
+
+
+def _get_config_dirs(project=None):
+ """Return a list of directors where config files may be located.
+
+ :param project: an optional project name
+
+ If a project is specified, the following directories are returned::
+
+ ~/.${project}/
+ ~/
+ /etc/${project}/
+ /etc/
+
+ Otherwise, these directories::
+
+ ~/
+ /etc/
+ """
+ cfg_dirs = [
+ _fixpath(os.path.join('~', '.' + project)) if project else None,
+ _fixpath('~'),
+ os.path.join('/etc', project) if project else None,
+ '/etc'
+ ]
+
+ return filter(bool, cfg_dirs)
+
+
+def _search_dirs(dirs, basename, extension=""):
+ """Search a list of directories for a given filename.
+
+ Iterates over the supplied directories, returning the first file
+ found with the supplied name and extension.
+
+ :param dirs: a list of directories
+ :param basename: the filename, e.g. 'glance-api'
+ :param extension: the file extension, e.g. '.conf'
+ :returns: the path to a matching file, or None
+ """
+ for d in dirs:
+ path = os.path.join(d, '%s%s' % (basename, extension))
+ if os.path.exists(path):
+ return path
+
+
+def find_config_files(project=None, prog=None, extension='.conf'):
+ """Return a list of default configuration files.
+
+ :param project: an optional project name
+ :param prog: the program name, defaulting to the basename of sys.argv[0]
+ :param extension: the type of the config file
+
+ We default to two config files: [${project}.conf, ${prog}.conf]
+
+ And we look for those config files in the following directories::
+
+ ~/.${project}/
+ ~/
+ /etc/${project}/
+ /etc/
+
+ We return an absolute path for (at most) one of each of the default config
+ files, for the topmost directory it exists in.
+
+ For example, if project=foo, prog=bar and /etc/foo/foo.conf, /etc/bar.conf
+ and ~/.foo/bar.conf all exist, then we return ['/etc/foo/foo.conf',
+ '~/.foo/bar.conf']
+
+ If no project name is supplied, we only look for ${prog}.conf.
+ """
+ if prog is None:
+ prog = os.path.basename(sys.argv[0])
+
+ cfg_dirs = _get_config_dirs(project)
+
+ config_files = []
+ if project:
+ config_files.append(_search_dirs(cfg_dirs, project, extension))
+ config_files.append(_search_dirs(cfg_dirs, prog, extension))
+
+ return filter(bool, config_files)
+
+
+def _is_opt_registered(opts, opt):
+ """Check whether an opt with the same name is already registered.
+
+ The same opt may be registered multiple times, with only the first
+ registration having any effect. However, it is an error to attempt
+ to register a different opt with the same name.
+
+ :param opts: the set of opts already registered
+ :param opt: the opt to be registered
+ :returns: True if the opt was previously registered, False otherwise
+ :raises: DuplicateOptError if a naming conflict is detected
+ """
+ if opt.dest in opts:
+ if opts[opt.dest]['opt'] != opt:
+ raise DuplicateOptError(opt.name)
+ return True
+ else:
+ return False
+
+
+def set_defaults(opts, **kwargs):
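+ # Override the default of the first opt whose dest matches a supplied
+ # keyword argument; note that the loop stops at the first match.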
+ for opt in opts:
+ if opt.dest in kwargs:
+ opt.default = kwargs[opt.dest]
+ break
+
+
+class Opt(object):
+
+ """Base class for all configuration options.
+
+ An Opt object has no public methods, but has a number of public string
+ properties:
+
+ name:
+ the name of the option, which may include hyphens
+ dest:
+ the (hyphen-less) ConfigOpts property which contains the option value
+ short:
+ a single character CLI option name
+ default:
+ the default value of the option
+ positional:
+ True if the option is a positional CLI argument
+ metavar:
+ the name shown as the argument to a CLI option in --help output
+ help:
+ a string explaining how the option's value is used
+ """
+ multi = False
+
+ def __init__(self, name, dest=None, short=None, default=None,
+ positional=False, metavar=None, help=None,
+ secret=False, required=False, deprecated_name=None):
+ """Construct an Opt object.
+
+ The only required parameter is the option's name. However, it is
+ common to also supply a default and help string for all options.
+
+ :param name: the option's name
+ :param dest: the name of the corresponding ConfigOpts property
+ :param short: a single character CLI option name
+ :param default: the default value of the option
+ :param positional: True if the option is a positional CLI argument
+ :param metavar: the option argument to show in --help
+ :param help: an explanation of how the option is used
+ :param secret: true iff the value should be obfuscated in log output
+ :param required: true iff a value must be supplied for this option
+ :param deprecated_name: deprecated name option. Acts like an alias
+ """
+ self.name = name
+ if dest is None:
+ self.dest = self.name.replace('-', '_')
+ else:
+ self.dest = dest
+ self.short = short
+ self.default = default
+ self.positional = positional
+ self.metavar = metavar
+ self.help = help
+ self.secret = secret
+ self.required = required
+ if deprecated_name is not None:
+ self.deprecated_name = deprecated_name.replace('-', '_')
+ else:
+ self.deprecated_name = None
+
+ def __ne__(self, another):
+ return vars(self) != vars(another)
+
+ def _get_from_config_parser(self, cparser, section):
+ """Retrieves the option value from a MultiConfigParser object.
+
+ This is the method ConfigOpts uses to look up the option value from
+ config files. Most opt types override this method in order to perform
+ type appropriate conversion of the returned value.
+
+ :param cparser: a ConfigParser object
+ :param section: a section name
+ """
+ return self._cparser_get_with_deprecated(cparser, section)
+
+ def _cparser_get_with_deprecated(self, cparser, section):
+ """If cannot find option as dest try deprecated_name alias."""
+ if self.deprecated_name is not None:
+ return cparser.get(section, [self.dest, self.deprecated_name])
+ return cparser.get(section, [self.dest])
+
+ def _add_to_cli(self, parser, group=None):
+ """Makes the option available in the command line interface.
+
+ This is the method ConfigOpts uses to add the opt to the CLI interface
+ as appropriate for the opt type. Some opt types may extend this method,
+ others may just extend the helper methods it uses.
+
+ :param parser: the CLI option parser
+ :param group: an optional OptGroup object
+ """
+ container = self._get_argparse_container(parser, group)
+ kwargs = self._get_argparse_kwargs(group)
+ prefix = self._get_argparse_prefix('', group)
+ self._add_to_argparse(container, self.name, self.short, kwargs, prefix,
+ self.positional, self.deprecated_name)
+
+ def _add_to_argparse(self, container, name, short, kwargs, prefix='',
+ positional=False, deprecated_name=None):
+ """Add an option to an argparse parser or group.
+
+ :param container: an argparse._ArgumentGroup object
+ :param name: the opt name
+ :param short: the short opt name
+ :param kwargs: the keyword arguments for add_argument()
+ :param prefix: an optional prefix to prepend to the opt name
+ :param positional: whether the option is a positional CLI argument
+ :raises: DuplicateOptError if a naming conflict is detected
+ """
+ def hyphen(arg):
+ return arg if not positional else ''
+
+ args = [hyphen('--') + prefix + name]
+ if short:
+ args.append(hyphen('-') + short)
+ if deprecated_name:
+ args.append(hyphen('--') + prefix + deprecated_name)
+
+ try:
+ container.add_argument(*args, **kwargs)
+ except argparse.ArgumentError as e:
+ raise DuplicateOptError(e)
+
+ def _get_argparse_container(self, parser, group):
+ """Returns an argparse._ArgumentGroup.
+
+ :param parser: an argparse.ArgumentParser
+ :param group: an (optional) OptGroup object
+ :returns: an argparse._ArgumentGroup if group is given, else parser
+ """
+ if group is not None:
+ return group._get_argparse_group(parser)
+ else:
+ return parser
+
+ def _get_argparse_kwargs(self, group, **kwargs):
+ """Build a dict of keyword arguments for argparse's add_argument().
+
+ Most opt types extend this method to customize the behaviour of the
+ options added to argparse.
+
+ :param group: an optional group
+ :param kwargs: optional keyword arguments to add to
+ :returns: a dict of keyword arguments
+ """
+ if not self.positional:
+ dest = self.dest
+ if group is not None:
+ dest = group.name + '_' + dest
+ kwargs['dest'] = dest
+ else:
+ kwargs['nargs'] = '?'
+ kwargs.update({'default': None,
+ 'metavar': self.metavar,
+ 'help': self.help, })
+ return kwargs
+
+ def _get_argparse_prefix(self, prefix, group):
+ """Build a prefix for the CLI option name, if required.
+
+ CLI options in a group are prefixed with the group's name in order
+ to avoid conflicts between similarly named options in different
+ groups.
+
+ :param prefix: an existing prefix to append to (e.g. 'no' or '')
+ :param group: an optional OptGroup object
+ :returns: a CLI option prefix including the group name, if appropriate
+ """
+ if group is not None:
+ return group.name + '-' + prefix
+ else:
+ return prefix
+
+
+class StrOpt(Opt):
+ """
+ String opts do not have their values transformed and are returned as
+ str objects.
+ """
+ pass
+
+
+class BoolOpt(Opt):
+
+ """
+ Bool opts are set to True or False on the command line using --optname or
+ --nooptname respectively.
+
+ In config files, boolean values are case insensitive and can be set using
+ 1/0, yes/no, true/false or on/off.
+ """
+
+ _boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
+ '0': False, 'no': False, 'false': False, 'off': False}
+
+ def __init__(self, *args, **kwargs):
+ if 'positional' in kwargs:
+ raise ValueError('positional boolean args not supported')
+ super(BoolOpt, self).__init__(*args, **kwargs)
+
+ def _get_from_config_parser(self, cparser, section):
+ """Retrieve the opt value as a boolean from ConfigParser."""
+ def convert_bool(v):
+ value = self._boolean_states.get(v.lower())
+ if value is None:
+ raise ValueError('Unexpected boolean value %r' % v)
+
+ return value
+
+ return [convert_bool(v) for v in
+ self._cparser_get_with_deprecated(cparser, section)]
+
+ def _add_to_cli(self, parser, group=None):
+ """Extends the base class method to add the --nooptname option."""
+ super(BoolOpt, self)._add_to_cli(parser, group)
+ self._add_inverse_to_argparse(parser, group)
+
+ def _add_inverse_to_argparse(self, parser, group):
+ """Add the --nooptname option to the option parser."""
+ container = self._get_argparse_container(parser, group)
+ kwargs = self._get_argparse_kwargs(group, action='store_false')
+ prefix = self._get_argparse_prefix('no', group)
+ kwargs["help"] = "The inverse of --" + self.name
+ self._add_to_argparse(container, self.name, None, kwargs, prefix,
+ self.positional, self.deprecated_name)
+
+ def _get_argparse_kwargs(self, group, action='store_true', **kwargs):
+ """Extends the base argparse keyword dict for boolean options."""
+
+ kwargs = super(BoolOpt, self)._get_argparse_kwargs(group, **kwargs)
+
+ # metavar has no effect for BoolOpt
+ if 'metavar' in kwargs:
+ del kwargs['metavar']
+
+ if action != 'store_true':
+ action = 'store_false'
+
+ kwargs['action'] = action
+
+ return kwargs
+
+
+class IntOpt(Opt):
+
+ """Int opt values are converted to integers using the int() builtin."""
+
+ def _get_from_config_parser(self, cparser, section):
+ """Retrieve the opt value as a integer from ConfigParser."""
+ return [int(v) for v in self._cparser_get_with_deprecated(cparser,
+ section)]
+
+ def _get_argparse_kwargs(self, group, **kwargs):
+ """Extends the base argparse keyword dict for integer options."""
+ return super(IntOpt,
+ self)._get_argparse_kwargs(group, type=int, **kwargs)
+
+
+class FloatOpt(Opt):
+
+ """Float opt values are converted to floats using the float() builtin."""
+
+ def _get_from_config_parser(self, cparser, section):
+ """Retrieve the opt value as a float from ConfigParser."""
+ return [float(v) for v in
+ self._cparser_get_with_deprecated(cparser, section)]
+
+ def _get_argparse_kwargs(self, group, **kwargs):
+ """Extends the base argparse keyword dict for float options."""
+ return super(FloatOpt, self)._get_argparse_kwargs(group,
+ type=float, **kwargs)
+
+
+class ListOpt(Opt):
+
+ """
+ List opt values are simple string values separated by commas. The opt value
+ is a list containing these strings.
+ """
+
+ class _StoreListAction(argparse.Action):
+ """
+ An argparse action for parsing an option value into a list.
+ """
+ def __call__(self, parser, namespace, values, option_string=None):
+ if values is not None:
+ values = [a.strip() for a in values.split(',')]
+ setattr(namespace, self.dest, values)
+
+ def _get_from_config_parser(self, cparser, section):
+ """Retrieve the opt value as a list from ConfigParser."""
+ return [[a.strip() for a in v.split(',')] for v in
+ self._cparser_get_with_deprecated(cparser, section)]
+
+ def _get_argparse_kwargs(self, group, **kwargs):
+ """Extends the base argparse keyword dict for list options."""
+ return Opt._get_argparse_kwargs(self,
+ group,
+ action=ListOpt._StoreListAction,
+ **kwargs)
+
+
+class MultiStrOpt(Opt):
+
+ """
+ Multistr opt values are string opts which may be specified multiple times.
+ The opt value is a list containing all the string values specified.
+ """
+ multi = True
+
+ def _get_argparse_kwargs(self, group, **kwargs):
+ """Extends the base argparse keyword dict for multi str options."""
+ kwargs = super(MultiStrOpt, self)._get_argparse_kwargs(group)
+ if not self.positional:
+ kwargs['action'] = 'append'
+ else:
+ kwargs['nargs'] = '*'
+ return kwargs
+
+ def _cparser_get_with_deprecated(self, cparser, section):
+ """If cannot find option as dest try deprecated_name alias."""
+ if self.deprecated_name is not None:
+ return cparser.get(section, [self.dest, self.deprecated_name],
+ multi=True)
+ return cparser.get(section, [self.dest], multi=True)
+
+
+class SubCommandOpt(Opt):
+
+ """
+ Sub-command options allow argparse sub-parsers to be used to parse
+ additional command line arguments.
+
+ The handler argument to the SubCommandOpt constructor is a callable
+ which is supplied an argparse subparsers object. Use this handler
+ callable to add sub-parsers.
+
+ The opt value is a SubCommandAttr object with the name of the chosen
+ sub-parser stored in the 'name' attribute and the values of other
+ sub-parser arguments available as additional attributes.
+ """
+
+ def __init__(self, name, dest=None, handler=None,
+ title=None, description=None, help=None):
+ """Construct an sub-command parsing option.
+
+ This behaves similarly to other Opt sub-classes but adds a
+ 'handler' argument. The handler is a callable which is supplied
+ a subparsers object when invoked. The add_parser() method on
+ this subparsers object can be used to register parsers for
+ sub-commands.
+
+ :param name: the option's name
+ :param dest: the name of the corresponding ConfigOpts property
+ :param title: title of the sub-commands group in help output
+ :param description: description of the group in help output
+ :param help: a help string giving an overview of available sub-commands
+ """
+ super(SubCommandOpt, self).__init__(name, dest=dest, help=help)
+ self.handler = handler
+ self.title = title
+ self.description = description
+
+ def _add_to_cli(self, parser, group=None):
+ """Add argparse sub-parsers and invoke the handler method."""
+ dest = self.dest
+ if group is not None:
+ dest = group.name + '_' + dest
+
+ subparsers = parser.add_subparsers(dest=dest,
+ title=self.title,
+ description=self.description,
+ help=self.help)
+
+ if self.handler is not None:
+ self.handler(subparsers)
+
+
+class OptGroup(object):
+
+ """
+ Represents a group of opts.
+
+ CLI opts in the group are automatically prefixed with the group name.
+
+ Each group corresponds to a section in config files.
+
+ An OptGroup object has no public methods, but has a number of public string
+ properties:
+
+ name:
+ the name of the group
+ title:
+ the group title as displayed in --help
+ help:
+ the group description as displayed in --help
+ """
+
+ def __init__(self, name, title=None, help=None):
+ """Constructs an OptGroup object.
+
+ :param name: the group name
+ :param title: the group title for --help
+ :param help: the group description for --help
+ """
+ self.name = name
+ if title is None:
+ self.title = "%s options" % title
+ else:
+ self.title = title
+ self.help = help
+
+ self._opts = {} # dict of dicts of (opt:, override:, default:)
+ self._argparse_group = None
+
+ def _register_opt(self, opt, cli=False):
+ """Add an opt to this group.
+
+ :param opt: an Opt object
+ :param cli: whether this is a CLI option
+ :returns: False if previously registered, True otherwise
+ :raises: DuplicateOptError if a naming conflict is detected
+ """
+ if _is_opt_registered(self._opts, opt):
+ return False
+
+ self._opts[opt.dest] = {'opt': opt, 'cli': cli}
+
+ return True
+
+ def _unregister_opt(self, opt):
+ """Remove an opt from this group.
+
+ :param opt: an Opt object
+ """
+ if opt.dest in self._opts:
+ del self._opts[opt.dest]
+
+ def _get_argparse_group(self, parser):
+ """Build an argparse._ArgumentGroup for this group."""
+ if self._argparse_group is None:
+ self._argparse_group = parser.add_argument_group(self.title,
+ self.help)
+ return self._argparse_group
+
+ def _clear(self):
+ """Clear this group's option parsing state."""
+ self._argparse_group = None
+
+
+class ParseError(iniparser.ParseError):
+ def __init__(self, msg, lineno, line, filename):
+ super(ParseError, self).__init__(msg, lineno, line)
+ self.filename = filename
+
+ def __str__(self):
+ return 'at %s:%d, %s: %r' % (self.filename, self.lineno,
+ self.msg, self.line)
+
+
+class ConfigParser(iniparser.BaseParser):
+ def __init__(self, filename, sections):
+ super(ConfigParser, self).__init__()
+ self.filename = filename
+ self.sections = sections
+ self.section = None
+
+ def parse(self):
+ with open(self.filename) as f:
+ return super(ConfigParser, self).parse(f)
+
+ def new_section(self, section):
+ self.section = section
+ self.sections.setdefault(self.section, {})
+
+ def assignment(self, key, value):
+ if not self.section:
+ raise self.error_no_section()
+
+ self.sections[self.section].setdefault(key, [])
+ self.sections[self.section][key].append('\n'.join(value))
+
+ def parse_exc(self, msg, lineno, line=None):
+ return ParseError(msg, lineno, line, self.filename)
+
+ def error_no_section(self):
+ return self.parse_exc('Section must be started before assignment',
+ self.lineno)
+
+
+class MultiConfigParser(object):
+ def __init__(self):
+ self.parsed = []
+
+ def read(self, config_files):
+ read_ok = []
+
+ for filename in config_files:
+ sections = {}
+ parser = ConfigParser(filename, sections)
+
+ try:
+ parser.parse()
+ except IOError:
+ continue
+ self.parsed.insert(0, sections)
+ read_ok.append(filename)
+
+ return read_ok
+
+ def get(self, section, names, multi=False):
+ rvalue = []
+ for sections in self.parsed:
+ if section not in sections:
+ continue
+ for name in names:
+ if name in sections[section]:
+ if multi:
+ rvalue = sections[section][name] + rvalue
+ else:
+ return sections[section][name]
+ if multi and rvalue != []:
+ return rvalue
+ raise KeyError
+
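Note that read() inserts each newly parsed file at the front of the parsed
list, so values from files read later take precedence. A minimal sketch,
assuming both (illustrative) files define the keys being looked up::

    parser = MultiConfigParser()
    parser.read(['base.conf', 'override.conf'])

    # A plain lookup returns the value list found in the most recently
    # read file that defines the key, i.e. override.conf here:
    verbose = parser.get('DEFAULT', ['verbose'])

    # With multi=True the matches from all files are concatenated, with
    # values from earlier files first and later files last:
    handlers = parser.get('DEFAULT', ['handlers'], multi=True)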
+
+class ConfigOpts(collections.Mapping):
+
+ """
+ Config options which may be set on the command line or in config files.
+
+ ConfigOpts is a configuration option manager with APIs for registering
+ option schemas, grouping options, parsing option values and retrieving
+ the values of options.
+ """
+
+ def __init__(self):
+ """Construct a ConfigOpts object."""
+ self._opts = {} # dict of dicts of (opt:, override:, default:)
+ self._groups = {}
+
+ self._args = None
+
+ self._oparser = None
+ self._cparser = None
+ self._cli_values = {}
+ self.__cache = {}
+ self._config_opts = []
+
+ def _pre_setup(self, project, prog, version, usage, default_config_files):
+ """Initialize a ConfigCliParser object for option parsing."""
+
+ if prog is None:
+ prog = os.path.basename(sys.argv[0])
+
+ if default_config_files is None:
+ default_config_files = find_config_files(project, prog)
+
+ self._oparser = argparse.ArgumentParser(prog=prog, usage=usage)
+ self._oparser.add_argument('--version',
+ action='version',
+ version=version)
+
+ return prog, default_config_files
+
+ def _setup(self, project, prog, version, usage, default_config_files):
+ """Initialize a ConfigOpts object for option parsing."""
+
+ self._config_opts = [
+ MultiStrOpt('config-file',
+ default=default_config_files,
+ metavar='PATH',
+ help='Path to a config file to use. Multiple config '
+ 'files can be specified, with values in later '
+                        'files taking precedence. The default files '
+                        'used are: %s' % (default_config_files, )),
+ StrOpt('config-dir',
+ metavar='DIR',
+ help='Path to a config directory to pull *.conf '
+ 'files from. This file set is sorted, so as to '
+ 'provide a predictable parse order if individual '
+                        'options are overridden. The set is parsed after '
+                        'the file(s), if any, specified via --config-file, '
+                        'hence overridden options in the directory take '
+ 'precedence.'),
+ ]
+ self.register_cli_opts(self._config_opts)
+
+ self.project = project
+ self.prog = prog
+ self.version = version
+ self.usage = usage
+ self.default_config_files = default_config_files
+
+ def __clear_cache(f):
+ @functools.wraps(f)
+ def __inner(self, *args, **kwargs):
+ if kwargs.pop('clear_cache', True):
+ self.__cache.clear()
+ return f(self, *args, **kwargs)
+
+ return __inner
+
+ def __call__(self,
+ args=None,
+ project=None,
+ prog=None,
+ version=None,
+ usage=None,
+ default_config_files=None):
+ """Parse command line arguments and config files.
+
+ Calling a ConfigOpts object causes the supplied command line arguments
+ and config files to be parsed, causing opt values to be made available
+ as attributes of the object.
+
+ The object may be called multiple times, each time causing the previous
+ set of values to be overwritten.
+
+ Automatically registers the --config-file option with either a supplied
+ list of default config files, or a list from find_config_files().
+
+ If the --config-dir option is set, any *.conf files from this
+ directory are pulled in, after all the file(s) specified by the
+ --config-file option.
+
+ :param args: command line arguments (defaults to sys.argv[1:])
+ :param project: the toplevel project name, used to locate config files
+ :param prog: the name of the program (defaults to sys.argv[0] basename)
+ :param version: the program version (for --version)
+ :param usage: a usage string (%prog will be expanded)
+ :param default_config_files: config files to use by default
+ :returns: the list of arguments left over after parsing options
+ :raises: SystemExit, ConfigFilesNotFoundError, ConfigFileParseError,
+ RequiredOptError, DuplicateOptError
+ """
+
+ self.clear()
+
+ prog, default_config_files = self._pre_setup(project,
+ prog,
+ version,
+ usage,
+ default_config_files)
+
+ self._setup(project, prog, version, usage, default_config_files)
+
+ self._cli_values = self._parse_cli_opts(args)
+
+ self._parse_config_files()
+
+ self._check_required_opts()
+
+ def __getattr__(self, name):
+ """Look up an option value and perform string substitution.
+
+ :param name: the opt name (or 'dest', more precisely)
+        :returns: the option value (after string substitution) or a GroupAttr
+        :raises: NoSuchOptError, ConfigFileValueError,
+                 TemplateSubstitutionError
+ """
+ return self._get(name)
+
+ def __getitem__(self, key):
+ """Look up an option value and perform string substitution."""
+ return self.__getattr__(key)
+
+ def __contains__(self, key):
+ """Return True if key is the name of a registered opt or group."""
+ return key in self._opts or key in self._groups
+
+ def __iter__(self):
+ """Iterate over all registered opt and group names."""
+ for key in self._opts.keys() + self._groups.keys():
+ yield key
+
+ def __len__(self):
+ """Return the number of options and option groups."""
+ return len(self._opts) + len(self._groups)
+
+ def reset(self):
+ """Clear the object state and unset overrides and defaults."""
+ self._unset_defaults_and_overrides()
+ self.clear()
+
+ @__clear_cache
+ def clear(self):
+ """Clear the state of the object to before it was called.
+
+        Any sub-parsers added by SubCommandOpt handlers will also be
+        removed as a side-effect of this method.
+ """
+ self._args = None
+ self._cli_values.clear()
+ self._oparser = argparse.ArgumentParser()
+ self._cparser = None
+ self.unregister_opts(self._config_opts)
+ for group in self._groups.values():
+ group._clear()
+
+ @__clear_cache
+ def register_opt(self, opt, group=None, cli=False):
+ """Register an option schema.
+
+        Registering an option schema makes any option value parsed from the
+        command line or config files, before or after registration, available
+        as an attribute of this object.
+
+ :param opt: an instance of an Opt sub-class
+ :param cli: whether this is a CLI option
+ :param group: an optional OptGroup object or group name
+        :return: False if the opt was already registered, True otherwise
+ :raises: DuplicateOptError
+ """
+ if group is not None:
+ group = self._get_group(group, autocreate=True)
+ return group._register_opt(opt, cli)
+
+ if _is_opt_registered(self._opts, opt):
+ return False
+
+ self._opts[opt.dest] = {'opt': opt, 'cli': cli}
+
+ return True
+
+ @__clear_cache
+ def register_opts(self, opts, group=None):
+ """Register multiple option schemas at once."""
+ for opt in opts:
+ self.register_opt(opt, group, clear_cache=False)
+
+ @__clear_cache
+ def register_cli_opt(self, opt, group=None):
+ """Register a CLI option schema.
+
+ CLI option schemas must be registered before the command line and
+ config files are parsed. This is to ensure that all CLI options are
+        shown in --help and that option validation works as expected.
+
+ :param opt: an instance of an Opt sub-class
+ :param group: an optional OptGroup object or group name
+        :return: False if the opt was already registered, True otherwise
+ :raises: DuplicateOptError, ArgsAlreadyParsedError
+ """
+ if self._args is not None:
+ raise ArgsAlreadyParsedError("cannot register CLI option")
+
+ return self.register_opt(opt, group, cli=True, clear_cache=False)
+
+ @__clear_cache
+ def register_cli_opts(self, opts, group=None):
+ """Register multiple CLI option schemas at once."""
+ for opt in opts:
+ self.register_cli_opt(opt, group, clear_cache=False)
+
+ def register_group(self, group):
+ """Register an option group.
+
+ An option group must be registered before options can be registered
+ with the group.
+
+ :param group: an OptGroup object
+ """
+ if group.name in self._groups:
+ return
+
+ self._groups[group.name] = copy.copy(group)
+
+ @__clear_cache
+ def unregister_opt(self, opt, group=None):
+ """Unregister an option.
+
+ :param opt: an Opt object
+ :param group: an optional OptGroup object or group name
+ :raises: ArgsAlreadyParsedError, NoSuchGroupError
+ """
+ if self._args is not None:
+ raise ArgsAlreadyParsedError("reset before unregistering options")
+
+ if group is not None:
+ self._get_group(group)._unregister_opt(opt)
+ elif opt.dest in self._opts:
+ del self._opts[opt.dest]
+
+ @__clear_cache
+ def unregister_opts(self, opts, group=None):
+ """Unregister multiple CLI option schemas at once."""
+ for opt in opts:
+ self.unregister_opt(opt, group, clear_cache=False)
+
+ def import_opt(self, name, module_str, group=None):
+ """Import an option definition from a module.
+
+ Import a module and check that a given option is registered.
+
+ This is intended for use with global configuration objects
+ like cfg.CONF where modules commonly register options with
+ CONF at module load time. If one module requires an option
+ defined by another module it can use this method to explicitly
+ declare the dependency.
+
+ :param name: the name/dest of the opt
+ :param module_str: the name of a module to import
+        :param group: an optional OptGroup object or group name
+ :raises: NoSuchOptError, NoSuchGroupError
+ """
+ __import__(module_str)
+ self._get_opt_info(name, group)
+
+ @__clear_cache
+ def set_override(self, name, override, group=None):
+ """Override an opt value.
+
+ Override the command line, config file and default values of a
+ given option.
+
+ :param name: the name/dest of the opt
+ :param override: the override value
+        :param group: an optional OptGroup object or group name
+ :raises: NoSuchOptError, NoSuchGroupError
+ """
+ opt_info = self._get_opt_info(name, group)
+ opt_info['override'] = override
+
+ @__clear_cache
+ def set_default(self, name, default, group=None):
+ """Override an opt's default value.
+
+        Override the default value of a given option. A command line or
+ config file value will still take precedence over this default.
+
+ :param name: the name/dest of the opt
+ :param default: the default value
+        :param group: an optional OptGroup object or group name
+ :raises: NoSuchOptError, NoSuchGroupError
+ """
+ opt_info = self._get_opt_info(name, group)
+ opt_info['default'] = default
+
+ @__clear_cache
+ def clear_override(self, name, group=None):
+ """Clear an override an opt value.
+
+ Clear a previously set override of the command line, config file
+ and default values of a given option.
+
+ :param name: the name/dest of the opt
+        :param group: an optional OptGroup object or group name
+ :raises: NoSuchOptError, NoSuchGroupError
+ """
+ opt_info = self._get_opt_info(name, group)
+ opt_info.pop('override', None)
+
+ @__clear_cache
+ def clear_default(self, name, group=None):
+ """Clear an override an opt's default value.
+
+ Clear a previously set override of the default value of given option.
+
+ :param name: the name/dest of the opt
+        :param group: an optional OptGroup object or group name
+ :raises: NoSuchOptError, NoSuchGroupError
+ """
+ opt_info = self._get_opt_info(name, group)
+ opt_info.pop('default', None)
+
+ def _all_opt_infos(self):
+ """A generator function for iteration opt infos."""
+ for info in self._opts.values():
+ yield info, None
+ for group in self._groups.values():
+ for info in group._opts.values():
+ yield info, group
+
+ def _all_cli_opts(self):
+ """A generator function for iterating CLI opts."""
+ for info, group in self._all_opt_infos():
+ if info['cli']:
+ yield info['opt'], group
+
+ def _unset_defaults_and_overrides(self):
+ """Unset any default or override on all options."""
+ for info, group in self._all_opt_infos():
+ info.pop('default', None)
+ info.pop('override', None)
+
+ def find_file(self, name):
+ """Locate a file located alongside the config files.
+
+ Search for a file with the supplied basename in the directories
+ which we have already loaded config files from and other known
+ configuration directories.
+
+ The directory, if any, supplied by the config_dir option is
+ searched first. Then the config_file option is iterated over
+ and each of the base directories of the config_files values
+ are searched. Failing both of these, the standard directories
+        searched by the module-level find_config_files() function are
+ used. The first matching file is returned.
+
+        :param name: the filename, e.g. 'policy.json'
+ :returns: the path to a matching file, or None
+ """
+ dirs = []
+ if self.config_dir:
+ dirs.append(_fixpath(self.config_dir))
+
+ for cf in reversed(self.config_file):
+ dirs.append(os.path.dirname(_fixpath(cf)))
+
+ dirs.extend(_get_config_dirs(self.project))
+
+ return _search_dirs(dirs, name)
+
+ def log_opt_values(self, logger, lvl):
+ """Log the value of all registered opts.
+
+ It's often useful for an app to log its configuration to a log file at
+        startup for debugging. This method dumps the entire config state to
+ the supplied logger at a given log level.
+
+ :param logger: a logging.Logger object
+ :param lvl: the log level (e.g. logging.DEBUG) arg to logger.log()
+ """
+ logger.log(lvl, "*" * 80)
+ logger.log(lvl, "Configuration options gathered from:")
+ logger.log(lvl, "command line args: %s", self._args)
+ logger.log(lvl, "config files: %s", self.config_file)
+ logger.log(lvl, "=" * 80)
+
+ def _sanitize(opt, value):
+ """Obfuscate values of options declared secret."""
+ return value if not opt.secret else '*' * len(str(value))
+
+ for opt_name in sorted(self._opts):
+ opt = self._get_opt_info(opt_name)['opt']
+ logger.log(lvl, "%-30s = %s", opt_name,
+ _sanitize(opt, getattr(self, opt_name)))
+
+ for group_name in self._groups:
+ group_attr = self.GroupAttr(self, self._get_group(group_name))
+ for opt_name in sorted(self._groups[group_name]._opts):
+ opt = self._get_opt_info(opt_name, group_name)['opt']
+ logger.log(lvl, "%-30s = %s",
+ "%s.%s" % (group_name, opt_name),
+ _sanitize(opt, getattr(group_attr, opt_name)))
+
+ logger.log(lvl, "*" * 80)
+
+ def print_usage(self, file=None):
+ """Print the usage message for the current program."""
+ self._oparser.print_usage(file)
+
+ def print_help(self, file=None):
+ """Print the help message for the current program."""
+ self._oparser.print_help(file)
+
+ def _get(self, name, group=None):
+ if isinstance(group, OptGroup):
+ key = (group.name, name)
+ else:
+ key = (group, name)
+ try:
+ return self.__cache[key]
+ except KeyError:
+ value = self._substitute(self._do_get(name, group))
+ self.__cache[key] = value
+ return value
+
+ def _do_get(self, name, group=None):
+ """Look up an option value.
+
+ :param name: the opt name (or 'dest', more precisely)
+ :param group: an OptGroup
+ :returns: the option value, or a GroupAttr object
+ :raises: NoSuchOptError, NoSuchGroupError, ConfigFileValueError,
+ TemplateSubstitutionError
+ """
+ if group is None and name in self._groups:
+ return self.GroupAttr(self, self._get_group(name))
+
+ info = self._get_opt_info(name, group)
+ opt = info['opt']
+
+ if isinstance(opt, SubCommandOpt):
+ return self.SubCommandAttr(self, group, opt.dest)
+
+ if 'override' in info:
+ return info['override']
+
+ values = []
+ if self._cparser is not None:
+ section = group.name if group is not None else 'DEFAULT'
+ try:
+ value = opt._get_from_config_parser(self._cparser, section)
+ except KeyError:
+ pass
+ except ValueError as ve:
+ raise ConfigFileValueError(str(ve))
+ else:
+ if not opt.multi:
+ # No need to continue since the last value wins
+ return value[-1]
+ values.extend(value)
+
+ name = name if group is None else group.name + '_' + name
+ value = self._cli_values.get(name)
+ if value is not None:
+ if not opt.multi:
+ return value
+
+ # argparse ignores default=None for nargs='*'
+ if opt.positional and not value:
+ value = opt.default
+
+ return value + values
+
+ if values:
+ return values
+
+ if 'default' in info:
+ return info['default']
+
+ return opt.default
+
+ def _substitute(self, value):
+ """Perform string template substitution.
+
+ Substitute any template variables (e.g. $foo, ${bar}) in the supplied
+ string value(s) with opt values.
+
+ :param value: the string value, or list of string values
+ :returns: the substituted string(s)
+ """
+ if isinstance(value, list):
+ return [self._substitute(i) for i in value]
+ elif isinstance(value, str):
+ tmpl = string.Template(value)
+ return tmpl.safe_substitute(self.StrSubWrapper(self))
+ else:
+ return value
+
+ def _get_group(self, group_or_name, autocreate=False):
+ """Looks up a OptGroup object.
+
+ Helper function to return an OptGroup given a parameter which can
+ either be the group's name or an OptGroup object.
+
+ The OptGroup object returned is from the internal dict of OptGroup
+ objects, which will be a copy of any OptGroup object that users of
+ the API have access to.
+
+ :param group_or_name: the group's name or the OptGroup object itself
+ :param autocreate: whether to auto-create the group if it's not found
+ :raises: NoSuchGroupError
+ """
+ group = group_or_name if isinstance(group_or_name, OptGroup) else None
+ group_name = group.name if group else group_or_name
+
+        if group_name not in self._groups:
+            if group is not None or not autocreate:
+ raise NoSuchGroupError(group_name)
+
+ self.register_group(OptGroup(name=group_name))
+
+ return self._groups[group_name]
+
+ def _get_opt_info(self, opt_name, group=None):
+ """Return the (opt, override, default) dict for an opt.
+
+ :param opt_name: an opt name/dest
+ :param group: an optional group name or OptGroup object
+ :raises: NoSuchOptError, NoSuchGroupError
+ """
+ if group is None:
+ opts = self._opts
+ else:
+ group = self._get_group(group)
+ opts = group._opts
+
+        if opt_name not in opts:
+ raise NoSuchOptError(opt_name, group)
+
+ return opts[opt_name]
+
+ def _parse_config_files(self):
+ """Parse the config files from --config-file and --config-dir.
+
+ :raises: ConfigFilesNotFoundError, ConfigFileParseError
+ """
+ config_files = list(self.config_file)
+
+ if self.config_dir:
+ config_dir_glob = os.path.join(self.config_dir, '*.conf')
+ config_files += sorted(glob.glob(config_dir_glob))
+
+ config_files = [_fixpath(p) for p in config_files]
+
+ self._cparser = MultiConfigParser()
+
+ try:
+ read_ok = self._cparser.read(config_files)
+ except iniparser.ParseError as pe:
+ raise ConfigFileParseError(pe.filename, str(pe))
+
+ if read_ok != config_files:
+ not_read_ok = filter(lambda f: f not in read_ok, config_files)
+ raise ConfigFilesNotFoundError(not_read_ok)
+
+ def _check_required_opts(self):
+ """Check that all opts marked as required have values specified.
+
+ :raises: RequiredOptError
+ """
+ for info, group in self._all_opt_infos():
+ opt = info['opt']
+
+ if opt.required:
+ if ('default' in info or 'override' in info):
+ continue
+
+ if self._get(opt.dest, group) is None:
+ raise RequiredOptError(opt.name, group)
+
+ def _parse_cli_opts(self, args):
+ """Parse command line options.
+
+ Initializes the command line option parser and parses the supplied
+ command line arguments.
+
+ :param args: the command line arguments
+ :returns: a dict of parsed option values
+ :raises: SystemExit, DuplicateOptError
+
+ """
+ self._args = args
+
+ for opt, group in self._all_cli_opts():
+ opt._add_to_cli(self._oparser, group)
+
+ return vars(self._oparser.parse_args(args))
+
+ class GroupAttr(collections.Mapping):
+
+ """
+    A helper class representing the option values of a group as a mapping
+    and as attributes.
+ """
+
+ def __init__(self, conf, group):
+ """Construct a GroupAttr object.
+
+ :param conf: a ConfigOpts object
+ :param group: an OptGroup object
+ """
+ self._conf = conf
+ self._group = group
+
+ def __getattr__(self, name):
+ """Look up an option value and perform template substitution."""
+ return self._conf._get(name, self._group)
+
+ def __getitem__(self, key):
+ """Look up an option value and perform string substitution."""
+ return self.__getattr__(key)
+
+ def __contains__(self, key):
+ """Return True if key is the name of a registered opt or group."""
+ return key in self._group._opts
+
+ def __iter__(self):
+ """Iterate over all registered opt and group names."""
+ for key in self._group._opts.keys():
+ yield key
+
+ def __len__(self):
+ """Return the number of options and option groups."""
+ return len(self._group._opts)
+
+ class SubCommandAttr(object):
+
+ """
+ A helper class representing the name and arguments of an argparse
+ sub-parser.
+ """
+
+ def __init__(self, conf, group, dest):
+ """Construct a SubCommandAttr object.
+
+ :param conf: a ConfigOpts object
+ :param group: an OptGroup object
+ :param dest: the name of the sub-parser
+ """
+ self._conf = conf
+ self._group = group
+ self._dest = dest
+
+ def __getattr__(self, name):
+ """Look up a sub-parser name or argument value."""
+ if name == 'name':
+ name = self._dest
+ if self._group is not None:
+ name = self._group.name + '_' + name
+ return self._conf._cli_values[name]
+
+ if name in self._conf:
+ raise DuplicateOptError(name)
+
+ try:
+ return self._conf._cli_values[name]
+ except KeyError:
+ raise NoSuchOptError(name)
+
+ class StrSubWrapper(object):
+
+ """
+ A helper class exposing opt values as a dict for string substitution.
+ """
+
+ def __init__(self, conf):
+ """Construct a StrSubWrapper object.
+
+ :param conf: a ConfigOpts object
+ """
+ self.conf = conf
+
+ def __getitem__(self, key):
+ """Look up an opt value from the ConfigOpts object.
+
+ :param key: an opt name
+ :returns: an opt value
+ :raises: TemplateSubstitutionError if attribute is a group
+ """
+ value = getattr(self.conf, key)
+ if isinstance(value, self.conf.GroupAttr):
+ raise TemplateSubstitutionError(
+ 'substituting group %s not supported' % key)
+ return value
+
+
+class CommonConfigOpts(ConfigOpts):
+
+ DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
+ DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
+
+ common_cli_opts = [
+ BoolOpt('debug',
+ short='d',
+ default=False,
+ help='Print debugging output'),
+ BoolOpt('verbose',
+ short='v',
+ default=False,
+ help='Print more verbose output'),
+ ]
+
+ logging_cli_opts = [
+ StrOpt('log-config',
+ metavar='PATH',
+ help='If this option is specified, the logging configuration '
+ 'file specified is used and overrides any other logging '
+ 'options specified. Please see the Python logging module '
+ 'documentation for details on logging configuration '
+ 'files.'),
+ StrOpt('log-format',
+ default=DEFAULT_LOG_FORMAT,
+ metavar='FORMAT',
+ help='A logging.Formatter log message format string which may '
+ 'use any of the available logging.LogRecord attributes. '
+ 'Default: %(default)s'),
+ StrOpt('log-date-format',
+ default=DEFAULT_LOG_DATE_FORMAT,
+ metavar='DATE_FORMAT',
+ help='Format string for %%(asctime)s in log records. '
+ 'Default: %(default)s'),
+ StrOpt('log-file',
+ metavar='PATH',
+ deprecated_name='logfile',
+ help='(Optional) Name of log file to output to. '
+ 'If not set, logging will go to stdout.'),
+ StrOpt('log-dir',
+ deprecated_name='logdir',
+ help='(Optional) The directory to keep log files in '
+ '(will be prepended to --log-file)'),
+ BoolOpt('use-syslog',
+ default=False,
+ help='Use syslog for logging.'),
+ StrOpt('syslog-log-facility',
+ default='LOG_USER',
+ help='syslog facility to receive log lines')
+ ]
+
+ def __init__(self):
+ super(CommonConfigOpts, self).__init__()
+ self.register_cli_opts(self.common_cli_opts)
+ self.register_cli_opts(self.logging_cli_opts)
+
+
+CONF = CommonConfigOpts()
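As a rough orientation, typical use of the classes defined above looks like
the following sketch; the opt and group names ('bind_host', 'use_ssl',
'rabbit') are illustrative only::

    opts = [
        StrOpt('bind_host', default='0.0.0.0',
               help='Address to listen on'),
        BoolOpt('use_ssl', default=False,
                help='Serve requests over SSL'),
    ]

    conf = ConfigOpts()
    conf.register_opts(opts)
    conf.register_group(OptGroup(name='rabbit', title='RabbitMQ options'))
    conf.register_opts([StrOpt('host', default='localhost')], group='rabbit')

    # Calling the object parses the command line and any config files;
    # values are then available as attributes, with grouped options
    # reachable through the group name:
    conf(args=[], project='example')
    assert conf.bind_host == '0.0.0.0'
    assert conf.rabbit.host == 'localhost'

    # Values can also be forced programmatically:
    conf.set_override('bind_host', '127.0.0.1')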
diff --git a/tempest/openstack/common/iniparser.py b/tempest/openstack/common/iniparser.py
new file mode 100644
index 0000000..b5cb604
--- /dev/null
+++ b/tempest/openstack/common/iniparser.py
@@ -0,0 +1,130 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+class ParseError(Exception):
+ def __init__(self, message, lineno, line):
+ self.msg = message
+ self.line = line
+ self.lineno = lineno
+
+ def __str__(self):
+ return 'at line %d, %s: %r' % (self.lineno, self.msg, self.line)
+
+
+class BaseParser(object):
+ lineno = 0
+ parse_exc = ParseError
+
+ def _assignment(self, key, value):
+ self.assignment(key, value)
+ return None, []
+
+ def _get_section(self, line):
+ if line[-1] != ']':
+ return self.error_no_section_end_bracket(line)
+ if len(line) <= 2:
+ return self.error_no_section_name(line)
+
+ return line[1:-1]
+
+ def _split_key_value(self, line):
+ colon = line.find(':')
+ equal = line.find('=')
+ if colon < 0 and equal < 0:
+ return self.error_invalid_assignment(line)
+
+ if colon < 0 or (equal >= 0 and equal < colon):
+ key, value = line[:equal], line[equal + 1:]
+ else:
+ key, value = line[:colon], line[colon + 1:]
+
+ value = value.strip()
+ if ((value and value[0] == value[-1]) and
+ (value[0] == "\"" or value[0] == "'")):
+ value = value[1:-1]
+ return key.strip(), [value]
+
+ def parse(self, lineiter):
+ key = None
+ value = []
+
+ for line in lineiter:
+ self.lineno += 1
+
+ line = line.rstrip()
+ if not line:
+ # Blank line, ends multi-line values
+ if key:
+ key, value = self._assignment(key, value)
+ continue
+ elif line[0] in (' ', '\t'):
+ # Continuation of previous assignment
+ if key is None:
+ self.error_unexpected_continuation(line)
+ else:
+ value.append(line.lstrip())
+ continue
+
+ if key:
+ # Flush previous assignment, if any
+ key, value = self._assignment(key, value)
+
+ if line[0] == '[':
+ # Section start
+ section = self._get_section(line)
+ if section:
+ self.new_section(section)
+ elif line[0] in '#;':
+ self.comment(line[1:].lstrip())
+ else:
+ key, value = self._split_key_value(line)
+ if not key:
+ return self.error_empty_key(line)
+
+ if key:
+ # Flush previous assignment, if any
+ self._assignment(key, value)
+
+ def assignment(self, key, value):
+ """Called when a full assignment is parsed."""
+ raise NotImplementedError()
+
+ def new_section(self, section):
+ """Called when a new section is started."""
+ raise NotImplementedError()
+
+ def comment(self, comment):
+ """Called when a comment is parsed."""
+ pass
+
+ def error_invalid_assignment(self, line):
+ raise self.parse_exc("No ':' or '=' found in assignment",
+ self.lineno, line)
+
+ def error_empty_key(self, line):
+ raise self.parse_exc('Key cannot be empty', self.lineno, line)
+
+ def error_unexpected_continuation(self, line):
+ raise self.parse_exc('Unexpected continuation line',
+ self.lineno, line)
+
+ def error_no_section_end_bracket(self, line):
+ raise self.parse_exc('Invalid section (must end with ])',
+ self.lineno, line)
+
+ def error_no_section_name(self, line):
+ raise self.parse_exc('Empty section name', self.lineno, line)
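BaseParser only implements the line-oriented parsing loop; concrete behaviour
comes from overriding the callbacks, as the ConfigParser subclass in cfg.py
does. A minimal illustrative subclass that collects values into a dict::

    class DictParser(BaseParser):
        def __init__(self):
            super(DictParser, self).__init__()
            self.values = {'DEFAULT': {}}
            self.section = 'DEFAULT'

        def new_section(self, section):
            self.section = section
            self.values.setdefault(section, {})

        def assignment(self, key, value):
            # Multi-line values arrive as a list of lines.
            self.values[self.section][key] = '\n'.join(value)

    parser = DictParser()
    parser.parse(['[server]', 'host = 127.0.0.1', 'port = 8080'])
    assert parser.values['server'] == {'host': '127.0.0.1', 'port': '8080'}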
diff --git a/tempest/openstack/common/setup.py b/tempest/openstack/common/setup.py
new file mode 100644
index 0000000..e6f72f0
--- /dev/null
+++ b/tempest/openstack/common/setup.py
@@ -0,0 +1,366 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Utilities with minimum-depends for use in setup.py
+"""
+
+import datetime
+import os
+import re
+import subprocess
+import sys
+
+from setuptools.command import sdist
+
+
+def parse_mailmap(mailmap='.mailmap'):
+ mapping = {}
+ if os.path.exists(mailmap):
+ with open(mailmap, 'r') as fp:
+ for l in fp:
+ l = l.strip()
+ if not l.startswith('#') and ' ' in l:
+ canonical_email, alias = [x for x in l.split(' ')
+ if x.startswith('<')]
+ mapping[alias] = canonical_email
+ return mapping
+
+
+def canonicalize_emails(changelog, mapping):
+ """Takes in a string and an email alias mapping and replaces all
+ instances of the aliases in the string with their real email.
+ """
+ for alias, email in mapping.iteritems():
+ changelog = changelog.replace(alias, email)
+ return changelog
+
+
+# Get requirements from the first file that exists
+def get_reqs_from_files(requirements_files):
+ for requirements_file in requirements_files:
+ if os.path.exists(requirements_file):
+ with open(requirements_file, 'r') as fil:
+ return fil.read().split('\n')
+ return []
+
+
+def parse_requirements(requirements_files=['requirements.txt',
+ 'tools/pip-requires']):
+ requirements = []
+ for line in get_reqs_from_files(requirements_files):
+ # For the requirements list, we need to inject only the portion
+ # after egg= so that distutils knows the package it's looking for
+ # such as:
+ # -e git://github.com/openstack/nova/master#egg=nova
+ if re.match(r'\s*-e\s+', line):
+ requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1',
+ line))
+ # such as:
+ # http://github.com/openstack/nova/zipball/master#egg=nova
+ elif re.match(r'\s*https?:', line):
+ requirements.append(re.sub(r'\s*https?:.*#egg=(.*)$', r'\1',
+ line))
+ # -f lines are for index locations, and don't get used here
+ elif re.match(r'\s*-f\s+', line):
+ pass
+ # argparse is part of the standard library starting with 2.7
+ # adding it to the requirements list screws distro installs
+ elif line == 'argparse' and sys.version_info >= (2, 7):
+ pass
+ else:
+ requirements.append(line)
+
+ return requirements
+
+
+def parse_dependency_links(requirements_files=['requirements.txt',
+ 'tools/pip-requires']):
+ dependency_links = []
+ # dependency_links inject alternate locations to find packages listed
+ # in requirements
+ for line in get_reqs_from_files(requirements_files):
+ # skip comments and blank lines
+ if re.match(r'(\s*#)|(\s*$)', line):
+ continue
+ # lines with -e or -f need the whole line, minus the flag
+ if re.match(r'\s*-[ef]\s+', line):
+ dependency_links.append(re.sub(r'\s*-[ef]\s+', '', line))
+ # lines that are only urls can go in unmolested
+ elif re.match(r'\s*https?:', line):
+ dependency_links.append(line)
+ return dependency_links
+
+
+def write_requirements():
+ venv = os.environ.get('VIRTUAL_ENV', None)
+ if venv is not None:
+ with open("requirements.txt", "w") as req_file:
+ output = subprocess.Popen(["pip", "-E", venv, "freeze", "-l"],
+ stdout=subprocess.PIPE)
+ requirements = output.communicate()[0].strip()
+ req_file.write(requirements)
+
+
+def _run_shell_command(cmd):
+ if os.name == 'nt':
+ output = subprocess.Popen(["cmd.exe", "/C", cmd],
+ stdout=subprocess.PIPE)
+ else:
+ output = subprocess.Popen(["/bin/sh", "-c", cmd],
+ stdout=subprocess.PIPE)
+ out = output.communicate()
+ if len(out) == 0:
+ return None
+ if len(out[0].strip()) == 0:
+ return None
+ return out[0].strip()
+
+
+def _get_git_next_version_suffix(branch_name):
+ datestamp = datetime.datetime.now().strftime('%Y%m%d')
+ if branch_name == 'milestone-proposed':
+ revno_prefix = "r"
+ else:
+ revno_prefix = ""
+ _run_shell_command("git fetch origin +refs/meta/*:refs/remotes/meta/*")
+ milestone_cmd = "git show meta/openstack/release:%s" % branch_name
+ milestonever = _run_shell_command(milestone_cmd)
+ if milestonever:
+ first_half = "%s~%s" % (milestonever, datestamp)
+ else:
+ first_half = datestamp
+
+ post_version = _get_git_post_version()
+ # post version should look like:
+ # 0.1.1.4.gcc9e28a
+ # where the bit after the last . is the short sha, and the bit between
+ # the last and second to last is the revno count
+ (revno, sha) = post_version.split(".")[-2:]
+ second_half = "%s%s.%s" % (revno_prefix, revno, sha)
+ return ".".join((first_half, second_half))
+
+
+def _get_git_current_tag():
+ return _run_shell_command("git tag --contains HEAD")
+
+
+def _get_git_tag_info():
+ return _run_shell_command("git describe --tags")
+
+
+def _get_git_post_version():
+ current_tag = _get_git_current_tag()
+ if current_tag is not None:
+ return current_tag
+ else:
+ tag_info = _get_git_tag_info()
+ if tag_info is None:
+ base_version = "0.0"
+ cmd = "git --no-pager log --oneline"
+ out = _run_shell_command(cmd)
+ revno = len(out.split("\n"))
+ sha = _run_shell_command("git describe --always")
+ else:
+ tag_infos = tag_info.split("-")
+ base_version = "-".join(tag_infos[:-2])
+ (revno, sha) = tag_infos[-2:]
+ return "%s.%s.%s" % (base_version, revno, sha)
+
+
+def write_git_changelog():
+ """Write a changelog based on the git changelog."""
+ new_changelog = 'ChangeLog'
+ if not os.getenv('SKIP_WRITE_GIT_CHANGELOG'):
+ if os.path.isdir('.git'):
+ git_log_cmd = 'git log --stat'
+ changelog = _run_shell_command(git_log_cmd)
+ mailmap = parse_mailmap()
+ with open(new_changelog, "w") as changelog_file:
+ changelog_file.write(canonicalize_emails(changelog, mailmap))
+ else:
+ open(new_changelog, 'w').close()
+
+
+def generate_authors():
+ """Create AUTHORS file using git commits."""
+ jenkins_email = 'jenkins@review.(openstack|stackforge).org'
+ old_authors = 'AUTHORS.in'
+ new_authors = 'AUTHORS'
+ if not os.getenv('SKIP_GENERATE_AUTHORS'):
+ if os.path.isdir('.git'):
+ # don't include jenkins email address in AUTHORS file
+ git_log_cmd = ("git log --format='%aN <%aE>' | sort -u | "
+ "egrep -v '" + jenkins_email + "'")
+ changelog = _run_shell_command(git_log_cmd)
+ mailmap = parse_mailmap()
+ with open(new_authors, 'w') as new_authors_fh:
+ new_authors_fh.write(canonicalize_emails(changelog, mailmap))
+ if os.path.exists(old_authors):
+ with open(old_authors, "r") as old_authors_fh:
+ new_authors_fh.write('\n' + old_authors_fh.read())
+ else:
+ open(new_authors, 'w').close()
+
+
+_rst_template = """%(heading)s
+%(underline)s
+
+.. automodule:: %(module)s
+ :members:
+ :undoc-members:
+ :show-inheritance:
+"""
+
+
+def read_versioninfo(project):
+ """Read the versioninfo file. If it doesn't exist, we're in a github
+ zipball, and there's really no way to know what version we really
+ are, but that should be ok, because the utility of that should be
+ just about nil if this code path is in use in the first place."""
+ versioninfo_path = os.path.join(project, 'versioninfo')
+ if os.path.exists(versioninfo_path):
+ with open(versioninfo_path, 'r') as vinfo:
+ version = vinfo.read().strip()
+ else:
+ version = "0.0.0"
+ return version
+
+
+def write_versioninfo(project, version):
+ """Write a simple file containing the version of the package."""
+ with open(os.path.join(project, 'versioninfo'), 'w') as fil:
+ fil.write("%s\n" % version)
+
+
+def get_cmdclass():
+ """Return dict of commands to run from setup.py."""
+
+ cmdclass = dict()
+
+ def _find_modules(arg, dirname, files):
+ for filename in files:
+ if filename.endswith('.py') and filename != '__init__.py':
+ arg["%s.%s" % (dirname.replace('/', '.'),
+ filename[:-3])] = True
+
+ class LocalSDist(sdist.sdist):
+ """Builds the ChangeLog and Authors files from VC first."""
+
+ def run(self):
+ write_git_changelog()
+ generate_authors()
+ # sdist.sdist is an old style class, can't use super()
+ sdist.sdist.run(self)
+
+ cmdclass['sdist'] = LocalSDist
+
+ # If Sphinx is installed on the box running setup.py,
+ # enable setup.py to build the documentation, otherwise,
+ # just ignore it
+ try:
+ from sphinx.setup_command import BuildDoc
+
+ class LocalBuildDoc(BuildDoc):
+ def generate_autoindex(self):
+ print "**Autodocumenting from %s" % os.path.abspath(os.curdir)
+ modules = {}
+ option_dict = self.distribution.get_option_dict('build_sphinx')
+ source_dir = os.path.join(option_dict['source_dir'][1], 'api')
+ if not os.path.exists(source_dir):
+ os.makedirs(source_dir)
+ for pkg in self.distribution.packages:
+ if '.' not in pkg:
+ os.path.walk(pkg, _find_modules, modules)
+ module_list = modules.keys()
+ module_list.sort()
+ autoindex_filename = os.path.join(source_dir, 'autoindex.rst')
+ with open(autoindex_filename, 'w') as autoindex:
+ autoindex.write(""".. toctree::
+ :maxdepth: 1
+
+""")
+ for module in module_list:
+ output_filename = os.path.join(source_dir,
+ "%s.rst" % module)
+ heading = "The :mod:`%s` Module" % module
+ underline = "=" * len(heading)
+ values = dict(module=module, heading=heading,
+ underline=underline)
+
+ print "Generating %s" % output_filename
+ with open(output_filename, 'w') as output_file:
+ output_file.write(_rst_template % values)
+ autoindex.write(" %s.rst\n" % module)
+
+ def run(self):
+ if not os.getenv('SPHINX_DEBUG'):
+ self.generate_autoindex()
+
+ for builder in ['html', 'man']:
+ self.builder = builder
+ self.finalize_options()
+ self.project = self.distribution.get_name()
+ self.version = self.distribution.get_version()
+ self.release = self.distribution.get_version()
+ BuildDoc.run(self)
+ cmdclass['build_sphinx'] = LocalBuildDoc
+ except ImportError:
+ pass
+
+ return cmdclass
+
+
+def get_git_branchname():
+ for branch in _run_shell_command("git branch --color=never").split("\n"):
+ if branch.startswith('*'):
+ _branch_name = branch.split()[1].strip()
+ if _branch_name == "(no":
+ _branch_name = "no-branch"
+ return _branch_name
+
+
+def get_pre_version(projectname, base_version):
+ """Return a version which is leading up to a version that will
+ be released in the future."""
+ if os.path.isdir('.git'):
+ current_tag = _get_git_current_tag()
+ if current_tag is not None:
+ version = current_tag
+ else:
+ branch_name = os.getenv('BRANCHNAME',
+ os.getenv('GERRIT_REFNAME',
+ get_git_branchname()))
+ version_suffix = _get_git_next_version_suffix(branch_name)
+ version = "%s~%s" % (base_version, version_suffix)
+ write_versioninfo(projectname, version)
+ return version
+ else:
+ version = read_versioninfo(projectname)
+ return version
+
+
+def get_post_version(projectname):
+ """Return a version which is equal to the tag that's on the current
+ revision if there is one, or tag plus number of additional revisions
+ if the current revision has no tag."""
+
+ if os.path.isdir('.git'):
+ version = _get_git_post_version()
+ write_versioninfo(projectname, version)
+ return version
+ return read_versioninfo(projectname)
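These helpers are intended to be called from a project's setup.py. A sketch of
typical usage follows; the project name is illustrative and the import path
simply assumes the module location added by this patch::

    import setuptools

    from tempest.openstack.common import setup as common_setup

    project = 'example_project'

    setuptools.setup(
        name=project,
        version=common_setup.get_post_version(project),
        cmdclass=common_setup.get_cmdclass(),
        install_requires=common_setup.parse_requirements(),
        dependency_links=common_setup.parse_dependency_links(),
    )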
diff --git a/tempest/services/boto/__init__.py b/tempest/services/boto/__init__.py
index 9b9fceb..1365435 100644
--- a/tempest/services/boto/__init__.py
+++ b/tempest/services/boto/__init__.py
@@ -15,17 +15,16 @@
# License for the specific language governing permissions and limitations
# under the License.
-import boto
-
from ConfigParser import DuplicateSectionError
+from contextlib import closing
+import re
+from types import MethodType
+
+import boto
from tempest.exceptions import InvalidConfiguration
from tempest.exceptions import NotFound
-import re
-from types import MethodType
-from contextlib import closing
-
class BotoClientBase(object):
@@ -36,7 +35,8 @@
auth_url=None, tenant_name=None,
*args, **kwargs):
- self.connection_timeout = config.boto.http_socket_timeout
+ self.connection_timeout = str(config.boto.http_socket_timeout)
+ self.num_retries = str(config.boto.num_retries)
self.build_timeout = config.boto.build_timeout
# We do not need the "path": "/token" part
if auth_url:
@@ -64,15 +64,16 @@
raise NotFound("Unable to get access and secret keys")
return ec2_cred
- def _config_boto_timeout(self, timeout):
+ def _config_boto_timeout(self, timeout, retries):
try:
boto.config.add_section("Boto")
except DuplicateSectionError:
pass
boto.config.set("Boto", "http_socket_timeout", timeout)
+ boto.config.set("Boto", "num_retries", retries)
def __getattr__(self, name):
- """Automatically creates methods for the allowed methods set"""
+ """Automatically creates methods for the allowed methods set."""
if name in self.ALLOWED_METHODS:
def func(self, *args, **kwargs):
with closing(self.get_connection()) as conn:
@@ -87,7 +88,7 @@
raise AttributeError(name)
def get_connection(self):
- self._config_boto_timeout(self.connection_timeout)
+ self._config_boto_timeout(self.connection_timeout, self.num_retries)
if not all((self.connection_data["aws_access_key_id"],
self.connection_data["aws_secret_access_key"])):
if all(self.ks_cred.itervalues()):
diff --git a/tempest/services/boto/clients.py b/tempest/services/boto/clients.py
index 5fabcae..9cfe234 100644
--- a/tempest/services/boto/clients.py
+++ b/tempest/services/boto/clients.py
@@ -15,12 +15,14 @@
# License for the specific language governing permissions and limitations
# under the License.
-import boto
-from boto.s3.connection import OrdinaryCallingFormat
-from boto.ec2.regioninfo import RegionInfo
-from tempest.services.boto import BotoClientBase
import urlparse
+import boto
+from boto.ec2.regioninfo import RegionInfo
+from boto.s3.connection import OrdinaryCallingFormat
+
+from tempest.services.boto import BotoClientBase
+
class APIClientEC2(BotoClientBase):
diff --git a/tempest/services/compute/json/console_output_client.py b/tempest/services/compute/json/console_output_client.py
index d12fd7d..9d37de7 100644
--- a/tempest/services/compute/json/console_output_client.py
+++ b/tempest/services/compute/json/console_output_client.py
@@ -15,15 +15,17 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common.rest_client import RestClient
import json
+from tempest.common.rest_client import RestClient
-class ConsoleOutputsClient(RestClient):
+
+class ConsoleOutputsClientJSON(RestClient):
def __init__(self, config, username, password, auth_url, tenant_name=None):
- super(ConsoleOutputsClient, self).__init__(config, username, password,
- auth_url, tenant_name)
+ super(ConsoleOutputsClientJSON, self).__init__(config, username,
+ password,
+ auth_url, tenant_name)
self.service = self.config.compute.catalog_type
def get_console_output(self, server_id, length):
diff --git a/tempest/services/compute/json/extensions_client.py b/tempest/services/compute/json/extensions_client.py
index c0200df..583c3b4 100644
--- a/tempest/services/compute/json/extensions_client.py
+++ b/tempest/services/compute/json/extensions_client.py
@@ -15,9 +15,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common.rest_client import RestClient
import json
+from tempest.common.rest_client import RestClient
+
class ExtensionsClientJSON(RestClient):
diff --git a/tempest/services/compute/json/flavors_client.py b/tempest/services/compute/json/flavors_client.py
index 01708a2..56546de 100644
--- a/tempest/services/compute/json/flavors_client.py
+++ b/tempest/services/compute/json/flavors_client.py
@@ -15,8 +15,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common.rest_client import RestClient
import json
+import urllib
+
+from tempest.common.rest_client import RestClient
class FlavorsClientJSON(RestClient):
@@ -28,12 +30,8 @@
def list_flavors(self, params=None):
url = 'flavors'
- if params is not None:
- param_list = []
- for param, value in params.iteritems():
- param_list.append("%s=%s&" % (param, value))
-
- url = "flavors?" + "".join(param_list)
+ if params:
+ url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
@@ -41,12 +39,8 @@
def list_flavors_with_detail(self, params=None):
url = 'flavors/detail'
- if params is not None:
- param_list = []
- for param, value in params.iteritems():
- param_list.append("%s=%s&" % (param, value))
-
- url = "flavors/detail?" + "".join(param_list)
+ if params:
+ url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
@@ -59,7 +53,7 @@
def create_flavor(self, name, ram, vcpus, disk, ephemeral, flavor_id,
swap, rxtx):
- """Creates a new flavor or instance type"""
+ """Creates a new flavor or instance type."""
post_body = {
'name': name,
'ram': ram,
@@ -78,5 +72,5 @@
return resp, body['flavor']
def delete_flavor(self, flavor_id):
- """Deletes the given flavor"""
+ """Deletes the given flavor."""
return self.delete("flavors/%s" % str(flavor_id))
diff --git a/tempest/services/compute/json/floating_ips_client.py b/tempest/services/compute/json/floating_ips_client.py
index 6219f34..d73b8a9 100644
--- a/tempest/services/compute/json/floating_ips_client.py
+++ b/tempest/services/compute/json/floating_ips_client.py
@@ -15,9 +15,11 @@
# License for the specific language governing permissions and limitations
# under the License.
+import json
+import urllib
+
from tempest.common.rest_client import RestClient
from tempest import exceptions
-import json
class FloatingIPsClientJSON(RestClient):
@@ -27,19 +29,17 @@
self.service = self.config.compute.catalog_type
def list_floating_ips(self, params=None):
- """Returns a list of all floating IPs filtered by any parameters"""
+ """Returns a list of all floating IPs filtered by any parameters."""
url = 'os-floating-ips'
- if params is not None:
- param_list = []
- for param, value in params.iteritems():
- param_list.append("%s=%s" % (param, value))
- url += '?' + ' &'.join(param_list)
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+
resp, body = self.get(url)
body = json.loads(body)
return resp, body['floating_ips']
def get_floating_ip_details(self, floating_ip_id):
- """Get the details of a floating IP"""
+ """Get the details of a floating IP."""
url = "os-floating-ips/%s" % str(floating_ip_id)
resp, body = self.get(url)
body = json.loads(body)
@@ -48,20 +48,20 @@
return resp, body['floating_ip']
def create_floating_ip(self):
- """Allocate a floating IP to the project"""
+ """Allocate a floating IP to the project."""
url = 'os-floating-ips'
resp, body = self.post(url, None, None)
body = json.loads(body)
return resp, body['floating_ip']
def delete_floating_ip(self, floating_ip_id):
- """Deletes the provided floating IP from the project"""
+ """Deletes the provided floating IP from the project."""
url = "os-floating-ips/%s" % str(floating_ip_id)
resp, body = self.delete(url)
return resp, body
def associate_floating_ip_to_server(self, floating_ip, server_id):
- """Associate the provided floating IP to a specific server"""
+ """Associate the provided floating IP to a specific server."""
url = "servers/%s/action" % str(server_id)
post_body = {
'addFloatingIp': {
@@ -74,7 +74,7 @@
return resp, body
def disassociate_floating_ip_from_server(self, floating_ip, server_id):
- """Disassociate the provided floating IP from a specific server"""
+ """Disassociate the provided floating IP from a specific server."""
url = "servers/%s/action" % str(server_id)
post_body = {
'removeFloatingIp': {
diff --git a/tempest/services/compute/json/hosts_client.py b/tempest/services/compute/json/hosts_client.py
index a53d00d..dc3c524 100644
--- a/tempest/services/compute/json/hosts_client.py
+++ b/tempest/services/compute/json/hosts_client.py
@@ -1,6 +1,7 @@
-from tempest.common.rest_client import RestClient
import json
+from tempest.common.rest_client import RestClient
+
class HostsClientJSON(RestClient):
@@ -10,7 +11,7 @@
self.service = self.config.compute.catalog_type
def list_hosts(self):
- """Lists all hosts"""
+ """Lists all hosts."""
url = 'os-hosts'
resp, body = self.get(url)
diff --git a/tempest/services/compute/json/images_client.py b/tempest/services/compute/json/images_client.py
index 102590c..376dafc 100644
--- a/tempest/services/compute/json/images_client.py
+++ b/tempest/services/compute/json/images_client.py
@@ -15,10 +15,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common.rest_client import RestClient
-from tempest import exceptions
import json
import time
+import urllib
+
+from tempest.common.rest_client import RestClient
+from tempest import exceptions
class ImagesClientJSON(RestClient):
@@ -31,7 +33,7 @@
self.build_timeout = self.config.compute.build_timeout
def create_image(self, server_id, name, meta=None):
- """Creates an image of the original server"""
+ """Creates an image of the original server."""
post_body = {
'createImage': {
@@ -48,41 +50,33 @@
return resp, body
def list_images(self, params=None):
- """Returns a list of all images filtered by any parameters"""
+ """Returns a list of all images filtered by any parameters."""
url = 'images'
- if params is not None:
- param_list = []
- for param, value in params.iteritems():
- param_list.append("%s=%s&" % (param, value))
-
- url = "images?" + "".join(param_list)
+ if params:
+ url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
return resp, body['images']
def list_images_with_detail(self, params=None):
- """Returns a detailed list of images filtered by any parameters"""
+ """Returns a detailed list of images filtered by any parameters."""
url = 'images/detail'
- if params is not None:
- param_list = []
- for param, value in params.iteritems():
- param_list.append("%s=%s&" % (param, value))
-
- url = "images/detail?" + "".join(param_list)
+ if params:
+ url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
return resp, body['images']
def get_image(self, image_id):
- """Returns the details of a single image"""
+ """Returns the details of a single image."""
resp, body = self.get("images/%s" % str(image_id))
body = json.loads(body)
return resp, body['image']
def delete_image(self, image_id):
- """Deletes the provided image"""
+ """Deletes the provided image."""
return self.delete("images/%s" % str(image_id))
def wait_for_image_resp_code(self, image_id, code):
@@ -116,13 +110,13 @@
raise exceptions.TimeoutException
def list_image_metadata(self, image_id):
- """Lists all metadata items for an image"""
+ """Lists all metadata items for an image."""
resp, body = self.get("images/%s/metadata" % str(image_id))
body = json.loads(body)
return resp, body['metadata']
def set_image_metadata(self, image_id, meta):
- """Sets the metadata for an image"""
+ """Sets the metadata for an image."""
post_body = json.dumps({'metadata': meta})
resp, body = self.put('images/%s/metadata' % str(image_id),
post_body, self.headers)
@@ -130,7 +124,7 @@
return resp, body['metadata']
def update_image_metadata(self, image_id, meta):
- """Updates the metadata for an image"""
+ """Updates the metadata for an image."""
post_body = json.dumps({'metadata': meta})
resp, body = self.post('images/%s/metadata' % str(image_id),
post_body, self.headers)
@@ -138,13 +132,13 @@
return resp, body['metadata']
def get_image_metadata_item(self, image_id, key):
- """Returns the value for a specific image metadata key"""
+ """Returns the value for a specific image metadata key."""
resp, body = self.get("images/%s/metadata/%s" % (str(image_id), key))
body = json.loads(body)
return resp, body['meta']
def set_image_metadata_item(self, image_id, key, meta):
- """Sets the value for a specific image metadata key"""
+ """Sets the value for a specific image metadata key."""
post_body = json.dumps({'meta': meta})
resp, body = self.put('images/%s/metadata/%s' % (str(image_id), key),
post_body, self.headers)
@@ -152,7 +146,7 @@
return resp, body['meta']
def delete_image_metadata_item(self, image_id, key):
- """Deletes a single image metadata key/value pair"""
+ """Deletes a single image metadata key/value pair."""
resp, body = self.delete("images/%s/metadata/%s" %
(str(image_id), key))
return resp, body
diff --git a/tempest/services/compute/json/keypairs_client.py b/tempest/services/compute/json/keypairs_client.py
index 553936c..90b2096 100644
--- a/tempest/services/compute/json/keypairs_client.py
+++ b/tempest/services/compute/json/keypairs_client.py
@@ -15,9 +15,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common.rest_client import RestClient
import json
+from tempest.common.rest_client import RestClient
+
class KeyPairsClientJSON(RestClient):
diff --git a/tempest/services/compute/json/quotas_client.py b/tempest/services/compute/json/quotas_client.py
index 2cc417f..543b015 100644
--- a/tempest/services/compute/json/quotas_client.py
+++ b/tempest/services/compute/json/quotas_client.py
@@ -28,7 +28,7 @@
self.service = self.config.compute.catalog_type
def get_quota_set(self, tenant_id):
- """List the quota set for a tenant"""
+ """List the quota set for a tenant."""
url = 'os-quota-sets/%s' % str(tenant_id)
resp, body = self.get(url)
diff --git a/tempest/services/compute/json/security_groups_client.py b/tempest/services/compute/json/security_groups_client.py
index 9d8de23..95f2831 100644
--- a/tempest/services/compute/json/security_groups_client.py
+++ b/tempest/services/compute/json/security_groups_client.py
@@ -15,8 +15,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common.rest_client import RestClient
import json
+import urllib
+
+from tempest.common.rest_client import RestClient
class SecurityGroupsClientJSON(RestClient):
@@ -28,22 +30,18 @@
self.service = self.config.compute.catalog_type
def list_security_groups(self, params=None):
- """List all security groups for a user"""
+ """List all security groups for a user."""
url = 'os-security-groups'
- if params is not None:
- param_list = []
- for param, value in params.iteritems():
- param_list.append("%s=%s" % (param, value))
-
- url += '?' + ' &'.join(param_list)
+ if params:
+ url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
return resp, body['security_groups']
def get_security_group(self, security_group_id):
- """Get the details of a Security Group"""
+ """Get the details of a Security Group."""
url = "os-security-groups/%s" % str(security_group_id)
resp, body = self.get(url)
body = json.loads(body)
@@ -65,7 +63,7 @@
return resp, body['security_group']
def delete_security_group(self, security_group_id):
- """Deletes the provided Security Group"""
+ """Deletes the provided Security Group."""
return self.delete('os-security-groups/%s' % str(security_group_id))
def create_security_group_rule(self, parent_group_id, ip_proto, from_port,
@@ -95,5 +93,5 @@
return resp, body['security_group_rule']
def delete_security_group_rule(self, group_rule_id):
- """Deletes the provided Security Group rule"""
+ """Deletes the provided Security Group rule."""
return self.delete('os-security-group-rules/%s' % str(group_rule_id))
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
index 94fe637..b832af0 100644
--- a/tempest/services/compute/json/servers_client.py
+++ b/tempest/services/compute/json/servers_client.py
@@ -15,10 +15,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest import exceptions
-from tempest.common.rest_client import RestClient
import json
import time
+import urllib
+
+from tempest.common.rest_client import RestClient
+from tempest import exceptions
class ServersClientJSON(RestClient):
@@ -108,47 +110,39 @@
return resp, body['server']
def get_server(self, server_id):
- """Returns the details of an existing server"""
+ """Returns the details of an existing server."""
resp, body = self.get("servers/%s" % str(server_id))
body = json.loads(body)
return resp, body['server']
def delete_server(self, server_id):
- """Deletes the given server"""
+ """Deletes the given server."""
return self.delete("servers/%s" % str(server_id))
def list_servers(self, params=None):
- """Lists all servers for a user"""
+ """Lists all servers for a user."""
url = 'servers'
- if params is not None:
- param_list = []
- for param, value in params.iteritems():
- param_list.append("%s=%s&" % (param, value))
-
- url = "servers?" + "".join(param_list)
+ if params:
+ url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
return resp, body
def list_servers_with_detail(self, params=None):
- """Lists all servers in detail for a user"""
+ """Lists all servers in detail for a user."""
url = 'servers/detail'
- if params is not None:
- param_list = []
- for param, value in params.iteritems():
- param_list.append("%s=%s&" % (param, value))
-
- url = "servers/detail?" + "".join(param_list)
+ if params:
+ url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
return resp, body
def wait_for_server_status(self, server_id, status):
- """Waits for a server to reach a given status"""
+ """Waits for a server to reach a given status."""
resp, body = self.get_server(server_id)
server_status = body['status']
start = int(time.time())
@@ -171,7 +165,7 @@
raise exceptions.TimeoutException(message)
def wait_for_server_termination(self, server_id, ignore_error=False):
- """Waits for server to reach termination"""
+ """Waits for server to reach termination."""
start_time = int(time.time())
while True:
try:
@@ -189,20 +183,20 @@
time.sleep(self.build_interval)
def list_addresses(self, server_id):
- """Lists all addresses for a server"""
+ """Lists all addresses for a server."""
resp, body = self.get("servers/%s/ips" % str(server_id))
body = json.loads(body)
return resp, body['addresses']
def list_addresses_by_network(self, server_id, network_id):
- """Lists all addresses of a specific network type for a server"""
+ """Lists all addresses of a specific network type for a server."""
resp, body = self.get("servers/%s/ips/%s" %
(str(server_id), network_id))
body = json.loads(body)
return resp, body
def change_password(self, server_id, password):
- """Changes the root password for the server"""
+ """Changes the root password for the server."""
post_body = {
'changePassword': {
'adminPass': password,
@@ -214,7 +208,7 @@
post_body, self.headers)
def reboot(self, server_id, reboot_type):
- """Reboots a server"""
+ """Reboots a server."""
post_body = {
'reboot': {
'type': reboot_type,
@@ -227,7 +221,7 @@
def rebuild(self, server_id, image_ref, name=None, meta=None,
personality=None, adminPass=None, disk_config=None):
- """Rebuilds a server with a new image"""
+ """Rebuilds a server with a new image."""
post_body = {
'imageRef': image_ref,
}
@@ -270,7 +264,7 @@
return resp, body
def confirm_resize(self, server_id):
- """Confirms the flavor change for a server"""
+ """Confirms the flavor change for a server."""
post_body = {
'confirmResize': None,
}
@@ -281,7 +275,7 @@
return resp, body
def revert_resize(self, server_id):
- """Reverts a server back to its original flavor"""
+ """Reverts a server back to its original flavor."""
post_body = {
'revertResize': None,
}
@@ -292,7 +286,7 @@
return resp, body
def create_image(self, server_id, image_name):
- """Creates an image of the given server"""
+ """Creates an image of the given server."""
post_body = {
'createImage': {
'name': image_name,
@@ -351,7 +345,7 @@
post_body, self.headers)
def attach_volume(self, server_id, volume_id, device='/dev/vdz'):
- """Attaches a volume to a server instance"""
+ """Attaches a volume to a server instance."""
post_body = json.dumps({
'volumeAttachment': {
'volumeId': volume_id,
@@ -363,13 +357,13 @@
return resp, body
def detach_volume(self, server_id, volume_id):
- """Detaches a volume from a server instance"""
+ """Detaches a volume from a server instance."""
resp, body = self.delete('servers/%s/os-volume_attachments/%s' %
(server_id, volume_id))
return resp, body
def add_security_group(self, server_id, security_group_name):
- """Adds a security group to the server"""
+ """Adds a security group to the server."""
post_body = {
'addSecurityGroup': {
'name': security_group_name
@@ -380,7 +374,7 @@
post_body, self.headers)
def remove_security_group(self, server_id, security_group_name):
- """Removes a security group from the server"""
+ """Removes a security group from the server."""
post_body = {
'removeSecurityGroup': {
'name': security_group_name
@@ -391,7 +385,7 @@
post_body, self.headers)
def live_migrate_server(self, server_id, dest_host, use_block_migration):
- """ This should be called with administrator privileges """
+ """This should be called with administrator privileges ."""
migrate_params = {
"disk_over_commit": False,
@@ -415,7 +409,7 @@
return resp, body['servers']
def migrate_server(self, server_id):
- """Migrates a server to a new host"""
+ """Migrates a server to a new host."""
post_body = {'migrate': 'null'}
post_body = json.dumps(post_body)
resp, body = self.post('servers/%s/action' % server_id,
@@ -423,7 +417,7 @@
return resp, body
def confirm_migration(self, server_id):
- """Confirms the migration of a server"""
+ """Confirms the migration of a server."""
post_body = {'confirmResize': 'null'}
post_body = json.dumps(post_body)
resp, body = self.post('servers/%s/action' % server_id,
@@ -431,63 +425,63 @@
return resp, body
def lock_server(self, server_id):
- """Locks the given server"""
+ """Locks the given server."""
post_body = {'lock': 'null'}
post_body = json.dumps(post_body)
resp, body = self.post('servers/%s/action' % server_id,
post_body, self.headers)
def unlock_server(self, server_id):
- """UNlocks the given server"""
+ """UNlocks the given server."""
post_body = {'unlock': 'null'}
post_body = json.dumps(post_body)
resp, body = self.post('servers/%s/action' % server_id,
post_body, self.headers)
def start_server(self, server_id):
- """Starts the given server"""
+ """Starts the given server."""
post_body = {'os-start': 'null'}
post_body = json.dumps(post_body)
resp, body = self.post('servers/%s/action' % server_id,
post_body, self.headers)
def stop_server(self, server_id):
- """Stops the given server"""
+ """Stops the given server."""
post_body = {'os-stop': 'null'}
post_body = json.dumps(post_body)
resp, body = self.post('servers/%s/action' % server_id,
post_body, self.headers)
def suspend_server(self, server_id):
- """Suspends the provded server"""
+ """Suspends the provded server."""
post_body = {'suspend': 'null'}
post_body = json.dumps(post_body)
resp, body = self.post('servers/%s/action' % server_id,
post_body, self.headers)
def resume_server(self, server_id):
- """Un-suspends the provded server"""
+ """Un-suspends the provded server."""
post_body = {'resume': 'null'}
post_body = json.dumps(post_body)
resp, body = self.post('servers/%s/action' % server_id,
post_body, self.headers)
def pause_server(self, server_id):
- """Pauses the provded server"""
+ """Pauses the provded server."""
post_body = {'pause': 'null'}
post_body = json.dumps(post_body)
resp, body = self.post('servers/%s/action' % server_id,
post_body, self.headers)
def unpause_server(self, server_id):
- """Un-pauses the provded server"""
+ """Un-pauses the provded server."""
post_body = {'unpause': 'null'}
post_body = json.dumps(post_body)
resp, body = self.post('servers/%s/action' % server_id,
post_body, self.headers)
def reset_state(self, server_id, new_state='error'):
- """Resets the state of a server to active/error"""
+ """Resets the state of a server to active/error."""
post_body = {
'os-resetState': {
'state': new_state
diff --git a/tempest/services/compute/json/volumes_extensions_client.py b/tempest/services/compute/json/volumes_extensions_client.py
index 240bcfe..a5f6ec3 100644
--- a/tempest/services/compute/json/volumes_extensions_client.py
+++ b/tempest/services/compute/json/volumes_extensions_client.py
@@ -15,10 +15,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest import exceptions
-from tempest.common.rest_client import RestClient
import json
import time
+import urllib
+
+from tempest.common.rest_client import RestClient
+from tempest import exceptions
class VolumesExtensionsClientJSON(RestClient):
@@ -32,35 +34,27 @@
self.build_timeout = self.config.volume.build_timeout
def list_volumes(self, params=None):
- """List all the volumes created"""
+ """List all the volumes created."""
url = 'os-volumes'
- if params is not None:
- param_list = []
- for param, value in params.iteritems():
- param_list.append("%s=%s&" % (param, value))
-
- url += '?' + ' '.join(param_list)
+ if params:
+ url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
return resp, body['volumes']
def list_volumes_with_detail(self, params=None):
- """List all the details of volumes"""
+ """List all the details of volumes."""
url = 'os-volumes/detail'
- if params is not None:
- param_list = []
- for param, value in params.iteritems():
- param_list.append("%s=%s&" % (param, value))
-
- url = '?' + ' '.join(param_list)
+ if params:
+ url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
return resp, body['volumes']
def get_volume(self, volume_id, wait=None):
- """Returns the details of a single volume"""
+ """Returns the details of a single volume."""
url = "os-volumes/%s" % str(volume_id)
resp, body = self.get(url, wait=wait)
body = json.loads(body)
@@ -86,11 +80,11 @@
return resp, body['volume']
def delete_volume(self, volume_id):
- """Deletes the Specified Volume"""
+ """Deletes the Specified Volume."""
return self.delete("os-volumes/%s" % str(volume_id))
def wait_for_volume_status(self, volume_id, status):
- """Waits for a Volume to reach a given status"""
+ """Waits for a Volume to reach a given status."""
resp, body = self.get_volume(volume_id)
volume_name = body['displayName']
volume_status = body['status']
diff --git a/tempest/services/compute/xml/console_output_client.py b/tempest/services/compute/xml/console_output_client.py
new file mode 100644
index 0000000..e618d63
--- /dev/null
+++ b/tempest/services/compute/xml/console_output_client.py
@@ -0,0 +1,41 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2012 IBM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from lxml import etree
+from tempest.common.rest_client import RestClientXML
+from tempest.services.compute.xml.common import Document
+from tempest.services.compute.xml.common import Element
+from tempest.services.compute.xml.common import Text
+from tempest.services.compute.xml.common import xml_to_json
+
+
+class ConsoleOutputsClientXML(RestClientXML):
+
+ def __init__(self, config, username, password, auth_url, tenant_name=None):
+ super(ConsoleOutputsClientXML, self).__init__(config, username,
+ password,
+ auth_url, tenant_name)
+ self.service = self.config.compute.catalog_type
+
+ def get_console_output(self, server_id, length):
+ post_body = Element("os-getConsoleOutput", length=length)
+ resp, body = self.post("/servers/%s/action" % server_id,
+ headers=self.headers,
+ body=str(Document(post_body)))
+ body = xml_to_json(etree.fromstring(body))
+ return resp, body
diff --git a/tempest/services/compute/xml/flavors_client.py b/tempest/services/compute/xml/flavors_client.py
index 63ce267..2a8ad0d 100644
--- a/tempest/services/compute/xml/flavors_client.py
+++ b/tempest/services/compute/xml/flavors_client.py
@@ -63,7 +63,7 @@
return [self._format_flavor(xml_to_json(x)) for x in node]
def _list_flavors(self, url, params):
- if params is not None:
+ if params:
url += "?%s" % urllib.urlencode(params)
resp, body = self.get(url, self.headers)
@@ -86,7 +86,7 @@
def create_flavor(self, name, ram, vcpus, disk, ephemeral, flavor_id,
swap, rxtx):
- """Creates a new flavor or instance type"""
+ """Creates a new flavor or instance type."""
flavor = Element("flavor",
xmlns=XMLNS_11,
ram=ram,
@@ -105,5 +105,5 @@
return resp, flavor
def delete_flavor(self, flavor_id):
- """Deletes the given flavor"""
+ """Deletes the given flavor."""
return self.delete("flavors/%s" % str(flavor_id), self.headers)
diff --git a/tempest/services/compute/xml/floating_ips_client.py b/tempest/services/compute/xml/floating_ips_client.py
index 2f87926..74b4be2 100644
--- a/tempest/services/compute/xml/floating_ips_client.py
+++ b/tempest/services/compute/xml/floating_ips_client.py
@@ -16,12 +16,13 @@
# under the License.
from lxml import etree
+import urllib
from tempest.common.rest_client import RestClientXML
from tempest import exceptions
-from tempest.services.compute.xml.common import xml_to_json
from tempest.services.compute.xml.common import Document
from tempest.services.compute.xml.common import Element
+from tempest.services.compute.xml.common import xml_to_json
class FloatingIPsClientXML(RestClientXML):
@@ -41,20 +42,17 @@
return json
def list_floating_ips(self, params=None):
- """Returns a list of all floating IPs filtered by any parameters"""
+ """Returns a list of all floating IPs filtered by any parameters."""
url = 'os-floating-ips'
- if params is not None:
- param_list = []
- for param, value in params.iteritems():
- param_list.append("%s=%s" % (param, value))
- url += "?" + "&".join(param_list)
+ if params:
+ url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url, self.headers)
body = self._parse_array(etree.fromstring(body))
return resp, body
def get_floating_ip_details(self, floating_ip_id):
- """Get the details of a floating IP"""
+ """Get the details of a floating IP."""
url = "os-floating-ips/%s" % str(floating_ip_id)
resp, body = self.get(url, self.headers)
body = self._parse_floating_ip(etree.fromstring(body))
@@ -63,20 +61,20 @@
return resp, body
def create_floating_ip(self):
- """Allocate a floating IP to the project"""
+ """Allocate a floating IP to the project."""
url = 'os-floating-ips'
resp, body = self.post(url, None, self.headers)
body = self._parse_floating_ip(etree.fromstring(body))
return resp, body
def delete_floating_ip(self, floating_ip_id):
- """Deletes the provided floating IP from the project"""
+ """Deletes the provided floating IP from the project."""
url = "os-floating-ips/%s" % str(floating_ip_id)
resp, body = self.delete(url, self.headers)
return resp, body
def associate_floating_ip_to_server(self, floating_ip, server_id):
- """Associate the provided floating IP to a specific server"""
+ """Associate the provided floating IP to a specific server."""
url = "servers/%s/action" % str(server_id)
doc = Document()
server = Element("addFloatingIp")
@@ -86,7 +84,7 @@
return resp, body
def disassociate_floating_ip_from_server(self, floating_ip, server_id):
- """Disassociate the provided floating IP from a specific server"""
+ """Disassociate the provided floating IP from a specific server."""
url = "servers/%s/action" % str(server_id)
doc = Document()
server = Element("removeFloatingIp")
diff --git a/tempest/services/compute/xml/images_client.py b/tempest/services/compute/xml/images_client.py
index 12ad4d4..1e8b250 100644
--- a/tempest/services/compute/xml/images_client.py
+++ b/tempest/services/compute/xml/images_client.py
@@ -20,8 +20,8 @@
from lxml import etree
-from tempest import exceptions
from tempest.common.rest_client import RestClientXML
+from tempest import exceptions
from tempest.services.compute.xml.common import Document
from tempest.services.compute.xml.common import Element
from tempest.services.compute.xml.common import Text
@@ -43,7 +43,7 @@
return self._parse_links(node, json)
def _parse_image(self, node):
- """Parses detailed XML image information into dictionary"""
+ """Parses detailed XML image information into dictionary."""
json = xml_to_json(node)
self._parse_links(node, json)
@@ -61,7 +61,7 @@
return json
def _parse_links(self, node, json):
- """Append multiple links under a list"""
+ """Append multiple links under a list."""
# look for links
if 'link' in json:
# remove single link element
@@ -71,7 +71,7 @@
return json
def create_image(self, server_id, name, meta=None):
- """Creates an image of the original server"""
+ """Creates an image of the original server."""
post_body = Element('createImage', name=name)
if meta:
@@ -86,18 +86,17 @@
return resp, body
def list_images(self, params=None):
- """Returns a list of all images filtered by any parameters"""
+ """Returns a list of all images filtered by any parameters."""
url = 'images'
if params:
- param_list = urllib.urlencode(params)
- url += "?" + param_list
+ url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url, self.headers)
body = xml_to_json(etree.fromstring(body))
return resp, body['images']
def list_images_with_detail(self, params=None):
- """Returns a detailed list of images filtered by any parameters"""
+ """Returns a detailed list of images filtered by any parameters."""
url = 'images/detail'
if params:
param_list = urllib.urlencode(params)
@@ -109,13 +108,13 @@
return resp, body['images']
def get_image(self, image_id):
- """Returns the details of a single image"""
+ """Returns the details of a single image."""
resp, body = self.get("images/%s" % str(image_id), self.headers)
body = self._parse_image(etree.fromstring(body))
return resp, body
def delete_image(self, image_id):
- """Deletes the provided image"""
+ """Deletes the provided image."""
return self.delete("images/%s" % str(image_id), self.headers)
def wait_for_image_resp_code(self, image_id, code):
@@ -148,14 +147,14 @@
raise exceptions.TimeoutException
def list_image_metadata(self, image_id):
- """Lists all metadata items for an image"""
+ """Lists all metadata items for an image."""
resp, body = self.get("images/%s/metadata" % str(image_id),
self.headers)
body = xml_to_json(etree.fromstring(body))
return resp, body['metadata']
def set_image_metadata(self, image_id, meta):
- """Sets the metadata for an image"""
+ """Sets the metadata for an image."""
post_body = json.dumps({'metadata': meta})
resp, body = self.put('images/%s/metadata' % str(image_id),
post_body, self.headers)
@@ -163,7 +162,7 @@
return resp, body['metadata']
def update_image_metadata(self, image_id, meta):
- """Updates the metadata for an image"""
+ """Updates the metadata for an image."""
post_body = Element('metadata', meta)
for k, v in meta:
metadata = Element('meta', key=k)
@@ -177,14 +176,14 @@
return resp, body['metadata']
def get_image_metadata_item(self, image_id, key):
- """Returns the value for a specific image metadata key"""
+ """Returns the value for a specific image metadata key."""
resp, body = self.get("images/%s/metadata/%s.xml" %
(str(image_id), key), self.headers)
body = xml_to_json(etree.fromstring(body))
return resp, body['meta']
def set_image_metadata_item(self, image_id, key, meta):
- """Sets the value for a specific image metadata key"""
+ """Sets the value for a specific image metadata key."""
post_body = json.dumps({'meta': meta})
resp, body = self.put('images/%s/metadata/%s' % (str(image_id), key),
post_body, self.headers)
@@ -192,7 +191,7 @@
return resp, body['meta']
def delete_image_metadata_item(self, image_id, key):
- """Deletes a single image metadata key/value pair"""
+ """Deletes a single image metadata key/value pair."""
resp, body = self.delete("images/%s/metadata/%s" % (str(image_id), key,
self.headers))
return resp, body
diff --git a/tempest/services/compute/xml/limits_client.py b/tempest/services/compute/xml/limits_client.py
index 229dbee..473952b 100644
--- a/tempest/services/compute/xml/limits_client.py
+++ b/tempest/services/compute/xml/limits_client.py
@@ -15,10 +15,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common.rest_client import RestClientXML
from lxml import etree
from lxml import objectify
+from tempest.common.rest_client import RestClientXML
+
NS = "{http://docs.openstack.org/common/api/v1.0}"
diff --git a/tempest/services/compute/xml/security_groups_client.py b/tempest/services/compute/xml/security_groups_client.py
index 0e35112..ac70f1b 100644
--- a/tempest/services/compute/xml/security_groups_client.py
+++ b/tempest/services/compute/xml/security_groups_client.py
@@ -16,6 +16,7 @@
# under the License.
from lxml import etree
+import urllib
from tempest.common.rest_client import RestClientXML
from tempest.services.compute.xml.common import Document
@@ -43,22 +44,18 @@
return json
def list_security_groups(self, params=None):
- """List all security groups for a user"""
+ """List all security groups for a user."""
url = 'os-security-groups'
- if params is not None:
- param_list = []
- for param, value in params.iteritems():
- param_list.append("%s=%s" % (param, value))
-
- url += '?' + ' &'.join(param_list)
+ if params:
+ url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url, self.headers)
body = self._parse_array(etree.fromstring(body))
return resp, body
def get_security_group(self, security_group_id):
- """Get the details of a Security Group"""
+ """Get the details of a Security Group."""
url = "os-security-groups/%s" % str(security_group_id)
resp, body = self.get(url, self.headers)
body = self._parse_body(etree.fromstring(body))
@@ -81,7 +78,7 @@
return resp, body
def delete_security_group(self, security_group_id):
- """Deletes the provided Security Group"""
+ """Deletes the provided Security Group."""
return self.delete('os-security-groups/%s' %
str(security_group_id), self.headers)
@@ -128,6 +125,6 @@
return resp, body
def delete_security_group_rule(self, group_rule_id):
- """Deletes the provided Security Group rule"""
+ """Deletes the provided Security Group rule."""
return self.delete('os-security-group-rules/%s' %
str(group_rule_id), self.headers)
diff --git a/tempest/services/compute/xml/servers_client.py b/tempest/services/compute/xml/servers_client.py
index c60175d..4a84646 100644
--- a/tempest/services/compute/xml/servers_client.py
+++ b/tempest/services/compute/xml/servers_client.py
@@ -16,15 +16,19 @@
# under the License.
import logging
+import time
+import urllib
+
from lxml import etree
-from tempest import exceptions
+
from tempest.common.rest_client import RestClientXML
+from tempest import exceptions
from tempest.services.compute.xml.common import Document
from tempest.services.compute.xml.common import Element
from tempest.services.compute.xml.common import Text
from tempest.services.compute.xml.common import xml_to_json
from tempest.services.compute.xml.common import XMLNS_11
-import time
+
LOG = logging.getLogger(__name__)
@@ -100,7 +104,7 @@
self.service = self.config.compute.catalog_type
def _parse_key_value(self, node):
- """Parse <foo key='key'>value</foo> data into {'key': 'value'}"""
+ """Parse <foo key='key'>value</foo> data into {'key': 'value'}."""
data = {}
for node in node.getchildren():
data[node.get('key')] = node.text
@@ -128,13 +132,13 @@
return json
def get_server(self, server_id):
- """Returns the details of an existing server"""
+ """Returns the details of an existing server."""
resp, body = self.get("servers/%s" % str(server_id), self.headers)
server = self._parse_server(etree.fromstring(body))
return resp, server
def delete_server(self, server_id):
- """Deletes the given server"""
+ """Deletes the given server."""
return self.delete("servers/%s" % str(server_id))
def _parse_array(self, node):
@@ -145,24 +149,18 @@
def list_servers(self, params=None):
url = 'servers/detail'
- if params is not None:
- param_list = []
- for param, value in params.iteritems():
- param_list.append("%s=%s" % (param, value))
+ if params:
+ url += '?%s' % urllib.urlencode(params)
- url += "?" + "&".join(param_list)
resp, body = self.get(url, self.headers)
servers = self._parse_array(etree.fromstring(body))
return resp, {"servers": servers}
def list_servers_with_detail(self, params=None):
url = 'servers/detail'
- if params is not None:
- param_list = []
- for param, value in params.iteritems():
- param_list.append("%s=%s" % (param, value))
+ if params:
+ url += '?%s' % urllib.urlencode(params)
- url += "?" + "&".join(param_list)
resp, body = self.get(url, self.headers)
servers = self._parse_array(etree.fromstring(body))
return resp, {"servers": servers}
@@ -244,7 +242,7 @@
return resp, server
def wait_for_server_status(self, server_id, status):
- """Waits for a server to reach a given status"""
+ """Waits for a server to reach a given status."""
resp, body = self.get_server(server_id)
server_status = body['status']
start = int(time.time())
@@ -267,7 +265,7 @@
raise exceptions.TimeoutException(message)
def wait_for_server_termination(self, server_id, ignore_error=False):
- """Waits for server to reach termination"""
+ """Waits for server to reach termination."""
start_time = int(time.time())
while True:
try:
@@ -292,7 +290,7 @@
return {node.get('id'): addrs}
def list_addresses(self, server_id):
- """Lists all addresses for a server"""
+ """Lists all addresses for a server."""
resp, body = self.get("servers/%s/ips" % str(server_id), self.headers)
networks = {}
@@ -303,7 +301,7 @@
return resp, networks
def list_addresses_by_network(self, server_id, network_id):
- """Lists all addresses of a specific network type for a server"""
+ """Lists all addresses of a specific network type for a server."""
resp, body = self.get("servers/%s/ips/%s" % (str(server_id),
network_id),
self.headers)
diff --git a/tempest/services/compute/xml/volumes_extensions_client.py b/tempest/services/compute/xml/volumes_extensions_client.py
index 0fbc070..60ef398 100644
--- a/tempest/services/compute/xml/volumes_extensions_client.py
+++ b/tempest/services/compute/xml/volumes_extensions_client.py
@@ -16,15 +16,17 @@
# under the License.
import time
+import urllib
+
from lxml import etree
-from tempest import exceptions
from tempest.common.rest_client import RestClientXML
-from tempest.services.compute.xml.common import xml_to_json
-from tempest.services.compute.xml.common import XMLNS_11
+from tempest import exceptions
+from tempest.services.compute.xml.common import Document
from tempest.services.compute.xml.common import Element
from tempest.services.compute.xml.common import Text
-from tempest.services.compute.xml.common import Document
+from tempest.services.compute.xml.common import xml_to_json
+from tempest.services.compute.xml.common import XMLNS_11
class VolumesExtensionsClientXML(RestClientXML):
@@ -52,7 +54,7 @@
return vol
def list_volumes(self, params=None):
- """List all the volumes created"""
+ """List all the volumes created."""
url = 'os-volumes'
if params:
@@ -66,7 +68,7 @@
return resp, volumes
def list_volumes_with_detail(self, params=None):
- """List all the details of volumes"""
+ """List all the details of volumes."""
url = 'os-volumes/detail'
if params:
@@ -80,7 +82,7 @@
return resp, volumes
def get_volume(self, volume_id, wait=None):
- """Returns the details of a single volume"""
+ """Returns the details of a single volume."""
url = "os-volumes/%s" % str(volume_id)
resp, body = self.get(url, self.headers, wait=wait)
body = etree.fromstring(body)
@@ -114,11 +116,11 @@
return resp, body
def delete_volume(self, volume_id):
- """Deletes the Specified Volume"""
+ """Deletes the Specified Volume."""
return self.delete("os-volumes/%s" % str(volume_id))
def wait_for_volume_status(self, volume_id, status):
- """Waits for a Volume to reach a given status"""
+ """Waits for a Volume to reach a given status."""
resp, body = self.get_volume(volume_id)
volume_name = body['displayName']
volume_status = body['status']
diff --git a/tempest/services/identity/json/admin_client.py b/tempest/services/identity/json/admin_client.py
index 9e171b6..c4e6c95 100644
--- a/tempest/services/identity/json/admin_client.py
+++ b/tempest/services/identity/json/admin_client.py
@@ -1,8 +1,9 @@
-from tempest.common.rest_client import RestClient
-from tempest import exceptions
import httplib2
import json
+from tempest.common.rest_client import RestClient
+from tempest import exceptions
+
class AdminClientJSON(RestClient):
@@ -24,7 +25,7 @@
return self._has_admin_extensions
def create_role(self, name):
- """Create a role"""
+ """Create a role."""
post_body = {
'name': name,
}
@@ -51,19 +52,19 @@
return resp, body['tenant']
def delete_role(self, role_id):
- """Delete a role"""
+ """Delete a role."""
resp, body = self.delete('OS-KSADM/roles/%s' % str(role_id))
return resp, body
def list_user_roles(self, tenant_id, user_id):
- """Returns a list of roles assigned to a user for a tenant"""
+ """Returns a list of roles assigned to a user for a tenant."""
url = '/tenants/%s/users/%s/roles' % (tenant_id, user_id)
resp, body = self.get(url)
body = json.loads(body)
return resp, body['roles']
def assign_user_role(self, tenant_id, user_id, role_id):
- """Add roles to a user on a tenant"""
+ """Add roles to a user on a tenant."""
post_body = json.dumps({})
resp, body = self.put('/tenants/%s/users/%s/roles/OS-KSADM/%s' %
(tenant_id, user_id, role_id), post_body,
@@ -72,29 +73,29 @@
return resp, body['role']
def remove_user_role(self, tenant_id, user_id, role_id):
- """Removes a role assignment for a user on a tenant"""
+ """Removes a role assignment for a user on a tenant."""
return self.delete('/tenants/%s/users/%s/roles/OS-KSADM/%s' %
(tenant_id, user_id, role_id))
def delete_tenant(self, tenant_id):
- """Delete a tenant"""
+ """Delete a tenant."""
resp, body = self.delete('tenants/%s' % str(tenant_id))
return resp, body
def get_tenant(self, tenant_id):
- """Get tenant details"""
+ """Get tenant details."""
resp, body = self.get('tenants/%s' % str(tenant_id))
body = json.loads(body)
return resp, body['tenant']
def list_roles(self):
- """Returns roles"""
+ """Returns roles."""
resp, body = self.get('OS-KSADM/roles')
body = json.loads(body)
return resp, body['roles']
def list_tenants(self):
- """Returns tenants"""
+ """Returns tenants."""
resp, body = self.get('tenants')
body = json.loads(body)
return resp, body['tenants']
@@ -107,7 +108,7 @@
raise exceptions.NotFound('No such tenant')
def update_tenant(self, tenant_id, **kwargs):
- """Updates a tenant"""
+ """Updates a tenant."""
resp, body = self.get_tenant(tenant_id)
name = kwargs.get('name', body['name'])
desc = kwargs.get('description', body['description'])
@@ -125,7 +126,7 @@
return resp, body['tenant']
def create_user(self, name, password, tenant_id, email):
- """Create a user"""
+ """Create a user."""
post_body = {
'name': name,
'password': password,
@@ -138,18 +139,18 @@
return resp, body['user']
def delete_user(self, user_id):
- """Delete a user"""
+ """Delete a user."""
resp, body = self.delete("users/%s" % user_id)
return resp, body
def get_users(self):
- """Get the list of users"""
+ """Get the list of users."""
resp, body = self.get("users")
body = json.loads(body)
return resp, body['users']
def enable_disable_user(self, user_id, enabled):
- """Enables or disables a user"""
+ """Enables or disables a user."""
put_body = {
'enabled': enabled
}
@@ -160,12 +161,12 @@
return resp, body
def delete_token(self, token_id):
- """Delete a token"""
+ """Delete a token."""
resp, body = self.delete("tokens/%s" % token_id)
return resp, body
def list_users_for_tenant(self, tenant_id):
- """List users for a Tenant"""
+ """List users for a Tenant."""
resp, body = self.get('/tenants/%s/users' % tenant_id)
body = json.loads(body)
return resp, body['users']
@@ -178,25 +179,26 @@
raise exceptions.NotFound('No such user')
def create_service(self, name, type, **kwargs):
- """Create a service"""
+ """Create a service."""
post_body = {
- 'name': name,
- 'type': type,
- 'description': kwargs.get('description')}
+ 'name': name,
+ 'type': type,
+ 'description': kwargs.get('description')
+ }
post_body = json.dumps({'OS-KSADM:service': post_body})
resp, body = self.post('/OS-KSADM/services', post_body, self.headers)
body = json.loads(body)
return resp, body['OS-KSADM:service']
def get_service(self, service_id):
- """Get Service"""
+ """Get Service."""
url = '/OS-KSADM/services/%s' % service_id
resp, body = self.get(url)
body = json.loads(body)
return resp, body['OS-KSADM:service']
def delete_service(self, service_id):
- """Delete Service"""
+ """Delete Service."""
url = '/OS-KSADM/services/%s' % service_id
return self.delete(url)
diff --git a/tempest/services/identity/xml/admin_client.py b/tempest/services/identity/xml/admin_client.py
index 0ace184..8448ae0 100644
--- a/tempest/services/identity/xml/admin_client.py
+++ b/tempest/services/identity/xml/admin_client.py
@@ -15,17 +15,20 @@
# License for the specific language governing permissions and limitations
# under the License.
+import httplib2
+import json
import logging
+
from lxml import etree
+
from tempest.common.rest_client import RestClient
from tempest.common.rest_client import RestClientXML
+from tempest import exceptions
from tempest.services.compute.xml.common import Document
from tempest.services.compute.xml.common import Element
from tempest.services.compute.xml.common import Text
from tempest.services.compute.xml.common import xml_to_json
-from tempest import exceptions
-import httplib2
-import json
+
XMLNS = "http://docs.openstack.org/identity/api/v2.0"
@@ -60,7 +63,7 @@
return self._has_admin_extensions
def create_role(self, name):
- """Create a role"""
+ """Create a role."""
create_role = Element("role", xmlns=XMLNS, name=name)
resp, body = self.post('OS-KSADM/roles', str(Document(create_role)),
self.headers)
@@ -86,55 +89,55 @@
return resp, body
def delete_role(self, role_id):
- """Delete a role"""
+ """Delete a role."""
resp, body = self.delete('OS-KSADM/roles/%s' % str(role_id),
self.headers)
return resp, body
def list_user_roles(self, tenant_id, user_id):
- """Returns a list of roles assigned to a user for a tenant"""
+ """Returns a list of roles assigned to a user for a tenant."""
url = '/tenants/%s/users/%s/roles' % (tenant_id, user_id)
resp, body = self.get(url, self.headers)
body = self._parse_array(etree.fromstring(body))
return resp, body
def assign_user_role(self, tenant_id, user_id, role_id):
- """Add roles to a user on a tenant"""
+ """Add roles to a user on a tenant."""
resp, body = self.put('/tenants/%s/users/%s/roles/OS-KSADM/%s' %
(tenant_id, user_id, role_id), '', self.headers)
body = self._parse_body(etree.fromstring(body))
return resp, body
def remove_user_role(self, tenant_id, user_id, role_id):
- """Removes a role assignment for a user on a tenant"""
+ """Removes a role assignment for a user on a tenant."""
return self.delete('/tenants/%s/users/%s/roles/OS-KSADM/%s' %
(tenant_id, user_id, role_id), self.headers)
def delete_tenant(self, tenant_id):
- """Delete a tenant"""
+ """Delete a tenant."""
resp, body = self.delete('tenants/%s' % str(tenant_id), self.headers)
return resp, body
def get_tenant(self, tenant_id):
- """Get tenant details"""
+ """Get tenant details."""
resp, body = self.get('tenants/%s' % str(tenant_id), self.headers)
body = self._parse_body(etree.fromstring(body))
return resp, body
def list_roles(self):
- """Returns roles"""
+ """Returns roles."""
resp, body = self.get('OS-KSADM/roles', self.headers)
body = self._parse_array(etree.fromstring(body))
return resp, body
def list_tenants(self):
- """Returns tenants"""
+ """Returns tenants."""
resp, body = self.get('tenants', self.headers)
body = self._parse_array(etree.fromstring(body))
return resp, body
def update_tenant(self, tenant_id, **kwargs):
- """Updates a tenant"""
+ """Updates a tenant."""
resp, body = self.get_tenant(tenant_id)
name = kwargs.get('name', body['name'])
desc = kwargs.get('description', body['description'])
@@ -153,7 +156,7 @@
return resp, body
def create_user(self, name, password, tenant_id, email):
- """Create a user"""
+ """Create a user."""
create_user = Element("user",
xmlns=XMLNS,
name=name,
@@ -166,18 +169,18 @@
return resp, body
def delete_user(self, user_id):
- """Delete a user"""
+ """Delete a user."""
resp, body = self.delete("users/%s" % user_id, self.headers)
return resp, body
def get_users(self):
- """Get the list of users"""
+ """Get the list of users."""
resp, body = self.get("users", self.headers)
body = self._parse_array(etree.fromstring(body))
return resp, body
def enable_disable_user(self, user_id, enabled):
- """Enables or disables a user"""
+ """Enables or disables a user."""
enable_user = Element("user", enabled=str(enabled).lower())
resp, body = self.put('users/%s/enabled' % user_id,
str(Document(enable_user)), self.headers)
@@ -185,18 +188,18 @@
return resp, body
def delete_token(self, token_id):
- """Delete a token"""
+ """Delete a token."""
resp, body = self.delete("tokens/%s" % token_id, self.headers)
return resp, body
def list_users_for_tenant(self, tenant_id):
- """List users for a Tenant"""
+ """List users for a Tenant."""
resp, body = self.get('/tenants/%s/users' % tenant_id, self.headers)
body = self._parse_array(etree.fromstring(body))
return resp, body
def create_service(self, name, type, **kwargs):
- """Create a service"""
+ """Create a service."""
OS_KSADM = "http://docs.openstack.org/identity/api/ext/OS-KSADM/v1.0"
create_service = Element("service",
xmlns=OS_KSADM,
@@ -210,14 +213,14 @@
return resp, body
def get_service(self, service_id):
- """Get Service"""
+ """Get Service."""
url = '/OS-KSADM/services/%s' % service_id
resp, body = self.get(url, self.headers)
body = self._parse_body(etree.fromstring(body))
return resp, body
def delete_service(self, service_id):
- """Delete Service"""
+ """Delete Service."""
url = '/OS-KSADM/services/%s' % service_id
return self.delete(url, self.headers)
diff --git a/tempest/services/object_storage/account_client.py b/tempest/services/object_storage/account_client.py
index 4eaeb34..26f8329 100644
--- a/tempest/services/object_storage/account_client.py
+++ b/tempest/services/object_storage/account_client.py
@@ -16,6 +16,7 @@
# under the License.
import json
+import urllib
from tempest.common.rest_client import RestClient
@@ -39,7 +40,7 @@
def create_account_metadata(self, metadata,
metadata_prefix='X-Account-Meta-'):
- """Creates an account metadata entry"""
+ """Creates an account metadata entry."""
headers = {}
for key in metadata:
headers[metadata_prefix + key] = metadata[key]
@@ -81,11 +82,9 @@
DEFAULT: Python-List returned in response body
"""
- param_list = ['format=%s&' % self.format]
- if params is not None:
- for param, value in params.iteritems():
- param_list.append("%s=%s&" % (param, value))
- url = '?' + ''.join(param_list)
+ url = '?format=%s' % self.format
+ if params:
+ url += '&%s' + urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
diff --git a/tempest/services/object_storage/container_client.py b/tempest/services/object_storage/container_client.py
index bb08de2..7b5efff 100644
--- a/tempest/services/object_storage/container_client.py
+++ b/tempest/services/object_storage/container_client.py
@@ -16,6 +16,7 @@
# under the License.
import json
+import urllib
from tempest.common.rest_client import RestClient
@@ -36,7 +37,7 @@
Creates a container, with optional metadata passed in as a
dictonary
"""
- url = container_name
+ url = str(container_name)
headers = {}
if metadata is not None:
@@ -47,15 +48,15 @@
return resp, body
def delete_container(self, container_name):
- """Deletes the container (if it's empty)"""
- url = container_name
+ """Deletes the container (if it's empty)."""
+ url = str(container_name)
resp, body = self.delete(url)
return resp, body
def update_container_metadata(self, container_name, metadata,
metadata_prefix='X-Container-Meta-'):
- """Updates arbitrary metadata on container"""
- url = container_name
+ """Updates arbitrary metadata on container."""
+ url = str(container_name)
headers = {}
if metadata is not None:
@@ -67,8 +68,8 @@
def delete_container_metadata(self, container_name, metadata,
metadata_prefix='X-Remove-Container-Meta-'):
- """Deletes arbitrary metadata on container"""
- url = container_name
+ """Deletes arbitrary metadata on container."""
+ url = str(container_name)
headers = {}
if metadata is not None:
@@ -82,7 +83,7 @@
"""
Retrieves container metadata headers
"""
- url = container_name
+ url = str(container_name)
headers = {"X-Storage-Token": self.token}
resp, body = self.head(url, headers=headers)
return resp, body
@@ -93,8 +94,8 @@
item count is beyond 10,000 item listing limit.
Does not require any paramaters aside from container name.
"""
- #TODO: Rewite using json format to avoid newlines at end of obj names
- #Set limit to API limit - 1 (max returned items = 9999)
+ #TODO(dwalleck): Rewrite using json format to avoid newlines at end of
+ #obj names. Set limit to API limit - 1 (max returned items = 9999)
limit = 9999
marker = None
if params is not None:
@@ -162,11 +163,9 @@
"""
url = str(container)
- param_list = ['format=%s&' % self.format]
- if params is not None:
- for param, value in params.iteritems():
- param_list.append("%s=%s&" % (param, value))
- url += '?' + ''.join(param_list)
+ url += '?format=%s' % self.format
+ if params:
+ url += '&%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
diff --git a/tempest/services/object_storage/object_client.py b/tempest/services/object_storage/object_client.py
index 440d043..c05c905 100644
--- a/tempest/services/object_storage/object_client.py
+++ b/tempest/services/object_storage/object_client.py
@@ -15,8 +15,11 @@
# License for the specific language governing permissions and limitations
# under the License.
+import httplib2
+import json
import re
from tempest.common.rest_client import RestClient
+from tempest import exceptions
class ObjectClient(RestClient):
@@ -27,25 +30,25 @@
self.service = self.config.object_storage.catalog_type
def create_object(self, container, object_name, data):
- """Create storage object"""
+ """Create storage object."""
url = "%s/%s" % (str(container), str(object_name))
resp, body = self.put(url, data, self.headers)
return resp, body
def update_object(self, container, object_name, data):
- """Upload data to replace current storage object"""
+ """Upload data to replace current storage object."""
return create_object(container, object_name, data)
def delete_object(self, container, object_name):
- """Delete storage object"""
+ """Delete storage object."""
url = "%s/%s" % (str(container), str(object_name))
resp, body = self.delete(url)
return resp, body
def update_object_metadata(self, container, object_name, metadata,
metadata_prefix='X-Object-Meta-'):
- """Add, remove, or change X-Object-Meta metadata for storage object"""
+ """Add, remove, or change X-Object-Meta metadata for storage object."""
headers = {}
for key in metadata:
@@ -56,7 +59,7 @@
return resp, body
def list_object_metadata(self, container, object_name):
- """List all storage object X-Object-Meta- metadata"""
+ """List all storage object X-Object-Meta- metadata."""
url = "%s/%s" % (str(container), str(object_name))
resp, body = self.head(url)
@@ -69,9 +72,9 @@
resp, body = self.get(url)
return resp, body
- def copy_object(self, container, src_object_name, dest_object_name,
- metadata=None):
- """Copy storage object's data to the new object using PUT"""
+ def copy_object_in_same_container(self, container, src_object_name,
+ dest_object_name, metadata=None):
+ """Copy storage object's data to the new object using PUT."""
url = "{0}/{1}".format(container, dest_object_name)
headers = {}
@@ -85,9 +88,26 @@
resp, body = self.put(url, None, headers=headers)
return resp, body
+ def copy_object_across_containers(self, src_container, src_object_name,
+ dst_container, dst_object_name,
+ metadata=None):
+ """Copy storage object's data to the new object using PUT."""
+
+ url = "{0}/{1}".format(dst_container, dst_object_name)
+ headers = {}
+ headers['X-Copy-From'] = "%s/%s" % (str(src_container),
+ str(src_object_name))
+ headers['content-length'] = '0'
+ if metadata:
+ for key in metadata:
+ headers[str(key)] = metadata[key]
+
+ resp, body = self.put(url, None, headers=headers)
+ return resp, body
+
def copy_object_2d_way(self, container, src_object_name, dest_object_name,
metadata=None):
- """Copy storage object's data to the new object using COPY"""
+ """Copy storage object's data to the new object using COPY."""
url = "{0}/{1}".format(container, src_object_name)
headers = {}
@@ -99,3 +119,62 @@
resp, body = self.copy(url, headers=headers)
return resp, body
+
+
+class ObjectClientCustomizedHeader(RestClient):
+
+ def __init__(self, config, username, password, auth_url, tenant_name=None):
+ super(ObjectClientCustomizedHeader, self).__init__(config, username,
+ password, auth_url,
+ tenant_name)
+ #Overwrites json-specific header encoding in RestClient
+ self.service = self.config.object_storage.catalog_type
+ self.format = 'json'
+
+ def request(self, method, url, headers=None, body=None, wait=None):
+ """A simple HTTP request interface."""
+ self.http_obj = httplib2.Http()
+ if headers is None:
+ headers = {}
+ if self.base_url is None:
+ self._set_auth()
+
+ req_url = "%s/%s" % (self.base_url, url)
+ resp, resp_body = self.http_obj.request(req_url, method,
+ headers=headers, body=body)
+
+ if resp.status == 401 or resp.status == 403:
+ self._log(req_url, body, resp, resp_body)
+ raise exceptions.Unauthorized()
+
+ return resp, resp_body
+
+ def get_object(self, container, object_name, metadata=None):
+ """Retrieve object's data."""
+ headers = {}
+ if metadata:
+ for key in metadata:
+ headers[str(key)] = metadata[key]
+
+ url = "{0}/{1}".format(container, object_name)
+ resp, body = self.get(url, headers=headers)
+ return resp, body
+
+ def create_object(self, container, object_name, data, metadata=None):
+ """Create storage object."""
+
+ headers = {}
+ if metadata:
+ for key in metadata:
+ headers[str(key)] = metadata[key]
+
+ url = "%s/%s" % (str(container), str(object_name))
+ resp, body = self.put(url, data, headers=headers)
+ return resp, body
+
+ def delete_object(self, container, object_name):
+ """Delete storage object."""
+
+ url = "%s/%s" % (str(container), str(object_name))
+ resp, body = self.delete(url)
+ return resp, body
diff --git a/tempest/services/volume/json/admin/__init__.py b/tempest/services/volume/json/admin/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/services/volume/json/admin/__init__.py
diff --git a/tempest/services/volume/json/admin/volume_types_client.py b/tempest/services/volume/json/admin/volume_types_client.py
new file mode 100644
index 0000000..0cadcb5
--- /dev/null
+++ b/tempest/services/volume/json/admin/volume_types_client.py
@@ -0,0 +1,124 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack, LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import urllib
+
+from tempest.common.rest_client import RestClient
+
+
+class VolumeTypesClientJSON(RestClient):
+ """
+ Client class to send CRUD Volume Types API requests to a Cinder endpoint
+ """
+
+ def __init__(self, config, username, password, auth_url, tenant_name=None):
+ super(VolumeTypesClientJSON, self).__init__(config, username, password,
+ auth_url, tenant_name)
+
+ self.service = self.config.volume.catalog_type
+ self.build_interval = self.config.volume.build_interval
+ self.build_timeout = self.config.volume.build_timeout
+
+ def list_volume_types(self, params=None):
+ """List all the volume_types created."""
+ url = 'types'
+ if params is not None:
+ url += '?%s' % urllib.urlencode(params)
+
+ resp, body = self.get(url)
+ body = json.loads(body)
+ return resp, body['volume_types']
+
+ def get_volume_type(self, volume_id):
+ """Returns the details of a single volume_type."""
+ url = "types/%s" % str(volume_id)
+ resp, body = self.get(url)
+ body = json.loads(body)
+ return resp, body['volume_type']
+
+ def create_volume_type(self, name, **kwargs):
+ """
+ Creates a new Volume_type.
+ name(Required): Name of volume_type.
+ Following optional keyword arguments are accepted:
+ extra_specs: A dictionary of values to be used as extra_specs.
+ """
+ post_body = {
+ 'name': name,
+ 'extra_specs': kwargs.get('extra_specs'),
+ }
+
+ post_body = json.dumps({'volume_type': post_body})
+ resp, body = self.post('types', post_body, self.headers)
+ body = json.loads(body)
+ return resp, body['volume_type']
+
+ def delete_volume_type(self, volume_id):
+ """Deletes the Specified Volume_type."""
+ return self.delete("types/%s" % str(volume_id))
+
+ def list_volume_types_extra_specs(self, vol_type_id, params=None):
+ """List all the volume_types extra specs created."""
+ url = 'types/%s/extra_specs' % str(vol_type_id)
+ if params is not None:
+ url += '?%s' % urllib.urlencode(params)
+
+ resp, body = self.get(url)
+ body = json.loads(body)
+ return resp, body['extra_specs']
+
+ def get_volume_type_extra_specs(self, vol_type_id, extra_spec_name):
+ """Returns the details of a single volume_type extra spec."""
+ url = "types/%s/extra_specs/%s" % (str(vol_type_id),
+ str(extra_spec_name))
+ resp, body = self.get(url)
+ body = json.loads(body)
+ return resp, body
+
+ def create_volume_type_extra_specs(self, vol_type_id, extra_spec):
+ """
+ Creates a new Volume_type extra spec.
+ vol_type_id: Id of volume_type.
+ extra_specs: A dictionary of values to be used as extra_specs.
+ """
+ url = "types/%s/extra_specs" % str(vol_type_id)
+ post_body = json.dumps({'extra_specs': extra_spec})
+ resp, body = self.post(url, post_body, self.headers)
+ body = json.loads(body)
+ return resp, body['extra_specs']
+
+ def delete_volume_type_extra_specs(self, vol_id, extra_spec_name):
+ """Deletes the Specified Volume_type extra spec."""
+ return self.delete("types/%s/extra_specs/%s" % ((str(vol_id)),
+ str(extra_spec_name)))
+
+ def update_volume_type_extra_specs(self, vol_type_id, extra_spec_name,
+ extra_spec):
+ """
+ Update a volume_type extra spec.
+ vol_type_id: Id of volume_type.
+ extra_spec_name: Name of the extra spec to be updated.
+ extra_spec: A dictionary of with key as extra_spec_name and the
+ updated value.
+ """
+ url = "types/%s/extra_specs/%s" % (str(vol_type_id),
+ str(extra_spec_name))
+ put_body = json.dumps(extra_spec)
+ resp, body = self.put(url, put_body, self.headers)
+ body = json.loads(body)
+ return resp, body
diff --git a/tempest/services/volume/json/volumes_client.py b/tempest/services/volume/json/volumes_client.py
index 28dae4e..cc5a115 100644
--- a/tempest/services/volume/json/volumes_client.py
+++ b/tempest/services/volume/json/volumes_client.py
@@ -17,6 +17,7 @@
import json
import time
+import urllib
from tempest.common.rest_client import RestClient
from tempest import exceptions
@@ -36,34 +37,27 @@
self.build_timeout = self.config.volume.build_timeout
def list_volumes(self, params=None):
- """List all the volumes created"""
+ """List all the volumes created."""
url = 'volumes'
- if params is not None:
- param_list = []
- for param, value in params.iteritems():
- param_list.append("%s=%s&" % (param, value))
+ if params:
+ url += '?%s' % urllib.urlencode(params)
- url += '?' + ' '.join(param_list)
resp, body = self.get(url)
body = json.loads(body)
return resp, body['volumes']
def list_volumes_with_detail(self, params=None):
- """List the details of all volumes"""
+ """List the details of all volumes."""
url = 'volumes/detail'
- if params is not None:
- param_list = []
- for param, value in params.iteritems():
- param_list.append("%s=%s&" % (param, value))
-
- url = '?' + ' '.join(param_list)
+ if params:
+ url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
return resp, body['volumes']
def get_volume(self, volume_id, wait=None):
- """Returns the details of a single volume"""
+ """Returns the details of a single volume."""
url = "volumes/%s" % str(volume_id)
resp, body = self.get(url, wait=wait)
body = json.loads(body)
@@ -76,11 +70,13 @@
Following optional keyword arguments are accepted:
display_name: Optional Volume Name.
metadata: A dictionary of values to be used as metadata.
+ volume_type: Optional Name of volume_type for the volume.
"""
post_body = {
'size': size,
'display_name': kwargs.get('display_name'),
'metadata': kwargs.get('metadata'),
+ 'volume_type': kwargs.get('volume_type')
}
post_body = json.dumps({'volume': post_body})
@@ -89,11 +85,11 @@
return resp, body['volume']
def delete_volume(self, volume_id):
- """Deletes the Specified Volume"""
+ """Deletes the Specified Volume."""
return self.delete("volumes/%s" % str(volume_id))
def attach_volume(self, volume_id, instance_uuid, mountpoint):
- """Attaches a volume to a given instance on a given mountpoint"""
+ """Attaches a volume to a given instance on a given mountpoint."""
post_body = {
'instance_uuid': instance_uuid,
'mountpoint': mountpoint,
@@ -104,7 +100,7 @@
return resp, body
def detach_volume(self, volume_id):
- """Detaches a volume from an instance"""
+ """Detaches a volume from an instance."""
post_body = {}
post_body = json.dumps({'os-detach': post_body})
url = 'volumes/%s/action' % (volume_id)
@@ -112,7 +108,7 @@
return resp, body
def wait_for_volume_status(self, volume_id, status):
- """Waits for a Volume to reach a given status"""
+ """Waits for a Volume to reach a given status."""
resp, body = self.get_volume(volume_id)
volume_name = body['display_name']
volume_status = body['status']
diff --git a/tempest/services/volume/xml/admin/__init__.py b/tempest/services/volume/xml/admin/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/services/volume/xml/admin/__init__.py
diff --git a/tempest/services/volume/xml/admin/volume_types_client.py b/tempest/services/volume/xml/admin/volume_types_client.py
new file mode 100644
index 0000000..74d4631
--- /dev/null
+++ b/tempest/services/volume/xml/admin/volume_types_client.py
@@ -0,0 +1,195 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2012 IBM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import urllib
+
+from lxml import etree
+
+from tempest.common.rest_client import RestClientXML
+from tempest import exceptions
+from tempest.services.compute.xml.common import Document
+from tempest.services.compute.xml.common import Element
+from tempest.services.compute.xml.common import Text
+from tempest.services.compute.xml.common import xml_to_json
+from tempest.services.compute.xml.common import XMLNS_11
+
+
+class VolumeTypesClientXML(RestClientXML):
+ """
+ Client class to send CRUD Volume Types API requests to a Cinder endpoint
+ """
+
+ def __init__(self, config, username, password, auth_url, tenant_name=None):
+ super(VolumeTypesClientXML, self).__init__(config, username, password,
+ auth_url, tenant_name)
+ self.service = self.config.volume.catalog_type
+ self.build_interval = self.config.compute.build_interval
+ self.build_timeout = self.config.compute.build_timeout
+
+ def _parse_volume_type(self, body):
+ vol_type = dict((attr, body.get(attr)) for attr in body.keys())
+
+ for child in body.getchildren():
+ tag = child.tag
+ if tag.startswith("{"):
+ ns, tag = tag.split("}", 1)
+ if tag == 'extra_specs':
+ vol_type['extra_specs'] = dict((meta.get('key'),
+ meta.text)
+ for meta in list(child))
+ else:
+ vol_type[tag] = xml_to_json(child)
+ return vol_type
+
+ def _parse_volume_type_extra_specs(self, body):
+ extra_spec = dict((attr, body.get(attr)) for attr in body.keys())
+
+ for child in body.getchildren():
+ tag = child.tag
+            if tag.startswith("{"):
+                ns, tag = tag.split("}", 1)
+            extra_spec[tag] = xml_to_json(child)
+ return extra_spec
+
+ def list_volume_types(self, params=None):
+ """List all the volume_types created."""
+ url = 'types'
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+
+ resp, body = self.get(url, self.headers)
+ body = etree.fromstring(body)
+ volume_types = []
+ if body is not None:
+ volume_types += [self._parse_volume_type(vol)
+ for vol in list(body)]
+ return resp, volume_types
+
+ def get_volume_type(self, type_id):
+ """Returns the details of a single volume_type."""
+ url = "types/%s" % str(type_id)
+ resp, body = self.get(url, self.headers)
+ body = etree.fromstring(body)
+ return resp, self._parse_volume_type(body)
+
+ def create_volume_type(self, name, **kwargs):
+ """
+ Creates a new Volume_type.
+ name(Required): Name of volume_type.
+ Following optional keyword arguments are accepted:
+ extra_specs: A dictionary of values to be used as extra_specs.
+ """
+ vol_type = Element("volume_type", xmlns=XMLNS_11)
+ if name:
+ vol_type.add_attr('name', name)
+
+ extra_specs = kwargs.get('extra_specs')
+ if extra_specs:
+ _extra_specs = Element('extra_specs')
+ vol_type.append(_extra_specs)
+ for key, value in extra_specs.items():
+ spec = Element('extra_spec')
+ spec.add_attr('key', key)
+ spec.append(Text(value))
+ _extra_specs.append(spec)
+
+ resp, body = self.post('types', str(Document(vol_type)),
+ self.headers)
+ body = xml_to_json(etree.fromstring(body))
+ return resp, body
+
+ def delete_volume_type(self, type_id):
+ """Deletes the Specified Volume_type."""
+ return self.delete("types/%s" % str(type_id))
+
+ def list_volume_types_extra_specs(self, vol_type_id, params=None):
+ """List all the volume_types extra specs created."""
+ url = 'types/%s/extra_specs' % str(vol_type_id)
+
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+
+ resp, body = self.get(url, self.headers)
+ body = etree.fromstring(body)
+ extra_specs = []
+ if body is not None:
+ extra_specs += [self._parse_volume_type_extra_specs(spec)
+ for spec in list(body)]
+ return resp, extra_specs
+
+ def get_volume_type_extra_specs(self, vol_type_id, extra_spec_name):
+ """Returns the details of a single volume_type extra spec."""
+ url = "types/%s/extra_specs/%s" % (str(vol_type_id),
+ str(extra_spec_name))
+ resp, body = self.get(url, self.headers)
+ body = etree.fromstring(body)
+ return resp, self._parse_volume_type_extra_specs(body)
+
+ def create_volume_type_extra_specs(self, vol_type_id, extra_spec):
+ """
+ Creates a new Volume_type extra spec.
+ vol_type_id: Id of volume_type.
+        extra_spec: A dictionary of values to be used as extra specs.
+ """
+ url = "types/%s/extra_specs" % str(vol_type_id)
+ extra_specs = Element("extra_specs", xmlns=XMLNS_11)
+ if extra_spec:
+ for key, value in extra_spec.items():
+ spec = Element('extra_spec')
+ spec.add_attr('key', key)
+ spec.append(Text(value))
+ extra_specs.append(spec)
+
+ resp, body = self.post(url, str(Document(extra_specs)),
+ self.headers)
+ body = xml_to_json(etree.fromstring(body))
+ return resp, body
+
+ def delete_volume_type_extra_specs(self, vol_id, extra_spec_name):
+ """Deletes the Specified Volume_type extra spec."""
+ return self.delete("types/%s/extra_specs/%s" % ((str(vol_id)),
+ str(extra_spec_name)))
+
+ def update_volume_type_extra_specs(self, vol_type_id, extra_spec_name,
+ extra_spec):
+ """
+ Update a volume_type extra spec.
+ vol_type_id: Id of volume_type.
+ extra_spec_name: Name of the extra spec to be updated.
+        extra_spec: A dictionary with the extra_spec_name as the key and
+        the updated value as its value.
+ """
+ url = "types/%s/extra_specs/%s" % (str(vol_type_id),
+ str(extra_spec_name))
+ extra_specs = Element("extra_specs", xmlns=XMLNS_11)
+ for key, value in extra_spec.items():
+ spec = Element('extra_spec')
+ spec.add_attr('key', key)
+ spec.append(Text(value))
+ extra_specs.append(spec)
+ resp, body = self.put(url, str(Document(extra_specs)),
+ self.headers)
+ body = xml_to_json(etree.fromstring(body))
+ return resp, body
+
+ def is_resource_deleted(self, id):
+ try:
+ self.get_volume_type(id)
+ except exceptions.NotFound:
+ return True
+ return False
diff --git a/tempest/services/volume/xml/volumes_client.py b/tempest/services/volume/xml/volumes_client.py
index 9d2f159..b0104e0 100644
--- a/tempest/services/volume/xml/volumes_client.py
+++ b/tempest/services/volume/xml/volumes_client.py
@@ -16,16 +16,17 @@
# under the License.
import time
+import urllib
from lxml import etree
from tempest.common.rest_client import RestClientXML
from tempest import exceptions
-from tempest.services.compute.xml.common import xml_to_json
-from tempest.services.compute.xml.common import XMLNS_11
+from tempest.services.compute.xml.common import Document
from tempest.services.compute.xml.common import Element
from tempest.services.compute.xml.common import Text
-from tempest.services.compute.xml.common import Document
+from tempest.services.compute.xml.common import xml_to_json
+from tempest.services.compute.xml.common import XMLNS_11
class VolumesClientXML(RestClientXML):
@@ -55,7 +56,7 @@
return vol
def list_volumes(self, params=None):
- """List all the volumes created"""
+ """List all the volumes created."""
url = 'volumes'
if params:
@@ -69,7 +70,7 @@
return resp, volumes
def list_volumes_with_detail(self, params=None):
- """List all the details of volumes"""
+ """List all the details of volumes."""
url = 'volumes/detail'
if params:
@@ -83,7 +84,7 @@
return resp, volumes
def get_volume(self, volume_id, wait=None):
- """Returns the details of a single volume"""
+ """Returns the details of a single volume."""
url = "volumes/%s" % str(volume_id)
resp, body = self.get(url, self.headers, wait=wait)
body = etree.fromstring(body)
@@ -115,11 +116,11 @@
return resp, body
def delete_volume(self, volume_id):
- """Deletes the Specified Volume"""
+ """Deletes the Specified Volume."""
return self.delete("volumes/%s" % str(volume_id))
def wait_for_volume_status(self, volume_id, status):
- """Waits for a Volume to reach a given status"""
+ """Waits for a Volume to reach a given status."""
resp, body = self.get_volume(volume_id)
volume_name = body['displayName']
volume_status = body['status']
diff --git a/tempest/smoke.py b/tempest/smoke.py
index c929273..1e7da8e 100644
--- a/tempest/smoke.py
+++ b/tempest/smoke.py
@@ -51,15 +51,35 @@
# order, and because test methods in smoke tests generally create
# resources in a particular order, we destroy resources in the reverse
# order in which resources are added to the smoke test class object
- if not cls.resources:
- return
- thing = cls.resources.pop()
- while True:
+ while cls.resources:
+ thing = cls.resources.pop()
LOG.debug("Deleting %r from shared resources of %s" %
(thing, cls.__name__))
- # Resources in novaclient all have a delete() method
- # which destroys the resource...
- thing.delete()
- if not cls.resources:
- return
- thing = cls.resources.pop()
+
+ try:
+ # OpenStack resources are assumed to have a delete()
+ # method which destroys the resource...
+ thing.delete()
+ except Exception as e:
+ # If the resource is already missing, mission accomplished.
+ if e.__class__.__name__ == 'NotFound':
+ continue
+ raise
+
+ def is_deletion_complete():
+                # Deletion can only be verified for objects that support
+                # retrieval; for plain dicts assume deletion is complete.
+ if isinstance(thing, dict):
+ return True
+ try:
+ thing.get()
+ except Exception as e:
+                    # Clients are expected to raise an exception
+ # called 'NotFound' if retrieval fails.
+ if e.__class__.__name__ == 'NotFound':
+ return True
+ raise
+ return False
+
+ # Block until resource deletion has completed or timed-out
+ test.call_until_true(is_deletion_complete, 10, 1)
diff --git a/tempest/testboto.py b/tempest/testboto.py
index 6c51346..c38bf99 100644
--- a/tempest/testboto.py
+++ b/tempest/testboto.py
@@ -15,21 +15,25 @@
# License for the specific language governing permissions and limitations
# under the License.
-import unittest2 as unittest
-import nose
-import tempest.tests.boto
-from tempest.exceptions import TearDownException
-from tempest.tests.boto.utils.wait import state_wait, wait_no_exception
-from tempest.tests.boto.utils.wait import re_search_wait, wait_exception
-import boto
-from boto.s3.key import Key
-from boto.s3.bucket import Bucket
-from boto.exception import BotoServerError
from contextlib import closing
-import re
import logging
+import re
import time
+import boto
+from boto.exception import BotoServerError
+from boto.s3.bucket import Bucket
+from boto.s3.key import Key
+import nose
+import unittest2 as unittest
+
+from tempest.exceptions import TearDownException
+import tempest.tests.boto
+from tempest.tests.boto.utils.wait import re_search_wait
+from tempest.tests.boto.utils.wait import state_wait
+from tempest.tests.boto.utils.wait import wait_exception
+from tempest.tests.boto.utils.wait import wait_no_exception
+
LOG = logging.getLogger(__name__)
@@ -118,7 +122,7 @@
class BotoTestCase(unittest.TestCase):
- """Recommended to use as base class for boto related test"""
+ """Recommended to use as base class for boto related test."""
@classmethod
def setUpClass(cls):
# The trash contains cleanup functions and paramaters in tuples
@@ -144,7 +148,7 @@
@classmethod
def cancelResourceCleanUp(cls, key):
- """Cancel Clean up request"""
+ """Cancel Clean up request."""
del cls._resource_trash_bin[key]
#TODO(afazekas): Add "with" context handling
@@ -289,7 +293,7 @@
@classmethod
def destroy_bucket(cls, connection_data, bucket):
- """Destroys the bucket and its content, just for teardown"""
+ """Destroys the bucket and its content, just for teardown."""
exc_num = 0
try:
with closing(boto.connect_s3(**connection_data)) as conn:
@@ -312,7 +316,7 @@
@classmethod
def destroy_reservation(cls, reservation):
- """Terminate instances in a reservation, just for teardown"""
+ """Terminate instances in a reservation, just for teardown."""
exc_num = 0
def _instance_state():
@@ -379,7 +383,7 @@
@classmethod
def destroy_snapshot_wait(cls, snapshot):
- """delete snaphot, wait until not exists"""
+ """delete snaphot, wait until not exists."""
snapshot.delete()
def _update():
diff --git a/tempest/tests/boto/__init__.py b/tempest/tests/boto/__init__.py
index 11fa077..62918c2 100644
--- a/tempest/tests/boto/__init__.py
+++ b/tempest/tests/boto/__init__.py
@@ -15,16 +15,18 @@
# License for the specific language governing permissions and limitations
# under the License.
-import tempest.config
-from tempest.common.utils.file_utils import have_effective_read_access
-import os
-import tempest.openstack
-import re
-import keystoneclient.exceptions
-import boto.exception
import logging
+import os
+import re
import urlparse
+import boto.exception
+import keystoneclient.exceptions
+
+import tempest.clients
+from tempest.common.utils.file_utils import have_effective_read_access
+import tempest.config
+
A_I_IMAGES_READY = False # ari,ami,aki
S3_CAN_CONNECT_ERROR = "Unknown Error"
EC2_CAN_CONNECT_ERROR = "Unknown Error"
@@ -57,7 +59,7 @@
if not secret_matcher.match(connection_data["aws_secret_access_key"]):
raise Exception("Invalid AWS secret Key")
raise Exception("Unknown (Authentication?) Error")
- openstack = tempest.openstack.Manager()
+ openstack = tempest.clients.Manager()
try:
if urlparse.urlparse(config.boto.ec2_url).hostname is None:
raise Exception("Failed to get hostname from the ec2_url")
diff --git a/tempest/tests/boto/test_ec2_instance_run.py b/tempest/tests/boto/test_ec2_instance_run.py
index e5c61fb..95ef23c 100644
--- a/tempest/tests/boto/test_ec2_instance_run.py
+++ b/tempest/tests/boto/test_ec2_instance_run.py
@@ -15,21 +15,23 @@
# License for the specific language governing permissions and limitations
# under the License.
-import nose
-from nose.plugins.attrib import attr
-import unittest2 as unittest
-from tempest.testboto import BotoTestCase
-from tempest.tests.boto.utils.s3 import s3_upload_dir
-import tempest.tests.boto
-from tempest.common.utils.data_utils import rand_name
-from tempest.exceptions import EC2RegisterImageException
-from tempest.tests.boto.utils.wait import state_wait, re_search_wait
-from tempest import openstack
-from tempest.common.utils.linux.remote_client import RemoteClient
-from boto.s3.key import Key
from contextlib import closing
import logging
+from boto.s3.key import Key
+import nose
+from nose.plugins.attrib import attr
+import unittest2 as unittest
+
+from tempest import clients
+from tempest.common.utils.data_utils import rand_name
+from tempest.common.utils.linux.remote_client import RemoteClient
+from tempest.exceptions import EC2RegisterImageException
+from tempest.testboto import BotoTestCase
+import tempest.tests.boto
+from tempest.tests.boto.utils.s3 import s3_upload_dir
+from tempest.tests.boto.utils.wait import re_search_wait
+from tempest.tests.boto.utils.wait import state_wait
LOG = logging.getLogger(__name__)
@@ -43,7 +45,7 @@
if not tempest.tests.boto.A_I_IMAGES_READY:
raise nose.SkipTest("".join(("EC2 ", cls.__name__,
": requires ami/aki/ari manifest")))
- cls.os = openstack.Manager()
+ cls.os = clients.Manager()
cls.s3_client = cls.os.s3_client
cls.ec2_client = cls.os.ec2api_client
config = cls.os.config
@@ -91,7 +93,7 @@
@attr(type='smoke')
def test_run_stop_terminate_instance(self):
- """EC2 run, stop and terminate instance"""
+ # EC2 run, stop and terminate instance
image_ami = self.ec2_client.get_image(self.images["ami"]
["image_id"])
reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
@@ -120,7 +122,7 @@
@attr(type='smoke')
def test_run_terminate_instance(self):
- """EC2 run, terminate immediately"""
+ # EC2 run, terminate immediately
image_ami = self.ec2_client.get_image(self.images["ami"]
["image_id"])
reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
@@ -137,7 +139,7 @@
# with normal validation it would fail
@attr("slow", type='smoke')
def test_integration_1(self):
- """EC2 1. integration test (not strict)"""
+ # EC2 1. integration test (not strict)
image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
sec_group_name = rand_name("securitygroup-")
group_desc = sec_group_name + " security group description "
diff --git a/tempest/tests/boto/test_ec2_keys.py b/tempest/tests/boto/test_ec2_keys.py
index 79d0b2b..fcec02d 100644
--- a/tempest/tests/boto/test_ec2_keys.py
+++ b/tempest/tests/boto/test_ec2_keys.py
@@ -15,12 +15,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-
from nose.plugins.attrib import attr
import unittest2 as unittest
-from tempest.testboto import BotoTestCase
+
+from tempest import clients
from tempest.common.utils.data_utils import rand_name
-from tempest import openstack
+from tempest.testboto import BotoTestCase
def compare_key_pairs(a, b):
@@ -34,12 +34,12 @@
@classmethod
def setUpClass(cls):
super(EC2KeysTest, cls).setUpClass()
- cls.os = openstack.Manager()
+ cls.os = clients.Manager()
cls.client = cls.os.ec2api_client
@attr(type='smoke')
def test_create_ec2_keypair(self):
- """EC2 create KeyPair"""
+ # EC2 create KeyPair
key_name = rand_name("keypair-")
self.addResourceCleanUp(self.client.delete_key_pair, key_name)
keypair = self.client.create_key_pair(key_name)
@@ -49,7 +49,7 @@
@attr(type='smoke')
@unittest.skip("Skipped until the Bug #1072318 is resolved")
def test_delete_ec2_keypair(self):
- """EC2 delete KeyPair"""
+ # EC2 delete KeyPair
key_name = rand_name("keypair-")
self.client.create_key_pair(key_name)
self.client.delete_key_pair(key_name)
@@ -57,7 +57,7 @@
@attr(type='smoke')
def test_get_ec2_keypair(self):
- """EC2 get KeyPair"""
+ # EC2 get KeyPair
key_name = rand_name("keypair-")
self.addResourceCleanUp(self.client.delete_key_pair, key_name)
keypair = self.client.create_key_pair(key_name)
@@ -67,7 +67,7 @@
@attr(type='smoke')
@unittest.skip("Skipped until the Bug #1072762 is resolved")
def test_duplicate_ec2_keypair(self):
- """EC2 duplicate KeyPair"""
+ # EC2 duplicate KeyPair
key_name = rand_name("keypair-")
self.addResourceCleanUp(self.client.delete_key_pair, key_name)
keypair = self.client.create_key_pair(key_name)
diff --git a/tempest/tests/boto/test_ec2_network.py b/tempest/tests/boto/test_ec2_network.py
index accf677..27649e6 100644
--- a/tempest/tests/boto/test_ec2_network.py
+++ b/tempest/tests/boto/test_ec2_network.py
@@ -17,8 +17,9 @@
from nose.plugins.attrib import attr
import unittest2 as unittest
+
+from tempest import clients
from tempest.testboto import BotoTestCase
-from tempest import openstack
@attr("EC2")
@@ -27,14 +28,14 @@
@classmethod
def setUpClass(cls):
super(EC2NetworkTest, cls).setUpClass()
- cls.os = openstack.Manager()
+ cls.os = clients.Manager()
cls.client = cls.os.ec2api_client
#Note(afazekas): these tests for things duable without an instance
@unittest.skip("Skipped until the Bug #1080406 is resolved")
@attr(type='smoke')
def test_disassociate_not_associated_floating_ip(self):
- """EC2 disassociate not associated floating ip"""
+ # EC2 disassociate not associated floating ip
ec2_codes = self.ec2_error_code
address = self.client.allocate_address()
public_ip = address.public_ip
diff --git a/tempest/tests/boto/test_ec2_security_groups.py b/tempest/tests/boto/test_ec2_security_groups.py
index 3d50e8b..09da82c 100644
--- a/tempest/tests/boto/test_ec2_security_groups.py
+++ b/tempest/tests/boto/test_ec2_security_groups.py
@@ -17,9 +17,10 @@
from nose.plugins.attrib import attr
import unittest2 as unittest
-from tempest.testboto import BotoTestCase
+
+from tempest import clients
from tempest.common.utils.data_utils import rand_name
-from tempest import openstack
+from tempest.testboto import BotoTestCase
@attr("EC2")
@@ -28,19 +29,19 @@
@classmethod
def setUpClass(cls):
super(EC2SecurityGroupTest, cls).setUpClass()
- cls.os = openstack.Manager()
+ cls.os = clients.Manager()
cls.client = cls.os.ec2api_client
@attr(type='smoke')
def test_create_authorize_security_group(self):
- """EC2 Create, authorize/revoke security group"""
+ # EC2 Create, authorize/revoke security group
group_name = rand_name("securty_group-")
group_description = group_name + " security group description "
group = self.client.create_security_group(group_name,
group_description)
self.addResourceCleanUp(self.client.delete_security_group, group_name)
groups_get = self.client.get_all_security_groups(
- groupnames=(group_name,))
+ groupnames=(group_name,))
self.assertEqual(len(groups_get), 1)
group_get = groups_get[0]
self.assertEqual(group.name, group_get.name)
@@ -62,7 +63,7 @@
self.assertTrue(success)
#TODO(afazekas): Duplicate tests
group_get = self.client.get_all_security_groups(
- groupnames=(group_name,))[0]
+ groupnames=(group_name,))[0]
#remove listed rules
for ip_permission in group_get.rules:
for cidr in ip_permission.grants:
@@ -73,6 +74,6 @@
to_port=ip_permission.to_port))
group_get = self.client.get_all_security_groups(
- groupnames=(group_name,))[0]
+ groupnames=(group_name,))[0]
#all rules shuld be removed now
self.assertEqual(0, len(group_get.rules))
diff --git a/tempest/tests/boto/test_ec2_volumes.py b/tempest/tests/boto/test_ec2_volumes.py
index 8b7e6be..7898926 100644
--- a/tempest/tests/boto/test_ec2_volumes.py
+++ b/tempest/tests/boto/test_ec2_volumes.py
@@ -15,14 +15,15 @@
# License for the specific language governing permissions and limitations
# under the License.
-
-from nose.plugins.attrib import attr
-from tempest.testboto import BotoTestCase
-from tempest import openstack
-import unittest2 as unittest
import logging
import time
+from nose.plugins.attrib import attr
+import unittest2 as unittest
+
+from tempest import clients
+from tempest.testboto import BotoTestCase
+
LOG = logging.getLogger(__name__)
@@ -37,14 +38,14 @@
@classmethod
def setUpClass(cls):
super(EC2VolumesTest, cls).setUpClass()
- cls.os = openstack.Manager()
+ cls.os = clients.Manager()
cls.client = cls.os.ec2api_client
cls.zone = cls.client.get_good_zone()
#NOTE(afazekas): as admin it can trigger the Bug #1074901
@attr(type='smoke')
def test_create_get_delete(self):
- """EC2 Create, get, delete Volume"""
+ # EC2 Create, get, delete Volume
volume = self.client.create_volume(1, self.zone)
cuk = self.addResourceCleanUp(self.client.delete_volume, volume.id)
self.assertIn(volume.status, self.valid_volume_status)
@@ -60,9 +61,9 @@
self.client.delete_volume(volume.id)
self.cancelResourceCleanUp(cuk)
- @unittest.skip("Skipped until the Bug #1080284 is resolved")
+ @attr(type='smoke')
def test_create_volme_from_snapshot(self):
- """EC2 Create volume from snapshot"""
+ # EC2 Create volume from snapshot
volume = self.client.create_volume(1, self.zone)
self.addResourceCleanUp(self.client.delete_volume, volume.id)
@@ -78,7 +79,6 @@
snap.update(validate=True)
return snap.status
- #self.assertVolumeStatusWait(_snap_status, "available") # not a volume
self.assertSnapshotStatusWait(_snap_status, "completed")
svol = self.client.create_volume(1, self.zone, snapshot=snap)
diff --git a/tempest/tests/boto/test_s3_buckets.py b/tempest/tests/boto/test_s3_buckets.py
index 56cf52c..beed28b 100644
--- a/tempest/tests/boto/test_s3_buckets.py
+++ b/tempest/tests/boto/test_s3_buckets.py
@@ -17,9 +17,10 @@
from nose.plugins.attrib import attr
import unittest2 as unittest
-from tempest.testboto import BotoTestCase
+
+from tempest import clients
from tempest.common.utils.data_utils import rand_name
-from tempest import openstack
+from tempest.testboto import BotoTestCase
@attr("S3")
@@ -28,14 +29,14 @@
@classmethod
def setUpClass(cls):
super(S3BucketsTest, cls).setUpClass()
- cls.os = openstack.Manager()
+ cls.os = clients.Manager()
cls.client = cls.os.s3_client
cls.config = cls.os.config
@unittest.skip("Skipped until the Bug #1076965 is resolved")
@attr(type='smoke')
def test_create_and_get_delete_bucket(self):
- """S3 Create, get and delete bucket"""
+ # S3 Create, get and delete bucket
bucket_name = rand_name("s3bucket-")
cleanup_key = self.addResourceCleanUp(self.client.delete_bucket,
bucket_name)
diff --git a/tempest/tests/boto/test_s3_ec2_images.py b/tempest/tests/boto/test_s3_ec2_images.py
index eeb7039..f14115a 100644
--- a/tempest/tests/boto/test_s3_ec2_images.py
+++ b/tempest/tests/boto/test_s3_ec2_images.py
@@ -15,19 +15,20 @@
# License for the specific language governing permissions and limitations
# under the License.
+from contextlib import closing
+import os
+
+from boto.s3.key import Key
+import nose
from nose.plugins.attrib import attr
import unittest2 as unittest
-from tempest import openstack
+
+from tempest import clients
+from tempest.common.utils.data_utils import rand_name
from tempest.testboto import BotoTestCase
import tempest.tests.boto
-from tempest.tests.boto.utils.wait import state_wait
from tempest.tests.boto.utils.s3 import s3_upload_dir
-from tempest.common.utils.data_utils import rand_name
-from contextlib import closing
-from boto.s3.key import Key
-import logging
-import nose
-import os
+from tempest.tests.boto.utils.wait import state_wait
@attr("S3", "EC2")
@@ -39,7 +40,7 @@
if not tempest.tests.boto.A_I_IMAGES_READY:
raise nose.SkipTest("".join(("EC2 ", cls.__name__,
": requires ami/aki/ari manifest")))
- cls.os = openstack.Manager()
+ cls.os = clients.Manager()
cls.s3_client = cls.os.s3_client
cls.images_client = cls.os.ec2api_client
config = cls.os.config
@@ -61,7 +62,7 @@
# otherwise I would skip it too
@attr(type='smoke')
def test_register_get_deregister_ami_image(self):
- """Register and deregister ami image"""
+ # Register and deregister ami image
image = {"name": rand_name("ami-name-"),
"location": self.bucket_name + "/" + self.ami_manifest,
"type": "ami"}
@@ -89,7 +90,7 @@
@unittest.skip("Skipped until the Bug #1074904 is resolved")
def test_register_get_deregister_aki_image(self):
- """Register and deregister aki image"""
+ # Register and deregister aki image
image = {"name": rand_name("aki-name-"),
"location": self.bucket_name + "/" + self.ari_manifest,
"type": "aki"}
@@ -117,7 +118,7 @@
@unittest.skip("Skipped until the Bug #1074908 and #1074904 is resolved")
def test_register_get_deregister_ari_image(self):
- """Register and deregister ari image"""
+ # Register and deregister ari image
image = {"name": rand_name("ari-name-"),
"location": "/" + self.bucket_name + "/" + self.ari_manifest,
"type": "ari"}
diff --git a/tempest/tests/boto/test_s3_objects.py b/tempest/tests/boto/test_s3_objects.py
index c31ad6e..6e89539 100644
--- a/tempest/tests/boto/test_s3_objects.py
+++ b/tempest/tests/boto/test_s3_objects.py
@@ -15,14 +15,16 @@
# License for the specific language governing permissions and limitations
# under the License.
+from contextlib import closing
+
+from boto.s3.key import Key
from nose.plugins.attrib import attr
import unittest2 as unittest
-from tempest.testboto import BotoTestCase
+
+from tempest import clients
from tempest.common.utils.data_utils import rand_name
-from tempest import openstack
+from tempest.testboto import BotoTestCase
from tempest.tests import boto
-from boto.s3.key import Key
-from contextlib import closing
@attr("S3")
@@ -31,14 +33,14 @@
@classmethod
def setUpClass(cls):
super(S3BucketsTest, cls).setUpClass()
- cls.os = openstack.Manager()
+ cls.os = clients.Manager()
cls.client = cls.os.s3_client
cls.config = cls.os.config
@unittest.skip("Skipped until the Bug #1076534 is resolved")
@attr(type='smoke')
def test_create_get_delete_object(self):
- """S3 Create, get and delete object"""
+ # S3 Create, get and delete object
bucket_name = rand_name("s3bucket-")
object_name = rand_name("s3object-")
content = 'x' * 42
diff --git a/tempest/tests/boto/utils/s3.py b/tempest/tests/boto/utils/s3.py
index 70d9263..4c3229b 100644
--- a/tempest/tests/boto/utils/s3.py
+++ b/tempest/tests/boto/utils/s3.py
@@ -15,13 +15,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-import boto
-from boto.s3.key import Key
from contextlib import closing
+import logging
import os
import re
-import logging
+import boto
+from boto.s3.key import Key
LOG = logging.getLogger(__name__)
diff --git a/tempest/tests/boto/utils/wait.py b/tempest/tests/boto/utils/wait.py
index 38b6ba1..77fe037 100644
--- a/tempest/tests/boto/utils/wait.py
+++ b/tempest/tests/boto/utils/wait.py
@@ -15,12 +15,14 @@
# License for the specific language governing permissions and limitations
# under the License.
-import tempest.config
-import time
-from unittest2 import TestCase
import logging
import re
+import time
+
from boto.exception import BotoServerError
+from unittest2 import TestCase
+
+import tempest.config
LOG = logging.getLogger(__name__)
@@ -60,7 +62,7 @@
def re_search_wait(lfunction, regexp):
- """Stops waiting on success"""
+ """Stops waiting on success."""
start_time = time.time()
while True:
text = lfunction()
@@ -82,7 +84,7 @@
def wait_no_exception(lfunction, exc_class=None, exc_matcher=None):
- """Stops waiting on success"""
+ """Stops waiting on success."""
start_time = time.time()
if exc_matcher is not None:
exc_class = BotoServerError
@@ -112,7 +114,7 @@
#NOTE(afazekas): EC2/boto normally raise exception instead of empty list
def wait_exception(lfunction):
- """Returns with the exception or raises one"""
+ """Returns with the exception or raises one."""
start_time = time.time()
while True:
try:
diff --git a/tempest/tests/compute/__init__.py b/tempest/tests/compute/__init__.py
index 7396833..0258708 100644
--- a/tempest/tests/compute/__init__.py
+++ b/tempest/tests/compute/__init__.py
@@ -19,8 +19,8 @@
import nose
+from tempest import clients
from tempest import config
-from tempest import openstack
LOG = logging.getLogger(__name__)
@@ -30,6 +30,7 @@
CHANGE_PASSWORD_AVAILABLE = CONFIG.compute.change_password_available
WHITEBOX_ENABLED = CONFIG.compute.whitebox_enabled
DISK_CONFIG_ENABLED = False
+DISK_CONFIG_ENABLED_OVERRIDE = CONFIG.compute.disk_config_enabled_override
FLAVOR_EXTRA_DATA_ENABLED = False
MULTI_USER = False
@@ -39,11 +40,12 @@
LOG.debug("Entering tempest.tests.compute.setup_package")
global MULTI_USER, DISK_CONFIG_ENABLED, FLAVOR_EXTRA_DATA_ENABLED
- os = openstack.Manager()
+ os = clients.Manager()
images_client = os.images_client
flavors_client = os.flavors_client
extensions_client = os.extensions_client
- DISK_CONFIG_ENABLED = extensions_client.is_enabled('DiskConfig')
+ DISK_CONFIG_ENABLED = (DISK_CONFIG_ENABLED_OVERRIDE and
+ extensions_client.is_enabled('DiskConfig'))
FLAVOR_EXTRA_DATA_ENABLED = extensions_client.is_enabled('FlavorExtraData')
# Validate reference data exists
diff --git a/tempest/tests/compute/admin/test_flavors.py b/tempest/tests/compute/admin/test_flavors.py
index dc9248d..b5ee13a 100644
--- a/tempest/tests/compute/admin/test_flavors.py
+++ b/tempest/tests/compute/admin/test_flavors.py
@@ -19,8 +19,8 @@
from nose.plugins.attrib import attr
import unittest2 as unittest
-from tempest.tests.compute import base
from tempest.tests import compute
+from tempest.tests.compute import base
class FlavorsAdminTestBase(object):
@@ -47,8 +47,8 @@
@attr(type='positive')
def test_create_flavor(self):
- """Create a flavor and ensure it is listed
- This operation requires the user to have 'admin' role"""
+ # Create a flavor and ensure it is listed
+ # This operation requires the user to have 'admin' role
#Create the flavor
resp, flavor = self.client.create_flavor(self.flavor_name,
self.ram, self.vcpus,
@@ -77,8 +77,8 @@
@attr(type='positive')
def test_create_flavor_verify_entry_in_list_details(self):
- """Create a flavor and ensure it's details are listed
- This operation requires the user to have 'admin' role"""
+        # Create a flavor and ensure its details are listed
+ # This operation requires the user to have 'admin' role
#Create the flavor
resp, flavor = self.client.create_flavor(self.flavor_name,
self.ram, self.vcpus,
@@ -101,7 +101,7 @@
@attr(type='negative')
def test_get_flavor_details_for_deleted_flavor(self):
- """Delete a flavor and ensure it is not listed"""
+ # Delete a flavor and ensure it is not listed
# Create a test flavor
resp, flavor = self.client.create_flavor(self.flavor_name,
self.ram,
diff --git a/tempest/tests/compute/admin/test_quotas.py b/tempest/tests/compute/admin/test_quotas.py
index 98ca169..452de80 100644
--- a/tempest/tests/compute/admin/test_quotas.py
+++ b/tempest/tests/compute/admin/test_quotas.py
@@ -17,9 +17,9 @@
from nose.plugins.attrib import attr
-from tempest.tests.compute.base import BaseComputeTest
-from tempest.services.compute.admin.json import quotas_client as adm_quotas
from tempest import exceptions
+from tempest.services.compute.admin.json import quotas_client as adm_quotas
+from tempest.tests.compute.base import BaseComputeTest
class QuotasTest(BaseComputeTest):
@@ -66,18 +66,18 @@
@attr(type='smoke')
def test_get_default_quotas(self):
- """Admin can get the default resource quota set for a tenant"""
+ # Admin can get the default resource quota set for a tenant
expected_quota_set = self.default_quota_set.copy()
expected_quota_set['id'] = self.demo_tenant_id
try:
resp, quota_set = self.client.get_quota_set(self.demo_tenant_id)
self.assertEqual(200, resp.status)
self.assertSequenceEqual(expected_quota_set, quota_set)
- except:
+ except Exception:
self.fail("Admin could not get the default quota set for a tenant")
def test_update_all_quota_resources_for_tenant(self):
- """Admin can update all the resource quota limits for a tenant"""
+ # Admin can update all the resource quota limits for a tenant
new_quota_set = {'injected_file_content_bytes': 20480,
'metadata_items': 256, 'injected_files': 10,
'ram': 10240, 'floating_ips': 20, 'key_pairs': 200,
@@ -91,7 +91,7 @@
**new_quota_set)
self.assertEqual(200, resp.status)
self.assertSequenceEqual(new_quota_set, quota_set)
- except:
+ except Exception:
self.fail("Admin could not update quota set for the tenant")
finally:
# Reset quota resource limits to default values
@@ -102,14 +102,14 @@
"defaults")
def test_get_updated_quotas(self):
- """Verify that GET shows the updated quota set"""
+ # Verify that GET shows the updated quota set
self.adm_client.update_quota_set(self.demo_tenant_id,
ram='5120')
try:
resp, quota_set = self.client.get_quota_set(self.demo_tenant_id)
self.assertEqual(200, resp.status)
self.assertEqual(quota_set['ram'], 5120)
- except:
+ except Exception:
self.fail("Could not get the update quota limit for resource")
finally:
# Reset quota resource limits to default values
@@ -120,7 +120,7 @@
"defaults")
def test_create_server_when_cpu_quota_is_full(self):
- """Disallow server creation when tenant's vcpu quota is full"""
+ # Disallow server creation when tenant's vcpu quota is full
resp, quota_set = self.client.get_quota_set(self.demo_tenant_id)
default_vcpu_quota = quota_set['cores']
vcpu_quota = 0 # Set the quota to zero to conserve resources
@@ -138,7 +138,7 @@
cores=default_vcpu_quota)
def test_create_server_when_memory_quota_is_full(self):
- """Disallow server creation when tenant's memory quota is full"""
+ # Disallow server creation when tenant's memory quota is full
resp, quota_set = self.client.get_quota_set(self.demo_tenant_id)
default_mem_quota = quota_set['ram']
mem_quota = 0 # Set the quota to zero to conserve resources
diff --git a/tempest/tests/compute/base.py b/tempest/tests/compute/base.py
index bb2ff8b..e315d78 100644
--- a/tempest/tests/compute/base.py
+++ b/tempest/tests/compute/base.py
@@ -18,13 +18,13 @@
import logging
import time
-import unittest2 as unittest
import nose
+import unittest2 as unittest
+from tempest import clients
+from tempest.common.utils.data_utils import rand_name
from tempest import config
from tempest import exceptions
-from tempest import openstack
-from tempest.common.utils.data_utils import rand_name
__all__ = ['BaseComputeTest', 'BaseComputeTestJSON', 'BaseComputeTestXML',
'BaseComputeAdminTestJSON', 'BaseComputeAdminTestXML']
@@ -34,7 +34,7 @@
class BaseCompTest(unittest.TestCase):
- """Base test case class for all Compute API tests"""
+ """Base test case class for all Compute API tests."""
@classmethod
def setUpClass(cls):
@@ -44,12 +44,12 @@
if cls.config.compute.allow_tenant_isolation:
creds = cls._get_isolated_creds()
username, tenant_name, password = creds
- os = openstack.Manager(username=username,
- password=password,
- tenant_name=tenant_name,
- interface=cls._interface)
+ os = clients.Manager(username=username,
+ password=password,
+ tenant_name=tenant_name,
+ interface=cls._interface)
else:
- os = openstack.Manager(interface=cls._interface)
+ os = clients.Manager(interface=cls._interface)
cls.os = os
cls.servers_client = os.servers_client
@@ -78,7 +78,7 @@
"""
Returns an instance of the Identity Admin API client
"""
- os = openstack.IdentityManager(interface=cls._interface)
+ os = clients.IdentityManager(interface=cls._interface)
admin_client = os.admin_client
return admin_client
@@ -179,7 +179,7 @@
@classmethod
def create_server(cls, image_id=None, flavor=None):
- """Wrapper utility that returns a test server"""
+ """Wrapper utility that returns a test server."""
server_name = rand_name(cls.__name__ + "-instance")
if not flavor:
@@ -193,13 +193,31 @@
cls.servers.append(server)
return server
+ @classmethod
+ def create_server_with_extras(cls, name, image_id=None,
+ flavor=None, **kwargs):
+        # TODO(sdague): transitional function; many server tests
+        # still rely on extra kwargs and the resp value, so they
+        # can't easily be ported to create_server. Will be merged
+        # into create_server later.
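+        #
+        # Illustrative call (a sketch only; the server name and kwargs
+        # below are assumptions, not part of this change):
+        #   resp, server = cls.create_server_with_extras(
+        #       'test-server', meta={'foo': 'bar'})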
+ if not flavor:
+ flavor = cls.flavor_ref
+ if not image_id:
+ image_id = cls.image_ref
+
+ resp, server = cls.servers_client.create_server(name,
+ image_id, flavor,
+ **kwargs)
+ cls.servers.append(server)
+ return resp, server
+
def wait_for(self, condition):
- """Repeatedly calls condition() until a timeout"""
+ """Repeatedly calls condition() until a timeout."""
start_time = int(time.time())
while True:
try:
condition()
- except:
+ except Exception:
pass
else:
return
@@ -228,7 +246,7 @@
class BaseComputeAdminTest(unittest.TestCase):
- """Base test case class for all Compute Admin API tests"""
+ """Base test case class for all Compute Admin API tests."""
@classmethod
def setUpClass(cls):
@@ -242,7 +260,7 @@
"in configuration.")
raise nose.SkipTest(msg)
- cls.os = openstack.AdminManager(interface=cls._interface)
+ cls.os = clients.AdminManager(interface=cls._interface)
class BaseComputeAdminTestJSON(BaseComputeAdminTest):
diff --git a/tempest/tests/compute/flavors/test_flavors.py b/tempest/tests/compute/flavors/test_flavors.py
index 31cf66d..53cad65 100644
--- a/tempest/tests/compute/flavors/test_flavors.py
+++ b/tempest/tests/compute/flavors/test_flavors.py
@@ -24,7 +24,7 @@
@attr(type='smoke')
def test_list_flavors(self):
- """List of all flavors should contain the expected flavor"""
+ # List of all flavors should contain the expected flavor
resp, flavors = self.client.list_flavors()
resp, flavor = self.client.get_flavor_details(self.flavor_ref)
flavor_min_detail = {'id': flavor['id'], 'links': flavor['links'],
@@ -33,40 +33,40 @@
@attr(type='smoke')
def test_list_flavors_with_detail(self):
- """Detailed list of all flavors should contain the expected flavor"""
+ # Detailed list of all flavors should contain the expected flavor
resp, flavors = self.client.list_flavors_with_detail()
resp, flavor = self.client.get_flavor_details(self.flavor_ref)
self.assertTrue(flavor in flavors)
@attr(type='smoke')
def test_get_flavor(self):
- """The expected flavor details should be returned"""
+ # The expected flavor details should be returned
resp, flavor = self.client.get_flavor_details(self.flavor_ref)
- self.assertEqual(self.flavor_ref, str(flavor['id']))
+ self.assertEqual(self.flavor_ref, int(flavor['id']))
@attr(type='negative')
def test_get_non_existant_flavor(self):
- """flavor details are not returned for non existant flavors"""
+        # flavor details are not returned for non-existent flavors
self.assertRaises(exceptions.NotFound, self.client.get_flavor_details,
999)
@attr(type='positive', bug='lp912922')
def test_list_flavors_limit_results(self):
- """Only the expected number of flavors should be returned"""
+ # Only the expected number of flavors should be returned
params = {'limit': 1}
resp, flavors = self.client.list_flavors(params)
self.assertEqual(1, len(flavors))
@attr(type='positive', bug='lp912922')
def test_list_flavors_detailed_limit_results(self):
- """Only the expected number of flavors (detailed) should be returned"""
+ # Only the expected number of flavors (detailed) should be returned
params = {'limit': 1}
resp, flavors = self.client.list_flavors_with_detail(params)
self.assertEqual(1, len(flavors))
@attr(type='positive')
def test_list_flavors_using_marker(self):
- """The list of flavors should start from the provided marker"""
+ # The list of flavors should start from the provided marker
resp, flavors = self.client.list_flavors()
flavor_id = flavors[0]['id']
@@ -77,7 +77,7 @@
@attr(type='positive')
def test_list_flavors_detailed_using_marker(self):
- """The list of flavors should start from the provided marker"""
+ # The list of flavors should start from the provided marker
resp, flavors = self.client.list_flavors_with_detail()
flavor_id = flavors[0]['id']
@@ -88,7 +88,7 @@
@attr(type='positive')
def test_list_flavors_detailed_filter_by_min_disk(self):
- """The detailed list of flavors should be filtered by disk space"""
+ # The detailed list of flavors should be filtered by disk space
resp, flavors = self.client.list_flavors_with_detail()
flavors = sorted(flavors, key=lambda k: k['disk'])
flavor_id = flavors[0]['id']
@@ -99,7 +99,7 @@
@attr(type='positive')
def test_list_flavors_detailed_filter_by_min_ram(self):
- """The detailed list of flavors should be filtered by RAM"""
+ # The detailed list of flavors should be filtered by RAM
resp, flavors = self.client.list_flavors_with_detail()
flavors = sorted(flavors, key=lambda k: k['ram'])
flavor_id = flavors[0]['id']
@@ -110,7 +110,7 @@
@attr(type='positive')
def test_list_flavors_filter_by_min_disk(self):
- """The list of flavors should be filtered by disk space"""
+ # The list of flavors should be filtered by disk space
resp, flavors = self.client.list_flavors_with_detail()
flavors = sorted(flavors, key=lambda k: k['disk'])
flavor_id = flavors[0]['id']
@@ -121,7 +121,7 @@
@attr(type='positive')
def test_list_flavors_filter_by_min_ram(self):
- """The list of flavors should be filtered by RAM"""
+ # The list of flavors should be filtered by RAM
resp, flavors = self.client.list_flavors_with_detail()
flavors = sorted(flavors, key=lambda k: k['ram'])
flavor_id = flavors[0]['id']
@@ -132,7 +132,7 @@
@attr(type='negative')
def test_get_flavor_details_for_invalid_flavor_id(self):
- """Ensure 404 returned for non-existant flavor ID"""
+        # Ensure 404 returned for non-existent flavor ID
self.assertRaises(exceptions.NotFound, self.client.get_flavor_details,
9999)
diff --git a/tempest/tests/compute/floating_ips/test_floating_ips_actions.py b/tempest/tests/compute/floating_ips/test_floating_ips_actions.py
index 7d30eeb..9a9914a 100644
--- a/tempest/tests/compute/floating_ips/test_floating_ips_actions.py
+++ b/tempest/tests/compute/floating_ips/test_floating_ips_actions.py
@@ -18,9 +18,9 @@
from nose.plugins.attrib import attr
import unittest2 as unittest
-from tempest import openstack
-from tempest import exceptions
+from tempest import clients
from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
from tempest.tests.compute import base
@@ -63,10 +63,8 @@
@attr(type='positive')
def test_allocate_floating_ip(self):
- """
- Positive test:Allocation of a new floating IP to a project
- should be successful
- """
+        # Positive test: Allocation of a new floating IP to a project
+ # should be successful
try:
resp, body = self.client.create_floating_ip()
self.assertEqual(200, resp.status)
@@ -82,10 +80,8 @@
@attr(type='positive')
def test_delete_floating_ip(self):
- """
- Positive test:Deletion of valid floating IP from project
- should be successful
- """
+        # Positive test: Deletion of a valid floating IP from a project
+ # should be successful
#Creating the floating IP that is to be deleted in this method
resp, floating_ip_body = self.client.create_floating_ip()
#Storing the details of floating IP before deleting it
@@ -99,10 +95,9 @@
@attr(type='positive')
def test_associate_disassociate_floating_ip(self):
- """
- Positive test:Associate and disassociate the provided floating IP to a
- specific server should be successful
- """
+        # Positive test: Associate and disassociate the provided floating IP
+ # to a specific server should be successful
+
#Association of floating IP to fixed IP address
resp, body =\
self.client.associate_floating_ip_to_server(self.floating_ip,
@@ -116,26 +111,21 @@
@attr(type='negative')
def test_delete_nonexistant_floating_ip(self):
- """
+        # Negative test: Deletion of a nonexistent floating IP
+        # from a project should fail
- Negative test:Deletion of a nonexistent floating IP
- from project should fail
- """
#Deleting the non existent floating IP
try:
resp, body = self.client.delete_floating_ip(self.non_exist_id)
- except:
+ except Exception:
pass
else:
self.fail('Should not be able to delete a nonexistent floating IP')
- @unittest.skip("Skipped until the Bug #957706 is resolved")
@attr(type='negative')
def test_associate_nonexistant_floating_ip(self):
- """
- Negative test:Association of a non existent floating IP
- to specific server should fail
- """
+        # Negative test: Association of a non-existent floating IP
+        # to a specific server should fail
#Associating non existent floating IP
try:
resp, body = \
@@ -149,9 +139,7 @@
@attr(type='negative')
def test_dissociate_nonexistant_floating_ip(self):
- """
- Negative test:Dissociation of a non existent floating IP should fail
- """
+        # Negative test: Dissociation of a non-existent floating IP should fail
#Dissociating non existent floating IP
try:
resp, body = \
@@ -165,10 +153,8 @@
@attr(type='positive')
def test_associate_already_associated_floating_ip(self):
- """
- positive test:Association of an already associated floating IP
- to specific server should change the association of the Floating IP
- """
+        # Positive test: Association of an already associated floating IP
+        # to a specific server should change the association of the floating IP
#Create server so as to use for Multiple association
resp, body = self.servers_client.create_server('floating-server2',
self.image_ref,
@@ -204,13 +190,10 @@
#Deletion of server created in this method
resp, body = self.servers_client.delete_server(self.new_server_id)
- @unittest.skip("Skipped until the Bug #957706 is resolved")
@attr(type='negative')
def test_associate_ip_to_server_without_passing_floating_ip(self):
- """
- Negative test:Association of empty floating IP to specific server
- should raise NotFound exception
- """
+        # Negative test: Association of an empty floating IP to a specific
+        # server should raise a NotFound exception
try:
resp, body =\
self.client.associate_floating_ip_to_server('',
diff --git a/tempest/tests/compute/floating_ips/test_list_floating_ips.py b/tempest/tests/compute/floating_ips/test_list_floating_ips.py
index 34d7369..9eec27c 100644
--- a/tempest/tests/compute/floating_ips/test_list_floating_ips.py
+++ b/tempest/tests/compute/floating_ips/test_list_floating_ips.py
@@ -18,8 +18,8 @@
from nose.plugins.attrib import attr
import unittest2 as unittest
-from tempest import exceptions
from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
from tempest.tests.compute import base
@@ -43,7 +43,7 @@
@attr(type='positive')
def test_list_floating_ips(self):
- """Positive test:Should return the list of floating IPs"""
+        # Positive test: Should return the list of floating IPs
resp, body = self.client.list_floating_ips()
self.assertEqual(200, resp.status)
floating_ips = body
@@ -54,7 +54,7 @@
@attr(type='positive')
def test_get_floating_ip_details(self):
- """Positive test:Should be able to GET the details of floatingIP"""
+        # Positive test: Should be able to GET the details of a floating IP
#Creating a floating IP for which details are to be checked
try:
resp, body = self.client.create_floating_ip()
@@ -78,10 +78,8 @@
@attr(type='negative')
def test_get_nonexistant_floating_ip_details(self):
- """
- Negative test:Should not be able to GET the details
- of nonexistant floating IP
- """
+        # Negative test: Should not be able to GET the details
+        # of a nonexistent floating IP
floating_ip_id = []
resp, body = self.client.list_floating_ips()
for i in range(len(body)):
diff --git a/tempest/tests/compute/images/test_image_metadata.py b/tempest/tests/compute/images/test_image_metadata.py
index 1ac7de7..cdf4249 100644
--- a/tempest/tests/compute/images/test_image_metadata.py
+++ b/tempest/tests/compute/images/test_image_metadata.py
@@ -58,13 +58,13 @@
self.assertEqual(resp.status, 200)
def test_list_image_metadata(self):
- """All metadata key/value pairs for an image should be returned"""
+ # All metadata key/value pairs for an image should be returned
resp, resp_metadata = self.client.list_image_metadata(self.image_id)
expected = {'key1': 'value1', 'key2': 'value2'}
self.assertEqual(expected, resp_metadata)
def test_set_image_metadata(self):
- """The metadata for the image should match the new values"""
+ # The metadata for the image should match the new values
req_metadata = {'meta2': 'value2', 'meta3': 'value3'}
resp, body = self.client.set_image_metadata(self.image_id,
req_metadata)
@@ -73,7 +73,7 @@
self.assertEqual(req_metadata, resp_metadata)
def test_update_image_metadata(self):
- """The metadata for the image should match the updated values"""
+ # The metadata for the image should match the updated values
req_metadata = {'key1': 'alt1', 'key3': 'value3'}
resp, metadata = self.client.update_image_metadata(self.image_id,
req_metadata)
@@ -83,15 +83,14 @@
self.assertEqual(expected, resp_metadata)
def test_get_image_metadata_item(self):
- """The value for a specific metadata key should be returned"""
+ # The value for a specific metadata key should be returned
resp, meta = self.client.get_image_metadata_item(self.image_id,
'key2')
self.assertTrue('value2', meta['key2'])
def test_set_image_metadata_item(self):
- """
- The value provided for the given meta item should be set for the image
- """
+ # The value provided for the given meta item should be set for
+ # the image
meta = {'key1': 'alt'}
resp, body = self.client.set_image_metadata_item(self.image_id,
'key1', meta)
@@ -100,7 +99,7 @@
self.assertEqual(expected, resp_metadata)
def test_delete_image_metadata_item(self):
- """The metadata value/key pair should be deleted from the image"""
+ # The metadata value/key pair should be deleted from the image
resp, body = self.client.delete_image_metadata_item(self.image_id,
'key1')
resp, resp_metadata = self.client.list_image_metadata(self.image_id)
@@ -109,8 +108,8 @@
@attr(type='negative')
def test_list_nonexistant_image_metadata(self):
- """Negative test: List on nonexistant image
- metadata should not happen"""
+        # Negative test: List on nonexistent image
+ # metadata should not happen
try:
resp, resp_metadata = self.client.list_image_metadata(999)
except exceptions.NotFound:
@@ -121,7 +120,7 @@
@attr(type='negative')
def test_update_nonexistant_image_metadata(self):
- """Negative test:An update should not happen for a nonexistant image"""
+        # Negative test: An update should not happen for a nonexistent image
meta = {'key1': 'alt1', 'key2': 'alt2'}
try:
resp, metadata = self.client.update_image_metadata(999, meta)
@@ -132,7 +131,7 @@
@attr(type='negative')
def test_get_nonexistant_image_metadata_item(self):
- """Negative test: Get on nonexistant image should not happen"""
+        # Negative test: Get on nonexistent image should not happen
try:
resp, metadata = self.client.get_image_metadata_item(999, 'key2')
except exceptions.NotFound:
@@ -142,7 +141,7 @@
@attr(type='negative')
def test_set_nonexistant_image_metadata(self):
- """Negative test: Metadata should not be set to a nonexistant image"""
+        # Negative test: Metadata should not be set to a nonexistent image
meta = {'key1': 'alt1', 'key2': 'alt2'}
try:
resp, meta = self.client.set_image_metadata(999, meta)
@@ -153,8 +152,8 @@
@attr(type='negative')
def test_set_nonexistant_image_metadata_item(self):
- """Negative test: Metadata item should not be set to a
- nonexistant image"""
+ # Negative test: Metadata item should not be set to a
+        # nonexistent image
meta = {'key1': 'alt'}
try:
resp, body = self.client.set_image_metadata_item(999, 'key1', meta)
@@ -166,8 +165,8 @@
@attr(type='negative')
def test_delete_nonexistant_image_metadata_item(self):
- """Negative test: Shouldnt be able to delete metadata
- item from nonexistant image"""
+        # Negative test: Shouldn't be able to delete a metadata
+        # item from a nonexistent image
try:
resp, body = self.client.delete_image_metadata_item(999, 'key1')
resp, metadata = self.client.list_image_metadata(999)
diff --git a/tempest/tests/compute/images/test_images.py b/tempest/tests/compute/images/test_images.py
index 5937811..6ebcbbc 100644
--- a/tempest/tests/compute/images/test_images.py
+++ b/tempest/tests/compute/images/test_images.py
@@ -15,22 +15,23 @@
# License for the specific language governing permissions and limitations
# under the License.
+import nose
from nose.plugins.attrib import attr
import unittest2 as unittest
-import nose
-from tempest.common.utils.data_utils import rand_name, parse_image_id
+from tempest import clients
+from tempest.common.utils.data_utils import parse_image_id
+from tempest.common.utils.data_utils import rand_name
import tempest.config
from tempest import exceptions
-from tempest import openstack
-from tempest.tests.compute import base
from tempest.tests import compute
+from tempest.tests.compute import base
class ImagesTestBase(object):
def tearDown(self):
- """Terminate test instances created after a test is executed"""
+ """Terminate test instances created after a test is executed."""
for server in self.servers:
resp, body = self.servers_client.delete_server(server['id'])
if resp['status'] == '204':
@@ -45,7 +46,7 @@
@unittest.skipUnless(compute.CREATE_IMAGE_ENABLED,
'Environment unable to create images.')
def test_create_delete_image(self):
- """An image for the provided server should be created"""
+ # An image for the provided server should be created
server_name = rand_name('server')
resp, server = self.servers_client.create_server(server_name,
self.image_ref,
@@ -76,7 +77,7 @@
@attr(type='negative')
def test_create_image_from_deleted_server(self):
- """An image should not be created if the server instance is removed """
+ # An image should not be created if the server instance is removed
server_name = rand_name('server')
resp, server = self.servers_client.create_server(server_name,
self.image_ref,
@@ -92,7 +93,7 @@
meta = {'image_type': 'test'}
resp, body = self.client.create_image(server['id'], name, meta)
- except:
+ except Exception:
pass
else:
@@ -104,7 +105,7 @@
@attr(type='negative')
def test_create_image_from_invalid_server(self):
- """An image should not be created with invalid server id"""
+ # An image should not be created with invalid server id
try:
# Create a new image with invalid server id
name = rand_name('image')
@@ -126,7 +127,7 @@
@attr(type='negative')
@unittest.skipUnless(compute.MULTI_USER, 'Second user not configured')
def test_create_image_for_server_in_another_tenant(self):
- """Creating image of another tenant's server should be return error"""
+        # Creating an image of another tenant's server should return an error
server = self.create_server()
snapshot_name = rand_name('test-snap-')
@@ -135,7 +136,7 @@
@attr(type='negative')
def test_create_image_when_server_is_building(self):
- """Return error when creating an image of a server that is building"""
+ # Return error when creating an image of a server that is building
server_name = rand_name('test-vm-')
resp, server = self.servers_client.create_server(server_name,
self.image_ref,
@@ -148,7 +149,7 @@
@unittest.skip("Until Bug 1039739 is fixed")
@attr(type='negative')
def test_create_image_when_server_is_rebooting(self):
- """Return error when creating an image of server that is rebooting"""
+ # Return error when creating an image of server that is rebooting
server = self.create_server()
self.servers_client.reboot(server['id'], 'HARD')
@@ -158,7 +159,7 @@
@attr(type='negative')
def test_create_image_when_server_is_terminating(self):
- """Return an error when creating image of server that is terminating"""
+ # Return an error when creating image of server that is terminating
server = self.create_server()
self.servers_client.delete_server(server['id'])
@@ -168,7 +169,7 @@
@attr(type='negative')
def test_create_second_image_when_first_image_is_being_saved(self):
- """Disallow creating another image when first image is being saved"""
+ # Disallow creating another image when first image is being saved
server = self.create_server()
try:
@@ -191,45 +192,45 @@
@attr(type='negative')
@unittest.skip("Until Bug 1004564 is fixed")
def test_create_image_specify_name_over_256_chars(self):
- """Return an error if snapshot name over 256 characters is passed"""
+ # Return an error if snapshot name over 256 characters is passed
server = self.create_server()
try:
snapshot_name = rand_name('a' * 260)
self.assertRaises(exceptions.BadRequest, self.client.create_image,
server['id'], snapshot_name)
- except:
+ except Exception:
self.fail("Should return 400 Bad Request if image name is over 256"
" characters")
@attr(type='negative')
def test_create_image_specify_uuid_35_characters_or_less(self):
- """Return an error if Image ID passed is 35 characters or less"""
+ # Return an error if Image ID passed is 35 characters or less
try:
snapshot_name = rand_name('test-snap-')
test_uuid = ('a' * 35)
self.assertRaises(exceptions.NotFound, self.client.create_image,
test_uuid, snapshot_name)
- except:
+ except Exception:
self.fail("Should return 404 Not Found if server uuid is 35"
" characters or less")
@attr(type='negative')
def test_create_image_specify_uuid_37_characters_or_more(self):
- """Return an error if Image ID passed is 37 characters or more"""
+ # Return an error if Image ID passed is 37 characters or more
try:
snapshot_name = rand_name('test-snap-')
test_uuid = ('a' * 37)
self.assertRaises(exceptions.NotFound, self.client.create_image,
test_uuid, snapshot_name)
- except:
+ except Exception:
self.fail("Should return 404 Not Found if server uuid is 37"
" characters or more")
@attr(type='negative')
@unittest.skip("Until Bug 1006725 is fixed")
def test_create_image_specify_multibyte_character_image_name(self):
- """Return an error if the image name has multi-byte characters"""
+ # Return an error if the image name has multi-byte characters
server = self.create_server()
try:
@@ -237,14 +238,14 @@
self.assertRaises(exceptions.BadRequest,
self.client.create_image, server['id'],
snapshot_name)
- except:
+ except Exception:
self.fail("Should return 400 Bad Request if multi byte characters"
" are used for image name")
@attr(type='negative')
@unittest.skip("Until Bug 1005423 is fixed")
def test_create_image_specify_invalid_metadata(self):
- """Return an error when creating image with invalid metadata"""
+ # Return an error when creating image with invalid metadata
server = self.create_server()
try:
@@ -253,13 +254,13 @@
self.assertRaises(exceptions.BadRequest, self.client.create_image,
server['id'], snapshot_name, meta)
- except:
+ except Exception:
self.fail("Should raise 400 Bad Request if meta data is invalid")
@attr(type='negative')
@unittest.skip("Until Bug 1005423 is fixed")
def test_create_image_specify_metadata_over_limits(self):
- """Return an error when creating image with meta data over 256 chars"""
+ # Return an error when creating image with meta data over 256 chars
server = self.create_server()
try:
@@ -268,12 +269,12 @@
self.assertRaises(exceptions.OverLimit, self.client.create_image,
server['id'], snapshot_name, meta)
- except:
+ except Exception:
self.fail("Should raise 413 Over Limit if meta data was too long")
@attr(type='negative')
def test_delete_image_with_invalid_image_id(self):
- """An image should not be deleted with invalid image id"""
+ # An image should not be deleted with invalid image id
try:
# Delete an image with invalid image id
resp, _ = self.client.delete_image('!@$%^&*()')
@@ -287,7 +288,7 @@
@attr(type='negative')
def test_delete_non_existent_image(self):
- """Return an error while trying to delete a non-existent image"""
+ # Return an error while trying to delete a non-existent image
non_existent_image_id = '11a22b9-12a9-5555-cc11-00ab112223fa'
self.assertRaises(exceptions.NotFound, self.client.delete_image,
@@ -295,50 +296,50 @@
@attr(type='negative')
def test_delete_image_blank_id(self):
- """Return an error while trying to delete an image with blank Id"""
+ # Return an error while trying to delete an image with blank Id
try:
self.assertRaises(exceptions.NotFound, self.client.delete_image,
'')
- except:
+ except Exception:
self.fail("Did not return HTTP 404 NotFound for blank image id")
@attr(type='negative')
def test_delete_image_non_hex_string_id(self):
- """Return an error while trying to delete an image with non hex id"""
+ # Return an error while trying to delete an image with non hex id
image_id = '11a22b9-120q-5555-cc11-00ab112223gj'
try:
self.assertRaises(exceptions.NotFound, self.client.delete_image,
image_id)
- except:
+ except Exception:
self.fail("Did not return HTTP 404 NotFound for non hex image")
@attr(type='negative')
def test_delete_image_negative_image_id(self):
- """Return an error while trying to delete an image with negative id"""
+ # Return an error while trying to delete an image with negative id
try:
self.assertRaises(exceptions.NotFound, self.client.delete_image,
-1)
- except:
+ except Exception:
self.fail("Did not return HTTP 404 NotFound for negative image id")
@attr(type='negative')
def test_delete_image_id_is_over_35_character_limit(self):
- """Return an error while trying to delete image with id over limit"""
+ # Return an error while trying to delete image with id over limit
try:
self.assertRaises(exceptions.NotFound, self.client.delete_image,
'11a22b9-120q-5555-cc11-00ab112223gj-3fac')
- except:
+ except Exception:
self.fail("Did not return HTTP 404 NotFound for image id that "
"exceeds 35 character ID length limit")
@attr(type='negative')
@unittest.skipUnless(compute.MULTI_USER, 'Second user not configured')
def test_delete_image_of_another_tenant(self):
- """Return an error while trying to delete another tenant's image"""
+ # Return an error while trying to delete another tenant's image
server = self.create_server()
@@ -355,7 +356,7 @@
@attr(type='negative')
def test_delete_image_that_is_not_yet_active(self):
- """Return an error while trying to delete an active that is creating"""
+ # Return an error while trying to delete an image that is not yet active
server = self.create_server()
@@ -390,12 +391,12 @@
if cls.config.compute.allow_tenant_isolation:
creds = cls._get_isolated_creds()
username, tenant_name, password = creds
- cls.alt_manager = openstack.Manager(username=username,
- password=password,
- tenant_name=tenant_name)
+ cls.alt_manager = clients.Manager(username=username,
+ password=password,
+ tenant_name=tenant_name)
else:
# Use the alt_XXX credentials in the config file
- cls.alt_manager = openstack.AltManager()
+ cls.alt_manager = clients.AltManager()
cls.alt_client = cls.alt_manager.images_client
@@ -417,10 +418,10 @@
if cls.config.compute.allow_tenant_isolation:
creds = cls._get_isolated_creds()
username, tenant_name, password = creds
- cls.alt_manager = openstack.Manager(username=username,
- password=password,
- tenant_name=tenant_name)
+ cls.alt_manager = clients.Manager(username=username,
+ password=password,
+ tenant_name=tenant_name)
else:
# Use the alt_XXX credentials in the config file
- cls.alt_manager = openstack.AltManager()
+ cls.alt_manager = clients.AltManager()
cls.alt_client = cls.alt_manager.images_client
diff --git a/tempest/tests/compute/images/test_images_whitebox.py b/tempest/tests/compute/images/test_images_whitebox.py
index 40433a7..2987534 100644
--- a/tempest/tests/compute/images/test_images_whitebox.py
+++ b/tempest/tests/compute/images/test_images_whitebox.py
@@ -36,7 +36,7 @@
@classmethod
def tearDownClass(cls):
- """Terminate test instances created after a test is executed"""
+ """Terminate test instances created after a test is executed."""
for server in cls.servers:
cls.update_state(server['id'], "active", None)
@@ -51,20 +51,20 @@
@classmethod
def update_state(self, server_id, vm_state, task_state, deleted=False):
- """Update states of an instance in database for validation"""
+ """Update states of an instance in database for validation."""
if not task_state:
task_state = "NULL"
instances = self.meta.tables['instances']
stmt = instances.update().where(instances.c.uuid == server_id).values(
- deleted=deleted,
- vm_state=vm_state,
- task_state=task_state)
+ deleted=deleted,
+ vm_state=vm_state,
+ task_state=task_state)
self.connection.execute(stmt, autocommit=True)
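For readers unfamiliar with SQLAlchemy Core, the update_state() helper above builds a single UPDATE against the Nova instances table to force a row into the desired vm_state/task_state. A self-contained sketch of the same statement shape follows; the table definition here is an assumption (only the columns the helper touches are declared), not the real Nova schema::

    import sqlalchemy as sa

    metadata = sa.MetaData()
    # Assumed, trimmed-down stand-in for Nova's instances table.
    instances = sa.Table(
        'instances', metadata,
        sa.Column('uuid', sa.String(36)),
        sa.Column('deleted', sa.Boolean),
        sa.Column('vm_state', sa.String(255)),
        sa.Column('task_state', sa.String(255)),
    )

    # Roughly: UPDATE instances SET deleted=..., vm_state=..., task_state=...
    #          WHERE uuid = <server_id>
    stmt = (instances.update()
            .where(instances.c.uuid == 'some-server-uuid')
            .values(deleted=False, vm_state='active', task_state=None))
    print(stmt)  # prints the generated SQL; executing it needs a live connection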
def _test_create_image_409_base(self, vm_state, task_state, deleted=False):
- """Base method for create image tests based on vm and task states"""
+ """Base method for create image tests based on vm and task states."""
try:
self.update_state(self.shared_server['id'], vm_state,
task_state, deleted)
@@ -73,96 +73,96 @@
self.assertRaises(exceptions.Duplicate,
self.client.create_image,
self.shared_server['id'], image_name)
- except:
+ except Exception:
self.fail("Should not allow create image when vm_state=%s and "
"task_state=%s" % (vm_state, task_state))
finally:
self.update_state(self.shared_server['id'], 'active', None)
def test_create_image_when_vm_eq_building_task_eq_scheduling(self):
- """409 error when instance states are building,scheduling"""
+ # 409 error when instance states are building,scheduling
self._test_create_image_409_base("building", "scheduling")
def test_create_image_when_vm_eq_building_task_eq_networking(self):
- """409 error when instance states are building,networking"""
+ # 409 error when instance states are building,networking
self._test_create_image_409_base("building", "networking")
def test_create_image_when_vm_eq_building_task_eq_bdm(self):
- """409 error when instance states are building,block_device_mapping"""
+ # 409 error when instance states are building,block_device_mapping
self._test_create_image_409_base("building", "block_device_mapping")
def test_create_image_when_vm_eq_building_task_eq_spawning(self):
- """409 error when instance states are building,spawning"""
+ # 409 error when instance states are building,spawning
self._test_create_image_409_base("building", "spawning")
def test_create_image_when_vm_eq_active_task_eq_image_backup(self):
- """409 error when instance states are active,image_backup"""
+ # 409 error when instance states are active,image_backup
self._test_create_image_409_base("active", "image_backup")
def test_create_image_when_vm_eq_resized_task_eq_resize_prep(self):
- """409 error when instance states are resized,resize_prep"""
+ # 409 error when instance states are resized,resize_prep
self._test_create_image_409_base("resized", "resize_prep")
def test_create_image_when_vm_eq_resized_task_eq_resize_migrating(self):
- """409 error when instance states are resized,resize_migrating"""
+ # 409 error when instance states are resized,resize_migrating
self._test_create_image_409_base("resized", "resize_migrating")
def test_create_image_when_vm_eq_resized_task_eq_resize_migrated(self):
- """409 error when instance states are resized,resize_migrated"""
+ # 409 error when instance states are resized,resize_migrated
self._test_create_image_409_base("resized", "resize_migrated")
def test_create_image_when_vm_eq_resized_task_eq_resize_finish(self):
- """409 error when instance states are resized,resize_finish"""
+ # 409 error when instance states are resized,resize_finish
self._test_create_image_409_base("resized", "resize_finish")
def test_create_image_when_vm_eq_resized_task_eq_resize_reverting(self):
- """409 error when instance states are resized,resize_reverting"""
+ # 409 error when instance states are resized,resize_reverting
self._test_create_image_409_base("resized", "resize_reverting")
def test_create_image_when_vm_eq_resized_task_eq_resize_confirming(self):
- """409 error when instance states are resized,resize_confirming"""
+ # 409 error when instance states are resized,resize_confirming
self._test_create_image_409_base("resized", "resize_confirming")
def test_create_image_when_vm_eq_active_task_eq_resize_verify(self):
- """409 error when instance states are active,resize_verify"""
+ # 409 error when instance states are active,resize_verify
self._test_create_image_409_base("active", "resize_verify")
def test_create_image_when_vm_eq_active_task_eq_updating_password(self):
- """409 error when instance states are active,updating_password"""
+ # 409 error when instance states are active,updating_password
self._test_create_image_409_base("active", "updating_password")
def test_create_image_when_vm_eq_active_task_eq_rebuilding(self):
- """409 error when instance states are active,rebuilding"""
+ # 409 error when instance states are active,rebuilding
self._test_create_image_409_base("active", "rebuilding")
def test_create_image_when_vm_eq_active_task_eq_rebooting(self):
- """409 error when instance states are active,rebooting"""
+ # 409 error when instance states are active,rebooting
self._test_create_image_409_base("active", "rebooting")
def test_create_image_when_vm_eq_building_task_eq_deleting(self):
- """409 error when instance states are building,deleting"""
+ # 409 error when instance states are building,deleting
self._test_create_image_409_base("building", "deleting")
def test_create_image_when_vm_eq_active_task_eq_deleting(self):
- """409 error when instance states are active,deleting"""
+ # 409 error when instance states are active,deleting
self._test_create_image_409_base("active", "deleting")
def test_create_image_when_vm_eq_error_task_eq_building(self):
- """409 error when instance states are error,building"""
+ # 409 error when instance states are error,building
self._test_create_image_409_base("error", "building")
def test_create_image_when_vm_eq_error_task_eq_none(self):
- """409 error when instance states are error,None"""
+ # 409 error when instance states are error,None
self._test_create_image_409_base("error", None)
def test_create_image_when_vm_eq_deleted_task_eq_none(self):
- """409 error when instance states are deleted,None"""
+ # 409 error when instance states are deleted,None
self._test_create_image_409_base("deleted", None)
def test_create_image_when_vm_eq_resized_task_eq_none(self):
- """409 error when instance states are resized,None"""
+ # 409 error when instance states are resized,None
self._test_create_image_409_base("resized", None)
def test_create_image_when_vm_eq_error_task_eq_resize_prep(self):
- """409 error when instance states are error,resize_prep"""
+ # 409 error when instance states are error,resize_prep
self._test_create_image_409_base("error", "resize_prep")
diff --git a/tempest/tests/compute/images/test_list_image_filters.py b/tempest/tests/compute/images/test_list_image_filters.py
index b6be358..26119e3 100644
--- a/tempest/tests/compute/images/test_list_image_filters.py
+++ b/tempest/tests/compute/images/test_list_image_filters.py
@@ -17,8 +17,9 @@
from nose.plugins.attrib import attr
+from tempest.common.utils.data_utils import parse_image_id
+from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
-from tempest.common.utils.data_utils import rand_name, parse_image_id
from tempest.tests.compute.base import BaseComputeTest
@@ -76,15 +77,14 @@
@attr(type='negative')
def test_get_image_not_existing(self):
- """Check raises a NotFound"""
+ # Check that a NotFound exception is raised
self.assertRaises(exceptions.NotFound, self.client.get_image,
"nonexistingimageid")
@attr(type='positive')
def test_list_images_filter_by_status(self):
- """
- The list of images should contain only images with the provided status
- """
+ # The list of images should contain only images with the
+ # provided status
params = {'status': 'ACTIVE'}
resp, images = self.client.list_images(params)
@@ -94,9 +94,8 @@
@attr(type='positive')
def test_list_images_filter_by_name(self):
- """
- List of all images should contain the expected images filtered by name
- """
+ # List of all images should contain the expected images filtered
+ # by name
params = {'name': self.image1['name']}
resp, images = self.client.list_images(params)
@@ -106,7 +105,7 @@
@attr(type='positive')
def test_list_images_filter_by_server_id(self):
- """The images should contain images filtered by server id"""
+ # The images should contain images filtered by server id
params = {'server': self.server1['id']}
resp, images = self.client.list_images(params)
@@ -118,7 +117,7 @@
@attr(type='positive')
def test_list_images_filter_by_server_ref(self):
- """The list of servers should be filtered by server ref"""
+ # The list of images should be filtered by server ref
server_links = self.server2['links']
# Try all server link types
@@ -135,7 +134,7 @@
@attr(type='positive')
def test_list_images_filter_by_type(self):
- """The list of servers should be filtered by image type"""
+ # The list of images should be filtered by image type
params = {'type': 'snapshot'}
resp, images = self.client.list_images(params)
@@ -146,14 +145,14 @@
@attr(type='positive')
def test_list_images_limit_results(self):
- """Verify only the expected number of results are returned"""
+ # Verify only the expected number of results are returned
params = {'limit': '1'}
resp, images = self.client.list_images(params)
self.assertEqual(1, len(images))
@attr(type='positive')
def test_list_images_filter_by_changes_since(self):
- """Verify only updated images are returned in the detailed list"""
+ # Verify only updated images are returned in the detailed list
#Becoming ACTIVE will modify the updated time
#Filter by the image's created time
@@ -164,10 +163,8 @@
@attr(type='positive')
def test_list_images_with_detail_filter_by_status(self):
- """
- Detailed list of all images should only contain images
- with the provided status
- """
+ # Detailed list of all images should only contain images
+ # with the provided status
params = {'status': 'ACTIVE'}
resp, images = self.client.list_images_with_detail(params)
@@ -177,10 +174,8 @@
@attr(type='positive')
def test_list_images_with_detail_filter_by_name(self):
- """
- Detailed list of all images should contain the expected
- images filtered by name
- """
+ # Detailed list of all images should contain the expected
+ # images filtered by name
params = {'name': self.image1['name']}
resp, images = self.client.list_images_with_detail(params)
@@ -190,17 +185,15 @@
@attr(type='positive')
def test_list_images_with_detail_limit_results(self):
- """
- Verify only the expected number of results (with full details)
- are returned
- """
+ # Verify only the expected number of results (with full details)
+ # are returned
params = {'limit': '1'}
resp, images = self.client.list_images_with_detail(params)
self.assertEqual(1, len(images))
@attr(type='positive')
def test_list_images_with_detail_filter_by_server_ref(self):
- """Detailed list of servers should be filtered by server ref"""
+ # Detailed list of images should be filtered by server ref
server_links = self.server2['links']
# Try all server link types
@@ -217,7 +210,7 @@
@attr(type='positive')
def test_list_images_with_detail_filter_by_type(self):
- """The detailed list of servers should be filtered by image type"""
+ # The detailed list of images should be filtered by image type
params = {'type': 'snapshot'}
resp, images = self.client.list_images_with_detail(params)
resp, image4 = self.client.get_image(self.image_ref)
@@ -229,7 +222,7 @@
@attr(type='positive')
def test_list_images_with_detail_filter_by_changes_since(self):
- """Verify an update image is returned"""
+ # Verify an updated image is returned
#Becoming ACTIVE will modify the updated time
#Filter by the image's created time
@@ -239,10 +232,10 @@
@attr(type='negative')
def test_get_nonexistant_image(self):
- """Negative test: GET on non existant image should fail"""
+ # Negative test: GET on a non-existent image should fail
try:
resp, image = self.client.get_image(999)
- except:
+ except Exception:
pass
else:
self.fail('GET on non existant image should fail')
diff --git a/tempest/tests/compute/images/test_list_images.py b/tempest/tests/compute/images/test_list_images.py
index ca8ec18..da92ca8 100644
--- a/tempest/tests/compute/images/test_list_images.py
+++ b/tempest/tests/compute/images/test_list_images.py
@@ -17,8 +17,9 @@
from nose.plugins.attrib import attr
+from tempest.common.utils.data_utils import parse_image_id
+from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
-from tempest.common.utils.data_utils import rand_name, parse_image_id
from tempest.tests.compute.base import BaseComputeTest
@@ -35,20 +36,20 @@
@attr(type='smoke')
def test_get_image(self):
- """Returns the correct details for a single image"""
+ # Returns the correct details for a single image
resp, image = self.client.get_image(self.image_ref)
self.assertEqual(self.image_ref, image['id'])
@attr(type='smoke')
def test_list_images(self):
- """The list of all images should contain the image"""
+ # The list of all images should contain the image
resp, images = self.client.list_images()
found = any([i for i in images if i['id'] == self.image_ref])
self.assertTrue(found)
@attr(type='smoke')
def test_list_images_with_detail(self):
- """Detailed list of all images should contain the expected images"""
+ # Detailed list of all images should contain the expected images
resp, images = self.client.list_images_with_detail()
found = any([i for i in images if i['id'] == self.image_ref])
self.assertTrue(found)
diff --git a/tempest/tests/compute/keypairs/test_keypairs.py b/tempest/tests/compute/keypairs/test_keypairs.py
index 95520b5..7d95a9b 100644
--- a/tempest/tests/compute/keypairs/test_keypairs.py
+++ b/tempest/tests/compute/keypairs/test_keypairs.py
@@ -18,8 +18,8 @@
from nose.plugins.attrib import attr
import unittest2 as unittest
-from tempest import exceptions
from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
from tempest.tests.compute.base import BaseComputeTestJSON
from tempest.tests.compute.base import BaseComputeTestXML
@@ -28,7 +28,7 @@
@attr(type='positive')
def test_keypairs_create_list_delete(self):
- """Keypairs created should be available in the response list"""
+ # Keypairs created should be available in the response list
#Create 3 keypairs
key_list = list()
for i in range(3):
@@ -62,7 +62,7 @@
@attr(type='positive')
def test_keypair_create_delete(self):
- """Keypair should be created, verified and deleted"""
+ # Keypair should be created, verified and deleted
k_name = rand_name('keypair-')
resp, keypair = self.client.create_keypair(k_name)
self.assertEqual(200, resp.status)
@@ -79,7 +79,7 @@
@attr(type='positive')
@unittest.skip("Skipped until the Bug #980688 is resolved")
def test_get_keypair_detail(self):
- """Keypair should be created, Got details by name and deleted"""
+ # Keypair should be created, its details fetched by name, then deleted
k_name = rand_name('keypair-')
resp, keypair = self.client.create_keypair(k_name)
try:
@@ -93,7 +93,7 @@
public_key = keypair_detail['public_key']
self.assertTrue(public_key is not None,
"Field public_key is empty or not found.")
- except:
+ except Exception:
self.fail("GET keypair details requested by keypair name "
"has failed")
finally:
@@ -102,7 +102,7 @@
@attr(type='positive')
def test_keypair_create_with_pub_key(self):
- """Keypair should be created with a given public key"""
+ # Keypair should be created with a given public key
k_name = rand_name('keypair-')
pub_key = ("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCs"
"Ne3/1ILNCqFyfYWDeTKLD6jEXC2OQHLmietMWW+/vd"
@@ -126,7 +126,7 @@
@attr(type='negative')
def test_keypair_create_with_invalid_pub_key(self):
- """Keypair should not be created with a non RSA public key"""
+ # Keypair should not be created with a non RSA public key
k_name = rand_name('keypair-')
pub_key = "ssh-rsa JUNK nova@ubuntu"
try:
@@ -137,8 +137,9 @@
self.fail('Expected BadRequest for invalid public key')
@attr(type='negative')
+ @unittest.skip("Skipped until the Bug #1086980 is resolved")
def test_keypair_delete_nonexistant_key(self):
- """Non-existant key deletion should throw a proper error"""
+ # Non-existent key deletion should throw a proper error
k_name = rand_name("keypair-non-existant-")
try:
resp, _ = self.client.delete_keypair(k_name)
@@ -149,7 +150,7 @@
@attr(type='negative')
def test_create_keypair_with_empty_public_key(self):
- """Keypair should not be created with an empty public key"""
+ # Keypair should not be created with an empty public key
k_name = rand_name("keypair-")
pub_key = ' '
try:
@@ -161,7 +162,7 @@
@attr(type='negative')
def test_create_keypair_when_public_key_bits_exceeds_maximum(self):
- """Keypair should not be created when public key bits are too long"""
+ # Keypair should not be created when public key bits are too long
k_name = rand_name("keypair-")
pub_key = 'ssh-rsa ' + 'A' * 2048 + ' openstack@ubuntu'
try:
@@ -173,7 +174,7 @@
@attr(type='negative')
def test_create_keypair_with_duplicate_name(self):
- """Keypairs with duplicate names should not be created"""
+ # Keypairs with duplicate names should not be created
k_name = rand_name('keypair-')
resp, _ = self.client.create_keypair(k_name)
self.assertEqual(200, resp.status)
@@ -190,7 +191,7 @@
@attr(type='negative')
def test_create_keypair_with_empty_name_string(self):
- """Keypairs with name being an empty string should not be created"""
+ # Keypairs with name being an empty string should not be created
try:
resp, _ = self.client.create_keypair('')
except exceptions.BadRequest:
@@ -200,7 +201,7 @@
@attr(type='negative')
def test_create_keypair_with_long_keynames(self):
- """Keypairs with name longer than 255 chars should not be created"""
+ # Keypairs with name longer than 255 chars should not be created
k_name = 'keypair-'.ljust(260, '0')
try:
resp, _ = self.client.create_keypair(k_name)
@@ -211,7 +212,7 @@
@attr(type='negative')
def test_create_keypair_invalid_name(self):
- """Keypairs with name being an invalid name should not be created"""
+ # Keypairs with an invalid name should not be created
k_name = 'key_/.\@:'
try:
resp, _ = self.client.create_keypair(k_name)
diff --git a/tempest/tests/compute/limits/test_absolute_limits.py b/tempest/tests/compute/limits/test_absolute_limits.py
index ede0dc2..89c5b25 100644
--- a/tempest/tests/compute/limits/test_absolute_limits.py
+++ b/tempest/tests/compute/limits/test_absolute_limits.py
@@ -28,9 +28,7 @@
@unittest.skip("Skipped until the Bug #1025294 is resolved")
def test_absLimits_get(self):
- """
- To check if all limits are present in the response
- """
+ # To check if all limits are present in the response
resp, absolute_limits = self.client.get_absolute_limits()
expected_elements = ['maxImageMeta', 'maxPersonality',
'maxPersonalitySize',
diff --git a/tempest/tests/compute/security_groups/test_security_group_rules.py b/tempest/tests/compute/security_groups/test_security_group_rules.py
index fd56dc3..805adf4 100644
--- a/tempest/tests/compute/security_groups/test_security_group_rules.py
+++ b/tempest/tests/compute/security_groups/test_security_group_rules.py
@@ -17,8 +17,8 @@
from nose.plugins.attrib import attr
-from tempest import exceptions
from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
from tempest.tests.compute import base
@@ -30,10 +30,8 @@
@attr(type='positive')
def test_security_group_rules_create(self):
- """
- Positive test: Creation of Security Group rule
- should be successfull
- """
+ # Positive test: Creation of Security Group rule
+ # should be successful
try:
#Creating a Security Group to add rules to it
s_name = rand_name('securitygroup-')
@@ -60,52 +58,55 @@
@attr(type='positive')
def test_security_group_rules_create_with_optional_arguments(self):
- """
- Positive test: Creation of Security Group rule
- with optional arguments
- should be successfull
- """
+ # Positive test: Creation of Security Group rule
+ # with optional arguments
+ # should be successful
+
+ rule_id = None
+ secgroup1 = None
+ secgroup2 = None
try:
#Creating a Security Group to add rules to it
s_name = rand_name('securitygroup-')
s_description = rand_name('description-')
resp, securitygroup =\
self.client.create_security_group(s_name, s_description)
- securitygroup_id1 = securitygroup['id']
+ secgroup1 = securitygroup['id']
#Creating a Security Group so as to assign group_id to the rule
s_name2 = rand_name('securitygroup-')
s_description2 = rand_name('description-')
resp, securitygroup =\
self.client.create_security_group(s_name2, s_description2)
- securitygroup_id2 = securitygroup['id']
+ secgroup2 = securitygroup['id']
#Adding rules to the created Security Group with optional arguments
- parent_group_id = securitygroup_id1
+ parent_group_id = secgroup1
ip_protocol = 'tcp'
from_port = 22
to_port = 22
cidr = '10.2.3.124/24'
- group_id = securitygroup_id2
+ group_id = secgroup2
resp, rule =\
self.client.create_security_group_rule(parent_group_id,
ip_protocol,
from_port, to_port,
cidr=cidr,
group_id=group_id)
+ rule_id = rule['id']
self.assertEqual(200, resp.status)
finally:
#Deleting the Security Group rule, created in this method
- group_rule_id = rule['id']
- self.client.delete_security_group_rule(group_rule_id)
+ if rule_id:
+ self.client.delete_security_group_rule(rule_id)
#Deleting the Security Groups created in this method
- resp, _ = self.client.delete_security_group(securitygroup_id1)
- resp, _ = self.client.delete_security_group(securitygroup_id2)
+ if secgroup1:
+ self.client.delete_security_group(secgroup1)
+ if secgroup2:
+ self.client.delete_security_group(secgroup2)
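The reworked cleanup above follows a standard pattern: initialise every resource handle to None before the try block and have finally delete only what actually got created, so a failure early in the test no longer raises a NameError during teardown on top of the original error. A generic sketch of the pattern (the client and method names below are placeholders, not the tempest client API)::

    def create_rule_with_cleanup(client):
        group_id = None
        rule_id = None
        try:
            group_id = client.create_group()        # may fail before a rule exists
            rule_id = client.create_rule(group_id)  # may fail after the group exists
            return rule_id
        finally:
            # Delete only what was actually created, most recent first.
            if rule_id:
                client.delete_rule(rule_id)
            if group_id:
                client.delete_group(group_id)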
@attr(type='positive')
def test_security_group_rules_create_delete(self):
- """
- Positive test: Deletion of Security Group rule
- should be successfull
- """
+ # Positive test: Deletion of Security Group rule
+ # should be successful
try:
#Creating a Security Group to add rule to it
s_name = rand_name('securitygroup-')
@@ -131,10 +132,8 @@
@attr(type='negative')
def test_security_group_rules_create_with_invalid_id(self):
- """
- Negative test: Creation of Security Group rule should FAIL
- with invalid Parent group id
- """
+ # Negative test: Creation of Security Group rule should FAIL
+ # with invalid Parent group id
#Adding rules to the invalid Security Group id
parent_group_id = rand_name('999')
ip_protocol = 'tcp'
@@ -153,10 +152,8 @@
@attr(type='negative')
def test_security_group_rules_create_with_invalid_ip_protocol(self):
- """
- Negative test: Creation of Security Group rule should FAIL
- with invalid ip_protocol
- """
+ # Negative test: Creation of Security Group rule should FAIL
+ # with invalid ip_protocol
#Creating a Security Group to add rule to it
s_name = rand_name('securitygroup-')
s_description = rand_name('description-')
@@ -182,10 +179,8 @@
@attr(type='negative')
def test_security_group_rules_create_with_invalid_from_port(self):
- """
- Negative test: Creation of Security Group rule should FAIL
- with invalid from_port
- """
+ # Negative test: Creation of Security Group rule should FAIL
+ # with invalid from_port
#Creating a Security Group to add rule to it
s_name = rand_name('securitygroup-')
s_description = rand_name('description-')
@@ -211,10 +206,8 @@
@attr(type='negative')
def test_security_group_rules_create_with_invalid_to_port(self):
- """
- Negative test: Creation of Security Group rule should FAIL
- with invalid from_port
- """
+ # Negative test: Creation of Security Group rule should FAIL
+ # with invalid to_port
#Creating a Security Group to add rule to it
s_name = rand_name('securitygroup-')
s_description = rand_name('description-')
@@ -240,10 +233,8 @@
@attr(type='negative')
def test_security_group_rules_delete_with_invalid_id(self):
- """
- Negative test: Deletion of Security Group rule should be FAIL
- with invalid rule id
- """
+ # Negative test: Deletion of Security Group rule should FAIL
+ # with invalid rule id
try:
self.client.delete_security_group_rule(rand_name('999'))
except exceptions.NotFound:
diff --git a/tempest/tests/compute/security_groups/test_security_groups.py b/tempest/tests/compute/security_groups/test_security_groups.py
index 81e84ce..5c0bd82 100644
--- a/tempest/tests/compute/security_groups/test_security_groups.py
+++ b/tempest/tests/compute/security_groups/test_security_groups.py
@@ -17,8 +17,8 @@
from nose.plugins.attrib import attr
-from tempest import exceptions
from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
from tempest.tests.compute import base
@@ -30,7 +30,7 @@
@attr(type='positive')
def test_security_groups_create_list_delete(self):
- """Positive test:Should return the list of Security Groups"""
+ # Positive test: Should return the list of Security Groups
try:
#Create 3 Security Groups
security_group_list = list()
@@ -61,7 +61,7 @@
@attr(type='positive')
def test_security_group_create_delete(self):
- """Security Group should be created, verified and deleted"""
+ # Security Group should be created, verified and deleted
try:
s_name = rand_name('securitygroup-')
s_description = rand_name('description-')
@@ -83,7 +83,7 @@
@attr(type='positive')
def test_security_group_create_get_delete(self):
- """Security Group should be created, fetched and deleted"""
+ # Security Group should be created, fetched and deleted
try:
s_name = rand_name('securitygroup-')
s_description = rand_name('description-')
@@ -104,10 +104,8 @@
@attr(type='negative')
def test_security_group_get_nonexistant_group(self):
- """
- Negative test:Should not be able to GET the details
- of nonexistant Security Group
- """
+ # Negative test: Should not be able to GET the details
+ # of a non-existent Security Group
security_group_id = []
resp, body = self.client.list_security_groups()
for i in range(len(body)):
@@ -128,10 +126,8 @@
@attr(type='negative')
def test_security_group_create_with_invalid_group_name(self):
- """
- Negative test: Security Group should not be created with group name as
- an empty string/with white spaces/chars more than 255
- """
+ # Negative test: Security Group should not be created with group name
+ # as an empty string/with white spaces/more than 255 chars
s_description = rand_name('description-')
#Create Security Group with empty string as group name
try:
@@ -161,10 +157,8 @@
@attr(type='negative')
def test_security_group_create_with_invalid_group_description(self):
- """
- Negative test:Security Group should not be created with description as
- an empty string/with white spaces/chars more than 255
- """
+ # Negative test: Security Group should not be created with description
+ # as an empty string/with white spaces/more than 255 chars
s_name = rand_name('securitygroup-')
#Create Security Group with empty string as description
try:
@@ -194,10 +188,8 @@
@attr(type='negative')
def test_security_group_create_with_duplicate_name(self):
- """
- Negative test:Security Group with duplicate name should not
- be created
- """
+ # Negative test: Security Group with duplicate name should not
+ # be created
try:
s_name = rand_name('securitygroup-')
s_description = rand_name('description-')
@@ -220,9 +212,7 @@
@attr(type='negative')
def test_delete_nonexistant_security_group(self):
- """
- Negative test:Deletion of a nonexistant Security Group should Fail
- """
+ # Negative test: Deletion of a non-existent Security Group should fail
security_group_id = []
resp, body = self.client.list_security_groups()
for i in range(len(body)):
@@ -242,10 +232,8 @@
@attr(type='negative')
def test_delete_security_group_without_passing_id(self):
- """
- Negative test:Deletion of a Security Group with out passing ID
- should Fail
- """
+ # Negative test: Deletion of a Security Group without passing its ID
+ # should fail
try:
resp, body = self.client.delete_security_group('')
except exceptions.NotFound:
@@ -255,10 +243,8 @@
'with out passing ID')
def test_server_security_groups(self):
- """
- Checks that security groups may be added and linked to a server
- and not deleted if the server is active.
- """
+ # Checks that security groups may be added and linked to a server
+ # and not deleted if the server is active.
# Create a couple security groups that we will use
# for the server resource this test creates
sg_name = rand_name('sg')
diff --git a/tempest/tests/compute/servers/test_console_output.py b/tempest/tests/compute/servers/test_console_output.py
index b08dcc2..3ad29a1 100644
--- a/tempest/tests/compute/servers/test_console_output.py
+++ b/tempest/tests/compute/servers/test_console_output.py
@@ -18,16 +18,15 @@
from nose.plugins.attrib import attr
import unittest2 as unittest
-from tempest import exceptions
from tempest.common.utils.data_utils import rand_name
-from tempest.tests.compute.base import BaseComputeTest
+from tempest import exceptions
+from tempest.tests.compute import base
-class ConsoleOutputTest(BaseComputeTest):
+class ConsoleOutputTest(object):
@classmethod
- def setUpClass(cls):
- super(ConsoleOutputTest, cls).setUpClass()
+ def setUpClass(self, cls):
cls.client = cls.console_outputs_client
cls.servers_client = cls.servers_client
cls.name = rand_name('server')
@@ -39,16 +38,13 @@
cls.servers_client.wait_for_server_status(cls.server_id, 'ACTIVE')
@classmethod
- def tearDownClass(cls):
+ def tearDownClass(self, cls):
cls.servers_client.delete_server(cls.server_id)
- super(ConsoleOutputTest, cls).tearDownClass()
@attr(type='positive')
def test_get_console_output(self):
- """
- Positive test:Should be able to GET the console output
- for a given server_id and number of lines
- """
+ # Positive test: Should be able to GET the console output
+ # for a given server_id and number of lines
def get_output():
resp, output = self.client.get_console_output(self.server_id, 10)
self.assertEqual(200, resp.status)
@@ -59,10 +55,8 @@
@attr(type='negative')
def test_get_console_output_invalid_server_id(self):
- """
- Negative test: Should not be able to get the console output
- for an invalid server_id
- """
+ # Negative test: Should not be able to get the console output
+ # for an invalid server_id
try:
resp, output = self.client.get_console_output('!@#$%^&*()', 10)
except exceptions.NotFound:
@@ -71,10 +65,8 @@
@attr(type='positive')
@unittest.skip('Until tempest bug 1014683 is fixed.')
def test_get_console_output_server_id_in_reboot_status(self):
- """
- Positive test:Should be able to GET the console output
- for a given server_id in reboot status
- """
+ # Positive test: Should be able to GET the console output
+ # for a given server_id in reboot status
try:
resp, output = self.servers_client.reboot(self.server_id, 'SOFT')
self.servers_client.wait_for_server_status(self.server_id,
@@ -92,3 +84,31 @@
finally:
self.servers_client.wait_for_server_status(self.server_id,
'ACTIVE')
+
+
+@attr(type='smoke')
+class ConsoleOutputTestJSON(base.BaseComputeTestJSON,
+ ConsoleOutputTest):
+ @classmethod
+ def setUpClass(cls):
+ super(ConsoleOutputTestJSON, cls).setUpClass()
+ ConsoleOutputTest.setUpClass(cls)
+
+ @classmethod
+ def tearDownClass(cls):
+ ConsoleOutputTest.tearDownClass(cls)
+ super(ConsoleOutputTestJSON, cls).tearDownClass()
+
+
+@attr(type='smoke')
+class ConsoleOutputTestXML(base.BaseComputeTestXML,
+ ConsoleOutputTest):
+ @classmethod
+ def setUpClass(cls):
+ super(ConsoleOutputTestXML, cls).setUpClass()
+ ConsoleOutputTest.setUpClass(cls)
+
+ @classmethod
+ def tearDownClass(cls):
+ ConsoleOutputTest.tearDownClass(cls)
+ super(ConsoleOutputTestXML, cls).tearDownClass()
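The two small subclasses added above are the pattern this change uses to run one set of test bodies against both the JSON and XML clients: the shared tests live in a plain mixin, and each concrete class calls the mixin's setUpClass/tearDownClass explicitly, passing itself in. A stripped-down, runnable sketch of the same shape (class and attribute names here are illustrative only, not the tempest base classes)::

    import unittest


    class SharedTests(object):
        """Test bodies shared by the JSON and XML variants."""

        @classmethod
        def _shared_setup(cls, concrete_cls):
            # The concrete class passes itself in, mirroring how the
            # subclasses above call ConsoleOutputTest.setUpClass(cls).
            concrete_cls.value = concrete_cls.interface

        def test_interface_is_set(self):
            self.assertIn(self.value, ('json', 'xml'))


    class SharedTestsJSON(unittest.TestCase, SharedTests):
        @classmethod
        def setUpClass(cls):
            cls.interface = 'json'
            SharedTests._shared_setup(cls)


    class SharedTestsXML(unittest.TestCase, SharedTests):
        @classmethod
        def setUpClass(cls):
            cls.interface = 'xml'
            SharedTests._shared_setup(cls)


    if __name__ == '__main__':
        unittest.main()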
diff --git a/tempest/tests/compute/servers/test_create_server.py b/tempest/tests/compute/servers/test_create_server.py
index 5d6c2ba..c5a54dc 100644
--- a/tempest/tests/compute/servers/test_create_server.py
+++ b/tempest/tests/compute/servers/test_create_server.py
@@ -16,13 +16,16 @@
# under the License.
import base64
+import nose
from nose.plugins.attrib import attr
import unittest2 as unittest
-import tempest.config
+
from tempest.common.utils.data_utils import rand_name
from tempest.common.utils.linux.remote_client import RemoteClient
+import tempest.config
+from tempest.tests import compute
from tempest.tests.compute import base
@@ -35,9 +38,12 @@
cls.meta = {'hello': 'world'}
cls.accessIPv4 = '1.1.1.1'
cls.accessIPv6 = '::babe:220.12.22.2'
+ # See: http://tools.ietf.org/html/rfc5952 (section 4)
+ # This is the canonicalized form of the above.
+ cls.accessIPv6canon = '::babe:dc0c:1602'
cls.name = rand_name('server')
file_contents = 'This is a test file.'
- personality = [{'path': '/etc/test.txt',
+ personality = [{'path': '/test.txt',
'contents': base64.b64encode(file_contents)}]
cls.client = cls.servers_client
cli_resp = cls.client.create_server(cls.name,
@@ -46,7 +52,8 @@
meta=cls.meta,
accessIPv4=cls.accessIPv4,
accessIPv6=cls.accessIPv6,
- personality=personality)
+ personality=personality,
+ disk_config=cls.disk_config)
cls.resp, cls.server_initial = cli_resp
cls.password = cls.server_initial['adminPass']
cls.client.wait_for_server_status(cls.server_initial['id'], 'ACTIVE')
@@ -58,17 +65,17 @@
@attr(type='smoke')
def test_create_server_response(self):
- """Check that the required fields are returned with values"""
+ # Check that the required fields are returned with values
self.assertEqual(202, self.resp.status)
self.assertTrue(self.server_initial['id'] is not None)
self.assertTrue(self.server_initial['adminPass'] is not None)
@attr(type='smoke')
def test_verify_server_details(self):
- """Verify the specified server attributes are set correctly"""
-
+ # Verify the specified server attributes are set correctly
self.assertEqual(self.accessIPv4, self.server['accessIPv4'])
- self.assertEqual(self.accessIPv6, self.server['accessIPv6'])
+ self.assertIn(self.server['accessIPv6'],
+ [self.accessIPv6, self.accessIPv6canon])
self.assertEqual(self.name, self.server['name'])
self.assertEqual(self.image_ref, self.server['image']['id'])
self.assertEqual(str(self.flavor_ref), self.server['flavor']['id'])
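The switch to assertIn above allows for backends that hand back the RFC 5952 canonical text of the IPv6 address instead of echoing the exact string the test supplied. The canonical form quoted in the new accessIPv6canon attribute can be reproduced with the Python 3 standard-library ipaddress module (shown purely as an illustration; it is not what tempest itself uses)::

    import ipaddress

    supplied = '::babe:220.12.22.2'
    # The trailing dotted-quad encodes the last 32 bits: 220.12.22.2 -> dc0c:1602,
    # and RFC 5952 prefers the compressed, lower-case hex-group spelling.
    canonical = str(ipaddress.ip_address(supplied))
    assert canonical == '::babe:dc0c:1602'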
@@ -76,7 +83,7 @@
@attr(type='smoke')
def test_list_servers(self):
- """The created server should be in the list of all servers"""
+ # The created server should be in the list of all servers
resp, body = self.client.list_servers()
servers = body['servers']
found = any([i for i in servers if i['id'] == self.server['id']])
@@ -84,7 +91,7 @@
@attr(type='smoke')
def test_list_servers_with_detail(self):
- """The created server should be in the detailed list of all servers"""
+ # The created server should be in the detailed list of all servers
resp, body = self.client.list_servers_with_detail()
servers = body['servers']
found = any([i for i in servers if i['id'] == self.server['id']])
@@ -93,17 +100,15 @@
@attr(type='positive')
@unittest.skipIf(not run_ssh, 'Instance validation tests are disabled.')
def test_can_log_into_created_server(self):
- """Check that the user can authenticate with the generated password"""
+ # Check that the user can authenticate with the generated password
linux_client = RemoteClient(self.server, self.ssh_user, self.password)
self.assertTrue(linux_client.can_authenticate())
@attr(type='positive')
@unittest.skipIf(not run_ssh, 'Instance validation tests are disabled.')
def test_verify_created_server_vcpus(self):
- """
- Verify that the number of vcpus reported by the instance matches
- the amount stated by the flavor
- """
+ # Verify that the number of vcpus reported by the instance matches
+ # the amount stated by the flavor
resp, flavor = self.flavors_client.get_flavor_details(self.flavor_ref)
linux_client = RemoteClient(self.server, self.ssh_user, self.password)
self.assertEqual(flavor['vcpus'], linux_client.get_number_of_vcpus())
@@ -111,16 +116,54 @@
@attr(type='positive')
@unittest.skipIf(not run_ssh, 'Instance validation tests are disabled.')
def test_host_name_is_same_as_server_name(self):
- """Verify the instance host name is the same as the server name"""
+ # Verify the instance host name is the same as the server name
linux_client = RemoteClient(self.server, self.ssh_user, self.password)
self.assertTrue(linux_client.hostname_equals_servername(self.name))
+@attr(type='positive')
+class ServersTestAutoDisk(base.BaseComputeTestJSON,
+ ServersTest):
+ @classmethod
+ def setUpClass(cls):
+ if not compute.DISK_CONFIG_ENABLED:
+ msg = "DiskConfig extension not enabled."
+ raise nose.SkipTest(msg)
+ super(ServersTestAutoDisk, cls).setUpClass()
+ cls.disk_config = 'AUTO'
+ ServersTest.setUpClass(cls)
+
+ @classmethod
+ def tearDownClass(cls):
+ ServersTest.tearDownClass(cls)
+ super(ServersTestAutoDisk, cls).tearDownClass()
+
+
+@attr(type='positive')
+class ServersTestManualDisk(base.BaseComputeTestJSON,
+ ServersTest):
+ @classmethod
+ def setUpClass(cls):
+ if not compute.DISK_CONFIG_ENABLED:
+ msg = "DiskConfig extension not enabled."
+ raise nose.SkipTest(msg)
+ super(ServersTestManualDisk, cls).setUpClass()
+ cls.disk_config = 'MANUAL'
+ ServersTest.setUpClass(cls)
+
+ @classmethod
+ def tearDownClass(cls):
+ ServersTest.tearDownClass(cls)
+ super(ServersTestManualDisk, cls).tearDownClass()
+
+
+@attr(type='smoke')
class ServersTestJSON(base.BaseComputeTestJSON,
ServersTest):
@classmethod
def setUpClass(cls):
super(ServersTestJSON, cls).setUpClass()
+ cls.disk_config = None
ServersTest.setUpClass(cls)
@classmethod
@@ -129,11 +172,13 @@
super(ServersTestJSON, cls).tearDownClass()
+@attr(type='smoke')
class ServersTestXML(base.BaseComputeTestXML,
ServersTest):
@classmethod
def setUpClass(cls):
super(ServersTestXML, cls).setUpClass()
+ cls.disk_config = None
ServersTest.setUpClass(cls)
@classmethod
diff --git a/tempest/tests/compute/servers/test_disk_config.py b/tempest/tests/compute/servers/test_disk_config.py
index a18fabb..490156b 100644
--- a/tempest/tests/compute/servers/test_disk_config.py
+++ b/tempest/tests/compute/servers/test_disk_config.py
@@ -19,10 +19,10 @@
from nose.plugins.attrib import attr
import unittest2 as unittest
-from tempest import exceptions
from tempest.common.utils.data_utils import rand_name
-from tempest.tests.compute.base import BaseComputeTest
+from tempest import exceptions
from tempest.tests import compute
+from tempest.tests.compute.base import BaseComputeTest
class TestServerDiskConfig(BaseComputeTest):
@@ -36,51 +36,13 @@
cls.client = cls.os.servers_client
@attr(type='positive')
- def test_create_server_with_manual_disk_config(self):
- """A server should be created with manual disk config"""
- name = rand_name('server')
- resp, server = self.client.create_server(name,
- self.image_ref,
- self.flavor_ref,
- disk_config='MANUAL')
-
- #Wait for the server to become active
- self.client.wait_for_server_status(server['id'], 'ACTIVE')
-
- #Verify the specified attributes are set correctly
- resp, server = self.client.get_server(server['id'])
- self.assertEqual('MANUAL', server['OS-DCF:diskConfig'])
-
- #Delete the server
- resp, body = self.client.delete_server(server['id'])
-
- @attr(type='positive')
- def test_create_server_with_auto_disk_config(self):
- """A server should be created with auto disk config"""
- name = rand_name('server')
- resp, server = self.client.create_server(name,
- self.image_ref,
- self.flavor_ref,
- disk_config='AUTO')
-
- #Wait for the server to become active
- self.client.wait_for_server_status(server['id'], 'ACTIVE')
-
- #Verify the specified attributes are set correctly
- resp, server = self.client.get_server(server['id'])
- self.assertEqual('AUTO', server['OS-DCF:diskConfig'])
-
- #Delete the server
- resp, body = self.client.delete_server(server['id'])
-
- @attr(type='positive')
def test_rebuild_server_with_manual_disk_config(self):
- """A server should be rebuilt using the manual disk config option"""
+ # A server should be rebuilt using the manual disk config option
name = rand_name('server')
- resp, server = self.client.create_server(name,
- self.image_ref,
- self.flavor_ref,
- disk_config='AUTO')
+ resp, server = self.create_server_with_extras(name,
+ self.image_ref,
+ self.flavor_ref,
+ disk_config='AUTO')
#Wait for the server to become active
self.client.wait_for_server_status(server['id'], 'ACTIVE')
@@ -105,12 +67,12 @@
@attr(type='positive')
def test_rebuild_server_with_auto_disk_config(self):
- """A server should be rebuilt using the auto disk config option"""
+ # A server should be rebuilt using the auto disk config option
name = rand_name('server')
- resp, server = self.client.create_server(name,
- self.image_ref,
- self.flavor_ref,
- disk_config='MANUAL')
+ resp, server = self.create_server_with_extras(name,
+ self.image_ref,
+ self.flavor_ref,
+ disk_config='MANUAL')
#Wait for the server to become active
self.client.wait_for_server_status(server['id'], 'ACTIVE')
@@ -136,12 +98,12 @@
@attr(type='positive')
@unittest.skipUnless(compute.RESIZE_AVAILABLE, 'Resize not available.')
def test_resize_server_from_manual_to_auto(self):
- """A server should be resized from manual to auto disk config"""
+ # A server should be resized from manual to auto disk config
name = rand_name('server')
- resp, server = self.client.create_server(name,
- self.image_ref,
- self.flavor_ref,
- disk_config='MANUAL')
+ resp, server = self.create_server_with_extras(name,
+ self.image_ref,
+ self.flavor_ref,
+ disk_config='MANUAL')
#Wait for the server to become active
self.client.wait_for_server_status(server['id'], 'ACTIVE')
@@ -162,12 +124,12 @@
@attr(type='positive')
@unittest.skipUnless(compute.RESIZE_AVAILABLE, 'Resize not available.')
def test_resize_server_from_auto_to_manual(self):
- """A server should be resized from auto to manual disk config"""
+ # A server should be resized from auto to manual disk config
name = rand_name('server')
- resp, server = self.client.create_server(name,
- self.image_ref,
- self.flavor_ref,
- disk_config='AUTO')
+ resp, server = self.create_server_with_extras(name,
+ self.image_ref,
+ self.flavor_ref,
+ disk_config='AUTO')
#Wait for the server to become active
self.client.wait_for_server_status(server['id'], 'ACTIVE')
diff --git a/tempest/tests/compute/servers/test_list_server_filters.py b/tempest/tests/compute/servers/test_list_server_filters.py
index 5e4b267..5eea24f 100644
--- a/tempest/tests/compute/servers/test_list_server_filters.py
+++ b/tempest/tests/compute/servers/test_list_server_filters.py
@@ -15,15 +15,16 @@
# License for the specific language governing permissions and limitations
# under the License.
-import nose.plugins.skip
+
+import nose
from nose.plugins.attrib import attr
+import nose.plugins.skip
import unittest2 as unittest
-from tempest import exceptions
from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
from tempest.tests.compute import base
from tempest.tests import utils
-import nose
class ListServerFiltersTest(object):
@@ -93,7 +94,7 @@
@utils.skip_unless_attr('multiple_images', 'Only one image found')
@attr(type='positive')
def test_list_servers_filter_by_image(self):
- """Filter the list of servers by image"""
+ # Filter the list of servers by image
params = {'image': self.image_ref}
resp, body = self.client.list_servers(params)
servers = body['servers']
@@ -104,7 +105,7 @@
@attr(type='positive')
def test_list_servers_filter_by_flavor(self):
- """Filter the list of servers by flavor"""
+ # Filter the list of servers by flavor
params = {'flavor': self.flavor_ref_alt}
resp, body = self.client.list_servers(params)
servers = body['servers']
@@ -115,7 +116,7 @@
@attr(type='positive')
def test_list_servers_filter_by_server_name(self):
- """Filter the list of servers by server name"""
+ # Filter the list of servers by server name
params = {'name': self.s1_name}
resp, body = self.client.list_servers(params)
servers = body['servers']
@@ -126,7 +127,7 @@
@attr(type='positive')
def test_list_servers_filter_by_server_status(self):
- """Filter the list of servers by server status"""
+ # Filter the list of servers by server status
params = {'status': 'active'}
resp, body = self.client.list_servers(params)
servers = body['servers']
@@ -137,7 +138,7 @@
@attr(type='positive')
def test_list_servers_limit_results(self):
- """Verify only the expected number of servers are returned"""
+ # Verify only the expected number of servers are returned
params = {'limit': 1}
resp, servers = self.client.list_servers_with_detail(params)
self.assertEqual(1, len(servers['servers']))
@@ -145,7 +146,7 @@
@utils.skip_unless_attr('multiple_images', 'Only one image found')
@attr(type='positive')
def test_list_servers_detailed_filter_by_image(self):
- """Filter the detailed list of servers by image"""
+ # Filter the detailed list of servers by image
params = {'image': self.image_ref}
resp, body = self.client.list_servers_with_detail(params)
servers = body['servers']
@@ -156,7 +157,7 @@
@attr(type='positive')
def test_list_servers_detailed_filter_by_flavor(self):
- """Filter the detailed list of servers by flavor"""
+ # Filter the detailed list of servers by flavor
params = {'flavor': self.flavor_ref_alt}
resp, body = self.client.list_servers_with_detail(params)
servers = body['servers']
@@ -167,7 +168,7 @@
@attr(type='positive')
def test_list_servers_detailed_filter_by_server_name(self):
- """Filter the detailed list of servers by server name"""
+ # Filter the detailed list of servers by server name
params = {'name': self.s1_name}
resp, body = self.client.list_servers_with_detail(params)
servers = body['servers']
@@ -178,7 +179,7 @@
@attr(type='positive')
def test_list_servers_detailed_filter_by_server_status(self):
- """Filter the detailed list of servers by server status"""
+ # Filter the detailed list of servers by server status
params = {'status': 'active'}
resp, body = self.client.list_servers_with_detail(params)
servers = body['servers']
@@ -189,7 +190,7 @@
@attr(type='positive')
def test_list_servers_detailed_limit_results(self):
- """Verify only the expected number of detailed results are returned"""
+ # Verify only the expected number of detailed results are returned
params = {'limit': 1}
resp, servers = self.client.list_servers_with_detail(params)
self.assertEqual(1, len(servers['servers']))
diff --git a/tempest/tests/compute/servers/test_list_servers_negative.py b/tempest/tests/compute/servers/test_list_servers_negative.py
index b2d053d..e98d8f0 100644
--- a/tempest/tests/compute/servers/test_list_servers_negative.py
+++ b/tempest/tests/compute/servers/test_list_servers_negative.py
@@ -18,14 +18,14 @@
import re
import sys
-import unittest2 as unittest
import nose
+import unittest2 as unittest
-from tempest import exceptions
-from tempest import openstack
+from tempest import clients
from tempest.common.utils.data_utils import rand_name
-from tempest.tests.compute.base import BaseComputeTest
+from tempest import exceptions
from tempest.tests import compute
+from tempest.tests.compute.base import BaseComputeTest
class ListServersNegativeTest(BaseComputeTest):
@@ -40,12 +40,12 @@
if cls.config.compute.allow_tenant_isolation:
creds = cls._get_isolated_creds()
username, tenant_name, password = creds
- cls.alt_manager = openstack.Manager(username=username,
- password=password,
- tenant_name=tenant_name)
+ cls.alt_manager = clients.Manager(username=username,
+ password=password,
+ tenant_name=tenant_name)
else:
# Use the alt_XXX credentials in the config file
- cls.alt_manager = openstack.AltManager()
+ cls.alt_manager = clients.AltManager()
cls.alt_client = cls.alt_manager.servers_client
# Under circumstances when there is not a tenant/user
@@ -97,7 +97,7 @@
cls.deleted_fixtures.append(srv)
def test_list_servers_with_a_deleted_server(self):
- """Verify deleted servers do not show by default in list servers"""
+ # Verify deleted servers do not show by default in list servers
# List servers and verify server not returned
resp, body = self.client.list_servers()
servers = body['servers']
@@ -108,7 +108,7 @@
self.assertEqual([], actual)
def test_list_servers_by_non_existing_image(self):
- """Listing servers for a non existing image returns empty list"""
+ # Listing servers for a non-existing image returns an empty list
non_existing_image = '1234abcd-zzz0-aaa9-ppp3-0987654abcde'
resp, body = self.client.list_servers(dict(image=non_existing_image))
servers = body['servers']
@@ -116,7 +116,7 @@
self.assertEqual([], servers)
def test_list_servers_by_non_existing_flavor(self):
- """Listing servers by non existing flavor returns empty list"""
+ # Listing servers by a non-existing flavor returns an empty list
non_existing_flavor = 1234
resp, body = self.client.list_servers(dict(flavor=non_existing_flavor))
servers = body['servers']
@@ -124,7 +124,7 @@
self.assertEqual([], servers)
def test_list_servers_by_non_existing_server_name(self):
- """Listing servers for a non existent server name returns empty list"""
+ # Listing servers for a non-existent server name returns an empty list
non_existing_name = 'junk_server_1234'
resp, body = self.client.list_servers(dict(name=non_existing_name))
servers = body['servers']
@@ -133,7 +133,7 @@
@unittest.skip("Skip until bug 1061712 is resolved")
def test_list_servers_status_non_existing(self):
- """Return an empty list when invalid status is specified"""
+ # Return an empty list when invalid status is specified
non_existing_status = 'BALONEY'
resp, body = self.client.list_servers(dict(status=non_existing_status))
servers = body['servers']
@@ -141,31 +141,31 @@
self.assertEqual([], servers)
def test_list_servers_by_limits(self):
- """List servers by specifying limits"""
+ # List servers by specifying limits
resp, body = self.client.list_servers({'limit': 1})
self.assertEqual('200', resp['status'])
self.assertEqual(1, len(body['servers']))
def test_list_servers_by_limits_greater_than_actual_count(self):
- """List servers by specifying a greater value for limit"""
+ # List servers by specifying a greater value for limit
resp, body = self.client.list_servers({'limit': 100})
self.assertEqual('200', resp['status'])
self.assertEqual(len(self.existing_fixtures), len(body['servers']))
def test_list_servers_by_limits_pass_string(self):
- """Return an error if a string value is passed for limit"""
+ # Return an error if a string value is passed for limit
self.assertRaises(exceptions.BadRequest, self.client.list_servers,
{'limit': 'testing'})
def test_list_servers_by_limits_pass_negative_value(self):
- """Return an error if a negative value for limit is passed"""
+ # Return an error if a negative value for limit is passed
self.assertRaises(exceptions.BadRequest, self.client.list_servers,
{'limit': -1})
def test_list_servers_by_changes_since(self):
- """Servers are listed by specifying changes-since date"""
- resp, body = self.client.list_servers(
- {'changes-since': '2011-01-01T12:34:00Z'})
+ # Servers are listed by specifying changes-since date
+ changes_since = {'changes-since': '2011-01-01T12:34:00Z'}
+ resp, body = self.client.list_servers(changes_since)
self.assertEqual('200', resp['status'])
# changes-since returns all instances, including deleted.
num_expected = (len(self.existing_fixtures) +
@@ -173,19 +173,19 @@
self.assertEqual(num_expected, len(body['servers']))
def test_list_servers_by_changes_since_invalid_date(self):
- """Return an error when invalid date format is passed"""
+ # Return an error when invalid date format is passed
self.assertRaises(exceptions.BadRequest, self.client.list_servers,
{'changes-since': '2011/01/01'})
def test_list_servers_by_changes_since_future_date(self):
- """Return an empty list when a date in the future is passed"""
- resp, body = self.client.list_servers(
- {'changes-since': '2051-01-01T12:34:00Z'})
+ # Return an empty list when a date in the future is passed
+ changes_since = {'changes-since': '2051-01-01T12:34:00Z'}
+ resp, body = self.client.list_servers(changes_since)
self.assertEqual('200', resp['status'])
self.assertEqual(0, len(body['servers']))
def test_list_servers_detail_server_is_deleted(self):
- """Server details are not listed for a deleted server"""
+ # Server details are not listed for a deleted server
deleted_ids = [s['id'] for s in self.deleted_fixtures]
resp, body = self.client.list_servers_with_detail()
servers = body['servers']
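The recurring edit in the hunks above swaps one-line test docstrings for comments. A likely motivation, not stated in this diff itself, is that nose's verbose output prints a test's docstring in place of its method name when one is present, so keeping the summary as a comment leaves the method name visible in runner output. A minimal sketch of the convention, using a hypothetical test class that is not part of this change::

    import unittest2 as unittest


    class DocstringConventionExample(unittest.TestCase):
        """Hypothetical class illustrating the summary-as-comment style."""

        def test_summary_as_docstring(self):
            """Verbose runner output may print this line, not the method name."""
            self.assertEqual(2, 1 + 1)

        def test_summary_as_comment(self):
            # Runner output keeps pointing at the method name.
            self.assertEqual(2, 1 + 1)
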
diff --git a/tempest/tests/compute/servers/test_server_actions.py b/tempest/tests/compute/servers/test_server_actions.py
index dd6b02f..f4e62b1 100644
--- a/tempest/tests/compute/servers/test_server_actions.py
+++ b/tempest/tests/compute/servers/test_server_actions.py
@@ -21,12 +21,12 @@
from nose.plugins.attrib import attr
import unittest2 as unittest
-import tempest.config
-from tempest import exceptions
from tempest.common.utils.data_utils import rand_name
from tempest.common.utils.linux.remote_client import RemoteClient
-from tempest.tests.compute import base
+import tempest.config
+from tempest import exceptions
from tempest.tests import compute
+from tempest.tests.compute import base
class ServerActionsTestBase(object):
@@ -36,9 +36,9 @@
def setUp(self):
self.name = rand_name('server')
- resp, server = self.client.create_server(self.name,
- self.image_ref,
- self.flavor_ref)
+ resp, server = self.create_server_with_extras(self.name,
+ self.image_ref,
+ self.flavor_ref)
self.server_id = server['id']
self.password = server['adminPass']
self.client.wait_for_server_status(self.server_id, 'ACTIVE')
@@ -50,7 +50,7 @@
@unittest.skipUnless(compute.CHANGE_PASSWORD_AVAILABLE,
'Change password not available.')
def test_change_server_password(self):
- """The server's password should be set to the provided password"""
+ # The server's password should be set to the provided password
new_password = 'Newpass1234'
resp, body = self.client.change_password(self.server_id, new_password)
self.assertEqual(202, resp.status)
@@ -64,7 +64,7 @@
@attr(type='smoke')
def test_reboot_server_hard(self):
- """ The server should be power cycled """
+ # The server should be power cycled
if self.run_ssh:
# Get the time the server was last rebooted,
# waiting for one minute as the 'who' command lacks seconds precision
@@ -86,7 +86,7 @@
@attr(type='smoke')
@unittest.skip('Until bug 1014647 is dealt with.')
def test_reboot_server_soft(self):
- """The server should be signaled to reboot gracefully"""
+ # The server should be signaled to reboot gracefully
if self.run_ssh:
# Get the time the server was last rebooted,
# waiting for one minute as the 'who' command lacks seconds precision
@@ -107,7 +107,7 @@
@attr(type='smoke')
def test_rebuild_server(self):
- """ The server should be rebuilt using the provided image and data """
+ # The server should be rebuilt using the provided image and data
meta = {'rebuild': 'server'}
new_name = rand_name('server')
file_contents = 'Test server rebuild.'
@@ -124,7 +124,7 @@
self.assertEqual(self.server_id, rebuilt_server['id'])
rebuilt_image_id = rebuilt_server['image']['id']
self.assertTrue(self.image_ref_alt.endswith(rebuilt_image_id))
- self.assertEqual(self.flavor_ref, rebuilt_server['flavor']['id'])
+ self.assertEqual(self.flavor_ref, int(rebuilt_server['flavor']['id']))
#Verify the server properties after the rebuild completes
self.client.wait_for_server_status(rebuilt_server['id'], 'ACTIVE')
@@ -141,10 +141,8 @@
@attr(type='smoke')
@unittest.skipIf(not resize_available, 'Resize not available.')
def test_resize_server_confirm(self):
- """
- The server's RAM and disk space should be modified to that of
- the provided flavor
- """
+ # The server's RAM and disk space should be modified to that of
+ # the provided flavor
resp, server = self.client.resize(self.server_id, self.flavor_ref_alt)
self.assertEqual(202, resp.status)
@@ -159,10 +157,8 @@
@attr(type='positive')
@unittest.skipIf(not resize_available, 'Resize not available.')
def test_resize_server_revert(self):
- """
- The server's RAM and disk space should return to its original
- values after a resize is reverted
- """
+ # The server's RAM and disk space should return to its original
+ # values after a resize is reverted
resp, server = self.client.resize(self.server_id, self.flavor_ref_alt)
self.assertEqual(202, resp.status)
@@ -186,18 +182,14 @@
@attr(type='negative')
def test_reboot_nonexistent_server_soft(self):
- """
- Negative Test: The server reboot on non existent server should return
- an error
- """
+ # Negative Test: The server reboot on a non existent server should return
+ # an error
self.assertRaises(exceptions.NotFound, self.client.reboot, 999, 'SOFT')
@attr(type='negative')
def test_rebuild_nonexistent_server(self):
- """
- Negative test: The server rebuild for a non existing server should not
- be allowed
- """
+ # Negative test: The server rebuild for a non existing server
+ # should not be allowed
meta = {'rebuild': 'server'}
new_name = rand_name('server')
file_contents = 'Test server rebuild.'
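Several hunks in this file replace direct self.client.create_server(...) calls with self.create_server_with_extras(...). The helper itself is not shown in this diff; a plausible sketch, assuming it lives on the compute base test class and simply records the servers it creates so a teardown hook can reclaim them::

    # Hypothetical sketch only; the real helper lives in
    # tempest.tests.compute.base and is not shown in this diff.
    class ServerTrackingMixin(object):

        servers = []

        @classmethod
        def create_server_with_extras(cls, name, image_ref, flavor_ref,
                                      **kwargs):
            # Delegate to the servers client, then record the new server so
            # teardown can delete whatever a failing test leaves behind.
            resp, server = cls.servers_client.create_server(
                name, image_ref, flavor_ref, **kwargs)
            cls.servers.append(server)
            return resp, server
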
diff --git a/tempest/tests/compute/servers/test_server_addresses.py b/tempest/tests/compute/servers/test_server_addresses.py
index 164548d..6e819a2 100644
--- a/tempest/tests/compute/servers/test_server_addresses.py
+++ b/tempest/tests/compute/servers/test_server_addresses.py
@@ -17,8 +17,8 @@
from nose.plugins.attrib import attr
-from tempest import exceptions
from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
from tempest.tests.compute.base import BaseComputeTest
@@ -42,7 +42,7 @@
@attr(type='negative', category='server-addresses')
def test_list_server_addresses_invalid_server_id(self):
- """List addresses request should fail if server id not in system"""
+ # List addresses request should fail if server id not in system
try:
self.client.list_addresses('999')
@@ -54,7 +54,7 @@
@attr(type='negative', category='server-addresses')
def test_list_server_addresses_by_network_neg(self):
- """List addresses by network should fail if network name not valid"""
+ # List addresses by network should fail if network name not valid
try:
self.client.list_addresses_by_network(self.server['id'], 'invalid')
@@ -66,8 +66,8 @@
@attr(type='smoke', category='server-addresses')
def test_list_server_addresses(self):
- """All public and private addresses for
- a server should be returned"""
+ # All public and private addresses for
+ # a server should be returned
resp, addresses = self.client.list_addresses(self.server['id'])
self.assertEqual('200', resp['status'])
@@ -83,8 +83,8 @@
@attr(type='smoke', category='server-addresses')
def test_list_server_addresses_by_network(self):
- """Providing a network type should filter
- the addresses return by that type"""
+ # Providing a network type should filter
+ # the addresses returned by that type
resp, addresses = self.client.list_addresses(self.server['id'])
diff --git a/tempest/tests/compute/servers/test_server_basic_ops.py b/tempest/tests/compute/servers/test_server_basic_ops.py
index 04b1451..3453d86 100644
--- a/tempest/tests/compute/servers/test_server_basic_ops.py
+++ b/tempest/tests/compute/servers/test_server_basic_ops.py
@@ -79,7 +79,7 @@
try:
self.compute_client.security_group_rules.create(
self.secgroup.id, **ruleset)
- except:
+ except Exception:
self.fail("Failed to create rule in security group.")
def test_003_boot_instance(self):
diff --git a/tempest/tests/compute/servers/test_server_metadata.py b/tempest/tests/compute/servers/test_server_metadata.py
index 4022dad..6c44c3c 100644
--- a/tempest/tests/compute/servers/test_server_metadata.py
+++ b/tempest/tests/compute/servers/test_server_metadata.py
@@ -49,7 +49,7 @@
self.assertEqual(resp.status, 200)
def test_list_server_metadata(self):
- """All metadata key/value pairs for a server should be returned"""
+ # All metadata key/value pairs for a server should be returned
resp, resp_metadata = self.client.list_server_metadata(self.server_id)
#Verify the expected metadata items are in the list
@@ -58,7 +58,7 @@
self.assertEqual(expected, resp_metadata)
def test_set_server_metadata(self):
- """The server's metadata should be replaced with the provided values"""
+ # The server's metadata should be replaced with the provided values
#Create a new set of metadata for the server
req_metadata = {'meta2': 'data2', 'meta3': 'data3'}
resp, metadata = self.client.set_server_metadata(self.server_id,
@@ -71,25 +71,24 @@
self.assertEqual(resp_metadata, req_metadata)
def test_server_create_metadata_key_too_long(self):
- """
- Attempt to start a server with a meta-data key that is > 255 characters
- Try a few values
- """
+ # Attempt to start a server with a meta-data key that is > 255
+ # characters
+
+ # Try a few values
for sz in [256, 257, 511, 1023]:
key = "k" * sz
meta = {key: 'data1'}
name = rand_name('server')
self.assertRaises(exceptions.OverLimit,
- self.client.create_server, name, self.image_ref,
+ self.create_server_with_extras,
+ name, self.image_ref,
self.flavor_ref, meta=meta)
# no teardown - all creates should fail
def test_update_server_metadata(self):
- """
- The server's metadata values should be updated to the
- provided values
- """
+ # The server's metadata values should be updated to the
+ # provided values
meta = {'key1': 'alt1', 'key3': 'value3'}
resp, metadata = self.client.update_server_metadata(self.server_id,
meta)
@@ -101,13 +100,13 @@
self.assertEqual(expected, resp_metadata)
def test_get_server_metadata_item(self):
- """ The value for a specic metadata key should be returned """
+ # The value for a specific metadata key should be returned
resp, meta = self.client.get_server_metadata_item(self.server_id,
'key2')
self.assertTrue('value2', meta['key2'])
def test_set_server_metadata_item(self):
- """The item's value should be updated to the provided value"""
+ # The item's value should be updated to the provided value
#Update the metadata value
meta = {'nova': 'alt'}
resp, body = self.client.set_server_metadata_item(self.server_id,
@@ -120,7 +119,7 @@
self.assertEqual(expected, resp_metadata)
def test_delete_server_metadata_item(self):
- """The metadata value/key pair should be deleted from the server"""
+ # The metadata value/key pair should be deleted from the server
resp, meta = self.client.delete_server_metadata_item(self.server_id,
'key1')
self.assertEqual(204, resp.status)
@@ -132,22 +131,21 @@
@attr(type='negative')
def test_get_nonexistant_server_metadata_item(self):
- """Negative test: GET on nonexistant server should not succeed"""
+ # Negative test: GET on nonexistant server should not succeed
try:
resp, meta = self.client.get_server_metadata_item(999, 'test2')
- except:
+ except Exception:
pass
else:
self.fail('GET on nonexistant server should not succeed')
@attr(type='negative')
def test_list_nonexistant_server_metadata(self):
- """
- Negative test:List metadata on a non existant server should not succeed
- """
+ # Negative test: List metadata on a non existant server should
+ # not succeed
try:
resp, metadata = self.client.list_server_metadata(999)
- except:
+ except Exception:
pass
else:
self.fail('List metadata on a non existant server should'
@@ -155,13 +153,12 @@
@attr(type='negative')
def test_set_nonexistant_server_metadata(self):
- """
- Negative test: Set metadata on a non existant server should not succeed
- """
+ # Negative test: Set metadata on a non existant server should not
+ # succeed
meta = {'meta1': 'data1'}
try:
resp, metadata = self.client.set_server_metadata(999, meta)
- except:
+ except Exception:
pass
else:
self.fail('Set metadata on a non existant server should'
@@ -169,29 +166,25 @@
@attr(type='negative')
def test_update_nonexistant_server_metadata(self):
- """
- Negative test: An update should not happen for a nonexistant image
- """
+ # Negative test: An update should not happen for a nonexistant server
meta = {'key1': 'value1', 'key2': 'value2'}
try:
resp, metadata = self.client.update_server_metadata(999, meta)
- except:
+ except Exception:
pass
else:
self.fail('An update should not happen for a nonexistant server')
@attr(type='negative')
def test_delete_nonexistant_server_metadata_item(self):
- """
- Negative test: Should not be able to delete metadata item from a
- nonexistant server
- """
+ # Negative test: Should not be able to delete metadata item from a
+ # nonexistant server
meta = {'d': 'delvalue'}
#Delete the metadata item
try:
resp, metadata = self.client.delete_server_metadata_item(999, 'd')
- except:
+ except Exception:
pass
else:
self.fail('A delete should not happen for a nonexistant server')
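The negative metadata tests above keep the try/except/else pattern while tightening the bare except clauses to except Exception. An equivalent, more compact form using assertRaises is sketched below, assuming the client raises exceptions.NotFound for an unknown server id, which this diff does not itself establish::

    # Sketch only: assumes get_server_metadata_item raises
    # exceptions.NotFound for an unknown server id.
    def test_get_nonexistant_server_metadata_item(self):
        self.assertRaises(exceptions.NotFound,
                          self.client.get_server_metadata_item,
                          999, 'test2')
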
diff --git a/tempest/tests/compute/servers/test_server_personality.py b/tempest/tests/compute/servers/test_server_personality.py
index 75457d1..6ea0959 100644
--- a/tempest/tests/compute/servers/test_server_personality.py
+++ b/tempest/tests/compute/servers/test_server_personality.py
@@ -19,18 +19,16 @@
from nose.plugins.attrib import attr
-from tempest import exceptions
from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
from tempest.tests.compute import base
class ServerPersonalityTestBase(object):
def test_personality_files_exceed_limit(self):
- """
- Server creation should fail if greater than the maximum allowed
- number of files are injected into the server.
- """
+ # Server creation should fail if greater than the maximum allowed
+ # number of files are injected into the server.
name = rand_name('server')
file_contents = 'This is a test file.'
personality = []
@@ -41,8 +39,9 @@
personality.append({'path': path,
'contents': base64.b64encode(file_contents)})
try:
- self.client.create_server(name, self.image_ref, self.flavor_ref,
- personality=personality)
+ self.create_server_with_extras(name, self.image_ref,
+ self.flavor_ref,
+ personality=personality)
except exceptions.OverLimit:
pass
else:
@@ -50,10 +49,8 @@
@attr(type='positive')
def test_can_create_server_with_max_number_personality_files(self):
- """
- Server should be created successfully if maximum allowed number of
- files is injected into the server during creation.
- """
+ # Server should be created successfully if maximum allowed number of
+ # files is injected into the server during creation.
try:
name = rand_name('server')
file_contents = 'This is a test file.'
@@ -61,16 +58,16 @@
max_file_limit = \
self.user_client.get_specific_absolute_limit("maxPersonality")
- personality = []
+ person = []
for i in range(0, int(max_file_limit)):
path = 'etc/test' + str(i) + '.txt'
- personality.append({
+ person.append({
'path': path,
'contents': base64.b64encode(file_contents),
})
- resp, server = self.client.create_server(name, self.image_ref,
- self.flavor_ref,
- personality=personality)
+ resp, server = self.create_server_with_extras(name, self.image_ref,
+ self.flavor_ref,
+ personality=person)
self.assertEqual('202', resp['status'])
except Exception:
diff --git a/tempest/tests/compute/servers/test_servers.py b/tempest/tests/compute/servers/test_servers.py
index 4132b02..3566ef4 100644
--- a/tempest/tests/compute/servers/test_servers.py
+++ b/tempest/tests/compute/servers/test_servers.py
@@ -25,37 +25,40 @@
@attr(type='positive')
def test_create_server_with_admin_password(self):
- """
- If an admin password is provided on server creation, the server's root
- password should be set to that password.
- """
+ # If an admin password is provided on server creation, the server's
+ # root password should be set to that password.
try:
+ server = None
name = rand_name('server')
- resp, server = self.client.create_server(name, self.image_ref,
- self.flavor_ref,
- adminPass='testpassword')
+ resp, server = self.create_server_with_extras(name, self.image_ref,
+ self.flavor_ref,
+ adminPass='test'
+ 'password')
#Verify the password is set correctly in the response
self.assertEqual('testpassword', server['adminPass'])
#Teardown
finally:
- self.client.delete_server(server['id'])
+ if server:
+ self.client.delete_server(server['id'])
def test_create_with_existing_server_name(self):
- """Creating a server with a name that already exists is allowed"""
+ # Creating a server with a name that already exists is allowed
try:
+ id1 = None
+ id2 = None
server_name = rand_name('server')
- resp, server = self.client.create_server(server_name,
- self.image_ref,
- self.flavor_ref)
+ resp, server = self.create_server_with_extras(server_name,
+ self.image_ref,
+ self.flavor_ref)
self.client.wait_for_server_status(server['id'], 'ACTIVE')
id1 = server['id']
- resp, server = self.client.create_server(server_name,
- self.image_ref,
- self.flavor_ref)
+ resp, server = self.create_server_with_extras(server_name,
+ self.image_ref,
+ self.flavor_ref)
self.client.wait_for_server_status(server['id'], 'ACTIVE')
id2 = server['id']
self.assertNotEqual(id1, id2, "Did not create a new server")
@@ -71,17 +74,18 @@
@attr(type='positive')
def test_create_specify_keypair(self):
- """Specify a keypair while creating a server"""
+ # Specify a keypair while creating a server
try:
+ server = None
key_name = rand_name('key')
resp, keypair = self.keypairs_client.create_keypair(key_name)
resp, body = self.keypairs_client.list_keypairs()
server_name = rand_name('server')
- resp, server = self.client.create_server(server_name,
- self.image_ref,
- self.flavor_ref,
- key_name=key_name)
+ resp, server = self.create_server_with_extras(server_name,
+ self.image_ref,
+ self.flavor_ref,
+ key_name=key_name)
self.assertEqual('202', resp['status'])
self.client.wait_for_server_status(server['id'], 'ACTIVE')
resp, server = self.client.get_server(server['id'])
@@ -92,11 +96,12 @@
@attr(type='positive')
def test_update_server_name(self):
- """The server name should be changed to the the provided value"""
+ # The server name should be changed to the provided value
try:
+ server = None
name = rand_name('server')
- resp, server = self.client.create_server(name, self.image_ref,
- self.flavor_ref)
+ resp, server = self.create_server_with_extras(name, self.image_ref,
+ self.flavor_ref)
self.client.wait_for_server_status(server['id'], 'ACTIVE')
#Update the server with a new name
@@ -111,17 +116,17 @@
#Teardown
finally:
- self.client.delete_server(server['id'])
+ if server:
+ self.client.delete_server(server['id'])
@attr(type='positive')
def test_update_access_server_address(self):
- """
- The server's access addresses should reflect the provided values
- """
+ # The server's access addresses should reflect the provided values
try:
+ server = None
name = rand_name('server')
- resp, server = self.client.create_server(name, self.image_ref,
- self.flavor_ref)
+ resp, server = self.create_server_with_extras(name, self.image_ref,
+ self.flavor_ref)
self.client.wait_for_server_status(server['id'], 'ACTIVE')
#Update the IPv4 and IPv6 access addresses
@@ -138,13 +143,14 @@
#Teardown
finally:
- self.client.delete_server(server['id'])
+ if server:
+ self.client.delete_server(server['id'])
def test_delete_server_while_in_building_state(self):
- """Delete a server while it's VM state is Building"""
+ # Delete a server while its VM state is Building
name = rand_name('server')
- resp, server = self.client.create_server(name, self.image_ref,
- self.flavor_ref)
+ resp, server = self.create_server_with_extras(name, self.image_ref,
+ self.flavor_ref)
self.client.wait_for_server_status(server['id'], 'BUILD')
resp, _ = self.client.delete_server(server['id'])
self.assertEqual('204', resp['status'])
@@ -156,9 +162,27 @@
super(ServersTestJSON, cls).setUpClass()
cls.client = cls.servers_client
+ def tearDown(self):
+ # clean up any remaining servers and wait for them to fully
+ # delete. This is done because delete calls are async, and if
+ # deletes are running slow we could very well overrun system
+ # memory
+ self.clear_servers()
+
+ super(ServersTestJSON, self).tearDown()
+
class ServersTestXML(base.BaseComputeTestXML, ServersTestBase):
@classmethod
def setUpClass(cls):
super(ServersTestXML, cls).setUpClass()
cls.client = cls.servers_client
+
+ def tearDown(self):
+ # clean up any remaining servers and wait for them to fully
+ # delete. This is done because delete calls are async, and if
+ # deletes are running slow we could very well overrun system
+ # memory
+ self.clear_servers()
+
+ super(ServersTestXML, self).tearDown()
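Both new tearDown methods rely on a clear_servers() helper that is not shown in this diff. A rough sketch of what such a helper could look like, assuming the base class tracks created servers (as in the create_server_with_extras sketch earlier) and that delete_server and wait_for_server_termination behave as used elsewhere in these tests::

    def clear_servers(self):
        # Hypothetical sketch: delete every tracked server, then wait for
        # the asynchronous deletes to finish before the next test starts.
        for server in self.servers:
            try:
                self.servers_client.delete_server(server['id'])
            except Exception:
                pass
        for server in self.servers:
            try:
                self.servers_client.wait_for_server_termination(server['id'])
            except Exception:
                pass
        # Empty the shared list in place so all references see the cleanup.
        del self.servers[:]
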
diff --git a/tempest/tests/compute/servers/test_servers_negative.py b/tempest/tests/compute/servers/test_servers_negative.py
index fb88d1e..970f6bc 100644
--- a/tempest/tests/compute/servers/test_servers_negative.py
+++ b/tempest/tests/compute/servers/test_servers_negative.py
@@ -17,13 +17,13 @@
import sys
+import nose
from nose.plugins.attrib import attr
import unittest2 as unittest
-import nose
-from tempest import exceptions
-from tempest import openstack
+from tempest import clients
from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
from tempest.tests.compute.base import BaseComputeTest
@@ -35,15 +35,16 @@
super(ServersNegativeTest, cls).setUpClass()
cls.client = cls.servers_client
cls.img_client = cls.images_client
- cls.alt_os = openstack.AltManager()
+ cls.alt_os = clients.AltManager()
cls.alt_client = cls.alt_os.servers_client
@attr(type='negative')
def test_server_name_blank(self):
- """Create a server with name parameter empty"""
+ # Create a server with name parameter empty
try:
- resp, server = self.client.create_server('', self.image_ref,
- self.flavor_ref)
+ resp, server = self.create_server_with_extras('',
+ self.image_ref,
+ self.flavor_ref)
except exceptions.BadRequest:
pass
else:
@@ -51,16 +52,16 @@
@attr(type='negative')
def test_personality_file_contents_not_encoded(self):
- """Use an unencoded file when creating a server with personality"""
+ # Use an unencoded file when creating a server with personality
file_contents = 'This is a test file.'
- personality = [{'path': '/etc/testfile.txt',
- 'contents': file_contents}]
+ person = [{'path': '/etc/testfile.txt',
+ 'contents': file_contents}]
try:
- resp, server = self.client.create_server('test',
- self.image_ref,
- self.flavor_ref,
- personality=personality)
+ resp, server = self.create_server_with_extras('test',
+ self.image_ref,
+ self.flavor_ref,
+ personality=person)
except exceptions.BadRequest:
pass
else:
@@ -68,10 +69,10 @@
@attr(type='negative')
def test_create_with_invalid_image(self):
- """Create a server with an unknown image"""
+ # Create a server with an unknown image
try:
- resp, server = self.client.create_server('fail', -1,
- self.flavor_ref)
+ resp, server = self.create_server_with_extras('fail', -1,
+ self.flavor_ref)
except exceptions.BadRequest:
pass
else:
@@ -79,9 +80,9 @@
@attr(type='negative')
def test_create_with_invalid_flavor(self):
- """Create a server with an unknown flavor"""
+ # Create a server with an unknown flavor
try:
- self.client.create_server('fail', self.image_ref, -1)
+ self.create_server_with_extras('fail', self.image_ref, -1)
except exceptions.BadRequest:
pass
else:
@@ -89,14 +90,14 @@
@attr(type='negative')
def test_invalid_access_ip_v4_address(self):
- """An access IPv4 address must match a valid address pattern"""
- accessIPv4 = '1.1.1.1.1.1'
+ # An access IPv4 address must match a valid address pattern
+ IPv4 = '1.1.1.1.1.1'
name = rand_name('server')
try:
- resp, server = self.client.create_server(name,
- self.image_ref,
- self.flavor_ref,
- accessIPv4=accessIPv4)
+ resp, server = self.create_server_with_extras(name,
+ self.image_ref,
+ self.flavor_ref,
+ accessIPv4=IPv4)
except exceptions.BadRequest:
pass
else:
@@ -104,14 +105,14 @@
@attr(type='negative')
def test_invalid_ip_v6_address(self):
- """An access IPv6 address must match a valid address pattern"""
- accessIPv6 = 'notvalid'
+ # An access IPv6 address must match a valid address pattern
+ IPv6 = 'notvalid'
name = rand_name('server')
try:
- resp, server = self.client.create_server(name,
- self.image_ref,
- self.flavor_ref,
- accessIPv6=accessIPv6)
+ resp, server = self.create_server_with_extras(name,
+ self.image_ref,
+ self.flavor_ref,
+ accessIPv6=IPv6)
except exceptions.BadRequest:
pass
else:
@@ -119,11 +120,11 @@
@attr(type='negative')
def test_reboot_deleted_server(self):
- """Reboot a deleted server"""
+ # Reboot a deleted server
self.name = rand_name('server')
- resp, create_server = self.client.create_server(self.name,
- self.image_ref,
- self.flavor_ref)
+ resp, create_server = self.create_server_with_extras(self.name,
+ self.image_ref,
+ self.flavor_ref)
self.server_id = create_server['id']
self.client.delete_server(self.server_id)
self.client.wait_for_server_termination(self.server_id)
@@ -136,11 +137,11 @@
@attr(type='negative')
def test_rebuild_deleted_server(self):
- """Rebuild a deleted server"""
+ # Rebuild a deleted server
self.name = rand_name('server')
- resp, create_server = self.client.create_server(self.name,
- self.image_ref,
- self.flavor_ref)
+ resp, create_server = self.create_server_with_extras(self.name,
+ self.image_ref,
+ self.flavor_ref)
self.server_id = create_server['id']
self.client.delete_server(self.server_id)
self.client.wait_for_server_termination(self.server_id)
@@ -154,55 +155,60 @@
@attr(type='negative')
def test_create_numeric_server_name(self):
- """Create a server with a numeric name"""
+ # Create a server with a numeric name
server_name = 12345
- self.assertRaises(exceptions.BadRequest, self.client.create_server,
+ self.assertRaises(exceptions.BadRequest,
+ self.create_server_with_extras,
server_name, self.image_ref, self.flavor_ref)
@attr(type='negative')
def test_create_server_name_length_exceeds_256(self):
- """Create a server with name length exceeding 256 characters"""
+ # Create a server with name length exceeding 256 characters
server_name = 'a' * 256
- self.assertRaises(exceptions.BadRequest, self.client.create_server,
+ self.assertRaises(exceptions.BadRequest,
+ self.create_server_with_extras,
server_name, self.image_ref, self.flavor_ref)
@attr(type='negative')
def test_create_with_invalid_network_uuid(self):
- """Pass invalid network uuid while creating a server"""
+ # Pass invalid network uuid while creating a server
server_name = rand_name('server')
networks = [{'fixed_ip': '10.0.1.1', 'uuid':'a-b-c-d-e-f-g-h-i-j'}]
- self.assertRaises(exceptions.BadRequest, self.client.create_server,
+ self.assertRaises(exceptions.BadRequest,
+ self.create_server_with_extras,
server_name, self.image_ref, self.flavor_ref,
networks=networks)
@attr(type='negative')
def test_create_with_non_existant_keypair(self):
- """Pass a non existant keypair while creating a server"""
+ # Pass a non existant keypair while creating a server
key_name = rand_name('key')
server_name = rand_name('server')
- self.assertRaises(exceptions.BadRequest, self.client.create_server,
+ self.assertRaises(exceptions.BadRequest,
+ self.create_server_with_extras,
server_name, self.image_ref, self.flavor_ref,
key_name=key_name)
@unittest.skip("Until Bug 1004007 is fixed")
@attr(type='negative')
def test_create_server_metadata_exceeds_length_limit(self):
- """Pass really long metadata while creating a server"""
+ # Pass really long metadata while creating a server
server_name = rand_name('server')
metadata = {'a': 'b' * 260}
- self.assertRaises(exceptions.OverLimit, self.client.create_server,
+ self.assertRaises(exceptions.OverLimit,
+ self.create_server_with_extras,
server_name, self.image_ref, self.flavor_ref,
meta=metadata)
@attr(type='negative')
def test_update_name_of_non_existent_server(self):
- """Update name of a non-existent server"""
+ # Update name of a non-existent server
server_name = rand_name('server')
new_name = rand_name('server') + '_updated'
@@ -212,7 +218,7 @@
@attr(type='negative')
def test_update_server_set_empty_name(self):
- """Update name of the server to an empty string"""
+ # Update name of the server to an empty string
server_name = rand_name('server')
new_name = ''
@@ -222,7 +228,7 @@
@attr(type='negative')
def test_update_server_of_another_tenant(self):
- """Update name of a server that belongs to another tenant"""
+ # Update name of a server that belongs to another tenant
server = self.create_server()
new_name = server['id'] + '_new'
@@ -232,7 +238,7 @@
@attr(type='negative')
def test_update_server_name_length_exceeds_256(self):
- """Update name of server exceed the name length limit"""
+ # Update name of server to exceed the name length limit
server = self.create_server()
new_name = 'a' * 256
@@ -243,14 +249,14 @@
@attr(type='negative')
def test_delete_non_existent_server(self):
- """Delete a non existent server"""
+ # Delete a non existent server
self.assertRaises(exceptions.NotFound, self.client.delete_server,
'999erra43')
@attr(type='negative')
def test_delete_a_server_of_another_tenant(self):
- """Delete a server that belongs to another tenant"""
+ # Delete a server that belongs to another tenant
try:
server = self.create_server()
self.assertRaises(exceptions.NotFound,
@@ -261,13 +267,13 @@
@attr(type='negative')
def test_delete_server_pass_negative_id(self):
- """Pass an invalid string parameter to delete server"""
+ # Pass an invalid string parameter to delete server
self.assertRaises(exceptions.NotFound, self.client.delete_server, -1)
@attr(type='negative')
def test_delete_server_pass_id_exceeding_length_limit(self):
- """Pass a server ID that exceeds length limit to delete server"""
+ # Pass a server ID that exceeds length limit to delete server
self.assertRaises(exceptions.NotFound, self.client.delete_server,
sys.maxint + 1)
diff --git a/tempest/tests/compute/servers/test_servers_whitebox.py b/tempest/tests/compute/servers/test_servers_whitebox.py
index 980f6cf..3ff4df6 100644
--- a/tempest/tests/compute/servers/test_servers_whitebox.py
+++ b/tempest/tests/compute/servers/test_servers_whitebox.py
@@ -15,12 +15,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest import exceptions
-from tempest import whitebox
-from tempest.tests.identity.base import BaseIdentityAdminTest
+import nose
from nose.plugins.attrib import attr
-import nose
+from tempest import exceptions
+from tempest.tests.identity.base import BaseIdentityAdminTest
+from tempest import whitebox
@attr(type='whitebox')
@@ -53,11 +53,11 @@
continue
def test_create_server_vcpu_quota_full(self):
- """Disallow server creation when tenant's vcpu quota is full"""
+ # Disallow server creation when tenant's vcpu quota is full
quotas = self.meta.tables['quotas']
stmt = quotas.select().where(
- quotas.c.project_id == self.tenant_id).where(
- quotas.c.resource == 'cores')
+ quotas.c.project_id == self.tenant_id).where(
+ quotas.c.resource == 'cores')
result = self.connection.execute(stmt).first()
# Set vcpu quota for tenant if not already set
@@ -85,11 +85,11 @@
self.connection.execute(stmt, autocommit=True)
def test_create_server_memory_quota_full(self):
- """Disallow server creation when tenant's memory quota is full"""
+ # Disallow server creation when tenant's memory quota is full
quotas = self.meta.tables['quotas']
stmt = quotas.select().where(
- quotas.c.project_id == self.tenant_id).where(
- quotas.c.resource == 'ram')
+ quotas.c.project_id == self.tenant_id).where(
+ quotas.c.resource == 'ram')
result = self.connection.execute(stmt).first()
# Set memory quota for tenant if not already set
@@ -117,15 +117,15 @@
self.connection.execute(stmt, autocommit=True)
def update_state(self, server_id, vm_state, task_state, deleted=False):
- """Update states of an instance in database for validation"""
+ """Update states of an instance in database for validation."""
if not task_state:
task_state = 'NULL'
instances = self.meta.tables['instances']
stmt = instances.update().where(instances.c.uuid == server_id).values(
- deleted=deleted,
- vm_state=vm_state,
- task_state=task_state)
+ deleted=deleted,
+ vm_state=vm_state,
+ task_state=task_state)
self.connection.execute(stmt, autocommit=True)
def _test_delete_server_base(self, vm_state, task_state):
@@ -149,7 +149,7 @@
self.assertEqual(1, result.deleted)
self.assertEqual('deleted', result.vm_state)
self.assertEqual(None, result.task_state)
- except:
+ except Exception:
self.fail("Should be able to delete a server when vm_state=%s and "
"task_state=%s" % (vm_state, task_state))
@@ -164,90 +164,88 @@
self.assertRaises(exceptions.Unauthorized,
self.client.delete_server,
self.shared_server['id'])
- except:
+ except Exception:
self.fail("Should not allow delete server when vm_state=%s and "
"task_state=%s" % (vm_state, task_state))
finally:
self.update_state(self.shared_server['id'], 'active', None)
def test_delete_server_when_vm_eq_building_task_eq_networking(self):
- """Delete server when instance states are building,networking"""
+ # Delete server when instance states are building,networking
self._test_delete_server_base('building', 'networking')
def test_delete_server_when_vm_eq_building_task_eq_bdm(self):
- """
- Delete server when instance states are building,block device mapping
- """
+ # Delete server when instance states are building,block device mapping
self._test_delete_server_base('building', 'block_device_mapping')
def test_delete_server_when_vm_eq_building_task_eq_spawning(self):
- """Delete server when instance states are building,spawning"""
+ # Delete server when instance states are building,spawning
self._test_delete_server_base('building', 'spawning')
def test_delete_server_when_vm_eq_active_task_eq_image_backup(self):
- """Delete server when instance states are active,image_backup"""
+ # Delete server when instance states are active,image_backup
self._test_delete_server_base('active', 'image_backup')
def test_delete_server_when_vm_eq_active_task_eq_rebuilding(self):
- """Delete server when instance states are active,rebuilding"""
+ # Delete server when instance states are active,rebuilding
self._test_delete_server_base('active', 'rebuilding')
def test_delete_server_when_vm_eq_error_task_eq_spawning(self):
- """Delete server when instance states are error,spawning"""
+ # Delete server when instance states are error,spawning
self._test_delete_server_base('error', 'spawning')
def test_delete_server_when_vm_eq_resized_task_eq_resize_prep(self):
- """Delete server when instance states are resized,resize_prep"""
+ # Delete server when instance states are resized,resize_prep
self._test_delete_server_403_base('resized', 'resize_prep')
def test_delete_server_when_vm_eq_resized_task_eq_resize_migrating(self):
- """Delete server when instance states are resized,resize_migrating"""
+ # Delete server when instance states are resized,resize_migrating
self._test_delete_server_403_base('resized', 'resize_migrating')
def test_delete_server_when_vm_eq_resized_task_eq_resize_migrated(self):
- """Delete server when instance states are resized,resize_migrated"""
+ # Delete server when instance states are resized,resize_migrated
self._test_delete_server_403_base('resized', 'resize_migrated')
def test_delete_server_when_vm_eq_resized_task_eq_resize_finish(self):
- """Delete server when instance states are resized,resize_finish"""
+ # Delete server when instance states are resized,resize_finish
self._test_delete_server_403_base('resized', 'resize_finish')
def test_delete_server_when_vm_eq_resized_task_eq_resize_reverting(self):
- """Delete server when instance states are resized,resize_reverting"""
+ # Delete server when instance states are resized,resize_reverting
self._test_delete_server_403_base('resized', 'resize_reverting')
def test_delete_server_when_vm_eq_resized_task_eq_resize_confirming(self):
- """Delete server when instance states are resized,resize_confirming"""
+ # Delete server when instance states are resized,resize_confirming
self._test_delete_server_403_base('resized', 'resize_confirming')
def test_delete_server_when_vm_eq_active_task_eq_resize_verify(self):
- """Delete server when instance states are active,resize_verify"""
+ # Delete server when instance states are active,resize_verify
self._test_delete_server_base('active', 'resize_verify')
def test_delete_server_when_vm_eq_active_task_eq_rebooting(self):
- """Delete server when instance states are active,rebooting"""
+ # Delete server when instance states are active,rebooting
self._test_delete_server_base('active', 'rebooting')
def test_delete_server_when_vm_eq_building_task_eq_deleting(self):
- """Delete server when instance states are building,deleting"""
+ # Delete server when instance states are building,deleting
self._test_delete_server_base('building', 'deleting')
def test_delete_server_when_vm_eq_active_task_eq_deleting(self):
- """Delete server when instance states are active,deleting"""
+ # Delete server when instance states are active,deleting
self._test_delete_server_base('active', 'deleting')
def test_delete_server_when_vm_eq_error_task_eq_none(self):
- """Delete server when instance states are error,None"""
+ # Delete server when instance states are error,None
self._test_delete_server_base('error', None)
def test_delete_server_when_vm_eq_resized_task_eq_none(self):
- """Delete server when instance states are resized,None"""
+ # Delete server when instance states are resized,None
self._test_delete_server_403_base('resized', None)
def test_delete_server_when_vm_eq_error_task_eq_resize_prep(self):
- """Delete server when instance states are error,resize_prep"""
+ # Delete server when instance states are error,resize_prep
self._test_delete_server_base('error', 'resize_prep')
def test_delete_server_when_vm_eq_error_task_eq_error(self):
- """Delete server when instance states are error,error"""
+ # Delete server when instance states are error,error
self._test_delete_server_base('error', 'error')
diff --git a/tempest/tests/compute/test_authorization.py b/tempest/tests/compute/test_authorization.py
index 12fa94b..78661d1 100644
--- a/tempest/tests/compute/test_authorization.py
+++ b/tempest/tests/compute/test_authorization.py
@@ -16,15 +16,16 @@
# under the License.
from nose.plugins.attrib import attr
-from nose.tools import raises
from nose import SkipTest
+from nose.tools import raises
import unittest2 as unittest
-from tempest import openstack
-from tempest.common.utils.data_utils import rand_name, parse_image_id
+from tempest import clients
+from tempest.common.utils.data_utils import parse_image_id
+from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
-from tempest.tests.compute.base import BaseComputeTest
from tempest.tests import compute
+from tempest.tests.compute.base import BaseComputeTest
class AuthorizationTest(BaseComputeTest):
@@ -46,12 +47,12 @@
if cls.config.compute.allow_tenant_isolation:
creds = cls._get_isolated_creds()
username, tenant_name, password = creds
- cls.alt_manager = openstack.Manager(username=username,
- password=password,
- tenant_name=tenant_name)
+ cls.alt_manager = clients.Manager(username=username,
+ password=password,
+ tenant_name=tenant_name)
else:
# Use the alt_XXX credentials in the config file
- cls.alt_manager = openstack.AltManager()
+ cls.alt_manager = clients.AltManager()
cls.alt_client = cls.alt_manager.servers_client
cls.alt_images_client = cls.alt_manager.images_client
@@ -104,41 +105,37 @@
@raises(exceptions.NotFound)
@attr(type='negative')
def test_get_server_for_alt_account_fails(self):
- """A GET request for a server on another user's account should fail"""
+ # A GET request for a server on another user's account should fail
self.alt_client.get_server(self.server['id'])
@raises(exceptions.NotFound)
@attr(type='negative')
def test_delete_server_for_alt_account_fails(self):
- """A DELETE request for another user's server should fail"""
+ # A DELETE request for another user's server should fail
self.alt_client.delete_server(self.server['id'])
@raises(exceptions.NotFound)
@attr(type='negative')
def test_update_server_for_alt_account_fails(self):
- """An update server request for another user's server should fail"""
+ # An update server request for another user's server should fail
self.alt_client.update_server(self.server['id'], name='test')
@raises(exceptions.NotFound)
@attr(type='negative')
def test_list_server_addresses_for_alt_account_fails(self):
- """A list addresses request for another user's server should fail"""
+ # A list addresses request for another user's server should fail
self.alt_client.list_addresses(self.server['id'])
@raises(exceptions.NotFound)
@attr(type='negative')
def test_list_server_addresses_by_network_for_alt_account_fails(self):
- """
- A list address/network request for another user's server should fail
- """
+ # A list address/network request for another user's server should fail
server_id = self.server['id']
self.alt_client.list_addresses_by_network(server_id, 'public')
def test_list_servers_with_alternate_tenant(self):
- """
- A list on servers from one tenant should not
- show on alternate tenant
- """
+ # A list of servers from one tenant should not
+ # be visible to an alternate tenant
#Listing servers from alternate tenant
alt_server_ids = []
resp, body = self.alt_client.list_servers()
@@ -148,47 +145,45 @@
@raises(exceptions.NotFound)
@attr(type='negative')
def test_change_password_for_alt_account_fails(self):
- """A change password request for another user's server should fail"""
+ # A change password request for another user's server should fail
self.alt_client.change_password(self.server['id'], 'newpass')
@raises(exceptions.NotFound)
@attr(type='negative')
def test_reboot_server_for_alt_account_fails(self):
- """A reboot request for another user's server should fail"""
+ # A reboot request for another user's server should fail
self.alt_client.reboot(self.server['id'], 'HARD')
@raises(exceptions.NotFound)
@attr(type='negative')
def test_rebuild_server_for_alt_account_fails(self):
- """A rebuild request for another user's server should fail"""
+ # A rebuild request for another user's server should fail
self.alt_client.rebuild(self.server['id'], self.image_ref_alt)
@raises(exceptions.NotFound)
@attr(type='negative')
def test_resize_server_for_alt_account_fails(self):
- """A resize request for another user's server should fail"""
+ # A resize request for another user's server should fail
self.alt_client.resize(self.server['id'], self.flavor_ref_alt)
@raises(exceptions.NotFound)
@attr(type='negative')
def test_create_image_for_alt_account_fails(self):
- """A create image request for another user's server should fail"""
+ # A create image request for another user's server should fail
self.alt_images_client.create_image(self.server['id'], 'testImage')
@raises(exceptions.BadRequest)
@attr(type='negative')
def test_create_server_with_unauthorized_image(self):
- """Server creation with another user's image should fail"""
+ # Server creation with another user's image should fail
self.alt_client.create_server('test', self.image['id'],
self.flavor_ref)
@raises(exceptions.BadRequest)
@attr(type='negative')
def test_create_server_fails_when_tenant_incorrect(self):
- """
- A create server request should fail if the tenant id does not match
- the current user
- """
+ # A create server request should fail if the tenant id does not match
+ # the current user
saved_base_url = self.alt_client.base_url
try:
# Change the base URL to impersonate another user
@@ -202,10 +197,8 @@
@raises(exceptions.BadRequest)
@attr(type='negative')
def test_create_keypair_in_analt_user_tenant(self):
- """
- A create keypair request should fail if the tenant id does not match
- the current user
- """
+ # A create keypair request should fail if the tenant id does not match
+ # the current user
#POST keypair with other user tenant
k_name = rand_name('keypair-')
self.alt_keypairs_client._set_auth()
@@ -227,34 +220,33 @@
@raises(exceptions.NotFound)
@attr(type='negative')
def test_get_keypair_of_alt_account_fails(self):
- """A GET request for another user's keypair should fail"""
+ # A GET request for another user's keypair should fail
self.alt_keypairs_client.get_keypair(self.keypairname)
@raises(exceptions.NotFound)
@attr(type='negative')
+ @unittest.skip("Skipped until the Bug #1086980 is resolved")
def test_delete_keypair_of_alt_account_fails(self):
- """A DELETE request for another user's keypair should fail"""
+ # A DELETE request for another user's keypair should fail
self.alt_keypairs_client.delete_keypair(self.keypairname)
@raises(exceptions.NotFound)
@attr(type='negative')
def test_get_image_for_alt_account_fails(self):
- """A GET request for an image on another user's account should fail"""
+ # A GET request for an image on another user's account should fail
self.alt_images_client.get_image(self.image['id'])
@raises(exceptions.NotFound)
@attr(type='negative')
def test_delete_image_for_alt_account_fails(self):
- """A DELETE request for another user's image should fail"""
+ # A DELETE request for another user's image should fail
self.alt_images_client.delete_image(self.image['id'])
@raises(exceptions.BadRequest)
@attr(type='negative')
def test_create_security_group_in_analt_user_tenant(self):
- """
- A create security group request should fail if the tenant id does not
- match the current user
- """
+ # A create security group request should fail if the tenant id does not
+ # match the current user
#POST security group with other user tenant
s_name = rand_name('security-')
s_description = rand_name('security')
@@ -279,23 +271,21 @@
@raises(exceptions.NotFound)
@attr(type='negative')
def test_get_security_group_of_alt_account_fails(self):
- """A GET request for another user's security group should fail"""
+ # A GET request for another user's security group should fail
self.alt_security_client.get_security_group(self.security_group['id'])
@raises(exceptions.NotFound)
@attr(type='negative')
def test_delete_security_group_of_alt_account_fails(self):
- """A DELETE request for another user's security group should fail"""
+ # A DELETE request for another user's security group should fail
self.alt_security_client.delete_security_group(
self.security_group['id'])
@raises(exceptions.BadRequest)
@attr(type='negative')
def test_create_security_group_rule_in_analt_user_tenant(self):
- """
- A create security group rule request should fail if the tenant id
- does not match the current user
- """
+ # A create security group rule request should fail if the tenant id
+ # does not match the current user
#POST security group rule with other user tenant
parent_group_id = self.security_group['id']
ip_protocol = 'icmp'
@@ -323,34 +313,31 @@
"happen if the tenant id does not match the"
" current user")
- @unittest.skip("Skipped until the Bug #1001118 is resolved")
@raises(exceptions.NotFound)
@attr(type='negative')
def test_delete_security_group_rule_of_alt_account_fails(self):
- """
- A DELETE request for another user's security group rule
- should fail
- """
+ # A DELETE request for another user's security group rule
+ # should fail
self.alt_security_client.delete_security_group_rule(self.rule['id'])
@raises(exceptions.NotFound)
@attr(type='negative')
def test_set_metadata_of_alt_account_server_fails(self):
- """ A set metadata for another user's server should fail """
+ # A set metadata for another user's server should fail
req_metadata = {'meta1': 'data1', 'meta2': 'data2'}
self.alt_client.set_server_metadata(self.server['id'], req_metadata)
@raises(exceptions.NotFound)
@attr(type='negative')
def test_set_metadata_of_alt_account_image_fails(self):
- """ A set metadata for another user's image should fail """
+ # A set metadata for another user's image should fail
req_metadata = {'meta1': 'value1', 'meta2': 'value2'}
self.alt_images_client.set_image_metadata(self.image['id'],
req_metadata)
@attr(type='negative')
def test_get_metadata_of_alt_account_server_fails(self):
- """ A get metadata for another user's server should fail """
+ # A get metadata for another user's server should fail
req_metadata = {'meta1': 'data1'}
self.client.set_server_metadata(self.server['id'], req_metadata)
try:
@@ -365,7 +352,7 @@
@attr(type='negative')
def test_get_metadata_of_alt_account_image_fails(self):
- """ A get metadata for another user's image should fail """
+ # A get metadata for another user's image should fail
req_metadata = {'meta1': 'value1'}
self.images_client.set_image_metadata(self.image['id'],
req_metadata)
@@ -381,7 +368,7 @@
@attr(type='negative')
def test_delete_metadata_of_alt_account_server_fails(self):
- """ A delete metadata for another user's server should fail """
+ # A delete metadata for another user's server should fail
req_metadata = {'meta1': 'data1'}
self.client.set_server_metadata(self.server['id'], req_metadata)
try:
@@ -396,7 +383,7 @@
@attr(type='negative')
def test_delete_metadata_of_alt_account_image_fails(self):
- """ A delete metadata for another user's image should fail """
+ # A delete metadata for another user's image should fail
req_metadata = {'meta1': 'data1'}
self.images_client.set_image_metadata(self.image['id'],
req_metadata)
@@ -414,8 +401,6 @@
@raises(exceptions.NotFound)
@attr(type='negative')
def test_get_console_output_of_alt_account_server_fails(self):
- """
- A Get Console Output for another user's server should fail
- """
+ # A Get Console Output for another user's server should fail
self.alt_console_outputs_client.get_console_output(self.server['id'],
10)
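test_create_server_fails_when_tenant_incorrect temporarily rewrites the alt client's base_url to impersonate another tenant and restores it afterwards. A condensed sketch of that save/restore pattern; the real URLs come from the service catalog, and the surrounding test uses the @raises decorator rather than assertRaises::

    # Condensed, hypothetical sketch of the endpoint swap.
    saved_base_url = self.alt_client.base_url
    try:
        # Point the alt client at the primary tenant's compute endpoint.
        self.alt_client.base_url = self.client.base_url
        self.assertRaises(exceptions.BadRequest,
                          self.alt_client.create_server,
                          'test', self.image['id'], self.flavor_ref)
    finally:
        # Always restore the original endpoint so later tests are unaffected.
        self.alt_client.base_url = saved_base_url
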
diff --git a/tempest/tests/compute/test_extensions.py b/tempest/tests/compute/test_extensions.py
index 552c58c..829e295 100644
--- a/tempest/tests/compute/test_extensions.py
+++ b/tempest/tests/compute/test_extensions.py
@@ -24,7 +24,7 @@
@attr(type='positive')
def test_list_extensions(self):
- """List of all extensions"""
+ # List of all extensions
resp, extensions = self.client.list_extensions()
self.assertTrue("extensions" in extensions)
self.assertEqual(200, resp.status)
diff --git a/tempest/tests/compute/test_live_block_migration.py b/tempest/tests/compute/test_live_block_migration.py
index 92c2cf6..915868c 100644
--- a/tempest/tests/compute/test_live_block_migration.py
+++ b/tempest/tests/compute/test_live_block_migration.py
@@ -15,19 +15,19 @@
# License for the specific language governing permissions and limitations
# under the License.
-import nose
-import unittest2 as unittest
-from nose.plugins.attrib import attr
import random
import string
-from tempest.tests.compute import base
+import nose
+from nose.plugins.attrib import attr
+import unittest2 as unittest
+
from tempest.common.utils.linux.remote_client import RemoteClient
from tempest import config
from tempest import exceptions
-
from tempest.services.compute.json.hosts_client import HostsClientJSON
from tempest.services.compute.json.servers_client import ServersClientJSON
+from tempest.tests.compute import base
@attr(category='live-migration')
@@ -104,7 +104,7 @@
@unittest.skipIf(not live_migration_available,
'Block Live migration not available')
def test_001_live_block_migration(self):
- """Live block migrate an instance to another host"""
+ # Live block migrate an instance to another host
if len(self._get_compute_hostnames()) < 2:
raise nose.SkipTest(
"Less than 2 compute nodes, skipping migration test.")
@@ -120,7 +120,7 @@
@unittest.skipIf(not live_migration_available,
'Block Live migration not available')
def test_002_invalid_host_for_migration(self):
- """Migrating to an invalid host should not change the status"""
+ # Migrating to an invalid host should not change the status
server_id = self._get_an_active_server()
target_host = self._get_non_existing_host_name()
diff --git a/tempest/tests/compute/test_quotas.py b/tempest/tests/compute/test_quotas.py
index d07064f..3dc2515 100644
--- a/tempest/tests/compute/test_quotas.py
+++ b/tempest/tests/compute/test_quotas.py
@@ -33,7 +33,7 @@
@attr(type='smoke')
def test_get_default_quotas(self):
- """User can get the default quota set for it's tenant"""
+ # User can get the default quota set for its tenant
expected_quota_set = {'injected_file_content_bytes': 10240,
'metadata_items': 128, 'injected_files': 5,
'ram': 51200, 'floating_ips': 10,
@@ -45,5 +45,5 @@
resp, quota_set = self.client.get_quota_set(self.tenant_id)
self.assertEqual(200, resp.status)
self.assertSequenceEqual(expected_quota_set, quota_set)
- except:
+ except Exception:
self.fail("Quota set for tenant did not have default limits")
diff --git a/tempest/tests/compute/volumes/test_attach_volume.py b/tempest/tests/compute/volumes/test_attach_volume.py
index cb695c1..9581026 100644
--- a/tempest/tests/compute/volumes/test_attach_volume.py
+++ b/tempest/tests/compute/volumes/test_attach_volume.py
@@ -18,10 +18,10 @@
from nose.plugins.attrib import attr
import unittest2 as unittest
-import tempest.config
+from tempest import clients
from tempest.common.utils.data_utils import rand_name
from tempest.common.utils.linux.remote_client import RemoteClient
-from tempest import openstack
+import tempest.config
from tempest.tests.compute import base
@@ -70,10 +70,8 @@
@attr(type='positive')
@unittest.skipIf(not run_ssh, 'SSH required for this test')
def test_attach_detach_volume(self):
- """
- Stop and Start a server with an attached volume, ensuring that
- the volume remains attached.
- """
+ # Stop and Start a server with an attached volume, ensuring that
+ # the volume remains attached.
server, volume = self._create_and_attach()
attached = True
diff --git a/tempest/tests/compute/volumes/test_volumes_get.py b/tempest/tests/compute/volumes/test_volumes_get.py
index 0a207b9..afb00cd 100644
--- a/tempest/tests/compute/volumes/test_volumes_get.py
+++ b/tempest/tests/compute/volumes/test_volumes_get.py
@@ -25,7 +25,7 @@
@attr(type='smoke')
def test_volume_create_get_delete(self):
- """CREATE, GET, DELETE Volume"""
+ # CREATE, GET, DELETE Volume
volume = None
try:
v_name = rand_name('Volume-%s-') % self._interface
@@ -71,7 +71,7 @@
@attr(type='positive')
def test_volume_get_metadata_none(self):
- """CREATE, GET empty metadata dict"""
+ # CREATE, GET empty metadata dict
try:
v_name = rand_name('Volume-')
#Create volume
diff --git a/tempest/tests/compute/volumes/test_volumes_list.py b/tempest/tests/compute/volumes/test_volumes_list.py
index 2c09add..fef9c8d 100644
--- a/tempest/tests/compute/volumes/test_volumes_list.py
+++ b/tempest/tests/compute/volumes/test_volumes_list.py
@@ -32,7 +32,7 @@
"""
def test_volume_list(self):
- """Should return the list of Volumes"""
+ # Should return the list of Volumes
# Fetch all Volumes
resp, fetched_list = self.client.list_volumes()
self.assertEqual(200, resp.status)
@@ -47,7 +47,7 @@
for m_vol in missing_volumes))
def test_volume_list_with_details(self):
- """Should return the list of Volumes with details"""
+ # Should return the list of Volumes with details
#Fetch all Volumes
resp, fetched_list = self.client.list_volumes_with_detail()
self.assertEqual(200, resp.status)
@@ -83,7 +83,7 @@
resp, volume = cls.client.get_volume(volume['id'])
cls.volume_list.append(volume)
cls.volume_id_list.append(volume['id'])
- except:
+ except Exception:
if cls.volume_list:
# We could not create all the volumes, though we were able
# to create *some* of the volumes. This is typically
@@ -129,7 +129,7 @@
resp, volume = cls.client.get_volume(volume['id'])
cls.volume_list.append(volume)
cls.volume_id_list.append(volume['id'])
- except:
+ except Exception:
if cls.volume_list:
# We could not create all the volumes, though we were able
# to create *some* of the volumes. This is typically
diff --git a/tempest/tests/compute/volumes/test_volumes_negative.py b/tempest/tests/compute/volumes/test_volumes_negative.py
index fa14640..d2ad30e 100644
--- a/tempest/tests/compute/volumes/test_volumes_negative.py
+++ b/tempest/tests/compute/volumes/test_volumes_negative.py
@@ -18,8 +18,8 @@
from nose.plugins.attrib import attr
from nose.tools import raises
-from tempest import exceptions
from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
from tempest.tests.compute import base
@@ -27,7 +27,7 @@
@attr(type='negative')
def test_volume_get_nonexistant_volume_id(self):
- """Negative: Should not be able to get details of nonexistant volume"""
+ # Negative: Should not be able to get details of nonexistant volume
#Creating a nonexistant volume id
volume_id_list = list()
resp, body = self.client.list_volumes()
@@ -48,7 +48,7 @@
@attr(type='negative')
def test_volume_delete_nonexistant_volume_id(self):
- """Negative: Should not be able to delete nonexistant Volume"""
+ # Negative: Should not be able to delete nonexistant Volume
#Creating nonexistant volume id
volume_id_list = list()
resp, body = self.client.list_volumes()
@@ -69,10 +69,8 @@
@raises(exceptions.BadRequest)
@attr(type='negative')
def test_create_volume_with_invalid_size(self):
- """
- Negative: Should not be able to create volume with invalid size
- in request
- """
+ # Negative: Should not be able to create volume with invalid size
+ # in request
v_name = rand_name('Volume-')
metadata = {'Type': 'work'}
resp, volume = self.client.create_volume(size='#$%',
@@ -82,10 +80,8 @@
@raises(exceptions.BadRequest)
@attr(type='negative')
def test_create_volume_with_out_passing_size(self):
- """
- Negative: Should not be able to create volume without passing size
- in request
- """
+ # Negative: Should not be able to create volume without passing size
+ # in request
v_name = rand_name('Volume-')
metadata = {'Type': 'work'}
resp, volume = self.client.create_volume(size='',
@@ -95,9 +91,7 @@
@raises(exceptions.BadRequest)
@attr(type='negative')
def test_create_volume_with_size_zero(self):
- """
- Negative: Should not be able to create volume with size zero
- """
+ # Negative: Should not be able to create volume with size zero
v_name = rand_name('Volume-')
metadata = {'Type': 'work'}
resp, volume = self.client.create_volume(size='0',
@@ -107,33 +101,26 @@
@raises(exceptions.NotFound)
@attr(type='negative')
def test_get_invalid_volume_id(self):
- """
- Negative: Should not be able to get volume with invalid id
- """
+ # Negative: Should not be able to get volume with invalid id
resp, volume = self.client.get_volume('#$%%&^&^')
@raises(exceptions.NotFound)
@attr(type='negative')
def test_get_volume_without_passing_volume_id(self):
- """
- Negative: Should not be able to get volume when empty ID is passed
- """
+ # Negative: Should not be able to get volume when empty ID is passed
resp, volume = self.client.get_volume('')
@raises(exceptions.NotFound)
@attr(type='negative')
def test_delete_invalid_volume_id(self):
- """
- Negative: Should not be able to delete volume when invalid ID is passed
- """
+ # Negative: Should not be able to delete volume when invalid ID is
+ # passed
resp, volume = self.client.delete_volume('!@#$%^&*()')
@raises(exceptions.NotFound)
@attr(type='negative')
def test_delete_volume_without_passing_volume_id(self):
- """
- Negative: Should not be able to delete volume when empty ID is passed
- """
+ # Negative: Should not be able to delete volume when empty ID is passed
resp, volume = self.client.delete_volume('')
diff --git a/tempest/tests/identity/admin/test_roles.py b/tempest/tests/identity/admin/test_roles.py
index 637cee5..f0dd8d9 100644
--- a/tempest/tests/identity/admin/test_roles.py
+++ b/tempest/tests/identity/admin/test_roles.py
@@ -17,8 +17,8 @@
import unittest2 as unittest
-from tempest import exceptions
from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
from tempest.tests.identity import base
@@ -40,26 +40,26 @@
return (user, tenant, role)
def test_list_roles(self):
- """Return a list of all roles"""
+ # Return a list of all roles
resp, body = self.client.list_roles()
found = [role for role in body if role in self.data.roles]
self.assertTrue(any(found))
self.assertEqual(len(found), len(self.data.roles))
def test_list_roles_by_unauthorized_user(self):
- """Non admin user should not be able to list roles"""
+ # Non admin user should not be able to list roles
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.list_roles)
def test_list_roles_request_without_token(self):
- """Request to list roles without a valid token should fail"""
+ # Request to list roles without a valid token should fail
token = self.client.get_auth()
self.client.delete_token(token)
self.assertRaises(exceptions.Unauthorized, self.client.list_roles)
self.client.clear_auth()
def test_role_create_delete(self):
- """Role should be created, verified, and deleted"""
+ # Role should be created, verified, and deleted
role_name = rand_name('role-test-')
resp, body = self.client.create_role(role_name)
self.assertTrue('status' in resp)
@@ -79,11 +79,11 @@
self.assertFalse(any(found))
def test_role_create_blank_name(self):
- """Should not be able to create a role with a blank name"""
+ # Should not be able to create a role with a blank name
self.assertRaises(exceptions.BadRequest, self.client.create_role, '')
def test_role_create_duplicate(self):
- """Role names should be unique"""
+ # Role names should be unique
role_name = rand_name('role-dup-')
resp, body = self.client.create_role(role_name)
role1_id = body.get('id')
@@ -121,7 +121,7 @@
class UserRolesTestBase(RolesTestBase):
def test_assign_user_role(self):
- """Assign a role to a user on a tenant"""
+ # Assign a role to a user on a tenant
(user, tenant, role) = self._get_role_params()
self.client.assign_user_role(tenant['id'], user['id'], role['id'])
resp, roles = self.client.list_user_roles(tenant['id'], user['id'])
@@ -129,14 +129,14 @@
self.assertEquals(roles[0]['id'], role['id'])
def test_assign_user_role_by_unauthorized_user(self):
- """Non admin user should not be authorized to assign a role to user"""
+ # Non admin user should not be authorized to assign a role to user
(user, tenant, role) = self._get_role_params()
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.assign_user_role,
tenant['id'], user['id'], role['id'])
def test_assign_user_role_request_without_token(self):
- """Request to assign a role to a user without a valid token"""
+ # Request to assign a role to a user without a valid token
(user, tenant, role) = self._get_role_params()
token = self.client.get_auth()
self.client.delete_token(token)
@@ -146,32 +146,32 @@
self.client.clear_auth()
def test_assign_user_role_for_non_existent_user(self):
- """Attempt to assign a role to a non existent user should fail"""
+ # Attempt to assign a role to a non existent user should fail
(user, tenant, role) = self._get_role_params()
self.assertRaises(exceptions.NotFound, self.client.assign_user_role,
tenant['id'], 'junk-user-id-999', role['id'])
def test_assign_user_role_for_non_existent_role(self):
- """Attempt to assign a non existent role to user should fail"""
+ # Attempt to assign a non existent role to user should fail
(user, tenant, role) = self._get_role_params()
self.assertRaises(exceptions.NotFound, self.client.assign_user_role,
tenant['id'], user['id'], 'junk-role-id-12345')
def test_assign_user_role_for_non_existent_tenant(self):
- """Attempt to assign a role on a non existent tenant should fail"""
+ # Attempt to assign a role on a non existent tenant should fail
(user, tenant, role) = self._get_role_params()
self.assertRaises(exceptions.NotFound, self.client.assign_user_role,
'junk-tenant-1234', user['id'], role['id'])
def test_assign_duplicate_user_role(self):
- """Duplicate user role should not get assigned"""
+ # Duplicate user role should not get assigned
(user, tenant, role) = self._get_role_params()
self.client.assign_user_role(tenant['id'], user['id'], role['id'])
self.assertRaises(exceptions.Duplicate, self.client.assign_user_role,
tenant['id'], user['id'], role['id'])
def test_remove_user_role(self):
- """Remove a role assigned to a user on a tenant"""
+ # Remove a role assigned to a user on a tenant
(user, tenant, role) = self._get_role_params()
resp, user_role = self.client.assign_user_role(tenant['id'],
user['id'], role['id'])
@@ -180,7 +180,7 @@
self.assertEquals(resp['status'], '204')
def test_remove_user_role_by_unauthorized_user(self):
- """Non admin user should not be authorized to remove a user's role"""
+ # Non admin user should not be authorized to remove a user's role
(user, tenant, role) = self._get_role_params()
resp, user_role = self.client.assign_user_role(tenant['id'],
user['id'],
@@ -190,7 +190,7 @@
tenant['id'], user['id'], role['id'])
def test_remove_user_role_request_without_token(self):
- """Request to remove a user's role without a valid token"""
+ # Request to remove a user's role without a valid token
(user, tenant, role) = self._get_role_params()
resp, user_role = self.client.assign_user_role(tenant['id'],
user['id'],
@@ -203,7 +203,7 @@
self.client.clear_auth()
def test_remove_user_role_non_existant_user(self):
- """Attempt to remove a role from a non existent user should fail"""
+ # Attempt to remove a role from a non existent user should fail
(user, tenant, role) = self._get_role_params()
resp, user_role = self.client.assign_user_role(tenant['id'],
user['id'],
@@ -212,7 +212,7 @@
tenant['id'], 'junk-user-id-123', role['id'])
def test_remove_user_role_non_existant_role(self):
- """Attempt to delete a non existent role from a user should fail"""
+ # Attempt to delete a non existent role from a user should fail
(user, tenant, role) = self._get_role_params()
resp, user_role = self.client.assign_user_role(tenant['id'],
user['id'],
@@ -221,7 +221,7 @@
tenant['id'], user['id'], 'junk-user-role-123')
def test_remove_user_role_non_existant_tenant(self):
- """Attempt to remove a role from a non existent tenant should fail"""
+ # Attempt to remove a role from a non existent tenant should fail
(user, tenant, role) = self._get_role_params()
resp, user_role = self.client.assign_user_role(tenant['id'],
user['id'],
@@ -230,7 +230,7 @@
'junk-tenant-id-123', user['id'], role['id'])
def test_list_user_roles(self):
- """List roles assigned to a user on tenant"""
+ # List roles assigned to a user on tenant
(user, tenant, role) = self._get_role_params()
self.client.assign_user_role(tenant['id'], user['id'], role['id'])
resp, roles = self.client.list_user_roles(tenant['id'], user['id'])
@@ -238,7 +238,7 @@
self.assertEquals(role['id'], roles[0]['id'])
def test_list_user_roles_by_unauthorized_user(self):
- """Non admin user should not be authorized to list a user's roles"""
+ # Non admin user should not be authorized to list a user's roles
(user, tenant, role) = self._get_role_params()
self.client.assign_user_role(tenant['id'], user['id'], role['id'])
self.assertRaises(exceptions.Unauthorized,
@@ -246,7 +246,7 @@
user['id'])
def test_list_user_roles_request_without_token(self):
- """Request to list user's roles without a valid token should fail"""
+ # Request to list user's roles without a valid token should fail
(user, tenant, role) = self._get_role_params()
token = self.client.get_auth()
self.client.delete_token(token)
@@ -258,7 +258,7 @@
self.client.clear_auth()
def test_list_user_roles_for_non_existent_user(self):
- """Attempt to list roles of a non existent user should fail"""
+ # Attempt to list roles of a non existent user should fail
(user, tenant, role) = self._get_role_params()
self.assertRaises(exceptions.NotFound, self.client.list_user_roles,
tenant['id'], 'junk-role-aabbcc11')
diff --git a/tempest/tests/identity/admin/test_services.py b/tempest/tests/identity/admin/test_services.py
index da697ab..30dfeb0 100644
--- a/tempest/tests/identity/admin/test_services.py
+++ b/tempest/tests/identity/admin/test_services.py
@@ -17,15 +17,15 @@
import nose
-from tempest import exceptions
from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
from tempest.tests.identity import base
class ServicesTestBase(object):
def test_create_get_delete_service(self):
- """GET Service"""
+ # GET Service
try:
#Creating a Service
name = rand_name('service-')
diff --git a/tempest/tests/identity/admin/test_tenants.py b/tempest/tests/identity/admin/test_tenants.py
index 1b4ec18..8fba7e3 100644
--- a/tempest/tests/identity/admin/test_tenants.py
+++ b/tempest/tests/identity/admin/test_tenants.py
@@ -17,8 +17,8 @@
import unittest2 as unittest
-from tempest import exceptions
from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
from tempest.tests.identity import base
@@ -31,7 +31,7 @@
cls.data.tenants.append(tenant)
def test_list_tenants(self):
- """Return a list of all tenants"""
+ # Return a list of all tenants
resp, body = self.client.list_tenants()
found = [tenant for tenant in body if tenant in self.data.tenants]
self.assertTrue(any(found), 'List did not return newly created '
@@ -40,19 +40,19 @@
self.assertTrue(resp['status'].startswith('2'))
def test_list_tenants_by_unauthorized_user(self):
- """Non-admin user should not be able to list tenants"""
+ # Non-admin user should not be able to list tenants
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.list_tenants)
def test_list_tenant_request_without_token(self):
- """Request to list tenants without a valid token should fail"""
+ # Request to list tenants without a valid token should fail
token = self.client.get_auth()
self.client.delete_token(token)
self.assertRaises(exceptions.Unauthorized, self.client.list_tenants)
self.client.clear_auth()
def test_tenant_delete(self):
- """Create several tenants and delete them"""
+ # Create several tenants and delete them
tenants = []
for _ in xrange(5):
resp, body = self.client.create_tenant(rand_name('tenant-new'))
@@ -70,14 +70,14 @@
self.assertFalse(any(found_2), 'Tenants failed to delete')
def test_tenant_delete_by_unauthorized_user(self):
- """Non-admin user should not be able to delete a tenant"""
+ # Non-admin user should not be able to delete a tenant
tenant_name = rand_name('tenant-')
resp, tenant = self.client.create_tenant(tenant_name)
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.delete_tenant, tenant['id'])
def test_tenant_delete_request_without_token(self):
- """Request to delete a tenant without a valid token should fail"""
+ # Request to delete a tenant without a valid token should fail
tenant_name = rand_name('tenant-')
resp, tenant = self.client.create_tenant(tenant_name)
token = self.client.get_auth()
@@ -87,12 +87,12 @@
self.client.clear_auth()
def test_delete_non_existent_tenant(self):
- """Attempt to delete a non existent tenant should fail"""
+ # Attempt to delete a non existent tenant should fail
self.assertRaises(exceptions.NotFound, self.client.delete_tenant,
'junk_tenant_123456abc')
def test_tenant_create_with_description(self):
- """Create tenant with a description"""
+ # Create tenant with a description
tenant_name = rand_name('tenant-')
tenant_desc = rand_name('desc-')
resp, body = self.client.create_tenant(tenant_name,
@@ -110,7 +110,7 @@
self.client.delete_tenant(tenant_id)
def test_tenant_create_enabled(self):
- """Create a tenant that is enabled"""
+ # Create a tenant that is enabled
tenant_name = rand_name('tenant-')
resp, body = self.client.create_tenant(tenant_name, enabled=True)
tenant_id = body['id']
@@ -124,7 +124,7 @@
self.client.delete_tenant(tenant_id)
def test_tenant_create_not_enabled(self):
- """Create a tenant that is not enabled"""
+ # Create a tenant that is not enabled
tenant_name = rand_name('tenant-')
resp, body = self.client.create_tenant(tenant_name, enabled=False)
tenant_id = body['id']
@@ -140,7 +140,7 @@
self.client.delete_tenant(tenant_id)
def test_tenant_create_duplicate(self):
- """Tenant names should be unique"""
+ # Tenant names should be unique
tenant_name = rand_name('tenant-dup-')
resp, body = self.client.create_tenant(tenant_name)
tenant1_id = body.get('id')
@@ -155,13 +155,13 @@
self.client.delete_tenant(tenant1_id)
def test_create_tenant_by_unauthorized_user(self):
- """Non-admin user should not be authorized to create a tenant"""
+ # Non-admin user should not be authorized to create a tenant
tenant_name = rand_name('tenant-')
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.create_tenant, tenant_name)
def test_create_tenant_request_without_token(self):
- """Create tenant request without a token should not be authorized"""
+ # Create tenant request without a token should not be authorized
tenant_name = rand_name('tenant-')
token = self.client.get_auth()
self.client.delete_token(token)
@@ -170,18 +170,18 @@
self.client.clear_auth()
def test_create_tenant_with_empty_name(self):
- """Tenant name should not be empty"""
+ # Tenant name should not be empty
self.assertRaises(exceptions.BadRequest, self.client.create_tenant,
name='')
def test_create_tenants_name_length_over_64(self):
- """Tenant name length should not be greater than 64 characters"""
+ # Tenant name length should not be greater than 64 characters
tenant_name = 'a' * 65
self.assertRaises(exceptions.BadRequest, self.client.create_tenant,
tenant_name)
def test_tenant_update_name(self):
- """Update name attribute of a tenant"""
+ # Update name attribute of a tenant
t_name1 = rand_name('tenant-')
resp, body = self.client.create_tenant(t_name1)
t_id = body['id']
@@ -204,7 +204,7 @@
self.client.delete_tenant(t_id)
def test_tenant_update_desc(self):
- """Update description attribute of a tenant"""
+ # Update description attribute of a tenant
t_name = rand_name('tenant-')
t_desc = rand_name('desc-')
resp, body = self.client.create_tenant(t_name, description=t_desc)
@@ -228,7 +228,7 @@
self.client.delete_tenant(t_id)
def test_tenant_update_enable(self):
- """Update the enabled attribute of a tenant"""
+ # Update the enabled attribute of a tenant
t_name = rand_name('tenant-')
t_en = False
resp, body = self.client.create_tenant(t_name, enabled=t_en)
diff --git a/tempest/tests/identity/admin/test_users.py b/tempest/tests/identity/admin/test_users.py
index a724ce9..27a214c 100644
--- a/tempest/tests/identity/admin/test_users.py
+++ b/tempest/tests/identity/admin/test_users.py
@@ -18,8 +18,8 @@
from nose.plugins.attrib import attr
import unittest2 as unittest
-from tempest import exceptions
from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
from tempest.tests.identity import base
@@ -33,7 +33,7 @@
@attr(type='smoke')
def test_create_user(self):
- """Create a user"""
+ # Create a user
self.data.setup_test_tenant()
resp, user = self.client.create_user(self.alt_user, self.alt_password,
self.data.tenant['id'],
@@ -44,7 +44,7 @@
@attr(type='negative')
def test_create_user_by_unauthorized_user(self):
- """Non-admin should not be authorized to create a user"""
+ # Non-admin should not be authorized to create a user
self.data.setup_test_tenant()
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.create_user, self.alt_user,
@@ -53,7 +53,7 @@
@attr(type='negative')
def test_create_user_with_empty_name(self):
- """User with an empty name should not be created"""
+ # User with an empty name should not be created
self.data.setup_test_tenant()
self.assertRaises(exceptions.BadRequest, self.client.create_user, '',
self.alt_password, self.data.tenant['id'],
@@ -62,7 +62,7 @@
@attr(type='negative')
@unittest.skip("Until Bug 966251 is fixed")
def test_create_user_with_name_length_over_64(self):
- """Length of user name filed should be restricted to 64 characters"""
+ # Length of user name field should be restricted to 64 characters
self.data.setup_test_tenant()
self.assertRaises(exceptions.BadRequest, self.client.create_user,
'a' * 65, self.alt_password,
@@ -70,7 +70,7 @@
@attr(type='negative')
def test_create_user_with_duplicate_name(self):
- """Duplicate user should not be created"""
+ # Duplicate user should not be created
self.data.setup_test_user()
self.assertRaises(exceptions.Duplicate, self.client.create_user,
self.data.test_user, self.data.test_password,
@@ -79,7 +79,7 @@
@attr(type='negative')
@unittest.skip("Until Bug 999084 is fixed")
def test_create_user_with_empty_password(self):
- """User with an empty password should not be created"""
+ # User with an empty password should not be created
self.data.setup_test_tenant()
self.assertRaises(exceptions.BadRequest, self.client.create_user,
self.alt_user, '', self.data.tenant['id'],
@@ -88,7 +88,7 @@
@attr(type='nagative')
@unittest.skip("Until Bug 999084 is fixed")
def test_create_user_with_long_password(self):
- """User having password exceeding max length should not be created"""
+ # A user with a password exceeding the max length should not be created
self.data.setup_test_tenant()
self.assertRaises(exceptions.BadRequest, self.client.create_user,
self.alt_user, 'a' * 65, self.data.tenant['id'],
@@ -97,21 +97,21 @@
@attr(type='negative')
@unittest.skip("Until Bug 999084 is fixed")
def test_create_user_with_invalid_email_format(self):
- """Email format should be validated while creating a user"""
+ # Email format should be validated while creating a user
self.data.setup_test_tenant()
self.assertRaises(exceptions.BadRequest, self.client.create_user,
self.alt_user, '', self.data.tenant['id'], '12345')
@attr(type='negative')
def test_create_user_for_non_existant_tenant(self):
- """Attempt to create a user in a non-existent tenant should fail"""
+ # Attempt to create a user in a non-existent tenant should fail
self.assertRaises(exceptions.NotFound, self.client.create_user,
self.alt_user, self.alt_password, '49ffgg99999',
self.alt_email)
@attr(type='negative')
def test_create_user_request_without_a_token(self):
- """Request to create a user without a valid token should fail"""
+ # Request to create a user without a valid token should fail
self.data.setup_test_tenant()
# Get the token of the current client
token = self.client.get_auth()
@@ -126,7 +126,7 @@
@attr(type='smoke')
def test_delete_user(self):
- """Delete a user"""
+ # Delete a user
self.data.setup_test_tenant()
resp, user = self.client.create_user('user_1234', self.alt_password,
self.data.tenant['id'],
@@ -136,7 +136,7 @@
@attr(type='negative')
def test_delete_users_by_unauthorized_user(self):
- """Non admin user should not be authorized to delete a user"""
+ # Non admin user should not be authorized to delete a user
self.data.setup_test_user()
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.delete_user,
@@ -144,13 +144,13 @@
@attr(type='negative')
def test_delete_non_existant_user(self):
- """Attempt to delete a non-existent user should fail"""
+ # Attempt to delete a non-existent user should fail
self.assertRaises(exceptions.NotFound, self.client.delete_user,
'junk12345123')
@attr(type='smoke')
def test_user_authentication(self):
- """Valid user's token is authenticated"""
+ # Valid user's token is authenticated
self.data.setup_test_user()
# Get a token
self.token_client.auth(self.data.test_user, self.data.test_password,
@@ -163,7 +163,7 @@
@attr(type='negative')
def test_authentication_for_disabled_user(self):
- """Disabled user's token should not get authenticated"""
+ # Disabled user's token should not get authenticated
self.data.setup_test_user()
self.disable_user(self.data.test_user)
self.assertRaises(exceptions.Unauthorized, self.token_client.auth,
@@ -174,7 +174,7 @@
@attr(type='negative')
@unittest.skip('Until Bug 988920 is fixed')
def test_authentication_when_tenant_is_disabled(self):
- """User's token for a disabled tenant should not be authenticated"""
+ # User's token for a disabled tenant should not be authenticated
self.data.setup_test_user()
self.disable_tenant(self.data.test_tenant)
self.assertRaises(exceptions.Unauthorized, self.token_client.auth,
@@ -185,7 +185,7 @@
@attr(type='negative')
@unittest.skip('Until Bug 988920 is fixed')
def test_authentication_with_invalid_tenant(self):
- """User's token for an invalid tenant should not be authenticated"""
+ # User's token for an invalid tenant should not be authenticated
self.data.setup_one_user()
self.assertRaises(exceptions.Unauthorized, self.token_client.auth,
self.data.test_user,
@@ -194,14 +194,14 @@
@attr(type='negative')
def test_authentication_with_invalid_username(self):
- """Non-existent user's token should not get authenticated"""
+ # Non-existent user's token should not get authenticated
self.assertRaises(exceptions.Unauthorized, self.token_client.auth,
'junkuser123', self.data.test_password,
self.data.test_tenant)
@attr(type='negative')
def test_authentication_with_invalid_password(self):
- """User's token with invalid password should not be authenticated"""
+ # User's token with invalid password should not be authenticated
self.data.setup_test_user()
self.assertRaises(exceptions.Unauthorized, self.token_client.auth,
self.data.test_user, 'junkpass1234',
@@ -209,7 +209,7 @@
@attr(type='positive')
def test_authentication_request_without_token(self):
- """Request for token authentication with a valid token in header"""
+ # Request for token authentication with a valid token in header
self.data.setup_test_user()
self.token_client.auth(self.data.test_user, self.data.test_password,
self.data.test_tenant)
@@ -226,7 +226,7 @@
@attr(type='smoke')
def test_get_users(self):
- """Get a list of users and find the test user"""
+ # Get a list of users and find the test user
self.data.setup_test_user()
resp, users = self.client.get_users()
self.assertIn(self.data.test_user, [u['name'] for u in users],
@@ -234,14 +234,14 @@
@attr(type='negative')
def test_get_users_by_unauthorized_user(self):
- """Non admin user should not be authorized to get user list"""
+ # Non admin user should not be authorized to get user list
self.data.setup_test_user()
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.get_users)
@attr(type='negative')
def test_get_users_request_without_token(self):
- """Request to get list of users without a valid token should fail"""
+ # Request to get list of users without a valid token should fail
token = self.client.get_auth()
self.client.delete_token(token)
self.assertRaises(exceptions.Unauthorized, self.client.get_users)
@@ -249,7 +249,7 @@
@attr(type='positive')
def test_list_users_for_tenant(self):
- """Return a list of all users for a tenant"""
+ # Return a list of all users for a tenant
self.data.setup_test_tenant()
user_ids = list()
fetched_user_ids = list()
@@ -277,7 +277,7 @@
@attr(type='positive')
def test_list_users_with_roles_for_tenant(self):
- """Return list of users on tenant when roles are assigned to users"""
+ # Return list of users on tenant when roles are assigned to users
self.data.setup_test_user()
self.data.setup_test_role()
user = self.get_user_by_name(self.data.test_user)
@@ -309,10 +309,8 @@
@attr(type='negative')
def test_list_users_with_invalid_tenant(self):
- """
- Should not be able to return a list of all
- users for a nonexistant tenant
- """
+ # Should not be able to return a list of all
+ # users for a nonexistent tenant
#Assign invalid tenant ids
invalid_id = list()
invalid_id.append(rand_name('999'))
diff --git a/tempest/tests/identity/base.py b/tempest/tests/identity/base.py
index 618b328..ce160da 100644
--- a/tempest/tests/identity/base.py
+++ b/tempest/tests/identity/base.py
@@ -18,15 +18,15 @@
import nose
import unittest2 as unittest
+from tempest import clients
from tempest.common.utils.data_utils import rand_name
-from tempest import openstack
class BaseIdAdminTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
- os = openstack.IdentityManager(interface=cls._interface)
+ os = clients.IdentityManager(interface=cls._interface)
cls.client = os.admin_client
cls.token_client = os.token_client
@@ -35,7 +35,7 @@
cls.data = DataGenerator(cls.client)
- os = openstack.IdentityNaManager(interface=cls._interface)
+ os = clients.IdentityNaManager(interface=cls._interface)
cls.non_admin_client = os.admin_client
@classmethod
@@ -95,7 +95,7 @@
self.role_name = None
def setup_test_user(self):
- """Set up a test user"""
+ """Set up a test user."""
self.setup_test_tenant()
self.test_user = rand_name('test_user_')
self.test_password = rand_name('pass_')
@@ -107,16 +107,16 @@
self.users.append(self.user)
def setup_test_tenant(self):
- """Set up a test tenant"""
+ """Set up a test tenant."""
self.test_tenant = rand_name('test_tenant_')
self.test_description = rand_name('desc_')
resp, self.tenant = self.client.create_tenant(
- name=self.test_tenant,
- description=self.test_description)
+ name=self.test_tenant,
+ description=self.test_description)
self.tenants.append(self.tenant)
def setup_test_role(self):
- """Set up a test role"""
+ """Set up a test role."""
self.test_role = rand_name('role')
resp, self.role = self.client.create_role(self.test_role)
self.roles.append(self.role)
diff --git a/tempest/tests/image/test_images.py b/tempest/tests/image/test_images.py
index c6b903b..2429a32 100644
--- a/tempest/tests/image/test_images.py
+++ b/tempest/tests/image/test_images.py
@@ -30,7 +30,7 @@
except ImportError:
pass
-from tempest import openstack
+from tempest import clients
class CreateRegisterImagesTest(unittest.TestCase):
@@ -43,7 +43,7 @@
def setUpClass(cls):
if not GLANCE_INSTALLED:
raise SkipTest('Glance not installed')
- cls.os = openstack.ServiceManager()
+ cls.os = clients.ServiceManager()
cls.client = cls.os.images.get_client()
cls.created_images = []
@@ -54,7 +54,7 @@
@attr(type='image')
def test_register_with_invalid_data(self):
- """Negative tests for invalid data supplied to POST /images"""
+ # Negative tests for invalid data supplied to POST /images
metas = [
{
@@ -76,7 +76,7 @@
@attr(type='image')
def test_register_then_upload(self):
- """Register, then upload an image"""
+ # Register, then upload an image
meta = {
'name': 'New Name',
'is_public': True,
@@ -108,7 +108,7 @@
@attr(type='image')
def test_register_remote_image(self):
- """Register a new remote image"""
+ # Register a new remote image
meta = {
'name': 'New Remote Image',
'is_public': True,
@@ -138,10 +138,9 @@
def setUpClass(cls):
if not GLANCE_INSTALLED:
raise SkipTest('Glance not installed')
- cls.os = openstack.ServiceManager()
+ cls.os = clients.ServiceManager()
cls.client = cls.os.images.get_client()
cls.created_images = []
- cls.original_images = list(cls.client.images.list())
# We add a few images here to test the listing functionality of
# the images API
@@ -195,8 +194,6 @@
@attr(type='image')
def test_index_no_params(self):
- """
- Simple test to see all fixture images returned
- """
- images = list(self.client.images.list())
- self.assertEqual(10, len(images) - len(self.original_images))
+ # Simple test to see all fixture images returned
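+ # (The listing may also contain pre-existing images, so only assert
+ # that every image created by the fixtures is present.)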
+ current_images = set(i.id for i in self.client.images.list())
+ self.assertTrue(set(self.created_images) <= current_images)
diff --git a/tempest/tests/network/base.py b/tempest/tests/network/base.py
index 887056e..90b351d 100644
--- a/tempest/tests/network/base.py
+++ b/tempest/tests/network/base.py
@@ -18,16 +18,16 @@
import nose
import unittest2 as unittest
-from tempest import exceptions
-from tempest import openstack
+from tempest import clients
from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
class BaseNetworkTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
- os = openstack.Manager()
+ os = clients.Manager()
client = os.network_client
config = os.config
networks = []
@@ -48,7 +48,7 @@
cls.client.delete_network(network['id'])
def create_network(self, network_name=None):
- """Wrapper utility that returns a test network"""
+ """Wrapper utility that returns a test network."""
network_name = network_name or rand_name('test-network')
resp, body = self.client.create_network(network_name)
diff --git a/tempest/tests/network/test_network_basic_ops.py b/tempest/tests/network/test_network_basic_ops.py
new file mode 100644
index 0000000..1d88759
--- /dev/null
+++ b/tempest/tests/network/test_network_basic_ops.py
@@ -0,0 +1,454 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack, LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import subprocess
+
+import netaddr
+import nose
+
+from quantumclient.common import exceptions as exc
+
+from tempest.common.utils.data_utils import rand_name
+from tempest import smoke
+from tempest import test
+
+
+LOG = logging.getLogger(__name__)
+
+
+class AttributeDict(dict):
+
+ """
+ Provide attribute access (dict.key) to dictionary values.
+ """
+
+ def __getattr__(self, name):
+ """Allow attribute access for all keys in the dict."""
+ if name in self:
+ return self[name]
+ return super(AttributeDict, self).__getattribute__(name)
+
+
+class DeletableResource(AttributeDict):
+
+ """
+ Support deletion of quantum resources (networks, subnets) via a
+ delete() method, as is supported by keystone and nova resources.
+ """
+
+ def __init__(self, *args, **kwargs):
+ self.client = kwargs.pop('client', None)
+ super(DeletableResource, self).__init__(*args, **kwargs)
+
+ def __str__(self):
+ return '<%s id="%s" name="%s">' % (self.__class__.__name__,
+ self.id, self.name)
+
+ def delete(self):
+ raise NotImplementedError()
+
+
+class DeletableNetwork(DeletableResource):
+
+ def delete(self):
+ self.client.delete_network(self.id)
+
+
+class DeletableSubnet(DeletableResource):
+
+ _router_ids = set()
+
+ def add_to_router(self, router_id):
+ self._router_ids.add(router_id)
+ body = dict(subnet_id=self.id)
+ self.client.add_interface_router(router_id, body=body)
+
+ def delete(self):
+ for router_id in self._router_ids.copy():
+ body = dict(subnet_id=self.id)
+ self.client.remove_interface_router(router_id, body=body)
+ self._router_ids.remove(router_id)
+ self.client.delete_subnet(self.id)
+
+
+class DeletableRouter(DeletableResource):
+
+ def add_gateway(self, network_id):
+ body = dict(network_id=network_id)
+ self.client.add_gateway_router(self.id, body=body)
+
+ def delete(self):
+ self.client.remove_gateway_router(self.id)
+ self.client.delete_router(self.id)
+
+
+class DeletableFloatingIp(DeletableResource):
+
+ def delete(self):
+ self.client.delete_floatingip(self.id)
+
+
+class TestNetworkBasicOps(smoke.DefaultClientSmokeTest):
+
+ """
+ This smoke test suite assumes that Nova has been configured to
+ boot VM's with Quantum-managed networking, and attempts to
+ verify network connectivity as follows:
+
+ * For a freshly-booted VM with an IP address ("port") on a given network:
+
+ - the Tempest host can ping the IP address. This implies that
+ the VM has been assigned the correct IP address and has
+ connectivity to the Tempest host.
+
+ #TODO(mnewby) - Need to implement the following:
+ - the Tempest host can ssh into the VM via the IP address and
+ successfully execute the following:
+
+ - ping an external IP address, implying external connectivity.
+
+ - ping an external hostname, implying that dns is correctly
+ configured.
+
+ - ping an internal IP address, implying connectivity to another
+ VM on the same network.
+
+ There are presumed to be two types of networks: tenant and
+ public. A tenant network may or may not be reachable from the
+ Tempest host. A public network is assumed to be reachable from
+ the Tempest host, and it should be possible to associate a public
+ ('floating') IP address with a tenant ('fixed') IP address to
+ facilitate external connectivity to a potentially unroutable
+ tenant IP address.
+
+ This test suite can be configured to test network connectivity to
+ a VM via a tenant network, a public network, or both. If both
+ networking types are to be evaluated, tests that need to be
+ executed remotely on the VM (via ssh) will only be run against
+ one of the networks (to minimize test execution time).
+
+ Determine which types of networks to test as follows:
+
+ * Configure tenant network checks (via the
+ 'tenant_networks_reachable' key) if the Tempest host should
+ have direct connectivity to tenant networks. This is likely to
+ be the case if Tempest is running on the same host as a
+ single-node devstack installation with IP namespaces disabled.
+
+ * Configure checks for a public network if a public network has
+ been configured prior to the test suite being run and if the
+ Tempest host should have connectivity to that public network.
+ Checking connectivity for a public network requires that a
+ value be provided for 'public_network_id'. A value can
+ optionally be provided for 'public_router_id' if tenants will
+ use a shared router to access a public network (as is likely to
+ be the case when IP namespaces are not enabled). If a value is
+ not provided for 'public_router_id', a router will be created
+ for each tenant and use the network identified by
+ 'public_network_id' as its gateway.
+
+ """
+
+ @classmethod
+ def check_preconditions(cls):
+ cfg = cls.config.network
+ msg = None
+ if not (cfg.tenant_networks_reachable or cfg.public_network_id):
+ msg = ('Either tenant_networks_reachable must be "true", or '
+ 'public_network_id must be defined.')
+ else:
+ try:
+ cls.network_client.list_networks()
+ except exc.QuantumClientException:
+ msg = 'Unable to connect to Quantum service.'
+
+ cls.enabled = not bool(msg)
+ if msg:
+ raise nose.SkipTest(msg)
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestNetworkBasicOps, cls).setUpClass()
+ cls.check_preconditions()
+ cfg = cls.config.network
+ cls.tenant_id = cls.manager._get_identity_client(
+ cfg.username,
+ cfg.password,
+ cfg.tenant_name).tenant_id
+ # TODO(mnewby) Consider looking up entities as needed instead
+ # of storing them as collections on the class.
+ cls.keypairs = {}
+ cls.security_groups = {}
+ cls.networks = []
+ cls.servers = []
+ cls.floating_ips = {}
+
+ def _create_keypair(self, client):
+ kp_name = rand_name('keypair-smoke-')
+ keypair = client.keypairs.create(kp_name)
+ try:
+ self.assertEqual(keypair.id, kp_name)
+ self.set_resource(kp_name, keypair)
+ except AttributeError:
+ self.fail("Keypair object not successfully created.")
+ return keypair
+
+ def _create_security_group(self, client):
+ # Create security group
+ sg_name = rand_name('secgroup-smoke-')
+ sg_desc = sg_name + " description"
+ secgroup = client.security_groups.create(sg_name, sg_desc)
+ try:
+ self.assertEqual(secgroup.name, sg_name)
+ self.assertEqual(secgroup.description, sg_desc)
+ self.set_resource(sg_name, secgroup)
+ except AttributeError:
+ self.fail("SecurityGroup object not successfully created.")
+
+ # Add rules to the security group
+ rulesets = [
+ {
+ # ssh
+ 'ip_protocol': 'tcp',
+ 'from_port': 22,
+ 'to_port': 22,
+ 'cidr': '0.0.0.0/0',
+ 'group_id': secgroup.id
+ },
+ {
+ # ping
+ 'ip_protocol': 'icmp',
+ 'from_port': -1,
+ 'to_port': -1,
+ 'cidr': '0.0.0.0/0',
+ 'group_id': secgroup.id
+ }
+ ]
+ for ruleset in rulesets:
+ try:
+ client.security_group_rules.create(secgroup.id, **ruleset)
+ except Exception:
+ self.fail("Failed to create rule in security group.")
+
+ return secgroup
+
+ def _get_router(self, tenant_id):
+ """Retrieve a router for the given tenant id.
+
+ If a public router has been configured, it will be returned.
+
+ If a public router has not been configured, but a public
+ network has, a tenant router will be created and returned that
+ routes traffic to the public network.
+
+ """
+ router_id = self.config.network.public_router_id
+ network_id = self.config.network.public_network_id
+ if router_id:
+ result = self.network_client.show_router(router_id)
+ return AttributeDict(**result['router'])
+ elif network_id:
+ router = self._create_router(tenant_id)
+ router.add_gateway(network_id)
+ return router
+ else:
+ raise Exception("Neither of 'public_router_id' or "
+ "'public_network_id' has been defined.")
+
+ def _create_router(self, tenant_id):
+ name = rand_name('router-smoke-')
+ body = dict(
+ router=dict(
+ name=name,
+ admin_state_up=True,
+ tenant_id=tenant_id,
+ ),
+ )
+ result = self.network_client.create_router(body=body)
+ router = DeletableRouter(client=self.network_client,
+ **result['router'])
+ self.assertEqual(router.name, name)
+ self.set_resource(name, router)
+ return router
+
+ def _create_network(self, tenant_id):
+ name = rand_name('network-smoke-')
+ body = dict(
+ network=dict(
+ name=name,
+ tenant_id=tenant_id,
+ ),
+ )
+ result = self.network_client.create_network(body=body)
+ network = DeletableNetwork(client=self.network_client,
+ **result['network'])
+ self.assertEqual(network.name, name)
+ self.set_resource(name, network)
+ return network
+
+ def _create_subnet(self, network):
+ """
+ Create a subnet for the given network within the cidr block
+ configured for tenant networks.
+ """
+ cfg = self.config.network
+ tenant_cidr = netaddr.IPNetwork(cfg.tenant_network_cidr)
+ result = None
+ # Repeatedly attempt subnet creation with sequential cidr
+ # blocks until an unallocated block is found.
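+ # For example, netaddr.IPNetwork('10.100.0.0/16').subnet(28) yields
+ # 10.100.0.0/28, 10.100.0.16/28, 10.100.0.32/28, ... in order, so an
+ # overlapping block is simply skipped and the next one is tried.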
+ for subnet_cidr in tenant_cidr.subnet(cfg.tenant_network_mask_bits):
+ body = dict(
+ subnet=dict(
+ ip_version=4,
+ network_id=network.id,
+ tenant_id=network.tenant_id,
+ cidr=str(subnet_cidr),
+ ),
+ )
+ try:
+ result = self.network_client.create_subnet(body=body)
+ break
+ except exc.QuantumClientException as e:
+ is_overlapping_cidr = 'overlaps with another subnet' in str(e)
+ if not is_overlapping_cidr:
+ raise
+ self.assertIsNotNone(result, 'Unable to allocate tenant network')
+ subnet = DeletableSubnet(client=self.network_client,
+ **result['subnet'])
+ self.assertEqual(subnet.cidr, str(subnet_cidr))
+ self.set_resource(rand_name('subnet-smoke-'), subnet)
+ return subnet
+
+ def _create_server(self, client, network, name, key_name, security_groups):
+ flavor_id = self.config.compute.flavor_ref
+ base_image_id = self.config.compute.image_ref
+ create_kwargs = {
+ 'nics': [
+ {'net-id': network.id},
+ ],
+ 'key_name': key_name,
+ 'security_groups': security_groups,
+ }
+ server = client.servers.create(name, base_image_id, flavor_id,
+ **create_kwargs)
+ try:
+ self.assertEqual(server.name, name)
+ self.set_resource(name, server)
+ except AttributeError:
+ self.fail("Server not successfully created.")
+ self.status_timeout(client.servers, server.id, 'ACTIVE')
+ # The instance retrieved on creation is missing network
+ # details, necessitating retrieval after it becomes active to
+ # ensure correct details.
+ server = client.servers.get(server.id)
+ self.set_resource(name, server)
+ return server
+
+ def _create_floating_ip(self, server, external_network_id):
+ result = self.network_client.list_ports(device_id=server.id)
+ ports = result.get('ports', [])
+ self.assertEqual(len(ports), 1,
+ "Unable to determine which port to target.")
+ port_id = ports[0]['id']
+ body = dict(
+ floatingip=dict(
+ floating_network_id=external_network_id,
+ port_id=port_id,
+ tenant_id=server.tenant_id,
+ )
+ )
+ result = self.network_client.create_floatingip(body=body)
+ floating_ip = DeletableFloatingIp(client=self.network_client,
+ **result['floatingip'])
+ self.set_resource(rand_name('floatingip-'), floating_ip)
+ return floating_ip
+
+ def _ping_ip_address(self, ip_address):
+ cmd = ['ping', '-c1', '-w1', ip_address]
+
+ def ping():
+ proc = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc.wait()
+ if proc.returncode == 0:
+ return True
+
+ # TODO(mnewby) Allow configuration of execution and sleep duration.
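+ # call_until_true is assumed to re-invoke ping() until it returns True,
+ # waiting at most 20 seconds and sleeping 1 second between attempts,
+ # and to return False on timeout.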
+ return test.call_until_true(ping, 20, 1)
+
+ def test_001_create_keypairs(self):
+ self.keypairs[self.tenant_id] = self._create_keypair(
+ self.compute_client)
+
+ def test_002_create_security_groups(self):
+ self.security_groups[self.tenant_id] = self._create_security_group(
+ self.compute_client)
+
+ def test_003_create_networks(self):
+ network = self._create_network(self.tenant_id)
+ router = self._get_router(self.tenant_id)
+ subnet = self._create_subnet(network)
+ subnet.add_to_router(router.id)
+ self.networks.append(network)
+
+ def test_004_create_servers(self):
+ if not (self.keypairs or self.security_groups or self.networks):
+ raise nose.SkipTest('Necessary resources have not been defined')
+ for i, network in enumerate(self.networks):
+ tenant_id = network.tenant_id
+ name = rand_name('server-smoke-%d-' % i)
+ keypair_name = self.keypairs[tenant_id].name
+ security_groups = [self.security_groups[tenant_id].name]
+ server = self._create_server(self.compute_client, network,
+ name, keypair_name, security_groups)
+ self.servers.append(server)
+
+ def test_005_check_tenant_network_connectivity(self):
+ if not self.config.network.tenant_networks_reachable:
+ msg = 'Tenant networks not configured to be reachable.'
+ raise nose.SkipTest(msg)
+ if not self.servers:
+ raise nose.SkipTest("No VM's have been created")
+ for server in self.servers:
+ for net_name, ip_addresses in server.networks.iteritems():
+ for ip_address in ip_addresses:
+ self.assertTrue(self._ping_ip_address(ip_address),
+ "Timed out waiting for %s's ip to become "
+ "reachable" % server.name)
+
+ def test_006_assign_floating_ips(self):
+ public_network_id = self.config.network.public_network_id
+ if not public_network_id:
+ raise nose.SkipTest('Public network not configured')
+ if not self.servers:
+ raise nose.SkipTest("No VM's have been created")
+ for server in self.servers:
+ floating_ip = self._create_floating_ip(server, public_network_id)
+ self.floating_ips.setdefault(server, [])
+ self.floating_ips[server].append(floating_ip)
+
+ def test_007_check_public_network_connectivity(self):
+ if not self.floating_ips:
+ raise nose.SkipTest('No floating ips have been allocated.')
+ for server, floating_ips in self.floating_ips.iteritems():
+ for floating_ip in floating_ips:
+ ip_address = floating_ip.floating_ip_address
+ self.assertTrue(self._ping_ip_address(ip_address),
+ "Timed out waiting for %s's ip to become "
+ "reachable" % server.name)
diff --git a/tempest/tests/network/test_networks.py b/tempest/tests/network/test_networks.py
index 5476551..d7f09c4 100644
--- a/tempest/tests/network/test_networks.py
+++ b/tempest/tests/network/test_networks.py
@@ -31,7 +31,7 @@
@attr(type='positive')
def test_create_delete_network(self):
- """Creates and deletes a network for a tenant"""
+ # Creates and deletes a network for a tenant
name = rand_name('network')
resp, body = self.client.create_network(name)
self.assertEqual('202', resp['status'])
@@ -42,7 +42,7 @@
@attr(type='positive')
def test_show_network(self):
- """Verifies the details of a network"""
+ # Verifies the details of a network
resp, body = self.client.get_network(self.network['id'])
self.assertEqual('200', resp['status'])
network = body['network']
@@ -51,7 +51,7 @@
@attr(type='positive')
def test_show_network_details(self):
- """Verifies the full details of a network"""
+ # Verifies the full details of a network
resp, body = self.client.get_network_details(self.network['id'])
self.assertEqual('200', resp['status'])
network = body['network']
@@ -61,7 +61,7 @@
@attr(type='positive')
def test_list_networks(self):
- """Verify the network exists in the list of all networks"""
+ # Verify the network exists in the list of all networks
resp, body = self.client.list_networks()
networks = body['networks']
found = any(n for n in networks if n['id'] == self.network['id'])
@@ -69,7 +69,7 @@
@attr(type='positive')
def test_list_networks_with_detail(self):
- """Verify the network exists in the detailed list of all networks"""
+ # Verify the network exists in the detailed list of all networks
resp, body = self.client.list_networks_details()
networks = body['networks']
found = any(n for n in networks if n['id'] == self.network['id'])
diff --git a/tempest/tests/object_storage/base.py b/tempest/tests/object_storage/base.py
index 8edb3d2..3992b13 100644
--- a/tempest/tests/object_storage/base.py
+++ b/tempest/tests/object_storage/base.py
@@ -18,20 +18,21 @@
import nose
import unittest2 as unittest
+from tempest import clients
import tempest.config
from tempest import exceptions
-from tempest import openstack
class BaseObjectTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
- cls.os = openstack.Manager()
+ cls.os = clients.Manager()
cls.object_client = cls.os.object_client
cls.container_client = cls.os.container_client
cls.account_client = cls.os.account_client
cls.config = cls.os.config
+ cls.custom_object_client = cls.os.custom_object_client
try:
cls.account_client.list_account_containers()
diff --git a/tempest/tests/object_storage/test_account_services.py b/tempest/tests/object_storage/test_account_services.py
index db3aa69..cae2da1 100644
--- a/tempest/tests/object_storage/test_account_services.py
+++ b/tempest/tests/object_storage/test_account_services.py
@@ -36,7 +36,7 @@
@attr(type='smoke')
def test_list_containers(self):
- """List of all containers should not be empty"""
+ # List of all containers should not be empty
params = {'format': 'json'}
resp, container_list = \
@@ -48,7 +48,7 @@
@attr(type='smoke')
def test_list_account_metadata(self):
- """List all account metadata"""
+ # List all account metadata
resp, metadata = self.account_client.list_account_metadata()
self.assertEqual(resp['status'], '204')
@@ -58,7 +58,7 @@
@attr(type='smoke')
def test_create_account_metadata(self):
- """Add metadata to account"""
+ # Add metadata to account
metadata = {'test-account-meta': 'Meta!'}
resp, _ = \
@@ -71,7 +71,7 @@
@attr(type='smoke')
def test_delete_account_metadata(self):
- """Delete metadata from account"""
+ # Delete metadata from account
metadata = ['test-account-meta']
resp, _ = \
diff --git a/tempest/tests/object_storage/test_container_services.py b/tempest/tests/object_storage/test_container_services.py
index e57256b..fe09341 100644
--- a/tempest/tests/object_storage/test_container_services.py
+++ b/tempest/tests/object_storage/test_container_services.py
@@ -16,7 +16,9 @@
# under the License.
from nose.plugins.attrib import attr
-from tempest.common.utils.data_utils import rand_name, arbitrary_string
+
+from tempest.common.utils.data_utils import arbitrary_string
+from tempest.common.utils.data_utils import rand_name
from tempest.tests.object_storage import base
@@ -44,7 +46,7 @@
@attr(type='smoke')
def test_create_container(self):
- """Create a container, test responses"""
+ # Create a container, test responses
#Create a container
container_name = rand_name(name='TestContainer')
@@ -55,7 +57,7 @@
@attr(type='smoke')
def test_delete_container(self):
- """Create and Delete a container, test responses"""
+ # Create and Delete a container, test responses
#Create a container
container_name = rand_name(name='TestContainer')
@@ -69,7 +71,7 @@
@attr(type='smoke')
def test_list_container_contents_json(self):
- """Add metadata to object"""
+ # Add metadata to object
#Create a container
container_name = rand_name(name='TestContainer')
@@ -105,7 +107,7 @@
@attr(type='smoke')
def test_container_metadata(self):
- """Update/Retrieve/Delete Container Metadata"""
+ # Update/Retrieve/Delete Container Metadata
# Create a container
container_name = rand_name(name='TestContainer')
diff --git a/tempest/tests/object_storage/test_object_expiry.py b/tempest/tests/object_storage/test_object_expiry.py
new file mode 100644
index 0000000..099fc16
--- /dev/null
+++ b/tempest/tests/object_storage/test_object_expiry.py
@@ -0,0 +1,93 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack, LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nose.plugins.attrib import attr
+from tempest.common.utils.data_utils import arbitrary_string
+from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
+from tempest.tests.object_storage import base
+from time import sleep
+import unittest2 as unittest
+
+
+class ObjectExpiryTest(base.BaseObjectTest):
+
+ @classmethod
+ def setUpClass(cls):
+ super(ObjectExpiryTest, cls).setUpClass()
+
+ #Create a container
+ cls.container_name = rand_name(name='TestContainer')
+ cls.container_client.create_container(cls.container_name)
+
+ @classmethod
+ def tearDownClass(cls):
+ """The test script fails in tear down class
+ as the container contains expired objects (LP bug 1069849).
+ But delete action for the expired object is raising
+ NotFound exception and also non empty container cannot be deleted."""
+
+ #Get list of all objects in the container
+ objlist = \
+ cls.container_client.list_all_container_objects(cls.container_name)
+
+ #Attempt to delete every object in the container
+ if objlist:
+ for obj in objlist:
+ resp, _ = cls.object_client.delete_object(cls.container_name,
+ obj['name'])
+
+ #Attempt to delete the container
+ resp, _ = cls.container_client.delete_container(cls.container_name)
+
+ @unittest.skip('Until bug 1069849 is resolved.')
+ @attr(type='regression')
+ def test_get_object_after_expiry_time(self):
+ # GET object after expiry time
+ #TODO(harika-vakadi): A similar test case should be added for
+ # "X-Delete-At" once this test case works.
+
+ #Create Object
+ object_name = rand_name(name='TestObject')
+ data = arbitrary_string()
+ resp, _ = self.object_client.create_object(self.container_name,
+ object_name, data)
+
+ #Update object metadata with expiry time of 3 seconds
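+ # (Swift is expected to convert the relative 'X-Delete-After' header
+ # into an absolute 'X-Delete-At' timestamp, which the metadata listing
+ # below checks for.)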
+ metadata = {'X-Delete-After': '3'}
+ resp, _ = \
+ self.object_client.update_object_metadata(self.container_name,
+ object_name, metadata,
+ metadata_prefix='')
+
+ resp, _ = \
+ self.object_client.list_object_metadata(self.container_name,
+ object_name)
+
+ self.assertEqual(resp['status'], '200')
+ self.assertIn('x-delete-at', resp)
+
+ resp, body = self.object_client.get_object(self.container_name,
+ object_name)
+ self.assertEqual(resp['status'], '200')
+ # Check data
+ self.assertEqual(body, data)
+ # Sleep for 5 seconds (longer than the 3 second expiry) so the object expires
+ sleep(5)
+ # Verification of raised exception after object gets expired
+ self.assertRaises(exceptions.NotFound, self.object_client.get_object,
+ self.container_name, object_name)
diff --git a/tempest/tests/object_storage/test_object_services.py b/tempest/tests/object_storage/test_object_services.py
index d0862eb..8b87ad6 100644
--- a/tempest/tests/object_storage/test_object_services.py
+++ b/tempest/tests/object_storage/test_object_services.py
@@ -16,7 +16,10 @@
# under the License.
from nose.plugins.attrib import attr
-from tempest.common.utils.data_utils import rand_name, arbitrary_string
+
+from tempest.common.utils.data_utils import arbitrary_string
+from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
from tempest.tests.object_storage import base
@@ -46,7 +49,7 @@
@attr(type='smoke')
def test_create_object(self):
- """Create storage object, test response"""
+ # Create storage object, test response
#Create Object
object_name = rand_name(name='TestObject')
@@ -63,7 +66,7 @@
@attr(type='smoke')
def test_delete_object(self):
- """Create and delete a storage object, test responses"""
+ # Create and delete a storage object, test responses
#Create Object
object_name = rand_name(name='TestObject')
@@ -77,7 +80,7 @@
@attr(type='smoke')
def test_object_metadata(self):
- """Add metadata to storage object, test if metadata is retrievable"""
+ # Add metadata to storage object, test if metadata is retrievable
#Create Object
object_name = rand_name(name='TestObject')
@@ -107,7 +110,7 @@
@attr(type='smoke')
def test_get_object(self):
- """Retrieve object's data(in response body)"""
+ # Retrieve object's data(in response body)
#Create Object
object_name = rand_name(name='TestObject')
@@ -122,8 +125,8 @@
self.assertEqual(body, data)
@attr(type='smoke')
- def test_copy_object(self):
- """Copy storage object"""
+ def test_copy_object_in_same_container(self):
+ # Copy storage object
# Create source Object
src_object_name = rand_name(name='SrcObject')
@@ -140,9 +143,8 @@
dst_object_name, dst_data)
# Copy source object to destination
- resp, _ = self.object_client.copy_object(self.container_name,
- src_object_name,
- dst_object_name)
+ resp, _ = self.object_client.copy_object_in_same_container(
+ self.container_name, src_object_name, dst_object_name)
self.assertEqual(resp['status'], '201')
# Check data
@@ -152,7 +154,7 @@
@attr(type='smoke')
def test_copy_object_to_itself(self):
- """Change the content type of an existing object"""
+ # Change the content type of an existing object
# Create Object
object_name = rand_name(name='TestObject')
@@ -161,15 +163,13 @@
object_name, data)
# Get the old content type
resp_tmp, _ = self.object_client.list_object_metadata(
- self.container_name,
- object_name)
+ self.container_name,
+ object_name)
# Change the content type of the object
metadata = {'content-type': 'text/plain; charset=UTF-8'}
self.assertNotEqual(resp_tmp['content-type'], metadata['content-type'])
- resp, _ = self.object_client.copy_object(self.container_name,
- object_name,
- object_name,
- metadata)
+ resp, _ = self.object_client.copy_object_in_same_container(
+ self.container_name, object_name, object_name, metadata)
self.assertEqual(resp['status'], '201')
# Check the content type
@@ -179,7 +179,7 @@
@attr(type='smoke')
def test_copy_object_2d_way(self):
- """Copy storage object"""
+ # Copy storage object
# Create source Object
src_object_name = rand_name(name='SrcObject')
@@ -205,3 +205,165 @@
resp, body = self.object_client.get_object(self.container_name,
dst_object_name)
self.assertEqual(body, src_data)
+
+ @attr(type='smoke')
+ def test_copy_object_across_containers(self):
+ # Copy storage object across containers
+
+    #Create a container to use as the source container
+ src_container_name = rand_name(name='TestSourceContainer')
+ self.container_client.create_container(src_container_name)
+
+    #Create a container to use as the destination container
+ dst_container_name = rand_name(name='TestDestinationContainer')
+ self.container_client.create_container(dst_container_name)
+
+ # Create Object in source container
+ object_name = rand_name(name='Object')
+ data = arbitrary_string(size=len(object_name) * 2,
+ base_text=object_name)
+ resp, _ = self.object_client.create_object(src_container_name,
+ object_name, data)
+ #Set Object Metadata
+ meta_key = rand_name(name='test-')
+ meta_value = rand_name(name='MetaValue-')
+ orig_metadata = {meta_key: meta_value}
+
+ resp, _ = \
+ self.object_client.update_object_metadata(src_container_name,
+ object_name,
+ orig_metadata)
+ self.assertEqual(resp['status'], '202')
+
+ try:
+ # Copy object from source container to destination container
+ resp, _ = self.object_client.copy_object_across_containers(
+ src_container_name, object_name, dst_container_name,
+ object_name)
+ self.assertEqual(resp['status'], '201')
+
+ # Check if object is present in destination container
+ resp, body = self.object_client.get_object(dst_container_name,
+ object_name)
+ self.assertEqual(body, data)
+ actual_meta_key = 'x-object-meta-' + meta_key
+ self.assertTrue(actual_meta_key in resp)
+ self.assertEqual(resp[actual_meta_key], meta_value)
+
+ except Exception as e:
+            self.fail("Got exception: %s while copying"
+                      " object across containers" % e)
+ finally:
+ #Delete objects from respective containers
+ resp, _ = self.object_client.delete_object(dst_container_name,
+ object_name)
+ resp, _ = self.object_client.delete_object(src_container_name,
+ object_name)
+ #Delete containers created in this method
+ resp, _ = self.container_client.delete_container(
+ src_container_name)
+ resp, _ = self.container_client.delete_container(
+ dst_container_name)
+
+ @attr(type='smoke')
+ def test_access_public_container_object_without_using_creds(self):
+ # Make container public-readable, and access the object
+    # anonymously, i.e. without using credentials
+
+ try:
+ resp_meta = None
+ # Update Container Metadata to make public readable
+ cont_headers = {'X-Container-Read': '.r:*,.rlistings'}
+ resp_meta, body = \
+ self.container_client.update_container_metadata(
+ self.container_name, metadata=cont_headers,
+ metadata_prefix='')
+ self.assertEqual(resp_meta['status'], '204')
+
+ # Create Object
+ object_name = rand_name(name='Object')
+ data = arbitrary_string(size=len(object_name),
+ base_text=object_name)
+ resp, _ = self.object_client.create_object(self.container_name,
+ object_name, data)
+ self.assertEqual(resp['status'], '201')
+
+ # List container metadata
+ resp_meta, _ = \
+ self.container_client.list_container_metadata(
+ self.container_name)
+ self.assertEqual(resp_meta['status'], '204')
+ self.assertIn('x-container-read', resp_meta)
+ self.assertEqual(resp_meta['x-container-read'], '.r:*,.rlistings')
+
+            # Trying to Get Object with empty Headers as it is
+            # publicly readable
+ resp, body = \
+ self.custom_object_client.get_object(self.container_name,
+ object_name, metadata={})
+ self.assertEqual(body, data)
+ finally:
+            if resp_meta and resp_meta['status'] == '204':
+ # Delete updated container metadata, to revert back.
+ resp, body = \
+ self.container_client.delete_container_metadata(
+ self.container_name, metadata=cont_headers,
+ metadata_prefix='')
+
+ resp, _ = \
+ self.container_client.list_container_metadata(
+ self.container_name)
+ self.assertEqual(resp['status'], '204')
+ self.assertIn('x-container-read', resp)
+ self.assertEqual(resp['x-container-read'], 'x')
+
+ @attr(type='negative')
+ def test_access_object_without_using_creds(self):
+        # Attempt to access the object anonymously, i.e.
+ # not using any credentials
+
+ # Create Object
+ object_name = rand_name(name='Object')
+ data = arbitrary_string(size=len(object_name),
+ base_text=object_name)
+ resp, _ = self.object_client.create_object(self.container_name,
+ object_name, data)
+ self.assertEqual(resp['status'], '201')
+
+ # Trying to Get Object with empty Headers
+ self.assertRaises(exceptions.Unauthorized,
+ self.custom_object_client.get_object,
+ self.container_name, object_name, metadata={})
+
+ @attr(type='negative')
+ def test_write_object_without_using_creds(self):
+        # Attempt to write to the object anonymously, i.e.
+ # not using any credentials
+
+ # Trying to Create Object with empty Headers
+ object_name = rand_name(name='Object')
+ data = arbitrary_string(size=len(object_name),
+ base_text=object_name)
+ obj_headers = {'Content-Type': 'application/json',
+ 'Accept': 'application/json'}
+
+ self.assertRaises(exceptions.Unauthorized,
+ self.custom_object_client.create_object,
+ self.container_name, object_name, data,
+ metadata=obj_headers)
+
+ @attr(type='negative')
+ def test_delete_object_without_using_creds(self):
+ # Attempt to delete the object anonymously,
+        # i.e. not using any credentials
+
+ # Create Object
+ object_name = rand_name(name='Object')
+ data = arbitrary_string(size=len(object_name),
+ base_text=object_name)
+ resp, _ = self.object_client.create_object(self.container_name,
+ object_name, data)
+
+ # Trying to Delete Object with empty Headers
+ self.assertRaises(exceptions.Unauthorized,
+ self.custom_object_client.delete_object,
+ self.container_name, object_name)
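
The public-read and anonymous-access tests above rely on Swift container ACLs: while ``X-Container-Read`` is set to ``.r:*,.rlistings`` every object in the container can be read without a token, and a container without that ACL answers unauthenticated requests with 401. A sketch of the raw request the public-read test effectively performs; host, account and object names are placeholders::

    import httplib

    conn = httplib.HTTPConnection('swift.example.com', 8080)
    # Note: no X-Auth-Token header is sent.
    conn.request('GET', '/v1/AUTH_test/TestContainer/TestObject')
    resp = conn.getresponse()
    print resp.status    # 200 while the container is public, 401 otherwise
    print resp.read()
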
diff --git a/tempest/tests/object_storage/test_object_version.py b/tempest/tests/object_storage/test_object_version.py
new file mode 100644
index 0000000..28e0893
--- /dev/null
+++ b/tempest/tests/object_storage/test_object_version.py
@@ -0,0 +1,114 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack, LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nose.plugins.attrib import attr
+from tempest.common.utils.data_utils import rand_name
+from tempest.tests.object_storage import base
+
+
+class ContainerTest(base.BaseObjectTest):
+
+ @classmethod
+ def setUpClass(cls):
+ super(ContainerTest, cls).setUpClass()
+ cls.containers = []
+
+ @classmethod
+ def tearDownClass(cls):
+ for container in cls.containers:
+            #Get list of all objects in the container
+ objlist = \
+ cls.container_client.list_all_container_objects(container)
+
+ #Attempt to delete every object in the container
+ for obj in objlist:
+ resp, _ = \
+ cls.object_client.delete_object(container, obj['name'])
+
+ #Attempt to delete the container
+ resp, _ = cls.container_client.delete_container(container)
+
+ def assertContainer(self, container, count, byte, versioned):
+ resp, _ = self.container_client.list_container_metadata(container)
+        self.assertEqual(resp['status'], '204')
+ header_value = resp.get('x-container-object-count', 'Missing Header')
+ self.assertEqual(header_value, count)
+ header_value = resp.get('x-container-bytes-used', 'Missing Header')
+ self.assertEqual(header_value, byte)
+ header_value = resp.get('x-versions-location', 'Missing Header')
+ self.assertEqual(header_value, versioned)
+
+ @attr(type='smoke')
+ def test_versioned_container(self):
+ # Versioned container responses tests
+
+        # Create containers
+ vers_container_name = rand_name(name='TestVersionContainer')
+ resp, body = self.container_client.create_container(
+ vers_container_name)
+ self.containers.append(vers_container_name)
+ self.assertIn(resp['status'], ('202', '201'))
+ self.assertContainer(vers_container_name, '0', '0',
+ 'Missing Header')
+
+ base_container_name = rand_name(name='TestBaseContainer')
+ headers = {'X-versions-Location': vers_container_name}
+ resp, body = self.container_client.create_container(
+ base_container_name,
+ metadata=headers,
+ metadata_prefix='')
+ self.containers.append(base_container_name)
+ self.assertIn(resp['status'], ('202', '201'))
+ self.assertContainer(base_container_name, '0', '0',
+ vers_container_name)
+ # Create Object
+ object_name = rand_name(name='TestObject')
+ resp, _ = self.object_client.create_object(base_container_name,
+ object_name, '1')
+
+ resp, _ = self.object_client.create_object(base_container_name,
+ object_name, '2')
+
+ resp, body = self.object_client.get_object(base_container_name,
+ object_name)
+ self.assertEqual(body, '2')
+ # Delete Object version 2
+ resp, _ = self.object_client.delete_object(base_container_name,
+ object_name)
+ self.assertContainer(base_container_name, '1', '1',
+ vers_container_name)
+ resp, body = self.object_client.get_object(base_container_name,
+ object_name)
+ self.assertEqual(body, '1')
+
+ # Delete Object version 1
+ resp, _ = self.object_client.delete_object(base_container_name,
+ object_name)
+ # Containers are Empty
+ self.assertContainer(base_container_name, '0', '0',
+ vers_container_name)
+ self.assertContainer(vers_container_name, '0', '0',
+ 'Missing Header')
+
+ # Delete Containers
+ resp, _ = self.container_client.delete_container(base_container_name)
+ self.assertEqual(resp['status'], '204')
+ self.containers.remove(base_container_name)
+
+ resp, _ = self.container_client.delete_container(vers_container_name)
+ self.assertEqual(resp['status'], '204')
+ self.containers.remove(vers_container_name)
diff --git a/tempest/tests/utils.py b/tempest/tests/utils.py
index 8adaa51..571fc2a 100644
--- a/tempest/tests/utils.py
+++ b/tempest/tests/utils.py
@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Common utilities used in testing"""
+"""Common utilities used in testing."""
import nose.plugins.skip
diff --git a/tempest/tests/volume/admin/__init__.py b/tempest/tests/volume/admin/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/tests/volume/admin/__init__.py
diff --git a/tempest/tests/volume/admin/base.py b/tempest/tests/volume/admin/base.py
new file mode 100644
index 0000000..81c7c78
--- /dev/null
+++ b/tempest/tests/volume/admin/base.py
@@ -0,0 +1,67 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack, LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import nose
+
+
+from tempest import config
+import tempest.services.volume.json.admin.volume_types_client \
+ as volume_types_json_client
+import tempest.services.volume.xml.admin.volume_types_client \
+ as volume_types_xml_client
+from tempest.tests.volume.base import BaseVolumeTest
+
+
+class BaseVolumeAdminTest(BaseVolumeTest):
+ """Base test case class for all Volume Admin API tests."""
+ @classmethod
+ def setUpClass(cls):
+ super(BaseVolumeAdminTest, cls).setUpClass()
+ cls.config = config.TempestConfig()
+ cls.adm_user = cls.config.compute_admin.username
+ cls.adm_pass = cls.config.compute_admin.password
+ cls.adm_tenant = cls.config.compute_admin.tenant_name
+ cls.auth_url = cls.config.identity.auth_url
+
+        if not (cls.adm_user and cls.adm_pass and cls.adm_tenant):
+ msg = ("Missing Volume Admin API credentials "
+ "in configuration.")
+ raise nose.SkipTest(msg)
+
+ @classmethod
+ def tearDownClass(cls):
+ super(BaseVolumeAdminTest, cls).tearDownClass()
+
+
+class BaseVolumeAdminTestJSON(BaseVolumeAdminTest):
+ @classmethod
+ def setUpClass(cls):
+ cls._interface = "json"
+ super(BaseVolumeAdminTestJSON, cls).setUpClass()
+ cls.client = volume_types_json_client.\
+ VolumeTypesClientJSON(cls.config, cls.adm_user, cls.adm_pass,
+ cls.auth_url, cls.adm_tenant)
+
+
+class BaseVolumeAdminTestXML(BaseVolumeAdminTest):
+ @classmethod
+ def setUpClass(cls):
+ cls._interface = "xml"
+ super(BaseVolumeAdminTestXML, cls).setUpClass()
+ cls.client = volume_types_xml_client.\
+ VolumeTypesClientXML(cls.config, cls.adm_user, cls.adm_pass,
+ cls.auth_url, cls.adm_tenant)
diff --git a/tempest/tests/volume/admin/test_volume_types.py b/tempest/tests/volume/admin/test_volume_types.py
new file mode 100644
index 0000000..8ebb78f
--- /dev/null
+++ b/tempest/tests/volume/admin/test_volume_types.py
@@ -0,0 +1,156 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack, LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.common.utils.data_utils import rand_name
+from tempest.services.volume.json.admin import volume_types_client
+from tempest.tests.volume.base import BaseVolumeTest
+
+
+class VolumeTypesTest(BaseVolumeTest):
+
+ @classmethod
+ def setUpClass(cls):
+ super(VolumeTypesTest, cls).setUpClass()
+ adm_user = cls.config.compute_admin.username
+ adm_pass = cls.config.compute_admin.password
+ adm_tenant = cls.config.compute_admin.tenant_name
+ auth_url = cls.config.identity.auth_url
+
+ cls.client = volume_types_client.VolumeTypesClientJSON(cls.config,
+ adm_user,
+ adm_pass,
+ auth_url,
+ adm_tenant)
+
+ @classmethod
+ def tearDownClass(cls):
+ super(VolumeTypesTest, cls).tearDownClass()
+
+ def test_volume_type_list(self):
+ # List Volume types.
+ try:
+ resp, body = self.client.list_volume_types()
+ self.assertEqual(200, resp.status)
+            self.assertTrue(isinstance(body, list))
+ except Exception:
+ self.fail("Could not list volume types")
+
+ def test_create_get_delete_volume_with_volume_type_and_extra_specs(self):
+ # Create/get/delete volume with volume_type and extra spec.
+ try:
+ volume = {}
+ vol_name = rand_name("volume-")
+ vol_type_name = rand_name("volume-type-")
+ extra_specs = {"Spec1": "Val1", "Spec2": "Val2"}
+ body = {}
+ resp, body = self.client.create_volume_type(vol_type_name,
+ extra_specs=
+ extra_specs)
+ self.assertEqual(200, resp.status)
+ self.assertTrue('id' in body)
+ self.assertTrue('name' in body)
+ resp, volume = self.volumes_client.\
+ create_volume(size=1, display_name=vol_name,
+ volume_type=vol_type_name)
+ self.assertEqual(200, resp.status)
+ self.assertTrue('id' in volume)
+ self.assertTrue('display_name' in volume)
+ self.assertEqual(volume['display_name'], vol_name,
+ "The created volume name is not equal "
+ "to the requested name")
+ self.assertTrue(volume['id'] is not None,
+ "Field volume id is empty or not found.")
+ self.volumes_client.wait_for_volume_status(volume['id'],
+ 'available')
+ resp, fetched_volume = self.volumes_client.\
+ get_volume(volume['id'])
+ self.assertEqual(200, resp.status)
+ self.assertEqual(vol_name, fetched_volume['display_name'],
+ 'The fetched Volume is different '
+ 'from the created Volume')
+ self.assertEqual(volume['id'], fetched_volume['id'],
+ 'The fetched Volume is different '
+ 'from the created Volume')
+ self.assertEqual(vol_type_name, fetched_volume['volume_type'],
+ 'The fetched Volume is different '
+ 'from the created Volume')
+ except Exception:
+ self.fail("Could not create correct volume with volume_type")
+ finally:
+ if volume:
+ # Delete the Volume if it was created
+ resp, _ = self.volumes_client.delete_volume(volume['id'])
+ self.assertEqual(202, resp.status)
+
+ if body:
+ resp, _ = self.client.delete_volume_type(body['id'])
+ self.assertEqual(202, resp.status)
+
+ def test_volume_type_create_delete(self):
+ # Create/Delete volume type.
+ try:
+ name = rand_name("volume-type-")
+ extra_specs = {"Spec1": "Val1", "Spec2": "Val2"}
+ resp, body = self.client.\
+ create_volume_type(name, extra_specs=extra_specs)
+ self.assertEqual(200, resp.status)
+ self.assertTrue('id' in body)
+ self.assertTrue('name' in body)
+ self.assertEqual(body['name'], name,
+ "The created volume_type name is not equal "
+ "to the requested name")
+ self.assertTrue(body['id'] is not None,
+ "Field volume_type id is empty or not found.")
+ resp, fetched_volume_type = self.client.\
+ delete_volume_type(body['id'])
+ self.assertEqual(202, resp.status)
+ except Exception:
+ self.fail("Could not create a volume_type")
+
+ def test_volume_type_create_get(self):
+ # Create/get volume type.
+ try:
+ body = {}
+ name = rand_name("volume-type-")
+ extra_specs = {"Spec1": "Val1", "Spec2": "Val2"}
+ resp, body = self.client.\
+ create_volume_type(name, extra_specs=extra_specs)
+ self.assertEqual(200, resp.status)
+ self.assertTrue('id' in body)
+ self.assertTrue('name' in body)
+ self.assertEqual(body['name'], name,
+ "The created volume_type name is not equal "
+ "to the requested name")
+ self.assertTrue(body['id'] is not None,
+ "Field volume_type id is empty or not found.")
+ resp, fetched_volume_type = self.client.get_volume_type(body['id'])
+ self.assertEqual(200, resp.status)
+ self.assertEqual(name, fetched_volume_type['name'],
+ 'The fetched Volume_type is different '
+ 'from the created Volume_type')
+ self.assertEqual(str(body['id']), fetched_volume_type['id'],
+ 'The fetched Volume_type is different '
+ 'from the created Volume_type')
+ self.assertEqual(extra_specs, fetched_volume_type['extra_specs'],
+ 'The fetched Volume_type is different '
+ 'from the created Volume_type')
+ except Exception:
+ self.fail("Could not create a volume_type")
+ finally:
+ if body:
+ resp, _ = self.client.delete_volume_type(body['id'])
+ self.assertEqual(202, resp.status)
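
For readers unfamiliar with the volume-types API that create_volume_type wraps, the JSON body it sends presumably looks like the following; treat the exact shape as an assumption based on the Cinder API rather than something this diff confirms::

    import json

    # Assumed request body for POST /v1/<tenant_id>/types -- a sketch only.
    request_body = {
        "volume_type": {
            "name": "volume-type-ssd",
            "extra_specs": {"Spec1": "Val1", "Spec2": "Val2"},
        }
    }
    print json.dumps(request_body)
    # A 200 response is expected to echo the new type back, including its
    # generated "id", which the tests above read from body['id'].
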
diff --git a/tempest/tests/volume/admin/test_volume_types_extra_specs.py b/tempest/tests/volume/admin/test_volume_types_extra_specs.py
new file mode 100644
index 0000000..9734c42
--- /dev/null
+++ b/tempest/tests/volume/admin/test_volume_types_extra_specs.py
@@ -0,0 +1,109 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack, LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.common.utils.data_utils import rand_name
+from tempest.services.volume.json.admin import volume_types_client
+from tempest.tests.volume.base import BaseVolumeTest
+
+
+class VolumeTypesExtraSpecsTest(BaseVolumeTest):
+
+ @classmethod
+ def setUpClass(cls):
+ super(VolumeTypesExtraSpecsTest, cls).setUpClass()
+
+ adm_user = cls.config.compute_admin.username
+ adm_pass = cls.config.compute_admin.password
+ adm_tenant = cls.config.compute_admin.tenant_name
+ auth_url = cls.config.identity.auth_url
+
+ cls.client = volume_types_client.VolumeTypesClientJSON(cls.config,
+ adm_user,
+ adm_pass,
+ auth_url,
+ adm_tenant)
+
+ vol_type_name = rand_name('Volume-type-')
+ cls.extra_spec = {"spec1": "val1"}
+        resp, cls.volume_type = cls.client.create_volume_type(
+            vol_type_name, extra_specs=cls.extra_spec)
+
+ @classmethod
+ def tearDownClass(cls):
+ super(VolumeTypesExtraSpecsTest, cls).tearDownClass()
+ cls.client.delete_volume_type(cls.volume_type['id'])
+
+ def test_volume_type_extra_specs_list(self):
+ # List Volume types extra specs.
+ try:
+ resp, body = self.client.\
+ list_volume_types_extra_specs(self.volume_type['id'])
+ self.assertEqual(200, resp.status)
+            self.assertTrue(isinstance(body, dict))
+ self.assertTrue('spec1' in body, "Incorrect volume type extra"
+ " spec returned")
+ except Exception:
+ self.fail("Could not list volume types extra specs")
+
+ def test_volume_type_extra_specs_update(self):
+ # Update volume type extra specs
+ try:
+ extra_spec = {"spec1": "val2"}
+ resp, body = self.client.\
+ update_volume_type_extra_specs(self.volume_type['id'],
+ extra_spec.keys()[0],
+ extra_spec)
+ self.assertEqual(200, resp.status)
+ self.assertTrue('spec1' in body,
+ "Volume type extra spec incorrectly updated")
+ self.assertEqual(extra_spec['spec1'], body['spec1'],
+ "Volume type extra spec incorrectly updated")
+ except Exception:
+            self.fail("Could not update volume type extra spec")
+
+ def test_volume_type_extra_spec_create_delete(self):
+ # Create/Delete volume type extra spec.
+ try:
+ extra_specs = {"spec2": "val1"}
+ resp, body = self.client.\
+ create_volume_type_extra_specs(self.volume_type['id'], extra_specs)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(extra_specs, body,
+ "Volume type extra spec incorrectly created")
+ resp, _ = self.client.\
+ delete_volume_type_extra_specs(self.volume_type['id'],
+ extra_specs.keys()[0])
+ self.assertEqual(202, resp.status)
+ except Exception:
+ self.fail("Could not create a volume_type extra spec")
+
+ def test_volume_type_extra_spec_create_get(self):
+ # Create/get volume type extra spec
+ try:
+ extra_specs = {"spec1": "val1"}
+ resp, body = self.client.\
+ create_volume_type_extra_specs(self.volume_type['id'], extra_specs)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(extra_specs, body,
+ "Volume type extra spec incorrectly created")
+ resp, fetched_vol_type_extra_spec = self.client.\
+ get_volume_type_extra_specs(self.volume_type['id'],
+ extra_specs.keys()[0])
+ self.assertEqual(200, resp.status)
+            self.assertEqual(extra_specs, fetched_vol_type_extra_spec,
+ "Volume type extra spec incorrectly fetched")
+ except Exception:
+ self.fail("Could not create a volume_type extra spec")
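
The extra-specs client calls above presumably map onto a sub-resource of the volume type, with the spec key appearing in the URL for get, update and delete; the layout below is an assumption, but it also explains why the negative tests in the next file expect BadRequest when an update body carries more than one spec::

    # Assumed endpoint layout for volume-type extra specs (sketch only).
    type_id = 'TYPE_ID'
    endpoints = [
        ('GET',    '/types/%s/extra_specs' % type_id),        # list all specs
        ('POST',   '/types/%s/extra_specs' % type_id),        # create, body {"extra_specs": {...}}
        ('GET',    '/types/%s/extra_specs/spec1' % type_id),  # get one key
        ('PUT',    '/types/%s/extra_specs/spec1' % type_id),  # update, body {"spec1": "val2"}
        ('DELETE', '/types/%s/extra_specs/spec1' % type_id),  # delete one key
    ]
    for method, path in endpoints:
        print method, path
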
diff --git a/tempest/tests/volume/admin/test_volume_types_extra_specs_negative.py b/tempest/tests/volume/admin/test_volume_types_extra_specs_negative.py
new file mode 100644
index 0000000..d139425
--- /dev/null
+++ b/tempest/tests/volume/admin/test_volume_types_extra_specs_negative.py
@@ -0,0 +1,164 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack, LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import unittest
+import uuid
+
+from nose.plugins.attrib import attr
+from nose.tools import raises
+
+from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
+from tempest.tests.volume.admin.base import BaseVolumeAdminTestJSON
+from tempest.tests.volume.admin.base import BaseVolumeAdminTestXML
+
+
+class ExtraSpecsNegativeTestBase():
+
+ @staticmethod
+ def setUpClass(cls):
+ cls.client = cls.client
+ vol_type_name = rand_name('Volume-type-')
+ cls.extra_specs = {"spec1": "val1"}
+ resp, cls.volume_type = cls.client.create_volume_type(vol_type_name,
+ extra_specs=
+ cls.extra_specs)
+
+ @staticmethod
+ def tearDownClass(cls):
+ cls.client.delete_volume_type(cls.volume_type['id'])
+
+ @unittest.skip('Until bug 1090320 is fixed')
+ @raises(exceptions.BadRequest)
+ @attr(type='negative')
+ def test_update_no_body(self):
+ # Should not update volume type extra specs with no body
+ extra_spec = {"spec1": "val2"}
+ self.client.update_volume_type_extra_specs(self.volume_type['id'],
+ extra_spec.keys()[0],
+ None)
+
+ @raises(exceptions.BadRequest)
+ @attr(type='negative')
+ def test_update_nonexistent_extra_spec_id(self):
+ # Should not update volume type extra specs with nonexistent id.
+ extra_spec = {"spec1": "val2"}
+ self.client.update_volume_type_extra_specs(self.volume_type['id'],
+ str(uuid.uuid4()),
+ extra_spec)
+
+ @raises(exceptions.BadRequest)
+ @attr(type='negative')
+ def test_update_none_extra_spec_id(self):
+        # Should not update volume type extra specs with a None id.
+ extra_spec = {"spec1": "val2"}
+ self.client.update_volume_type_extra_specs(self.volume_type['id'],
+ None, extra_spec)
+
+ @raises(exceptions.BadRequest)
+ @attr(type='negative')
+ def test_update_multiple_extra_spec(self):
+ # Should not update volume type extra specs with multiple specs as
+ # body.
+ extra_spec = {"spec1": "val2", 'spec2': 'val1'}
+ self.client.update_volume_type_extra_specs(self.volume_type['id'],
+ extra_spec.keys()[0],
+ extra_spec)
+
+ @raises(exceptions.NotFound)
+ @attr(type='negative')
+ def test_create_nonexistent_type_id(self):
+ # Should not create volume type extra spec for nonexistent volume
+ # type id.
+ extra_specs = {"spec2": "val1"}
+ self.client.create_volume_type_extra_specs(str(uuid.uuid4()),
+ extra_specs)
+
+ @unittest.skip('Until bug 1090322 is fixed')
+ @raises(exceptions.BadRequest)
+ @attr(type='negative')
+ def test_create_none_body(self):
+        # Should not create volume type extra spec for a None POST body.
+ self.client.create_volume_type_extra_specs(self.volume_type['id'],
+ None)
+
+ @unittest.skip('Until bug 1090322 is fixed')
+ @raises(exceptions.BadRequest)
+ @attr(type='negative')
+ def test_create_invalid_body(self):
+ # Should not create volume type extra spec for invalid POST body.
+ self.client.create_volume_type_extra_specs(self.volume_type['id'],
+ ['invalid'])
+
+ @raises(exceptions.NotFound)
+ @attr(type='negative')
+ def test_delete_nonexistent_volume_type_id(self):
+ # Should not delete volume type extra spec for nonexistent
+ # type id.
+ extra_specs = {"spec1": "val1"}
+ self.client.delete_volume_type_extra_specs(str(uuid.uuid4()),
+ extra_specs.keys()[0])
+
+ @raises(exceptions.NotFound)
+ @attr(type='negative')
+ def test_list_nonexistent_volume_type_id(self):
+ # Should not list volume type extra spec for nonexistent type id.
+ self.client.list_volume_types_extra_specs(str(uuid.uuid4()))
+
+ @raises(exceptions.NotFound)
+ @attr(type='negative')
+ def test_get_nonexistent_volume_type_id(self):
+ # Should not get volume type extra spec for nonexistent type id.
+ extra_specs = {"spec1": "val1"}
+ self.client.get_volume_type_extra_specs(str(uuid.uuid4()),
+ extra_specs.keys()[0])
+
+ @raises(exceptions.NotFound)
+ @attr(type='negative')
+ def test_get_nonexistent_extra_spec_id(self):
+ # Should not get volume type extra spec for nonexistent extra spec
+ # id.
+ self.client.get_volume_type_extra_specs(self.volume_type['id'],
+ str(uuid.uuid4()))
+
+
+class ExtraSpecsNegativeTestXML(BaseVolumeAdminTestXML,
+ ExtraSpecsNegativeTestBase):
+
+ @classmethod
+ def setUpClass(cls):
+ super(ExtraSpecsNegativeTestXML, cls).setUpClass()
+ ExtraSpecsNegativeTestBase.setUpClass(cls)
+
+ @classmethod
+ def tearDownClass(cls):
+ super(ExtraSpecsNegativeTestXML, cls).tearDownClass()
+ ExtraSpecsNegativeTestBase.tearDownClass(cls)
+
+
+class ExtraSpecsNegativeTestJSON(BaseVolumeAdminTestJSON,
+ ExtraSpecsNegativeTestBase):
+
+ @classmethod
+ def setUpClass(cls):
+ super(ExtraSpecsNegativeTestJSON, cls).setUpClass()
+ ExtraSpecsNegativeTestBase.setUpClass(cls)
+
+ @classmethod
+ def tearDownClass(cls):
+ super(ExtraSpecsNegativeTestJSON, cls).tearDownClass()
+ ExtraSpecsNegativeTestBase.tearDownClass(cls)
diff --git a/tempest/tests/volume/admin/test_volume_types_negative.py b/tempest/tests/volume/admin/test_volume_types_negative.py
new file mode 100644
index 0000000..c2daef9
--- /dev/null
+++ b/tempest/tests/volume/admin/test_volume_types_negative.py
@@ -0,0 +1,76 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack, LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import unittest
+import uuid
+
+from nose.plugins.attrib import attr
+from nose.tools import raises
+
+from tempest import exceptions
+from tempest.tests.volume.admin.base import BaseVolumeAdminTestJSON
+from tempest.tests.volume.admin.base import BaseVolumeAdminTestXML
+
+
+class VolumeTypesNegativeTestBase():
+
+ @staticmethod
+ def setUpClass(cls):
+ cls.client = cls.client
+
+ @raises(exceptions.NotFound)
+ @attr(type='negative')
+ def test_create_with_nonexistent_volume_type(self):
+ # Should not be able to create volume with nonexistent volume_type.
+ self.volumes_client.create_volume(size=1,
+ display_name=str(uuid.uuid4()),
+ volume_type=str(uuid.uuid4()))
+
+ @unittest.skip('Until bug 1090356 is fixed')
+ @raises(exceptions.BadRequest)
+ @attr(type='negative')
+ def test_create_with_empty_name(self):
+ # Should not be able to create volume type with an empty name.
+ self.client.create_volume_type('')
+
+ @raises(exceptions.NotFound)
+ @attr(type='negative')
+ def test_get_nonexistent_type_id(self):
+ # Should not be able to get volume type with nonexistent type id.
+ self.client.get_volume_type(str(uuid.uuid4()))
+
+ @raises(exceptions.NotFound)
+ @attr(type='negative')
+ def test_delete_nonexistent_type_id(self):
+ # Should not be able to delete volume type with nonexistent type id.
+ self.client.delete_volume_type(str(uuid.uuid4()))
+
+
+class VolumesTypesNegativeTestXML(BaseVolumeAdminTestXML,
+ VolumeTypesNegativeTestBase):
+ @classmethod
+ def setUpClass(cls):
+ super(VolumesTypesNegativeTestXML, cls).setUpClass()
+ VolumeTypesNegativeTestBase.setUpClass(cls)
+
+
+class VolumesTypesNegativeTestJSON(BaseVolumeAdminTestJSON,
+ VolumeTypesNegativeTestBase):
+ @classmethod
+ def setUpClass(cls):
+ super(VolumesTypesNegativeTestJSON, cls).setUpClass()
+ VolumeTypesNegativeTestBase.setUpClass(cls)
diff --git a/tempest/tests/volume/base.py b/tempest/tests/volume/base.py
index 6af4bbf..8657db8 100644
--- a/tempest/tests/volume/base.py
+++ b/tempest/tests/volume/base.py
@@ -17,13 +17,13 @@
import logging
import time
-import nose
+import nose
import unittest2 as unittest
-from tempest import config
-from tempest import openstack
+from tempest import clients
from tempest.common.utils.data_utils import rand_name
+from tempest import config
from tempest import exceptions
LOG = logging.getLogger(__name__)
@@ -31,7 +31,7 @@
class BaseVolumeTest(unittest.TestCase):
- """Base test case class for all Cinder API tests"""
+ """Base test case class for all Cinder API tests."""
@classmethod
def setUpClass(cls):
@@ -41,11 +41,11 @@
if cls.config.compute.allow_tenant_isolation:
creds = cls._get_isolated_creds()
username, tenant_name, password = creds
- os = openstack.Manager(username=username,
- password=password,
- tenant_name=tenant_name)
+ os = clients.Manager(username=username,
+ password=password,
+ tenant_name=tenant_name)
else:
- os = openstack.Manager()
+ os = clients.Manager()
cls.os = os
cls.volumes_client = os.volumes_client
@@ -73,7 +73,7 @@
"""
Returns an instance of the Identity Admin API client
"""
- os = openstack.IdentityManager()
+ os = clients.IdentityManager()
return os.admin_client
@classmethod
@@ -122,7 +122,7 @@
cls.clear_isolated_creds()
def create_volume(self, size=1, metadata={}):
- """Wrapper utility that returns a test volume"""
+ """Wrapper utility that returns a test volume."""
display_name = rand_name(self.__class__.__name__ + "-volume")
cli_resp = self.volumes_client.create_volume(size=size,
display_name=display_name,
@@ -133,12 +133,12 @@
return volume
def wait_for(self, condition):
- """Repeatedly calls condition() until a timeout"""
+ """Repeatedly calls condition() until a timeout."""
start_time = int(time.time())
while True:
try:
condition()
- except:
+ except Exception:
pass
else:
return
diff --git a/tempest/tests/volume/test_volumes_actions.py b/tempest/tests/volume/test_volumes_actions.py
index f76235d..7eddb67 100644
--- a/tempest/tests/volume/test_volumes_actions.py
+++ b/tempest/tests/volume/test_volumes_actions.py
@@ -50,7 +50,7 @@
@attr(type='smoke')
def test_attach_detach_volume_to_instance(self):
- """Volume is attached and detached successfully from an instance"""
+ # Volume is attached and detached successfully from an instance
try:
mountpoint = '/dev/vdc'
resp, body = self.client.attach_volume(self.volume['id'],
@@ -58,7 +58,7 @@
mountpoint)
self.assertEqual(202, resp.status)
self.client.wait_for_volume_status(self.volume['id'], 'in-use')
- except:
+ except Exception:
self.fail("Could not attach volume to instance")
finally:
# Detach the volume from the instance
@@ -67,7 +67,7 @@
self.client.wait_for_volume_status(self.volume['id'], 'available')
def test_get_volume_attachment(self):
- """Verify that a volume's attachment information is retrieved"""
+ # Verify that a volume's attachment information is retrieved
mountpoint = '/dev/vdc'
resp, body = self.client.attach_volume(self.volume['id'],
self.server['id'],
@@ -83,7 +83,7 @@
self.assertEqual(self.server['id'], attachment['server_id'])
self.assertEqual(self.volume['id'], attachment['id'])
self.assertEqual(self.volume['id'], attachment['volume_id'])
- except:
+ except Exception:
self.fail("Could not get attachment details from volume")
finally:
self.client.detach_volume(self.volume['id'])
diff --git a/tempest/tests/volume/test_volumes_get.py b/tempest/tests/volume/test_volumes_get.py
index fa8e86e..bc64ff4 100644
--- a/tempest/tests/volume/test_volumes_get.py
+++ b/tempest/tests/volume/test_volumes_get.py
@@ -25,7 +25,7 @@
@attr(type='smoke')
def test_volume_create_get_delete(self):
- """Create a volume, Get it's details and Delete the volume"""
+        # Create a volume, get its details and delete the volume
try:
volume = {}
v_name = rand_name('Volume-')
@@ -58,7 +58,7 @@
fetched_volume['metadata'],
'The fetched Volume is different '
'from the created Volume')
- except:
+ except Exception:
self.fail("Could not create a volume")
finally:
if volume:
@@ -69,7 +69,7 @@
@attr(type='positive')
def test_volume_get_metadata_none(self):
- """Create a volume without passing metadata, get details, and delete"""
+ # Create a volume without passing metadata, get details, and delete
try:
volume = {}
v_name = rand_name('Volume-')
@@ -85,7 +85,7 @@
resp, fetched_volume = self.client.get_volume(volume['id'])
self.assertEqual(200, resp.status)
self.assertEqual(fetched_volume['metadata'], {})
- except:
+ except Exception:
self.fail("Could not get volume metadata")
finally:
if volume:
diff --git a/tempest/tests/volume/test_volumes_list.py b/tempest/tests/volume/test_volumes_list.py
index e9bafaf..26a85b7 100644
--- a/tempest/tests/volume/test_volumes_list.py
+++ b/tempest/tests/volume/test_volumes_list.py
@@ -34,7 +34,7 @@
@attr(type='smoke')
def test_volume_list(self):
- """Get a list of Volumes"""
+ # Get a list of Volumes
# Fetch all volumes
resp, fetched_list = self.client.list_volumes()
self.assertEqual(200, resp.status)
@@ -47,7 +47,7 @@
@attr(type='smoke')
def test_volume_list_with_details(self):
- """Get a list of Volumes with details"""
+ # Get a list of Volumes with details
# Fetch all Volumes
resp, fetched_list = self.client.list_volumes_with_detail()
self.assertEqual(200, resp.status)
@@ -80,7 +80,7 @@
resp, volume = cls.client.get_volume(volume['id'])
cls.volume_list.append(volume)
cls.volume_id_list.append(volume['id'])
- except:
+ except Exception:
if cls.volume_list:
# We could not create all the volumes, though we were able
# to create *some* of the volumes. This is typically
@@ -126,7 +126,7 @@
resp, volume = cls.client.get_volume(volume['id'])
cls.volume_list.append(volume)
cls.volume_id_list.append(volume['id'])
- except:
+ except Exception:
if cls.volume_list:
# We could not create all the volumes, though we were able
# to create *some* of the volumes. This is typically
diff --git a/tempest/tests/volume/test_volumes_negative.py b/tempest/tests/volume/test_volumes_negative.py
index bf7e5f0..6bd7002 100644
--- a/tempest/tests/volume/test_volumes_negative.py
+++ b/tempest/tests/volume/test_volumes_negative.py
@@ -18,8 +18,8 @@
from nose.plugins.attrib import attr
from nose.tools import raises
-from tempest import exceptions
from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
from tempest.tests.volume import base
@@ -28,7 +28,7 @@
@raises(exceptions.NotFound)
@attr(type='negative')
def test_volume_get_nonexistant_volume_id(self):
- """Should not be able to get a nonexistant volume"""
+        # Should not be able to get a nonexistent volume
#Creating a nonexistant volume id
volume_id_list = []
resp, volumes = self.client.list_volumes()
@@ -44,7 +44,7 @@
@raises(exceptions.NotFound)
@attr(type='negative')
def test_volume_delete_nonexistant_volume_id(self):
- """Should not be able to delete a nonexistant Volume"""
+        # Should not be able to delete a nonexistent volume
# Creating nonexistant volume id
volume_id_list = []
resp, volumes = self.client.list_volumes()
@@ -60,10 +60,8 @@
@raises(exceptions.BadRequest)
@attr(type='negative')
def test_create_volume_with_invalid_size(self):
- """
- Should not be able to create volume with invalid size
- in request
- """
+ # Should not be able to create volume with invalid size
+ # in request
v_name = rand_name('Volume-')
metadata = {'Type': 'work'}
resp, volume = self.client.create_volume(size='#$%',
@@ -73,10 +71,8 @@
@raises(exceptions.BadRequest)
@attr(type='negative')
def test_create_volume_with_out_passing_size(self):
- """
- Should not be able to create volume without passing size
- in request
- """
+ # Should not be able to create volume without passing size
+ # in request
v_name = rand_name('Volume-')
metadata = {'Type': 'work'}
resp, volume = self.client.create_volume(size='',
@@ -86,9 +82,7 @@
@raises(exceptions.BadRequest)
@attr(type='negative')
def test_create_volume_with_size_zero(self):
- """
- Should not be able to create volume with size zero
- """
+ # Should not be able to create volume with size zero
v_name = rand_name('Volume-')
metadata = {'Type': 'work'}
resp, volume = self.client.create_volume(size='0',
@@ -98,33 +92,25 @@
@raises(exceptions.NotFound)
@attr(type='negative')
def test_get_invalid_volume_id(self):
- """
- Should not be able to get volume with invalid id
- """
+ # Should not be able to get volume with invalid id
resp, volume = self.client.get_volume('#$%%&^&^')
@raises(exceptions.NotFound)
@attr(type='negative')
def test_get_volume_without_passing_volume_id(self):
- """
- Should not be able to get volume when empty ID is passed
- """
+ # Should not be able to get volume when empty ID is passed
resp, volume = self.client.get_volume('')
@raises(exceptions.NotFound)
@attr(type='negative')
def test_delete_invalid_volume_id(self):
- """
- Should not be able to delete volume when invalid ID is passed
- """
+ # Should not be able to delete volume when invalid ID is passed
resp, volume = self.client.delete_volume('!@#$%^&*()')
@raises(exceptions.NotFound)
@attr(type='negative')
def test_delete_volume_without_passing_volume_id(self):
- """
- Should not be able to delete volume when empty ID is passed
- """
+ # Should not be able to delete volume when empty ID is passed
resp, volume = self.client.delete_volume('')
diff --git a/tempest/whitebox.py b/tempest/whitebox.py
index 2711903..d78b9e0 100644
--- a/tempest/whitebox.py
+++ b/tempest/whitebox.py
@@ -17,15 +17,15 @@
import logging
import os
-import sys
import shlex
import subprocess
+import sys
import nose
from sqlalchemy import create_engine, MetaData
-from tempest.common.utils.data_utils import rand_name
from tempest.common.ssh import Client
+from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
from tempest import test
from tempest.tests import compute
@@ -106,7 +106,7 @@
@classmethod
def create_server(cls, image_id=None):
- """Wrapper utility that returns a test server"""
+ """Wrapper utility that returns a test server."""
server_name = rand_name(cls.__name__ + "-instance")
flavor = cls.flavor_ref
if not image_id:
@@ -120,7 +120,7 @@
@classmethod
def get_db_handle_and_meta(cls, database='nova'):
- """Return a connection handle and metadata of an OpenStack database"""
+ """Return a connection handle and metadata of an OpenStack database."""
engine_args = {"echo": False,
"convert_unicode": True,
"pool_recycle": 3600
@@ -138,7 +138,7 @@
return connection, meta
def nova_manage(self, category, action, params):
- """Executes nova-manage command for the given action"""
+ """Executes nova-manage command for the given action."""
nova_manage_path = os.path.join(self.compute_bin_dir, 'nova-manage')
cmd = ' '.join([nova_manage_path, category, action, params])
@@ -161,7 +161,7 @@
return result
def get_ssh_connection(self, host, username, password):
- """Create an SSH connection object to a host"""
+ """Create an SSH connection object to a host."""
ssh_timeout = self.config.compute.ssh_timeout
ssh_client = Client(host, username, password, ssh_timeout)
if not ssh_client.test_connection_auth():
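
Only the docstrings and import order of the whitebox helpers change here; for context, get_db_handle_and_meta builds the handle it returns with SQLAlchemy roughly as sketched below. The connection URI and credentials are placeholders, and in tempest the real URI comes from the whitebox configuration::

    from sqlalchemy import MetaData, create_engine

    engine_args = {"echo": False,
                   "convert_unicode": True,
                   "pool_recycle": 3600}
    # Assumed MySQL URI; substitute the real connection string from config.
    engine = create_engine("mysql://nova:secret@127.0.0.1/nova", **engine_args)
    connection = engine.connect()
    meta = MetaData()
    meta.reflect(bind=engine)   # load table definitions from the live database
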
diff --git a/tools/hacking.py b/tools/hacking.py
new file mode 100755
index 0000000..617682d
--- /dev/null
+++ b/tools/hacking.py
@@ -0,0 +1,502 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012, Cloudscaling
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""tempest HACKING file compliance testing
+
+built on top of pep8.py
+"""
+
+import fnmatch
+import inspect
+import logging
+import os
+import re
+import subprocess
+import sys
+import tokenize
+import warnings
+
+import pep8
+
+# Don't need this for testing
+logging.disable('LOG')
+
+#T1xx comments
+#T2xx except
+#T3xx imports
+#T4xx docstrings
+#T5xx dictionaries/lists
+#T6xx calling methods
+#T7xx localization
+#N8xx git commit messages
+
+IMPORT_EXCEPTIONS = ['sqlalchemy', 'migrate']
+DOCSTRING_TRIPLE = ['"""', "'''"]
+VERBOSE_MISSING_IMPORT = os.getenv('HACKING_VERBOSE_MISSING_IMPORT', 'False')
+
+
+# Monkey patch broken excluded filter in pep8
+# See https://github.com/jcrocholl/pep8/pull/111
+def excluded(self, filename):
+ """
+ Check if options.exclude contains a pattern that matches filename.
+ """
+ basename = os.path.basename(filename)
+ return any((pep8.filename_match(filename, self.options.exclude,
+ default=False),
+ pep8.filename_match(basename, self.options.exclude,
+ default=False)))
+
+
+def input_dir(self, dirname):
+ """Check all files in this directory and all subdirectories."""
+ dirname = dirname.rstrip('/')
+ if self.excluded(dirname):
+ return 0
+ counters = self.options.report.counters
+ verbose = self.options.verbose
+ filepatterns = self.options.filename
+ runner = self.runner
+ for root, dirs, files in os.walk(dirname):
+ if verbose:
+ print('directory ' + root)
+ counters['directories'] += 1
+ for subdir in sorted(dirs):
+ if self.excluded(os.path.join(root, subdir)):
+ dirs.remove(subdir)
+ for filename in sorted(files):
+ # contain a pattern that matches?
+ if ((pep8.filename_match(filename, filepatterns) and
+ not self.excluded(filename))):
+ runner(os.path.join(root, filename))
+
+
+def is_import_exception(mod):
+ return (mod in IMPORT_EXCEPTIONS or
+ any(mod.startswith(m + '.') for m in IMPORT_EXCEPTIONS))
+
+
+def import_normalize(line):
+ # convert "from x import y" to "import x.y"
+ # handle "from x import y as z" to "import x.y as z"
+ split_line = line.split()
+ if ("import" in line and line.startswith("from ") and "," not in line and
+ split_line[2] == "import" and split_line[3] != "*" and
+ split_line[1] != "__future__" and
+ (len(split_line) == 4 or
+ (len(split_line) == 6 and split_line[4] == "as"))):
+ return "import %s.%s" % (split_line[1], split_line[3])
+ else:
+ return line
+
+
+def tempest_todo_format(physical_line):
+ """Check for 'TODO()'.
+
+ tempest HACKING guide recommendation for TODO:
+ Include your name with TODOs as in "#TODO(termie)"
+ T101
+ """
+ pos = physical_line.find('TODO')
+ pos1 = physical_line.find('TODO(')
+ pos2 = physical_line.find('#') # make sure it's a comment
+ if (pos != pos1 and pos2 >= 0 and pos2 < pos):
+ return pos, "T101: Use TODO(NAME)"
+
+
+def tempest_except_format(logical_line):
+ """Check for 'except:'.
+
+ tempest HACKING guide recommends not using except:
+ Do not write "except:", use "except Exception:" at the very least
+ T201
+ """
+ if logical_line.startswith("except:"):
+ yield 6, "T201: no 'except:' at least use 'except Exception:'"
+
+
+def tempest_except_format_assert(logical_line):
+ """Check for 'assertRaises(Exception'.
+
+ tempest HACKING guide recommends not using assertRaises(Exception...):
+ Do not use overly broad Exception type
+ T202
+ """
+ if logical_line.startswith("self.assertRaises(Exception"):
+ yield 1, "T202: assertRaises Exception too broad"
+
+
+def tempest_one_import_per_line(logical_line):
+ """Check for import format.
+
+ tempest HACKING guide recommends one import per line:
+ Do not import more than one module per line
+
+ Examples:
+ BAD: from tempest.common.rest_client import RestClient, RestClientXML
+ T301
+ """
+ pos = logical_line.find(',')
+ parts = logical_line.split()
+ if (pos > -1 and (parts[0] == "import" or
+ parts[0] == "from" and parts[2] == "import") and
+ not is_import_exception(parts[1])):
+ yield pos, "T301: one import per line"
+
+_missingImport = set([])
+
+
+def tempest_import_module_only(logical_line):
+ """Check for import module only.
+
+ tempest HACKING guide recommends importing only modules:
+ Do not import objects, only modules
+ T302 import only modules
+ T303 Invalid Import
+ T304 Relative Import
+ """
+ def importModuleCheck(mod, parent=None, added=False):
+ """
+ If can't find module on first try, recursively check for relative
+ imports
+ """
+ current_path = os.path.dirname(pep8.current_file)
+ try:
+ with warnings.catch_warnings():
+ warnings.simplefilter('ignore', DeprecationWarning)
+ valid = True
+ if parent:
+ if is_import_exception(parent):
+ return
+ parent_mod = __import__(parent, globals(), locals(),
+ [mod], -1)
+ valid = inspect.ismodule(getattr(parent_mod, mod))
+ else:
+ __import__(mod, globals(), locals(), [], -1)
+ valid = inspect.ismodule(sys.modules[mod])
+ if not valid:
+ if added:
+ sys.path.pop()
+ added = False
+ return logical_line.find(mod), ("T304: No "
+ "relative imports. "
+ "'%s' is a relative "
+ "import"
+ % logical_line)
+ return logical_line.find(mod), ("T302: import only"
+ " modules. '%s' does not "
+ "import a module"
+ % logical_line)
+
+ except (ImportError, NameError) as exc:
+ if not added:
+ added = True
+ sys.path.append(current_path)
+ return importModuleCheck(mod, parent, added)
+ else:
+ name = logical_line.split()[1]
+ if name not in _missingImport:
+ if VERBOSE_MISSING_IMPORT != 'False':
+ print >> sys.stderr, ("ERROR: import '%s' in %s "
+ "failed: %s" %
+ (name, pep8.current_file, exc))
+ _missingImport.add(name)
+ added = False
+ sys.path.pop()
+ return
+
+ except AttributeError:
+ # Invalid import
+ return logical_line.find(mod), ("T303: Invalid import, "
+ "AttributeError raised")
+
+ # convert "from x import y" to " import x.y"
+ # convert "from x import y as z" to " import x.y"
+ import_normalize(logical_line)
+ split_line = logical_line.split()
+
+ if (logical_line.startswith("import ") and "," not in logical_line and
+ (len(split_line) == 2 or
+ (len(split_line) == 4 and split_line[2] == "as"))):
+ mod = split_line[1]
+ rval = importModuleCheck(mod)
+ if rval is not None:
+ yield rval
+
+ # TODO(jogo) handle "from x import *"
+
+#TODO(jogo): import template: T305
+
+
+def tempest_import_alphabetical(logical_line, line_number, lines):
+ """Check for imports in alphabetical order.
+
+ Tempest HACKING guide recommendation for imports:
+ imports in human alphabetical order
+ T306
+ """
+ # handle import x
+ # use .lower since capitalization shouldn't dictate order
+ split_line = import_normalize(logical_line.strip()).lower().split()
+ split_previous = import_normalize(lines[
+ line_number - 2]).strip().lower().split()
+ # with or without "as y"
+ length = [2, 4]
+ if (len(split_line) in length and len(split_previous) in length and
+ split_line[0] == "import" and split_previous[0] == "import"):
+ if split_line[1] < split_previous[1]:
+ yield (0, "T306: imports not in alphabetical order"
+ " (%s, %s)"
+ % (split_previous[1], split_line[1]))
+
+
+def tempest_docstring_start_space(physical_line):
+ """Check for docstring not start with space.
+
+ tempest HACKING guide recommendation for docstring:
+ Docstring should not start with space
+ T401
+ """
+ pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
+ end = max([physical_line[-4:-1] == i for i in DOCSTRING_TRIPLE]) # end
+ if (pos != -1 and end and len(physical_line) > pos + 4):
+ if (physical_line[pos + 3] == ' '):
+ return (pos, "T401: one line docstring should not start"
+ " with a space")
+
+
+def tempest_docstring_one_line(physical_line):
+ """Check one line docstring end.
+
+ tempest HACKING guide recommendation for one line docstring:
+ A one line docstring looks like this and ends in a period.
+ T402
+ """
+ pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
+ end = max([physical_line[-4:-1] == i for i in DOCSTRING_TRIPLE]) # end
+ if (pos != -1 and end and len(physical_line) > pos + 4):
+ if (physical_line[-5] != '.'):
+ return pos, "T402: one line docstring needs a period"
+
+
+def tempest_docstring_multiline_end(physical_line):
+ """Check multi line docstring end.
+
+ Tempest HACKING guide recommendation for docstring:
+ Docstring should end on a new line
+ T403
+ """
+ pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
+ if (pos != -1 and len(physical_line) == pos):
+ if (physical_line[pos + 3] == ' '):
+ return (pos, "T403: multi line docstring end on new line")
+
+
+def tempest_no_test_docstring(physical_line, previous_logical, filename):
+ """Check that test_ functions don't have docstrings
+
+    This ensures we get better results out of tempest, instead
+ of them being hidden behind generic descriptions of the
+ functions.
+
+ T404
+ """
+ if "tempest/test" in filename:
+ pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE])
+ if pos != -1:
+ if previous_logical.startswith("def test_"):
+ return (pos, "T404: test functions must "
+ "not have doc strings")
+
+
+FORMAT_RE = re.compile("%(?:"
+ "%|" # Ignore plain percents
+ "(\(\w+\))?" # mapping key
+ "([#0 +-]?" # flag
+ "(?:\d+|\*)?" # width
+ "(?:\.\d+)?" # precision
+ "[hlL]?" # length mod
+ "\w))") # type
+
+
+class LocalizationError(Exception):
+ pass
+
+
+def check_i18n():
+ """Generator that checks token stream for localization errors.
+
+ Expects tokens to be ``send``ed one by one.
+ Raises LocalizationError if some error is found.
+ """
+ while True:
+ try:
+ token_type, text, _, _, line = yield
+ except GeneratorExit:
+ return
+ if (token_type == tokenize.NAME and text == "_" and
+ not line.startswith('def _(msg):')):
+
+ while True:
+ token_type, text, start, _, _ = yield
+ if token_type != tokenize.NL:
+ break
+ if token_type != tokenize.OP or text != "(":
+ continue # not a localization call
+
+ format_string = ''
+ while True:
+ token_type, text, start, _, _ = yield
+ if token_type == tokenize.STRING:
+ format_string += eval(text)
+ elif token_type == tokenize.NL:
+ pass
+ else:
+ break
+
+ if not format_string:
+ raise LocalizationError(start,
+ "T701: Empty localization "
+ "string")
+ if token_type != tokenize.OP:
+ raise LocalizationError(start,
+ "T701: Invalid localization "
+ "call")
+ if text != ")":
+ if text == "%":
+ raise LocalizationError(start,
+ "T702: Formatting "
+ "operation should be outside"
+ " of localization method call")
+ elif text == "+":
+ raise LocalizationError(start,
+ "T702: Use bare string "
+ "concatenation instead of +")
+ else:
+ raise LocalizationError(start,
+ "T702: Argument to _ must"
+ " be just a string")
+
+ format_specs = FORMAT_RE.findall(format_string)
+ positional_specs = [(key, spec) for key, spec in format_specs
+ if not key and spec]
+ # not spec means %%, key means %(smth)s
+ if len(positional_specs) > 1:
+ raise LocalizationError(start,
+ "T703: Multiple positional "
+ "placeholders")
+
+
+def tempest_localization_strings(logical_line, tokens):
+ """Check localization in line.
+
+ T701: bad localization call
+ T702: complex expression instead of string as argument to _()
+ T703: multiple positional placeholders
+ """
+
+ gen = check_i18n()
+ next(gen)
+ try:
+ map(gen.send, tokens)
+ gen.close()
+ except LocalizationError as e:
+ yield e.args
+
+#TODO(jogo) Dict and list objects
+
+current_file = ""
+
+
+def readlines(filename):
+ """Record the current file being tested."""
+ pep8.current_file = filename
+ return open(filename).readlines()
+
+
+def add_tempest():
+ """Monkey patch in tempest guidelines.
+
+ Look for functions that start with tempest_ and have arguments
+ and add them to pep8 module
+ Assumes you know how to write pep8.py checks
+ """
+ for name, function in globals().items():
+ if not inspect.isfunction(function):
+ continue
+ args = inspect.getargspec(function)[0]
+ if args and name.startswith("tempest"):
+ exec("pep8.%s = %s" % (name, name))
+
+
+def once_git_check_commit_title():
+ """Check git commit messages.
+
+ tempest HACKING recommends not referencing a bug or blueprint
+ in first line, it should provide an accurate description of the change
+ T801
+    T802 Title limited to 72 chars
+ """
+ #Get title of most recent commit
+
+ subp = subprocess.Popen(['git', 'log', '--no-merges', '--pretty=%s', '-1'],
+ stdout=subprocess.PIPE)
+ title = subp.communicate()[0]
+ if subp.returncode:
+ raise Exception("git log failed with code %s" % subp.returncode)
+
+ #From https://github.com/openstack/openstack-ci-puppet
+ # /blob/master/modules/gerrit/manifests/init.pp#L74
+ #Changeid|bug|blueprint
+ git_keywords = (r'(I[0-9a-f]{8,40})|'
+ '([Bb]ug|[Ll][Pp])[\s\#:]*(\d+)|'
+ '([Bb]lue[Pp]rint|[Bb][Pp])[\s\#:]*([A-Za-z0-9\\-]+)')
+ GIT_REGEX = re.compile(git_keywords)
+
+ error = False
+ #NOTE(jogo) if match regex but over 3 words, acceptable title
+ if GIT_REGEX.search(title) is not None and len(title.split()) <= 3:
+ print ("T801: git commit title ('%s') should provide an accurate "
+ "description of the change, not just a reference to a bug "
+ "or blueprint" % title.strip())
+ error = True
+ if len(title.decode('utf-8')) > 72:
+    print ("T802: git commit title ('%s') should be under 72 chars"
+ % title.strip())
+ error = True
+ return error
+
+if __name__ == "__main__":
+ #include tempest path
+ sys.path.append(os.getcwd())
+ #Run once tests (not per line)
+ once_error = once_git_check_commit_title()
+ #TEMPEST error codes start with a T
+ pep8.ERRORCODE_REGEX = re.compile(r'[EWT]\d{3}')
+ add_tempest()
+ pep8.current_file = current_file
+ pep8.readlines = readlines
+ pep8.StyleGuide.excluded = excluded
+ pep8.StyleGuide.input_dir = input_dir
+ try:
+ pep8._main()
+ sys.exit(once_error)
+ finally:
+ if len(_missingImport) > 0:
+ print >> sys.stderr, ("%i imports missing in this test environment"
+ % len(_missingImport))
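
Because each tempest_* check is a plain function over a physical or logical line, the guidelines can be exercised directly without running the full pep8 pass. A small sketch, assuming tools/hacking.py is importable as ``hacking`` and pep8 is installed::

    import hacking

    # Yields one error tuple for a bare "except:".
    print list(hacking.tempest_except_format("except:"))
    # -> [(6, "T201: no 'except:' at least use 'except Exception:'")]

    # Returns an error tuple for a TODO without an owner.
    print hacking.tempest_todo_format("# TODO fix this later")
    # -> (2, 'T101: Use TODO(NAME)')
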
diff --git a/tools/install_venv.py b/tools/install_venv.py
new file mode 100644
index 0000000..42ed32c
--- /dev/null
+++ b/tools/install_venv.py
@@ -0,0 +1,248 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 OpenStack, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Installation script for Tempest's development virtualenv."""
+
+import optparse
+import os
+import subprocess
+import sys
+
+
+ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+VENV = os.path.join(ROOT, '.venv')
+PIP_REQUIRES = os.path.join(ROOT, 'tools', 'pip-requires')
+TEST_REQUIRES = os.path.join(ROOT, 'tools', 'test-requires')
+PY_VERSION = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
+
+
+def die(message, *args):
+ print >> sys.stderr, message % args
+ sys.exit(1)
+
+
+def check_python_version():
+ if sys.version_info < (2, 6):
+ die("Need Python Version >= 2.6")
+
+
+def run_command_with_code(cmd, redirect_output=True, check_exit_code=True):
+ """Runs a command in an out-of-process shell.
+
+ Returns the output of that command. Working directory is ROOT.
+ """
+ if redirect_output:
+ stdout = subprocess.PIPE
+ else:
+ stdout = None
+
+ proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout)
+ output = proc.communicate()[0]
+ if check_exit_code and proc.returncode != 0:
+ die('Command "%s" failed.\n%s', ' '.join(cmd), output)
+ return (output, proc.returncode)
+
+
+def run_command(cmd, redirect_output=True, check_exit_code=True):
+ return run_command_with_code(cmd, redirect_output, check_exit_code)[0]
+
+
+class Distro(object):
+
+ def check_cmd(self, cmd):
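+        # `which` prints the resolved path (or nothing), so an empty result
+        # means the command is not available and bool() yields False.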
+ return bool(run_command(['which', cmd], check_exit_code=False).strip())
+
+ def install_virtualenv(self):
+ if self.check_cmd('virtualenv'):
+ return
+
+ if self.check_cmd('easy_install'):
+ print 'Installing virtualenv via easy_install...',
+ if run_command(['easy_install', 'virtualenv']):
+ print 'Succeeded'
+ return
+ else:
+ print 'Failed'
+
+ die('ERROR: virtualenv not found.\n\nTempest development'
+            ' requires virtualenv; please install it using your'
+ ' favorite package management tool')
+
+ def post_process(self):
+ """Any distribution-specific post-processing gets done here.
+
+ In particular, this is useful for applying patches to code inside
+ the venv.
+ """
+ pass
+
+
+class Fedora(Distro):
+ """This covers all Fedora-based distributions.
+
+    Includes: Fedora, RHEL, CentOS, Scientific Linux.
+    """
+
+ def check_pkg(self, pkg):
+ return run_command_with_code(['rpm', '-q', pkg],
+ check_exit_code=False)[1] == 0
+
+ def yum_install(self, pkg, **kwargs):
+ print "Attempting to install '%s' via yum" % pkg
+ run_command(['sudo', 'yum', 'install', '-y', pkg], **kwargs)
+
+ def apply_patch(self, originalfile, patchfile):
+ run_command(['patch', originalfile, patchfile])
+
+ def install_virtualenv(self):
+ if self.check_cmd('virtualenv'):
+ return
+
+ if not self.check_pkg('python-virtualenv'):
+ self.yum_install('python-virtualenv', check_exit_code=False)
+
+ super(Fedora, self).install_virtualenv()
+
+ def post_process(self):
+ """Workaround for a bug in eventlet.
+
+ This currently affects RHEL6.1, but the fix can safely be
+ applied to all RHEL and Fedora distributions.
+
+ This can be removed when the fix is applied upstream.
+
+ Nova: https://bugs.launchpad.net/nova/+bug/884915
+ Upstream: https://bitbucket.org/which_linden/eventlet/issue/89
+ """
+
+ # Install "patch" program if it's not there
+ if not self.check_pkg('patch'):
+ self.yum_install('patch')
+
+ # Apply the eventlet patch
+ self.apply_patch(os.path.join(VENV, 'lib', PY_VERSION, 'site-packages',
+ 'eventlet/green/subprocess.py'),
+ 'contrib/redhat-eventlet.patch')
+
+
+def get_distro():
+ if (os.path.exists('/etc/fedora-release') or
+ os.path.exists('/etc/redhat-release')):
+ return Fedora()
+ else:
+ return Distro()
+
+
+def check_dependencies():
+ get_distro().install_virtualenv()
+
+
+def create_virtualenv(venv=VENV, no_site_packages=True):
+ """Creates the virtual environment and installs PIP.
+
+ Creates the virtual environment and installs PIP only into the
+ virtual environment.
+ """
+ print 'Creating venv...',
+ if no_site_packages:
+ run_command(['virtualenv', '-q', '--no-site-packages', VENV])
+ else:
+ run_command(['virtualenv', '-q', VENV])
+ print 'done.'
+ print 'Installing pip in virtualenv...',
+ if not run_command(['tools/with_venv.sh', 'easy_install',
+ 'pip>1.0']).strip():
+ die("Failed to install pip.")
+ print 'done.'
+
+
+def pip_install(*args):
+ run_command(['tools/with_venv.sh',
+ 'pip', 'install', '--upgrade'] + list(args),
+ redirect_output=False)
+
+
+def install_dependencies(venv=VENV):
+ print 'Installing dependencies with pip (this can take a while)...'
+
+    # First things first, make sure our venv has known-good versions of pip
+    # and distribute.
+ # NOTE: we keep pip at version 1.1 since the most recent version causes
+ # the .venv creation to fail. See:
+ # https://bugs.launchpad.net/nova/+bug/1047120
+ pip_install('pip==1.1')
+ pip_install('distribute')
+
+    # Install greenlet by hand - just listing it in the requires file does not
+    # get it installed in the right order
+ pip_install('greenlet')
+
+ pip_install('-r', PIP_REQUIRES)
+ pip_install('-r', TEST_REQUIRES)
+
+    # Install tempest into the virtualenv. No more path munging!
+ run_command([os.path.join(venv, 'bin/python'), 'setup.py', 'develop'])
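+    # Illustrative: with the default venv this runs something like
+    # ".venv/bin/python setup.py develop" from the repository root.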
+
+
+def post_process():
+ get_distro().post_process()
+
+
+def print_help():
+ help = """
+ Tempest development environment setup is complete.
+
+ Tempest development uses virtualenv to track and manage Python dependencies
+    during development and testing.
+
+    To activate the Tempest virtualenv for the duration of your current shell
+ session you can run:
+
+ $ source .venv/bin/activate
+
+ Or, if you prefer, you can run commands in the virtualenv on a case by case
+ basis by running:
+
+ $ tools/with_venv.sh <your command>
+
+    Also, 'make test' will automatically use the virtualenv.
+ """
+    print help_text
+
+
+def parse_args():
+ """Parses command-line arguments."""
+ parser = optparse.OptionParser()
+ parser.add_option("-n", "--no-site-packages", dest="no_site_packages",
+ default=False, action="store_true",
+ help="Do not inherit packages from global Python"
+ " install")
+ return parser.parse_args()
+
+
+def main(argv):
+ (options, args) = parse_args()
+ check_python_version()
+ check_dependencies()
+ create_virtualenv(no_site_packages=options.no_site_packages)
+ install_dependencies()
+ post_process()
+ print_help()
+
+if __name__ == '__main__':
+ main(sys.argv)
diff --git a/tools/pip-requires b/tools/pip-requires
index 9c861d9..061eff6 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -1,7 +1,12 @@
anyjson
nose
httplib2>=0.7.0
-pika
unittest2
lxml
boto>=2.2.1
+paramiko
+netaddr
+python-glanceclient>=0.5.0
+python-keystoneclient>=0.2.0
+python-novaclient>=2.10.0
+python-quantumclient>=2.1
diff --git a/tools/with_venv.sh b/tools/with_venv.sh
index 2e2b855..550c477 100755
--- a/tools/with_venv.sh
+++ b/tools/with_venv.sh
@@ -1,4 +1,4 @@
#!/bin/bash
TOOLS=`dirname $0`
-VENV=$TOOLS/../.kong-venv
-source $VENV/bin/activate && $@
+VENV=$TOOLS/../.venv
+source $VENV/bin/activate && "$@"
diff --git a/tox.ini b/tox.ini
index 433c55f..da1672b 100644
--- a/tox.ini
+++ b/tox.ini
@@ -15,4 +15,4 @@
[testenv:pep8]
deps = pep8==1.3.3
-commands = pep8 --ignore=E121,E122,E125,E126 --repeat --show-source --exclude=.venv,.tox,dist,doc,openstack .
+commands = python tools/hacking.py --ignore=E122,E125,E126 --repeat --show-source --exclude=.venv,.tox,dist,doc,openstack,*egg .