Merge "Add the group and role checks API in tempest"
diff --git a/.coveragerc b/.coveragerc
index c9b6467..51482d3 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -1,4 +1,4 @@
[run]
branch = True
source = tempest
-omit = tempest/tests/*,tempest/openstack/*
+omit = tempest/tests/*,tempest/scenario/test_*.py,tempest/api_schema/*,tempest/api/*
diff --git a/README.rst b/README.rst
index 45cb4c0..71e185f 100644
--- a/README.rst
+++ b/README.rst
@@ -87,7 +87,7 @@
be done with testr directly or any `testr`_ based test runner, like
`ostestr`_. For example, from the working dir running::
- $ ostestr --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty))'
+ $ ostestr --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario))'
will run the same set of tests as the default gate jobs.
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 12d1d40..7e4503d 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -127,8 +127,11 @@
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
-git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1"
-html_last_updated_fmt = os.popen(git_cmd).read()
+git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local",
+ "-n1"]
+html_last_updated_fmt = subprocess.Popen(git_cmd,
+ stdout=subprocess.PIPE).\
+ communicate()[0]
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 1b2b6d2..e428592 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -3,102 +3,88 @@
Tempest Configuration Guide
===========================
-This guide is a starting point for configuring tempest. It aims to elaborate
+This guide is a starting point for configuring Tempest. It aims to elaborate
on and explain some of the mandatory and common configuration settings and how
they are used in conjunction. The source of truth on each option is the sample
config file which explains the purpose of each individual option. You can see
the sample config file here: :ref:`tempest-sampleconf`
-Lock Path
----------
-
-There are some tests and operations inside of tempest that need to be
-externally locked when running in parallel to prevent them from running at
-the same time. This is a mandatory step for configuring tempest and is still
-needed even when running serially. All that is needed to do this is:
-
- #. Set the lock_path option in the oslo_concurrency group
-
Auth/Credentials
----------------
-Tempest currently has 2 different ways in configuration to provide credentials
-to use when running tempest. One is a traditional set of configuration options
-in the tempest.conf file. These options are in the identity section and let you
-specify a regular user, a global admin user, and an alternate user set of
-credentials. (which consist of a username, password, and project/tenant name)
-These options should be clearly labelled in the sample config file in the
-identity section.
+Tempest currently has two different ways of providing the credentials to use
+when running Tempest. One is a traditional set of configuration options
+in the tempest.conf file. These options are clearly labelled in the ``identity``
+section and let you specify a set of credentials for a regular user, a global
+admin user, and an alternate user, consisting of a username, password, and
+project/tenant name.
The other method to provide credentials is using the accounts.yaml file. This
file is used to specify an arbitrary number of users available to run tests
-with. You can specify the location of the file in the
-auth section in the tempest.conf file. To see the specific format used in
-the file please refer to the accounts.yaml.sample file included in tempest.
-Currently users that are specified in the accounts.yaml file are assumed to
-have the same set of roles which can be used for executing all the tests you
-are running. This will be addressed in the future, but is a current limitation.
-Eventually the config options for providing credentials to tempest will be
-deprecated and removed in favor of the accounts.yaml file.
+with. You can specify the location of the file in the ``auth`` section in the
+tempest.conf file. To see the specific format used in the file please refer to
+the accounts.yaml.sample file included in Tempest. Eventually the config
+options for providing credentials to Tempest will be deprecated and removed in
+favor of the accounts.yaml file.
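+
+For illustration, an entry in the accounts file follows the layout of the
+accounts.yaml.sample file and might look like the following (all values are
+placeholders)::
+
+    - username: 'user_1'
+      tenant_name: 'test_tenant_1'
+      password: 'test_password'
+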
Keystone Connection Info
^^^^^^^^^^^^^^^^^^^^^^^^
-In order for tempest to be able to talk to your OpenStack deployment you need
+In order for Tempest to be able to talk to your OpenStack deployment you need
to provide it with information about how it communicates with keystone.
-This involves configuring the following options in the identity section:
+This involves configuring the following options in the ``identity`` section:
- #. auth_version
- #. uri
- #. uri_v3
+ #. ``auth_version``
+ #. ``uri``
+ #. ``uri_v3``
-The *auth_version* option is used to tell tempest whether it should be using
+The ``auth_version`` option is used to tell Tempest whether it should be using
keystone's v2 or v3 api for communicating with keystone. (except for the
-identity api tests which will test a specific version) The 2 uri options are
-used to tell tempest the url of the keystone endpoint. The *uri* option is used
-for keystone v2 request and *uri_v3* is used for keystone v3. You want to ensure
-that which ever version you set for *auth_version* has its uri option defined.
+identity api tests which will test a specific version) The two uri options are
+used to tell Tempest the url of the keystone endpoint. The ``uri`` option is
+used for keystone v2 requests and ``uri_v3`` is used for keystone v3. You want
+to ensure that whichever version you set for ``auth_version`` has its uri
+option defined.
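+
+For example, a deployment using the v3 API might set (the URLs are
+placeholders for your keystone endpoint)::
+
+    [identity]
+    auth_version = v3
+    uri = http://172.16.0.10:5000/v2.0/
+    uri_v3 = http://172.16.0.10:5000/v3/
+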
Credential Provider Mechanisms
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Tempest currently also has 3 different internal methods for providing
-authentication to tests. Dynamic credentials, locking test accounts, and
+Tempest currently also has three different internal methods for providing
+authentication to tests: dynamic credentials, locking test accounts, and
non-locking test accounts. Depending on which one is in use the configuration
-of tempest is slightly different.
+of Tempest is slightly different.
Dynamic Credentials
"""""""""""""""""""
Dynamic Credentials (formerly known as Tenant isolation) was originally created
-to enable running tempest in parallel.
-For each test class it creates a unique set of user credentials to use for the
-tests in the class. It can create up to 3 sets of username, password, and
-tenant/project names for a primary user, an admin user, and an alternate user.
-To enable and use dynamic credentials you only need to configure 2 things:
+to enable running Tempest in parallel. For each test class it creates a unique
+set of user credentials to use for the tests in the class. It can create up to
+three sets of username, password, and tenant/project names for a primary user,
+an admin user, and an alternate user. To enable and use dynamic credentials you
+only need to configure two things:
#. A set of admin credentials with permissions to create users and
- tenants/projects. This is specified in the auth section with the
- admin_username, admin_tenant_name, admin_domain_name and admin_password
- options
- #. To enable dynamic_creds in the auth section with the
- use_dynamic_credentials option.
+ tenants/projects. This is specified in the ``auth`` section with the
+ ``admin_username``, ``admin_tenant_name``, ``admin_domain_name`` and
+ ``admin_password`` options
+ #. To enable dynamic credentials in the ``auth`` section with the
+ ``use_dynamic_credentials`` option.
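+
+A minimal sketch of these two settings (the credential values are
+placeholders)::
+
+    [auth]
+    use_dynamic_credentials = True
+    admin_username = admin
+    admin_tenant_name = admin
+    admin_password = secretadmin
+    admin_domain_name = Default
+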
-This is also the currently the default credential provider enabled by tempest,
-due to it's common use and ease of configuration.
+This is also currently the default credential provider enabled by Tempest, due
+to its common use and ease of configuration.
It is worth pointing out that depending on your cloud configuration you might
need to assign a role to each of the users created by Tempest's dynamic
-credentials.
-This can be set using the *tempest_roles* option. It takes in a list of role
-names each of which will be assigned to each of the users created by dynamic
-credentials. This option will not have any effect when set and tempest is not
+credentials. This can be set using the ``tempest_roles`` option. It takes in a
+list of role names each of which will be assigned to each of the users created
+by dynamic credentials. This option will not have any effect when Tempest is not
configured to use dynamic credentials.
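+
+For example, assuming the option lives in the ``auth`` section and your cloud
+uses a ``Member`` role (adjust both to your deployment)::
+
+    [auth]
+    tempest_roles = Member
+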
-Locking Test Accounts (aka accounts.yaml or accounts file)
-""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+Pre-Provisioned Credentials (aka accounts.yaml or accounts file)
+""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
For a long time using dynamic credentials was the only method available if you
-wanted to enable parallel execution of tempest tests. However this was
+wanted to enable parallel execution of Tempest tests. However, this was
insufficient for certain use cases because of the admin credentials requirement
to create the credential sets on demand. To get around that the accounts.yaml
file was introduced and with that a new internal credential provider to enable
@@ -109,17 +95,21 @@
To enable and use locking test accounts you need do a few things:
- #. Create a accounts.yaml file which contains the set of pre-existing
+ #. Create an accounts.yaml file which contains the set of pre-existing
credentials to use for testing. To make sure you don't have a credentials
- starvation issue when running in parallel make sure you have at least 2
- times the number of worker processes you are using to execute tempest
- available in the file. (if running serially the worker count is 1)
+ starvation issue when running in parallel make sure you have at least two
+ times the number of worker processes you are using to execute Tempest
+ available in the file. (If running serially the worker count is 1.)
- You can check the sample file packaged in tempest for the yaml format
- #. Provide tempest with the location of your accounts.yaml file with the
- test_accounts_file option in the auth section
+ You can check the accounts.yaml.sample file packaged in Tempest for the yaml
+ format.
+ #. Provide Tempest with the location of your accounts.yaml file with the
+ ``test_accounts_file`` option in the ``auth`` section
- #. Set use_dynamic_credentials = False in the auth group
+ *NOTE: Be sure to use a full path for the file; otherwise Tempest will
+ likely not find it.*
+
+ #. Set ``use_dynamic_credentials = False`` in the ``auth`` group
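+
+Putting the accounts file options together, a minimal sketch might look like
+(the path is a placeholder)::
+
+    [auth]
+    test_accounts_file = /home/user/tempest/etc/accounts.yaml
+    use_dynamic_credentials = False
+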
It is worth pointing out that each set of credentials in the accounts.yaml
should have a unique tenant. This is required to provide proper isolation
@@ -127,40 +117,40 @@
unexpected failures in some tests.
-Legacy test accounts (aka credentials config options)
-"""""""""""""""""""""""""""""""""""""""""""""""""""""
-**Starting in the Liberty release this mechanism was deprecated and will be
-removed in a future release**
+Legacy Credentials (aka credentials config options)
+"""""""""""""""""""""""""""""""""""""""""""""""""""
+**Starting in the Liberty release this mechanism was deprecated; it will be
+removed in a future release.**
When Tempest was refactored to allow for locking test accounts, the original
non-tenant isolated case was converted to internally work similarly to the
accounts.yaml file. This mechanism was then called the legacy test accounts
provider. To use the legacy test accounts provider you can specify the sets of
-credentials in the configuration file like detailed above with following 9
-options in the identity section:
+credentials in the configuration file as detailed above with the following
+nine options in the ``identity`` section:
- #. username
- #. password
- #. tenant_name
- #. admin_username
- #. admin_password
- #. admin_tenant_name
- #. alt_username
- #. alt_password
- #. alt_tenant_name
+ #. ``username``
+ #. ``password``
+ #. ``tenant_name``
+ #. ``admin_username``
+ #. ``admin_password``
+ #. ``admin_tenant_name``
+ #. ``alt_username``
+ #. ``alt_password``
+ #. ``alt_tenant_name``
-And in the auth section:
+And in the ``auth`` section:
- #. use_dynamic_credentials = False
- #. comment out 'test_accounts_file' or keep it as empty
+ #. ``use_dynamic_credentials = False``
+ #. Comment out ``test_accounts_file`` or keep it empty.
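+
+A minimal sketch of the legacy layout (deprecated; all values are
+placeholders)::
+
+    [identity]
+    username = demo
+    password = secretdemo
+    tenant_name = demo
+
+    [auth]
+    use_dynamic_credentials = False
+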
-It only makes sense to use it if parallel execution isn't needed, since tempest
-won't be able to properly isolate tests using this. Additionally, using the
-traditional config options for credentials is not able to provide credentials to
-tests which requires specific roles on accounts. This is because the config
-options do not give sufficient flexibility to describe the roles assigned to a
-user for running the tests. There are additional limitations with regard to
-network configuration when using this credential provider mechanism, see the
+It only makes sense to use this if parallel execution isn't needed, since
+Tempest won't be able to properly isolate tests using this. Additionally, using
+the traditional config options for credentials is not able to provide
+credentials to tests requiring specific roles on accounts. This is because the
+config options do not give sufficient flexibility to describe the roles assigned
+to a user for running the tests. There are additional limitations with regard to
+network configuration when using this credential provider mechanism - see the
`Networking`_ section below.
Compute
@@ -168,63 +158,64 @@
Flavors
^^^^^^^
-For tempest to be able to create servers you need to specify flavors that it
-can use to boot the servers with. There are 2 options in the tempest config
+For Tempest to be able to create servers you need to specify flavors that it
+can use to boot the servers with. There are two options in the Tempest config
for doing this:
- #. flavor_ref
- #. flavor_ref_alt
+ #. ``flavor_ref``
+ #. ``flavor_ref_alt``
-Both of these options are in the compute section of the config file and take
-in the flavor id (not the name) from nova. The *flavor_ref* option is what will
-be used for booting almost all of the guests, *flavor_ref_alt* is only used in
-tests where 2 different sized servers are required. (for example a resize test)
+Both of these options are in the ``compute`` section of the config file and take
+in the flavor id (not the name) from nova. The ``flavor_ref`` option is what
+will be used for booting almost all of the guests; ``flavor_ref_alt`` is only
+used in tests where two different-sized servers are required (for example, a
+resize test).
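+
+For example (the flavor IDs are placeholders taken from your cloud)::
+
+    [compute]
+    flavor_ref = 42
+    flavor_ref_alt = 84
+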
-Using a smaller flavor is generally recommended, when larger flavors are used
+Using a smaller flavor is generally recommended. When larger flavors are used,
the extra time required to bring up servers will likely affect total run time
and probably require tweaking timeout values to ensure tests have ample time to
finish.
Images
^^^^^^
-Just like with flavors, tempest needs to know which images to use for booting
-servers. There are 2 options in the compute section just like with flavors:
+Just like with flavors, Tempest needs to know which images to use for booting
+servers. There are two options in the compute section just like with flavors:
- #. image_ref
- #. image_ref_alt
+ #. ``image_ref``
+ #. ``image_ref_alt``
-Both options are expecting an image id (not name) from nova. The *image_ref*
-option is what will be used for booting the majority of servers in tempest.
-*image_ref_alt* is used for tests that require 2 images such as rebuild. If 2
-images are not available you can set both options to the same image_ref and
+Both options expect an image id (not name) from nova. The ``image_ref``
+option is what will be used for booting the majority of servers in Tempest.
+``image_ref_alt`` is used for tests that require two images such as rebuild. If
+two images are not available you can set both options to the same image id and
those tests will be skipped.
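+
+For example (the image IDs are placeholders; use IDs from your glance)::
+
+    [compute]
+    image_ref = 7d5e0e48-1b02-4c4e-9a2a-3d3b3c1b9f11
+    image_ref_alt = 0f1c9ad1-5f2e-4a3f-9d9e-94b5a1c8d0aa
+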
-There are also options in the scenario section for images:
+There are also options in the ``scenario`` section for images:
- #. img_file
- #. img_dir
- #. aki_img_file
- #. ari_img_file
- #. ami_img_file
- #. img_container_format
- #. img_disk_format
+ #. ``img_file``
+ #. ``img_dir``
+ #. ``aki_img_file``
+ #. ``ari_img_file``
+ #. ``ami_img_file``
+ #. ``img_container_format``
+ #. ``img_disk_format``
-however unlike the other image options these are used for a very small subset
+However, unlike the other image options, these are used for a very small subset
of scenario tests which are uploading an image. These options are used to tell
-tempest where an image file is located and describe it's metadata for when it's
+Tempest where an image file is located and describe its metadata for when it is
uploaded.
-The behavior of these options is a bit convoluted (which will likely be fixed
-in future versions). You first need to specify *img_dir*, which is the directory
-tempest will look for the image files in. First it will check if the filename
-set for *img_file* could be found in *img_dir*. If it is found then the
-*img_container_format* and *img_disk_format* options are used to upload that
-image to glance. However if it's not found tempest will look for the 3 uec image
-file name options as a fallback. If neither is found the tests requiring an
-image to upload will fail.
+The behavior of these options is a bit convoluted (which will likely be fixed in
+future versions). You first need to specify ``img_dir``, which is the directory
+in which Tempest will look for the image files. First it will check if the
+filename set for ``img_file`` could be found in ``img_dir``. If it is found then
+the ``img_container_format`` and ``img_disk_format`` options are used to upload
+that image to glance. However, if it is not found, Tempest will look for the
+three UEC image file name options as a fallback. If none of them are found,
+the tests requiring an image to upload will fail.
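+
+A sketch of how these options fit together (file names, paths and formats are
+placeholders for your image)::
+
+    [scenario]
+    img_dir = /opt/stack/devstack/files/images/cirros-0.3.4-x86_64-uec
+    img_file = cirros-0.3.4-x86_64-disk.img
+    img_container_format = bare
+    img_disk_format = qcow2
+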
It is worth pointing out that using `cirros`_ is a very good choice for running
-tempest. It's what is used for upstream testing, they boot quickly and have a
+Tempest. It is used for upstream testing; the images boot quickly and have a
small footprint.
.. _cirros: https://launchpad.net/cirros
@@ -232,9 +223,9 @@
Networking
----------
OpenStack has a myriad of different networking configurations possible and
-depending on which of the 2 network backends, nova-network or neutron, you are
+depending on which of the two network backends, nova-network or neutron, you are
using things can vary drastically. Due to this complexity Tempest has to provide
-a certain level of flexibility in it's configuration to ensure it will work
+a certain level of flexibility in its configuration to ensure it will work
against any cloud. This ends up causing a large number of permutations in
Tempest's config around network configuration.
@@ -246,7 +237,7 @@
for doing this can be different. In certain configurations it is required to
specify a single network with server create calls. Accordingly, Tempest provides
a few different methods for providing this information in configuration to try
-and ensure that regardless of the clouds configuration it'll still be able to
+and ensure that regardless of the cloud's configuration it'll still be able to
run. This section covers the different methods of configuring Tempest to provide
a network when creating servers.
@@ -255,17 +246,17 @@
This is the simplest method of specifying how networks should be used. You can
just specify a single network name/label to use for all server creations. The
limitation with this is that all tenants/projects and users must be able to see
-that network name/label if they were to perform a network list and be able to
-use it.
+that network name/label if they are to perform a network list and be able to use
+it.
If no network name is assigned in the config file and none of the below
alternatives are used, then Tempest will not specify a network on server
creations, which depending on the cloud configuration might prevent them from
booting.
-To set a fixed network name simply do:
+To set a fixed network name simply:
- #. Set the fixed_network_name option in the compute group
+ #. Set the ``fixed_network_name`` option in the ``compute`` group
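+
+For example (the network name is a placeholder)::
+
+    [compute]
+    fixed_network_name = private
+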
In the case that the configured fixed network name can not be found by a user
network list call, it will be treated like one was not provided except that a
@@ -279,8 +270,8 @@
server creations on a per tenant/project and user pair basis. This provides
the necessary flexibility to work with more intricate networking configurations
by enabling the user to specify exactly which network to use for which
-tenants/projects. You can refer to the accounts.yaml sample file included in
-the tempest repo for the syntax around specifying networks in the file.
+tenants/projects. You can refer to the accounts.yaml.sample file included in
+the Tempest repo for the syntax around specifying networks in the file.
However, specifying a network is not required when using an accounts file. If
one is not specified you can use a fixed network name to specify the network to
@@ -299,29 +290,29 @@
With Dynamic Credentials
""""""""""""""""""""""""
-With dynamic credentials enabled and using nova-network then nothing changes.
-Your only option for configuration is to either set a fixed network name or not.
-However, in most cases it shouldn't matter because nova-network should have no
-problem booting a server with multiple networks. If this is not the case for
-your cloud then using an accounts file is recommended because it provides the
-necessary flexibility to describe your configuration. Dynamic credentials is not
-able to dynamically allocate things as necessary if neutron is not enabled.
+With dynamic credentials enabled and using nova-network, your only option for
+configuration is to either set a fixed network name or not. However, in most
+cases it shouldn't matter because nova-network should have no problem booting a
+server with multiple networks. If this is not the case for your cloud then using
+an accounts file is recommended because it provides the necessary flexibility to
+describe your configuration. Dynamic credentials is not able to dynamically
+allocate things as necessary if neutron is not enabled.
With neutron and dynamic credentials enabled there should not be any additional
configuration necessary to enable Tempest to create servers with working
-networking, assuming you have properly configured the network section to work
-for your cloud. Tempest will dynamically create the neutron resources necessary
-to enable using servers with that network. Also, just as with the accounts
-file, if you specify a fixed network name while using neutron and dynamic
-credentials it will enable running tests which require a static network and it
-will additionally be used as a fallback for server creation. However, unlike
-accounts.yaml this should never be triggered.
+networking, assuming you have properly configured the ``network`` section to
+work for your cloud. Tempest will dynamically create the neutron resources
+necessary to enable using servers with that network. Also, just as with the
+accounts file, if you specify a fixed network name while using neutron and
+dynamic credentials it will enable running tests which require a static network
+and it will additionally be used as a fallback for server creation. However,
+unlike accounts.yaml this should never be triggered.
-However, there is an option *create_isolated_networks* to disable dynamic
-credentials's automatic provisioning of network resources. If this option is
-used you will have to either rely on there only being a single/default network
-available for the server creation, or use *fixed_network_name* to inform
-Tempest which network to use.
+However, there is an option ``create_isolated_networks`` to disable dynamic
+credentials' automatic provisioning of network resources. If this option is set
+to False you will have to either rely on there only being a single/default
+network available for the server creation, or use ``fixed_network_name`` to
+inform Tempest which network to use.
Configuring Available Services
------------------------------
@@ -332,7 +323,7 @@
out which tests it is able to run and certain setup steps which differ based
on the available services.
-The *service_available* section of the config file is used to set which
+The ``service_available`` section of the config file is used to set which
services are available. It contains a boolean option for each service (except
for keystone which is a hard requirement) set it to True if the service is
available or False if it is not.
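+
+For example (adjust the booleans to match your deployment)::
+
+    [service_available]
+    cinder = True
+    neutron = True
+    swift = False
+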
@@ -341,56 +332,56 @@
^^^^^^^^^^^^^^^
Each project which has its own REST API contains an entry in the service
catalog. Like most things in OpenStack this is also completely configurable.
-However, for tempest to be able to figure out the endpoints to send REST API
-calls for each service to it needs to know how that project is defined in the
-service catalog. There are 3 options for each service section to accomplish
+However, for Tempest to be able to figure out which endpoints should get REST
+API calls for each service, it needs to know how that project is defined in the
+service catalog. There are three options for each service section to accomplish
this:
- #. catalog_type
- #. endpoint_type
- #. region
+ #. ``catalog_type``
+ #. ``endpoint_type``
+ #. ``region``
-Setting *catalog_type* and *endpoint_type* should normally give Tempest enough
-information to determine which endpoint it should pull from the service
-catalog to use for talking to that particular service. However, if you're cloud
-has multiple regions available and you need to specify a particular one to use
-a service you can set the *region* option in that service's section.
+Setting ``catalog_type`` and ``endpoint_type`` should normally give Tempest
+enough information to determine which endpoint it should pull from the service
+catalog to use for talking to that particular service. However, if your cloud
+has multiple regions available and you need to specify a particular one to use
+a service, you can set the ``region`` option in that service's section.
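+
+For example, to pin the compute service to a particular region (all values are
+placeholders)::
+
+    [compute]
+    catalog_type = compute
+    endpoint_type = publicURL
+    region = RegionOne
+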
It should also be noted that the default values for these options are set
-to what devstack uses. (which is a de facto standard for service catalog
-entries) So often nothing actually needs to be set on these options to enable
+to what devstack uses (which is a de facto standard for service catalog
+entries). So often nothing actually needs to be set on these options to enable
communication to a particular service. It is only if you are either not using
-the same *catalog_type* as devstack or you want Tempest to talk to a different
+the same ``catalog_type`` as devstack or you want Tempest to talk to a different
endpoint type instead of publicURL for a service that these need to be changed.
.. note::
- Tempest does not serve all kind of fancy URLs in the service catalog.
- Service catalog should be in a standard format (which is going to be
- standardized at keystone level).
- Tempest expects URLs in the Service catalog in below format:
- * http://example.com:1234/<version-info>
+ Tempest does not serve all kinds of fancy URLs in the service catalog. The
+ service catalog should be in a standard format (which is going to be
+ standardized at the keystone level).
+ Tempest expects URLs in the Service catalog in the following format:
+ * ``http://example.com:1234/<version-info>``
Examples:
- * Good - http://example.com:1234/v2.0
- * Wouldn’t work - http://example.com:1234/xyz/v2.0/
+ * Good - ``http://example.com:1234/v2.0``
+ * Wouldn’t work - ``http://example.com:1234/xyz/v2.0/``
(adding prefix/suffix around version etc)
-Service feature configuration
+Service Feature Configuration
-----------------------------
-OpenStack provides its deployers a myriad of different configuration options
-to enable anyone deploying it to create a cloud tailor-made for any individual
-use case. It provides options for several different backend type, databases,
+OpenStack provides its deployers a myriad of different configuration options to
+enable anyone deploying it to create a cloud tailor-made for any individual use
+case. It provides options for several different backend types, databases,
message queues, etc. However, the downside to this configurability is that
certain operations and features aren't supported depending on the configuration.
These features may or may not be discoverable from the API so the burden is
-often on the user to figure out what the cloud they're talking to supports.
-Besides the obvious interoperability issues with this it also leaves Tempest
-in an interesting situation trying to figure out which tests are expected to
-work. However, Tempest tests do not rely on dynamic api discovery for a feature
-(assuming one exists). Instead Tempest has to be explicitly configured as to
-which optional features are enabled. This is in order to prevent bugs in the
-discovery mechanisms from masking failures.
+often on the user to figure out what is supported by the cloud they're talking
+to. Besides the obvious interoperability issues with this it also leaves
+Tempest in an interesting situation trying to figure out which tests are
+expected to work. However, Tempest tests do not rely on dynamic API discovery
+for a feature (assuming one exists). Instead Tempest has to be explicitly
+configured as to which optional features are enabled. This is in order to
+prevent bugs in the discovery mechanisms from masking failures.
The service feature-enabled config sections are how Tempest addresses the
optional feature question. Each service that has tests for optional features
@@ -402,10 +393,10 @@
API Extensions
^^^^^^^^^^^^^^
-The service feature-enabled sections often contain an *api-extensions* option
-(or in the case of swift a *discoverable_apis* option) this is used to tell
-tempest which api extensions (or configurable middleware) is used in your
-deployment. It has 2 valid config states, either it contains a single value
-"all" (which is the default) which means that every api extension is assumed
+The service feature-enabled sections often contain an ``api_extensions`` option
+(or in the case of swift a ``discoverable_apis`` option). This is used to tell
+Tempest which api extensions (or configurable middleware) are used in your
+deployment. It has two valid config states: either it contains a single value
+``all`` (which is the default) which means that every api extension is assumed
to be enabled, or it is set to a list of each individual extension that is
enabled for that service.
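+
+For example, to limit the compute tests to a specific set of extensions (the
+aliases shown are placeholders)::
+
+    [compute-feature-enabled]
+    api_extensions = os-aggregates,os-keypairs
+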
diff --git a/doc/source/field_guide/thirdparty.rst b/doc/source/field_guide/thirdparty.rst
deleted file mode 120000
index 3fd6a51..0000000
--- a/doc/source/field_guide/thirdparty.rst
+++ /dev/null
@@ -1 +0,0 @@
-../../../tempest/thirdparty/README.rst
\ No newline at end of file
diff --git a/doc/source/index.rst b/doc/source/index.rst
index fe6074f..32e6e51 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -26,7 +26,6 @@
field_guide/api
field_guide/scenario
field_guide/stress
- field_guide/thirdparty
field_guide/unit_tests
---------------------------
diff --git a/requirements.txt b/requirements.txt
index d470c30..43f8ff7 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,27 +1,27 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
-pbr>=1.6
+pbr>=1.6 # Apache-2.0
cliff>=1.15.0 # Apache-2.0
-anyjson>=0.3.3
-httplib2>=0.7.5
-jsonschema!=2.5.0,<3.0.0,>=2.0.0
-testtools>=1.4.0
-boto>=2.32.1
-paramiko>=1.13.0
-netaddr!=0.7.16,>=0.7.12
-testrepository>=0.0.18
-pyOpenSSL>=0.14
+anyjson>=0.3.3 # BSD
+httplib2>=0.7.5 # MIT
+jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT
+testtools>=1.4.0 # MIT
+paramiko>=1.13.0 # LGPL
+netaddr!=0.7.16,>=0.7.12 # BSD
+testrepository>=0.0.18 # Apache-2.0/BSD
+pyOpenSSL>=0.14 # Apache-2.0
oslo.concurrency>=2.3.0 # Apache-2.0
-oslo.config>=2.7.0 # Apache-2.0
-oslo.i18n>=1.5.0 # Apache-2.0
-oslo.log>=1.12.0 # Apache-2.0
+oslo.config>=3.4.0 # Apache-2.0
+oslo.i18n>=2.1.0 # Apache-2.0
+oslo.log>=1.14.0 # Apache-2.0
oslo.serialization>=1.10.0 # Apache-2.0
-oslo.utils>=2.8.0 # Apache-2.0
-six>=1.9.0
-iso8601>=0.1.9
-fixtures>=1.3.1
-testscenarios>=0.4
-tempest-lib>=0.11.0
-PyYAML>=3.1.0
+oslo.utils>=3.4.0 # Apache-2.0
+six>=1.9.0 # MIT
+iso8601>=0.1.9 # MIT
+fixtures>=1.3.1 # Apache-2.0/BSD
+testscenarios>=0.4 # Apache-2.0/BSD
+tempest-lib>=0.13.0 # Apache-2.0
+PyYAML>=3.1.0 # MIT
stevedore>=1.5.0 # Apache-2.0
+PrettyTable<0.8,>=0.7 # BSD
diff --git a/run_tempest.sh b/run_tempest.sh
index a704684..8c8f25f 100755
--- a/run_tempest.sh
+++ b/run_tempest.sh
@@ -14,8 +14,6 @@
echo " -C, --config Config file location"
echo " -h, --help Print this usage message"
echo " -d, --debug Run tests with testtools instead of testr. This allows you to use PDB"
- echo " -l, --logging Enable logging"
- echo " -L, --logging-config Logging config file location. Default is etc/logging.conf"
echo " -- [TESTROPTIONS] After the first '--' you can pass arbitrary arguments to testr "
}
@@ -31,10 +29,8 @@
wrapper=""
config_file=""
update=0
-logging=0
-logging_config=etc/logging.conf
-if ! options=$(getopt -o VNnfusthdC:lL: -l virtual-env,no-virtual-env,no-site-packages,force,update,smoke,serial,help,debug,config:,logging,logging-config: -- "$@")
+if ! options=$(getopt -o VNnfusthdC: -l virtual-env,no-virtual-env,no-site-packages,force,update,smoke,serial,help,debug,config: -- "$@")
then
# parse error
usage
@@ -55,8 +51,6 @@
-C|--config) config_file=$2; shift;;
-s|--smoke) testrargs+="smoke";;
-t|--serial) serial=1;;
- -l|--logging) logging=1;;
- -L|--logging-config) logging_config=$2; shift;;
--) [ "yes" == "$first_uu" ] || testrargs="$testrargs $1"; first_uu=no ;;
*) testrargs="$testrargs $1";;
esac
@@ -69,16 +63,6 @@
export TEMPEST_CONFIG=`basename "$config_file"`
fi
-if [ $logging -eq 1 ]; then
- if [ ! -f "$logging_config" ]; then
- echo "No such logging config file: $logging_config"
- exit 1
- fi
- logging_config=`readlink -f "$logging_config"`
- export TEMPEST_LOG_CONFIG_DIR=`dirname "$logging_config"`
- export TEMPEST_LOG_CONFIG=`basename "$logging_config"`
-fi
-
cd `dirname "$0"`
if [ $no_site_packages -eq 1 ]; then
diff --git a/setup.cfg b/setup.cfg
index 4415063..cc3a365 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -33,9 +33,12 @@
tempest-account-generator = tempest.cmd.account_generator:main
tempest = tempest.cmd.main:main
tempest.cm =
+ account-generator = tempest.cmd.account_generator:TempestAccountGenerator
init = tempest.cmd.init:TempestInit
cleanup = tempest.cmd.cleanup:TempestCleanup
run-stress = tempest.cmd.run_stress:TempestRunStress
+ list-plugins = tempest.cmd.list_plugins:TempestListPlugins
+ verify-config = tempest.cmd.verify_tempest_config:TempestVerifyConfig
oslo.config.opts =
tempest.config = tempest.config:list_opts
diff --git a/tempest/README.rst b/tempest/README.rst
index f93a173..113b191 100644
--- a/tempest/README.rst
+++ b/tempest/README.rst
@@ -16,7 +16,6 @@
| api/ - API tests
| scenario/ - complex scenario tests
| stress/ - stress tests
-| thirdparty/ - 3rd party api tests
Each of these directories contains different types of tests. What
belongs in each directory, the rules and examples for good tests, are
@@ -56,14 +55,6 @@
several test jobs in parallel and can run any existing test in Tempest as a
stress job.
-:ref:`third_party_field_guide`
-------------------------------
-
-Many openstack components include 3rdparty API support. It is
-completely legitimate for Tempest to include tests of 3rdparty APIs,
-but those should be kept separate from the normal OpenStack
-validation.
-
:ref:`unit_tests_field_guide`
-----------------------------
diff --git a/tempest/api/compute/admin/test_agents.py b/tempest/api/compute/admin/test_agents.py
index d2b3a81..9f7bbae 100644
--- a/tempest/api/compute/admin/test_agents.py
+++ b/tempest/api/compute/admin/test_agents.py
@@ -53,7 +53,7 @@
if rand_key in kwargs:
# NOTE: The rand_name is for avoiding agent conflicts.
# If you try to create an agent with the same hypervisor,
- # os and architecture as an exising agent, Nova will return
+ # os and architecture as an existing agent, Nova will return
# an HTTPConflict or HTTPServerError.
kwargs[rand_key] = data_utils.rand_name(kwargs[rand_key])
return kwargs
diff --git a/tempest/api/compute/admin/test_aggregates.py b/tempest/api/compute/admin/test_aggregates.py
index ddd9aa0..1d83fec 100644
--- a/tempest/api/compute/admin/test_aggregates.py
+++ b/tempest/api/compute/admin/test_aggregates.py
@@ -145,7 +145,7 @@
@test.idempotent_id('c8e85064-e79b-4906-9931-c11c24294d02')
def test_aggregate_add_remove_host(self):
- # Add an host to the given aggregate and remove.
+ # Add a host to the given aggregate and remove.
self.useFixture(fixtures.LockFixture('availability_zone'))
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = (self.client.create_aggregate(name=aggregate_name)
@@ -168,7 +168,7 @@
@test.idempotent_id('7f6a1cc5-2446-4cdb-9baa-b6ae0a919b72')
def test_aggregate_add_host_list(self):
- # Add an host to the given aggregate and list.
+ # Add a host to the given aggregate and list.
self.useFixture(fixtures.LockFixture('availability_zone'))
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = (self.client.create_aggregate(name=aggregate_name)
@@ -188,7 +188,7 @@
@test.idempotent_id('eeef473c-7c52-494d-9f09-2ed7fc8fc036')
def test_aggregate_add_host_get_details(self):
- # Add an host to the given aggregate and get details.
+ # Add a host to the given aggregate and get details.
self.useFixture(fixtures.LockFixture('availability_zone'))
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = (self.client.create_aggregate(name=aggregate_name)
@@ -205,7 +205,7 @@
@test.idempotent_id('96be03c7-570d-409c-90f8-e4db3c646996')
def test_aggregate_add_host_create_server_with_az(self):
- # Add an host to the given aggregate and create a server.
+ # Add a host to the given aggregate and create a server.
self.useFixture(fixtures.LockFixture('availability_zone'))
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
az_name = data_utils.rand_name(self.az_name_prefix)
diff --git a/tempest/api/compute/admin/test_floating_ips_bulk.py b/tempest/api/compute/admin/test_floating_ips_bulk.py
index fe05ddb..456363c 100644
--- a/tempest/api/compute/admin/test_floating_ips_bulk.py
+++ b/tempest/api/compute/admin/test_floating_ips_bulk.py
@@ -38,7 +38,7 @@
@classmethod
def resource_setup(cls):
super(FloatingIPsBulkAdminTestJSON, cls).resource_setup()
- cls.ip_range = CONF.compute.floating_ip_range
+ cls.ip_range = CONF.validation.floating_ip_range
cls.verify_unallocated_floating_ip_range(cls.ip_range)
@classmethod
diff --git a/tempest/api/compute/admin/test_live_migration.py b/tempest/api/compute/admin/test_live_migration.py
index 7c4c30c..653a3cd 100644
--- a/tempest/api/compute/admin/test_live_migration.py
+++ b/tempest/api/compute/admin/test_live_migration.py
@@ -79,11 +79,6 @@
def _get_server_status(self, server_id):
return self._get_server_details(server_id)['status']
- def _create_server(self, volume_backed=False):
- server = self.create_test_server(wait_until="ACTIVE",
- volume_backed=volume_backed)
- return server['id']
-
def _volume_clean_up(self, server_id, volume_id):
body = self.volumes_client.show_volume(volume_id)['volume']
if body['status'] == 'in-use':
@@ -103,7 +98,8 @@
volume_backed, *block* migration is not used.
"""
# Live migrate an instance to another host
- server_id = self._create_server(volume_backed=volume_backed)
+ server_id = self.create_test_server(wait_until="ACTIVE",
+ volume_backed=volume_backed)['id']
actual_host = self._get_host_for_server(server_id)
target_host = self._get_host_other_than(actual_host)
@@ -153,7 +149,7 @@
block_migrate_cinder_iscsi,
'Block Live migration not configured for iSCSI')
def test_iscsi_volume(self):
- server_id = self._create_server()
+ server_id = self.create_test_server(wait_until="ACTIVE")['id']
actual_host = self._get_host_for_server(server_id)
target_host = self._get_host_other_than(actual_host)
diff --git a/tempest/api/compute/admin/test_security_groups.py b/tempest/api/compute/admin/test_security_groups.py
index e31129b..1494745 100644
--- a/tempest/api/compute/admin/test_security_groups.py
+++ b/tempest/api/compute/admin/test_security_groups.py
@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import testtools
-
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import config
@@ -38,9 +36,6 @@
self.client.delete_security_group(securitygroup_id)
@test.idempotent_id('49667619-5af9-4c63-ab5d-2cfdd1c8f7f1')
- @testtools.skipIf(CONF.service_available.neutron,
- "Skipped because neutron does not support all_tenants "
- "search filter.")
@test.services('network')
def test_list_security_groups_list_all_tenants_filter(self):
# Admin can list security groups of all tenants
diff --git a/tempest/api/compute/admin/test_servers.py b/tempest/api/compute/admin/test_servers.py
index fd6f105..49c7318 100644
--- a/tempest/api/compute/admin/test_servers.py
+++ b/tempest/api/compute/admin/test_servers.py
@@ -15,6 +15,7 @@
from tempest_lib import decorators
from tempest.api.compute import base
+from tempest.common import compute
from tempest.common import fixed_network
from tempest.common.utils import data_utils
from tempest.common import waiters
@@ -104,16 +105,14 @@
def test_list_servers_filter_by_exist_host(self):
# Filter the list of servers by existent host
name = data_utils.rand_name('server')
- flavor = self.flavor_ref
- image_id = self.image_ref
network = self.get_tenant_network()
network_kwargs = fixed_network.set_networks_kwarg(network)
- test_server = self.client.create_server(name=name, imageRef=image_id,
- flavorRef=flavor,
- **network_kwargs)['server']
+ # We need to create the server as an admin, so we can't use
+ # self.create_test_server() here as this method creates the server
+        # in the "primary" (i.e. non-admin) tenant.
+ test_server, _ = compute.create_test_server(
+ self.os_adm, wait_until="ACTIVE", name=name, **network_kwargs)
self.addCleanup(self.client.delete_server, test_server['id'])
- waiters.wait_for_server_status(self.client,
- test_server['id'], 'ACTIVE')
server = self.client.show_server(test_server['id'])['server']
self.assertEqual(server['status'], 'ACTIVE')
hostname = server[self._host_key]
diff --git a/tempest/api/compute/admin/test_servers_negative.py b/tempest/api/compute/admin/test_servers_negative.py
index 055dc1b..23b8a6c 100644
--- a/tempest/api/compute/admin/test_servers_negative.py
+++ b/tempest/api/compute/admin/test_servers_negative.py
@@ -155,7 +155,7 @@
self.client.suspend_server(server_id)
waiters.wait_for_server_status(self.client,
server_id, 'SUSPENDED')
- # migrate an suspended server should fail
+ # migrate a suspended server should fail
self.assertRaises(lib_exc.Conflict,
self.client.migrate_server,
server_id)
diff --git a/tempest/api/compute/admin/test_servers_on_multinodes.py b/tempest/api/compute/admin/test_servers_on_multinodes.py
new file mode 100644
index 0000000..814a876
--- /dev/null
+++ b/tempest/api/compute/admin/test_servers_on_multinodes.py
@@ -0,0 +1,68 @@
+# Copyright 2016 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.compute import base
+from tempest import config
+from tempest import test
+
+CONF = config.CONF
+
+
+class ServersOnMultiNodesTest(base.BaseV2ComputeAdminTest):
+
+ @classmethod
+ def skip_checks(cls):
+ super(ServersOnMultiNodesTest, cls).skip_checks()
+
+ if CONF.compute.min_compute_nodes < 2:
+ raise cls.skipException(
+ "Less than 2 compute nodes, skipping multi-nodes test.")
+
+ def _get_host(self, server_id):
+ return self.os_adm.servers_client.show_server(
+ server_id)['server']['OS-EXT-SRV-ATTR:host']
+
+ @test.idempotent_id('26a9d5df-6890-45f2-abc4-a659290cb130')
+ def test_create_servers_on_same_host(self):
+ server01 = self.create_test_server(wait_until='ACTIVE')['id']
+
+ hints = {'same_host': server01}
+ server02 = self.create_test_server(scheduler_hints=hints,
+ wait_until='ACTIVE')['id']
+ host01 = self._get_host(server01)
+ host02 = self._get_host(server02)
+ self.assertEqual(host01, host02)
+
+ @test.idempotent_id('cc7ca884-6e3e-42a3-a92f-c522fcf25e8e')
+ def test_create_servers_on_different_hosts(self):
+ server01 = self.create_test_server(wait_until='ACTIVE')['id']
+
+ hints = {'different_host': server01}
+ server02 = self.create_test_server(scheduler_hints=hints,
+ wait_until='ACTIVE')['id']
+ host01 = self._get_host(server01)
+ host02 = self._get_host(server02)
+ self.assertNotEqual(host01, host02)
+
+ @test.idempotent_id('7869cc84-d661-4e14-9f00-c18cdc89cf57')
+ def test_create_servers_on_different_hosts_with_list_of_servers(self):
+ server01 = self.create_test_server(wait_until='ACTIVE')['id']
+
+ # This scheduler-hint supports list of servers also.
+ hints = {'different_host': [server01]}
+ server02 = self.create_test_server(scheduler_hints=hints,
+ wait_until='ACTIVE')['id']
+ host01 = self._get_host(server01)
+ host02 = self._get_host(server02)
+ self.assertNotEqual(host01, host02)
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 2321b4e..6d19ca7 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -55,6 +55,13 @@
@classmethod
def setup_credentials(cls):
cls.set_network_resources()
+ cls.request_microversion = (
+ api_version_utils.select_request_microversion(
+ cls.min_microversion,
+ CONF.compute_feature_enabled.min_microversion))
+ if cls.request_microversion:
+ cls.services_microversion = {
+ CONF.compute.catalog_type: cls.request_microversion}
super(BaseV2ComputeTest, cls).setup_credentials()
@classmethod
@@ -63,12 +70,13 @@
cls.servers_client = cls.os.servers_client
cls.server_groups_client = cls.os.server_groups_client
cls.flavors_client = cls.os.flavors_client
- cls.images_client = cls.os.images_client
+ cls.compute_images_client = cls.os.compute_images_client
cls.extensions_client = cls.os.extensions_client
cls.floating_ip_pools_client = cls.os.floating_ip_pools_client
cls.floating_ips_client = cls.os.compute_floating_ips_client
cls.keypairs_client = cls.os.keypairs_client
- cls.security_group_rules_client = cls.os.security_group_rules_client
+ cls.security_group_rules_client = (
+ cls.os.compute_security_group_rules_client)
cls.security_groups_client = cls.os.compute_security_groups_client
cls.quotas_client = cls.os.quotas_client
cls.quota_classes_client = cls.os.quota_classes_client
@@ -101,13 +109,13 @@
super(BaseV2ComputeTest, cls).resource_setup()
cls.build_interval = CONF.compute.build_interval
cls.build_timeout = CONF.compute.build_timeout
- cls.ssh_user = CONF.compute.ssh_user
cls.image_ref = CONF.compute.image_ref
cls.image_ref_alt = CONF.compute.image_ref_alt
cls.flavor_ref = CONF.compute.flavor_ref
cls.flavor_ref_alt = CONF.compute.flavor_ref_alt
- cls.image_ssh_user = CONF.compute.image_ssh_user
- cls.image_ssh_password = CONF.compute.image_ssh_password
+ cls.ssh_user = CONF.validation.image_ssh_user
+ cls.image_ssh_user = CONF.validation.image_ssh_user
+ cls.image_ssh_password = CONF.validation.image_ssh_password
cls.servers = []
cls.images = []
cls.security_groups = []
@@ -170,7 +178,7 @@
LOG.debug('Clearing images: %s', ','.join(cls.images))
for image_id in cls.images:
try:
- cls.images_client.delete_image(image_id)
+ cls.compute_images_client.delete_image(image_id)
except lib_exc.NotFound:
# The image may have already been deleted which is OK.
pass
@@ -278,8 +286,8 @@
# into the delete_volume method as a convenience to the caller.
volumes_client.wait_for_resource_deletion(volume_id)
except lib_exc.NotFound:
- LOG.warn("Unable to delete volume '%s' since it was not found. "
- "Maybe it was already deleted?" % volume_id)
+ LOG.warning("Unable to delete volume '%s' since it was not found. "
+ "Maybe it was already deleted?" % volume_id)
@classmethod
def prepare_instance_network(cls):
@@ -295,14 +303,14 @@
if 'name' in kwargs:
name = kwargs.pop('name')
- image = cls.images_client.create_image(server_id, name=name)
+ image = cls.compute_images_client.create_image(server_id, name=name)
image_id = data_utils.parse_image_id(image.response['location'])
cls.images.append(image_id)
if 'wait_until' in kwargs:
- waiters.wait_for_image_status(cls.images_client,
+ waiters.wait_for_image_status(cls.compute_images_client,
image_id, kwargs['wait_until'])
- image = cls.images_client.show_image(image_id)['image']
+ image = cls.compute_images_client.show_image(image_id)['image']
if kwargs['wait_until'] == 'ACTIVE':
if kwargs.get('wait_for_server', True):
@@ -321,11 +329,12 @@
except Exception:
LOG.exception('Failed to delete server %s' % server_id)
+ cls.password = data_utils.rand_password()
server = cls.create_test_server(
validatable,
wait_until='ACTIVE',
+ adminPass=cls.password,
**kwargs)
- cls.password = server['adminPass']
return server['id']
@classmethod
diff --git a/tempest/api/compute/images/test_image_metadata.py b/tempest/api/compute/images/test_image_metadata.py
index 975b850..0724566 100644
--- a/tempest/api/compute/images/test_image_metadata.py
+++ b/tempest/api/compute/images/test_image_metadata.py
@@ -37,7 +37,7 @@
def setup_clients(cls):
super(ImagesMetadataTestJSON, cls).setup_clients()
cls.glance_client = cls.os.image_client
- cls.client = cls.images_client
+ cls.client = cls.compute_images_client
@classmethod
def resource_setup(cls):
diff --git a/tempest/api/compute/images/test_image_metadata_negative.py b/tempest/api/compute/images/test_image_metadata_negative.py
index 0f02166..85d137b 100644
--- a/tempest/api/compute/images/test_image_metadata_negative.py
+++ b/tempest/api/compute/images/test_image_metadata_negative.py
@@ -25,7 +25,7 @@
@classmethod
def setup_clients(cls):
super(ImagesMetadataTestJSON, cls).setup_clients()
- cls.client = cls.images_client
+ cls.client = cls.compute_images_client
@test.attr(type=['negative'])
@test.idempotent_id('94069db2-792f-4fa8-8bd3-2271a6e0c095')
diff --git a/tempest/api/compute/images/test_images.py b/tempest/api/compute/images/test_images.py
index dc62620..150e8af 100644
--- a/tempest/api/compute/images/test_images.py
+++ b/tempest/api/compute/images/test_images.py
@@ -37,7 +37,7 @@
@classmethod
def setup_clients(cls):
super(ImagesTestJSON, cls).setup_clients()
- cls.client = cls.images_client
+ cls.client = cls.compute_images_client
cls.servers_client = cls.servers_client
@test.idempotent_id('aa06b52b-2db5-4807-b218-9441f75d74e3')
diff --git a/tempest/api/compute/images/test_images_negative.py b/tempest/api/compute/images/test_images_negative.py
index 9197adf..8f6ede9 100644
--- a/tempest/api/compute/images/test_images_negative.py
+++ b/tempest/api/compute/images/test_images_negative.py
@@ -39,7 +39,7 @@
@classmethod
def setup_clients(cls):
super(ImagesNegativeTestJSON, cls).setup_clients()
- cls.client = cls.images_client
+ cls.client = cls.compute_images_client
cls.servers_client = cls.servers_client
@test.attr(type=['negative'])
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index 37c2bb6..7b978ab 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -62,7 +62,7 @@
@classmethod
def setup_clients(cls):
super(ImagesOneServerTestJSON, cls).setup_clients()
- cls.client = cls.images_client
+ cls.client = cls.compute_images_client
@classmethod
def resource_setup(cls):
diff --git a/tempest/api/compute/images/test_images_oneserver_negative.py b/tempest/api/compute/images/test_images_oneserver_negative.py
index 9ea62fb..2fc9ef8 100644
--- a/tempest/api/compute/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/images/test_images_oneserver_negative.py
@@ -76,7 +76,7 @@
@classmethod
def setup_clients(cls):
super(ImagesOneServerNegativeTestJSON, cls).setup_clients()
- cls.client = cls.images_client
+ cls.client = cls.compute_images_client
@classmethod
def resource_setup(cls):
diff --git a/tempest/api/compute/images/test_list_image_filters.py b/tempest/api/compute/images/test_list_image_filters.py
index 9f3ba71..af840cc 100644
--- a/tempest/api/compute/images/test_list_image_filters.py
+++ b/tempest/api/compute/images/test_list_image_filters.py
@@ -15,7 +15,6 @@
import time
-from oslo_log import log as logging
import six
import testtools
@@ -27,8 +26,6 @@
CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
class ListImageFiltersTestJSON(base.BaseV2ComputeTest):
@@ -42,7 +39,7 @@
@classmethod
def setup_clients(cls):
super(ListImageFiltersTestJSON, cls).setup_clients()
- cls.client = cls.images_client
+ cls.client = cls.compute_images_client
cls.glance_client = cls.os.image_client
@classmethod
diff --git a/tempest/api/compute/images/test_list_image_filters_negative.py b/tempest/api/compute/images/test_list_image_filters_negative.py
index 82062bd..34d26e2 100644
--- a/tempest/api/compute/images/test_list_image_filters_negative.py
+++ b/tempest/api/compute/images/test_list_image_filters_negative.py
@@ -34,7 +34,7 @@
@classmethod
def setup_clients(cls):
super(ListImageFiltersNegativeTestJSON, cls).setup_clients()
- cls.client = cls.images_client
+ cls.client = cls.compute_images_client
@test.attr(type=['negative'])
@test.idempotent_id('391b0440-432c-4d4b-b5da-c5096aa247eb')
diff --git a/tempest/api/compute/images/test_list_images.py b/tempest/api/compute/images/test_list_images.py
index 6ca15d6..ae3667d 100644
--- a/tempest/api/compute/images/test_list_images.py
+++ b/tempest/api/compute/images/test_list_images.py
@@ -32,7 +32,7 @@
@classmethod
def setup_clients(cls):
super(ListImagesTestJSON, cls).setup_clients()
- cls.client = cls.images_client
+ cls.client = cls.compute_images_client
@test.idempotent_id('490d0898-e12a-463f-aef0-c50156b9f789')
def test_get_image(self):
diff --git a/tempest/api/compute/keypairs/base.py b/tempest/api/compute/keypairs/base.py
index 15f231b..ebfb724 100644
--- a/tempest/api/compute/keypairs/base.py
+++ b/tempest/api/compute/keypairs/base.py
@@ -27,10 +27,12 @@
def _delete_keypair(self, keypair_name):
self.client.delete_keypair(keypair_name)
- def _create_keypair(self, keypair_name, pub_key=None):
+ def _create_keypair(self, keypair_name, pub_key=None, keypair_type=None):
kwargs = {'name': keypair_name}
if pub_key:
kwargs.update({'public_key': pub_key})
+ if keypair_type:
+ kwargs.update({'type': keypair_type})
body = self.client.create_keypair(**kwargs)['keypair']
self.addCleanup(self._delete_keypair, keypair_name)
return body
diff --git a/tempest/api/compute/keypairs/test_keypairs.py b/tempest/api/compute/keypairs/test_keypairs.py
index d10bf14..be6f615 100644
--- a/tempest/api/compute/keypairs/test_keypairs.py
+++ b/tempest/api/compute/keypairs/test_keypairs.py
@@ -19,6 +19,8 @@
class KeyPairsV2TestJSON(base.BaseKeypairTest):
+ max_microversion = '2.1'
+
@test.idempotent_id('1d1dbedb-d7a0-432a-9d09-83f543c3c19b')
def test_keypairs_create_list_delete(self):
# Keypairs created should be available in the response list
diff --git a/tempest/api/compute/keypairs/test_keypairs_v22.py b/tempest/api/compute/keypairs/test_keypairs_v22.py
new file mode 100644
index 0000000..997ef9b
--- /dev/null
+++ b/tempest/api/compute/keypairs/test_keypairs_v22.py
@@ -0,0 +1,51 @@
+# Copyright 2016 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.compute.keypairs import test_keypairs
+from tempest.common.utils import data_utils
+from tempest import test
+
+
+class KeyPairsV22TestJSON(test_keypairs.KeyPairsV2TestJSON):
+ min_microversion = '2.2'
+ max_microversion = 'latest'
+
+ def _check_keypair_type(self, keypair, keypair_type):
+ if keypair_type is None:
+ keypair_type = 'ssh'
+ self.assertEqual(keypair_type, keypair['type'])
+
+ def _test_keypairs_create_list_show(self, keypair_type=None):
+ k_name = data_utils.rand_name('keypair')
+ keypair = self._create_keypair(k_name, keypair_type=keypair_type)
+        # Verify that 'type' is present in the keypair create response for
+        # microversion 2.2 and that it defaults to 'ssh'.
+ self._check_keypair_type(keypair, keypair_type)
+ keypair_detail = self.client.show_keypair(k_name)['keypair']
+ self._check_keypair_type(keypair_detail, keypair_type)
+ fetched_list = self.client.list_keypairs()['keypairs']
+ for keypair in fetched_list:
+            # Verify that 'type' is present in the keypair list response for
+            # microversion 2.2 and that it defaults to 'ssh'.
+ if keypair['keypair']['name'] == k_name:
+ self._check_keypair_type(keypair['keypair'], keypair_type)
+
+ @test.idempotent_id('8726fa85-7f98-4b20-af9e-f710a4f3391c')
+ def test_keypairsv22_create_list_show(self):
+ self._test_keypairs_create_list_show()
+
+ @test.idempotent_id('89d59d43-f735-441a-abcf-0601727f47b6')
+ def test_keypairsv22_create_list_show_with_type(self):
+ keypair_type = 'x509'
+ self._test_keypairs_create_list_show(keypair_type=keypair_type)
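
The new KeyPairsV22TestJSON class is run or skipped depending on whether the configured compute microversion range overlaps its min_microversion/max_microversion bounds (KeyPairsV2TestJSON caps itself at 2.1 above for the same reason). A minimal standalone sketch of that overlap check, assuming plain 'major.minor' version strings and treating 'latest' as unbounded; this is an illustration only, not Tempest's implementation::

    # Illustrative only: mimics how a test class bounded by
    # [min_microversion, max_microversion] is selected only when that range
    # overlaps the configured [cfg_min, cfg_max] range.

    def _to_tuple(version):
        # 'latest' sorts after any concrete "major.minor" value.
        if version == 'latest':
            return (float('inf'),)
        return tuple(int(part) for part in version.split('.'))

    def class_is_selected(cls_min, cls_max, cfg_min, cfg_max):
        """Return True when [cls_min, cls_max] overlaps [cfg_min, cfg_max]."""
        return (_to_tuple(cls_min) <= _to_tuple(cfg_max) and
                _to_tuple(cfg_min) <= _to_tuple(cls_max))

    # KeyPairsV22TestJSON: min '2.2', max 'latest'.
    print(class_is_selected('2.2', 'latest', '2.1', '2.37'))  # True
    print(class_is_selected('2.2', 'latest', '2.1', '2.1'))   # False
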
diff --git a/tempest/api/compute/security_groups/test_security_group_rules.py b/tempest/api/compute/security_groups/test_security_group_rules.py
index 9aa59f7..38c294b 100644
--- a/tempest/api/compute/security_groups/test_security_group_rules.py
+++ b/tempest/api/compute/security_groups/test_security_group_rules.py
@@ -158,7 +158,9 @@
to_port=to_port2)['security_group_rule']
rule2_id = rule['id']
# Delete the Security Group rule2 at the end of this method
- self.addCleanup(self.client.delete_security_group_rule, rule2_id)
+ self.addCleanup(
+ self.security_group_rules_client.delete_security_group_rule,
+ rule2_id)
# Get rules of the created Security Group
rules = self.security_groups_client.show_security_group(
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index 24d503f..a6ccdd3 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -47,7 +47,7 @@
cls.client = cls.os.interfaces_client
def wait_for_interface_status(self, server, port_id, status):
- """Waits for a interface to reach a given status."""
+ """Waits for an interface to reach a given status."""
body = (self.client.show_interface(server, port_id)
['interfaceAttachment'])
interface_status = body['port_state']
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index f51c2db..f719bfc 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -50,6 +50,7 @@
cls.accessIPv4 = '1.1.1.1'
cls.accessIPv6 = '0000:0000:0000:0000:0000:babe:220.12.22.2'
cls.name = data_utils.rand_name('server')
+ cls.password = data_utils.rand_password()
disk_config = cls.disk_config
cls.server_initial = cls.create_test_server(
validatable=True,
@@ -58,8 +59,8 @@
metadata=cls.meta,
accessIPv4=cls.accessIPv4,
accessIPv6=cls.accessIPv6,
- disk_config=disk_config)
- cls.password = cls.server_initial['adminPass']
+ disk_config=disk_config,
+ adminPass=cls.password)
cls.server = (cls.client.show_server(cls.server_initial['id'])
['server'])
@@ -262,13 +263,16 @@
'Instance validation tests are disabled.')
def test_verify_created_server_ephemeral_disk(self):
# Verify that the ephemeral disk is created when creating server
+ flavor_base = self.flavors_client.show_flavor(
+ self.flavor_ref)['flavor']
def create_flavor_with_extra_specs():
flavor_with_eph_disk_name = data_utils.rand_name('eph_flavor')
flavor_with_eph_disk_id = data_utils.rand_int_id(start=1000)
- ram = 64
- vcpus = 1
- disk = 0
+
+ ram = flavor_base['ram']
+ vcpus = flavor_base['vcpus']
+ disk = flavor_base['disk']
# Create a flavor with extra specs
flavor = (self.flavor_client.
@@ -284,9 +288,9 @@
flavor_no_eph_disk_name = data_utils.rand_name('no_eph_flavor')
flavor_no_eph_disk_id = data_utils.rand_int_id(start=1000)
- ram = 64
- vcpus = 1
- disk = 0
+ ram = flavor_base['ram']
+ vcpus = flavor_base['vcpus']
+ disk = flavor_base['disk']
# Create a flavor without extra specs
flavor = (self.flavor_client.
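
Several hunks in this file (and in the rescue and personality tests below) switch from reading server['adminPass'] out of the create response to generating a password first and sending it as adminPass, because the response only carries a password back when the deployment is configured to return instance passwords. A small standalone sketch of that pattern, with rand_password() re-implemented locally purely for illustration::

    import secrets
    import string

    def rand_password(length=15):
        # Rough local stand-in for tempest's data_utils.rand_password().
        alphabet = string.ascii_letters + string.digits
        return ''.join(secrets.choice(alphabet) for _ in range(length))

    # Pick the password before the create call, send it as adminPass, and
    # keep it for later SSH validation; never depend on the create response
    # echoing an 'adminPass' field, since that only happens when the cloud
    # under test is configured to return instance passwords.
    password = rand_password()
    create_kwargs = {'name': 'example-server', 'adminPass': password}
    print(create_kwargs['adminPass'] == password)  # True
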
diff --git a/tempest/api/compute/servers/test_list_server_filters.py b/tempest/api/compute/servers/test_list_server_filters.py
index 3acff98..d1ec064 100644
--- a/tempest/api/compute/servers/test_list_server_filters.py
+++ b/tempest/api/compute/servers/test_list_server_filters.py
@@ -43,7 +43,7 @@
super(ListServerFiltersTestJSON, cls).resource_setup()
# Check to see if the alternate image ref actually exists...
- images_client = cls.images_client
+ images_client = cls.compute_images_client
images = images_client.list_images()['images']
if cls.image_ref != cls.image_ref_alt and \
@@ -56,13 +56,13 @@
# Do some sanity checks here. If one of the images does
# not exist, fail early since the tests won't work...
try:
- cls.images_client.show_image(cls.image_ref)
+ cls.compute_images_client.show_image(cls.image_ref)
except lib_exc.NotFound:
raise RuntimeError("Image %s (image_ref) was not found!" %
cls.image_ref)
try:
- cls.images_client.show_image(cls.image_ref_alt)
+ cls.compute_images_client.show_image(cls.image_ref_alt)
except lib_exc.NotFound:
raise RuntimeError("Image %s (image_ref_alt) was not found!" %
cls.image_ref_alt)
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 71dfd96..66e85a6 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -172,11 +172,16 @@
self.assertEqual(new_name, server['name'])
if CONF.validation.run_validation:
- # TODO(jlanoux) add authentication with the provided password
+ # Authentication is attempted in the following order of priority:
+            # 1. The key passed in, if one was passed in.
+            # 2. Any key we can find through an SSH agent (if allowed).
+            # 3. Any "id_rsa", "id_dsa" or "id_ecdsa" key discoverable in
+            #    ~/.ssh/ (if allowed).
+            # 4. Plain username/password auth, if a password was given.
linux_client = remote_client.RemoteClient(
self.get_server_ip(rebuilt_server),
self.ssh_user,
- self.password,
+ password,
self.validation_resources['keypair']['private_key'])
linux_client.validate_authentication()
@@ -453,7 +458,7 @@
server = self.client.show_server(self.server_id)['server']
image_name = server['name'] + '-shelved'
params = {'name': image_name}
- images = self.images_client.list_images(**params)['images']
+ images = self.compute_images_client.list_images(**params)['images']
self.assertEqual(1, len(images))
self.assertEqual(image_name, images[0]['name'])
diff --git a/tempest/api/compute/servers/test_server_personality.py b/tempest/api/compute/servers/test_server_personality.py
index 58d26d3..dad8e90 100644
--- a/tempest/api/compute/servers/test_server_personality.py
+++ b/tempest/api/compute/servers/test_server_personality.py
@@ -14,6 +14,7 @@
# under the License.
import base64
+from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from tempest.api.compute import base
@@ -55,13 +56,16 @@
file_path = '/test.txt'
personality = [{'path': file_path,
'contents': base64.b64encode(file_contents)}]
- server = self.create_test_server(personality=personality,
- wait_until='ACTIVE',
- validatable=True)
+ password = data_utils.rand_password()
+ created_server = self.create_test_server(personality=personality,
+ adminPass=password,
+ wait_until='ACTIVE',
+ validatable=True)
+ server = self.client.show_server(created_server['id'])['server']
if CONF.validation.run_validation:
linux_client = remote_client.RemoteClient(
self.get_server_ip(server),
- self.ssh_user, server['adminPass'],
+ self.ssh_user, password,
self.validation_resources['keypair']['private_key'])
self.assertEqual(file_contents,
linux_client.exec_command(
@@ -116,13 +120,16 @@
'path': path,
'contents': base64.b64encode(file_contents),
})
- server = self.create_test_server(personality=person,
- wait_until='ACTIVE',
- validatable=True)
+ password = data_utils.rand_password()
+ created_server = self.create_test_server(personality=person,
+ adminPass=password,
+ wait_until='ACTIVE',
+ validatable=True)
+ server = self.client.show_server(created_server['id'])['server']
if CONF.validation.run_validation:
linux_client = remote_client.RemoteClient(
self.get_server_ip(server),
- self.ssh_user, server['adminPass'],
+ self.ssh_user, password,
self.validation_resources['keypair']['private_key'])
for i in person:
self.assertEqual(base64.b64decode(i['contents']),
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index 2296980..12b824f 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -52,10 +52,11 @@
name=cls.sg_name, description=cls.sg_desc)['security_group']
cls.sg_id = cls.sg['id']
+ cls.password = data_utils.rand_password()
# Server for positive tests
- server = cls.create_test_server(wait_until='BUILD')
+ server = cls.create_test_server(adminPass=cls.password,
+ wait_until='BUILD')
cls.server_id = server['id']
- cls.password = server['adminPass']
waiters.wait_for_server_status(cls.servers_client, cls.server_id,
'ACTIVE')
diff --git a/tempest/api/compute/servers/test_server_rescue_negative.py b/tempest/api/compute/servers/test_server_rescue_negative.py
index 65ad2f5..5afb4d1 100644
--- a/tempest/api/compute/servers/test_server_rescue_negative.py
+++ b/tempest/api/compute/servers/test_server_rescue_negative.py
@@ -43,14 +43,15 @@
def resource_setup(cls):
super(ServerRescueNegativeTestJSON, cls).resource_setup()
cls.device = CONF.compute.volume_device_name
-
+ cls.password = data_utils.rand_password()
+ rescue_password = data_utils.rand_password()
# Server for negative tests
- server = cls.create_test_server(wait_until='BUILD')
- resc_server = cls.create_test_server(wait_until='ACTIVE')
+ server = cls.create_test_server(adminPass=cls.password,
+ wait_until='BUILD')
+ resc_server = cls.create_test_server(adminPass=rescue_password,
+ wait_until='ACTIVE')
cls.server_id = server['id']
- cls.password = server['adminPass']
cls.rescue_id = resc_server['id']
- rescue_password = resc_server['adminPass']
cls.servers_client.rescue_server(
cls.rescue_id, adminPass=rescue_password)
diff --git a/tempest/api/compute/servers/test_servers.py b/tempest/api/compute/servers/test_servers.py
index 8e2fbf1..2f79d47 100644
--- a/tempest/api/compute/servers/test_servers.py
+++ b/tempest/api/compute/servers/test_servers.py
@@ -13,11 +13,16 @@
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
+
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest.common import waiters
+from tempest import config
from tempest import test
+CONF = config.CONF
+
class ServersTestJSON(base.BaseV2ComputeTest):
@@ -31,6 +36,9 @@
super(ServersTestJSON, self).tearDown()
@test.idempotent_id('b92d5ec7-b1dd-44a2-87e4-45e888c46ef0')
+ @testtools.skipUnless(CONF.compute_feature_enabled.
+ enable_instance_password,
+ 'Instance password not available.')
def test_create_server_with_admin_password(self):
# If an admin password is provided on server creation, the server's
# root password should be set to that password.
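
The new decorator skips the admin-password test unless CONF.compute_feature_enabled.enable_instance_password is true. A self-contained sketch of the same gating pattern using the stdlib skipUnless (Tempest uses testtools.skipUnless, which behaves the same way); the FakeConf class below is purely illustrative::

    import unittest

    class FakeConf(object):
        # Hypothetical stand-in for CONF.compute_feature_enabled.
        enable_instance_password = False

    CONF = FakeConf()

    class ExampleTest(unittest.TestCase):
        @unittest.skipUnless(CONF.enable_instance_password,
                             'Instance password not available.')
        def test_create_server_with_admin_password(self):
            # Only runs when the deployment supports returning/accepting an
            # admin password; otherwise it is reported as skipped, like the
            # decorated test above.
            self.assertTrue(True)

    if __name__ == '__main__':
        unittest.main()
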
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index ed8484e..681b5db 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -490,7 +490,7 @@
server = self.client.show_server(self.server_id)['server']
image_name = server['name'] + '-shelved'
params = {'name': image_name}
- images = self.images_client.list_images(**params)['images']
+ images = self.compute_images_client.list_images(**params)['images']
self.assertEqual(1, len(images))
self.assertEqual(image_name, images[0]['name'])
diff --git a/tempest/api/compute/test_authorization.py b/tempest/api/compute/test_authorization.py
index e363fc4..bf4396d 100644
--- a/tempest/api/compute/test_authorization.py
+++ b/tempest/api/compute/test_authorization.py
@@ -48,18 +48,19 @@
def setup_clients(cls):
super(AuthorizationTestJSON, cls).setup_clients()
cls.client = cls.os.servers_client
- cls.images_client = cls.os.images_client
+ cls.compute_images_client = cls.os.compute_images_client
cls.glance_client = cls.os.image_client
cls.keypairs_client = cls.os.keypairs_client
cls.security_client = cls.os.compute_security_groups_client
- cls.rule_client = cls.os.security_group_rules_client
+ cls.rule_client = cls.os.compute_security_group_rules_client
cls.alt_client = cls.alt_manager.servers_client
- cls.alt_images_client = cls.alt_manager.images_client
+ cls.alt_compute_images_client = cls.alt_manager.compute_images_client
cls.alt_keypairs_client = cls.alt_manager.keypairs_client
cls.alt_security_client = (
cls.alt_manager.compute_security_groups_client)
- cls.alt_rule_client = cls.alt_manager.security_group_rules_client
+ cls.alt_rule_client = (
+ cls.alt_manager.compute_security_group_rules_client)
@classmethod
def resource_setup(cls):
@@ -77,7 +78,7 @@
body = cls.glance_client.update_image(image_id,
data=image_file)['image']
cls.glance_client.wait_for_image_status(image_id, 'active')
- cls.image = cls.images_client.show_image(image_id)['image']
+ cls.image = cls.compute_images_client.show_image(image_id)['image']
cls.keypairname = data_utils.rand_name('keypair')
cls.keypairs_client.create_keypair(name=cls.keypairname)
@@ -98,7 +99,7 @@
@classmethod
def resource_cleanup(cls):
if hasattr(cls, 'image'):
- cls.images_client.delete_image(cls.image['id'])
+ cls.compute_images_client.delete_image(cls.image['id'])
if hasattr(cls, 'keypairname'):
cls.keypairs_client.delete_keypair(cls.keypairname)
if hasattr(cls, 'security_group'):
@@ -175,7 +176,7 @@
def test_create_image_for_alt_account_fails(self):
# A create image request for another user's server should fail
self.assertRaises(lib_exc.NotFound,
- self.alt_images_client.create_image,
+ self.alt_compute_images_client.create_image,
self.server['id'], name='testImage')
@test.idempotent_id('95d445f6-babc-4f2e-aea3-aa24ec5e7f0d')
@@ -261,13 +262,14 @@
def test_get_image_for_alt_account_fails(self):
# A GET request for an image on another user's account should fail
self.assertRaises(lib_exc.NotFound,
- self.alt_images_client.show_image, self.image['id'])
+ self.alt_compute_images_client.show_image,
+ self.image['id'])
@test.idempotent_id('9facb962-f043-4a9d-b9ee-166a32dea098')
def test_delete_image_for_alt_account_fails(self):
# A DELETE request for another user's image should fail
self.assertRaises(lib_exc.NotFound,
- self.alt_images_client.delete_image,
+ self.alt_compute_images_client.delete_image,
self.image['id'])
@test.idempotent_id('752c917e-83be-499d-a422-3559127f7d3c')
@@ -390,7 +392,7 @@
# A set metadata for another user's image should fail
req_metadata = {'meta1': 'value1', 'meta2': 'value2'}
self.assertRaises(lib_exc.NotFound,
- self.alt_images_client.set_image_metadata,
+ self.alt_compute_images_client.set_image_metadata,
self.image['id'], req_metadata)
@test.idempotent_id('dea1936a-473d-49f2-92ad-97bb7aded22e')
@@ -408,13 +410,14 @@
def test_get_metadata_of_alt_account_image_fails(self):
# A get metadata for another user's image should fail
req_metadata = {'meta1': 'value1'}
- self.addCleanup(self.images_client.delete_image_metadata_item,
+ self.addCleanup(self.compute_images_client.delete_image_metadata_item,
self.image['id'], 'meta1')
- self.images_client.set_image_metadata(self.image['id'],
- req_metadata)
- self.assertRaises(lib_exc.NotFound,
- self.alt_images_client.show_image_metadata_item,
- self.image['id'], 'meta1')
+ self.compute_images_client.set_image_metadata(self.image['id'],
+ req_metadata)
+ self.assertRaises(
+ lib_exc.NotFound,
+ self.alt_compute_images_client.show_image_metadata_item,
+ self.image['id'], 'meta1')
@test.idempotent_id('79531e2e-e721-493c-8b30-a35db36fdaa6')
def test_delete_metadata_of_alt_account_server_fails(self):
@@ -431,13 +434,14 @@
def test_delete_metadata_of_alt_account_image_fails(self):
# A delete metadata for another user's image should fail
req_metadata = {'meta1': 'data1'}
- self.addCleanup(self.images_client.delete_image_metadata_item,
+ self.addCleanup(self.compute_images_client.delete_image_metadata_item,
self.image['id'], 'meta1')
- self.images_client.set_image_metadata(self.image['id'],
- req_metadata)
- self.assertRaises(lib_exc.NotFound,
- self.alt_images_client.delete_image_metadata_item,
- self.image['id'], 'meta1')
+ self.compute_images_client.set_image_metadata(self.image['id'],
+ req_metadata)
+ self.assertRaises(
+ lib_exc.NotFound,
+ self.alt_compute_images_client.delete_image_metadata_item,
+ self.image['id'], 'meta1')
@test.idempotent_id('b0c1e7a0-8853-40fd-8384-01f93d116cae')
def test_get_console_output_of_alt_account_server_fails(self):
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index b4837f7..01a8e58 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -63,11 +63,11 @@
def _create_and_attach(self):
# Start a server and wait for it to become ready
- admin_pass = self.image_ssh_password
+ self.admin_pass = self.image_ssh_password
self.server = self.create_test_server(
validatable=True,
wait_until='ACTIVE',
- adminPass=admin_pass)
+ adminPass=self.admin_pass)
# Record addresses so that we can ssh later
self.server['addresses'] = self.servers_client.list_addresses(
@@ -75,7 +75,7 @@
# Create a volume and wait for it to become ready
self.volume = self.volumes_client.create_volume(
- CONF.volume.volume_size, display_name='test')['volume']
+ size=CONF.volume.volume_size, display_name='test')['volume']
self.addCleanup(self._delete_volume)
self.volumes_client.wait_for_volume_status(self.volume['id'],
'available')
@@ -108,7 +108,7 @@
linux_client = remote_client.RemoteClient(
self.get_server_ip(self.server),
self.image_ssh_user,
- self.server['adminPass'],
+ self.admin_pass,
self.validation_resources['keypair']['private_key'])
partitions = linux_client.get_partitions()
@@ -127,7 +127,7 @@
linux_client = remote_client.RemoteClient(
self.get_server_ip(self.server),
self.image_ssh_user,
- self.server['adminPass'],
+ self.admin_pass,
self.validation_resources['keypair']['private_key'])
partitions = linux_client.get_partitions()
diff --git a/tempest/api/compute/volumes/test_volume_snapshots.py b/tempest/api/compute/volumes/test_volume_snapshots.py
index a00c0ba..f42d153 100644
--- a/tempest/api/compute/volumes/test_volume_snapshots.py
+++ b/tempest/api/compute/volumes/test_volume_snapshots.py
@@ -50,7 +50,7 @@
s_name = data_utils.rand_name('Snapshot')
# Create snapshot
snapshot = self.snapshots_client.create_snapshot(
- volume['id'],
+ volume_id=volume['id'],
display_name=s_name)['snapshot']
def delete_snapshot(snapshot_id):
diff --git a/tempest/api/data_processing/base.py b/tempest/api/data_processing/base.py
index 4e88f65..b6d0c48 100644
--- a/tempest/api/data_processing/base.py
+++ b/tempest/api/data_processing/base.py
@@ -13,6 +13,7 @@
# under the License.
from collections import OrderedDict
+import copy
import six
from tempest_lib import exceptions as lib_exc
@@ -27,42 +28,93 @@
"""Default templates.
There should always be at least a master1 and a worker1 node
group template."""
-DEFAULT_TEMPLATES = {
- 'vanilla': OrderedDict([
- ('2.6.0', {
- 'NODES': {
- 'master1': {
- 'count': 1,
- 'node_processes': ['namenode', 'resourcemanager',
- 'hiveserver']
+BASE_VANILLA_DESC = {
+ 'NODES': {
+ 'master1': {
+ 'count': 1,
+ 'node_processes': ['namenode', 'resourcemanager',
+ 'hiveserver']
+ },
+ 'master2': {
+ 'count': 1,
+ 'node_processes': ['oozie', 'historyserver',
+ 'secondarynamenode']
+ },
+ 'worker1': {
+ 'count': 1,
+ 'node_processes': ['datanode', 'nodemanager'],
+ 'node_configs': {
+ 'MapReduce': {
+ 'yarn.app.mapreduce.am.resource.mb': 256,
+ 'yarn.app.mapreduce.am.command-opts': '-Xmx256m'
},
- 'master2': {
- 'count': 1,
- 'node_processes': ['oozie', 'historyserver',
- 'secondarynamenode']
- },
- 'worker1': {
- 'count': 1,
- 'node_processes': ['datanode', 'nodemanager'],
- 'node_configs': {
- 'MapReduce': {
- 'yarn.app.mapreduce.am.resource.mb': 256,
- 'yarn.app.mapreduce.am.command-opts': '-Xmx256m'
- },
- 'YARN': {
- 'yarn.scheduler.minimum-allocation-mb': 256,
- 'yarn.scheduler.maximum-allocation-mb': 1024,
- 'yarn.nodemanager.vmem-check-enabled': False
- }
- }
- }
- },
- 'cluster_configs': {
- 'HDFS': {
- 'dfs.replication': 1
+ 'YARN': {
+ 'yarn.scheduler.minimum-allocation-mb': 256,
+ 'yarn.scheduler.maximum-allocation-mb': 1024,
+ 'yarn.nodemanager.vmem-check-enabled': False
}
}
- }),
+ }
+ },
+ 'cluster_configs': {
+ 'HDFS': {
+ 'dfs.replication': 1
+ }
+ }
+}
+
+BASE_SPARK_DESC = {
+ 'NODES': {
+ 'master1': {
+ 'count': 1,
+ 'node_processes': ['namenode', 'master']
+ },
+ 'worker1': {
+ 'count': 1,
+ 'node_processes': ['datanode', 'slave']
+ }
+ },
+ 'cluster_configs': {
+ 'HDFS': {
+ 'dfs.replication': 1
+ }
+ }
+}
+
+BASE_CDH_DESC = {
+ 'NODES': {
+ 'master1': {
+ 'count': 1,
+ 'node_processes': ['CLOUDERA_MANAGER']
+ },
+ 'master2': {
+ 'count': 1,
+ 'node_processes': ['HDFS_NAMENODE',
+ 'YARN_RESOURCEMANAGER']
+ },
+ 'master3': {
+ 'count': 1,
+ 'node_processes': ['OOZIE_SERVER', 'YARN_JOBHISTORY',
+ 'HDFS_SECONDARYNAMENODE',
+ 'HIVE_METASTORE', 'HIVE_SERVER2']
+ },
+ 'worker1': {
+ 'count': 1,
+ 'node_processes': ['YARN_NODEMANAGER', 'HDFS_DATANODE']
+ }
+ },
+ 'cluster_configs': {
+ 'HDFS': {
+ 'dfs_replication': 1
+ }
+ }
+}
+
+
+DEFAULT_TEMPLATES = {
+ 'vanilla': OrderedDict([
+ ('2.6.0', copy.deepcopy(BASE_VANILLA_DESC)),
+ ('2.7.1', copy.deepcopy(BASE_VANILLA_DESC)),
('1.2.1', {
'NODES': {
'master1': {
@@ -123,81 +175,13 @@
})
]),
'spark': OrderedDict([
- ('1.0.0', {
- 'NODES': {
- 'master1': {
- 'count': 1,
- 'node_processes': ['namenode', 'master']
- },
- 'worker1': {
- 'count': 1,
- 'node_processes': ['datanode', 'slave']
- }
- },
- 'cluster_configs': {
- 'HDFS': {
- 'dfs.replication': 1
- }
- }
- })
+ ('1.0.0', copy.deepcopy(BASE_SPARK_DESC)),
+ ('1.3.1', copy.deepcopy(BASE_SPARK_DESC))
]),
'cdh': OrderedDict([
- ('5.3.0', {
- 'NODES': {
- 'master1': {
- 'count': 1,
- 'node_processes': ['CLOUDERA_MANAGER']
- },
- 'master2': {
- 'count': 1,
- 'node_processes': ['HDFS_NAMENODE',
- 'YARN_RESOURCEMANAGER']
- },
- 'master3': {
- 'count': 1,
- 'node_processes': ['OOZIE_SERVER', 'YARN_JOBHISTORY',
- 'HDFS_SECONDARYNAMENODE',
- 'HIVE_METASTORE', 'HIVE_SERVER2']
- },
- 'worker1': {
- 'count': 1,
- 'node_processes': ['YARN_NODEMANAGER', 'HDFS_DATANODE']
- }
- },
- 'cluster_configs': {
- 'HDFS': {
- 'dfs_replication': 1
- }
- }
- }),
- ('5', {
- 'NODES': {
- 'master1': {
- 'count': 1,
- 'node_processes': ['CLOUDERA_MANAGER']
- },
- 'master2': {
- 'count': 1,
- 'node_processes': ['HDFS_NAMENODE',
- 'YARN_RESOURCEMANAGER']
- },
- 'master3': {
- 'count': 1,
- 'node_processes': ['OOZIE_SERVER', 'YARN_JOBHISTORY',
- 'HDFS_SECONDARYNAMENODE',
- 'HIVE_METASTORE', 'HIVE_SERVER2']
- },
- 'worker1': {
- 'count': 1,
- 'node_processes': ['YARN_NODEMANAGER', 'HDFS_DATANODE']
- }
- },
- 'cluster_configs': {
- 'HDFS': {
- 'dfs_replication': 1
- }
- }
- })
+ ('5.4.0', copy.deepcopy(BASE_CDH_DESC)),
+ ('5.3.0', copy.deepcopy(BASE_CDH_DESC)),
+ ('5', copy.deepcopy(BASE_CDH_DESC))
]),
'mapr': OrderedDict([
('4.0.1.mrv2', {
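
The rewrite above collapses several near-identical per-version template dicts into shared BASE_VANILLA_DESC/BASE_SPARK_DESC/BASE_CDH_DESC constants and registers each supported plugin version via copy.deepcopy, so mutating one version's template cannot leak into another. A short standalone sketch, with made-up data, of why the deep copy matters::

    import copy
    from collections import OrderedDict

    BASE_DESC = {'NODES': {'worker1': {'count': 1}}}

    TEMPLATES = OrderedDict([
        ('1.0.0', copy.deepcopy(BASE_DESC)),
        ('1.3.1', copy.deepcopy(BASE_DESC)),
    ])

    # Each version owns an independent copy, so scaling one template does
    # not silently change the other (a shared reference would).
    TEMPLATES['1.0.0']['NODES']['worker1']['count'] = 3
    print(TEMPLATES['1.3.1']['NODES']['worker1']['count'])  # still 1
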
diff --git a/tempest/api/database/base.py b/tempest/api/database/base.py
index f4c1881..01e05db 100644
--- a/tempest/api/database/base.py
+++ b/tempest/api/database/base.py
@@ -13,13 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log as logging
-
from tempest import config
import tempest.test
CONF = config.CONF
-LOG = logging.getLogger(__name__)
class BaseDatabaseTest(tempest.test.BaseTestCase):
diff --git a/tempest/api/identity/admin/v2/test_endpoints.py b/tempest/api/identity/admin/v2/test_endpoints.py
index bff4f91..349edfa 100644
--- a/tempest/api/identity/admin/v2/test_endpoints.py
+++ b/tempest/api/identity/admin/v2/test_endpoints.py
@@ -27,7 +27,7 @@
s_name = data_utils.rand_name('service')
s_type = data_utils.rand_name('type')
s_description = data_utils.rand_name('description')
- cls.service_data = cls.client.create_service(
+ cls.service_data = cls.services_client.create_service(
s_name, s_type, description=s_description)['OS-KSADM:service']
cls.service_id = cls.service_data['id']
cls.service_ids.append(cls.service_id)
@@ -50,7 +50,7 @@
for e in cls.setup_endpoints:
cls.client.delete_endpoint(e['id'])
for s in cls.service_ids:
- cls.client.delete_service(s)
+ cls.services_client.delete_service(s)
super(EndPointsTestJSON, cls).resource_cleanup()
@test.idempotent_id('11f590eb-59d8-4067-8b2b-980c7f387f51')
diff --git a/tempest/api/identity/admin/v2/test_roles.py b/tempest/api/identity/admin/v2/test_roles.py
index 8702db7..3f3d16e 100644
--- a/tempest/api/identity/admin/v2/test_roles.py
+++ b/tempest/api/identity/admin/v2/test_roles.py
@@ -27,7 +27,7 @@
super(RolesTestJSON, cls).resource_setup()
for _ in moves.xrange(5):
role_name = data_utils.rand_name(name='role')
- role = cls.client.create_role(role_name)['role']
+ role = cls.roles_client.create_role(name=role_name)['role']
cls.data.roles.append(role)
def _get_role_params(self):
@@ -48,7 +48,7 @@
@test.idempotent_id('75d9593f-50b7-4fcf-bd64-e3fb4a278e23')
def test_list_roles(self):
"""Return a list of all roles."""
- body = self.client.list_roles()['roles']
+ body = self.roles_client.list_roles()['roles']
found = [role for role in body if role in self.data.roles]
self.assertTrue(any(found))
self.assertEqual(len(found), len(self.data.roles))
@@ -57,16 +57,16 @@
def test_role_create_delete(self):
"""Role should be created, verified, and deleted."""
role_name = data_utils.rand_name(name='role-test')
- body = self.client.create_role(role_name)['role']
+ body = self.roles_client.create_role(name=role_name)['role']
self.assertEqual(role_name, body['name'])
- body = self.client.list_roles()['roles']
+ body = self.roles_client.list_roles()['roles']
found = [role for role in body if role['name'] == role_name]
self.assertTrue(any(found))
- body = self.client.delete_role(found[0]['id'])
+ body = self.roles_client.delete_role(found[0]['id'])
- body = self.client.list_roles()['roles']
+ body = self.roles_client.list_roles()['roles']
found = [role for role in body if role['name'] == role_name]
self.assertFalse(any(found))
@@ -76,7 +76,7 @@
self.data.setup_test_role()
role_id = self.data.role['id']
role_name = self.data.role['name']
- body = self.client.show_role(role_id)['role']
+ body = self.roles_client.show_role(role_id)['role']
self.assertEqual(role_id, body['id'])
self.assertEqual(role_name, body['name'])
@@ -84,24 +84,28 @@
def test_assign_user_role(self):
"""Assign a role to a user on a tenant."""
(user, tenant, role) = self._get_role_params()
- self.client.assign_user_role(tenant['id'], user['id'], role['id'])
- roles = self.client.list_user_roles(tenant['id'], user['id'])['roles']
+ self.roles_client.assign_user_role(tenant['id'], user['id'],
+ role['id'])
+ roles = self.roles_client.list_user_roles(tenant['id'],
+ user['id'])['roles']
self.assert_role_in_role_list(role, roles)
@test.idempotent_id('f0b9292c-d3ba-4082-aa6c-440489beef69')
def test_remove_user_role(self):
"""Remove a role assigned to a user on a tenant."""
(user, tenant, role) = self._get_role_params()
- user_role = self.client.assign_user_role(tenant['id'],
- user['id'],
- role['id'])['role']
- self.client.delete_user_role(tenant['id'], user['id'],
- user_role['id'])
+ user_role = self.roles_client.assign_user_role(tenant['id'],
+ user['id'],
+ role['id'])['role']
+ self.roles_client.delete_user_role(tenant['id'], user['id'],
+ user_role['id'])
@test.idempotent_id('262e1e3e-ed71-4edd-a0e5-d64e83d66d05')
def test_list_user_roles(self):
"""List roles assigned to a user on tenant."""
(user, tenant, role) = self._get_role_params()
- self.client.assign_user_role(tenant['id'], user['id'], role['id'])
- roles = self.client.list_user_roles(tenant['id'], user['id'])['roles']
+ self.roles_client.assign_user_role(tenant['id'], user['id'],
+ role['id'])
+ roles = self.roles_client.list_user_roles(tenant['id'],
+ user['id'])['roles']
self.assert_role_in_role_list(role, roles)
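
The identity changes in this and the following files move call sites off the catch-all identity client onto dedicated per-resource clients (roles_client, users_client, services_client, policies_client) and pass creation arguments by keyword, e.g. create_role(name=role_name). A toy sketch of that call shape; the _RolesClient class below is illustrative only, not the Tempest client::

    class _RolesClient(object):
        # Toy in-memory client; only the call shape mirrors the new style:
        # a dedicated roles client taking keyword arguments such as name=...
        def __init__(self):
            self._roles = {}

        def create_role(self, name):
            role = {'id': str(len(self._roles) + 1), 'name': name}
            self._roles[role['id']] = role
            return {'role': role}

        def list_roles(self):
            return {'roles': list(self._roles.values())}

    roles_client = _RolesClient()
    role = roles_client.create_role(name='example-role')['role']
    print(role in roles_client.list_roles()['roles'])  # True
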
diff --git a/tempest/api/identity/admin/v2/test_roles_negative.py b/tempest/api/identity/admin/v2/test_roles_negative.py
index 45c95df..c9af7c6 100644
--- a/tempest/api/identity/admin/v2/test_roles_negative.py
+++ b/tempest/api/identity/admin/v2/test_roles_negative.py
@@ -37,7 +37,7 @@
def test_list_roles_by_unauthorized_user(self):
# Non-administrator user should not be able to list roles
self.assertRaises(lib_exc.Forbidden,
- self.non_admin_client.list_roles)
+ self.non_admin_roles_client.list_roles)
@test.attr(type=['negative'])
@test.idempotent_id('11a3c7da-df6c-40c2-abc2-badd682edf9f')
@@ -45,14 +45,15 @@
# Request to list roles without a valid token should fail
token = self.client.auth_provider.get_token()
self.client.delete_token(token)
- self.assertRaises(lib_exc.Unauthorized, self.client.list_roles)
+ self.assertRaises(lib_exc.Unauthorized, self.roles_client.list_roles)
self.client.auth_provider.clear_auth()
@test.attr(type=['negative'])
@test.idempotent_id('c0b89e56-accc-4c73-85f8-9c0f866104c1')
def test_role_create_blank_name(self):
# Should not be able to create a role with a blank name
- self.assertRaises(lib_exc.BadRequest, self.client.create_role, '')
+ self.assertRaises(lib_exc.BadRequest, self.roles_client.create_role,
+ name='')
@test.attr(type=['negative'])
@test.idempotent_id('585c8998-a8a4-4641-a5dd-abef7a8ced00')
@@ -60,7 +61,8 @@
# Non-administrator user should not be able to create role
role_name = data_utils.rand_name(name='role')
self.assertRaises(lib_exc.Forbidden,
- self.non_admin_client.create_role, role_name)
+ self.non_admin_roles_client.create_role,
+ name=role_name)
@test.attr(type=['negative'])
@test.idempotent_id('a7edd17a-e34a-4aab-8bb7-fa6f498645b8')
@@ -70,7 +72,7 @@
self.client.delete_token(token)
role_name = data_utils.rand_name(name='role')
self.assertRaises(lib_exc.Unauthorized,
- self.client.create_role, role_name)
+ self.roles_client.create_role, name=role_name)
self.client.auth_provider.clear_auth()
@test.attr(type=['negative'])
@@ -78,35 +80,35 @@
def test_role_create_duplicate(self):
# Role names should be unique
role_name = data_utils.rand_name(name='role-dup')
- body = self.client.create_role(role_name)['role']
+ body = self.roles_client.create_role(name=role_name)['role']
role1_id = body.get('id')
- self.addCleanup(self.client.delete_role, role1_id)
- self.assertRaises(lib_exc.Conflict, self.client.create_role,
- role_name)
+ self.addCleanup(self.roles_client.delete_role, role1_id)
+ self.assertRaises(lib_exc.Conflict, self.roles_client.create_role,
+ name=role_name)
@test.attr(type=['negative'])
@test.idempotent_id('15347635-b5b1-4a87-a280-deb2bd6d865e')
def test_delete_role_by_unauthorized_user(self):
# Non-administrator user should not be able to delete role
role_name = data_utils.rand_name(name='role')
- body = self.client.create_role(role_name)['role']
+ body = self.roles_client.create_role(name=role_name)['role']
self.data.roles.append(body)
role_id = body.get('id')
self.assertRaises(lib_exc.Forbidden,
- self.non_admin_client.delete_role, role_id)
+ self.non_admin_roles_client.delete_role, role_id)
@test.attr(type=['negative'])
@test.idempotent_id('44b60b20-70de-4dac-beaf-a3fc2650a16b')
def test_delete_role_request_without_token(self):
# Request to delete role without a valid token should fail
role_name = data_utils.rand_name(name='role')
- body = self.client.create_role(role_name)['role']
+ body = self.roles_client.create_role(name=role_name)['role']
self.data.roles.append(body)
role_id = body.get('id')
token = self.client.auth_provider.get_token()
self.client.delete_token(token)
self.assertRaises(lib_exc.Unauthorized,
- self.client.delete_role,
+ self.roles_client.delete_role,
role_id)
self.client.auth_provider.clear_auth()
@@ -115,7 +117,7 @@
def test_delete_role_non_existent(self):
# Attempt to delete a non existent role should fail
non_existent_role = str(uuid.uuid4().hex)
- self.assertRaises(lib_exc.NotFound, self.client.delete_role,
+ self.assertRaises(lib_exc.NotFound, self.roles_client.delete_role,
non_existent_role)
@test.attr(type=['negative'])
@@ -125,7 +127,7 @@
# assign a role to user
(user, tenant, role) = self._get_role_params()
self.assertRaises(lib_exc.Forbidden,
- self.non_admin_client.assign_user_role,
+ self.non_admin_roles_client.assign_user_role,
tenant['id'], user['id'], role['id'])
@test.attr(type=['negative'])
@@ -136,7 +138,7 @@
token = self.client.auth_provider.get_token()
self.client.delete_token(token)
self.assertRaises(lib_exc.Unauthorized,
- self.client.assign_user_role, tenant['id'],
+ self.roles_client.assign_user_role, tenant['id'],
user['id'], role['id'])
self.client.auth_provider.clear_auth()
@@ -146,7 +148,7 @@
# Attempt to assign a non existent role to user should fail
(user, tenant, role) = self._get_role_params()
non_existent_role = str(uuid.uuid4().hex)
- self.assertRaises(lib_exc.NotFound, self.client.assign_user_role,
+ self.assertRaises(lib_exc.NotFound, self.roles_client.assign_user_role,
tenant['id'], user['id'], non_existent_role)
@test.attr(type=['negative'])
@@ -155,7 +157,7 @@
# Attempt to assign a role on a non existent tenant should fail
(user, tenant, role) = self._get_role_params()
non_existent_tenant = str(uuid.uuid4().hex)
- self.assertRaises(lib_exc.NotFound, self.client.assign_user_role,
+ self.assertRaises(lib_exc.NotFound, self.roles_client.assign_user_role,
non_existent_tenant, user['id'], role['id'])
@test.attr(type=['negative'])
@@ -163,8 +165,9 @@
def test_assign_duplicate_user_role(self):
# Duplicate user role should not get assigned
(user, tenant, role) = self._get_role_params()
- self.client.assign_user_role(tenant['id'], user['id'], role['id'])
- self.assertRaises(lib_exc.Conflict, self.client.assign_user_role,
+ self.roles_client.assign_user_role(tenant['id'], user['id'],
+ role['id'])
+ self.assertRaises(lib_exc.Conflict, self.roles_client.assign_user_role,
tenant['id'], user['id'], role['id'])
@test.attr(type=['negative'])
@@ -173,11 +176,11 @@
# Non-administrator user should not be authorized to
# remove a user's role
(user, tenant, role) = self._get_role_params()
- self.client.assign_user_role(tenant['id'],
- user['id'],
- role['id'])
+ self.roles_client.assign_user_role(tenant['id'],
+ user['id'],
+ role['id'])
self.assertRaises(lib_exc.Forbidden,
- self.non_admin_client.delete_user_role,
+ self.non_admin_roles_client.delete_user_role,
tenant['id'], user['id'], role['id'])
@test.attr(type=['negative'])
@@ -185,13 +188,13 @@
def test_remove_user_role_request_without_token(self):
# Request to remove a user's role without a valid token
(user, tenant, role) = self._get_role_params()
- self.client.assign_user_role(tenant['id'],
- user['id'],
- role['id'])
+ self.roles_client.assign_user_role(tenant['id'],
+ user['id'],
+ role['id'])
token = self.client.auth_provider.get_token()
self.client.delete_token(token)
self.assertRaises(lib_exc.Unauthorized,
- self.client.delete_user_role, tenant['id'],
+ self.roles_client.delete_user_role, tenant['id'],
user['id'], role['id'])
self.client.auth_provider.clear_auth()
@@ -200,11 +203,11 @@
def test_remove_user_role_non_existent_role(self):
# Attempt to delete a non existent role from a user should fail
(user, tenant, role) = self._get_role_params()
- self.client.assign_user_role(tenant['id'],
- user['id'],
- role['id'])
+ self.roles_client.assign_user_role(tenant['id'],
+ user['id'],
+ role['id'])
non_existent_role = str(uuid.uuid4().hex)
- self.assertRaises(lib_exc.NotFound, self.client.delete_user_role,
+ self.assertRaises(lib_exc.NotFound, self.roles_client.delete_user_role,
tenant['id'], user['id'], non_existent_role)
@test.attr(type=['negative'])
@@ -212,11 +215,11 @@
def test_remove_user_role_non_existent_tenant(self):
# Attempt to remove a role from a non existent tenant should fail
(user, tenant, role) = self._get_role_params()
- self.client.assign_user_role(tenant['id'],
- user['id'],
- role['id'])
+ self.roles_client.assign_user_role(tenant['id'],
+ user['id'],
+ role['id'])
non_existent_tenant = str(uuid.uuid4().hex)
- self.assertRaises(lib_exc.NotFound, self.client.delete_user_role,
+ self.assertRaises(lib_exc.NotFound, self.roles_client.delete_user_role,
non_existent_tenant, user['id'], role['id'])
@test.attr(type=['negative'])
@@ -225,10 +228,11 @@
# Non-administrator user should not be authorized to list
# a user's roles
(user, tenant, role) = self._get_role_params()
- self.client.assign_user_role(tenant['id'], user['id'], role['id'])
+ self.roles_client.assign_user_role(tenant['id'], user['id'],
+ role['id'])
self.assertRaises(lib_exc.Forbidden,
- self.non_admin_client.list_user_roles, tenant['id'],
- user['id'])
+ self.non_admin_roles_client.list_user_roles,
+ tenant['id'], user['id'])
@test.attr(type=['negative'])
@test.idempotent_id('682adfb2-fd5f-4b0a-a9ca-322e9bebb907')
@@ -239,7 +243,7 @@
self.client.delete_token(token)
try:
self.assertRaises(lib_exc.Unauthorized,
- self.client.list_user_roles, tenant['id'],
+ self.roles_client.list_user_roles, tenant['id'],
user['id'])
finally:
self.client.auth_provider.clear_auth()
diff --git a/tempest/api/identity/admin/v2/test_services.py b/tempest/api/identity/admin/v2/test_services.py
index 04e15d2..5685922 100644
--- a/tempest/api/identity/admin/v2/test_services.py
+++ b/tempest/api/identity/admin/v2/test_services.py
@@ -25,9 +25,9 @@
def _del_service(self, service_id):
# Deleting the service created in this method
- self.client.delete_service(service_id)
+ self.services_client.delete_service(service_id)
# Checking whether service is deleted successfully
- self.assertRaises(lib_exc.NotFound, self.client.show_service,
+ self.assertRaises(lib_exc.NotFound, self.services_client.show_service,
service_id)
@test.idempotent_id('84521085-c6e6-491c-9a08-ec9f70f90110')
@@ -37,7 +37,7 @@
name = data_utils.rand_name('service')
type = data_utils.rand_name('type')
description = data_utils.rand_name('description')
- service_data = self.client.create_service(
+ service_data = self.services_client.create_service(
name, type, description=description)['OS-KSADM:service']
self.assertFalse(service_data['id'] is None)
self.addCleanup(self._del_service, service_data['id'])
@@ -50,8 +50,9 @@
self.assertIn('description', service_data)
self.assertEqual(description, service_data['description'])
# Get service
- fetched_service = (self.client.show_service(service_data['id'])
- ['OS-KSADM:service'])
+ fetched_service = (
+ self.services_client.show_service(service_data['id'])
+ ['OS-KSADM:service'])
# verifying the existence of service created
self.assertIn('id', fetched_service)
self.assertEqual(fetched_service['id'], service_data['id'])
@@ -68,7 +69,8 @@
# Create a service only with name and type
name = data_utils.rand_name('service')
type = data_utils.rand_name('type')
- service = self.client.create_service(name, type)['OS-KSADM:service']
+ service = self.services_client.create_service(name,
+ type)['OS-KSADM:service']
self.assertIn('id', service)
self.addCleanup(self._del_service, service['id'])
self.assertIn('name', service)
@@ -85,17 +87,17 @@
name = data_utils.rand_name('service')
type = data_utils.rand_name('type')
description = data_utils.rand_name('description')
- service = self.client.create_service(
+ service = self.services_client.create_service(
name, type, description=description)['OS-KSADM:service']
services.append(service)
service_ids = map(lambda x: x['id'], services)
def delete_services():
for service_id in service_ids:
- self.client.delete_service(service_id)
+ self.services_client.delete_service(service_id)
self.addCleanup(delete_services)
# List and Verify Services
- body = self.client.list_services()['OS-KSADM:services']
+ body = self.services_client.list_services()['OS-KSADM:services']
found = [serv for serv in body if serv['id'] in service_ids]
self.assertEqual(len(found), len(services), 'Services not found')
diff --git a/tempest/api/identity/admin/v2/test_tokens.py b/tempest/api/identity/admin/v2/test_tokens.py
index 0daa66d..ee04420 100644
--- a/tempest/api/identity/admin/v2/test_tokens.py
+++ b/tempest/api/identity/admin/v2/test_tokens.py
@@ -30,8 +30,8 @@
tenant = self.tenants_client.create_tenant(tenant_name)['tenant']
self.data.tenants.append(tenant)
# second:create a user
- user = self.client.create_user(user_name, user_password,
- tenant['id'], '')['user']
+ user = self.users_client.create_user(user_name, user_password,
+ tenant['id'], '')['user']
self.data.users.append(user)
# then get a token for the user
body = self.token_client.auth(user_name,
@@ -62,8 +62,8 @@
user_password = data_utils.rand_password()
tenant_id = None # No default tenant so will get unscoped token.
email = ''
- user = self.client.create_user(user_name, user_password,
- tenant_id, email)['user']
+ user = self.users_client.create_user(user_name, user_password,
+ tenant_id, email)['user']
self.data.users.append(user)
# Create a couple tenants.
@@ -77,15 +77,15 @@
# Create a role
role_name = data_utils.rand_name(name='role')
- role = self.client.create_role(role_name)['role']
+ role = self.roles_client.create_role(name=role_name)['role']
self.data.roles.append(role)
# Grant the user the role on the tenants.
- self.client.assign_user_role(tenant1['id'], user['id'],
- role['id'])
+ self.roles_client.assign_user_role(tenant1['id'], user['id'],
+ role['id'])
- self.client.assign_user_role(tenant2['id'], user['id'],
- role['id'])
+ self.roles_client.assign_user_role(tenant2['id'], user['id'],
+ role['id'])
# Get an unscoped token.
body = self.token_client.auth(user_name, user_password)
diff --git a/tempest/api/identity/admin/v2/test_users.py b/tempest/api/identity/admin/v2/test_users.py
index 9cf1ce0..4497575 100644
--- a/tempest/api/identity/admin/v2/test_users.py
+++ b/tempest/api/identity/admin/v2/test_users.py
@@ -34,9 +34,9 @@
def test_create_user(self):
# Create a user
self.data.setup_test_tenant()
- user = self.client.create_user(self.alt_user, self.alt_password,
- self.data.tenant['id'],
- self.alt_email)['user']
+ user = self.users_client.create_user(self.alt_user, self.alt_password,
+ self.data.tenant['id'],
+ self.alt_email)['user']
self.data.users.append(user)
self.assertEqual(self.alt_user, user['name'])
@@ -45,9 +45,10 @@
# Create a user with enabled : False
self.data.setup_test_tenant()
name = data_utils.rand_name('test_user')
- user = self.client.create_user(name, self.alt_password,
- self.data.tenant['id'],
- self.alt_email, enabled=False)['user']
+ user = self.users_client.create_user(name, self.alt_password,
+ self.data.tenant['id'],
+ self.alt_email,
+ enabled=False)['user']
self.data.users.append(user)
self.assertEqual(name, user['name'])
self.assertEqual(False, user['enabled'])
@@ -58,22 +59,22 @@
# Test case to check if updating of user attributes is successful.
test_user = data_utils.rand_name('test_user')
self.data.setup_test_tenant()
- user = self.client.create_user(test_user, self.alt_password,
- self.data.tenant['id'],
- self.alt_email)['user']
+ user = self.users_client.create_user(test_user, self.alt_password,
+ self.data.tenant['id'],
+ self.alt_email)['user']
# Delete the User at the end of this method
- self.addCleanup(self.client.delete_user, user['id'])
+ self.addCleanup(self.users_client.delete_user, user['id'])
# Updating user details with new values
u_name2 = data_utils.rand_name('user2')
u_email2 = u_name2 + '@testmail.tm'
- update_user = self.client.update_user(user['id'], name=u_name2,
- email=u_email2,
- enabled=False)['user']
+ update_user = self.users_client.update_user(user['id'], name=u_name2,
+ email=u_email2,
+ enabled=False)['user']
self.assertEqual(u_name2, update_user['name'])
self.assertEqual(u_email2, update_user['email'])
self.assertEqual(False, update_user['enabled'])
# GET by id after updating
- updated_user = self.client.show_user(user['id'])['user']
+ updated_user = self.users_client.show_user(user['id'])['user']
# Assert response body of GET after updating
self.assertEqual(u_name2, updated_user['name'])
self.assertEqual(u_email2, updated_user['email'])
@@ -84,10 +85,10 @@
# Delete a user
test_user = data_utils.rand_name('test_user')
self.data.setup_test_tenant()
- user = self.client.create_user(test_user, self.alt_password,
- self.data.tenant['id'],
- self.alt_email)['user']
- self.client.delete_user(user['id'])
+ user = self.users_client.create_user(test_user, self.alt_password,
+ self.data.tenant['id'],
+ self.alt_email)['user']
+ self.users_client.delete_user(user['id'])
@test.idempotent_id('aca696c3-d645-4f45-b728-63646045beb1')
def test_user_authentication(self):
@@ -121,7 +122,7 @@
def test_get_users(self):
# Get a list of users and find the test user
self.data.setup_test_user()
- users = self.client.list_users()['users']
+ users = self.users_client.list_users()['users']
self.assertThat([u['name'] for u in users],
matchers.Contains(self.data.test_user),
"Could not find %s" % self.data.test_user)
@@ -134,16 +135,16 @@
fetched_user_ids = list()
password1 = data_utils.rand_password()
alt_tenant_user1 = data_utils.rand_name('tenant_user1')
- user1 = self.client.create_user(alt_tenant_user1, password1,
- self.data.tenant['id'],
- 'user1@123')['user']
+ user1 = self.users_client.create_user(alt_tenant_user1, password1,
+ self.data.tenant['id'],
+ 'user1@123')['user']
user_ids.append(user1['id'])
self.data.users.append(user1)
password2 = data_utils.rand_password()
alt_tenant_user2 = data_utils.rand_name('tenant_user2')
- user2 = self.client.create_user(alt_tenant_user2, password2,
- self.data.tenant['id'],
- 'user2@123')['user']
+ user2 = self.users_client.create_user(alt_tenant_user2, password2,
+ self.data.tenant['id'],
+ 'user2@123')['user']
user_ids.append(user2['id'])
self.data.users.append(user2)
# List of users for the respective tenant ID
@@ -170,19 +171,19 @@
user_ids = list()
fetched_user_ids = list()
user_ids.append(user['id'])
- role = self.client.assign_user_role(tenant['id'], user['id'],
- role['id'])['role']
+ role = self.roles_client.assign_user_role(tenant['id'], user['id'],
+ role['id'])['role']
alt_user2 = data_utils.rand_name('second_user')
alt_password2 = data_utils.rand_password()
- second_user = self.client.create_user(alt_user2, alt_password2,
- self.data.tenant['id'],
- 'user2@123')['user']
+ second_user = self.users_client.create_user(alt_user2, alt_password2,
+ self.data.tenant['id'],
+ 'user2@123')['user']
user_ids.append(second_user['id'])
self.data.users.append(second_user)
- role = self.client.assign_user_role(tenant['id'],
- second_user['id'],
- role['id'])['role']
+ role = self.roles_client.assign_user_role(tenant['id'],
+ second_user['id'],
+ role['id'])['role']
# List of users with roles for the respective tenant ID
body = (self.tenants_client.list_tenant_users(self.data.tenant['id'])
['users'])
@@ -201,8 +202,8 @@
self.data.setup_test_user()
# Updating the user with new password
new_pass = data_utils.rand_password()
- update_user = self.client.update_user_password(
- self.data.user['id'], new_pass)['user']
+ update_user = self.users_client.update_user_password(
+ self.data.user['id'], password=new_pass)['user']
self.assertEqual(update_user['id'], self.data.user['id'])
# Validate the updated password
diff --git a/tempest/api/identity/admin/v2/test_users_negative.py b/tempest/api/identity/admin/v2/test_users_negative.py
index 8fa5a36..c5248fd 100644
--- a/tempest/api/identity/admin/v2/test_users_negative.py
+++ b/tempest/api/identity/admin/v2/test_users_negative.py
@@ -37,8 +37,9 @@
# Non-administrator should not be authorized to create a user
self.data.setup_test_tenant()
self.assertRaises(lib_exc.Forbidden,
- self.non_admin_client.create_user, self.alt_user,
- self.alt_password, self.data.tenant['id'],
+ self.non_admin_users_client.create_user,
+ self.alt_user, self.alt_password,
+ self.data.tenant['id'],
self.alt_email)
@test.attr(type=['negative'])
@@ -46,8 +47,8 @@
def test_create_user_with_empty_name(self):
# User with an empty name should not be created
self.data.setup_test_tenant()
- self.assertRaises(lib_exc.BadRequest, self.client.create_user, '',
- self.alt_password, self.data.tenant['id'],
+ self.assertRaises(lib_exc.BadRequest, self.users_client.create_user,
+ '', self.alt_password, self.data.tenant['id'],
self.alt_email)
@test.attr(type=['negative'])
@@ -55,7 +56,7 @@
def test_create_user_with_name_length_over_255(self):
# Length of user name filed should be restricted to 255 characters
self.data.setup_test_tenant()
- self.assertRaises(lib_exc.BadRequest, self.client.create_user,
+ self.assertRaises(lib_exc.BadRequest, self.users_client.create_user,
'a' * 256, self.alt_password,
self.data.tenant['id'], self.alt_email)
@@ -64,7 +65,7 @@
def test_create_user_with_duplicate_name(self):
# Duplicate user should not be created
self.data.setup_test_user()
- self.assertRaises(lib_exc.Conflict, self.client.create_user,
+ self.assertRaises(lib_exc.Conflict, self.users_client.create_user,
self.data.test_user, self.data.test_password,
self.data.tenant['id'], self.data.test_email)
@@ -72,7 +73,7 @@
@test.idempotent_id('0132cc22-7c4f-42e1-9e50-ac6aad31d59a')
def test_create_user_for_non_existent_tenant(self):
# Attempt to create a user in a non-existent tenant should fail
- self.assertRaises(lib_exc.NotFound, self.client.create_user,
+ self.assertRaises(lib_exc.NotFound, self.users_client.create_user,
self.alt_user, self.alt_password, '49ffgg99999',
self.alt_email)
@@ -85,7 +86,7 @@
token = self.client.auth_provider.get_token()
# Delete the token from database
self.client.delete_token(token)
- self.assertRaises(lib_exc.Unauthorized, self.client.create_user,
+ self.assertRaises(lib_exc.Unauthorized, self.users_client.create_user,
self.alt_user, self.alt_password,
self.data.tenant['id'], self.alt_email)
@@ -98,7 +99,7 @@
# Attempt to create a user with valid enabled para should fail
self.data.setup_test_tenant()
name = data_utils.rand_name('test_user')
- self.assertRaises(lib_exc.BadRequest, self.client.create_user,
+ self.assertRaises(lib_exc.BadRequest, self.users_client.create_user,
name, self.alt_password,
self.data.tenant['id'],
self.alt_email, enabled=3)
@@ -109,7 +110,7 @@
# Attempt to update a user non-existent user should fail
user_name = data_utils.rand_name('user')
non_existent_id = str(uuid.uuid4())
- self.assertRaises(lib_exc.NotFound, self.client.update_user,
+ self.assertRaises(lib_exc.NotFound, self.users_client.update_user,
non_existent_id, name=user_name)
@test.attr(type=['negative'])
@@ -121,7 +122,7 @@
token = self.client.auth_provider.get_token()
# Delete the token from database
self.client.delete_token(token)
- self.assertRaises(lib_exc.Unauthorized, self.client.update_user,
+ self.assertRaises(lib_exc.Unauthorized, self.users_client.update_user,
self.alt_user)
# Unset the token to allow further tests to generate a new token
@@ -133,7 +134,8 @@
# Non-administrator should not be authorized to update user
self.data.setup_test_tenant()
self.assertRaises(lib_exc.Forbidden,
- self.non_admin_client.update_user, self.alt_user)
+ self.non_admin_users_client.update_user,
+ self.alt_user)
@test.attr(type=['negative'])
@test.idempotent_id('d45195d5-33ed-41b9-a452-7d0d6a00f6e9')
@@ -141,14 +143,14 @@
# Non-administrator user should not be authorized to delete a user
self.data.setup_test_user()
self.assertRaises(lib_exc.Forbidden,
- self.non_admin_client.delete_user,
+ self.non_admin_users_client.delete_user,
self.data.user['id'])
@test.attr(type=['negative'])
@test.idempotent_id('7cc82f7e-9998-4f89-abae-23df36495867')
def test_delete_non_existent_user(self):
# Attempt to delete a non-existent user should fail
- self.assertRaises(lib_exc.NotFound, self.client.delete_user,
+ self.assertRaises(lib_exc.NotFound, self.users_client.delete_user,
'junk12345123')
@test.attr(type=['negative'])
@@ -160,7 +162,7 @@
token = self.client.auth_provider.get_token()
# Delete the token from database
self.client.delete_token(token)
- self.assertRaises(lib_exc.Unauthorized, self.client.delete_user,
+ self.assertRaises(lib_exc.Unauthorized, self.users_client.delete_user,
self.alt_user)
# Unset the token to allow further tests to generate a new token
@@ -222,7 +224,7 @@
# Non-administrator user should not be authorized to get user list
self.data.setup_test_user()
self.assertRaises(lib_exc.Forbidden,
- self.non_admin_client.list_users)
+ self.non_admin_users_client.list_users)
@test.attr(type=['negative'])
@test.idempotent_id('a73591ec-1903-4ffe-be42-282b39fefc9d')
@@ -230,7 +232,7 @@
# Request to get list of users without a valid token should fail
token = self.client.auth_provider.get_token()
self.client.delete_token(token)
- self.assertRaises(lib_exc.Unauthorized, self.client.list_users)
+ self.assertRaises(lib_exc.Unauthorized, self.users_client.list_users)
self.client.auth_provider.clear_auth()
@test.attr(type=['negative'])
diff --git a/tempest/api/identity/admin/v3/test_endpoints.py b/tempest/api/identity/admin/v3/test_endpoints.py
index 429e2e3..2ac832e 100644
--- a/tempest/api/identity/admin/v3/test_endpoints.py
+++ b/tempest/api/identity/admin/v3/test_endpoints.py
@@ -34,8 +34,8 @@
s_type = data_utils.rand_name('type')
s_description = data_utils.rand_name('description')
cls.service_data = (
- cls.service_client.create_service(name=s_name, type=s_type,
- description=s_description))
+ cls.services_client.create_service(name=s_name, type=s_type,
+ description=s_description))
cls.service_data = cls.service_data['service']
cls.service_id = cls.service_data['id']
cls.service_ids.append(cls.service_id)
@@ -56,7 +56,7 @@
for e in cls.setup_endpoints:
cls.client.delete_endpoint(e['id'])
for s in cls.service_ids:
- cls.service_client.delete_service(s)
+ cls.services_client.delete_service(s)
super(EndPointsTestJSON, cls).resource_cleanup()
@test.idempotent_id('c19ecf90-240e-4e23-9966-21cee3f6a618')
@@ -113,8 +113,8 @@
s_type = data_utils.rand_name('type')
s_description = data_utils.rand_name('description')
service2 = (
- self.service_client.create_service(name=s_name, type=s_type,
- description=s_description))
+ self.services_client.create_service(name=s_name, type=s_type,
+ description=s_description))
service2 = service2['service']
self.service_ids.append(service2['id'])
# Updating endpoint with new values
diff --git a/tempest/api/identity/admin/v3/test_endpoints_negative.py b/tempest/api/identity/admin/v3/test_endpoints_negative.py
index 8f9bf2a..372254f 100644
--- a/tempest/api/identity/admin/v3/test_endpoints_negative.py
+++ b/tempest/api/identity/admin/v3/test_endpoints_negative.py
@@ -37,8 +37,8 @@
s_type = data_utils.rand_name('type')
s_description = data_utils.rand_name('description')
cls.service_data = (
- cls.service_client.create_service(name=s_name, type=s_type,
- description=s_description)
+ cls.services_client.create_service(name=s_name, type=s_type,
+ description=s_description)
['service'])
cls.service_id = cls.service_data['id']
cls.service_ids.append(cls.service_id)
@@ -46,7 +46,7 @@
@classmethod
def resource_cleanup(cls):
for s in cls.service_ids:
- cls.service_client.delete_service(s)
+ cls.services_client.delete_service(s)
super(EndpointsNegativeTestJSON, cls).resource_cleanup()
@test.attr(type=['negative'])
diff --git a/tempest/api/identity/admin/v3/test_groups.py b/tempest/api/identity/admin/v3/test_groups.py
index df01074..03b8b29 100644
--- a/tempest/api/identity/admin/v3/test_groups.py
+++ b/tempest/api/identity/admin/v3/test_groups.py
@@ -88,8 +88,7 @@
def test_list_user_groups(self):
# create a user
user = self.client.create_user(
- data_utils.rand_name('User'),
- password=data_utils.rand_name('Pass'))['user']
+ data_utils.rand_name('User'), data_utils.rand_password())['user']
self.addCleanup(self.client.delete_user, user['id'])
# create two groups, and add user into them
groups = []
diff --git a/tempest/api/identity/admin/v3/test_list_users.py b/tempest/api/identity/admin/v3/test_list_users.py
index ca91ce5..4921c00 100644
--- a/tempest/api/identity/admin/v3/test_list_users.py
+++ b/tempest/api/identity/admin/v3/test_list_users.py
@@ -42,13 +42,13 @@
cls.domain_enabled_user = cls.client.create_user(
u1_name, password=alt_password,
email=cls.alt_email, domain_id=cls.data.domain['id'])['user']
- cls.data.v3_users.append(cls.domain_enabled_user)
+ cls.data.users.append(cls.domain_enabled_user)
# Create default not enabled user
u2_name = data_utils.rand_name('test_user')
cls.non_domain_enabled_user = cls.client.create_user(
u2_name, password=alt_password,
email=cls.alt_email, enabled=False)['user']
- cls.data.v3_users.append(cls.non_domain_enabled_user)
+ cls.data.users.append(cls.non_domain_enabled_user)
@test.idempotent_id('08f9aabb-dcfe-41d0-8172-82b5fa0bd73d')
def test_list_user_domains(self):
@@ -79,7 +79,7 @@
# List users
body = self.client.list_users()['users']
fetched_ids = [u['id'] for u in body]
- missing_users = [u['id'] for u in self.data.v3_users
+ missing_users = [u['id'] for u in self.data.users
if u['id'] not in fetched_ids]
self.assertEqual(0, len(missing_users),
"Failed to find user %s in fetched list" %
@@ -88,8 +88,8 @@
@test.idempotent_id('b4baa3ae-ac00-4b4e-9e27-80deaad7771f')
def test_get_user(self):
# Get a user detail
- user = self.client.show_user(self.data.v3_users[0]['id'])['user']
- self.assertEqual(self.data.v3_users[0]['id'], user['id'])
- self.assertEqual(self.data.v3_users[0]['name'], user['name'])
+ user = self.client.show_user(self.data.users[0]['id'])['user']
+ self.assertEqual(self.data.users[0]['id'], user['id'])
+ self.assertEqual(self.data.users[0]['name'], user['name'])
self.assertEqual(self.alt_email, user['email'])
self.assertEqual(self.data.domain['id'], user['domain_id'])
diff --git a/tempest/api/identity/admin/v3/test_policies.py b/tempest/api/identity/admin/v3/test_policies.py
index f38d25d..3b5e5d4 100644
--- a/tempest/api/identity/admin/v3/test_policies.py
+++ b/tempest/api/identity/admin/v3/test_policies.py
@@ -21,7 +21,7 @@
class PoliciesTestJSON(base.BaseIdentityV3AdminTest):
def _delete_policy(self, policy_id):
- self.policy_client.delete_policy(policy_id)
+ self.policies_client.delete_policy(policy_id)
@test.idempotent_id('1a0ad286-2d06-4123-ab0d-728893a76201')
def test_list_policies(self):
@@ -31,13 +31,13 @@
for _ in range(3):
blob = data_utils.rand_name('BlobName')
policy_type = data_utils.rand_name('PolicyType')
- policy = self.policy_client.create_policy(
+ policy = self.policies_client.create_policy(
blob=blob, type=policy_type)['policy']
# Delete the Policy at the end of this method
self.addCleanup(self._delete_policy, policy['id'])
policy_ids.append(policy['id'])
# List and Verify Policies
- body = self.policy_client.list_policies()['policies']
+ body = self.policies_client.list_policies()['policies']
for p in body:
fetched_ids.append(p['id'])
missing_pols = [p for p in policy_ids if p not in fetched_ids]
@@ -49,8 +49,8 @@
# Test to update policy
blob = data_utils.rand_name('BlobName')
policy_type = data_utils.rand_name('PolicyType')
- policy = self.policy_client.create_policy(blob=blob,
- type=policy_type)['policy']
+ policy = self.policies_client.create_policy(blob=blob,
+ type=policy_type)['policy']
self.addCleanup(self._delete_policy, policy['id'])
self.assertIn('id', policy)
self.assertIn('type', policy)
@@ -60,11 +60,12 @@
self.assertEqual(policy_type, policy['type'])
# Update policy
update_type = data_utils.rand_name('UpdatedPolicyType')
- data = self.policy_client.update_policy(
+ data = self.policies_client.update_policy(
policy['id'], type=update_type)['policy']
self.assertIn('type', data)
# Assertion for updated value with fetched value
- fetched_policy = self.policy_client.show_policy(policy['id'])['policy']
+ fetched_policy = self.policies_client.show_policy(
+ policy['id'])['policy']
self.assertIn('id', fetched_policy)
self.assertIn('blob', fetched_policy)
self.assertIn('type', fetched_policy)
diff --git a/tempest/api/identity/admin/v3/test_regions.py b/tempest/api/identity/admin/v3/test_regions.py
index ec1b12e..8bba3cb 100644
--- a/tempest/api/identity/admin/v3/test_regions.py
+++ b/tempest/api/identity/admin/v3/test_regions.py
@@ -25,7 +25,7 @@
@classmethod
def setup_clients(cls):
super(RegionsTestJSON, cls).setup_clients()
- cls.client = cls.region_client
+ cls.client = cls.regions_client
@classmethod
def resource_setup(cls):
diff --git a/tempest/api/identity/admin/v3/test_roles.py b/tempest/api/identity/admin/v3/test_roles.py
index e5a6535..f1f06ee 100644
--- a/tempest/api/identity/admin/v3/test_roles.py
+++ b/tempest/api/identity/admin/v3/test_roles.py
@@ -25,8 +25,8 @@
super(RolesV3TestJSON, cls).resource_setup()
for _ in range(3):
role_name = data_utils.rand_name(name='role')
- role = cls.client.create_role(role_name)['role']
- cls.data.v3_roles.append(role)
+ role = cls.client.create_role(name=role_name)['role']
+ cls.data.roles.append(role)
cls.fetched_role_ids = list()
u_name = data_utils.rand_name('user')
u_desc = '%s description' % u_name
@@ -47,7 +47,7 @@
email=u_email, project_id=cls.project['id'],
domain_id=cls.domain['id'])['user']
cls.role = cls.client.create_role(
- data_utils.rand_name('Role'))['role']
+ name=data_utils.rand_name('Role'))['role']
@classmethod
def resource_cleanup(cls):
@@ -69,13 +69,14 @@
@test.idempotent_id('18afc6c0-46cf-4911-824e-9989cc056c3a')
def test_role_create_update_get_list(self):
r_name = data_utils.rand_name('Role')
- role = self.client.create_role(r_name)['role']
+ role = self.client.create_role(name=r_name)['role']
self.addCleanup(self.client.delete_role, role['id'])
self.assertIn('name', role)
self.assertEqual(role['name'], r_name)
new_name = data_utils.rand_name('NewRole')
- updated_role = self.client.update_role(new_name, role['id'])['role']
+ updated_role = self.client.update_role(role['id'],
+ name=new_name)['role']
self.assertIn('name', updated_role)
self.assertIn('id', updated_role)
self.assertIn('links', updated_role)
@@ -187,5 +188,5 @@
def test_list_roles(self):
# Return a list of all roles
body = self.client.list_roles()['roles']
- found = [role for role in body if role in self.data.v3_roles]
- self.assertEqual(len(found), len(self.data.v3_roles))
+ found = [role for role in body if role in self.data.roles]
+ self.assertEqual(len(found), len(self.data.roles))
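
For reference, the v3 role calls above now pass every attribute as a keyword argument rather than positionally. A minimal sketch of the new call pattern, assuming an already-authenticated roles client such as ``cls.client`` in these tests (role names are illustrative)::

    # `roles_client` stands in for the v3 identity client used above.
    role = roles_client.create_role(name='my-role')['role']
    updated = roles_client.update_role(role['id'], name='my-role-renamed')['role']
    assert updated['name'] == 'my-role-renamed'
    roles_client.delete_role(role['id'])
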
diff --git a/tempest/api/identity/admin/v3/test_services.py b/tempest/api/identity/admin/v3/test_services.py
index d1595dd..c6e3df4 100644
--- a/tempest/api/identity/admin/v3/test_services.py
+++ b/tempest/api/identity/admin/v3/test_services.py
@@ -24,9 +24,9 @@
def _del_service(self, service_id):
# Used for deleting the services created in this class
- self.service_client.delete_service(service_id)
+ self.services_client.delete_service(service_id)
# Checking whether service is deleted successfully
- self.assertRaises(lib_exc.NotFound, self.service_client.show_service,
+ self.assertRaises(lib_exc.NotFound, self.services_client.show_service,
service_id)
@test.attr(type='smoke')
@@ -36,7 +36,7 @@
name = data_utils.rand_name('service')
serv_type = data_utils.rand_name('type')
desc = data_utils.rand_name('description')
- create_service = self.service_client.create_service(
+ create_service = self.services_client.create_service(
type=serv_type, name=name, description=desc)['service']
self.addCleanup(self._del_service, create_service['id'])
self.assertIsNotNone(create_service['id'])
@@ -49,14 +49,14 @@
s_id = create_service['id']
resp1_desc = create_service['description']
s_desc2 = data_utils.rand_name('desc2')
- update_service = self.service_client.update_service(
+ update_service = self.services_client.update_service(
s_id, description=s_desc2)['service']
resp2_desc = update_service['description']
self.assertNotEqual(resp1_desc, resp2_desc)
# Get service
- fetched_service = self.service_client.show_service(s_id)['service']
+ fetched_service = self.services_client.show_service(s_id)['service']
resp3_desc = fetched_service['description']
self.assertEqual(resp2_desc, resp3_desc)
@@ -67,9 +67,9 @@
# Create a service only with name and type
name = data_utils.rand_name('service')
serv_type = data_utils.rand_name('type')
- service = self.service_client.create_service(
+ service = self.services_client.create_service(
type=serv_type, name=name)['service']
- self.addCleanup(self.service_client.delete_service, service['id'])
+ self.addCleanup(self.services_client.delete_service, service['id'])
self.assertIn('id', service)
expected_data = {'name': name, 'type': serv_type}
self.assertDictContainsSubset(expected_data, service)
@@ -81,14 +81,14 @@
for _ in range(3):
name = data_utils.rand_name('service')
serv_type = data_utils.rand_name('type')
- create_service = self.service_client.create_service(
+ create_service = self.services_client.create_service(
type=serv_type, name=name)['service']
- self.addCleanup(self.service_client.delete_service,
+ self.addCleanup(self.services_client.delete_service,
create_service['id'])
service_ids.append(create_service['id'])
# List and Verify Services
- services = self.service_client.list_services()['services']
+ services = self.services_client.list_services()['services']
fetched_ids = [service['id'] for service in services]
found = [s for s in fetched_ids if s in service_ids]
self.assertEqual(len(found), len(service_ids))
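
The service CRUD calls above all go through the renamed ``services_client`` (wired up as ``cls.os_adm.identity_services_client`` in ``tempest/api/identity/base.py`` below). A rough usage sketch of that client, with illustrative values::

    service = services_client.create_service(
        type='compute', name='nova', description='compute service')['service']
    services_client.update_service(service['id'], description='updated description')
    fetched = services_client.show_service(service['id'])['service']
    assert fetched['description'] == 'updated description'
    services_client.delete_service(service['id'])
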
diff --git a/tempest/api/identity/admin/v3/test_tokens.py b/tempest/api/identity/admin/v3/test_tokens.py
index f5b20d5..b1446cf 100644
--- a/tempest/api/identity/admin/v3/test_tokens.py
+++ b/tempest/api/identity/admin/v3/test_tokens.py
@@ -76,7 +76,7 @@
# Create a role
role_name = data_utils.rand_name(name='role')
- role = self.client.create_role(role_name)['role']
+ role = self.client.create_role(name=role_name)['role']
self.addCleanup(self.client.delete_role, role['id'])
# Grant the user the role on both projects.
diff --git a/tempest/api/identity/admin/v3/test_trusts.py b/tempest/api/identity/admin/v3/test_trusts.py
index bf7ad71..85961b4 100644
--- a/tempest/api/identity/admin/v3/test_trusts.py
+++ b/tempest/api/identity/admin/v3/test_trusts.py
@@ -69,10 +69,10 @@
self.delegated_role = data_utils.rand_name('DelegatedRole')
self.not_delegated_role = data_utils.rand_name('NotDelegatedRole')
- role = self.client.create_role(self.delegated_role)['role']
+ role = self.client.create_role(name=self.delegated_role)['role']
self.delegated_role_id = role['id']
- role = self.client.create_role(self.not_delegated_role)['role']
+ role = self.client.create_role(name=self.not_delegated_role)['role']
self.not_delegated_role_id = role['id']
# Assign roles to trustor
diff --git a/tempest/api/identity/admin/v3/test_users.py b/tempest/api/identity/admin/v3/test_users.py
index 6dbd443..894a7a9 100644
--- a/tempest/api/identity/admin/v3/test_users.py
+++ b/tempest/api/identity/admin/v3/test_users.py
@@ -117,7 +117,7 @@
self.addCleanup(self.client.delete_user, user_body['id'])
# Creating Role
role_body = self.client.create_role(
- data_utils.rand_name('role'))['role']
+ name=data_utils.rand_name('role'))['role']
# Delete the Role at the end of this method
self.addCleanup(self.client.delete_role, role_body['id'])
@@ -151,6 +151,6 @@
@test.idempotent_id('c10dcd90-461d-4b16-8e23-4eb836c00644')
def test_get_user(self):
# Get a user detail
- self.data.setup_test_v3_user()
- user = self.client.show_user(self.data.v3_user['id'])['user']
- self.assertEqual(self.data.v3_user['id'], user['id'])
+ self.data.setup_test_user()
+ user = self.client.show_user(self.data.user['id'])['user']
+ self.assertEqual(self.data.user['id'], user['id'])
diff --git a/tempest/api/identity/admin/v3/test_users_negative.py b/tempest/api/identity/admin/v3/test_users_negative.py
index 4c80bda..b2fb6e0 100644
--- a/tempest/api/identity/admin/v3/test_users_negative.py
+++ b/tempest/api/identity/admin/v3/test_users_negative.py
@@ -38,7 +38,7 @@
@test.idempotent_id('b3c9fccc-4134-46f5-b600-1da6fb0a3b1f')
def test_authentication_for_disabled_user(self):
# Attempt to authenticate for disabled user should fail
- self.data.setup_test_v3_user()
+ self.data.setup_test_user()
self.disable_user(self.data.test_user)
self.assertRaises(lib_exc.Unauthorized, self.token.auth,
username=self.data.test_user,
diff --git a/tempest/api/identity/base.py b/tempest/api/identity/base.py
index 8359b8f..028e9f2 100644
--- a/tempest/api/identity/base.py
+++ b/tempest/api/identity/base.py
@@ -16,7 +16,6 @@
from oslo_log import log as logging
from tempest_lib import exceptions as lib_exc
-from tempest.common import credentials_factory as common_creds
from tempest.common.utils import data_utils
from tempest import config
import tempest.test
@@ -30,7 +29,7 @@
@classmethod
def disable_user(cls, user_name):
user = cls.get_user_by_name(user_name)
- cls.client.enable_disable_user(user['id'], False)
+ cls.users_client.enable_disable_user(user['id'], enabled=False)
@classmethod
def disable_tenant(cls, tenant_name):
@@ -39,7 +38,7 @@
@classmethod
def get_user_by_name(cls, name):
- users = cls.client.list_users()['users']
+ users = cls.users_client.list_users()['users']
user = [u for u in users if u['name'] == name]
if len(user) > 0:
return user[0]
@@ -56,7 +55,7 @@
@classmethod
def get_role_by_name(cls, name):
- roles = cls.client.list_roles()['roles']
+ roles = cls.roles_client.list_roles()['roles']
role = [r for r in roles if r['name'] == name]
if len(role) > 0:
return role[0]
@@ -76,6 +75,8 @@
cls.non_admin_client = cls.os.identity_public_client
cls.non_admin_token_client = cls.os.token_client
cls.non_admin_tenants_client = cls.os.tenants_public_client
+ cls.non_admin_roles_client = cls.os.roles_public_client
+ cls.non_admin_users_client = cls.os.users_public_client
@classmethod
def resource_setup(cls):
@@ -98,11 +99,17 @@
cls.token_client = cls.os_adm.token_client
cls.tenants_client = cls.os_adm.tenants_client
cls.non_admin_tenants_client = cls.os.tenants_client
+ cls.roles_client = cls.os_adm.roles_client
+ cls.non_admin_roles_client = cls.os.roles_client
+ cls.users_client = cls.os_adm.users_client
+ cls.non_admin_users_client = cls.os.users_client
+ cls.services_client = cls.os_adm.services_v2_client
@classmethod
def resource_setup(cls):
super(BaseIdentityV2AdminTest, cls).resource_setup()
- cls.data = DataGenerator(cls.client, cls.tenants_client)
+ cls.data = DataGeneratorV2(cls.client, cls.tenants_client,
+ cls.users_client, cls.roles_client)
@classmethod
def resource_cleanup(cls):
@@ -139,13 +146,16 @@
cls.client = cls.os_adm.identity_v3_client
cls.token = cls.os_adm.token_v3_client
cls.endpoints_client = cls.os_adm.endpoints_client
- cls.region_client = cls.os_adm.region_client
- cls.service_client = cls.os_adm.service_client
- cls.policy_client = cls.os_adm.policy_client
+ cls.regions_client = cls.os_adm.regions_client
+ cls.services_client = cls.os_adm.identity_services_client
+ cls.policies_client = cls.os_adm.policies_client
cls.creds_client = cls.os_adm.credentials_client
cls.groups_client = cls.os_adm.groups_client
- cls.data = DataGenerator(cls.client)
+ @classmethod
+ def resource_setup(cls):
+ super(BaseIdentityV3AdminTest, cls).resource_setup()
+ cls.data = DataGeneratorV3(cls.client)
@classmethod
def resource_cleanup(cls):
@@ -185,123 +195,107 @@
self.client.delete_domain(domain_id)
-class DataGenerator(object):
+class BaseDataGenerator(object):
- def __init__(self, client, tenants_client=None):
- self.client = client
- # TODO(dmellado) split Datagenerator for v2 and v3
- self.tenants_client = tenants_client
- self.users = []
- self.tenants = []
- self.roles = []
- self.role_name = None
- self.v3_users = []
- self.projects = []
- self.v3_roles = []
- self.domains = []
+ def __init__(self, client, projects_client=None,
+ users_client=None, roles_client=None):
+ self.client = client
+ self.projects_client = projects_client or client
+ self.users_client = users_client or client
+ self.roles_client = roles_client or client
- @property
- def test_credentials(self):
- return common_creds.get_credentials(username=self.test_user,
- user_id=self.user['id'],
- password=self.test_password,
- tenant_name=self.test_tenant,
- tenant_id=self.tenant['id'])
+ self.test_user = ''
+ self.test_password = ''
+ self.test_tenant = ''
+ self.test_project = ''
+ self.test_role = ''
+ self.test_email = ''
- def setup_test_user(self):
- """Set up a test user."""
- self.setup_test_tenant()
- self.test_user = data_utils.rand_name('test_user')
- self.test_password = data_utils.rand_password()
- self.test_email = self.test_user + '@testmail.tm'
- self.user = self.client.create_user(self.test_user,
- self.test_password,
- self.tenant['id'],
- self.test_email)['user']
- self.users.append(self.user)
+ self.user = None
+ self.tenant = None
+ self.project = None
+ self.role = None
+ self.domain = None
- def setup_test_tenant(self):
- """Set up a test tenant."""
- self.test_tenant = data_utils.rand_name('test_tenant')
- self.test_description = data_utils.rand_name('desc')
- self.tenant = self.tenants_client.create_tenant(
- name=self.test_tenant,
- description=self.test_description)['tenant']
- self.tenants.append(self.tenant)
+ self.users = []
+ self.tenants = []
+ self.projects = []
+ self.roles = []
+ self.domains = []
- def setup_test_role(self):
- """Set up a test role."""
- self.test_role = data_utils.rand_name('role')
- self.role = self.client.create_role(self.test_role)['role']
- self.roles.append(self.role)
+ def _create_test_user(self, **kwargs):
+ self.test_user = data_utils.rand_name('test_user')
+ self.test_password = data_utils.rand_password()
+ self.test_email = self.test_user + '@testmail.tm'
+ self.user = self.users_client.create_user(
+ self.test_user, password=self.test_password,
+ email=self.test_email, **kwargs)['user']
+ self.users.append(self.user)
- def setup_test_v3_user(self):
- """Set up a test v3 user."""
- self.setup_test_project()
- self.test_user = data_utils.rand_name('test_user')
- self.test_password = data_utils.rand_password()
- self.test_email = self.test_user + '@testmail.tm'
- self.v3_user = self.client.create_user(
- self.test_user,
- password=self.test_password,
- project_id=self.project['id'],
- email=self.test_email)['user']
- self.v3_users.append(self.v3_user)
+ def setup_test_role(self):
+ """Set up a test role."""
+ self.test_role = data_utils.rand_name('role')
+ self.role = self.roles_client.create_role(name=self.test_role)['role']
+ self.roles.append(self.role)
- def setup_test_project(self):
- """Set up a test project."""
- self.test_project = data_utils.rand_name('test_project')
- self.test_description = data_utils.rand_name('desc')
- self.project = self.client.create_project(
- name=self.test_project,
- description=self.test_description)['project']
- self.projects.append(self.project)
+ @staticmethod
+ def _try_wrapper(func, item, **kwargs):
+ try:
+ func(item['id'], **kwargs)
+ except lib_exc.NotFound:
+ pass
+ except Exception:
+ LOG.exception("Unexpected exception occurred in %s deletion. "
+ "But ignored here." % item['id'])
- def setup_test_v3_role(self):
- """Set up a test v3 role."""
- self.test_role = data_utils.rand_name('role')
- self.v3_role = self.client.create_role(self.test_role)['role']
- self.v3_roles.append(self.v3_role)
+ def teardown_all(self):
+ for user in self.users:
+ self._try_wrapper(self.users_client.delete_user, user)
+ for tenant in self.tenants:
+ self._try_wrapper(self.projects_client.delete_tenant, tenant)
+ for project in self.projects:
+ self._try_wrapper(self.projects_client.delete_project, project)
+ for role in self.roles:
+ self._try_wrapper(self.roles_client.delete_role, role)
+ for domain in self.domains:
+ self._try_wrapper(self.client.update_domain, domain, enabled=False)
+ self._try_wrapper(self.client.delete_domain, domain)
- def setup_test_domain(self):
- """Set up a test domain."""
- self.test_domain = data_utils.rand_name('test_domain')
- self.test_description = data_utils.rand_name('desc')
- self.domain = self.client.create_domain(
- name=self.test_domain,
- description=self.test_description)['domain']
- self.domains.append(self.domain)
- @staticmethod
- def _try_wrapper(func, item, **kwargs):
- try:
- if kwargs:
- func(item['id'], **kwargs)
- else:
- func(item['id'])
- except lib_exc.NotFound:
- pass
- except Exception:
- LOG.exception("Unexpected exception occurred in %s deletion."
- " But ignored here." % item['id'])
+class DataGeneratorV2(BaseDataGenerator):
- def teardown_all(self):
- # NOTE(masayukig): v3 client doesn't have v2 method.
- # (e.g. delete_tenant) So we need to check resources existence
- # before using client methods.
- for user in self.users:
- self._try_wrapper(self.client.delete_user, user)
- for tenant in self.tenants:
- self._try_wrapper(self.tenants_client.delete_tenant, tenant)
- for role in self.roles:
- self._try_wrapper(self.client.delete_role, role)
- for v3_user in self.v3_users:
- self._try_wrapper(self.client.delete_user, v3_user)
- for v3_project in self.projects:
- self._try_wrapper(self.client.delete_project, v3_project)
- for v3_role in self.v3_roles:
- self._try_wrapper(self.client.delete_role, v3_role)
- for domain in self.domains:
- self._try_wrapper(self.client.update_domain, domain,
- enabled=False)
- self._try_wrapper(self.client.delete_domain, domain)
+ def setup_test_user(self):
+ """Set up a test user."""
+ self.setup_test_tenant()
+ self._create_test_user(tenant_id=self.tenant['id'])
+
+ def setup_test_tenant(self):
+ """Set up a test tenant."""
+ self.test_tenant = data_utils.rand_name('test_tenant')
+ self.tenant = self.projects_client.create_tenant(
+ name=self.test_tenant,
+ description=data_utils.rand_name('desc'))['tenant']
+ self.tenants.append(self.tenant)
+
+
+class DataGeneratorV3(BaseDataGenerator):
+
+ def setup_test_user(self):
+ """Set up a test user."""
+ self.setup_test_project()
+ self._create_test_user(project_id=self.project['id'])
+
+ def setup_test_project(self):
+ """Set up a test project."""
+ self.test_project = data_utils.rand_name('test_project')
+ self.project = self.projects_client.create_project(
+ name=self.test_project,
+ description=data_utils.rand_name('desc'))['project']
+ self.projects.append(self.project)
+
+ def setup_test_domain(self):
+ """Set up a test domain."""
+ self.domain = self.client.create_domain(
+ name=data_utils.rand_name('test_domain'),
+ description=data_utils.rand_name('desc'))['domain']
+ self.domains.append(self.domain)
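
The generator split above replaces the mixed v2/v3 ``DataGenerator`` with a shared base plus two thin subclasses, so both API versions expose the same ``setup_test_user()`` / ``teardown_all()`` entry points. A minimal sketch of how the base classes wire it up, assuming the clients created in ``setup_clients()``::

    # v2 admin tests: tenant, user and role clients are passed explicitly.
    data = DataGeneratorV2(cls.client, cls.tenants_client,
                           cls.users_client, cls.roles_client)
    data.setup_test_user()     # creates a tenant, then a user inside it
    data.setup_test_role()

    # v3 admin tests: the single v3 client handles projects, users and roles.
    data_v3 = DataGeneratorV3(cls.client)
    data_v3.setup_test_user()  # creates a project, then a user inside it

    # Cleanup is identical for both variants.
    data.teardown_all()
    data_v3.teardown_all()
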
diff --git a/tempest/api/identity/v2/test_ec2_credentials.py b/tempest/api/identity/v2/test_ec2_credentials.py
index 88161a3..bd49326 100644
--- a/tempest/api/identity/v2/test_ec2_credentials.py
+++ b/tempest/api/identity/v2/test_ec2_credentials.py
@@ -36,12 +36,12 @@
@test.idempotent_id('b580fab9-7ae9-46e8-8138-417260cb6f9f')
def test_create_ec2_credentials(self):
"""Create user ec2 credentials."""
- resp = self.non_admin_client.create_user_ec2_credentials(
+ resp = self.non_admin_users_client.create_user_ec2_credentials(
self.creds.credentials.user_id,
tenant_id=self.creds.credentials.tenant_id)["credential"]
access = resp['access']
self.addCleanup(
- self.non_admin_client.delete_user_ec2_credentials,
+ self.non_admin_users_client.delete_user_ec2_credentials,
self.creds.credentials.user_id, access)
self.assertNotEmpty(resp['access'])
self.assertNotEmpty(resp['secret'])
@@ -54,24 +54,24 @@
created_creds = []
fetched_creds = []
# create first ec2 credentials
- creds1 = self.non_admin_client.create_user_ec2_credentials(
+ creds1 = self.non_admin_users_client.create_user_ec2_credentials(
self.creds.credentials.user_id,
tenant_id=self.creds.credentials.tenant_id)["credential"]
created_creds.append(creds1['access'])
# create second ec2 credentials
- creds2 = self.non_admin_client.create_user_ec2_credentials(
+ creds2 = self.non_admin_users_client.create_user_ec2_credentials(
self.creds.credentials.user_id,
tenant_id=self.creds.credentials.tenant_id)["credential"]
created_creds.append(creds2['access'])
# add credentials to be cleaned up
self.addCleanup(
- self.non_admin_client.delete_user_ec2_credentials,
+ self.non_admin_users_client.delete_user_ec2_credentials,
self.creds.credentials.user_id, creds1['access'])
self.addCleanup(
- self.non_admin_client.delete_user_ec2_credentials,
+ self.non_admin_users_client.delete_user_ec2_credentials,
self.creds.credentials.user_id, creds2['access'])
# get the list of user ec2 credentials
- resp = self.non_admin_client.list_user_ec2_credentials(
+ resp = self.non_admin_users_client.list_user_ec2_credentials(
self.creds.credentials.user_id)["credentials"]
fetched_creds = [cred['access'] for cred in resp]
# created credentials should be in a fetched list
@@ -84,14 +84,14 @@
@test.idempotent_id('cb284075-b613-440d-83ca-fe0b33b3c2b8')
def test_show_ec2_credentials(self):
"""Get the definite user ec2 credentials."""
- resp = self.non_admin_client.create_user_ec2_credentials(
+ resp = self.non_admin_users_client.create_user_ec2_credentials(
self.creds.credentials.user_id,
tenant_id=self.creds.credentials.tenant_id)["credential"]
self.addCleanup(
- self.non_admin_client.delete_user_ec2_credentials,
+ self.non_admin_users_client.delete_user_ec2_credentials,
self.creds.credentials.user_id, resp['access'])
- ec2_creds = self.non_admin_client.show_user_ec2_credentials(
+ ec2_creds = self.non_admin_users_client.show_user_ec2_credentials(
self.creds.credentials.user_id, resp['access']
)["credential"]
for key in ['access', 'secret', 'user_id', 'tenant_id']:
@@ -100,14 +100,14 @@
@test.idempotent_id('6aba0d4c-b76b-4e46-aa42-add79bc1551d')
def test_delete_ec2_credentials(self):
"""Delete user ec2 credentials."""
- resp = self.non_admin_client.create_user_ec2_credentials(
+ resp = self.non_admin_users_client.create_user_ec2_credentials(
self.creds.credentials.user_id,
tenant_id=self.creds.credentials.tenant_id)["credential"]
access = resp['access']
- self.non_admin_client.delete_user_ec2_credentials(
+ self.non_admin_users_client.delete_user_ec2_credentials(
self.creds.credentials.user_id, access)
self.assertRaises(
lib_exc.NotFound,
- self.non_admin_client.show_user_ec2_credentials,
+ self.non_admin_users_client.show_user_ec2_credentials,
self.creds.credentials.user_id,
access)
diff --git a/tempest/api/identity/v2/test_users.py b/tempest/api/identity/v2/test_users.py
index 5f2a8c4..a59a1a0 100644
--- a/tempest/api/identity/v2/test_users.py
+++ b/tempest/api/identity/v2/test_users.py
@@ -41,8 +41,9 @@
# we need new non-admin Identity Client with new credentials, since
# current non_admin_client token will be revoked after updating
# password
- self.non_admin_client_for_cleanup = copy.copy(self.non_admin_client)
- self.non_admin_client_for_cleanup.auth_provider = (
+ self.non_admin_users_client_for_cleanup = copy.copy(
+ self.non_admin_users_client)
+ self.non_admin_users_client_for_cleanup.auth_provider = (
manager.get_auth_provider(self.new_creds))
user_id = self.creds.credentials.user_id
old_pass = self.creds.credentials.password
@@ -50,15 +51,11 @@
# to change password back. important for allow_tenant_isolation = false
self.addCleanup(
- self.non_admin_client_for_cleanup.update_user_own_password,
- user_id=user_id,
- new_pass=old_pass,
- old_pass=new_pass)
-
+ self.non_admin_users_client_for_cleanup.update_user_own_password,
+ user_id, original_password=new_pass, password=old_pass)
# user updates own password
- self.non_admin_client.update_user_own_password(
- user_id=user_id, new_pass=new_pass, old_pass=old_pass)
-
+ self.non_admin_users_client.update_user_own_password(
+ user_id, password=new_pass, original_password=old_pass)
# TODO(lbragstad): Sleeping after the response status has been checked
# and the body loaded as JSON allows requests to fail-fast. The sleep
# is necessary because keystone will err on the side of security and
@@ -76,7 +73,7 @@
# authorize with old token should lead to Unauthorized
self.assertRaises(exceptions.Unauthorized,
self.non_admin_token_client.auth_token,
- self.non_admin_client.token)
+ self.non_admin_users_client.token)
# authorize with old password should lead to Unauthorized
self.assertRaises(exceptions.Unauthorized,
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index da0ce83..ade7b67 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -12,7 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log as logging
from six import moves
from tempest_lib import exceptions as lib_exc
@@ -22,8 +21,6 @@
CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
class BaseImageTest(tempest.test.BaseTestCase):
"""Base test class for Image API tests."""
@@ -62,16 +59,12 @@
@classmethod
def create_image(cls, **kwargs):
"""Wrapper that returns a test image."""
- name = data_utils.rand_name(cls.__name__ + "-instance")
- if 'name' in kwargs:
- name = kwargs.pop('name')
+ if 'name' not in kwargs:
+ name = data_utils.rand_name(cls.__name__ + "-instance")
+ kwargs['name'] = name
- container_format = kwargs.pop('container_format')
- disk_format = kwargs.pop('disk_format')
-
- image = cls.client.create_image(name, container_format,
- disk_format, **kwargs)
+ image = cls.client.create_image(**kwargs)
# Image objects returned by the v1 client have the image
# data inside a dict that is keyed against 'image'.
if 'image' in image:
@@ -156,7 +149,7 @@
def _create_image(self):
name = data_utils.rand_name('image')
- image = self.os_img_client.create_image(name,
+ image = self.os_img_client.create_image(name=name,
container_format='bare',
disk_format='raw')
image_id = image['id']
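
With the wrapper above, image attributes are passed purely as keyword arguments and ``name`` may be omitted, in which case one is generated from the test class name. A hedged sketch of both call styles (format values illustrative)::

    # Explicit name plus the required formats.
    body = cls.create_image(name='test-image',
                            container_format='bare',
                            disk_format='raw',
                            is_public=False)
    image_id = body['id']

    # Name omitted: the wrapper fills in '<ClassName>-instance-<random suffix>'.
    body = cls.create_image(container_format='bare', disk_format='raw')
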
diff --git a/tempest/api/image/v1/test_images.py b/tempest/api/image/v1/test_images.py
index 64f3174..1a84d06 100644
--- a/tempest/api/image/v1/test_images.py
+++ b/tempest/api/image/v1/test_images.py
@@ -18,11 +18,27 @@
from tempest.api.image import base
from tempest.common.utils import data_utils
from tempest import config
+from tempest import exceptions
from tempest import test
CONF = config.CONF
+def get_container_and_disk_format():
+ a_formats = ['ami', 'ari', 'aki']
+
+ container_format = CONF.image.container_formats[0]
+ disk_format = CONF.image.disk_formats[0]
+
+ if container_format in a_formats and container_format != disk_format:
+ msg = ("The container format and the disk format don't match. "
+               "Container format: %(container)s, Disk format: %(disk)s." %
+ {'container': container_format, 'disk': disk_format})
+ raise exceptions.InvalidConfiguration(message=msg)
+
+ return container_format, disk_format
+
+
class CreateRegisterImagesTest(base.BaseV1ImageTest):
"""Here we test the registration and creation of images."""
@@ -30,9 +46,10 @@
def test_register_then_upload(self):
# Register, then upload an image
properties = {'prop1': 'val1'}
+ container_format, disk_format = get_container_and_disk_format()
body = self.create_image(name='New Name',
- container_format='bare',
- disk_format='raw',
+ container_format=container_format,
+ disk_format=disk_format,
is_public=False,
properties=properties)
self.assertIn('id', body)
@@ -52,9 +69,10 @@
@test.idempotent_id('69da74d9-68a9-404b-9664-ff7164ccb0f5')
def test_register_remote_image(self):
# Register a new remote image
+ container_format, disk_format = get_container_and_disk_format()
body = self.create_image(name='New Remote Image',
- container_format='bare',
- disk_format='raw', is_public=False,
+ container_format=container_format,
+ disk_format=disk_format, is_public=False,
location=CONF.image.http_image,
properties={'key1': 'value1',
'key2': 'value2'})
@@ -68,9 +86,10 @@
@test.idempotent_id('6d0e13a7-515b-460c-b91f-9f4793f09816')
def test_register_http_image(self):
+ container_format, disk_format = get_container_and_disk_format()
body = self.create_image(name='New Http Image',
- container_format='bare',
- disk_format='raw', is_public=False,
+ container_format=container_format,
+ disk_format=disk_format, is_public=False,
copy_from=CONF.image.http_image)
self.assertIn('id', body)
image_id = body.get('id')
@@ -82,10 +101,11 @@
@test.idempotent_id('05b19d55-140c-40d0-b36b-fafd774d421b')
def test_register_image_with_min_ram(self):
# Register an image with min ram
+ container_format, disk_format = get_container_and_disk_format()
properties = {'prop1': 'val1'}
body = self.create_image(name='New_image_with_min_ram',
- container_format='bare',
- disk_format='raw',
+ container_format=container_format,
+ disk_format=disk_format,
is_public=False,
min_ram=40,
properties=properties)
@@ -103,22 +123,51 @@
"""Here we test the listing of image information"""
@classmethod
+ def skip_checks(cls):
+ super(ListImagesTest, cls).skip_checks()
+ if (len(CONF.image.container_formats) < 2
+ or len(CONF.image.disk_formats) < 2):
+ skip_msg = ("%s skipped as multiple container formats "
+ "or disk formats are not available." % cls.__name__)
+ raise cls.skipException(skip_msg)
+
+ @classmethod
def resource_setup(cls):
super(ListImagesTest, cls).resource_setup()
# We add a few images here to test the listing functionality of
# the images API
- img1 = cls._create_remote_image('one', 'bare', 'raw')
- img2 = cls._create_remote_image('two', 'ami', 'ami')
- img3 = cls._create_remote_image('dup', 'bare', 'raw')
- img4 = cls._create_remote_image('dup', 'bare', 'raw')
- img5 = cls._create_standard_image('1', 'ami', 'ami', 42)
- img6 = cls._create_standard_image('2', 'ami', 'ami', 142)
- img7 = cls._create_standard_image('33', 'bare', 'raw', 142)
- img8 = cls._create_standard_image('33', 'bare', 'raw', 142)
+ a_formats = ['ami', 'ari', 'aki']
+
+ (cls.container_format,
+ cls.container_format_alt) = CONF.image.container_formats[:2]
+ cls.disk_format, cls.disk_format_alt = CONF.image.disk_formats[:2]
+ if cls.container_format in a_formats:
+ cls.disk_format = cls.container_format
+ if cls.container_format_alt in a_formats:
+ cls.disk_format_alt = cls.container_format_alt
+
+ img1 = cls._create_remote_image('one', cls.container_format,
+ cls.disk_format)
+ img2 = cls._create_remote_image('two', cls.container_format_alt,
+ cls.disk_format_alt)
+ img3 = cls._create_remote_image('dup', cls.container_format,
+ cls.disk_format)
+ img4 = cls._create_remote_image('dup', cls.container_format,
+ cls.disk_format)
+ img5 = cls._create_standard_image('1', cls.container_format_alt,
+ cls.disk_format_alt, 42)
+ img6 = cls._create_standard_image('2', cls.container_format_alt,
+ cls.disk_format_alt, 142)
+ img7 = cls._create_standard_image('33', cls.container_format,
+ cls.disk_format, 142)
+ img8 = cls._create_standard_image('33', cls.container_format,
+ cls.disk_format, 142)
cls.created_set = set(cls.created_images)
- # 5x bare, 3x ami
- cls.bare_set = set((img1, img3, img4, img7, img8))
- cls.ami_set = set((img2, img5, img6))
+ # same container format
+ cls.same_container_format_set = set((img1, img3, img4, img7, img8))
+ # same disk format
+ cls.same_disk_format_set = set((img2, img5, img6))
+
# 1x with size 42
cls.size42_set = set((img5,))
# 3x with size 142
@@ -167,22 +216,25 @@
@test.idempotent_id('f1755589-63d6-4468-b098-589820eb4031')
def test_index_disk_format(self):
- images_list = self.client.list_images(disk_format='ami')['images']
+ images_list = self.client.list_images(
+ disk_format=self.disk_format_alt)['images']
for image in images_list:
- self.assertEqual(image['disk_format'], 'ami')
+ self.assertEqual(image['disk_format'], self.disk_format_alt)
result_set = set(map(lambda x: x['id'], images_list))
- self.assertTrue(self.ami_set <= result_set)
- self.assertFalse(self.created_set - self.ami_set <= result_set)
+ self.assertTrue(self.same_disk_format_set <= result_set)
+ self.assertFalse(self.created_set - self.same_disk_format_set
+ <= result_set)
@test.idempotent_id('2143655d-96d9-4bec-9188-8674206b4b3b')
def test_index_container_format(self):
- images_list = (self.client.list_images(container_format='bare')
- ['images'])
+ images_list = self.client.list_images(
+ container_format=self.container_format)['images']
for image in images_list:
- self.assertEqual(image['container_format'], 'bare')
+ self.assertEqual(image['container_format'], self.container_format)
result_set = set(map(lambda x: x['id'], images_list))
- self.assertTrue(self.bare_set <= result_set)
- self.assertFalse(self.created_set - self.bare_set <= result_set)
+ self.assertTrue(self.same_container_format_set <= result_set)
+ self.assertFalse(self.created_set - self.same_container_format_set
+ <= result_set)
@test.idempotent_id('feb32ac6-22bb-4a16-afd8-9454bb714b14')
def test_index_max_size(self):
@@ -231,7 +283,9 @@
@classmethod
def resource_setup(cls):
super(UpdateImageMetaTest, cls).resource_setup()
- cls.image_id = cls._create_standard_image('1', 'ami', 'ami', 42)
+ container_format, disk_format = get_container_and_disk_format()
+ cls.image_id = cls._create_standard_image('1', container_format,
+ disk_format, 42)
@classmethod
def _create_standard_image(cls, name, container_format,
diff --git a/tempest/api/image/v1/test_images_negative.py b/tempest/api/image/v1/test_images_negative.py
index 3d94408..f16b80e 100644
--- a/tempest/api/image/v1/test_images_negative.py
+++ b/tempest/api/image/v1/test_images_negative.py
@@ -27,13 +27,17 @@
def test_register_with_invalid_container_format(self):
# Negative tests for invalid data supplied to POST /images
self.assertRaises(lib_exc.BadRequest, self.client.create_image,
- 'test', 'wrong', 'vhd')
+ name='test',
+ container_format='wrong',
+                          disk_format='vhd')
@test.attr(type=['negative'])
@test.idempotent_id('993face5-921d-4e84-aabf-c1bba4234a67')
def test_register_with_invalid_disk_format(self):
self.assertRaises(lib_exc.BadRequest, self.client.create_image,
- 'test', 'bare', 'wrong')
+ name='test',
+ container_format='bare',
+                          disk_format='wrong')
@test.attr(type=['negative'])
@test.idempotent_id('bb016f15-0820-4f27-a92d-09b2f67d2488')
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index 2e6c268..04582c6 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -147,7 +147,7 @@
for disk_fmt in disk_fmts]
for (container_fmt, disk_fmt) in all_pairs[:6]:
- LOG.debug("Creating a image"
+ LOG.debug("Creating an image"
"(Container format: %s, Disk format: %s).",
container_fmt, disk_fmt)
cls._create_standard_image(container_fmt, disk_fmt)
diff --git a/tempest/api/image/v2/test_images_negative.py b/tempest/api/image/v2/test_images_negative.py
index 71c8c7a..485942e 100644
--- a/tempest/api/image/v2/test_images_negative.py
+++ b/tempest/api/image/v2/test_images_negative.py
@@ -90,10 +90,12 @@
def test_register_with_invalid_container_format(self):
# Negative tests for invalid data supplied to POST /images
self.assertRaises(lib_exc.BadRequest, self.client.create_image,
- 'test', 'wrong', 'vhd')
+ name='test', container_format='wrong',
+ disk_format='vhd')
@test.attr(type=['negative'])
@test.idempotent_id('70c6040c-5a97-4111-9e13-e73665264ce1')
def test_register_with_invalid_disk_format(self):
self.assertRaises(lib_exc.BadRequest, self.client.create_image,
- 'test', 'bare', 'wrong')
+ name='test', container_format='bare',
+ disk_format='wrong')
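
The v1 image tests above stop hard-coding ``bare``/``raw`` and instead derive formats from ``CONF.image.container_formats`` and ``CONF.image.disk_formats``, forcing the disk format to match for ami/ari/aki containers. A small worked example of the ``ListImagesTest`` selection logic, assuming the first two configured container formats are ``bare, ami`` and the first two disk formats are ``raw, qcow2`` (values illustrative)::

    a_formats = ['ami', 'ari', 'aki']

    container_format, container_format_alt = ['bare', 'ami']  # CONF.image.container_formats[:2]
    disk_format, disk_format_alt = ['raw', 'qcow2']           # CONF.image.disk_formats[:2]

    if container_format in a_formats:        # 'bare' is not, so 'raw' is kept
        disk_format = container_format
    if container_format_alt in a_formats:    # 'ami' is, so the disk format becomes 'ami'
        disk_format_alt = container_format_alt

    assert (container_format, disk_format) == ('bare', 'raw')
    assert (container_format_alt, disk_format_alt) == ('ami', 'ami')
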
diff --git a/tempest/api/messaging/base.py b/tempest/api/messaging/base.py
index 528fbea..a324c37 100644
--- a/tempest/api/messaging/base.py
+++ b/tempest/api/messaging/base.py
@@ -13,16 +13,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from oslo_log import log as logging
-
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
class BaseMessagingTest(test.BaseTestCase):
diff --git a/tempest/api/messaging/test_claims.py b/tempest/api/messaging/test_claims.py
index 57b8c7f..99edde1 100644
--- a/tempest/api/messaging/test_claims.py
+++ b/tempest/api/messaging/test_claims.py
@@ -13,8 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import logging
-
from six.moves.urllib import parse as urlparse
from tempest_lib import decorators
@@ -24,7 +22,6 @@
from tempest import test
-LOG = logging.getLogger(__name__)
CONF = config.CONF
diff --git a/tempest/api/messaging/test_messages.py b/tempest/api/messaging/test_messages.py
index efbbf56..7f4182a 100644
--- a/tempest/api/messaging/test_messages.py
+++ b/tempest/api/messaging/test_messages.py
@@ -13,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import logging
from tempest.api.messaging import base
from tempest.common.utils import data_utils
@@ -21,7 +20,6 @@
from tempest import test
-LOG = logging.getLogger(__name__)
CONF = config.CONF
diff --git a/tempest/api/messaging/test_queues.py b/tempest/api/messaging/test_queues.py
index df49663..dcb5450 100644
--- a/tempest/api/messaging/test_queues.py
+++ b/tempest/api/messaging/test_queues.py
@@ -13,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import logging
from six import moves
from tempest_lib import exceptions as lib_exc
@@ -24,9 +23,6 @@
from tempest import test
-LOG = logging.getLogger(__name__)
-
-
class TestQueues(base.BaseMessagingTest):
@test.attr(type='smoke')
diff --git a/tempest/api/network/admin/test_agent_management.py b/tempest/api/network/admin/test_agent_management.py
index c5d0d57..61f8e15 100644
--- a/tempest/api/network/admin/test_agent_management.py
+++ b/tempest/api/network/admin/test_agent_management.py
@@ -29,13 +29,13 @@
@classmethod
def resource_setup(cls):
super(AgentManagementTestJSON, cls).resource_setup()
- body = cls.admin_client.list_agents()
+ body = cls.admin_agents_client.list_agents()
agents = body['agents']
cls.agent = agents[0]
@test.idempotent_id('9c80f04d-11f3-44a4-8738-ed2f879b0ff4')
def test_list_agent(self):
- body = self.admin_client.list_agents()
+ body = self.admin_agents_client.list_agents()
agents = body['agents']
        # Heartbeats must be excluded from comparison
self.agent.pop('heartbeat_timestamp', None)
@@ -47,12 +47,12 @@
@test.idempotent_id('e335be47-b9a1-46fd-be30-0874c0b751e6')
def test_list_agents_non_admin(self):
- body = self.client.list_agents()
+ body = self.agents_client.list_agents()
self.assertEqual(len(body["agents"]), 0)
@test.idempotent_id('869bc8e8-0fda-4a30-9b71-f8a7cf58ca9f')
def test_show_agent(self):
- body = self.admin_client.show_agent(self.agent['id'])
+ body = self.admin_agents_client.show_agent(self.agent['id'])
agent = body['agent']
self.assertEqual(agent['id'], self.agent['id'])
@@ -62,8 +62,8 @@
# Try to update the 'admin_state_up' to the original
# one to avoid the negative effect.
agent_status = {'admin_state_up': origin_status}
- body = self.admin_client.update_agent(agent_id=self.agent['id'],
- agent_info=agent_status)
+ body = self.admin_agents_client.update_agent(agent_id=self.agent['id'],
+ agent=agent_status)
updated_status = body['agent']['admin_state_up']
self.assertEqual(origin_status, updated_status)
@@ -72,8 +72,8 @@
self.useFixture(fixtures.LockFixture('agent_description'))
description = 'description for update agent.'
agent_description = {'description': description}
- body = self.admin_client.update_agent(agent_id=self.agent['id'],
- agent_info=agent_description)
+ body = self.admin_agents_client.update_agent(agent_id=self.agent['id'],
+ agent=agent_description)
self.addCleanup(self._restore_agent)
updated_description = body['agent']['description']
self.assertEqual(updated_description, description)
@@ -83,5 +83,5 @@
description = self.agent['description'] or ''
origin_agent = {'description': description}
- self.admin_client.update_agent(agent_id=self.agent['id'],
- agent_info=origin_agent)
+ self.admin_agents_client.update_agent(agent_id=self.agent['id'],
+ agent=origin_agent)
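
Agent operations now live on a dedicated agents client (``cls.os_adm.network_agents_client`` in the network base class below), and ``update_agent`` takes the request body under the ``agent`` keyword. A minimal sketch of the calls exercised above::

    agents = admin_agents_client.list_agents()['agents']
    agent = admin_agents_client.show_agent(agents[0]['id'])['agent']

    # Toggle admin_state_up through the new `agent=` body argument,
    # then restore the original value.
    admin_agents_client.update_agent(agent_id=agent['id'],
                                     agent={'admin_state_up': False})
    admin_agents_client.update_agent(
        agent_id=agent['id'],
        agent={'admin_state_up': agent['admin_state_up']})
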
diff --git a/tempest/api/network/admin/test_dhcp_agent_scheduler.py b/tempest/api/network/admin/test_dhcp_agent_scheduler.py
index 7692b56..fcb6fce 100644
--- a/tempest/api/network/admin/test_dhcp_agent_scheduler.py
+++ b/tempest/api/network/admin/test_dhcp_agent_scheduler.py
@@ -51,7 +51,7 @@
def _check_network_in_dhcp_agent(self, network_id, agent):
network_ids = []
- body = self.admin_client.list_networks_hosted_by_one_dhcp_agent(
+ body = self.admin_agents_client.list_networks_hosted_by_one_dhcp_agent(
agent['id'])
networks = body['networks']
for network in networks:
@@ -65,7 +65,7 @@
self.ports.remove(self.port)
agent = dict()
agent['agent_type'] = None
- body = self.admin_client.list_agents()
+ body = self.admin_agents_client.list_agents()
agents = body['agents']
for a in agents:
if a['agent_type'] == 'DHCP agent':
@@ -84,14 +84,14 @@
self._remove_network_from_dhcp_agent(network_id, agent)
def _remove_network_from_dhcp_agent(self, network_id, agent):
- self.admin_client.remove_network_from_dhcp_agent(
+ self.admin_agents_client.delete_network_from_dhcp_agent(
agent_id=agent['id'],
network_id=network_id)
self.assertFalse(self._check_network_in_dhcp_agent(
network_id, agent))
def _add_dhcp_agent_to_network(self, network_id, agent):
- self.admin_client.add_dhcp_agent_to_network(agent['id'],
- network_id)
+ self.admin_agents_client.add_dhcp_agent_to_network(
+ agent['id'], network_id=network_id)
self.assertTrue(self._check_network_in_dhcp_agent(
network_id, agent))
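
The DHCP scheduling calls follow the same move to the agents client, with the network id passed as a keyword. A rough sketch of the add/verify/remove round trip checked above::

    admin_agents_client.add_dhcp_agent_to_network(agent_id,
                                                  network_id=network_id)
    hosted = admin_agents_client.list_networks_hosted_by_one_dhcp_agent(
        agent_id)['networks']
    assert network_id in [net['id'] for net in hosted]

    admin_agents_client.delete_network_from_dhcp_agent(agent_id=agent_id,
                                                       network_id=network_id)
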
diff --git a/tempest/api/network/admin/test_l3_agent_scheduler.py b/tempest/api/network/admin/test_l3_agent_scheduler.py
index 36747a3..5d12e17 100644
--- a/tempest/api/network/admin/test_l3_agent_scheduler.py
+++ b/tempest/api/network/admin/test_l3_agent_scheduler.py
@@ -14,9 +14,11 @@
from tempest.api.network import base
from tempest.common.utils import data_utils
+from tempest import config
from tempest import exceptions
from tempest import test
+CONF = config.CONF
AGENT_TYPE = 'L3 agent'
AGENT_MODES = (
'legacy',
@@ -51,7 +53,7 @@
@classmethod
def resource_setup(cls):
super(L3AgentSchedulerTestJSON, cls).resource_setup()
- body = cls.admin_client.list_agents()
+ body = cls.admin_agents_client.list_agents()
agents = body['agents']
for agent in agents:
# TODO(armax): falling back on default _agent_mode can be
@@ -80,6 +82,19 @@
cls.port = cls.create_port(cls.network)
cls.client.add_router_interface_with_port_id(
cls.router['id'], cls.port['id'])
+        # NOTE: Sometimes we have seen this test fail with dvr in
+ # multinode tests, since the dhcp port is not created before
+ # the test gets executed and so the router is not scheduled
+ # on the given agent. By adding the external gateway info to
+ # the router, the router should be properly scheduled in the
+ # dvr_snat node.
+ # This is a temporary work around to prevent a race condition.
+ external_gateway_info = {
+ 'network_id': CONF.network.public_network_id,
+ 'enable_snat': True}
+ cls.admin_client.update_router_with_snat_gw_info(
+ cls.router['id'],
+ external_gateway_info=external_gateway_info)
@classmethod
def resource_cleanup(cls):
@@ -90,12 +105,12 @@
@test.idempotent_id('b7ce6e89-e837-4ded-9b78-9ed3c9c6a45a')
def test_list_routers_on_l3_agent(self):
- self.admin_client.list_routers_on_l3_agent(self.agent['id'])
+ self.admin_agents_client.list_routers_on_l3_agent(self.agent['id'])
@test.idempotent_id('9464e5e7-8625-49c3-8fd1-89c52be59d66')
def test_add_list_remove_router_on_l3_agent(self):
l3_agent_ids = list()
- self.admin_client.add_router_to_l3_agent(
+ self.admin_agents_client.create_router_on_l3_agent(
self.agent['id'],
router_id=self.router['id'])
body = (
@@ -105,7 +120,7 @@
self.assertIn('agent_type', agent)
self.assertEqual('L3 agent', agent['agent_type'])
self.assertIn(self.agent['id'], l3_agent_ids)
- body = self.admin_client.remove_router_from_l3_agent(
+ body = self.admin_agents_client.delete_router_from_l3_agent(
self.agent['id'],
self.router['id'])
        # NOTE(afazekas): The deletion is not asserted, because neutron
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index f1fd3e9..db4561f 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -14,7 +14,6 @@
# under the License.
import netaddr
-from oslo_log import log as logging
from tempest_lib import exceptions as lib_exc
from tempest.common.utils import data_utils
@@ -24,8 +23,6 @@
CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
class BaseNetworkTest(tempest.test.BaseTestCase):
"""Base class for the Neutron tests
@@ -71,11 +68,17 @@
def setup_clients(cls):
super(BaseNetworkTest, cls).setup_clients()
cls.client = cls.os.network_client
+ cls.agents_client = cls.os.network_agents_client
+ cls.network_extensions_client = cls.os.network_extensions_client
cls.networks_client = cls.os.networks_client
+ cls.subnetpools_client = cls.os.subnetpools_client
cls.subnets_client = cls.os.subnets_client
cls.ports_client = cls.os.ports_client
cls.quotas_client = cls.os.network_quotas_client
cls.floating_ips_client = cls.os.floating_ips_client
+ cls.security_groups_client = cls.os.security_groups_client
+ cls.security_group_rules_client = (
+ cls.os.security_group_rules_client)
@classmethod
def resource_setup(cls):
@@ -272,6 +275,7 @@
def setup_clients(cls):
super(BaseAdminNetworkTest, cls).setup_clients()
cls.admin_client = cls.os_adm.network_client
+ cls.admin_agents_client = cls.os_adm.network_agents_client
cls.admin_networks_client = cls.os_adm.networks_client
cls.admin_subnets_client = cls.os_adm.subnets_client
cls.admin_ports_client = cls.os_adm.ports_client
diff --git a/tempest/api/network/base_security_groups.py b/tempest/api/network/base_security_groups.py
index 1cef2cc..3ea3aea 100644
--- a/tempest/api/network/base_security_groups.py
+++ b/tempest/api/network/base_security_groups.py
@@ -22,27 +22,29 @@
def _create_security_group(self):
# Create a security group
name = data_utils.rand_name('secgroup-')
- group_create_body = self.client.create_security_group(name=name)
+ group_create_body = (
+ self.security_groups_client.create_security_group(name=name))
self.addCleanup(self._delete_security_group,
group_create_body['security_group']['id'])
self.assertEqual(group_create_body['security_group']['name'], name)
return group_create_body, name
def _delete_security_group(self, secgroup_id):
- self.client.delete_security_group(secgroup_id)
+ self.security_groups_client.delete_security_group(secgroup_id)
# Asserting that the security group is not found in the list
# after deletion
- list_body = self.client.list_security_groups()
+ list_body = self.security_groups_client.list_security_groups()
secgroup_list = list()
for secgroup in list_body['security_groups']:
secgroup_list.append(secgroup['id'])
self.assertNotIn(secgroup_id, secgroup_list)
def _delete_security_group_rule(self, rule_id):
- self.client.delete_security_group_rule(rule_id)
+ self.security_group_rules_client.delete_security_group_rule(rule_id)
# Asserting that the security group is not found in the list
# after deletion
- list_body = self.client.list_security_group_rules()
+ list_body = (
+ self.security_group_rules_client.list_security_group_rules())
rules_list = list()
for rule in list_body['security_group_rules']:
rules_list.append(rule['id'])
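
Security groups and their rules are now handled by two dedicated clients rather than the generic network client. A minimal sketch of the create/list/delete flow used by the helpers above (group name illustrative)::

    sg = security_groups_client.create_security_group(
        name='secgroup-demo')['security_group']
    rule = security_group_rules_client.create_security_group_rule(
        security_group_id=sg['id'],
        direction='ingress',
        protocol='tcp')['security_group_rule']

    rule_ids = [r['id'] for r in
                security_group_rules_client.list_security_group_rules()
                ['security_group_rules']]
    assert rule['id'] in rule_ids

    security_group_rules_client.delete_security_group_rule(rule['id'])
    security_groups_client.delete_security_group(sg['id'])
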
diff --git a/tempest/api/network/test_extensions.py b/tempest/api/network/test_extensions.py
index b83d2b0..d71d600 100644
--- a/tempest/api/network/test_extensions.py
+++ b/tempest/api/network/test_extensions.py
@@ -41,14 +41,15 @@
expected_alias = [ext for ext in expected_alias if
test.is_extension_enabled(ext, 'network')]
actual_alias = list()
- extensions = self.client.list_extensions()
+ extensions = self.network_extensions_client.list_extensions()
list_extensions = extensions['extensions']
# Show and verify the details of the available extensions
for ext in list_extensions:
ext_name = ext['name']
ext_alias = ext['alias']
actual_alias.append(ext['alias'])
- ext_details = self.client.show_extension(ext_alias)
+ ext_details = self.network_extensions_client.show_extension(
+ ext_alias)
ext_details = ext_details['extension']
self.assertIsNotNone(ext_details)
diff --git a/tempest/api/network/test_metering_extensions.py b/tempest/api/network/test_metering_extensions.py
index 007ba3b..299700f 100644
--- a/tempest/api/network/test_metering_extensions.py
+++ b/tempest/api/network/test_metering_extensions.py
@@ -12,16 +12,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log as logging
-
from tempest.api.network import base
from tempest.common.utils import data_utils
from tempest import test
-LOG = logging.getLogger(__name__)
-
-
class MeteringTestJSON(base.BaseAdminNetworkTest):
"""Tests the following operations in the Neutron API:
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index a266142..1c446ef 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -449,9 +449,9 @@
@test.idempotent_id('d4f9024d-1e28-4fc1-a6b1-25dbc6fa11e2')
def test_bulk_create_delete_network(self):
# Creates 2 networks in one request
- network_names = [data_utils.rand_name('network-'),
- data_utils.rand_name('network-')]
- body = self.client.create_bulk_network(network_names)
+ network_list = [{'name': data_utils.rand_name('network-')},
+ {'name': data_utils.rand_name('network-')}]
+ body = self.client.create_bulk_network(networks=network_list)
created_networks = body['networks']
self.addCleanup(self._delete_networks, created_networks)
# Asserting that the networks are found in the list after creation
@@ -486,7 +486,7 @@
}
subnets_list.append(p1)
del subnets_list[1]['name']
- body = self.client.create_bulk_subnet(subnets_list)
+ body = self.client.create_bulk_subnet(subnets=subnets_list)
created_subnets = body['subnets']
self.addCleanup(self._delete_subnets, created_subnets)
# Asserting that the subnets are found in the list after creation
@@ -512,7 +512,7 @@
}
port_list.append(p1)
del port_list[1]['name']
- body = self.client.create_bulk_port(port_list)
+ body = self.client.create_bulk_port(ports=port_list)
created_ports = body['ports']
self.addCleanup(self._delete_ports, created_ports)
# Asserting that the ports are found in the list after creation
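
The bulk-create calls above now take the list of resource dicts under an explicit keyword (``networks=``, ``subnets=``, ``ports=``). A short sketch for the network case; the subnet and port variants follow the same shape::

    network_list = [{'name': 'net-a'}, {'name': 'net-b'}]   # names illustrative
    body = client.create_bulk_network(networks=network_list)
    created_ids = [net['id'] for net in body['networks']]
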
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index 43da1c4..67f2c83 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -74,7 +74,7 @@
network2 = self.create_network(network_name=name)
network_list = [network1['id'], network2['id']]
port_list = [{'network_id': net_id} for net_id in network_list]
- body = self.client.create_bulk_port(port_list)
+ body = self.client.create_bulk_port(ports=port_list)
created_ports = body['ports']
port1 = created_ports[0]
port2 = created_ports[1]
@@ -250,17 +250,19 @@
fixed_ip_1 = [{'subnet_id': subnet_1['id']}]
security_groups_list = list()
+ sec_grps_client = self.security_groups_client
for name in security_groups_names:
- group_create_body = self.client.create_security_group(
+ group_create_body = sec_grps_client.create_security_group(
name=name)
- self.addCleanup(self.client.delete_security_group,
+ self.addCleanup(self.security_groups_client.delete_security_group,
group_create_body['security_group']['id'])
security_groups_list.append(group_create_body['security_group']
['id'])
# Create a port
sec_grp_name = data_utils.rand_name('secgroup')
- security_group = self.client.create_security_group(name=sec_grp_name)
- self.addCleanup(self.client.delete_security_group,
+ security_group = sec_grps_client.create_security_group(
+ name=sec_grp_name)
+ self.addCleanup(self.security_groups_client.delete_security_group,
security_group['security_group']['id'])
post_body = {
"name": data_utils.rand_name('port-'),
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
index ed191b6..406ad44 100644
--- a/tempest/api/network/test_routers.py
+++ b/tempest/api/network/test_routers.py
@@ -301,7 +301,7 @@
test_routes.sort(key=lambda x: x['destination'])
extra_route = self.client.update_extra_routes(router['id'],
- test_routes)
+ routes=test_routes)
show_body = self.client.show_router(router['id'])
# Assert the number of routes
self.assertEqual(routes_num, len(extra_route['router']['routes']))
diff --git a/tempest/api/network/test_security_groups.py b/tempest/api/network/test_security_groups.py
index ccc5232..7d0765e 100644
--- a/tempest/api/network/test_security_groups.py
+++ b/tempest/api/network/test_security_groups.py
@@ -41,7 +41,8 @@
remote_ip_prefix=None):
# Create Security Group rule with the input params and validate
# that SG rule is created with the same parameters.
- rule_create_body = self.client.create_security_group_rule(
+ sec_group_rules_client = self.security_group_rules_client
+ rule_create_body = sec_group_rules_client.create_security_group_rule(
security_group_id=sg_id,
direction=direction,
ethertype=ethertype,
@@ -71,7 +72,7 @@
@test.idempotent_id('e30abd17-fef9-4739-8617-dc26da88e686')
def test_list_security_groups(self):
# Verify the that security group belonging to tenant exist in list
- body = self.client.list_security_groups()
+ body = self.security_groups_client.list_security_groups()
security_groups = body['security_groups']
found = None
for n in security_groups:
@@ -86,7 +87,7 @@
group_create_body, name = self._create_security_group()
# List security groups and verify if created group is there in response
- list_body = self.client.list_security_groups()
+ list_body = self.security_groups_client.list_security_groups()
secgroup_list = list()
for secgroup in list_body['security_groups']:
secgroup_list.append(secgroup['id'])
@@ -94,7 +95,7 @@
# Update the security group
new_name = data_utils.rand_name('security-')
new_description = data_utils.rand_name('security-description')
- update_body = self.client.update_security_group(
+ update_body = self.security_groups_client.update_security_group(
group_create_body['security_group']['id'],
name=new_name,
description=new_description)
@@ -103,7 +104,7 @@
self.assertEqual(update_body['security_group']['description'],
new_description)
# Show details of the updated security group
- show_body = self.client.show_security_group(
+ show_body = self.security_groups_client.show_security_group(
group_create_body['security_group']['id'])
self.assertEqual(show_body['security_group']['name'], new_name)
self.assertEqual(show_body['security_group']['description'],
@@ -116,8 +117,9 @@
# Create rules for each protocol
protocols = ['tcp', 'udp', 'icmp']
+ client = self.security_group_rules_client
for protocol in protocols:
- rule_create_body = self.client.create_security_group_rule(
+ rule_create_body = client.create_security_group_rule(
security_group_id=group_create_body['security_group']['id'],
protocol=protocol,
direction='ingress',
@@ -125,7 +127,7 @@
)
# Show details of the created security rule
- show_rule_body = self.client.show_security_group_rule(
+ show_rule_body = client.show_security_group_rule(
rule_create_body['security_group_rule']['id']
)
create_dict = rule_create_body['security_group_rule']
@@ -135,7 +137,8 @@
"%s does not match." % key)
# List rules and verify created rule is in response
- rule_list_body = self.client.list_security_group_rules()
+ rule_list_body = (
+ self.security_group_rules_client.list_security_group_rules())
rule_list = [rule['id']
for rule in rule_list_body['security_group_rules']]
self.assertIn(rule_create_body['security_group_rule']['id'],
@@ -223,7 +226,8 @@
direction = 'ingress'
protocol = 17
security_group_id = group_create_body['security_group']['id']
- rule_create_body = self.client.create_security_group_rule(
+ client = self.security_group_rules_client
+ rule_create_body = client.create_security_group_rule(
security_group_id=security_group_id,
direction=direction,
protocol=protocol
diff --git a/tempest/api/network/test_security_groups_negative.py b/tempest/api/network/test_security_groups_negative.py
index f80ea59..ff38e9e 100644
--- a/tempest/api/network/test_security_groups_negative.py
+++ b/tempest/api/network/test_security_groups_negative.py
@@ -38,23 +38,25 @@
@test.idempotent_id('424fd5c3-9ddc-486a-b45f-39bf0c820fc6')
def test_show_non_existent_security_group(self):
non_exist_id = str(uuid.uuid4())
- self.assertRaises(lib_exc.NotFound, self.client.show_security_group,
- non_exist_id)
+ self.assertRaises(
+ lib_exc.NotFound, self.security_groups_client.show_security_group,
+ non_exist_id)
@test.attr(type=['negative'])
@test.idempotent_id('4c094c09-000b-4e41-8100-9617600c02a6')
def test_show_non_existent_security_group_rule(self):
non_exist_id = str(uuid.uuid4())
- self.assertRaises(lib_exc.NotFound,
- self.client.show_security_group_rule,
- non_exist_id)
+ self.assertRaises(
+ lib_exc.NotFound,
+ self.security_group_rules_client.show_security_group_rule,
+ non_exist_id)
@test.attr(type=['negative'])
@test.idempotent_id('1f1bb89d-5664-4956-9fcd-83ee0fa603df')
def test_delete_non_existent_security_group(self):
non_exist_id = str(uuid.uuid4())
self.assertRaises(lib_exc.NotFound,
- self.client.delete_security_group,
+ self.security_groups_client.delete_security_group,
non_exist_id
)
@@ -66,7 +68,8 @@
# Create rule with bad protocol name
pname = 'bad_protocol_name'
self.assertRaises(
- lib_exc.BadRequest, self.client.create_security_group_rule,
+ lib_exc.BadRequest,
+ self.security_group_rules_client.create_security_group_rule,
security_group_id=group_create_body['security_group']['id'],
protocol=pname, direction='ingress', ethertype=self.ethertype)
@@ -79,7 +82,8 @@
prefix = ['192.168.1./24', '192.168.1.1/33', 'bad_prefix', '256']
for remote_ip_prefix in prefix:
self.assertRaises(
- lib_exc.BadRequest, self.client.create_security_group_rule,
+ lib_exc.BadRequest,
+ self.security_group_rules_client.create_security_group_rule,
security_group_id=group_create_body['security_group']['id'],
protocol='tcp', direction='ingress', ethertype=self.ethertype,
remote_ip_prefix=remote_ip_prefix)
@@ -94,7 +98,8 @@
group_ids = ['bad_group_id', non_exist_id]
for remote_group_id in group_ids:
self.assertRaises(
- lib_exc.NotFound, self.client.create_security_group_rule,
+ lib_exc.NotFound,
+ self.security_group_rules_client.create_security_group_rule,
security_group_id=group_create_body['security_group']['id'],
protocol='tcp', direction='ingress', ethertype=self.ethertype,
remote_group_id=remote_group_id)
@@ -108,7 +113,8 @@
# Create rule specifying both remote_ip_prefix and remote_group_id
prefix = self._tenant_network_cidr
self.assertRaises(
- lib_exc.BadRequest, self.client.create_security_group_rule,
+ lib_exc.BadRequest,
+ self.security_group_rules_client.create_security_group_rule,
security_group_id=sg1_body['security_group']['id'],
protocol='tcp', direction='ingress',
ethertype=self.ethertype, remote_ip_prefix=prefix,
@@ -122,7 +128,8 @@
# Create rule with bad ethertype
ethertype = 'bad_ethertype'
self.assertRaises(
- lib_exc.BadRequest, self.client.create_security_group_rule,
+ lib_exc.BadRequest,
+ self.security_group_rules_client.create_security_group_rule,
security_group_id=group_create_body['security_group']['id'],
protocol='udp', direction='ingress', ethertype=ethertype)
@@ -139,7 +146,8 @@
(-16, 65536, 'Invalid value for port')]
for pmin, pmax, msg in states:
ex = self.assertRaises(
- lib_exc.BadRequest, self.client.create_security_group_rule,
+ lib_exc.BadRequest,
+ self.security_group_rules_client.create_security_group_rule,
security_group_id=group_create_body['security_group']['id'],
protocol='tcp', port_range_min=pmin, port_range_max=pmax,
direction='ingress', ethertype=self.ethertype)
@@ -151,7 +159,8 @@
(300, 1, 'Invalid value for ICMP type')]
for pmin, pmax, msg in states:
ex = self.assertRaises(
- lib_exc.BadRequest, self.client.create_security_group_rule,
+ lib_exc.BadRequest,
+ self.security_group_rules_client.create_security_group_rule,
security_group_id=group_create_body['security_group']['id'],
protocol='icmp', port_range_min=pmin, port_range_max=pmax,
direction='ingress', ethertype=self.ethertype)
@@ -163,7 +172,7 @@
# Create security group named 'default', it should be failed.
name = 'default'
self.assertRaises(lib_exc.Conflict,
- self.client.create_security_group,
+ self.security_groups_client.create_security_group,
name=name)
@test.attr(type=['negative'])
@@ -175,7 +184,7 @@
min_port = 66
max_port = 67
# Create a rule with valid params
- self.client.create_security_group_rule(
+ self.security_group_rules_client.create_security_group_rule(
security_group_id=body['security_group']['id'],
direction='ingress',
ethertype=self.ethertype,
@@ -186,7 +195,8 @@
# Try creating the same security group rule, it should fail
self.assertRaises(
- lib_exc.Conflict, self.client.create_security_group_rule,
+ lib_exc.Conflict,
+ self.security_group_rules_client.create_security_group_rule,
security_group_id=body['security_group']['id'],
protocol='tcp', direction='ingress', ethertype=self.ethertype,
port_range_min=min_port, port_range_max=max_port)
@@ -196,10 +206,11 @@
def test_create_security_group_rule_with_non_existent_security_group(self):
# Create security group rules with not existing security group.
non_existent_sg = str(uuid.uuid4())
- self.assertRaises(lib_exc.NotFound,
- self.client.create_security_group_rule,
- security_group_id=non_existent_sg,
- direction='ingress', ethertype=self.ethertype)
+ self.assertRaises(
+ lib_exc.NotFound,
+ self.security_group_rules_client.create_security_group_rule,
+ security_group_id=non_existent_sg,
+ direction='ingress', ethertype=self.ethertype)
class NegativeSecGroupIPv6Test(NegativeSecGroupTest):
@@ -220,7 +231,7 @@
self.assertRaisesRegexp(
lib_exc.BadRequest,
"Conflicting value ethertype",
- self.client.create_security_group_rule,
+ self.security_group_rules_client.create_security_group_rule,
security_group_id=group_create_body['security_group']['id'],
protocol='tcp', direction='ingress',
ethertype=pair['ethertype'],
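The changes to both security-group test modules follow one pattern: calls that used the generic self.client now go through the dedicated security_groups_client and security_group_rules_client. A minimal sketch of that split, using illustrative fake clients rather than the real tempest classes::

    class FakeSecurityGroupsClient(object):
        def create_security_group(self, **kwargs):
            return {'security_group': {'id': 'sg-1',
                                       'name': kwargs.get('name')}}

    class FakeSecurityGroupRulesClient(object):
        def create_security_group_rule(self, **kwargs):
            return {'security_group_rule': dict(kwargs, id='rule-1')}

    class FakeSecGroupTest(object):
        # Two dedicated clients instead of one multi-purpose self.client.
        security_groups_client = FakeSecurityGroupsClient()
        security_group_rules_client = FakeSecurityGroupRulesClient()

        def create_group_and_rule(self):
            sg = self.security_groups_client.create_security_group(
                name='demo-group')
            rules = self.security_group_rules_client
            return rules.create_security_group_rule(
                security_group_id=sg['security_group']['id'],
                direction='ingress')

    print(FakeSecGroupTest().create_group_and_rule())
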
diff --git a/tempest/api/network/test_subnetpools_extensions.py b/tempest/api/network/test_subnetpools_extensions.py
index 8a61ff8..e5d0462 100644
--- a/tempest/api/network/test_subnetpools_extensions.py
+++ b/tempest/api/network/test_subnetpools_extensions.py
@@ -50,27 +50,28 @@
subnetpool_name = data_utils.rand_name('subnetpools')
# create subnet pool
prefix = CONF.network.default_network
- body = self.client.create_subnetpools(name=subnetpool_name,
- prefixes=prefix)
+ body = self.subnetpools_client.create_subnetpool(name=subnetpool_name,
+ prefixes=prefix)
subnetpool_id = body["subnetpool"]["id"]
self.addCleanup(self._cleanup_subnetpools, subnetpool_id)
self.assertEqual(subnetpool_name, body["subnetpool"]["name"])
# get detail about subnet pool
- body = self.client.show_subnetpools(subnetpool_id)
+ body = self.subnetpools_client.show_subnetpool(subnetpool_id)
self.assertEqual(subnetpool_name, body["subnetpool"]["name"])
# update the subnet pool
subnetpool_name = data_utils.rand_name('subnetpools_update')
- body = self.client.update_subnetpools(subnetpool_id,
- name=subnetpool_name)
+ body = self.subnetpools_client.update_subnetpool(subnetpool_id,
+ name=subnetpool_name)
self.assertEqual(subnetpool_name, body["subnetpool"]["name"])
# delete subnet pool
- body = self.client.delete_subnetpools(subnetpool_id)
- self.assertRaises(lib_exc.NotFound, self.client.show_subnetpools,
+ body = self.subnetpools_client.delete_subnetpool(subnetpool_id)
+ self.assertRaises(lib_exc.NotFound,
+ self.subnetpools_client.show_subnetpool,
subnetpool_id)
def _cleanup_subnetpools(self, subnetpool_id):
# this is used to cleanup the resources
try:
- self.client.delete_subnetpools(subnetpool_id)
+ self.subnetpools_client.delete_subnetpool(subnetpool_id)
except lib_exc.NotFound:
pass
diff --git a/tempest/api/orchestration/base.py b/tempest/api/orchestration/base.py
index 4968835..c93b5ed 100644
--- a/tempest/api/orchestration/base.py
+++ b/tempest/api/orchestration/base.py
@@ -12,7 +12,6 @@
import os.path
-from oslo_log import log as logging
from tempest_lib import exceptions as lib_exc
import yaml
@@ -22,8 +21,6 @@
CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
class BaseOrchestrationTest(tempest.test.BaseTestCase):
"""Base test case class for all Orchestration API tests."""
diff --git a/tempest/api/orchestration/stacks/test_environment.py b/tempest/api/orchestration/stacks/test_environment.py
index 0416bc7..9d2b425 100644
--- a/tempest/api/orchestration/stacks/test_environment.py
+++ b/tempest/api/orchestration/stacks/test_environment.py
@@ -10,8 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import logging
-
from tempest.api.orchestration import base
from tempest.common.utils import data_utils
from tempest import config
@@ -19,7 +17,6 @@
CONF = config.CONF
-LOG = logging.getLogger(__name__)
class StackEnvironmentTest(base.BaseOrchestrationTest):
diff --git a/tempest/api/orchestration/stacks/test_limits.py b/tempest/api/orchestration/stacks/test_limits.py
index bb5b89d..2acf97b 100644
--- a/tempest/api/orchestration/stacks/test_limits.py
+++ b/tempest/api/orchestration/stacks/test_limits.py
@@ -10,8 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import logging
-
from tempest_lib import exceptions as lib_exc
from tempest.api.orchestration import base
@@ -21,8 +19,6 @@
CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
class TestServerStackLimits(base.BaseOrchestrationTest):
diff --git a/tempest/api/orchestration/stacks/test_neutron_resources.py b/tempest/api/orchestration/stacks/test_neutron_resources.py
index 8466e11..09e863e 100644
--- a/tempest/api/orchestration/stacks/test_neutron_resources.py
+++ b/tempest/api/orchestration/stacks/test_neutron_resources.py
@@ -16,7 +16,6 @@
import netaddr
from tempest.api.orchestration import base
-from tempest.common import credentials_factory as credentials
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
@@ -37,8 +36,8 @@
@classmethod
def setup_credentials(cls):
+ cls.set_network_resources()
super(NeutronResourcesTestJSON, cls).setup_credentials()
- cls.os = credentials.ConfiguredUserManager()
@classmethod
def setup_clients(cls):
diff --git a/tempest/api/orchestration/stacks/test_non_empty_stack.py b/tempest/api/orchestration/stacks/test_non_empty_stack.py
index 4bc2c17..3be5bb6 100644
--- a/tempest/api/orchestration/stacks/test_non_empty_stack.py
+++ b/tempest/api/orchestration/stacks/test_non_empty_stack.py
@@ -10,8 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import logging
-
from tempest.api.orchestration import base
from tempest.common.utils import data_utils
from tempest import config
@@ -19,8 +17,6 @@
CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
class StacksTestJSON(base.BaseOrchestrationTest):
diff --git a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
index b4d7fa0..0400e76 100644
--- a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
+++ b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
@@ -10,17 +10,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-
-import logging
-
from tempest.api.orchestration import base
from tempest.common.utils import data_utils
from tempest import test
-LOG = logging.getLogger(__name__)
-
-
class NovaKeyPairResourcesYAMLTest(base.BaseOrchestrationTest):
_tpl_type = 'yaml'
_resource = 'resources'
diff --git a/tempest/api/orchestration/stacks/test_soft_conf.py b/tempest/api/orchestration/stacks/test_soft_conf.py
index 34d93e4..ab45929 100644
--- a/tempest/api/orchestration/stacks/test_soft_conf.py
+++ b/tempest/api/orchestration/stacks/test_soft_conf.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log as logging
from tempest_lib import exceptions as lib_exc
from tempest.api.orchestration import base
@@ -18,7 +17,6 @@
from tempest import config
from tempest import test
-LOG = logging.getLogger(__name__)
CONF = config.CONF
diff --git a/tempest/api/orchestration/stacks/test_stacks.py b/tempest/api/orchestration/stacks/test_stacks.py
index f766b00..28463ab 100644
--- a/tempest/api/orchestration/stacks/test_stacks.py
+++ b/tempest/api/orchestration/stacks/test_stacks.py
@@ -10,16 +10,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log as logging
-
from tempest.api.orchestration import base
from tempest.common.utils import data_utils
from tempest import test
-LOG = logging.getLogger(__name__)
-
-
class StacksTestJSON(base.BaseOrchestrationTest):
empty_template = "HeatTemplateFormatVersion: '2012-12-12'\n"
diff --git a/tempest/api/orchestration/stacks/test_volumes.py b/tempest/api/orchestration/stacks/test_volumes.py
index ae9a411..e51551b 100644
--- a/tempest/api/orchestration/stacks/test_volumes.py
+++ b/tempest/api/orchestration/stacks/test_volumes.py
@@ -10,8 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import logging
-
from tempest_lib import exceptions as lib_exc
from tempest.api.orchestration import base
@@ -21,7 +19,6 @@
CONF = config.CONF
-LOG = logging.getLogger(__name__)
class CinderResourcesTest(base.BaseOrchestrationTest):
diff --git a/tempest/api/telemetry/base.py b/tempest/api/telemetry/base.py
index bbd01f0..bbf6db2 100644
--- a/tempest/api/telemetry/base.py
+++ b/tempest/api/telemetry/base.py
@@ -75,10 +75,11 @@
return body
@classmethod
- def create_image(cls, client):
- body = client.create_image(
- data_utils.rand_name('image'), container_format='bare',
- disk_format='raw', visibility='private')
+ def create_image(cls, client, **kwargs):
+ body = client.create_image(name=data_utils.rand_name('image'),
+ container_format='bare',
+ disk_format='raw',
+ **kwargs)
# TODO(jswarren) Move ['image'] up to initial body value assignment
# once both v1 and v2 glance clients include the full response
# object.
diff --git a/tempest/api/telemetry/test_telemetry_notification_api.py b/tempest/api/telemetry/test_telemetry_notification_api.py
index 7511505..a575125 100644
--- a/tempest/api/telemetry/test_telemetry_notification_api.py
+++ b/tempest/api/telemetry/test_telemetry_notification_api.py
@@ -39,7 +39,7 @@
@testtools.skipIf(not CONF.image_feature_enabled.api_v1,
"Glance api v1 is disabled")
def test_check_glance_v1_notifications(self):
- body = self.create_image(self.image_client)
+ body = self.create_image(self.image_client, is_public=False)
self.image_client.update_image(body['id'], data='data')
query = 'resource', 'eq', body['id']
@@ -55,7 +55,7 @@
@testtools.skipIf(not CONF.image_feature_enabled.api_v2,
"Glance api v2 is disabled")
def test_check_glance_v2_notifications(self):
- body = self.create_image(self.image_client_v2)
+ body = self.create_image(self.image_client_v2, visibility='private')
self.image_client_v2.store_image_file(body['id'], "file")
self.image_client_v2.show_image_file(body['id'])
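The create_image() change in telemetry/base.py and its two callers above fit together: the wrapper now forwards arbitrary keyword arguments, so the glance v1 test can pass is_public=False while the v2 test passes visibility='private'. A small sketch with stand-in glance clients (illustrative only, not the real image clients)::

    def create_image(client, **kwargs):
        # Mirrors the shape of the updated wrapper: fixed container and
        # disk formats, caller-chosen extras forwarded untouched.
        return client.create_image(name='notification-image',
                                   container_format='bare',
                                   disk_format='raw', **kwargs)

    class FakeGlanceV1(object):
        def create_image(self, **kwargs):
            return {'name': kwargs['name'],
                    'is_public': kwargs.get('is_public')}

    class FakeGlanceV2(object):
        def create_image(self, **kwargs):
            return {'name': kwargs['name'],
                    'visibility': kwargs.get('visibility')}

    print(create_image(FakeGlanceV1(), is_public=False))
    print(create_image(FakeGlanceV2(), visibility='private'))
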
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index 6d2aaea..60e6e6c 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log as logging
import six
from tempest.api.volume import base
from tempest.common.utils import data_utils
@@ -19,8 +18,6 @@
CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
class VolumeMultiBackendV2Test(base.BaseVolumeAdminTest):
@@ -71,7 +68,7 @@
else:
extra_specs = {spec_key_without_prefix: backend_name_key}
self.type = self.volume_types_client.create_volume_type(
- type_name, extra_specs=extra_specs)['volume_type']
+ name=type_name, extra_specs=extra_specs)['volume_type']
self.volume_type_id_list.append(self.type['id'])
params = {self.name_field: vol_name, 'volume_type': type_name}
diff --git a/tempest/api/volume/admin/test_snapshots_actions.py b/tempest/api/volume/admin/test_snapshots_actions.py
index aa6bfdf..f2bf613 100644
--- a/tempest/api/volume/admin/test_snapshots_actions.py
+++ b/tempest/api/volume/admin/test_snapshots_actions.py
@@ -50,7 +50,7 @@
snap_name = data_utils.rand_name(cls.__name__ + '-Snapshot')
params = {cls.name_field: snap_name}
cls.snapshot = cls.client.create_snapshot(
- cls.volume['id'], **params)['snapshot']
+ volume_id=cls.volume['id'], **params)['snapshot']
cls.client.wait_for_snapshot_status(cls.snapshot['id'],
'available')
@@ -77,7 +77,7 @@
def _create_reset_and_force_delete_temp_snapshot(self, status=None):
# Create snapshot, reset snapshot status,
# and force delete temp snapshot
- temp_snapshot = self.create_snapshot(self.volume['id'])
+ temp_snapshot = self.create_snapshot(volume_id=self.volume['id'])
if status:
self.admin_snapshots_client.\
reset_snapshot_status(temp_snapshot['id'], status)
@@ -110,7 +110,7 @@
status = 'error'
progress_alias = self._get_progress_alias()
self.client.update_snapshot_status(self.snapshot['id'],
- status, progress)
+ status=status, progress=progress)
snapshot_get = self.admin_snapshots_client.show_snapshot(
self.snapshot['id'])['snapshot']
self.assertEqual(status, snapshot_get['status'])
diff --git a/tempest/api/volume/admin/test_volume_snapshot_quotas_negative.py b/tempest/api/volume/admin/test_volume_snapshot_quotas_negative.py
index ce0b618..c66207f 100644
--- a/tempest/api/volume/admin/test_volume_snapshot_quotas_negative.py
+++ b/tempest/api/volume/admin/test_volume_snapshot_quotas_negative.py
@@ -52,14 +52,14 @@
# NOTE(gfidente): no need to delete in tearDown as
# they are created using utility wrapper methods.
cls.volume = cls.create_volume()
- cls.snapshot = cls.create_snapshot(cls.volume['id'])
+ cls.snapshot = cls.create_snapshot(volume_id=cls.volume['id'])
@test.attr(type='negative')
@test.idempotent_id('02bbf63f-6c05-4357-9d98-2926a94064ff')
def test_quota_volume_snapshots(self):
self.assertRaises(lib_exc.OverLimit,
self.snapshots_client.create_snapshot,
- self.volume['id'])
+ volume_id=self.volume['id'])
@test.attr(type='negative')
@test.idempotent_id('c99a1ca9-6cdf-498d-9fdf-25832babef27')
@@ -74,7 +74,7 @@
**new_quota_set)
self.assertRaises(lib_exc.OverLimit,
self.snapshots_client.create_snapshot,
- self.volume['id'])
+ volume_id=self.volume['id'])
class VolumeSnapshotNegativeV1TestJSON(VolumeSnapshotQuotasNegativeV2TestJSON):
diff --git a/tempest/api/volume/admin/test_volume_types.py b/tempest/api/volume/admin/test_volume_types.py
index 2d9019a..c032d9c 100644
--- a/tempest/api/volume/admin/test_volume_types.py
+++ b/tempest/api/volume/admin/test_volume_types.py
@@ -32,7 +32,7 @@
@test.idempotent_id('9d9b28e3-1b2e-4483-a2cc-24aa0ea1de54')
def test_volume_type_list(self):
- # List Volume types.
+ # List volume types.
body = self.volume_types_client.list_volume_types()['volume_types']
self.assertIsInstance(body, list)
@@ -50,7 +50,7 @@
for i in range(2):
vol_type_name = data_utils.rand_name("volume-type")
vol_type = self.volume_types_client.create_volume_type(
- vol_type_name,
+ name=vol_type_name,
extra_specs=extra_specs)['volume_type']
volume_types.append(vol_type)
self.addCleanup(self._delete_volume_type, vol_type['id'])
@@ -70,7 +70,7 @@
# Update volume with new volume_type
self.volumes_client.retype_volume(volume['id'],
- volume_type=volume_types[1]['id'])
+ new_type=volume_types[1]['id'])
self.volumes_client.wait_for_volume_status(volume['id'], 'available')
# Get volume details and Verify
@@ -97,7 +97,7 @@
extra_specs = {"storage_protocol": proto,
"vendor_name": vendor}
body = self.volume_types_client.create_volume_type(
- name,
+ name=name,
extra_specs=extra_specs)['volume_type']
self.assertIn('id', body)
self.addCleanup(self._delete_volume_type, body['id'])
@@ -125,7 +125,8 @@
provider = "LuksEncryptor"
control_location = "front-end"
name = data_utils.rand_name("volume-type")
- body = self.volume_types_client.create_volume_type(name)['volume_type']
+ body = self.volume_types_client.create_volume_type(
+ name=name)['volume_type']
self.addCleanup(self._delete_volume_type, body['id'])
# Create encryption type
diff --git a/tempest/api/volume/admin/test_volume_types_extra_specs.py b/tempest/api/volume/admin/test_volume_types_extra_specs.py
index bec803c..502cd86 100644
--- a/tempest/api/volume/admin/test_volume_types_extra_specs.py
+++ b/tempest/api/volume/admin/test_volume_types_extra_specs.py
@@ -25,7 +25,7 @@
super(VolumeTypesExtraSpecsV2Test, cls).resource_setup()
vol_type_name = data_utils.rand_name('Volume-type')
cls.volume_type = cls.volume_types_client.create_volume_type(
- vol_type_name)['volume_type']
+ name=vol_type_name)['volume_type']
@classmethod
def resource_cleanup(cls):
diff --git a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
index 040ef53..6483af3 100644
--- a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
+++ b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
@@ -30,7 +30,7 @@
vol_type_name = data_utils.rand_name('Volume-type')
cls.extra_specs = {"spec1": "val1"}
cls.volume_type = cls.volume_types_client.create_volume_type(
- vol_type_name,
+ name=vol_type_name,
extra_specs=cls.extra_specs)['volume_type']
@classmethod
@@ -70,7 +70,7 @@
def test_update_multiple_extra_spec(self):
# Should not update volume type extra specs with multiple specs as
# body.
- extra_spec = {"spec1": "val2", 'spec2': 'val1'}
+ extra_spec = {"spec1": "val2", "spec2": "val1"}
self.assertRaises(
lib_exc.BadRequest,
self.volume_types_client.update_volume_type_extra_specs,
@@ -101,7 +101,7 @@
self.assertRaises(
lib_exc.BadRequest,
self.volume_types_client.create_volume_type_extra_specs,
- self.volume_type['id'], ['invalid'])
+ self.volume_type['id'], extra_specs=['invalid'])
@test.idempotent_id('031cda8b-7d23-4246-8bf6-bbe73fd67074')
def test_delete_nonexistent_volume_type_id(self):
diff --git a/tempest/api/volume/admin/test_volume_types_negative.py b/tempest/api/volume/admin/test_volume_types_negative.py
index 2694b63..bc32fc9 100644
--- a/tempest/api/volume/admin/test_volume_types_negative.py
+++ b/tempest/api/volume/admin/test_volume_types_negative.py
@@ -36,7 +36,7 @@
def test_create_with_empty_name(self):
# Should not be able to create volume type with an empty name.
self.assertRaises(lib_exc.BadRequest,
- self.volume_types_client.create_volume_type, '')
+ self.volume_types_client.create_volume_type, name='')
@test.idempotent_id('994610d6-0476-4018-a644-a2602ef5d4aa')
def test_get_nonexistent_type_id(self):
diff --git a/tempest/api/volume/admin/test_volumes_actions.py b/tempest/api/volume/admin/test_volumes_actions.py
index 6c32321..253a3e1 100644
--- a/tempest/api/volume/admin/test_volumes_actions.py
+++ b/tempest/api/volume/admin/test_volumes_actions.py
@@ -48,12 +48,12 @@
def _reset_volume_status(self, volume_id, status):
# Reset the volume status
body = self.admin_volume_client.reset_volume_status(volume_id,
- status)
+ status=status)
return body
def tearDown(self):
# Set volume's status to available after test
- self._reset_volume_status(self.volume['id'], 'available')
+ self._reset_volume_status(self.volume['id'], status='available')
super(VolumesActionsV2Test, self).tearDown()
def _create_temp_volume(self):
diff --git a/tempest/api/volume/admin/test_volumes_backup.py b/tempest/api/volume/admin/test_volumes_backup.py
index ed34a9b..4b2d3f3 100644
--- a/tempest/api/volume/admin/test_volumes_backup.py
+++ b/tempest/api/volume/admin/test_volumes_backup.py
@@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log as logging
from tempest_lib import decorators
from tempest.api.volume import base
@@ -22,7 +21,6 @@
from tempest import test
CONF = config.CONF
-LOG = logging.getLogger(__name__)
class VolumesBackupsV2Test(base.BaseVolumeAdminTest):
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 12e6761..cc906e5 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log as logging
from tempest_lib import exceptions as lib_exc
from tempest.common import compute
@@ -24,8 +23,6 @@
CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
class BaseVolumeTest(tempest.test.BaseTestCase):
"""Base test case class for all Cinder API tests."""
@@ -62,7 +59,7 @@
super(BaseVolumeTest, cls).setup_clients()
cls.servers_client = cls.os.servers_client
cls.compute_networks_client = cls.os.compute_networks_client
- cls.images_client = cls.os.images_client
+ cls.compute_images_client = cls.os.compute_images_client
if cls._api_version == 1:
cls.snapshots_client = cls.os.snapshots_client
@@ -106,14 +103,14 @@
super(BaseVolumeTest, cls).resource_cleanup()
@classmethod
- def create_volume(cls, size=None, **kwargs):
+ def create_volume(cls, **kwargs):
"""Wrapper utility that returns a test volume."""
name = data_utils.rand_name('Volume')
name_field = cls.special_fields['name_field']
kwargs[name_field] = name
- volume = cls.volumes_client.create_volume(size, **kwargs)['volume']
+ volume = cls.volumes_client.create_volume(**kwargs)['volume']
cls.volumes.append(volume)
cls.volumes_client.wait_for_volume_status(volume['id'], 'available')
@@ -123,7 +120,7 @@
def create_snapshot(cls, volume_id=1, **kwargs):
"""Wrapper utility that returns a test snapshot."""
snapshot = cls.snapshots_client.create_snapshot(
- volume_id, **kwargs)['snapshot']
+ volume_id=volume_id, **kwargs)['snapshot']
cls.snapshots.append(snapshot)
cls.snapshots_client.wait_for_snapshot_status(snapshot['id'],
'available')
@@ -217,8 +214,8 @@
"""create a test Qos-Specs."""
name = name or data_utils.rand_name(cls.__name__ + '-QoS')
consumer = consumer or 'front-end'
- qos_specs = cls.volume_qos_client.create_qos(name, consumer,
- **kwargs)['qos_specs']
+ qos_specs = cls.volume_qos_client.create_qos(
+ name=name, consumer=consumer, **kwargs)['qos_specs']
cls.qos_specs.append(qos_specs['id'])
return qos_specs
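The volume base-class helpers above switch to keyword-only forwarding: create_volume() drops its positional size argument, and create_snapshot()/create_qos() name every field explicitly. A hedged sketch of that forwarding shape, with a stub client standing in for the real cinder volumes client::

    class FakeVolumesClient(object):
        def create_volume(self, **kwargs):
            # The real client turns its keyword arguments into the request
            # body, so whatever the test passes (size, display_name, ...)
            # travels through unchanged.
            return {'volume': dict(kwargs, id='vol-1', status='available')}

    def create_volume(client, **kwargs):
        # Wrapper in the style of the updated helper: inject a name field,
        # forward everything else as keywords.
        kwargs.setdefault('display_name', 'Volume-demo')
        return client.create_volume(**kwargs)['volume']

    vol = create_volume(FakeVolumesClient(), size=1)
    assert vol['size'] == 1 and vol['display_name'] == 'Volume-demo'
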
diff --git a/tempest/api/volume/test_qos.py b/tempest/api/volume/test_qos.py
index 2f7c3df..722a39a 100644
--- a/tempest/api/volume/test_qos.py
+++ b/tempest/api/volume/test_qos.py
@@ -53,7 +53,7 @@
def _create_test_volume_type(self):
vol_type_name = utils.rand_name("volume-type")
vol_type = self.volume_types_client.create_volume_type(
- vol_type_name)['volume_type']
+ name=vol_type_name)['volume_type']
self.addCleanup(self.volume_types_client.delete_volume_type,
vol_type['id'])
return vol_type
diff --git a/tempest/api/volume/test_snapshot_metadata.py b/tempest/api/volume/test_snapshot_metadata.py
index e50ca95..688baf5 100644
--- a/tempest/api/volume/test_snapshot_metadata.py
+++ b/tempest/api/volume/test_snapshot_metadata.py
@@ -45,7 +45,7 @@
def tearDown(self):
# Update the metadata to {}
- self.client.update_snapshot_metadata(self.snapshot_id, {})
+ self.client.update_snapshot_metadata(self.snapshot_id, metadata={})
super(SnapshotV2MetadataTestJSON, self).tearDown()
@test.idempotent_id('a2f20f99-e363-4584-be97-bc33afb1a56c')
@@ -89,7 +89,7 @@
# Update metadata item
body = self.client.update_snapshot_metadata(
- self.snapshot_id, update)['metadata']
+ self.snapshot_id, metadata=update)['metadata']
# Get the metadata of the snapshot
body = self.client.show_snapshot_metadata(
self.snapshot_id)['metadata']
@@ -114,7 +114,7 @@
self.assertThat(body.items(), matchers.ContainsAll(metadata.items()))
# Update metadata item
body = self.client.update_snapshot_metadata_item(
- self.snapshot_id, "key3", update_item)['meta']
+ self.snapshot_id, "key3", meta=update_item)['meta']
# Get the metadata of the snapshot
body = self.client.show_snapshot_metadata(
self.snapshot_id)['metadata']
diff --git a/tempest/api/volume/test_volume_transfers.py b/tempest/api/volume/test_volume_transfers.py
index c0b6b7e..7046dcf 100644
--- a/tempest/api/volume/test_volume_transfers.py
+++ b/tempest/api/volume/test_volume_transfers.py
@@ -47,7 +47,8 @@
self.addCleanup(self._delete_volume, volume['id'])
# Create a volume transfer
- transfer = self.client.create_volume_transfer(volume['id'])['transfer']
+ transfer = self.client.create_volume_transfer(
+ volume_id=volume['id'])['transfer']
transfer_id = transfer['id']
auth_key = transfer['auth_key']
self.client.wait_for_volume_status(volume['id'],
@@ -63,8 +64,8 @@
self.assertThat(len(body), matchers.GreaterThan(0))
# Accept a volume transfer by alt_tenant
- body = self.alt_client.accept_volume_transfer(transfer_id,
- auth_key)['transfer']
+ body = self.alt_client.accept_volume_transfer(
+ transfer_id, auth_key=auth_key)['transfer']
self.alt_client.wait_for_volume_status(volume['id'], 'available')
@test.idempotent_id('ab526943-b725-4c07-b875-8e8ef87a2c30')
@@ -74,7 +75,8 @@
self.addCleanup(self._delete_volume, volume['id'])
# Create a volume transfer
- body = self.client.create_volume_transfer(volume['id'])['transfer']
+ body = self.client.create_volume_transfer(
+ volume_id=volume['id'])['transfer']
transfer_id = body['id']
self.client.wait_for_volume_status(volume['id'],
'awaiting-transfer')
diff --git a/tempest/api/volume/test_volumes_actions.py b/tempest/api/volume/test_volumes_actions.py
index d4636ee..5f9ea7f 100644
--- a/tempest/api/volume/test_volumes_actions.py
+++ b/tempest/api/volume/test_volumes_actions.py
@@ -62,8 +62,8 @@
# Volume is attached and detached successfully from an instance
mountpoint = '/dev/vdc'
self.client.attach_volume(self.volume['id'],
- self.server['id'],
- mountpoint)
+ instance_uuid=self.server['id'],
+ mountpoint=mountpoint)
self.client.wait_for_volume_status(self.volume['id'], 'in-use')
self.client.detach_volume(self.volume['id'])
self.client.wait_for_volume_status(self.volume['id'], 'available')
@@ -74,7 +74,8 @@
def test_volume_bootable(self):
# Verify that a volume bootable flag is retrieved
for bool_bootable in [True, False]:
- self.client.set_bootable_volume(self.volume['id'], bool_bootable)
+ self.client.set_bootable_volume(self.volume['id'],
+ bootable=bool_bootable)
fetched_volume = self.client.show_volume(
self.volume['id'])['volume']
# Get Volume information
@@ -88,8 +89,8 @@
# Verify that a volume's attachment information is retrieved
mountpoint = '/dev/vdc'
self.client.attach_volume(self.volume['id'],
- self.server['id'],
- mountpoint)
+ instance_uuid=self.server['id'],
+ mountpoint=mountpoint)
self.client.wait_for_volume_status(self.volume['id'], 'in-use')
# NOTE(gfidente): added in reverse order because functions will be
# called in reverse order to the order they are added (LIFO)
@@ -114,8 +115,8 @@
# using the Glance image_client and from Cinder via tearDownClass.
image_name = data_utils.rand_name('Image')
body = self.client.upload_volume(
- self.volume['id'], image_name,
- CONF.volume.disk_format)['os-volume_upload_image']
+ self.volume['id'], image_name=image_name,
+ disk_format=CONF.volume.disk_format)['os-volume_upload_image']
image_id = body["image_id"]
self.addCleanup(self.image_client.delete_image, image_id)
self.image_client.wait_for_image_status(image_id, 'active')
@@ -142,7 +143,7 @@
# Update volume readonly true
readonly = True
self.client.update_volume_readonly(self.volume['id'],
- readonly)
+ readonly=readonly)
# Get Volume information
fetched_volume = self.client.show_volume(self.volume['id'])['volume']
bool_flag = self._is_true(fetched_volume['metadata']['readonly'])
@@ -150,7 +151,8 @@
# Update volume readonly false
readonly = False
- self.client.update_volume_readonly(self.volume['id'], readonly)
+ self.client.update_volume_readonly(self.volume['id'],
+ readonly=readonly)
# Get Volume information
fetched_volume = self.client.show_volume(self.volume['id'])['volume']
diff --git a/tempest/api/volume/test_volumes_extend.py b/tempest/api/volume/test_volumes_extend.py
index 78f5571..ed1e5c5 100644
--- a/tempest/api/volume/test_volumes_extend.py
+++ b/tempest/api/volume/test_volumes_extend.py
@@ -32,7 +32,7 @@
# Extend Volume Test.
self.volume = self.create_volume()
extend_size = int(self.volume['size']) + 1
- self.client.extend_volume(self.volume['id'], extend_size)
+ self.client.extend_volume(self.volume['id'], new_size=extend_size)
self.client.wait_for_volume_status(self.volume['id'], 'available')
volume = self.client.show_volume(self.volume['id'])['volume']
self.assertEqual(int(volume['size']), extend_size)
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index 35c8898..aa3ef2f 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -133,7 +133,8 @@
@test.idempotent_id('54a01030-c7fc-447c-86ee-c1182beae638')
@test.services('image')
def test_volume_create_get_update_delete_from_image(self):
- image = self.images_client.show_image(CONF.compute.image_ref)['image']
+ image = self.compute_images_client.show_image(
+ CONF.compute.image_ref)['image']
min_disk = image.get('minDisk')
disk_size = max(min_disk, CONF.volume.volume_size)
self._volume_create_get_update_delete(
diff --git a/tempest/api/volume/test_volumes_list.py b/tempest/api/volume/test_volumes_list.py
index b776494..38a5a80 100644
--- a/tempest/api/volume/test_volumes_list.py
+++ b/tempest/api/volume/test_volumes_list.py
@@ -15,15 +15,12 @@
# under the License.
import operator
-from oslo_log import log as logging
from testtools import matchers
from tempest.api.volume import base
from tempest.common.utils import data_utils
from tempest import test
-LOG = logging.getLogger(__name__)
-
class VolumesV2ListTestJSON(base.BaseVolumeTest):
# NOTE: This test creates a number of 1G volumes. To run successfully,
diff --git a/tempest/api/volume/test_volumes_negative.py b/tempest/api/volume/test_volumes_negative.py
index 0af40ea..ad6f556 100644
--- a/tempest/api/volume/test_volumes_negative.py
+++ b/tempest/api/volume/test_volumes_negative.py
@@ -190,8 +190,8 @@
self.assertRaises(lib_exc.NotFound,
self.client.attach_volume,
str(uuid.uuid4()),
- server['id'],
- self.mountpoint)
+ instance_uuid=server['id'],
+ mountpoint=self.mountpoint)
@test.attr(type=['negative'])
@test.idempotent_id('9f9c24e4-011d-46b5-b992-952140ce237a')
@@ -206,7 +206,7 @@
# Extend volume with smaller size than original size.
extend_size = 0
self.assertRaises(lib_exc.BadRequest, self.client.extend_volume,
- self.volume['id'], extend_size)
+ self.volume['id'], new_size=extend_size)
@test.attr(type=['negative'])
@test.idempotent_id('5d0b480d-e833-439f-8a5a-96ad2ed6f22f')
@@ -214,7 +214,7 @@
# Extend volume when size is non number.
extend_size = 'abc'
self.assertRaises(lib_exc.BadRequest, self.client.extend_volume,
- self.volume['id'], extend_size)
+ self.volume['id'], new_size=extend_size)
@test.attr(type=['negative'])
@test.idempotent_id('355218f1-8991-400a-a6bb-971239287d92')
@@ -222,7 +222,7 @@
# Extend volume with None size.
extend_size = None
self.assertRaises(lib_exc.BadRequest, self.client.extend_volume,
- self.volume['id'], extend_size)
+ self.volume['id'], new_size=extend_size)
@test.attr(type=['negative'])
@test.idempotent_id('8f05a943-013c-4063-ac71-7baf561e82eb')
@@ -230,7 +230,7 @@
# Extend volume size when volume is nonexistent.
extend_size = int(self.volume['size']) + 1
self.assertRaises(lib_exc.NotFound, self.client.extend_volume,
- str(uuid.uuid4()), extend_size)
+ str(uuid.uuid4()), new_size=extend_size)
@test.attr(type=['negative'])
@test.idempotent_id('aff8ba64-6d6f-4f2e-bc33-41a08ee9f115')
@@ -238,7 +238,7 @@
# Extend volume size when passing volume id is None.
extend_size = int(self.volume['size']) + 1
self.assertRaises(lib_exc.NotFound, self.client.extend_volume,
- None, extend_size)
+ None, new_size=extend_size)
@test.attr(type=['negative'])
@test.idempotent_id('ac6084c0-0546-45f9-b284-38a367e0e0e2')
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index 856adcc..c79235a 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -10,14 +10,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log as logging
-
from tempest.api.volume import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
-LOG = logging.getLogger(__name__)
CONF = config.CONF
@@ -42,15 +39,15 @@
self.volumes_client.detach_volume(volume_id)
self.volumes_client.wait_for_volume_status(volume_id, 'available')
- def _list_by_param_values_and_assert(self, params, with_detail=False):
+ def _list_by_param_values_and_assert(self, with_detail=False, **params):
"""list or list_details with given params and validates result."""
if with_detail:
fetched_snap_list = self.snapshots_client.list_snapshots(
- detail=True, params=params)['snapshots']
+ detail=True, **params)['snapshots']
else:
fetched_snap_list = self.snapshots_client.list_snapshots(
- params=params)['snapshots']
+ **params)['snapshots']
# Validating params of fetched snapshots
for snap in fetched_snap_list:
@@ -135,16 +132,16 @@
# Verify list snapshots by display_name filter
params = {self.name_field: snapshot[self.name_field]}
- self._list_by_param_values_and_assert(params)
+ self._list_by_param_values_and_assert(**params)
# Verify list snapshots by status filter
params = {'status': 'available'}
- self._list_by_param_values_and_assert(params)
+ self._list_by_param_values_and_assert(**params)
# Verify list snapshots by status and display name filter
params = {'status': 'available',
self.name_field: snapshot[self.name_field]}
- self._list_by_param_values_and_assert(params)
+ self._list_by_param_values_and_assert(**params)
@test.idempotent_id('220a1022-1fcd-4a74-a7bd-6b859156cda2')
def test_snapshots_list_details_with_params(self):
@@ -157,14 +154,14 @@
# Verify list snapshot details by display_name filter
params = {self.name_field: snapshot[self.name_field]}
- self._list_by_param_values_and_assert(params, with_detail=True)
+ self._list_by_param_values_and_assert(with_detail=True, **params)
# Verify list snapshot details by status filter
params = {'status': 'available'}
- self._list_by_param_values_and_assert(params, with_detail=True)
+ self._list_by_param_values_and_assert(with_detail=True, **params)
# Verify list snapshot details by status and display name filter
params = {'status': 'available',
self.name_field: snapshot[self.name_field]}
- self._list_by_param_values_and_assert(params, with_detail=True)
+ self._list_by_param_values_and_assert(with_detail=True, **params)
@test.idempotent_id('677863d1-3142-456d-b6ac-9924f667a7f4')
def test_volume_from_snapshot(self):
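The helper refactor above changes how filters reach the snapshots client: _list_by_param_values_and_assert() now accepts **params, and the callers expand their filter dict with **, so each filter arrives as a separate keyword argument. A minimal sketch of that call shape (stand-in function only)::

    def list_snapshots(detail=False, **params):
        # Stand-in for snapshots_client.list_snapshots(); it just echoes
        # the filters it would have sent as query parameters.
        return {'detail': detail, 'filters': params}

    filters = {'status': 'available', 'display_name': 'snap-demo'}
    print(list_snapshots(**filters))
    print(list_snapshots(detail=True, **filters))
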
diff --git a/tempest/api/volume/test_volumes_snapshots_negative.py b/tempest/api/volume/test_volumes_snapshots_negative.py
index b604360..d46c9b5 100644
--- a/tempest/api/volume/test_volumes_snapshots_negative.py
+++ b/tempest/api/volume/test_volumes_snapshots_negative.py
@@ -37,7 +37,7 @@
s_name = data_utils.rand_name('snap')
self.assertRaises(lib_exc.NotFound,
self.snapshots_client.create_snapshot,
- str(uuid.uuid4()), display_name=s_name)
+ volume_id=str(uuid.uuid4()), display_name=s_name)
@test.attr(type=['negative'])
@test.idempotent_id('bb9da53e-d335-4309-9c15-7e76fd5e4d6d')
@@ -46,7 +46,7 @@
s_name = data_utils.rand_name('snap')
self.assertRaises(lib_exc.NotFound,
self.snapshots_client.create_snapshot,
- None, display_name=s_name)
+ volume_id=None, display_name=s_name)
class VolumesV1SnapshotNegativeTestJSON(VolumesV2SnapshotNegativeTestJSON):
diff --git a/tempest/api_schema/response/compute/v2_1/floating_ips.py b/tempest/api_schema/response/compute/v2_1/floating_ips.py
deleted file mode 100644
index 3551681..0000000
--- a/tempest/api_schema/response/compute/v2_1/floating_ips.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# Copyright 2014 NEC Corporation. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api_schema.response.compute.v2_1 import parameter_types
-
-common_floating_ip_info = {
- 'type': 'object',
- 'properties': {
- # NOTE: Now the type of 'id' is integer, but
- # here allows 'string' also because we will be
- # able to change it to 'uuid' in the future.
- 'id': {'type': ['integer', 'string']},
- 'pool': {'type': ['string', 'null']},
- 'instance_id': {'type': ['string', 'null']},
- 'ip': parameter_types.ip_address,
- 'fixed_ip': parameter_types.ip_address
- },
- 'additionalProperties': False,
- 'required': ['id', 'pool', 'instance_id',
- 'ip', 'fixed_ip'],
-
-}
-list_floating_ips = {
- 'status_code': [200],
- 'response_body': {
- 'type': 'object',
- 'properties': {
- 'floating_ips': {
- 'type': 'array',
- 'items': common_floating_ip_info
- },
- },
- 'additionalProperties': False,
- 'required': ['floating_ips'],
- }
-}
-
-create_get_floating_ip = {
- 'status_code': [200],
- 'response_body': {
- 'type': 'object',
- 'properties': {
- 'floating_ip': common_floating_ip_info
- },
- 'additionalProperties': False,
- 'required': ['floating_ip'],
- }
-}
-
-list_floating_ip_pools = {
- 'status_code': [200],
- 'response_body': {
- 'type': 'object',
- 'properties': {
- 'floating_ip_pools': {
- 'type': 'array',
- 'items': {
- 'type': 'object',
- 'properties': {
- 'name': {'type': 'string'}
- },
- 'additionalProperties': False,
- 'required': ['name'],
- }
- }
- },
- 'additionalProperties': False,
- 'required': ['floating_ip_pools'],
- }
-}
-
-add_remove_floating_ip = {
- 'status_code': [202]
-}
-
-create_floating_ips_bulk = {
- 'status_code': [200],
- 'response_body': {
- 'type': 'object',
- 'properties': {
- 'floating_ips_bulk_create': {
- 'type': 'object',
- 'properties': {
- 'interface': {'type': ['string', 'null']},
- 'ip_range': {'type': 'string'},
- 'pool': {'type': ['string', 'null']},
- },
- 'additionalProperties': False,
- 'required': ['interface', 'ip_range', 'pool'],
- }
- },
- 'additionalProperties': False,
- 'required': ['floating_ips_bulk_create'],
- }
-}
-
-delete_floating_ips_bulk = {
- 'status_code': [200],
- 'response_body': {
- 'type': 'object',
- 'properties': {
- 'floating_ips_bulk_delete': {'type': 'string'}
- },
- 'additionalProperties': False,
- 'required': ['floating_ips_bulk_delete'],
- }
-}
-
-list_floating_ips_bulk = {
- 'status_code': [200],
- 'response_body': {
- 'type': 'object',
- 'properties': {
- 'floating_ip_info': {
- 'type': 'array',
- 'items': {
- 'type': 'object',
- 'properties': {
- 'address': parameter_types.ip_address,
- 'instance_uuid': {'type': ['string', 'null']},
- 'interface': {'type': ['string', 'null']},
- 'pool': {'type': ['string', 'null']},
- 'project_id': {'type': ['string', 'null']},
- 'fixed_ip': parameter_types.ip_address
- },
- 'additionalProperties': False,
- # NOTE: fixed_ip is introduced after JUNO release,
- # So it is not defined as 'required'.
- 'required': ['address', 'instance_uuid', 'interface',
- 'pool', 'project_id'],
- }
- }
- },
- 'additionalProperties': False,
- 'required': ['floating_ip_info'],
- }
-}
diff --git a/tempest/api_schema/response/compute/v2_1/parameter_types.py b/tempest/api_schema/response/compute/v2_1/parameter_types.py
deleted file mode 100644
index 07cc890..0000000
--- a/tempest/api_schema/response/compute/v2_1/parameter_types.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright 2014 NEC Corporation. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-links = {
- 'type': 'array',
- 'items': {
- 'type': 'object',
- 'properties': {
- 'href': {
- 'type': 'string',
- 'format': 'uri'
- },
- 'rel': {'type': 'string'}
- },
- 'additionalProperties': False,
- 'required': ['href', 'rel']
- }
-}
-
-mac_address = {
- 'type': 'string',
- 'pattern': '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
-}
-
-ip_address = {
- 'oneOf': [
- {
- 'type': 'string',
- 'oneOf': [
- {'format': 'ipv4'},
- {'format': 'ipv6'}
- ]
- },
- {'type': 'null'}
- ]
-}
-
-access_ip_v4 = {
- 'type': 'string',
- 'oneOf': [{'format': 'ipv4'}, {'enum': ['']}]
-}
-
-access_ip_v6 = {
- 'type': 'string',
- 'oneOf': [{'format': 'ipv6'}, {'enum': ['']}]
-}
-
-addresses = {
- 'type': 'object',
- 'patternProperties': {
- # NOTE: Here is for 'private' or something.
- '^[a-zA-Z0-9-_.]+$': {
- 'type': 'array',
- 'items': {
- 'type': 'object',
- 'properties': {
- 'version': {'type': 'integer'},
- 'addr': {
- 'type': 'string',
- 'oneOf': [
- {'format': 'ipv4'},
- {'format': 'ipv6'}
- ]
- }
- },
- 'additionalProperties': False,
- 'required': ['version', 'addr']
- }
- }
- }
-}
-
-response_header = {
- 'connection': {'type': 'string'},
- 'content-length': {'type': 'string'},
- 'content-type': {'type': 'string'},
- 'status': {'type': 'string'},
- 'x-compute-request-id': {'type': 'string'},
- 'vary': {'type': 'string'},
- 'x-openstack-nova-api-version': {'type': 'string'},
- 'date': {
- 'type': 'string',
- 'format': 'data-time'
- }
-}
diff --git a/tempest/api_schema/response/compute/v2_1/security_groups.py b/tempest/api_schema/response/compute/v2_1/security_groups.py
deleted file mode 100644
index 5ed5a5c..0000000
--- a/tempest/api_schema/response/compute/v2_1/security_groups.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# Copyright 2014 NEC Corporation. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-common_security_group_rule = {
- 'from_port': {'type': ['integer', 'null']},
- 'to_port': {'type': ['integer', 'null']},
- 'group': {
- 'type': 'object',
- 'properties': {
- 'tenant_id': {'type': 'string'},
- 'name': {'type': 'string'}
- },
- 'additionalProperties': False,
- },
- 'ip_protocol': {'type': ['string', 'null']},
- # 'parent_group_id' can be UUID so defining it as 'string' also.
- 'parent_group_id': {'type': ['string', 'integer', 'null']},
- 'ip_range': {
- 'type': 'object',
- 'properties': {
- 'cidr': {'type': 'string'}
- },
- 'additionalProperties': False,
- # When optional argument is provided in request body
- # like 'group_id' then, attribute 'cidr' does not
- # comes in response body. So it is not 'required'.
- },
- 'id': {'type': ['string', 'integer']}
-}
-
-common_security_group = {
- 'type': 'object',
- 'properties': {
- 'id': {'type': ['integer', 'string']},
- 'name': {'type': 'string'},
- 'tenant_id': {'type': 'string'},
- 'rules': {
- 'type': 'array',
- 'items': {
- 'type': ['object', 'null'],
- 'properties': common_security_group_rule,
- 'additionalProperties': False,
- }
- },
- 'description': {'type': 'string'},
- },
- 'additionalProperties': False,
- 'required': ['id', 'name', 'tenant_id', 'rules', 'description'],
-}
-
-list_security_groups = {
- 'status_code': [200],
- 'response_body': {
- 'type': 'object',
- 'properties': {
- 'security_groups': {
- 'type': 'array',
- 'items': common_security_group
- }
- },
- 'additionalProperties': False,
- 'required': ['security_groups']
- }
-}
-
-get_security_group = create_security_group = update_security_group = {
- 'status_code': [200],
- 'response_body': {
- 'type': 'object',
- 'properties': {
- 'security_group': common_security_group
- },
- 'additionalProperties': False,
- 'required': ['security_group']
- }
-}
-
-delete_security_group = {
- 'status_code': [202]
-}
-
-create_security_group_rule = {
- 'status_code': [200],
- 'response_body': {
- 'type': 'object',
- 'properties': {
- 'security_group_rule': {
- 'type': 'object',
- 'properties': common_security_group_rule,
- 'additionalProperties': False,
- 'required': ['from_port', 'to_port', 'group', 'ip_protocol',
- 'parent_group_id', 'id', 'ip_range']
- }
- },
- 'additionalProperties': False,
- 'required': ['security_group_rule']
- }
-}
-
-delete_security_group_rule = {
- 'status_code': [202]
-}
diff --git a/tempest/api_schema/response/compute/v2_1/servers.py b/tempest/api_schema/response/compute/v2_1/servers.py
deleted file mode 100644
index 38f7c82..0000000
--- a/tempest/api_schema/response/compute/v2_1/servers.py
+++ /dev/null
@@ -1,549 +0,0 @@
-# Copyright 2014 NEC Corporation. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-
-from tempest.api_schema.response.compute.v2_1 import parameter_types
-
-create_server = {
- 'status_code': [202],
- 'response_body': {
- 'type': 'object',
- 'properties': {
- 'server': {
- 'type': 'object',
- 'properties': {
- 'id': {'type': 'string'},
- 'security_groups': {'type': 'array'},
- 'links': parameter_types.links,
- 'OS-DCF:diskConfig': {'type': 'string'}
- },
- 'additionalProperties': False,
- # NOTE: OS-DCF:diskConfig & security_groups are API extension,
- # and some environments return a response without these
- # attributes.So they are not 'required'.
- 'required': ['id', 'links']
- }
- },
- 'additionalProperties': False,
- 'required': ['server']
- }
-}
-
-create_server_with_admin_pass = copy.deepcopy(create_server)
-create_server_with_admin_pass['response_body']['properties']['server'][
- 'properties'].update({'adminPass': {'type': 'string'}})
-create_server_with_admin_pass['response_body']['properties']['server'][
- 'required'].append('adminPass')
-
-list_servers = {
- 'status_code': [200],
- 'response_body': {
- 'type': 'object',
- 'properties': {
- 'servers': {
- 'type': 'array',
- 'items': {
- 'type': 'object',
- 'properties': {
- 'id': {'type': 'string'},
- 'links': parameter_types.links,
- 'name': {'type': 'string'}
- },
- 'additionalProperties': False,
- 'required': ['id', 'links', 'name']
- }
- },
- 'servers_links': parameter_types.links
- },
- 'additionalProperties': False,
- # NOTE(gmann): servers_links attribute is not necessary to be
- # present always So it is not 'required'.
- 'required': ['servers']
- }
-}
-
-delete_server = {
- 'status_code': [204],
-}
-
-common_show_server = {
- 'type': 'object',
- 'properties': {
- 'id': {'type': 'string'},
- 'name': {'type': 'string'},
- 'status': {'type': 'string'},
- 'image': {'oneOf': [
- {'type': 'object',
- 'properties': {
- 'id': {'type': 'string'},
- 'links': parameter_types.links
- },
- 'additionalProperties': False,
- 'required': ['id', 'links']},
- {'type': ['string', 'null']}
- ]},
- 'flavor': {
- 'type': 'object',
- 'properties': {
- 'id': {'type': 'string'},
- 'links': parameter_types.links
- },
- 'additionalProperties': False,
- 'required': ['id', 'links']
- },
- 'fault': {
- 'type': 'object',
- 'properties': {
- 'code': {'type': 'integer'},
- 'created': {'type': 'string'},
- 'message': {'type': 'string'},
- 'details': {'type': 'string'},
- },
- 'additionalProperties': False,
- # NOTE(gmann): 'details' is not necessary to be present
- # in the 'fault'. So it is not defined as 'required'.
- 'required': ['code', 'created', 'message']
- },
- 'user_id': {'type': 'string'},
- 'tenant_id': {'type': 'string'},
- 'created': {'type': 'string'},
- 'updated': {'type': 'string'},
- 'progress': {'type': 'integer'},
- 'metadata': {'type': 'object'},
- 'links': parameter_types.links,
- 'addresses': parameter_types.addresses,
- 'hostId': {'type': 'string'},
- 'OS-DCF:diskConfig': {'type': 'string'},
- 'accessIPv4': parameter_types.access_ip_v4,
- 'accessIPv6': parameter_types.access_ip_v6
- },
- 'additionalProperties': False,
- # NOTE(GMann): 'progress' attribute is present in the response
- # only when server's status is one of the progress statuses
- # ("ACTIVE","BUILD", "REBUILD", "RESIZE","VERIFY_RESIZE")
- # 'fault' attribute is present in the response
- # only when server's status is one of the "ERROR", "DELETED".
- # OS-DCF:diskConfig and accessIPv4/v6 are API
- # extensions, and some environments return a response
- # without these attributes.So these are not defined as 'required'.
- 'required': ['id', 'name', 'status', 'image', 'flavor',
- 'user_id', 'tenant_id', 'created', 'updated',
- 'metadata', 'links', 'addresses', 'hostId']
-}
-
-update_server = {
- 'status_code': [200],
- 'response_body': {
- 'type': 'object',
- 'properties': {
- 'server': common_show_server
- },
- 'additionalProperties': False,
- 'required': ['server']
- }
-}
-
-server_detail = copy.deepcopy(common_show_server)
-server_detail['properties'].update({
- 'key_name': {'type': ['string', 'null']},
- 'security_groups': {'type': 'array'},
-
- # NOTE: Non-admin users also can see "OS-SRV-USG" and "OS-EXT-AZ"
- # attributes.
- 'OS-SRV-USG:launched_at': {'type': ['string', 'null']},
- 'OS-SRV-USG:terminated_at': {'type': ['string', 'null']},
- 'OS-EXT-AZ:availability_zone': {'type': 'string'},
-
- # NOTE: Only admin users can see "OS-EXT-STS" and "OS-EXT-SRV-ATTR"
- # attributes.
- 'OS-EXT-STS:task_state': {'type': ['string', 'null']},
- 'OS-EXT-STS:vm_state': {'type': 'string'},
- 'OS-EXT-STS:power_state': {'type': 'integer'},
- 'OS-EXT-SRV-ATTR:host': {'type': ['string', 'null']},
- 'OS-EXT-SRV-ATTR:instance_name': {'type': 'string'},
- 'OS-EXT-SRV-ATTR:hypervisor_hostname': {'type': ['string', 'null']},
- 'os-extended-volumes:volumes_attached': {'type': 'array'},
- 'config_drive': {'type': 'string'}
-})
-server_detail['properties']['addresses']['patternProperties'][
- '^[a-zA-Z0-9-_.]+$']['items']['properties'].update({
- 'OS-EXT-IPS:type': {'type': 'string'},
- 'OS-EXT-IPS-MAC:mac_addr': parameter_types.mac_address})
-# NOTE(gmann): Update OS-EXT-IPS:type and OS-EXT-IPS-MAC:mac_addr
-# attributes in server address. Those are API extensions,
-# and some environments return a response without
-# these attributes. So they are not 'required'.
-
-get_server = {
- 'status_code': [200],
- 'response_body': {
- 'type': 'object',
- 'properties': {
- 'server': server_detail
- },
- 'additionalProperties': False,
- 'required': ['server']
- }
-}
-
-list_servers_detail = {
- 'status_code': [200],
- 'response_body': {
- 'type': 'object',
- 'properties': {
- 'servers': {
- 'type': 'array',
- 'items': server_detail
- },
- 'servers_links': parameter_types.links
- },
- 'additionalProperties': False,
- # NOTE(gmann): the servers_links attribute is not always
- # present, so it is not 'required'.
- 'required': ['servers']
- }
-}
-
-rebuild_server = copy.deepcopy(update_server)
-rebuild_server['status_code'] = [202]
-
-rebuild_server_with_admin_pass = copy.deepcopy(rebuild_server)
-rebuild_server_with_admin_pass['response_body']['properties']['server'][
- 'properties'].update({'adminPass': {'type': 'string'}})
-rebuild_server_with_admin_pass['response_body']['properties']['server'][
- 'required'].append('adminPass')
-
-rescue_server = {
- 'status_code': [200],
- 'response_body': {
- 'type': 'object',
- 'properties': {
- 'adminPass': {'type': 'string'}
- },
- 'additionalProperties': False,
- 'required': ['adminPass']
- }
-}
-
-list_virtual_interfaces = {
- 'status_code': [200],
- 'response_body': {
- 'type': 'object',
- 'properties': {
- 'virtual_interfaces': {
- 'type': 'array',
- 'items': {
- 'type': 'object',
- 'properties': {
- 'id': {'type': 'string'},
- 'mac_address': parameter_types.mac_address,
- 'OS-EXT-VIF-NET:net_id': {'type': 'string'}
- },
- 'additionalProperties': False,
- # 'OS-EXT-VIF-NET:net_id' is an API extension, so it is
- # not defined as 'required'.
- 'required': ['id', 'mac_address']
- }
- }
- },
- 'additionalProperties': False,
- 'required': ['virtual_interfaces']
- }
-}
-
-common_attach_volume_info = {
- 'type': 'object',
- 'properties': {
- 'id': {'type': 'string'},
- 'device': {'type': 'string'},
- 'volumeId': {'type': 'string'},
- 'serverId': {'type': ['integer', 'string']}
- },
- 'additionalProperties': False,
- 'required': ['id', 'device', 'volumeId', 'serverId']
-}
-
-attach_volume = {
- 'status_code': [200],
- 'response_body': {
- 'type': 'object',
- 'properties': {
- 'volumeAttachment': common_attach_volume_info
- },
- 'additionalProperties': False,
- 'required': ['volumeAttachment']
- }
-}
-
-detach_volume = {
- 'status_code': [202]
-}
-
-show_volume_attachment = copy.deepcopy(attach_volume)
-show_volume_attachment['response_body']['properties'][
- 'volumeAttachment']['properties'].update({'serverId': {'type': 'string'}})
-
-list_volume_attachments = {
- 'status_code': [200],
- 'response_body': {
- 'type': 'object',
- 'properties': {
- 'volumeAttachments': {
- 'type': 'array',
- 'items': common_attach_volume_info
- }
- },
- 'additionalProperties': False,
- 'required': ['volumeAttachments']
- }
-}
-list_volume_attachments['response_body']['properties'][
- 'volumeAttachments']['items']['properties'].update(
- {'serverId': {'type': 'string'}})
-
-list_addresses_by_network = {
- 'status_code': [200],
- 'response_body': parameter_types.addresses
-}
-
-list_addresses = {
- 'status_code': [200],
- 'response_body': {
- 'type': 'object',
- 'properties': {
- 'addresses': parameter_types.addresses
- },
- 'additionalProperties': False,
- 'required': ['addresses']
- }
-}
-
-common_server_group = {
- 'type': 'object',
- 'properties': {
- 'id': {'type': 'string'},
- 'name': {'type': 'string'},
- 'policies': {
- 'type': 'array',
- 'items': {'type': 'string'}
- },
- # the 'members' attribute contains the UUIDs of the instances
- # that belong to the server group
- 'members': {
- 'type': 'array',
- 'items': {'type': 'string'}
- },
- 'metadata': {'type': 'object'}
- },
- 'additionalProperties': False,
- 'required': ['id', 'name', 'policies', 'members', 'metadata']
-}
-
-create_show_server_group = {
- 'status_code': [200],
- 'response_body': {
- 'type': 'object',
- 'properties': {
- 'server_group': common_server_group
- },
- 'additionalProperties': False,
- 'required': ['server_group']
- }
-}
-
-delete_server_group = {
- 'status_code': [204]
-}
-
-list_server_groups = {
- 'status_code': [200],
- 'response_body': {
- 'type': 'object',
- 'properties': {
- 'server_groups': {
- 'type': 'array',
- 'items': common_server_group
- }
- },
- 'additionalProperties': False,
- 'required': ['server_groups']
- }
-}
-
-instance_actions = {
- 'type': 'object',
- 'properties': {
- 'action': {'type': 'string'},
- 'request_id': {'type': 'string'},
- 'user_id': {'type': 'string'},
- 'project_id': {'type': 'string'},
- 'start_time': {'type': 'string'},
- 'message': {'type': ['string', 'null']},
- 'instance_uuid': {'type': 'string'}
- },
- 'additionalProperties': False,
- 'required': ['action', 'request_id', 'user_id', 'project_id',
- 'start_time', 'message', 'instance_uuid']
-}
-
-instance_action_events = {
- 'type': 'array',
- 'items': {
- 'type': 'object',
- 'properties': {
- 'event': {'type': 'string'},
- 'start_time': {'type': 'string'},
- 'finish_time': {'type': 'string'},
- 'result': {'type': 'string'},
- 'traceback': {'type': ['string', 'null']}
- },
- 'additionalProperties': False,
- 'required': ['event', 'start_time', 'finish_time', 'result',
- 'traceback']
- }
-}
-
-list_instance_actions = {
- 'status_code': [200],
- 'response_body': {
- 'type': 'object',
- 'properties': {
- 'instanceActions': {
- 'type': 'array',
- 'items': instance_actions
- }
- },
- 'additionalProperties': False,
- 'required': ['instanceActions']
- }
-}
-
-instance_actions_with_events = copy.deepcopy(instance_actions)
-instance_actions_with_events['properties'].update({
- 'events': instance_action_events})
-# 'events' is not always present in the response body, so it is
-# not defined as 'required'.
-
-show_instance_action = {
- 'status_code': [200],
- 'response_body': {
- 'type': 'object',
- 'properties': {
- 'instanceAction': instance_actions_with_events
- },
- 'additionalProperties': False,
- 'required': ['instanceAction']
- }
-}
-
-show_password = {
- 'status_code': [200],
- 'response_body': {
- 'type': 'object',
- 'properties': {
- 'password': {'type': 'string'}
- },
- 'additionalProperties': False,
- 'required': ['password']
- }
-}
-
-get_vnc_console = {
- 'status_code': [200],
- 'response_body': {
- 'type': 'object',
- 'properties': {
- 'console': {
- 'type': 'object',
- 'properties': {
- 'type': {'type': 'string'},
- 'url': {
- 'type': 'string',
- 'format': 'uri'
- }
- },
- 'additionalProperties': False,
- 'required': ['type', 'url']
- }
- },
- 'additionalProperties': False,
- 'required': ['console']
- }
-}
-
-get_console_output = {
- 'status_code': [200],
- 'response_body': {
- 'type': 'object',
- 'properties': {
- 'output': {'type': 'string'}
- },
- 'additionalProperties': False,
- 'required': ['output']
- }
-}
-
-set_server_metadata = {
- 'status_code': [200],
- 'response_body': {
- 'type': 'object',
- 'properties': {
- 'metadata': {
- 'type': 'object',
- 'patternProperties': {
- '^.+$': {'type': 'string'}
- }
- }
- },
- 'additionalProperties': False,
- 'required': ['metadata']
- }
-}
-
-list_server_metadata = copy.deepcopy(set_server_metadata)
-
-update_server_metadata = copy.deepcopy(set_server_metadata)
-
-delete_server_metadata_item = {
- 'status_code': [204]
-}
-
-set_show_server_metadata_item = {
- 'status_code': [200],
- 'response_body': {
- 'type': 'object',
- 'properties': {
- 'meta': {
- 'type': 'object',
- 'patternProperties': {
- '^.+$': {'type': 'string'}
- }
- }
- },
- 'additionalProperties': False,
- 'required': ['meta']
- }
-}
-
-server_actions_common_schema = {
- 'status_code': [202]
-}
-
-server_actions_delete_password = {
- 'status_code': [204]
-}
-
-server_actions_confirm_resize = copy.deepcopy(
- server_actions_delete_password)
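
The schema dicts removed above all follow one convention: an expected HTTP
status code paired with a JSON Schema for the response body. A minimal
standalone sketch of how such a dict can be checked with the jsonschema
library (an illustration of the idea only, not Tempest's actual response
validator)::

    import jsonschema

    # Same shape as the dicts above: expected status plus a body schema.
    get_console_output = {
        'status_code': [200],
        'response_body': {
            'type': 'object',
            'properties': {'output': {'type': 'string'}},
            'additionalProperties': False,
            'required': ['output'],
        },
    }

    def check_response(schema, status, body):
        # Fail on an unexpected status code or a body that does not match
        # the declared JSON Schema.
        assert status in schema['status_code'], 'unexpected status %s' % status
        jsonschema.validate(body, schema['response_body'])

    check_response(get_console_output, 200, {'output': 'console text'})
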
diff --git a/tempest/thirdparty/__init__.py b/tempest/api_schema/response/compute/v2_2/__init__.py
similarity index 100%
rename from tempest/thirdparty/__init__.py
rename to tempest/api_schema/response/compute/v2_2/__init__.py
diff --git a/tempest/api_schema/response/compute/v2_2/keypairs.py b/tempest/api_schema/response/compute/v2_2/keypairs.py
new file mode 100644
index 0000000..5d8d24d
--- /dev/null
+++ b/tempest/api_schema/response/compute/v2_2/keypairs.py
@@ -0,0 +1,41 @@
+# Copyright 2016 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.api_schema.response.compute.v2_1 import keypairs
+
+get_keypair = copy.deepcopy(keypairs.get_keypair)
+get_keypair['response_body']['properties']['keypair'][
+ 'properties'].update({'type': {'type': 'string'}})
+get_keypair['response_body']['properties']['keypair'][
+ 'required'].append('type')
+
+create_keypair = copy.deepcopy(keypairs.create_keypair)
+create_keypair['status_code'] = [201]
+create_keypair['response_body']['properties']['keypair'][
+ 'properties'].update({'type': {'type': 'string'}})
+create_keypair['response_body']['properties']['keypair'][
+ 'required'].append('type')
+
+delete_keypair = {
+ 'status_code': [204],
+}
+
+list_keypairs = copy.deepcopy(keypairs.list_keypairs)
+list_keypairs['response_body']['properties']['keypairs'][
+ 'items']['properties']['keypair'][
+ 'properties'].update({'type': {'type': 'string'}})
+list_keypairs['response_body']['properties']['keypairs'][
+ 'items']['properties']['keypair']['required'].append('type')
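
The new v2.2 module above builds microversion-specific schemas by
deep-copying the v2.1 definitions and mutating only the copies. A
self-contained sketch of that pattern, using simplified illustrative
schemas rather than the real keypair ones::

    import copy

    # Base (v2.1-style) schema: no 'type' attribute in the keypair body.
    create_keypair_v21 = {
        'status_code': [200],
        'response_body': {
            'type': 'object',
            'properties': {
                'keypair': {
                    'type': 'object',
                    'properties': {'name': {'type': 'string'}},
                    'required': ['name'],
                }
            },
            'required': ['keypair'],
        },
    }

    # v2.2-style extension: copy the base, add the new attribute, mark it
    # required and adjust the expected status code. Because deepcopy is
    # used, the v2.1 original is left untouched.
    create_keypair_v22 = copy.deepcopy(create_keypair_v21)
    create_keypair_v22['status_code'] = [201]
    keypair = create_keypair_v22['response_body']['properties']['keypair']
    keypair['properties'].update({'type': {'type': 'string'}})
    keypair['required'].append('type')

    assert 'type' not in create_keypair_v21[
        'response_body']['properties']['keypair']['required']
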
diff --git a/tempest/clients.py b/tempest/clients.py
index 74f8684..9135891 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -32,6 +32,8 @@
FloatingIPPoolsClient
from tempest_lib.services.compute.floating_ips_bulk_client import \
FloatingIPsBulkClient
+from tempest_lib.services.compute.floating_ips_client import \
+ FloatingIPsClient as ComputeFloatingIPsClient
from tempest_lib.services.compute.hosts_client import HostsClient
from tempest_lib.services.compute.hypervisor_client import \
HypervisorClient
@@ -49,8 +51,13 @@
from tempest_lib.services.compute.quotas_client import QuotasClient
from tempest_lib.services.compute.security_group_default_rules_client import \
SecurityGroupDefaultRulesClient
+from tempest_lib.services.compute.security_group_rules_client import \
+ SecurityGroupRulesClient as ComputeSecurityGroupRulesClient
from tempest_lib.services.compute.security_groups_client import \
SecurityGroupsClient as ComputeSecurityGroupsClient
+from tempest_lib.services.compute.server_groups_client import \
+ ServerGroupsClient
+from tempest_lib.services.compute.servers_client import ServersClient
from tempest_lib.services.compute.services_client import ServicesClient
from tempest_lib.services.compute.snapshots_client import \
SnapshotsClient as ComputeSnapshotsClient
@@ -63,6 +70,14 @@
VolumesClient as ComputeVolumesClient
from tempest_lib.services.identity.v2.token_client import TokenClient
from tempest_lib.services.identity.v3.token_client import V3TokenClient
+from tempest_lib.services.network.floating_ips_client import FloatingIPsClient
+from tempest_lib.services.network.metering_label_rules_client import \
+ MeteringLabelRulesClient
+from tempest_lib.services.network.metering_labels_client import \
+ MeteringLabelsClient
+from tempest_lib.services.network.networks_client import NetworksClient
+from tempest_lib.services.network.ports_client import PortsClient
+from tempest_lib.services.network.subnets_client import SubnetsClient
from tempest.common import negative_rest_client
from tempest import config
@@ -70,15 +85,7 @@
from tempest import manager
from tempest.services.baremetal.v1.json.baremetal_client import \
BaremetalClient
-from tempest.services import botoclients
-from tempest.services.compute.json.floating_ips_client import \
- FloatingIPsClient as ComputeFloatingIPsClient
from tempest.services.compute.json.keypairs_client import KeyPairsClient
-from tempest.services.compute.json.security_group_rules_client import \
- SecurityGroupRulesClient
-from tempest.services.compute.json.server_groups_client import \
- ServerGroupsClient
-from tempest.services.compute.json.servers_client import ServersClient
from tempest.services.data_processing.v1_1.data_processing_client import \
DataProcessingClient
from tempest.services.database.json.flavors_client import \
@@ -89,8 +96,14 @@
DatabaseVersionsClient
from tempest.services.identity.v2.json.identity_client import \
IdentityClient
+from tempest.services.identity.v2.json.roles_client import \
+ RolesClient
+from tempest.services.identity.v2.json.services_client import \
+ ServicesClient as ServicesV2Client
from tempest.services.identity.v2.json.tenants_client import \
TenantsClient
+from tempest.services.identity.v2.json.users_client import \
+ UsersClient
from tempest.services.identity.v3.json.credentials_client import \
CredentialsClient as CredentialsV3Client
from tempest.services.identity.v3.json.endpoints_client import \
@@ -98,27 +111,28 @@
from tempest.services.identity.v3.json.groups_client import \
GroupsClient as GroupsV3Client
from tempest.services.identity.v3.json.identity_client import IdentityV3Client
-from tempest.services.identity.v3.json.policy_client import \
- PolicyClient as PolicyV3Client
-from tempest.services.identity.v3.json.region_client import \
- RegionClient as RegionV3Client
-from tempest.services.identity.v3.json.service_client import \
- ServiceClient as ServiceV3Client
+from tempest.services.identity.v3.json.policies_client import \
+ PoliciesClient as PoliciesV3Client
+from tempest.services.identity.v3.json.regions_client import \
+ RegionsClient as RegionsV3Client
+from tempest.services.identity.v3.json.services_client import \
+ ServicesClient as IdentityServicesV3Client
from tempest.services.image.v1.json.images_client import ImagesClient
from tempest.services.image.v2.json.images_client import ImagesClientV2
from tempest.services.messaging.json.messaging_client import \
MessagingClient
-from tempest.services.network.json.floating_ips_client import FloatingIPsClient
-from tempest.services.network.json.metering_label_rules_client import \
- MeteringLabelRulesClient
-from tempest.services.network.json.metering_labels_client import \
- MeteringLabelsClient
+from tempest.services.network.json.agents_client import AgentsClient \
+ as NetworkAgentsClient
+from tempest.services.network.json.extensions_client import \
+ ExtensionsClient as NetworkExtensionsClient
from tempest.services.network.json.network_client import NetworkClient
-from tempest.services.network.json.networks_client import NetworksClient
-from tempest.services.network.json.ports_client import PortsClient
from tempest.services.network.json.quotas_client import QuotasClient \
as NetworkQuotasClient
-from tempest.services.network.json.subnets_client import SubnetsClient
+from tempest.services.network.json.security_group_rules_client import \
+ SecurityGroupRulesClient
+from tempest.services.network.json.security_groups_client import \
+ SecurityGroupsClient
+from tempest.services.network.json.subnetpools_client import SubnetpoolsClient
from tempest.services.object_storage.account_client import AccountClient
from tempest.services.object_storage.container_client import ContainerClient
from tempest.services.object_storage.object_client import ObjectClient
@@ -186,9 +200,30 @@
}
default_params_with_timeout_values.update(default_params)
- def __init__(self, credentials, service=None):
- super(Manager, self).__init__(credentials=credentials)
+ def __init__(self, credentials, service=None, api_microversions=None):
+ """Initialization of Manager class.
+ Set up all service clients and make them available to test cases.
+ :param credentials: type Credentials or TestResources
+ :param service: Service name
+ :param api_microversions: A dict mapping a service catalog type to
+ the microversion that will be set on the corresponding service
+ clients, i.e.
+ {<service catalog type>: request_microversion}
+ Example:
+ {'compute': request_microversion}
+ - request_microversion will be set on all compute
+ service clients.
+ OR
+ {'compute': request_microversion,
+ 'volume': request_microversion}
+ - request_microversion of compute will be set on all
+ compute service clients.
+ - request_microversion of volume will be set on all
+ volume service clients.
+ """
+ super(Manager, self).__init__(credentials=credentials)
+ self.api_microversions = api_microversions or {}
self._set_compute_clients()
self._set_database_clients()
self._set_identity_clients()
@@ -201,6 +236,22 @@
CONF.identity.region,
endpoint_type=CONF.baremetal.endpoint_type,
**self.default_params_with_timeout_values)
+ self.network_agents_client = NetworkAgentsClient(
+ self.auth_provider,
+ CONF.network.catalog_type,
+ CONF.network.region or CONF.identity.region,
+ endpoint_type=CONF.network.endpoint_type,
+ build_interval=CONF.network.build_interval,
+ build_timeout=CONF.network.build_timeout,
+ **self.default_params)
+ self.network_extensions_client = NetworkExtensionsClient(
+ self.auth_provider,
+ CONF.network.catalog_type,
+ CONF.network.region or CONF.identity.region,
+ endpoint_type=CONF.network.endpoint_type,
+ build_interval=CONF.network.build_interval,
+ build_timeout=CONF.network.build_timeout,
+ **self.default_params)
self.network_client = NetworkClient(
self.auth_provider,
CONF.network.catalog_type,
@@ -217,6 +268,14 @@
build_interval=CONF.network.build_interval,
build_timeout=CONF.network.build_timeout,
**self.default_params)
+ self.subnetpools_client = SubnetpoolsClient(
+ self.auth_provider,
+ CONF.network.catalog_type,
+ CONF.network.region or CONF.identity.region,
+ endpoint_type=CONF.network.endpoint_type,
+ build_interval=CONF.network.build_interval,
+ build_timeout=CONF.network.build_timeout,
+ **self.default_params)
self.subnets_client = SubnetsClient(
self.auth_provider,
CONF.network.catalog_type,
@@ -265,6 +324,22 @@
build_interval=CONF.network.build_interval,
build_timeout=CONF.network.build_timeout,
**self.default_params)
+ self.security_group_rules_client = SecurityGroupRulesClient(
+ self.auth_provider,
+ CONF.network.catalog_type,
+ CONF.network.region or CONF.identity.region,
+ endpoint_type=CONF.network.endpoint_type,
+ build_interval=CONF.network.build_interval,
+ build_timeout=CONF.network.build_timeout,
+ **self.default_params)
+ self.security_groups_client = SecurityGroupsClient(
+ self.auth_provider,
+ CONF.network.catalog_type,
+ CONF.network.region or CONF.identity.region,
+ endpoint_type=CONF.network.endpoint_type,
+ build_interval=CONF.network.build_interval,
+ build_timeout=CONF.network.build_timeout,
+ **self.default_params)
self.messaging_client = MessagingClient(
self.auth_provider,
CONF.messaging.catalog_type,
@@ -318,14 +393,7 @@
self.negative_client = negative_rest_client.NegativeRestClient(
self.auth_provider, service, **self.default_params)
- # Generating EC2 credentials in tempest is only supported
- # with identity v2
- if CONF.identity_feature_enabled.api_v2 and \
- CONF.identity.auth_version == 'v2':
- # EC2 and S3 clients, if used, will check configured AWS
- # credentials and generate new ones if needed
- self.ec2api_client = botoclients.APIClientEC2(self.identity_client)
- self.s3_client = botoclients.ObjectClientS3(self.identity_client)
+ self._set_api_microversions()
def _set_compute_clients(self):
params = {
@@ -354,7 +422,8 @@
self.server_groups_client = ServerGroupsClient(
self.auth_provider, **params)
self.limits_client = LimitsClient(self.auth_provider, **params)
- self.images_client = ComputeImagesClient(self.auth_provider, **params)
+ self.compute_images_client = ComputeImagesClient(self.auth_provider,
+ **params)
self.keypairs_client = KeyPairsClient(self.auth_provider, **params)
self.quotas_client = QuotasClient(self.auth_provider, **params)
self.quota_classes_client = QuotaClassesClient(self.auth_provider,
@@ -368,8 +437,8 @@
self.auth_provider, **params)
self.compute_floating_ips_client = ComputeFloatingIPsClient(
self.auth_provider, **params)
- self.security_group_rules_client = SecurityGroupRulesClient(
- self.auth_provider, **params)
+ self.compute_security_group_rules_client = \
+ ComputeSecurityGroupRulesClient(self.auth_provider, **params)
self.compute_security_groups_client = ComputeSecurityGroupsClient(
self.auth_provider, **params)
self.interfaces_client = InterfacesClient(self.auth_provider,
@@ -437,6 +506,12 @@
**params_v2_admin)
self.tenants_client = TenantsClient(self.auth_provider,
**params_v2_admin)
+ self.roles_client = RolesClient(self.auth_provider,
+ **params_v2_admin)
+ self.users_client = UsersClient(self.auth_provider,
+ **params_v2_admin)
+ self.services_v2_client = ServicesV2Client(self.auth_provider,
+ **params_v2_admin)
params_v2_public = params.copy()
params_v2_public['endpoint_type'] = (
CONF.identity.v2_public_endpoint_type)
@@ -445,6 +520,10 @@
**params_v2_public)
self.tenants_public_client = TenantsClient(self.auth_provider,
**params_v2_public)
+ self.roles_public_client = RolesClient(self.auth_provider,
+ **params_v2_public)
+ self.users_public_client = UsersClient(self.auth_provider,
+ **params_v2_public)
params_v3 = params.copy()
params_v3['endpoint_type'] = CONF.identity.v3_endpoint_type
# Clients below use the endpoint type of Keystone API v3
@@ -452,9 +531,11 @@
**params_v3)
self.endpoints_client = EndPointV3Client(self.auth_provider,
**params_v3)
- self.service_client = ServiceV3Client(self.auth_provider, **params_v3)
- self.policy_client = PolicyV3Client(self.auth_provider, **params_v3)
- self.region_client = RegionV3Client(self.auth_provider, **params_v3)
+ self.identity_services_client = IdentityServicesV3Client(
+ self.auth_provider, **params_v3)
+ self.policies_client = PoliciesV3Client(self.auth_provider,
+ **params_v3)
+ self.regions_client = RegionsV3Client(self.auth_provider, **params_v3)
self.credentials_client = CredentialsV3Client(self.auth_provider,
**params_v3)
self.groups_client = GroupsV3Client(self.auth_provider, **params_v3)
@@ -539,3 +620,15 @@
self.account_client = AccountClient(self.auth_provider, **params)
self.container_client = ContainerClient(self.auth_provider, **params)
self.object_client = ObjectClient(self.auth_provider, **params)
+
+ def _set_api_microversions(self):
+ service_clients = [x for x in self.__dict__ if x.endswith('_client')]
+ for client in service_clients:
+ client_obj = getattr(self, client)
+ microversion = self.api_microversions.get(client_obj.service)
+ if microversion:
+ if hasattr(client_obj, 'set_api_microversion'):
+ client_obj.set_api_microversion(microversion)
+ else:
+ LOG.debug("Need to implement set_api_microversion on %s"
+ % client)
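
The new ``_set_api_microversions()`` helper walks every ``*_client``
attribute on the manager, looks up the requested microversion by the
client's catalog type, and applies it only to clients that implement
``set_api_microversion()``. A self-contained sketch of that dispatch with
stand-in classes (not the real Tempest clients)::

    class FakeComputeClient(object):
        service = 'compute'

        def __init__(self):
            self.microversion = None

        def set_api_microversion(self, version):
            self.microversion = version

    class FakeVolumeClient(object):
        service = 'volume'  # no set_api_microversion(); only logged in Tempest

    class FakeManager(object):
        def __init__(self, api_microversions=None):
            self.api_microversions = api_microversions or {}
            self.servers_client = FakeComputeClient()
            self.volumes_client = FakeVolumeClient()
            self._set_api_microversions()

        def _set_api_microversions(self):
            # Same idea as tempest.clients.Manager: find *_client
            # attributes and push the microversion down when supported.
            for name in [x for x in self.__dict__ if x.endswith('_client')]:
                client = getattr(self, name)
                microversion = self.api_microversions.get(client.service)
                if microversion and hasattr(client, 'set_api_microversion'):
                    client.set_api_microversion(microversion)

    mgr = FakeManager(api_microversions={'compute': '2.2'})
    assert mgr.servers_client.microversion == '2.2'
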
diff --git a/tempest/cmd/account_generator.py b/tempest/cmd/account_generator.py
index 64c9b00..ddfc75d 100755
--- a/tempest/cmd/account_generator.py
+++ b/tempest/cmd/account_generator.py
@@ -85,24 +85,33 @@
import argparse
import netaddr
import os
+import traceback
+from cliff import command
from oslo_log import log as logging
+import tempest_lib.auth
+from tempest_lib.common.utils import data_utils
+import tempest_lib.exceptions
+from tempest_lib.services.network import networks_client
+from tempest_lib.services.network import subnets_client
import yaml
from tempest.common import identity
from tempest import config
from tempest import exceptions as exc
from tempest.services.identity.v2.json import identity_client
+from tempest.services.identity.v2.json import roles_client
from tempest.services.identity.v2.json import tenants_client
+from tempest.services.identity.v2.json import users_client
from tempest.services.network.json import network_client
-from tempest.services.network.json import networks_client
-from tempest.services.network.json import subnets_client
-import tempest_lib.auth
-from tempest_lib.common.utils import data_utils
-import tempest_lib.exceptions
LOG = None
CONF = config.CONF
+DESCRIPTION = ('Create accounts.yaml file for concurrent test runs.%s'
+ 'One primary user, one alt user, '
+ 'one swift admin, one stack owner '
+ 'and one admin (optionally) will be created '
+ 'for each concurrent thread.' % os.linesep)
def setup_logging():
@@ -146,6 +155,20 @@
endpoint_type='adminURL',
**params
)
+ roles_admin = roles_client.RolesClient(
+ _auth,
+ CONF.identity.catalog_type,
+ CONF.identity.region,
+ endpoint_type='adminURL',
+ **params
+ )
+ users_admin = users_client.UsersClient(
+ _auth,
+ CONF.identity.catalog_type,
+ CONF.identity.region,
+ endpoint_type='adminURL',
+ **params
+ )
network_admin = None
networks_admin = None
subnets_admin = None
@@ -171,14 +194,15 @@
CONF.network.region or CONF.identity.region,
endpoint_type='adminURL',
**params)
- return (identity_admin, tenants_admin, neutron_iso_networks, network_admin,
- networks_admin, subnets_admin)
+ return (identity_admin, tenants_admin, roles_admin, users_admin,
+ neutron_iso_networks, network_admin, networks_admin, subnets_admin)
def create_resources(opts, resources):
- (identity_admin, tenants_admin, neutron_iso_networks,
- network_admin, networks_admin, subnets_admin) = get_admin_clients(opts)
- roles = identity_admin.list_roles()['roles']
+ (identity_admin, tenants_admin, roles_admin, users_admin,
+ neutron_iso_networks, network_admin, networks_admin,
+ subnets_admin) = get_admin_clients(opts)
+ roles = roles_admin.list_roles()['roles']
for u in resources['users']:
u['role_ids'] = []
for r in u.get('roles', ()):
@@ -193,7 +217,8 @@
if tenant not in existing:
tenants_admin.create_tenant(tenant)
else:
- LOG.warn("Tenant '%s' already exists in this environment" % tenant)
+ LOG.warning("Tenant '%s' already exists in this environment"
+ % tenant)
LOG.info('Tenants created')
for u in resources['users']:
try:
@@ -206,14 +231,14 @@
identity.get_user_by_username(tenants_admin,
tenant['id'], u['name'])
except tempest_lib.exceptions.NotFound:
- identity_admin.create_user(
+ users_admin.create_user(
u['name'], u['pass'], tenant['id'],
"%s@%s" % (u['name'], tenant['id']),
enabled=True)
break
else:
- LOG.warn("User '%s' already exists in this environment. "
- "New name generated" % u['name'])
+ LOG.warning("User '%s' already exists in this environment. "
+ "New name generated" % u['name'])
u['name'] = random_user_name(opts.tag, u['prefix'])
LOG.info('Users created')
@@ -240,7 +265,7 @@
continue
for r in u['role_ids']:
try:
- identity_admin.assign_user_role(tenant['id'], user['id'], r)
+ roles_admin.assign_user_role(tenant['id'], user['id'], r)
except tempest_lib.exceptions.Conflict:
# don't care if it's already assigned
pass
@@ -349,7 +374,7 @@
resources['users'].append({
'tenant': tenant,
'name': user,
- 'pass': data_utils.rand_name(),
+ 'pass': data_utils.rand_password(),
'prefix': user_group['prefix'],
'roles': user_group['roles']
})
@@ -365,7 +390,7 @@
'password': user['pass'],
'roles': user['roles']
}
- if 'network' or 'router' in user:
+ if 'network' in user or 'router' in user:
account['resources'] = {}
if 'network' in user:
account['resources']['network'] = user['network']
@@ -379,20 +404,7 @@
LOG.info('%s generated successfully!' % opts.accounts)
-def get_options():
- usage_string = ('tempest-account-generator [-h] <ARG> ...\n\n'
- 'To see help on specific argument, do:\n'
- 'tempest-account-generator <ARG> -h')
- parser = argparse.ArgumentParser(
- description='Create accounts.yaml file for concurrent test runs. '
- 'One primary user, one alt user, '
- 'one swift admin, one stack owner '
- 'and one admin (optionally) will be created '
- 'for each concurrent thread.',
- formatter_class=argparse.ArgumentDefaultsHelpFormatter,
- usage=usage_string
- )
-
+def _parser_add_args(parser):
parser.add_argument('-c', '--config-file',
metavar='/etc/tempest.conf',
help='path to tempest config file')
@@ -429,16 +441,50 @@
metavar='accounts_file.yaml',
help='Output accounts yaml file')
+
+def get_options():
+ usage_string = ('tempest-account-generator [-h] <ARG> ...\n\n'
+ 'To see help on specific argument, do:\n'
+ 'tempest-account-generator <ARG> -h')
+ parser = argparse.ArgumentParser(
+ description=DESCRIPTION,
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+ usage=usage_string
+ )
+
+ _parser_add_args(parser)
opts = parser.parse_args()
- if opts.config_file:
- config.CONF.set_config_path(opts.config_file)
return opts
+class TempestAccountGenerator(command.Command):
+
+ def get_parser(self, prog_name):
+ parser = super(TempestAccountGenerator, self).get_parser(prog_name)
+ _parser_add_args(parser)
+ return parser
+
+ def take_action(self, parsed_args):
+ try:
+ return main(parsed_args)
+ except Exception:
+ LOG.exception("Failure generating test accounts.")
+ traceback.print_exc()
+ raise
+ return 0
+
+ def get_description(self):
+ return DESCRIPTION
+
+
def main(opts=None):
- if not opts:
- opts = get_options()
setup_logging()
+ if not opts:
+ LOG.warn("Use of: 'tempest-account-generator' is deprecated, "
+ "please use: 'tempest account-generator'")
+ opts = get_options()
+ if opts.config_file:
+ config.CONF.set_config_path(opts.config_file)
resources = generate_resources(opts)
create_resources(opts, resources)
dump_accounts(opts, resources)
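
One fix above is easy to miss: in ``dump_accounts()`` the check
``if 'network' or 'router' in user:`` became
``if 'network' in user or 'router' in user:``. The old expression was
always true because ``or`` binds more loosely than ``in``; a short
demonstration::

    user = {'router': 'router1'}

    # Old form parses as ('network') or ('router' in user); the non-empty
    # string is always truthy, so the branch ran for every user.
    print(bool('network' or 'router' in user))    # True
    print(bool('network' or 'router' in {}))      # True (still!)

    # Fixed form: explicit membership tests for both keys.
    print('network' in user or 'router' in user)  # True
    print('network' in {} or 'router' in {})      # False
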
diff --git a/tempest/cmd/cleanup.py b/tempest/cmd/cleanup.py
index 1c8ddcb..7b73a61 100644
--- a/tempest/cmd/cleanup.py
+++ b/tempest/cmd/cleanup.py
@@ -51,6 +51,7 @@
Please run with **--help** to see full list of options.
"""
import sys
+import traceback
from cliff import command
from oslo_log import log as logging
@@ -74,6 +75,16 @@
super(TempestCleanup, self).__init__(app, cmd)
def take_action(self, parsed_args):
+ try:
+ self.init(parsed_args)
+ self._cleanup()
+ except Exception:
+ LOG.exception("Failure during cleanup")
+ traceback.print_exc()
+ raise
+ return 0
+
+ def init(self, parsed_args):
cleanup_service.init_conf()
self.options = parsed_args
self.admin_mgr = credentials.AdminManager()
@@ -96,10 +107,9 @@
return
self._load_json()
- self._cleanup()
def _cleanup(self):
- LOG.debug("Begin cleanup")
+ print ("Begin cleanup")
is_dry_run = self.options.dry_run
is_preserve = not self.options.delete_tempest_conf_objects
is_save_state = False
@@ -118,7 +128,7 @@
'is_save_state': is_save_state}
tenant_service = cleanup_service.TenantService(admin_mgr, **kwargs)
tenants = tenant_service.list()
- LOG.debug("Process %s tenants" % len(tenants))
+ print ("Process %s tenants" % len(tenants))
# Loop through list of tenants and clean them up.
for tenant in tenants:
@@ -149,7 +159,7 @@
self._remove_admin_role(tenant_id)
def _clean_tenant(self, tenant):
- LOG.debug("Cleaning tenant: %s " % tenant['name'])
+ print ("Cleaning tenant: %s " % tenant['name'])
is_dry_run = self.options.dry_run
dry_run_data = self.dry_run_data
is_preserve = not self.options.delete_tempest_conf_objects
@@ -176,17 +186,18 @@
svc.run()
def _init_admin_ids(self):
- id_cl = self.admin_mgr.identity_client
+ tn_cl = self.admin_mgr.tenants_client
+ rl_cl = self.admin_mgr.roles_client
- tenant = identity.get_tenant_by_name(id_cl,
+ tenant = identity.get_tenant_by_name(tn_cl,
CONF.auth.admin_tenant_name)
self.admin_tenant_id = tenant['id']
- user = identity.get_user_by_username(id_cl, self.admin_tenant_id,
+ user = identity.get_user_by_username(tn_cl, self.admin_tenant_id,
CONF.auth.admin_username)
self.admin_id = user['id']
- roles = id_cl.list_roles()['roles']
+ roles = rl_cl.list_roles()['roles']
for role in roles:
if role['name'] == CONF.identity.admin_role:
self.admin_role_id = role['id']
@@ -221,8 +232,9 @@
def _add_admin(self, tenant_id):
id_cl = self.admin_mgr.identity_client
+ rl_cl = self.admin_mgr.roles_client
needs_role = True
- roles = id_cl.list_user_roles(tenant_id, self.admin_id)['roles']
+ roles = rl_cl.list_user_roles(tenant_id, self.admin_id)['roles']
for role in roles:
if role['id'] == self.admin_role_id:
needs_role = False
@@ -247,9 +259,9 @@
"exists, exception: %s" % ex)
def _tenant_exists(self, tenant_id):
- id_cl = self.admin_mgr.identity_client
+ tn_cl = self.admin_mgr.tenants_client
try:
- t = id_cl.show_tenant(tenant_id)
+ t = tn_cl.show_tenant(tenant_id)
LOG.debug("Tenant is: %s" % str(t))
return True
except Exception as ex:
@@ -257,7 +269,7 @@
return False
def _init_state(self):
- LOG.debug("Initializing saved state.")
+ print ("Initializing saved state.")
data = {}
admin_mgr = self.admin_mgr
kwargs = {'data': data,
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
index b5b78c6..8a47406 100644
--- a/tempest/cmd/cleanup_service.py
+++ b/tempest/cmd/cleanup_service.py
@@ -389,6 +389,7 @@
self.floating_ips_client = manager.floating_ips_client
self.metering_labels_client = manager.metering_labels_client
self.metering_label_rules_client = manager.metering_label_rules_client
+ self.security_groups_client = manager.security_groups_client
def _filter_by_conf_networks(self, item_list):
if not item_list or not all(('network_id' in i for i in item_list)):
@@ -654,7 +655,7 @@
class NetworkSecGroupService(NetworkService):
def list(self):
- client = self.client
+ client = self.security_groups_client
filter = self.tenant_filter
# cannot delete default sec group so never show it.
secgroups = [secgroup for secgroup in
@@ -774,7 +775,7 @@
class ImageService(BaseService):
def __init__(self, manager, **kwargs):
super(ImageService, self).__init__(kwargs)
- self.client = manager.images_client
+ self.client = manager.compute_images_client
def list(self):
client = self.client
@@ -814,11 +815,14 @@
self.client = manager.identity_client
-class UserService(IdentityService):
+class UserService(BaseService):
+
+ def __init__(self, manager, **kwargs):
+ super(UserService, self).__init__(kwargs)
+ self.client = manager.users_client
def list(self):
- client = self.client
- users = client.list_users()['users']
+ users = self.client.list_users()['users']
if not self.is_save_state:
users = [user for user in users if user['id']
@@ -836,11 +840,10 @@
return users
def delete(self):
- client = self.client
users = self.list()
for user in users:
try:
- client.delete_user(user['id'])
+ self.client.delete_user(user['id'])
except Exception:
LOG.exception("Delete User exception.")
@@ -855,12 +858,15 @@
self.data['users'][user['id']] = user['name']
-class RoleService(IdentityService):
+class RoleService(BaseService):
+
+ def __init__(self, manager, **kwargs):
+ super(RoleService, self).__init__(kwargs)
+ self.client = manager.roles_client
def list(self):
- client = self.client
try:
- roles = client.list_roles()['roles']
+ roles = self.client.list_roles()['roles']
# reconcile roles with saved state and never list admin role
if not self.is_save_state:
roles = [role for role in roles if
@@ -874,11 +880,10 @@
return []
def delete(self):
- client = self.client
roles = self.list()
for role in roles:
try:
- client.delete_role(role['id'])
+ self.client.delete_role(role['id'])
except Exception:
LOG.exception("Delete Role exception.")
@@ -893,7 +898,7 @@
self.data['roles'][role['id']] = role['name']
-class TenantService(IdentityService):
+class TenantService(BaseService):
def __init__(self, manager, **kwargs):
super(TenantService, self).__init__(kwargs)
diff --git a/tempest/cmd/javelin.py b/tempest/cmd/javelin.py
index 826d8e8..8012ad7 100755
--- a/tempest/cmd/javelin.py
+++ b/tempest/cmd/javelin.py
@@ -118,20 +118,22 @@
from tempest_lib import auth
from tempest_lib import exceptions as lib_exc
from tempest_lib.services.compute import flavors_client
+from tempest_lib.services.compute import floating_ips_client
+from tempest_lib.services.compute import security_group_rules_client
from tempest_lib.services.compute import security_groups_client
+from tempest_lib.services.compute import servers_client
+from tempest_lib.services.network import subnets_client
import yaml
from tempest.common import identity
from tempest.common import waiters
from tempest import config
-from tempest.services.compute.json import floating_ips_client
-from tempest.services.compute.json import security_group_rules_client
-from tempest.services.compute.json import servers_client
from tempest.services.identity.v2.json import identity_client
+from tempest.services.identity.v2.json import roles_client
from tempest.services.identity.v2.json import tenants_client
+from tempest.services.identity.v2.json import users_client
from tempest.services.image.v2.json import images_client
from tempest.services.network.json import network_client
-from tempest.services.network.json import subnets_client
from tempest.services.object_storage import container_client
from tempest.services.object_storage import object_client
from tempest.services.telemetry.json import alarming_client
@@ -206,6 +208,18 @@
CONF.identity.region,
endpoint_type='adminURL',
**default_params_with_timeout_values)
+ self.roles = roles_client.RolesClient(
+ _auth,
+ CONF.identity.catalog_type,
+ CONF.identity.region,
+ endpoint_type='adminURL',
+ **default_params_with_timeout_values)
+ self.users = users_client.UsersClient(
+ _auth,
+ CONF.identity.catalog_type,
+ CONF.identity.region,
+ endpoint_type='adminURL',
+ **default_params_with_timeout_values)
self.servers = servers_client.ServersClient(_auth,
**compute_params)
self.flavors = flavors_client.FlavorsClient(_auth,
@@ -304,7 +318,8 @@
if tenant not in existing:
admin.tenants.create_tenant(tenant)['tenant']
else:
- LOG.warn("Tenant '%s' already exists in this environment" % tenant)
+ LOG.warning("Tenant '%s' already exists in this environment"
+ % tenant)
def destroy_tenants(tenants):
@@ -339,11 +354,11 @@
def _assign_swift_role(user, swift_role):
admin = keystone_admin()
- roles = admin.identity.list_roles()
+ roles = admin.roles.list_roles()
role = next(r for r in roles if r['name'] == swift_role)
LOG.debug(USERS[user])
try:
- admin.identity.assign_user_role(
+ admin.roles.assign_user_role(
USERS[user]['tenant_id'],
USERS[user]['id'],
role['id'])
@@ -367,12 +382,12 @@
LOG.error("Tenant: %s - not found" % u['tenant'])
continue
try:
- identity.get_user_by_username(admin.identity,
+ identity.get_user_by_username(admin.tenants,
tenant['id'], u['name'])
- LOG.warn("User '%s' already exists in this environment"
- % u['name'])
+ LOG.warning("User '%s' already exists in this environment"
+ % u['name'])
except lib_exc.NotFound:
- admin.identity.create_user(
+ admin.users.create_user(
u['name'], u['pass'], tenant['id'],
"%s@%s" % (u['name'], tenant['id']),
enabled=True)
@@ -383,9 +398,9 @@
for user in users:
tenant_id = identity.get_tenant_by_name(admin.tenants,
user['tenant'])['id']
- user_id = identity.get_user_by_username(admin.identity,
+ user_id = identity.get_user_by_username(admin.tenants,
tenant_id, user['name'])['id']
- admin.identity.delete_user(user_id)
+ admin.users.delete_user(user_id)
def collect_users(users):
@@ -396,7 +411,7 @@
tenant = identity.get_tenant_by_name(admin.tenants, u['tenant'])
u['tenant_id'] = tenant['id']
USERS[u['name']] = u
- body = identity.get_user_by_username(admin.identity,
+ body = identity.get_user_by_username(admin.tenants,
tenant['id'], u['name'])
USERS[u['name']]['id'] = body['id']
@@ -450,7 +465,7 @@
LOG.info("checking users")
for name, user in six.iteritems(self.users):
client = keystone_admin()
- found = client.identity.show_user(user['id'])['user']
+ found = client.users.show_user(user['id'])['user']
self.assertEqual(found['name'], user['name'])
self.assertEqual(found['tenantId'], user['tenant_id'])
@@ -492,24 +507,26 @@
for network_name, body in found['addresses'].items():
for addr in body:
ip = addr['addr']
- # If floatingip_for_ssh is at True, it's assumed
- # you want to use the floating IP to reach the server,
- # fallback to fixed IP, then other type.
+ # Use floating IP, fixed IP or other type to
+ # reach the server.
# This is useful in multi-node environment.
- if CONF.compute.use_floatingip_for_ssh:
+ if CONF.validation.connect_method == 'floating':
if addr.get('OS-EXT-IPS:type',
'floating') == 'floating':
self._ping_ip(ip, 60)
_floating_is_alive = True
- elif addr.get('OS-EXT-IPS:type', 'fixed') == 'fixed':
- namespace = _get_router_namespace(client,
- network_name)
- self._ping_ip(ip, 60, namespace)
+ elif CONF.validation.connect_method == 'fixed':
+ if addr.get('OS-EXT-IPS:type',
+ 'fixed') == 'fixed':
+ namespace = _get_router_namespace(client,
+ network_name)
+ self._ping_ip(ip, 60, namespace)
else:
self._ping_ip(ip, 60)
- # if floatingip_for_ssh is at True, validate found a
- # floating IP and ping worked.
- if CONF.compute.use_floatingip_for_ssh:
+ # If CONF.validation.connect_method is floating, validate
+ # that the floating IP is attached to the server and the
+ # server is pingable.
+ if CONF.validation.connect_method == 'floating':
self.assertTrue(_floating_is_alive,
"Server %s has no floating IP." %
server['name'])
@@ -903,7 +920,7 @@
# create security group(s) after server spawning
for secgroup in server['secgroups']:
client.servers.add_security_group(server_id, name=secgroup)
- if CONF.compute.use_floatingip_for_ssh:
+ if CONF.validation.connect_method == 'floating':
floating_ip_pool = server.get('floating_ip_pool')
floating_ip = client.floating_ips.create_floating_ip(
pool_name=floating_ip_pool)['floating_ip']
@@ -1013,7 +1030,9 @@
server_id = _get_server_by_name(client, volume['server'])['id']
volume_id = _get_volume_by_name(client, volume['name'])['id']
device = volume['device']
- client.volumes.attach_volume(volume_id, server_id, device)
+ client.volumes.attach_volume(volume_id,
+ instance_uuid=server_id,
+ mountpoint=device)
#######################
@@ -1065,7 +1084,7 @@
destroy_secgroups(RES['secgroups'])
destroy_users(RES['users'])
destroy_tenants(RES['tenants'])
- LOG.warn("Destroy mode incomplete")
+ LOG.warning("Destroy mode incomplete")
def get_options():
@@ -1121,6 +1140,8 @@
def main():
+ print("Javelin is deprecated and will be removed from Tempest in the "
+ "future.")
global RES
get_options()
setup_logging()
diff --git a/tempest/cmd/list_plugins.py b/tempest/cmd/list_plugins.py
new file mode 100644
index 0000000..1f1ff1a
--- /dev/null
+++ b/tempest/cmd/list_plugins.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Utility for listing all currently installed Tempest plugins.
+
+**Usage:** ``tempest list-plugins``.
+"""
+
+from cliff import command
+from oslo_log import log as logging
+import prettytable
+
+from tempest.test_discover.plugins import TempestTestPluginManager
+
+LOG = logging.getLogger(__name__)
+
+
+class TempestListPlugins(command.Command):
+ def take_action(self, parsed_args):
+ self._list_plugins()
+ return 0
+
+ def get_description(self):
+ return 'List all tempest plugins'
+
+ def _list_plugins(self):
+ plugins = TempestTestPluginManager()
+
+ output = prettytable.PrettyTable(["Name", "EntryPoint"])
+ for plugin in plugins.ext_plugins.extensions:
+ output.add_row([
+ plugin.name, plugin.entry_point_target])
+
+ print(output)
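
``tempest list-plugins`` only reports plugins that are discoverable through
entry points. A sketch of how an out-of-tree plugin package might advertise
itself so it shows up in that table, assuming the standard
``tempest.test_plugins`` namespace; the package, module and class names
below are hypothetical::

    # setup.py of a hypothetical out-of-tree Tempest plugin package.
    import setuptools

    setuptools.setup(
        name='my-tempest-plugin',
        version='0.1.0',
        packages=['my_tempest_plugin'],
        entry_points={
            # The referenced class is expected to implement the Tempest
            # plugin interface (tempest.test_discover.plugins.TempestPlugin).
            'tempest.test_plugins': [
                'my_plugin = my_tempest_plugin.plugin:MyTempestPlugin',
            ],
        },
    )
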
diff --git a/tempest/cmd/main.py b/tempest/cmd/main.py
index 577df9b..acd97a8 100644
--- a/tempest/cmd/main.py
+++ b/tempest/cmd/main.py
@@ -28,6 +28,7 @@
description='Tempest cli application',
version=version.VersionInfo('tempest').version_string(),
command_manager=commandmanager.CommandManager('tempest.cm'),
+ deferred_help=True,
)
def initialize_app(self, argv):
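
The command classes added in this change (account generator, cleanup,
list-plugins, run-stress, verify-config) all plug into the cliff
application above, which resolves subcommands through the ``tempest.cm``
command manager namespace. A minimal, self-contained sketch of that cliff
wiring, with illustrative names rather than the real Tempest entry points::

    import sys

    from cliff import app
    from cliff import command
    from cliff import commandmanager

    class Hello(command.Command):
        """Print a greeting."""

        def get_description(self):
            return 'Print a greeting'

        def take_action(self, parsed_args):
            print('hello from a cliff command')
            return 0

    class DemoApp(app.App):
        def __init__(self):
            mgr = commandmanager.CommandManager('demo.cm')
            # Commands are normally discovered via entry points; add one
            # directly so this sketch runs without packaging.
            mgr.add_command('hello', Hello)
            super(DemoApp, self).__init__(
                description='demo cli', version='0.1',
                command_manager=mgr, deferred_help=True)

    if __name__ == '__main__':
        sys.exit(DemoApp().run(sys.argv[1:]))
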
diff --git a/tempest/cmd/run_stress.py b/tempest/cmd/run_stress.py
index f99e5d9..943fe5b 100644
--- a/tempest/cmd/run_stress.py
+++ b/tempest/cmd/run_stress.py
@@ -22,6 +22,7 @@
except ImportError:
# unittest in python 2.6 does not contain loader, so uses unittest2
from unittest2 import loader
+import traceback
from cliff import command
from oslo_log import log as logging
@@ -79,7 +80,16 @@
return pa
def take_action(self, pa):
- return action(pa)
+ try:
+ action(pa)
+ except Exception:
+ LOG.exception("Failure in the stress test framework")
+ traceback.print_exc()
+ raise
+ return 0
+
+ def get_description(self):
+ return 'Run tempest stress tests'
def add_arguments(parser):
@@ -149,4 +159,5 @@
sys.exit(main())
except Exception:
LOG.exception("Failure in the stress test framework")
+ traceback.print_exc()
sys.exit(1)
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
old mode 100755
new mode 100644
index 9c8e2a0..92aa19e
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -15,10 +15,13 @@
# under the License.
import argparse
+import httplib2
import os
import sys
+import traceback
-import httplib2
+from cliff import command
+from oslo_log import log as logging
from oslo_serialization import jsonutils as json
from six import moves
from six.moves.urllib import parse as urlparse
@@ -31,6 +34,8 @@
CONF = config.CONF
CONF_PARSER = None
+LOG = logging.getLogger(__name__)
+
def _get_config_file():
default_config_dir = os.path.join(os.path.abspath(
@@ -141,7 +146,7 @@
extensions_client = {
'nova': os.extensions_client,
'cinder': os.volumes_extension_client,
- 'neutron': os.network_client,
+ 'neutron': os.network_extensions_client,
'swift': os.account_client,
}
# NOTE (e0ne): Use Cinder API v2 by default because v1 is deprecated
@@ -152,7 +157,7 @@
if service not in extensions_client:
print('No tempest extensions client for %s' % service)
- exit(1)
+ sys.exit(1)
return extensions_client[service]
@@ -165,7 +170,7 @@
}
if service not in extensions_options:
print('No supported extensions list option for %s' % service)
- exit(1)
+ sys.exit(1)
return extensions_options[service]
@@ -310,8 +315,7 @@
return avail_services
-def parse_args():
- parser = argparse.ArgumentParser()
+def _parser_add_args(parser):
parser.add_argument('-u', '--update', action='store_true',
help='Update the config file with results from api '
'queries. This assumes whatever is set in the '
@@ -329,13 +333,21 @@
parser.add_argument('-r', '--replace-ext', action='store_true',
help="If specified the all option will be replaced "
"with a full list of extensions")
- args = parser.parse_args()
- return args
-def main():
+def parse_args():
+ parser = argparse.ArgumentParser()
+ _parser_add_args(parser)
+ opts = parser.parse_args()
+ return opts
+
+
+def main(opts=None):
print('Running config verification...')
- opts = parse_args()
+ if opts is None:
+ print("Use of: 'verify-tempest-config' is deprecated, "
+ "please use: 'tempest verify-config'")
+ opts = parse_args()
update = opts.update
replace = opts.replace_ext
global CONF_PARSER
@@ -373,5 +385,22 @@
icreds.clear_creds()
+class TempestVerifyConfig(command.Command):
+ """Verify your current tempest configuration"""
+
+ def get_parser(self, prog_name):
+ parser = super(TempestVerifyConfig, self).get_parser(prog_name)
+ _parser_add_args(parser)
+ return parser
+
+ def take_action(self, parsed_args):
+ try:
+ return main(parsed_args)
+ except Exception:
+ LOG.exception("Failure verifying configuration.")
+ traceback.print_exc()
+ raise
+ return 0
+
if __name__ == "__main__":
main()
diff --git a/tempest/common/api_version_utils.py b/tempest/common/api_version_utils.py
index c499f23..98601a7 100644
--- a/tempest/common/api_version_utils.py
+++ b/tempest/common/api_version_utils.py
@@ -39,7 +39,7 @@
if ((min_version > max_version) or
(config_min_version > config_max_version)):
msg = ("Min version is greater than Max version. Test Class versions "
- "[%s - %s]. configration versions [%s - %s]."
+ "[%s - %s]. configuration versions [%s - %s]."
% (min_version.get_string(),
max_version.get_string(),
config_min_version.get_string(),
@@ -56,9 +56,40 @@
if (max_version < config_min_version or
config_max_version < min_version):
msg = ("The microversion range[%s - %s] of this test is out of the "
- "configration range[%s - %s]."
+ "configuration range[%s - %s]."
% (min_version.get_string(),
max_version.get_string(),
config_min_version.get_string(),
config_max_version.get_string()))
raise testtools.TestCase.skipException(msg)
+
+
+def select_request_microversion(test_min_version, cfg_min_version):
+ test_version = api_version_request.APIVersionRequest(test_min_version)
+ cfg_version = api_version_request.APIVersionRequest(cfg_min_version)
+ max_version = cfg_version if cfg_version >= test_version else test_version
+ return max_version.get_string()
+
+
+def assert_version_header_matches_request(api_microversion_header_name,
+ api_microversion,
+ response_header):
+ """Checks API microversion in resposne header
+
+ Verify that the microversion is present in the response header
+ and matches the specified 'api_microversion' value.
+
+ @param: api_microversion_header_name: Microversion header name
+ Example- "X-OpenStack-Nova-API-Version"
+ @param: api_microversion: Microversion number like "2.10"
+ @param: response_header: Response header where microversion is
+ expected to be present.
+ """
+ api_microversion_header_name = api_microversion_header_name.lower()
+ if (api_microversion_header_name not in response_header or
+ api_microversion != response_header[api_microversion_header_name]):
+ msg = ("Microversion header '%s' with value '%s' does not match in "
+ "response - %s. " % (api_microversion_header_name,
+ api_microversion,
+ response_header))
+ raise exceptions.InvalidHTTPResponseHeader(msg)
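
``select_request_microversion()`` returns the larger of the test's minimum
microversion and the configured minimum, and
``assert_version_header_matches_request()`` compares the header name
case-insensitively. A simplified, self-contained illustration of both
rules, using plain version strings instead of Tempest's
``APIVersionRequest`` objects::

    def select_request_microversion(test_min, cfg_min):
        # Compare "X.Y" strings as integer tuples so that 2.10 > 2.2.
        def parse(version):
            return tuple(int(part) for part in version.split('.'))
        return max(test_min, cfg_min, key=parse)

    def assert_version_header_matches_request(header_name, microversion,
                                               headers):
        # Header names are matched case-insensitively, values exactly.
        headers = {k.lower(): v for k, v in headers.items()}
        if headers.get(header_name.lower()) != microversion:
            raise AssertionError('microversion %s not found in %s'
                                 % (microversion, headers))

    assert select_request_microversion('2.2', '2.10') == '2.10'
    assert_version_header_matches_request(
        'X-OpenStack-Nova-API-Version', '2.10',
        {'x-openstack-nova-api-version': '2.10'})
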
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index 5a14fbe..73505e6 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -29,7 +29,8 @@
def create_test_server(clients, validatable=False, validation_resources=None,
tenant_network=None, wait_until=None,
- volume_backed=False, **kwargs):
+ volume_backed=False, name=None, flavor=None,
+ image_id=None, **kwargs):
"""Common wrapper utility returning a test server.
This method is a common wrapper returning a test server that can be
@@ -48,21 +49,27 @@
# TODO(jlanoux) add support of wait_until PINGABLE/SSHABLE
- if 'name' in kwargs:
- name = kwargs.pop('name')
- else:
- name = data_utils.rand_name(__name__ + "-instance")
+ name = name
+ flavor = flavor
+ image_id = image_id
- flavor = kwargs.pop('flavor', CONF.compute.flavor_ref)
- image_id = kwargs.pop('image_id', CONF.compute.image_ref)
+ if name is None:
+ name = data_utils.rand_name(__name__ + "-instance")
+ if flavor is None:
+ flavor = CONF.compute.flavor_ref
+ if image_id is None:
+ image_id = CONF.compute.image_ref
kwargs = fixed_network.set_networks_kwarg(
tenant_network, kwargs) or {}
+ multiple_create_request = (max(kwargs.get('min_count', 0),
+ kwargs.get('max_count', 0)) > 1)
+
if CONF.validation.run_validation and validatable:
# As a first implementation, multiple pingable or sshable servers will
# not be supported
- if 'min_count' in kwargs or 'max_count' in kwargs:
+ if multiple_create_request:
msg = ("Multiple pingable or sshable servers not supported at "
"this stage.")
raise ValueError(msg)
@@ -116,7 +123,7 @@
# handle the case of multiple servers
servers = []
- if 'min_count' in kwargs or 'max_count' in kwargs:
+ if multiple_create_request:
# Get servers created which name match with name param.
body_servers = clients.servers_client.list_servers()
servers = \
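
With this change the multiple-server code paths in ``create_test_server()``
trigger only when ``min_count``/``max_count`` actually request more than one
server, rather than whenever either key is passed at all. A short
illustration of the new predicate::

    def is_multiple_create_request(kwargs):
        # Mirrors the expression introduced above: only counts greater
        # than one mean a multiple-create request.
        return max(kwargs.get('min_count', 0), kwargs.get('max_count', 0)) > 1

    # min_count=1 used to be treated as a multiple-create request because
    # the old check was simply "'min_count' in kwargs"; now it is not.
    assert is_multiple_create_request({'min_count': 1}) is False
    assert is_multiple_create_request({'max_count': 3}) is True
    assert is_multiple_create_request({}) is False
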
diff --git a/tempest/common/cred_client.py b/tempest/common/cred_client.py
index 94515dc..6df7eb2 100644
--- a/tempest/common/cred_client.py
+++ b/tempest/common/cred_client.py
@@ -31,15 +31,18 @@
admin credentials used for generating credentials.
"""
- def __init__(self, identity_client, projects_client=None):
+ def __init__(self, identity_client, projects_client=None,
+ roles_client=None, users_client=None):
# The client implies version and credentials
self.identity_client = identity_client
# this is temporary until the v3 project client is
# separated, then projects_client will become mandatory
self.projects_client = projects_client or identity_client
+ self.roles_client = roles_client or identity_client
+ self.users_client = users_client or identity_client
def create_user(self, username, password, project, email):
- user = self.identity_client.create_user(
+ user = self.users_client.create_user(
username, password, project['id'], email)
if 'user' in user:
user = user['user']
@@ -59,7 +62,7 @@
def create_user_role(self, role_name):
if not self._check_role_exists(role_name):
- self.identity_client.create_role(role_name)
+ self.roles_client.create_role(name=role_name)
def assign_user_role(self, user, project, role_name):
role = self._check_role_exists(role_name)
@@ -67,8 +70,8 @@
msg = 'No "%s" role found' % role_name
raise lib_exc.NotFound(msg)
try:
- self.identity_client.assign_user_role(project['id'], user['id'],
- role['id'])
+ self.roles_client.assign_user_role(project['id'], user['id'],
+ role['id'])
except lib_exc.Conflict:
LOG.debug("Role %s already assigned on project %s for user %s" % (
role['id'], project['id'], user['id']))
@@ -85,17 +88,21 @@
pass
def delete_user(self, user_id):
- self.identity_client.delete_user(user_id)
+ self.users_client.delete_user(user_id)
def _list_roles(self):
- roles = self.identity_client.list_roles()['roles']
+ roles = self.roles_client.list_roles()['roles']
return roles
class V2CredsClient(CredsClient):
- def __init__(self, identity_client, projects_client):
- super(V2CredsClient, self).__init__(identity_client, projects_client)
+ def __init__(self, identity_client, projects_client, roles_client,
+ users_client):
+ super(V2CredsClient, self).__init__(identity_client,
+ projects_client,
+ roles_client,
+ users_client)
def create_project(self, name, description):
tenant = self.projects_client.create_tenant(
@@ -160,8 +167,11 @@
def get_creds_client(identity_client,
projects_client=None,
+ roles_client=None,
+ users_client=None,
project_domain_name=None):
if isinstance(identity_client, v2_identity.IdentityClient):
- return V2CredsClient(identity_client, projects_client)
+ return V2CredsClient(identity_client, projects_client, roles_client,
+ users_client)
else:
return V3CredsClient(identity_client, project_domain_name)
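
With the identity clients split up, Keystone v2 callers now hand
``get_creds_client()`` dedicated tenants, roles and users clients (the
dynamic credentials provider further down in this change does exactly
this). A minimal sketch of the call, assuming an already-built admin
``tempest.clients.Manager``::

    from tempest.common import cred_client

    def build_v2_creds_client(manager):
        # `manager` is assumed to be a tempest.clients.Manager created from
        # admin credentials; clients that are not passed explicitly fall
        # back to identity_client inside CredsClient.
        return cred_client.get_creds_client(
            manager.identity_client,
            projects_client=manager.tenants_client,
            roles_client=manager.roles_client,
            users_client=manager.users_client)
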
diff --git a/tempest/common/cred_provider.py b/tempest/common/cred_provider.py
index aa237e0..9dd89ea 100644
--- a/tempest/common/cred_provider.py
+++ b/tempest/common/cred_provider.py
@@ -14,14 +14,11 @@
import abc
-from oslo_log import log as logging
import six
from tempest_lib import auth
from tempest import exceptions
-LOG = logging.getLogger(__name__)
-
@six.add_metaclass(abc.ABCMeta)
class CredentialProvider(object):
diff --git a/tempest/common/credentials_factory.py b/tempest/common/credentials_factory.py
index 95dcafc..24c1198 100644
--- a/tempest/common/credentials_factory.py
+++ b/tempest/common/credentials_factory.py
@@ -11,10 +11,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import os
-
from oslo_concurrency import lockutils
-from oslo_log import log as logging
from tempest_lib import auth
from tempest import clients
@@ -25,7 +22,6 @@
from tempest import exceptions
CONF = config.CONF
-LOG = logging.getLogger(__name__)
"""This module provides factories of credential and credential providers
@@ -170,8 +166,7 @@
admin_creds=admin_creds,
**_get_dynamic_provider_params())
else:
- if (CONF.auth.test_accounts_file and
- os.path.isfile(CONF.auth.test_accounts_file)):
+ if CONF.auth.test_accounts_file:
# Most params are not relevant for pre-created accounts
return preprov_creds.PreProvisionedCredentialProvider(
name=name, identity_version=identity_version,
@@ -193,8 +188,7 @@
if CONF.auth.use_dynamic_credentials:
return is_admin
# Check whether test accounts file has the admin specified or not
- elif (CONF.auth.test_accounts_file and
- os.path.isfile(CONF.auth.test_accounts_file)):
+ elif CONF.auth.test_accounts_file:
check_accounts = preprov_creds.PreProvisionedCredentialProvider(
identity_version=identity_version, name='check_admin',
**_get_preprov_provider_params())
@@ -219,8 +213,7 @@
if CONF.auth.use_dynamic_credentials:
return True
# Check whether test accounts file has the admin specified or not
- if (CONF.auth.test_accounts_file and
- os.path.isfile(CONF.auth.test_accounts_file)):
+ if CONF.auth.test_accounts_file:
check_accounts = preprov_creds.PreProvisionedCredentialProvider(
identity_version=identity_version, name='check_alt',
**_get_preprov_provider_params())
diff --git a/tempest/common/dynamic_creds.py b/tempest/common/dynamic_creds.py
index 813d94f..95ad229 100644
--- a/tempest/common/dynamic_creds.py
+++ b/tempest/common/dynamic_creds.py
@@ -58,10 +58,13 @@
self.ports = []
self.default_admin_creds = admin_creds
(self.identity_admin_client, self.tenants_admin_client,
+ self.roles_admin_client,
+ self.users_admin_client,
self.network_admin_client,
self.networks_admin_client,
self.subnets_admin_client,
- self.ports_admin_client) = self._get_admin_clients()
+ self.ports_admin_client,
+ self.security_groups_admin_client) = self._get_admin_clients()
# Domain where isolated credentials are provisioned (v3 only).
         # Use that of the admin account if None is configured.
self.creds_domain_name = None
@@ -72,6 +75,8 @@
self.creds_client = cred_client.get_creds_client(
self.identity_admin_client,
self.tenants_admin_client,
+ self.roles_admin_client,
+ self.users_admin_client,
self.creds_domain_name)
def _get_admin_clients(self):
@@ -83,11 +88,14 @@
"""
os = clients.Manager(self.default_admin_creds)
if self.identity_version == 'v2':
- return (os.identity_client, os.tenants_client, os.network_client,
- os.networks_client, os.subnets_client, os.ports_client)
+ return (os.identity_client, os.tenants_client, os.roles_client,
+ os.users_client, os.network_client, os.networks_client,
+ os.subnets_client, os.ports_client,
+ os.security_groups_client)
else:
- return (os.identity_v3_client, None, os.network_client,
- os.networks_client, os.subnets_client, os.ports_client)
+ return (os.identity_v3_client, None, None, None, os.network_client,
+ os.networks_client, os.subnets_client, os.ports_client,
+ os.security_groups_client)
def _create_creds(self, suffix="", admin=False, roles=None):
"""Create random credentials under the following schema.
@@ -280,36 +288,36 @@
try:
net_client.delete_router(router_id)
except lib_exc.NotFound:
- LOG.warn('router with name: %s not found for delete' %
- router_name)
+ LOG.warning('router with name: %s not found for delete' %
+ router_name)
def _clear_isolated_subnet(self, subnet_id, subnet_name):
client = self.subnets_admin_client
try:
client.delete_subnet(subnet_id)
except lib_exc.NotFound:
- LOG.warn('subnet with name: %s not found for delete' %
- subnet_name)
+ LOG.warning('subnet with name: %s not found for delete' %
+ subnet_name)
def _clear_isolated_network(self, network_id, network_name):
net_client = self.networks_admin_client
try:
net_client.delete_network(network_id)
except lib_exc.NotFound:
- LOG.warn('network with name: %s not found for delete' %
- network_name)
+ LOG.warning('network with name: %s not found for delete' %
+ network_name)
def _cleanup_default_secgroup(self, tenant):
- net_client = self.network_admin_client
- resp_body = net_client.list_security_groups(tenant_id=tenant,
+ nsg_client = self.security_groups_admin_client
+ resp_body = nsg_client.list_security_groups(tenant_id=tenant,
name="default")
secgroups_to_delete = resp_body['security_groups']
for secgroup in secgroups_to_delete:
try:
- net_client.delete_security_group(secgroup['id'])
+ nsg_client.delete_security_group(secgroup['id'])
except lib_exc.NotFound:
- LOG.warn('Security group %s, id %s not found for clean-up' %
- (secgroup['name'], secgroup['id']))
+ LOG.warning('Security group %s, id %s not found for clean-up' %
+ (secgroup['name'], secgroup['id']))
def _clear_isolated_net_resources(self):
net_client = self.network_admin_client
@@ -328,8 +336,8 @@
net_client.remove_router_interface_with_subnet_id(
creds.router['id'], creds.subnet['id'])
except lib_exc.NotFound:
- LOG.warn('router with name: %s not found for delete' %
- creds.router['name'])
+ LOG.warning('router with name: %s not found for delete' %
+ creds.router['name'])
self._clear_isolated_router(creds.router['id'],
creds.router['name'])
if (not self.network_resources or
@@ -349,15 +357,15 @@
try:
self.creds_client.delete_user(creds.user_id)
except lib_exc.NotFound:
- LOG.warn("user with name: %s not found for delete" %
- creds.username)
+ LOG.warning("user with name: %s not found for delete" %
+ creds.username)
try:
if CONF.service_available.neutron:
self._cleanup_default_secgroup(creds.tenant_id)
self.creds_client.delete_project(creds.tenant_id)
except lib_exc.NotFound:
- LOG.warn("tenant with name: %s not found for delete" %
- creds.tenant_name)
+ LOG.warning("tenant with name: %s not found for delete" %
+ creds.tenant_name)
self._creds = {}
def is_multi_user(self):
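
A note on the logging churn in this file: LOG.warn() is a deprecated alias in both stdlib logging and oslo.log, which is why every call site above moves to LOG.warning(). A trivial, illustrative-only sketch of the preferred spelling:

# Illustrative only.
from oslo_log import log as logging

LOG = logging.getLogger(__name__)
# Passing the argument separately also defers string interpolation.
LOG.warning('router with name: %s not found for delete', 'router-1')
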
diff --git a/tempest/common/fixed_network.py b/tempest/common/fixed_network.py
index 56cd331..3fc1365 100644
--- a/tempest/common/fixed_network.py
+++ b/tempest/common/fixed_network.py
@@ -49,13 +49,13 @@
name, networks))
if caller:
msg = '(%s) %s' % (caller, msg)
- LOG.warn(msg)
+ LOG.warning(msg)
raise exceptions.InvalidTestResource(type='network', name=name)
else:
msg = "Network with name: %s not found" % name
if caller:
msg = '(%s) %s' % (caller, msg)
- LOG.warn(msg)
+ LOG.warning(msg)
raise exceptions.InvalidTestResource(type='network', name=name)
# To be consistent between neutron and nova network always use name even
     # if label is used in the api response. If neither is present then
@@ -65,7 +65,7 @@
msg = "Network found from list doesn't contain a valid name or label"
if caller:
msg = '(%s) %s' % (caller, msg)
- LOG.warn(msg)
+ LOG.warning(msg)
raise exceptions.InvalidTestResource(type='network', name=name)
network['name'] = name
return network
@@ -122,6 +122,6 @@
if 'id' in network.keys():
params.update({"networks": [{'uuid': network['id']}]})
else:
- LOG.warn('The provided network dict: %s was invalid and did not '
- ' contain an id' % network)
+ LOG.warning('The provided network dict: %s was invalid and did '
+ 'not contain an id' % network)
return params
diff --git a/tempest/common/generator/base_generator.py b/tempest/common/generator/base_generator.py
index a66002f..3a51f2e 100644
--- a/tempest/common/generator/base_generator.py
+++ b/tempest/common/generator/base_generator.py
@@ -17,11 +17,8 @@
import functools
import jsonschema
-from oslo_log import log as logging
import six
-LOG = logging.getLogger(__name__)
-
def _check_for_expected_result(name, schema):
expected_result = None
diff --git a/tempest/common/generator/negative_generator.py b/tempest/common/generator/negative_generator.py
index 17997a5..67ace54 100644
--- a/tempest/common/generator/negative_generator.py
+++ b/tempest/common/generator/negative_generator.py
@@ -15,13 +15,9 @@
import copy
-from oslo_log import log as logging
-
import tempest.common.generator.base_generator as base
import tempest.common.generator.valid_generator as valid
-LOG = logging.getLogger(__name__)
-
class NegativeTestGenerator(base.BasicGeneratorSet):
@base.generator_type("string")
diff --git a/tempest/common/generator/valid_generator.py b/tempest/common/generator/valid_generator.py
index 2213b4a..3070489 100644
--- a/tempest/common/generator/valid_generator.py
+++ b/tempest/common/generator/valid_generator.py
@@ -13,15 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log as logging
import six
import tempest.common.generator.base_generator as base
-LOG = logging.getLogger(__name__)
-
-
class ValidTestGenerator(base.BasicGeneratorSet):
@base.generator_type("string")
@base.simple_generator
diff --git a/tempest/common/preprov_creds.py b/tempest/common/preprov_creds.py
index 74cc3f0..34af31e 100644
--- a/tempest/common/preprov_creds.py
+++ b/tempest/common/preprov_creds.py
@@ -31,8 +31,13 @@
def read_accounts_yaml(path):
- with open(path, 'r') as yaml_file:
- accounts = yaml.load(yaml_file)
+ try:
+ with open(path, 'r') as yaml_file:
+ accounts = yaml.load(yaml_file)
+ except IOError:
+ raise exceptions.InvalidConfiguration(
+ 'The path for the test accounts file: %s '
+ 'could not be found' % path)
return accounts
@@ -74,7 +79,7 @@
identity_version=identity_version, name=name,
admin_role=admin_role, credentials_domain=credentials_domain)
self.test_accounts_file = test_accounts_file
- if test_accounts_file and os.path.isfile(test_accounts_file):
+ if test_accounts_file:
accounts = read_accounts_yaml(self.test_accounts_file)
self.use_default_creds = False
else:
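
Illustrative only: with the os.path.isfile() guard gone from the callers, a wrong test_accounts_file path now surfaces as a configuration error instead of being silently skipped. The path below is a made-up example:

# Sketch of the new failure mode; the path is hypothetical.
from tempest.common import preprov_creds
from tempest import exceptions

try:
    accounts = preprov_creds.read_accounts_yaml('/no/such/accounts.yaml')
except exceptions.InvalidConfiguration as err:
    # Raised by read_accounts_yaml() instead of a bare IOError.
    print(err)
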
diff --git a/tempest/common/utils/linux/remote_client.py b/tempest/common/utils/linux/remote_client.py
index 025b79f..b76c356 100644
--- a/tempest/common/utils/linux/remote_client.py
+++ b/tempest/common/utils/linux/remote_client.py
@@ -31,7 +31,7 @@
# NOTE(afazekas): It should always get an address instead of server
def __init__(self, server, username, password=None, pkey=None):
ssh_timeout = CONF.validation.ssh_timeout
- network = CONF.compute.network_for_ssh
+ network = CONF.validation.network_for_ssh
ip_version = CONF.validation.ip_version_for_ssh
connect_timeout = CONF.validation.connect_timeout
if isinstance(server, six.string_types):
@@ -51,7 +51,7 @@
def exec_command(self, cmd):
# Shell options below add more clearness on failures,
# path is extended for some non-cirros guest oses (centos7)
- cmd = CONF.compute.ssh_shell_prologue + " " + cmd
+ cmd = CONF.validation.ssh_shell_prologue + " " + cmd
LOG.debug("Remote command: %s" % cmd)
return self.ssh_client.exec_command(cmd)
@@ -73,7 +73,7 @@
return output.split()[1]
def get_number_of_vcpus(self):
- output = self.exec_command('grep -c processor /proc/cpuinfo')
+ output = self.exec_command('grep -c ^processor /proc/cpuinfo')
return int(output)
def get_partitions(self):
@@ -94,8 +94,8 @@
cmd = 'sudo sh -c "echo \\"%s\\" >/dev/console"' % message
return self.exec_command(cmd)
- def ping_host(self, host, count=CONF.compute.ping_count,
- size=CONF.compute.ping_size, nic=None):
+ def ping_host(self, host, count=CONF.validation.ping_count,
+ size=CONF.validation.ping_size, nic=None):
addr = netaddr.IPAddress(host)
cmd = 'ping6' if addr.version == 6 else 'ping'
if nic:
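
The net effect of this hunk is that everything the remote client reads now comes from the [validation] group. A minimal, illustrative-only sketch of the lookups (the option names are the ones registered in the config.py hunk further below):

# Illustrative only; assumes a usable tempest.conf is in place.
from tempest import config

CONF = config.CONF
network = CONF.validation.network_for_ssh
prologue = CONF.validation.ssh_shell_prologue
ping_count = CONF.validation.ping_count
ping_size = CONF.validation.ping_size
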
diff --git a/tempest/common/validation_resources.py b/tempest/common/validation_resources.py
index f526299..9457a60 100644
--- a/tempest/common/validation_resources.py
+++ b/tempest/common/validation_resources.py
@@ -24,7 +24,7 @@
def create_ssh_security_group(os, add_rule=False):
security_groups_client = os.compute_security_groups_client
- security_group_rules_client = os.security_group_rules_client
+ security_group_rules_client = os.compute_security_group_rules_client
sg_name = data_utils.rand_name('securitygroup-')
sg_description = data_utils.rand_name('description-')
security_group = security_groups_client.create_security_group(
@@ -73,8 +73,8 @@
try:
keypair_client.delete_keypair(keypair_name)
except lib_exc.NotFound:
- LOG.warn("Keypair %s is not found when attempting to delete"
- % keypair_name)
+ LOG.warning("Keypair %s is not found when attempting to delete"
+ % keypair_name)
except Exception as exc:
LOG.exception('Exception raised while deleting key %s'
% keypair_name)
@@ -87,8 +87,8 @@
security_group_client.delete_security_group(sec_id)
security_group_client.wait_for_resource_deletion(sec_id)
except lib_exc.NotFound:
- LOG.warn("Security group %s is not found when attempting to "
- " delete" % sec_id)
+ LOG.warning("Security group %s is not found when attempting "
+ "to delete" % sec_id)
except lib_exc.Conflict as exc:
LOG.exception('Conflict while deleting security '
'group %s VM might not be deleted ' % sec_id)
@@ -105,8 +105,8 @@
try:
floating_client.delete_floating_ip(fip_id)
except lib_exc.NotFound:
- LOG.warn('Floating ip %s not found while attempting to delete'
- % fip_id)
+ LOG.warning('Floating ip %s not found while attempting to '
+ 'delete' % fip_id)
except Exception as exc:
LOG.exception('Exception raised while deleting ip %s '
% fip_id)
diff --git a/tempest/config.py b/tempest/config.py
index a6212fb..6942172 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -17,9 +17,10 @@
import logging as std_logging
import os
+import tempfile
+from oslo_concurrency import lockutils
from oslo_config import cfg
-
from oslo_log import log as logging
from tempest.test_discover import plugins
@@ -231,12 +232,6 @@
cfg.StrOpt('flavor_ref_alt',
default="2",
help='Valid secondary flavor to be used in tests.'),
- cfg.StrOpt('image_ssh_user',
- default="root",
- help="User name used to authenticate to an instance."),
- cfg.StrOpt('image_ssh_password',
- default="password",
- help="Password used to authenticate to an instance."),
cfg.IntOpt('build_interval',
default=1,
help="Time in seconds between build status checks."),
@@ -245,41 +240,6 @@
help="Timeout in seconds to wait for an instance to build. "
"Other services that do not define build_timeout will "
"inherit this value."),
- cfg.StrOpt('ssh_shell_prologue',
- default="set -eu -o pipefail; PATH=$$PATH:/sbin;",
- help="Shell fragments to use before executing a command "
- "when sshing to a guest."),
- cfg.StrOpt('ssh_auth_method',
- default='keypair',
- choices=('keypair', 'configured', 'adminpass', 'disabled'),
- help="Auth method used for authenticate to the instance. "
- "Valid choices are: keypair, configured, adminpass "
- "and disabled. "
- "Keypair: start the servers with a ssh keypair. "
- "Configured: use the configured user and password. "
- "Adminpass: use the injected adminPass. "
- "Disabled: avoid using ssh when it is an option."),
- cfg.StrOpt('ssh_connect_method',
- default='floating',
- choices=('fixed', 'floating'),
- help="How to connect to the instance? "
- "fixed: using the first ip belongs the fixed network "
- "floating: creating and using a floating ip."),
- cfg.StrOpt('ssh_user',
- default='root',
- help="User name used to authenticate to an instance."),
- cfg.IntOpt('ping_timeout',
- default=120,
- help="Timeout in seconds to wait for ping to "
- "succeed."),
- cfg.IntOpt('ping_size',
- default=56,
- help="The packet size for ping packets originating "
- "from remote linux hosts"),
- cfg.IntOpt('ping_count',
- default=1,
- help="The number of ping packets originating from remote "
- "linux hosts"),
cfg.IntOpt('ready_wait',
default=0,
help="Additional wait time for clean state, when there is "
@@ -291,13 +251,6 @@
"servers if tempest does not create a network or a "
"network is not specified elsewhere. It may be used for "
"ssh validation only if floating IPs are disabled."),
- cfg.StrOpt('network_for_ssh',
- default='public',
- help="Network used for SSH connections. Ignored if "
- "use_floatingip_for_ssh=true or run_validation=false."),
- cfg.BoolOpt('use_floatingip_for_ssh',
- default=True,
- help="Does SSH use Floating IPs?"),
cfg.StrOpt('catalog_type',
default='compute',
help="Catalog type of the Compute service."),
@@ -323,12 +276,6 @@
'when shelved. This time should be the same as the time '
'of nova.conf, and some tests will run for as long as the '
'time.'),
- cfg.StrOpt('floating_ip_range',
- default='10.0.0.0/29',
- help='Unallocated floating IP range, which will be used to '
- 'test the floating IP bulk feature for CRUD operation. '
- 'This block must not overlap an existing floating IP '
- 'pool.'),
cfg.IntOpt('min_compute_nodes',
default=1,
help=('The minimum number of compute nodes expected. This will '
@@ -441,9 +388,6 @@
default=True,
help='Does the test environment support creating snapshot '
'images of running instances?'),
- cfg.BoolOpt('ec2_api',
- default=True,
- help='Does the test environment have the ec2 api running?'),
cfg.BoolOpt('nova_cert',
default=True,
help='Does the test environment have the nova cert running?'),
@@ -672,9 +616,7 @@
cfg.BoolOpt('run_validation',
default=False,
help='Enable ssh on created servers and creation of additional'
- ' validation resources to enable remote access',
- deprecated_opts=[cfg.DeprecatedOpt('run_ssh',
- group='compute')]),
+ ' validation resources to enable remote access'),
cfg.BoolOpt('security_group',
default=True,
help='Enable/disable security groups.'),
@@ -686,31 +628,77 @@
choices=['fixed', 'floating'],
help='Default IP type used for validation: '
'-fixed: uses the first IP belonging to the fixed network '
- '-floating: creates and uses a floating IP'),
+ '-floating: creates and uses a floating IP',
+ deprecated_opts=[cfg.DeprecatedOpt('use_floatingip_for_ssh',
+ group='compute')]),
cfg.StrOpt('auth_method',
default='keypair',
choices=['keypair'],
help='Default authentication method to the instance. '
'Only ssh via keypair is supported for now. '
- 'Additional methods will be handled in a separate spec.'),
+ 'Additional methods will be handled in a separate spec.',
+ deprecated_opts=[cfg.DeprecatedOpt('ssh_auth_method',
+ group='compute')]),
cfg.IntOpt('ip_version_for_ssh',
default=4,
- help='Default IP version for ssh connections.',
- deprecated_opts=[cfg.DeprecatedOpt('ip_version_for_ssh',
- group='compute')]),
+ help='Default IP version for ssh connections.'),
cfg.IntOpt('ping_timeout',
default=120,
- help='Timeout in seconds to wait for ping to succeed.'),
+ help='Timeout in seconds to wait for ping to succeed.',
+ deprecated_opts=[cfg.DeprecatedOpt('ping_timeout',
+ group='compute')]),
cfg.IntOpt('connect_timeout',
default=60,
help='Timeout in seconds to wait for the TCP connection to be '
- 'successful.',
- deprecated_opts=[cfg.DeprecatedOpt('ssh_channel_timeout',
- group='compute')]),
+ 'successful.'),
cfg.IntOpt('ssh_timeout',
default=300,
- help='Timeout in seconds to wait for the ssh banner.',
- deprecated_opts=[cfg.DeprecatedOpt('ssh_timeout',
+ help='Timeout in seconds to wait for the ssh banner.'),
+ cfg.StrOpt('image_ssh_user',
+ default="root",
+ help="User name used to authenticate to an instance.",
+ deprecated_opts=[cfg.DeprecatedOpt('image_ssh_user',
+ group='compute'),
+ cfg.DeprecatedOpt('ssh_user',
+ group='compute'),
+ cfg.DeprecatedOpt('ssh_user',
+ group='scenario')]),
+ cfg.StrOpt('image_ssh_password',
+ default="password",
+ help="Password used to authenticate to an instance.",
+ deprecated_opts=[cfg.DeprecatedOpt('image_ssh_password',
+ group='compute')]),
+ cfg.StrOpt('ssh_shell_prologue',
+ default="set -eu -o pipefail; PATH=$$PATH:/sbin;",
+ help="Shell fragments to use before executing a command "
+ "when sshing to a guest.",
+ deprecated_opts=[cfg.DeprecatedOpt('ssh_shell_prologue',
+ group='compute')]),
+ cfg.IntOpt('ping_size',
+ default=56,
+ help="The packet size for ping packets originating "
+ "from remote linux hosts",
+ deprecated_opts=[cfg.DeprecatedOpt('ping_size',
+ group='compute')]),
+ cfg.IntOpt('ping_count',
+ default=1,
+ help="The number of ping packets originating from remote "
+ "linux hosts",
+ deprecated_opts=[cfg.DeprecatedOpt('ping_count',
+ group='compute')]),
+ cfg.StrOpt('floating_ip_range',
+ default='10.0.0.0/29',
+ help='Unallocated floating IP range, which will be used to '
+ 'test the floating IP bulk feature for CRUD operation. '
+ 'This block must not overlap an existing floating IP '
+ 'pool.',
+ deprecated_opts=[cfg.DeprecatedOpt('floating_ip_range',
+ group='compute')]),
+ cfg.StrOpt('network_for_ssh',
+ default='public',
+ help="Network used for SSH connections. Ignored if "
+ "use_floatingip_for_ssh=true or run_validation=false.",
+ deprecated_opts=[cfg.DeprecatedOpt('network_for_ssh',
group='compute')]),
]
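
The DeprecatedOpt entries above are what keep existing tempest.conf files working while these options migrate from [compute] to [validation]. A self-contained sketch of that mechanism using plain oslo.config (the group and the single option simply mirror one entry from the list above):

# Illustrative only.
from oslo_config import cfg

validation_group = cfg.OptGroup(name='validation', title='SSH validation')
opts = [
    cfg.StrOpt('image_ssh_user',
               default='root',
               help='User name used to authenticate to an instance.',
               deprecated_opts=[cfg.DeprecatedOpt('image_ssh_user',
                                                  group='compute')]),
]

CONF = cfg.ConfigOpts()
CONF.register_group(validation_group)
CONF.register_opts(opts, group=validation_group)
CONF([])
# A value set under [compute] image_ssh_user is still honoured here.
print(CONF.validation.image_ssh_user)
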
@@ -749,7 +737,7 @@
deprecated_for_removal=True),
cfg.ListOpt('backend_names',
default=['BACKEND_1', 'BACKEND_2'],
- help='A list of backend names seperated by comma .'
+ help='A list of backend names separated by comma. '
'The backend name must be declared in cinder.conf',
deprecated_opts=[cfg.DeprecatedOpt('BACKEND_1',
group='volume'),
@@ -797,9 +785,10 @@
default=True,
help="Is the v2 volume API enabled"),
cfg.BoolOpt('bootable',
- default=False,
+ default=True,
help='Update bootable status of a volume '
- 'Not implemented on icehouse ')
+ 'Not implemented on icehouse ',
+ deprecated_for_removal=True)
]
@@ -1013,54 +1002,6 @@
help="List of enabled data processing plugins")
]
-
-boto_group = cfg.OptGroup(name='boto',
- title='EC2/S3 options')
-BotoGroup = [
- cfg.StrOpt('ec2_url',
- default="http://localhost:8773/services/Cloud",
- help="EC2 URL"),
- cfg.StrOpt('s3_url',
- default="http://localhost:8080",
- help="S3 URL"),
- cfg.StrOpt('aws_secret',
- help="AWS Secret Key",
- secret=True),
- cfg.StrOpt('aws_access',
- help="AWS Access Key"),
- cfg.StrOpt('aws_zone',
- default="nova",
- help="AWS Zone for EC2 tests"),
- cfg.StrOpt('s3_materials_path',
- default="/opt/stack/devstack/files/images/"
- "s3-materials/cirros-0.3.0",
- help="S3 Materials Path"),
- cfg.StrOpt('ari_manifest',
- default="cirros-0.3.0-x86_64-initrd.manifest.xml",
- help="ARI Ramdisk Image manifest"),
- cfg.StrOpt('ami_manifest',
- default="cirros-0.3.0-x86_64-blank.img.manifest.xml",
- help="AMI Machine Image manifest"),
- cfg.StrOpt('aki_manifest',
- default="cirros-0.3.0-x86_64-vmlinuz.manifest.xml",
- help="AKI Kernel Image manifest"),
- cfg.StrOpt('instance_type',
- default="m1.tiny",
- help="Instance type"),
- cfg.IntOpt('http_socket_timeout',
- default=3,
- help="boto Http socket timeout"),
- cfg.IntOpt('num_retries',
- default=1,
- help="boto num_retries on error"),
- cfg.IntOpt('build_timeout',
- default=60,
- help="Status Change Timeout"),
- cfg.IntOpt('build_interval',
- default=1,
- help="Status Change Test Interval"),
-]
-
stress_group = cfg.OptGroup(name='stress', title='Stress Test Options')
StressGroup = [
@@ -1105,7 +1046,8 @@
cfg.StrOpt('img_dir',
default='/opt/stack/new/devstack/files/images/'
'cirros-0.3.1-x86_64-uec',
- help='Directory containing image files'),
+ help='Directory containing image files',
+ deprecated_for_removal=True),
cfg.StrOpt('img_file', deprecated_name='qcow2_img_file',
default='cirros-0.3.1-x86_64-disk.img',
help='Image file name'),
@@ -1119,16 +1061,16 @@
'Use for custom images which require them'),
cfg.StrOpt('ami_img_file',
default='cirros-0.3.1-x86_64-blank.img',
- help='AMI image file name'),
+ help='AMI image file name',
+ deprecated_for_removal=True),
cfg.StrOpt('ari_img_file',
default='cirros-0.3.1-x86_64-initrd',
- help='ARI image file name'),
+ help='ARI image file name',
+ deprecated_for_removal=True),
cfg.StrOpt('aki_img_file',
default='cirros-0.3.1-x86_64-vmlinuz',
- help='AKI image file name'),
- cfg.StrOpt('ssh_user',
- default='cirros',
- help='ssh username for the image file'),
+ help='AKI image file name',
+ deprecated_for_removal=True),
cfg.IntOpt(
'large_ops_number',
default=0,
@@ -1322,7 +1264,6 @@
(dashboard_group, DashboardGroup),
(data_processing_group, DataProcessingGroup),
(data_processing_feature_group, DataProcessingFeaturesGroup),
- (boto_group, BotoGroup),
(stress_group, StressGroup),
(scenario_group, ScenarioGroup),
(service_available_group, ServiceAvailableGroup),
@@ -1392,7 +1333,6 @@
self.data_processing = _CONF['data-processing']
self.data_processing_feature_enabled = _CONF[
'data-processing-feature-enabled']
- self.boto = _CONF.boto
self.stress = _CONF.stress
self.scenario = _CONF.scenario
self.service_available = _CONF.service_available
@@ -1406,6 +1346,7 @@
_CONF.set_default('alt_domain_name',
self.auth.default_credentials_domain_name,
group='identity')
+ logging.tempest_set_log_file('tempest.log')
def __init__(self, parse_conf=True, config_path=None):
"""Initialize a configuration from a conf directory and conf file."""
@@ -1436,6 +1377,13 @@
_CONF([], project='tempest', default_config_files=config_files)
else:
_CONF([], project='tempest')
+
+ logging_cfg_path = "%s/logging.conf" % os.path.dirname(path)
+ if (not hasattr(_CONF, 'log_config_append') and
+ os.path.isfile(logging_cfg_path)):
+ # if logging conf is in place we need to set log_config_append
+ _CONF.log_config_append = logging_cfg_path
+
logging.setup(_CONF, 'tempest')
LOG = logging.getLogger('tempest')
LOG.info("Using tempest config file %s" % path)
@@ -1462,6 +1410,8 @@
def __getattr__(self, attr):
if not self._config:
self._fix_log_levels()
+ lock_dir = os.path.join(tempfile.gettempdir(), 'tempest-lock')
+ lockutils.set_defaults(lock_dir)
self._config = TempestConfigPrivate(config_path=self._path)
return getattr(self._config, attr)
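
The __getattr__ hunk above seeds oslo.concurrency with a default lock directory under the system temp dir before the config is parsed, so external locks work out of the box. A small, illustrative-only sketch of what that default enables (assuming oslo.concurrency is installed):

# Illustrative only.
import os
import tempfile

from oslo_concurrency import lockutils

lockutils.set_defaults(os.path.join(tempfile.gettempdir(), 'tempest-lock'))

@lockutils.synchronized('example-lock', external=True)
def exclusive_step():
    # Serialized across processes via a file lock in the directory above.
    return 'done'

print(exclusive_step())
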
diff --git a/tempest/exceptions.py b/tempest/exceptions.py
index 1d725af..931737d 100644
--- a/tempest/exceptions.py
+++ b/tempest/exceptions.py
@@ -181,6 +181,11 @@
"be of format MajorNum.MinorNum or string 'latest'.")
+class JSONSchemaNotFound(TempestException):
+ message = ("JSON Schema for %(version)s is not found in \n"
+ " %(schema_versions_info)s")
+
+
class CommandFailed(Exception):
def __init__(self, returncode, cmd, output, stderr):
super(CommandFailed, self).__init__()
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index 1dad3ba..88598de 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -185,7 +185,8 @@
# the end of a method
return
- if 'self.get(' not in line:
+ if 'self.get(' not in line and ('self.show_resource(' not in line and
+ 'self.list_resources(' not in line):
continue
if METHOD_GET_RESOURCE.match(logical_line):
@@ -211,7 +212,7 @@
# the end of a method
return
- if 'self.delete(' not in line:
+ if 'self.delete(' not in line and 'self.delete_resource(' not in line:
continue
if METHOD_DELETE_RESOURCE.match(logical_line):
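
Illustrative only: the relaxed T110/T111 checks now also accept the show_resource/list_resources/delete_resource helpers inside GET and DELETE service-client methods. A toy client showing the accepted shapes (the base-class stubs below stand in for the real REST helpers and are not part of Tempest):

class _FakeRestBase(object):
    # Stand-ins for the helpers named in the check above.
    def show_resource(self, uri):
        return {'uri': uri}

    def list_resources(self, uri):
        return {'uri': uri}

    def delete_resource(self, uri):
        return {'uri': uri}


class ExampleClient(_FakeRestBase):
    def show_example(self, example_id):
        # T110: a show_* method may call self.show_resource() as well as
        # self.get().
        return self.show_resource('/examples/%s' % example_id)

    def list_examples(self):
        # T110: a list_* method may call self.list_resources().
        return self.list_resources('/examples')

    def delete_example(self, example_id):
        # T111: a delete_* method may call self.delete_resource() as well
        # as self.delete().
        return self.delete_resource('/examples/%s' % example_id)
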
diff --git a/tempest/hacking/ignored_list_T110.txt b/tempest/hacking/ignored_list_T110.txt
index 8de3151..ce69931 100644
--- a/tempest/hacking/ignored_list_T110.txt
+++ b/tempest/hacking/ignored_list_T110.txt
@@ -6,3 +6,4 @@
./tempest/services/volume/base/base_qos_client.py
./tempest/services/volume/base/base_backups_client.py
./tempest/services/baremetal/base.py
+./tempest/services/network/json/network_client.py
diff --git a/tempest/hacking/ignored_list_T111.txt b/tempest/hacking/ignored_list_T111.txt
index 8017e76..20d58d2 100644
--- a/tempest/hacking/ignored_list_T111.txt
+++ b/tempest/hacking/ignored_list_T111.txt
@@ -1 +1,2 @@
./tempest/services/baremetal/base.py
+./tempest/services/network/json/quotas_client.py
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 3e6a947..6776220 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -49,16 +49,17 @@
cls.flavors_client = cls.manager.flavors_client
cls.compute_floating_ips_client = (
cls.manager.compute_floating_ips_client)
- # Glance image client v1
- cls.image_client = cls.manager.image_client
+ if CONF.service_available.glance:
+ # Glance image client v1
+ cls.image_client = cls.manager.image_client
# Compute image client
- cls.images_client = cls.manager.images_client
+ cls.compute_images_client = cls.manager.compute_images_client
cls.keypairs_client = cls.manager.keypairs_client
# Nova security groups client
cls.compute_security_groups_client = (
cls.manager.compute_security_groups_client)
- cls.security_group_rules_client = (
- cls.manager.security_group_rules_client)
+ cls.compute_security_group_rules_client = (
+ cls.manager.compute_security_group_rules_client)
cls.servers_client = cls.manager.servers_client
cls.interface_client = cls.manager.interfaces_client
# Neutron network client
@@ -67,6 +68,9 @@
cls.ports_client = cls.manager.ports_client
cls.subnets_client = cls.manager.subnets_client
cls.floating_ips_client = cls.manager.floating_ips_client
+ cls.security_groups_client = cls.manager.security_groups_client
+ cls.security_group_rules_client = (
+ cls.manager.security_group_rules_client)
# Heat client
cls.orchestration_client = cls.manager.orchestration_client
@@ -158,7 +162,7 @@
self.addCleanup(client.delete_keypair, name)
return body['keypair']
- def create_server(self, name=None, image=None, flavor=None,
+ def create_server(self, name=None, image_id=None, flavor=None,
validatable=False, wait_until=None,
wait_on_delete=True, clients=None, **kwargs):
"""Wrapper utility that returns a test server.
@@ -196,7 +200,7 @@
# to pass to create_port
if 'security_groups' in kwargs:
security_groups =\
- clients.network_client.list_security_groups(
+ clients.security_groups_client.list_security_groups(
).get('security_groups')
sec_dict = dict([(s['name'], s['id'])
for s in security_groups])
@@ -238,7 +242,8 @@
clients,
tenant_network=tenant_network,
wait_until=wait_until,
- **kwargs)
+ name=name, flavor=flavor,
+ image_id=image_id, **kwargs)
# TODO(jlanoux) Move wait_on_delete in compute.py
if wait_on_delete:
@@ -259,9 +264,13 @@
imageRef=None, volume_type=None, wait_on_delete=True):
if name is None:
name = data_utils.rand_name(self.__class__.__name__)
- volume = self.volumes_client.create_volume(
- size=size, display_name=name, snapshot_id=snapshot_id,
- imageRef=imageRef, volume_type=volume_type)['volume']
+ kwargs = {'display_name': name,
+ 'snapshot_id': snapshot_id,
+ 'imageRef': imageRef,
+ 'volume_type': volume_type}
+ if size is not None:
+ kwargs.update({'size': size})
+ volume = self.volumes_client.create_volume(**kwargs)['volume']
if wait_on_delete:
self.addCleanup(self.volumes_client.wait_for_resource_deletion,
@@ -288,7 +297,7 @@
def _create_loginable_secgroup_rule(self, secgroup_id=None):
_client = self.compute_security_groups_client
- _client_rules = self.security_group_rules_client
+ _client_rules = self.compute_security_group_rules_client
if secgroup_id is None:
sgs = _client.list_security_groups()['security_groups']
for sg in sgs:
@@ -343,23 +352,19 @@
return secgroup
- def get_remote_client(self, server_or_ip, username=None, private_key=None,
- log_console_of_servers=None):
+ def get_remote_client(self, server_or_ip, username=None, private_key=None):
"""Get a SSH client to a remote server
@param server_or_ip a server object as returned by Tempest compute
client or an IP address to connect to
@param username name of the Linux account on the remote server
@param private_key the SSH private key to use
- @param log_console_of_servers a list of server objects. Each server
- in the list will have its console printed in the logs in case the
- SSH connection failed to be established
@return a RemoteClient object
"""
if isinstance(server_or_ip, six.string_types):
ip = server_or_ip
else:
- addrs = server_or_ip['addresses'][CONF.compute.network_for_ssh]
+ addrs = server_or_ip['addresses'][CONF.validation.network_for_ssh]
try:
ip = (addr['addr'] for addr in addrs if
netaddr.valid_ipv4(addr['addr'])).next()
@@ -368,7 +373,7 @@
"remote server.")
if username is None:
- username = CONF.scenario.ssh_user
+ username = CONF.validation.image_ssh_user
# Set this with 'keypair' or others to log in with keypair or
# username/password.
if CONF.validation.auth_method == 'keypair':
@@ -376,7 +381,7 @@
if private_key is None:
private_key = self.keypair['private_key']
else:
- password = CONF.compute.image_ssh_password
+ password = CONF.validation.image_ssh_password
private_key = None
linux_client = remote_client.RemoteClient(ip, username,
pkey=private_key,
@@ -390,10 +395,7 @@
if caller:
message = '(%s) %s' % (caller, message)
LOG.exception(message)
- # If we don't explicitly set for which servers we want to
- # log the console output then all the servers will be logged.
- # See the definition of _log_console_output()
- self._log_console_output(log_console_of_servers)
+ self._log_console_output()
raise
return linux_client
@@ -470,7 +472,7 @@
# Glance client
_image_client = self.image_client
# Compute client
- _images_client = self.images_client
+ _images_client = self.compute_images_client
if name is None:
name = data_utils.rand_name('scenario-snapshot')
LOG.debug("Creating a snapshot image for server: %s", server['name'])
@@ -728,7 +730,7 @@
def _list_agents(self, *args, **kwargs):
"""List agents using admin creds """
- agents_list = self.admin_manager.network_client.list_agents(
+ agents_list = self.admin_manager.network_agents_client.list_agents(
*args, **kwargs)
return agents_list['agents']
@@ -812,8 +814,9 @@
def _get_server_port_id_and_ip4(self, server, ip_addr=None):
ports = self._list_ports(device_id=server['id'], status='ACTIVE',
fixed_ip=ip_addr)
- # it might happen here that this port has more then one ip address
- # as in case of dual stack- when this port is created on 2 subnets
+        # A port can have more than one IP address in some cases.
+        # If the network is dual-stack (IPv4 + IPv6), the port is associated
+        # with two subnets.
port_map = [(p["id"], fxip["ip_address"])
for p in ports
for fxip in p["fixed_ips"]
@@ -930,8 +933,8 @@
try:
source.ping_host(dest, nic=nic)
except lib_exc.SSHExecCommandFailed:
- LOG.warn('Failed to ping IP: %s via a ssh connection from: %s.'
- % (dest, source.ssh_client.host))
+ LOG.warning('Failed to ping IP: %s via a ssh connection '
+ 'from: %s.' % (dest, source.ssh_client.host))
return not should_succeed
return should_succeed
@@ -939,19 +942,25 @@
CONF.validation.ping_timeout,
1)
- def _create_security_group(self, client=None, tenant_id=None,
- namestart='secgroup-smoke'):
- if client is None:
- client = self.network_client
+ def _create_security_group(self, security_group_rules_client=None,
+ tenant_id=None,
+ namestart='secgroup-smoke',
+ security_groups_client=None):
+ if security_group_rules_client is None:
+ security_group_rules_client = self.security_group_rules_client
+ if security_groups_client is None:
+ security_groups_client = self.security_groups_client
if tenant_id is None:
- tenant_id = client.tenant_id
- secgroup = self._create_empty_security_group(namestart=namestart,
- client=client,
- tenant_id=tenant_id)
+ tenant_id = security_groups_client.tenant_id
+ secgroup = self._create_empty_security_group(
+ namestart=namestart, client=security_groups_client,
+ tenant_id=tenant_id)
# Add rules to the security group
- rules = self._create_loginable_secgroup_rule(client=client,
- secgroup=secgroup)
+ rules = self._create_loginable_secgroup_rule(
+ security_group_rules_client=security_group_rules_client,
+ secgroup=secgroup,
+ security_groups_client=security_groups_client)
for rule in rules:
self.assertEqual(tenant_id, rule.tenant_id)
self.assertEqual(secgroup.id, rule.security_group_id)
@@ -969,7 +978,7 @@
:returns: DeletableSecurityGroup -- containing the secgroup created
"""
if client is None:
- client = self.network_client
+ client = self.security_groups_client
if not tenant_id:
tenant_id = client.tenant_id
sg_name = data_utils.rand_name(namestart)
@@ -994,7 +1003,7 @@
:returns: DeletableSecurityGroup -- default secgroup for given tenant
"""
if client is None:
- client = self.network_client
+ client = self.security_groups_client
if not tenant_id:
tenant_id = client.tenant_id
sgs = [
@@ -1006,8 +1015,10 @@
return net_resources.DeletableSecurityGroup(client=client,
**sgs[0])
- def _create_security_group_rule(self, secgroup=None, client=None,
- tenant_id=None, **kwargs):
+ def _create_security_group_rule(self, secgroup=None,
+ sec_group_rules_client=None,
+ tenant_id=None,
+ security_groups_client=None, **kwargs):
"""Create a rule from a dictionary of rule parameters.
         Create a rule in a secgroup. If secgroup is not defined, it will search for
@@ -1025,21 +1036,23 @@
port_range_max: 22
}
"""
- if client is None:
- client = self.network_client
+ if sec_group_rules_client is None:
+ sec_group_rules_client = self.security_group_rules_client
+ if security_groups_client is None:
+ security_groups_client = self.security_groups_client
if not tenant_id:
- tenant_id = client.tenant_id
+ tenant_id = security_groups_client.tenant_id
if secgroup is None:
- secgroup = self._default_security_group(client=client,
- tenant_id=tenant_id)
+ secgroup = self._default_security_group(
+ client=security_groups_client, tenant_id=tenant_id)
ruleset = dict(security_group_id=secgroup.id,
tenant_id=secgroup.tenant_id)
ruleset.update(kwargs)
- sg_rule = client.create_security_group_rule(**ruleset)
+ sg_rule = sec_group_rules_client.create_security_group_rule(**ruleset)
sg_rule = net_resources.DeletableSecurityGroupRule(
- client=client,
+ client=sec_group_rules_client,
**sg_rule['security_group_rule']
)
self.addCleanup(self.delete_wrapper, sg_rule.delete)
@@ -1048,7 +1061,9 @@
return sg_rule
- def _create_loginable_secgroup_rule(self, client=None, secgroup=None):
+ def _create_loginable_secgroup_rule(self, security_group_rules_client=None,
+ secgroup=None,
+ security_groups_client=None):
"""Create loginable security group rule
These rules are intended to permit inbound ssh and icmp
@@ -1057,8 +1072,10 @@
belonging to the same security group.
"""
- if client is None:
- client = self.network_client
+ if security_group_rules_client is None:
+ security_group_rules_client = self.security_group_rules_client
+ if security_groups_client is None:
+ security_groups_client = self.security_groups_client
rules = []
rulesets = [
dict(
@@ -1077,12 +1094,16 @@
ethertype='IPv6',
)
]
+ sec_group_rules_client = security_group_rules_client
for ruleset in rulesets:
for r_direction in ['ingress', 'egress']:
ruleset['direction'] = r_direction
try:
sg_rule = self._create_security_group_rule(
- client=client, secgroup=secgroup, **ruleset)
+ sec_group_rules_client=sec_group_rules_client,
+ secgroup=secgroup,
+ security_groups_client=security_groups_client,
+ **ruleset)
except lib_exc.Conflict as ex:
# if rule already exist - skip rule and continue
msg = 'Security group rule already exists'
@@ -1094,12 +1115,6 @@
return rules
- def _ssh_to_server(self, server, private_key):
- ssh_login = CONF.compute.image_ssh_user
- return self.get_remote_client(server,
- username=ssh_login,
- private_key=private_key)
-
def _get_router(self, client=None, tenant_id=None):
"""Retrieve a router for the given tenant id.
@@ -1364,7 +1379,7 @@
randomized_name = data_utils.rand_name('scenario-type-' + name)
LOG.debug("Creating a volume type: %s", randomized_name)
body = client.create_volume_type(
- randomized_name)['volume_type']
+ name=randomized_name)['volume_type']
self.assertIn('id', body)
self.addCleanup(client.delete_volume_type, body['id'])
return body
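
The create_volume change above builds the request body conditionally so that 'size' is only sent when the caller provides one, leaving the default to the volume service. A standalone, illustrative-only sketch of that pattern:

# Illustrative only.
def build_volume_kwargs(name, snapshot_id=None, imageRef=None,
                        volume_type=None, size=None):
    kwargs = {'display_name': name,
              'snapshot_id': snapshot_id,
              'imageRef': imageRef,
              'volume_type': volume_type}
    if size is not None:
        # Only include the size when it was explicitly requested.
        kwargs['size'] = size
    return kwargs


print(build_volume_kwargs('scenario-vol'))          # no 'size' key
print(build_volume_kwargs('scenario-vol', size=1))  # size forwarded
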
diff --git a/tempest/scenario/test_aggregates_basic_ops.py b/tempest/scenario/test_aggregates_basic_ops.py
index 62c0262..cace90b 100644
--- a/tempest/scenario/test_aggregates_basic_ops.py
+++ b/tempest/scenario/test_aggregates_basic_ops.py
@@ -13,17 +13,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log as logging
-
from tempest.common import tempest_fixtures as fixtures
from tempest.common.utils import data_utils
from tempest.scenario import manager
from tempest import test
-LOG = logging.getLogger(__name__)
-
-
class TestAggregatesBasicOps(manager.ScenarioTest):
"""Creates an aggregate within an availability zone
@@ -48,16 +43,14 @@
def _create_aggregate(self, **kwargs):
aggregate = (self.aggregates_client.create_aggregate(**kwargs)
['aggregate'])
- self.addCleanup(self._delete_aggregate, aggregate)
+ self.addCleanup(self.aggregates_client.delete_aggregate,
+ aggregate['id'])
aggregate_name = kwargs['name']
availability_zone = kwargs['availability_zone']
self.assertEqual(aggregate['name'], aggregate_name)
self.assertEqual(aggregate['availability_zone'], availability_zone)
return aggregate
- def _delete_aggregate(self, aggregate):
- self.aggregates_client.delete_aggregate(aggregate['id'])
-
def _get_host_name(self):
hosts = self.hosts_client.list_hosts()['hosts']
self.assertTrue(len(hosts) >= 1)
diff --git a/tempest/scenario/test_baremetal_basic_ops.py b/tempest/scenario/test_baremetal_basic_ops.py
index 9415629..93b32f7 100644
--- a/tempest/scenario/test_baremetal_basic_ops.py
+++ b/tempest/scenario/test_baremetal_basic_ops.py
@@ -123,23 +123,9 @@
# the same size as our flavor definition.
eph_size = self.get_flavor_ephemeral_size()
if eph_size:
- preserve_ephemeral = True
-
self.verify_partition(vm_client, 'ephemeral0', '/mnt', eph_size)
# Create the test file
- timestamp = self.create_timestamp(
+ self.create_timestamp(
floating_ip, private_key=self.keypair['private_key'])
- else:
- preserve_ephemeral = False
- # Rebuild and preserve the ephemeral partition if it exists
- self.rebuild_instance(preserve_ephemeral)
- self.verify_connectivity()
-
- # Check that we maintained our data
- if eph_size:
- self.verify_partition(vm_client, 'ephemeral0', '/mnt', eph_size)
- timestamp2 = self.get_timestamp(
- floating_ip, private_key=self.keypair['private_key'])
- self.assertEqual(timestamp, timestamp2)
self.terminate_instance()
diff --git a/tempest/scenario/test_dashboard_basic_ops.py b/tempest/scenario/test_dashboard_basic_ops.py
index cb6b968..5d4f7b3 100644
--- a/tempest/scenario/test_dashboard_basic_ops.py
+++ b/tempest/scenario/test_dashboard_basic_ops.py
@@ -97,9 +97,13 @@
req = request.Request(login_url)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
req.add_header('Referer', CONF.dashboard.dashboard_url)
+
+ # Pass the default domain name regardless of the auth version in order
+ # to test the scenario of when horizon is running with keystone v3
params = {'username': username,
'password': password,
'region': parser.region,
+ 'domain': CONF.auth.default_credentials_domain_name,
'csrfmiddlewaretoken': parser.csrf_token}
self.opener.open(req, parse.urlencode(params))
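
Illustrative only: the shape of the horizon login form data after this change. The values below are placeholders; only the keys come from the code above.

# Placeholder values -- only the keys mirror the test.
params = {
    'username': 'demo',
    'password': 'secret',
    'region': 'http://keystone.example.test/v3',
    'domain': 'Default',
    'csrfmiddlewaretoken': 'csrf-token-from-the-login-page',
}
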
diff --git a/tempest/scenario/test_large_ops.py b/tempest/scenario/test_large_ops.py
index b549ecb..402077f 100644
--- a/tempest/scenario/test_large_ops.py
+++ b/tempest/scenario/test_large_ops.py
@@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log as logging
from tempest_lib import exceptions as lib_exc
from tempest.common import fixed_network
@@ -26,9 +25,6 @@
CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
-
class TestLargeOpsScenario(manager.ScenarioTest):
"""Test large operations.
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 2ef3cee..f7c7434 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log as logging
-
from tempest.common import custom_matchers
from tempest.common import waiters
from tempest import config
@@ -24,8 +22,6 @@
CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
class TestMinimumBasicScenario(manager.ScenarioTest):
@@ -51,13 +47,6 @@
"""
- def _wait_for_server_status(self, server, status):
- server_id = server['id']
- # Raise on error defaults to True, which is consistent with the
- # original function from scenario tests here
- waiters.wait_for_server_status(self.servers_client,
- server_id, status)
-
def nova_list(self):
servers = self.servers_client.list_servers()
# The list servers in the compute client is inconsistent...
@@ -73,19 +62,14 @@
server, custom_matchers.MatchesDictExceptForKeys(
got_server, excluded_keys=excluded_keys))
- def cinder_create(self):
- return self.create_volume()
-
- def cinder_list(self):
- return self.volumes_client.list_volumes()['volumes']
-
def cinder_show(self, volume):
got_volume = self.volumes_client.show_volume(volume['id'])['volume']
self.assertEqual(volume, got_volume)
def nova_reboot(self, server):
self.servers_client.reboot_server(server['id'], type='SOFT')
- self._wait_for_server_status(server, 'ACTIVE')
+ waiters.wait_for_server_status(self.servers_client,
+ server['id'], 'ACTIVE')
def check_partitions(self):
# NOTE(andreaf) The device name may be different on different guest OS
@@ -125,8 +109,8 @@
self.nova_show(server)
- volume = self.cinder_create()
- volumes = self.cinder_list()
+ volume = self.create_volume()
+ volumes = self.volumes_client.list_volumes()['volumes']
self.assertIn(volume['id'], [x['id'] for x in volumes])
self.cinder_show(volume)
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index 8aac98e..2cbe6dc 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log as logging
import testtools
from tempest.common.utils import data_utils
@@ -23,7 +22,6 @@
from tempest import test
CONF = config.CONF
-LOG = logging.getLogger(__name__)
class TestNetworkAdvancedServerOps(manager.NetworkScenarioTest):
@@ -74,7 +72,7 @@
def _check_network_connectivity(self, server, keypair, floating_ip,
should_connect=True):
- username = CONF.compute.image_ssh_user
+ username = CONF.validation.image_ssh_user
private_key = keypair['private_key']
self._check_tenant_network_connectivity(
server, username, private_key,
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index 20ccc59..79a5099 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -172,7 +172,7 @@
return self.keypairs[server['key_name']]['private_key']
def _check_tenant_network_connectivity(self):
- ssh_login = CONF.compute.image_ssh_user
+ ssh_login = CONF.validation.image_ssh_user
for server in self.servers:
# call the common method in the parent class
super(TestNetworkBasicOps, self).\
@@ -195,7 +195,7 @@
:param should_check_floating_ip_status: bool. should status of
floating_ip be checked or not
"""
- ssh_login = CONF.compute.image_ssh_user
+ ssh_login = CONF.validation.image_ssh_user
floating_ip, server = self.floating_ip_tuple
ip_address = floating_ip.floating_ip_address
private_key = None
@@ -240,8 +240,8 @@
old_floating_ip, server = self.floating_ip_tuple
ip_address = old_floating_ip.floating_ip_address
private_key = self._get_server_key(server)
- ssh_client = self.get_remote_client(ip_address,
- private_key=private_key)
+ ssh_client = self.get_remote_client(
+ ip_address, private_key=private_key)
old_nic_list = self._get_server_nics(ssh_client)
# get a port from a list of one item
port_list = self._list_ports(device_id=server['id'])
@@ -336,7 +336,8 @@
should_connect=True):
ip_address = floating_ip.floating_ip_address
private_key = self._get_server_key(self.floating_ip_tuple.server)
- ssh_source = self._ssh_to_server(ip_address, private_key)
+ ssh_source = self.get_remote_client(
+ ip_address, private_key=private_key)
for remote_ip in address_list:
if should_connect:
@@ -553,7 +554,8 @@
floating_ip, server = self.floating_ip_tuple
ip_address = floating_ip.floating_ip_address
private_key = self._get_server_key(server)
- ssh_client = self._ssh_to_server(ip_address, private_key)
+ ssh_client = self.get_remote_client(
+ ip_address, private_key=private_key)
dns_servers = [initial_dns_server]
servers = ssh_client.get_dns_servers()
@@ -683,10 +685,10 @@
list_hosts = (self.admin_manager.network_client.
list_l3_agents_hosting_router)
- schedule_router = (self.admin_manager.network_client.
- add_router_to_l3_agent)
- unschedule_router = (self.admin_manager.network_client.
- remove_router_from_l3_agent)
+ schedule_router = (self.admin_manager.network_agents_client.
+ create_router_on_l3_agent)
+ unschedule_router = (self.admin_manager.network_agents_client.
+ delete_router_from_l3_agent)
agent_list = set(a["id"] for a in
self._list_agents(agent_type="L3 agent"))
@@ -744,20 +746,28 @@
def test_port_security_macspoofing_port(self):
"""Tests port_security extension enforces mac spoofing
- 1. create a new network
- 2. connect VM to new network
- 4. check VM can ping new network DHCP port
- 5. spoof mac on new new network interface
- 6. check Neutron enforces mac spoofing and blocks pings via spoofed
- interface
- 7. disable port-security on the spoofed port
- 8. check Neutron allows pings via spoofed interface
+ Neutron security groups always apply anti-spoof rules on the VMs. This
+ allows traffic to originate and terminate at the VM as expected, but
+        prevents traffic from passing through the VM. Anti-spoof rules are not
+ required in cases where the VM routes traffic through it.
+
+ The test steps are :
+ 1. Create a new network.
+ 2. Connect (hotplug) the VM to a new network.
+ 3. Check the VM can ping the DHCP interface of this network.
+ 4. Spoof the mac address of the new VM interface.
+ 5. Check the Security Group enforces mac spoofing and blocks pings via
+ spoofed interface (VM cannot ping the DHCP interface).
+        6. Disable port-security on the spoofed port (set the flag to false).
+        7. Retest the 3rd step and check that the Security Group allows pings via
+ the spoofed interface.
"""
+
spoof_mac = "00:00:00:00:00:01"
# Create server
self._setup_network_and_servers()
- self.check_public_network_connectivity(should_connect=False)
+ self.check_public_network_connectivity(should_connect=True)
self._create_new_network()
self._hotplug_server()
fip, server = self.floating_ip_tuple
diff --git a/tempest/scenario/test_network_v6.py b/tempest/scenario/test_network_v6.py
index d6ad46a..cc28873 100644
--- a/tempest/scenario/test_network_v6.py
+++ b/tempest/scenario/test_network_v6.py
@@ -14,7 +14,6 @@
# under the License.
import functools
-from oslo_log import log as logging
import six
from tempest import config
@@ -23,7 +22,6 @@
CONF = config.CONF
-LOG = logging.getLogger(__name__)
class TestGettingAddress(manager.NetworkScenarioTest):
@@ -114,7 +112,7 @@
return ips
def prepare_server(self, networks=None):
- username = CONF.compute.image_ssh_user
+ username = CONF.validation.image_ssh_user
networks = networks or [self.network]
diff --git a/tempest/scenario/test_object_storage_basic_ops.py b/tempest/scenario/test_object_storage_basic_ops.py
index 98dd705..63ffa0b 100644
--- a/tempest/scenario/test_object_storage_basic_ops.py
+++ b/tempest/scenario/test_object_storage_basic_ops.py
@@ -13,16 +13,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log as logging
-
from tempest import config
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
class TestObjectStorageBasicOps(manager.ObjectStorageScenarioTest):
"""Test swift basic ops.
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index e266dc2..18bd764 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log as logging
-
from tempest import clients
from tempest.common.utils import data_utils
from tempest import config
@@ -23,8 +21,6 @@
CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
class TestSecurityGroupsBasicOps(manager.NetworkScenarioTest):
@@ -137,6 +133,9 @@
msg = ('Either tenant_networks_reachable must be "true", or '
'public_network_id must be defined.')
raise cls.skipException(msg)
+ if not test.is_extension_enabled('security-group', 'network'):
+ msg = "security-group extension not enabled."
+ raise cls.skipException(msg)
@classmethod
def setup_credentials(cls):
@@ -176,14 +175,14 @@
access_sg = self._create_empty_security_group(
namestart='secgroup_access-',
tenant_id=tenant.creds.tenant_id,
- client=tenant.manager.network_client
+ client=tenant.manager.security_groups_client
)
# don't use default secgroup since it allows in-tenant traffic
def_sg = self._create_empty_security_group(
namestart='secgroup_general-',
tenant_id=tenant.creds.tenant_id,
- client=tenant.manager.network_client
+ client=tenant.manager.security_groups_client
)
tenant.security_groups.update(access=access_sg, default=def_sg)
ssh_rule = dict(
@@ -192,9 +191,11 @@
port_range_max=22,
direction='ingress',
)
- self._create_security_group_rule(secgroup=access_sg,
- client=tenant.manager.network_client,
- **ssh_rule)
+ sec_group_rules_client = tenant.manager.security_group_rules_client
+ self._create_security_group_rule(
+ secgroup=access_sg,
+ sec_group_rules_client=sec_group_rules_client,
+ **ssh_rule)
def _verify_network_details(self, tenant):
# Checks that we see the newly created network/subnet/router via
@@ -320,8 +321,8 @@
access_point_ssh = \
self.floating_ips[tenant.access_point['id']].floating_ip_address
private_key = tenant.keypair['private_key']
- access_point_ssh = self._ssh_to_server(access_point_ssh,
- private_key=private_key)
+ access_point_ssh = self.get_remote_client(
+ access_point_ssh, private_key=private_key)
return access_point_ssh
def _check_connectivity(self, access_point, ip, should_succeed=True):
@@ -372,9 +373,11 @@
protocol='icmp',
direction='ingress'
)
+ sec_group_rules_client = (
+ dest_tenant.manager.security_group_rules_client)
self._create_security_group_rule(
secgroup=dest_tenant.security_groups['default'],
- client=dest_tenant.manager.network_client,
+ sec_group_rules_client=sec_group_rules_client,
**ruleset
)
access_point_ssh = self._connect_to_access_point(source_tenant)
@@ -386,9 +389,11 @@
self._test_cross_tenant_block(dest_tenant, source_tenant)
# allow reverse traffic and check
+ sec_group_rules_client = (
+ source_tenant.manager.security_group_rules_client)
self._create_security_group_rule(
secgroup=source_tenant.security_groups['default'],
- client=source_tenant.manager.network_client,
+ sec_group_rules_client=sec_group_rules_client,
**ruleset
)
@@ -464,14 +469,15 @@
new_sg = self._create_empty_security_group(
namestart='secgroup_new-',
tenant_id=new_tenant.creds.tenant_id,
- client=new_tenant.manager.network_client)
+ client=new_tenant.manager.security_groups_client)
icmp_rule = dict(
protocol='icmp',
direction='ingress',
)
+ sec_group_rules_client = new_tenant.manager.security_group_rules_client
self._create_security_group_rule(
secgroup=new_sg,
- client=new_tenant.manager.network_client,
+ sec_group_rules_client=sec_group_rules_client,
**icmp_rule)
new_tenant.security_groups.update(new_sg=new_sg)
@@ -512,7 +518,7 @@
tenant = self.primary_tenant
ip = self._get_server_ip(tenant.access_point,
floating=self.floating_ip_access)
- ssh_login = CONF.compute.image_ssh_user
+ ssh_login = CONF.validation.image_ssh_user
private_key = tenant.keypair['private_key']
self.check_vm_connectivity(ip,
should_connect=False)
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index 239e120..6c24d04 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -69,10 +69,7 @@
image=self.image_ref, flavor=self.flavor_ref,
ssh=self.run_ssh, ssh_user=self.ssh_user))
- def add_keypair(self):
- self.keypair = self.create_keypair()
-
- def verify_ssh(self):
+ def verify_ssh(self, keypair):
if self.run_ssh:
# Obtain a floating IP
self.fip = self.create_floating_ip(self.instance)['ip']
@@ -80,7 +77,7 @@
self.ssh_client = self.get_remote_client(
server_or_ip=self.fip,
username=self.image_utils.ssh_user(self.image_ref),
- private_key=self.keypair['private_key'])
+ private_key=keypair['private_key'])
def verify_metadata(self):
if self.run_ssh and CONF.compute_feature_enabled.metadata_service:
@@ -123,19 +120,19 @@
@test.attr(type='smoke')
@test.services('compute', 'network')
def test_server_basicops(self):
- self.add_keypair()
+ keypair = self.create_keypair()
self.security_group = self._create_security_group()
security_groups = [{'name': self.security_group['name']}]
self.md = {'meta1': 'data1', 'meta2': 'data2', 'metaN': 'dataN'}
self.instance = self.create_server(
image_id=self.image_ref,
flavor=self.flavor_ref,
- key_name=self.keypair['name'],
+ key_name=keypair['name'],
security_groups=security_groups,
config_drive=CONF.compute_feature_enabled.config_drive,
metadata=self.md,
wait_until='ACTIVE')
- self.verify_ssh()
+ self.verify_ssh(keypair)
self.verify_metadata()
self.verify_metadata_on_config_drive()
self.servers_client.delete_server(self.instance['id'])
diff --git a/tempest/scenario/test_server_multinode.py b/tempest/scenario/test_server_multinode.py
index 7e0e41c..0cf72c3 100644
--- a/tempest/scenario/test_server_multinode.py
+++ b/tempest/scenario/test_server_multinode.py
@@ -14,8 +14,6 @@
# under the License.
-from oslo_log import log as logging
-
from tempest import config
from tempest import exceptions
from tempest.scenario import manager
@@ -23,14 +21,20 @@
CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
class TestServerMultinode(manager.ScenarioTest):
"""This is a set of tests specific to multinode testing."""
credentials = ['primary', 'admin']
@classmethod
+ def skip_checks(cls):
+ super(TestServerMultinode, cls).skip_checks()
+
+ if CONF.compute.min_compute_nodes < 2:
+ raise cls.skipException(
+ "Less than 2 compute nodes, skipping multinode tests.")
+
+ @classmethod
def setup_clients(cls):
super(TestServerMultinode, cls).setup_clients()
# Use admin client by default
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index faae800..f88fb14 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -60,14 +60,10 @@
if not CONF.volume_feature_enabled.snapshot:
raise cls.skipException("Cinder volume snapshots are disabled")
- def _wait_for_volume_snapshot_status(self, volume_snapshot, status):
- self.snapshots_client.wait_for_snapshot_status(volume_snapshot['id'],
- status)
-
def _create_volume_snapshot(self, volume):
snapshot_name = data_utils.rand_name('scenario-snapshot')
snapshot = self.snapshots_client.create_snapshot(
- volume['id'], display_name=snapshot_name)['snapshot']
+ volume_id=volume['id'], display_name=snapshot_name)['snapshot']
def cleaner():
self.snapshots_client.delete_snapshot(snapshot['id'])
@@ -78,29 +74,12 @@
except lib_exc.NotFound:
pass
self.addCleanup(cleaner)
- self._wait_for_volume_status(volume, 'available')
+ self.volumes_client.wait_for_volume_status(volume['id'], 'available')
self.snapshots_client.wait_for_snapshot_status(snapshot['id'],
'available')
self.assertEqual(snapshot_name, snapshot['display_name'])
return snapshot
- def _wait_for_volume_status(self, volume, status):
- self.volumes_client.wait_for_volume_status(volume['id'], status)
-
- def _create_volume(self, snapshot_id=None):
- return self.create_volume(snapshot_id=snapshot_id)
-
- def _attach_volume(self, server, volume):
- attached_volume = self.servers_client.attach_volume(
- server['id'], volumeId=volume['id'], device='/dev/%s'
- % CONF.compute.volume_device_name)['volumeAttachment']
- self.assertEqual(volume['id'], attached_volume['id'])
- self._wait_for_volume_status(attached_volume, 'in-use')
-
- def _detach_volume(self, server, volume):
- self.servers_client.detach_volume(server['id'], volume['id'])
- self._wait_for_volume_status(volume, 'available')
-
def _wait_for_volume_available_on_the_system(self, server_or_ip,
private_key):
ssh = self.get_remote_client(server_or_ip, private_key=private_key)
@@ -126,7 +105,7 @@
security_group = self._create_security_group()
# boot an instance and create a timestamp file in it
- volume = self._create_volume()
+ volume = self.create_volume()
server = self.create_server(
image_id=CONF.compute.image_ref,
key_name=keypair['name'],
@@ -136,13 +115,13 @@
# create and add floating IP to server1
ip_for_server = self.get_server_or_ip(server)
- self._attach_volume(server, volume)
+ self.nova_volume_attach(server, volume)
self._wait_for_volume_available_on_the_system(ip_for_server,
keypair['private_key'])
timestamp = self.create_timestamp(ip_for_server,
CONF.compute.volume_device_name,
private_key=keypair['private_key'])
- self._detach_volume(server, volume)
+ self.nova_volume_detach(server, volume)
# snapshot the volume
volume_snapshot = self._create_volume_snapshot(volume)
@@ -151,7 +130,7 @@
snapshot_image = self.create_server_snapshot(server=server)
# create second volume from the snapshot(volume2)
- volume_from_snapshot = self._create_volume(
+ volume_from_snapshot = self.create_volume(
snapshot_id=volume_snapshot['id'])
# boot second instance from the snapshot(instance2)
@@ -164,7 +143,7 @@
ip_for_snapshot = self.get_server_or_ip(server_from_snapshot)
# attach volume2 to instance2
- self._attach_volume(server_from_snapshot, volume_from_snapshot)
+ self.nova_volume_attach(server_from_snapshot, volume_from_snapshot)
self._wait_for_volume_available_on_the_system(ip_for_snapshot,
keypair['private_key'])
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 81ecda0..7b88025 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -70,7 +70,7 @@
create_kwargs.update(self._get_bdm(
vol_id, delete_on_termination=delete_on_termination))
return self.create_server(
- image='',
+ image_id='',
wait_until='ACTIVE',
**create_kwargs)
diff --git a/tempest/scenario/utils.py b/tempest/scenario/utils.py
index fa7c0c9..3cbb3bc 100644
--- a/tempest/scenario/utils.py
+++ b/tempest/scenario/utils.py
@@ -40,11 +40,11 @@
self.non_ssh_image_pattern = \
CONF.input_scenario.non_ssh_image_regex
# Setup clients
- self.images_client = os.images_client
+ self.compute_images_client = os.compute_images_client
self.flavors_client = os.flavors_client
def ssh_user(self, image_id):
- _image = self.images_client.show_image(image_id)['image']
+ _image = self.compute_images_client.show_image(image_id)['image']
for regex, user in self.ssh_users:
# First match wins
if re.match(regex, _image['name']) is not None:
@@ -57,14 +57,14 @@
string=str(image['name']))
def is_sshable_image(self, image_id):
- _image = self.images_client.show_image(image_id)['image']
+ _image = self.compute_images_client.show_image(image_id)['image']
return self._is_sshable_image(_image)
def _is_flavor_enough(self, flavor, image):
return image['minDisk'] <= flavor['disk']
def is_flavor_enough(self, flavor_id, image_id):
- _image = self.images_client.show_image(image_id)['image']
+ _image = self.compute_images_client.show_image(image_id)['image']
_flavor = self.flavors_client.show_flavor(flavor_id)['flavor']
return self._is_flavor_enough(_flavor, _image)
@@ -108,7 +108,7 @@
identity_version=CONF.identity.auth_version,
network_resources=network_resources)
os = clients.Manager(self.cred_provider.get_primary_creds())
- self.images_client = os.images_client
+ self.compute_images_client = os.compute_images_client
self.flavors_client = os.flavors_client
self.image_pattern = CONF.input_scenario.image_regex
self.flavor_pattern = CONF.input_scenario.flavor_regex
@@ -128,7 +128,7 @@
return []
if not hasattr(self, '_scenario_images'):
try:
- images = self.images_client.list_images()['images']
+ images = self.compute_images_client.list_images()['images']
self._scenario_images = [
(self._normalize_name(i['name']), dict(image_ref=i['id']))
for i in images if re.search(self.image_pattern,
diff --git a/tempest/services/base_microversion_client.py b/tempest/services/base_microversion_client.py
new file mode 100644
index 0000000..4c750f5
--- /dev/null
+++ b/tempest/services/base_microversion_client.py
@@ -0,0 +1,54 @@
+# Copyright 2016 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest_lib.common import rest_client
+
+
+class BaseMicroversionClient(rest_client.RestClient):
+ """Base class to support microversion in service clients
+
+    This class adds microversion support to service clients: it provides the
+    ability to make API requests with a microversion header. Service clients
+    derived from this class can send API requests to the server with or
+    without a microversion. If api_microversion is not set on the service
+    client, the API request is a normal request without a microversion.
+
+ """
+ def __init__(self, auth_provider, service, region,
+ api_microversion_header_name, **kwargs):
+ """Base Microversion Client __init__
+
+ :param auth_provider: an auth provider object used to wrap requests in
+ auth
+ :param str service: The service name to use for the catalog lookup
+ :param str region: The region to use for the catalog lookup
+        :param str api_microversion_header_name: The header name to use when
+                                                  sending an API request with
+                                                  a microversion
+ :param kwargs: kwargs required by rest_client.RestClient
+ """
+ super(BaseMicroversionClient, self).__init__(
+ auth_provider, service, region, **kwargs)
+ self.api_microversion_header_name = api_microversion_header_name
+ self.api_microversion = None
+
+ def get_headers(self):
+ headers = super(BaseMicroversionClient, self).get_headers()
+ if self.api_microversion:
+ headers[self.api_microversion_header_name] = self.api_microversion
+ return headers
+
+ def set_api_microversion(self, microversion):
+ self.api_microversion = microversion
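
BaseMicroversionClient only stores the requested microversion and injects the header; a minimal usage sketch, assuming a hypothetical ExampleClient derived from it and an existing auth_provider, could look like this (illustrative only, not part of the patch):

    from tempest.services import base_microversion_client


    class ExampleClient(base_microversion_client.BaseMicroversionClient):
        # Hypothetical client used only for illustration.
        def __init__(self, auth_provider, **kwargs):
            super(ExampleClient, self).__init__(
                auth_provider, 'compute', 'RegionOne',
                api_microversion_header_name='X-OpenStack-Nova-API-Version',
                **kwargs)


    # client = ExampleClient(auth_provider)
    # client.set_api_microversion('2.2')
    # get_headers() now includes {'X-OpenStack-Nova-API-Version': '2.2'},
    # so every request sent through rest_client.RestClient carries it;
    # after set_api_microversion(None) the header is omitted again.
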
diff --git a/tempest/services/botoclients.py b/tempest/services/botoclients.py
deleted file mode 100644
index 9d452ff..0000000
--- a/tempest/services/botoclients.py
+++ /dev/null
@@ -1,217 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import contextlib
-import types
-
-import boto
-import boto.ec2
-import boto.s3.connection
-from six.moves import configparser as ConfigParser
-from six.moves.urllib import parse as urlparse
-from tempest_lib import exceptions as lib_exc
-
-from tempest import config
-
-CONF = config.CONF
-
-
-class BotoClientBase(object):
-
- ALLOWED_METHODS = set()
-
- def __init__(self, identity_client):
- self.identity_client = identity_client
-
- self.ca_cert = CONF.identity.ca_certificates_file
- self.connection_timeout = str(CONF.boto.http_socket_timeout)
- self.num_retries = str(CONF.boto.num_retries)
- self.build_timeout = CONF.boto.build_timeout
-
- self.connection_data = {}
-
- def _config_boto_timeout(self, timeout, retries):
- try:
- boto.config.add_section("Boto")
- except ConfigParser.DuplicateSectionError:
- pass
- boto.config.set("Boto", "http_socket_timeout", timeout)
- boto.config.set("Boto", "num_retries", retries)
-
- def _config_boto_ca_certificates_file(self, ca_cert):
- if ca_cert is None:
- return
-
- try:
- boto.config.add_section("Boto")
- except ConfigParser.DuplicateSectionError:
- pass
- boto.config.set("Boto", "ca_certificates_file", ca_cert)
-
- def __getattr__(self, name):
- """Automatically creates methods for the allowed methods set."""
- if name in self.ALLOWED_METHODS:
- def func(self, *args, **kwargs):
- with contextlib.closing(self.get_connection()) as conn:
- return getattr(conn, name)(*args, **kwargs)
-
- func.__name__ = name
- setattr(self, name, types.MethodType(func, self, self.__class__))
- setattr(self.__class__, name,
- types.MethodType(func, None, self.__class__))
- return getattr(self, name)
- else:
- raise AttributeError(name)
-
- def get_connection(self):
- self._config_boto_timeout(self.connection_timeout, self.num_retries)
- self._config_boto_ca_certificates_file(self.ca_cert)
-
- ec2_client_args = {'aws_access_key_id': CONF.boto.aws_access,
- 'aws_secret_access_key': CONF.boto.aws_secret}
- if not all(ec2_client_args.values()):
- ec2_client_args = self.get_aws_credentials(self.identity_client)
-
- self.connection_data.update(ec2_client_args)
- return self.connect_method(**self.connection_data)
-
- def get_aws_credentials(self, identity_client):
- """Obtain existing, or create new AWS credentials
-
- :param identity_client: identity client with embedded credentials
- :return: EC2 credentials
- """
- ec2_cred_list = identity_client.list_user_ec2_credentials(
- identity_client.user_id)['credentials']
- for cred in ec2_cred_list:
- if cred['tenant_id'] == identity_client.tenant_id:
- ec2_cred = cred
- break
- else:
- ec2_cred = (identity_client.create_user_ec2_credentials(
- identity_client.user_id,
- tenant_id=identity_client.tenant_id)['credential'])
- if not all((ec2_cred, ec2_cred['access'], ec2_cred['secret'])):
- raise lib_exc.NotFound("Unable to get access and secret keys")
- else:
- ec2_cred_aws = {}
- ec2_cred_aws['aws_access_key_id'] = ec2_cred['access']
- ec2_cred_aws['aws_secret_access_key'] = ec2_cred['secret']
- return ec2_cred_aws
-
-
-class APIClientEC2(BotoClientBase):
-
- def connect_method(self, *args, **kwargs):
- return boto.connect_ec2(*args, **kwargs)
-
- def __init__(self, identity_client):
- super(APIClientEC2, self).__init__(identity_client)
- insecure_ssl = CONF.identity.disable_ssl_certificate_validation
- purl = urlparse.urlparse(CONF.boto.ec2_url)
-
- region_name = CONF.compute.region
- if not region_name:
- region_name = CONF.identity.region
- region = boto.ec2.regioninfo.RegionInfo(name=region_name,
- endpoint=purl.hostname)
- port = purl.port
- if port is None:
- if purl.scheme is not "https":
- port = 80
- else:
- port = 443
- else:
- port = int(port)
- self.connection_data.update({"is_secure": purl.scheme == "https",
- "validate_certs": not insecure_ssl,
- "region": region,
- "host": purl.hostname,
- "port": port,
- "path": purl.path})
-
- ALLOWED_METHODS = set(('create_key_pair', 'get_key_pair',
- 'delete_key_pair', 'import_key_pair',
- 'get_all_key_pairs',
- 'get_all_tags',
- 'create_image', 'get_image',
- 'register_image', 'deregister_image',
- 'get_all_images', 'get_image_attribute',
- 'modify_image_attribute', 'reset_image_attribute',
- 'get_all_kernels',
- 'create_volume', 'delete_volume',
- 'get_all_volume_status', 'get_all_volumes',
- 'get_volume_attribute', 'modify_volume_attribute'
- 'bundle_instance', 'cancel_spot_instance_requests',
- 'confirm_product_instanc',
- 'get_all_instance_status', 'get_all_instances',
- 'get_all_reserved_instances',
- 'get_all_spot_instance_requests',
- 'get_instance_attribute', 'monitor_instance',
- 'monitor_instances', 'unmonitor_instance',
- 'unmonitor_instances',
- 'purchase_reserved_instance_offering',
- 'reboot_instances', 'request_spot_instances',
- 'reset_instance_attribute', 'run_instances',
- 'start_instances', 'stop_instances',
- 'terminate_instances',
- 'attach_network_interface', 'attach_volume',
- 'detach_network_interface', 'detach_volume',
- 'get_console_output',
- 'delete_network_interface', 'create_subnet',
- 'create_network_interface', 'delete_subnet',
- 'get_all_network_interfaces',
- 'allocate_address', 'associate_address',
- 'disassociate_address', 'get_all_addresses',
- 'release_address',
- 'create_snapshot', 'delete_snapshot',
- 'get_all_snapshots', 'get_snapshot_attribute',
- 'modify_snapshot_attribute',
- 'reset_snapshot_attribute', 'trim_snapshots',
- 'get_all_regions', 'get_all_zones',
- 'get_all_security_groups', 'create_security_group',
- 'delete_security_group', 'authorize_security_group',
- 'authorize_security_group_egress',
- 'revoke_security_group',
- 'revoke_security_group_egress'))
-
-
-class ObjectClientS3(BotoClientBase):
-
- def connect_method(self, *args, **kwargs):
- return boto.connect_s3(*args, **kwargs)
-
- def __init__(self, identity_client):
- super(ObjectClientS3, self).__init__(identity_client)
- insecure_ssl = CONF.identity.disable_ssl_certificate_validation
- purl = urlparse.urlparse(CONF.boto.s3_url)
- port = purl.port
- if port is None:
- if purl.scheme is not "https":
- port = 80
- else:
- port = 443
- else:
- port = int(port)
- self.connection_data.update({"is_secure": purl.scheme == "https",
- "validate_certs": not insecure_ssl,
- "host": purl.hostname,
- "port": port,
- "calling_format": boto.s3.connection.
- OrdinaryCallingFormat()})
-
- ALLOWED_METHODS = set(('create_bucket', 'delete_bucket', 'generate_url',
- 'get_all_buckets', 'get_bucket', 'delete_key',
- 'lookup'))
diff --git a/tempest/services/compute/json/base.py b/tempest/services/compute/json/base.py
deleted file mode 100644
index 02e9f8b..0000000
--- a/tempest/services/compute/json/base.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright 2015 NEC Corporation. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.common import service_client
-
-
-class BaseComputeClient(service_client.ServiceClient):
- api_microversion = None
-
- def get_headers(self):
- headers = super(BaseComputeClient, self).get_headers()
- if self.api_microversion:
- headers['X-OpenStack-Nova-API-Version'] = self.api_microversion
- return headers
diff --git a/tempest/services/compute/json/base_compute_client.py b/tempest/services/compute/json/base_compute_client.py
new file mode 100644
index 0000000..5349af6
--- /dev/null
+++ b/tempest/services/compute/json/base_compute_client.py
@@ -0,0 +1,72 @@
+# Copyright 2015 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.common import api_version_request
+from tempest.common import api_version_utils
+from tempest import exceptions
+from tempest.services import base_microversion_client
+
+
+class BaseComputeClient(base_microversion_client.BaseMicroversionClient):
+
+ def __init__(self, auth_provider, service, region,
+ api_microversion_header_name='X-OpenStack-Nova-API-Version',
+ **kwargs):
+ super(BaseComputeClient, self).__init__(
+ auth_provider, service, region,
+ api_microversion_header_name, **kwargs)
+
+ def request(self, method, url, extra_headers=False, headers=None,
+ body=None):
+ resp, resp_body = super(BaseComputeClient, self).request(
+ method, url, extra_headers, headers, body)
+ if self.api_microversion and self.api_microversion != 'latest':
+ api_version_utils.assert_version_header_matches_request(
+ self.api_microversion_header_name,
+ self.api_microversion,
+ resp)
+ return resp, resp_body
+
+ def get_schema(self, schema_versions_info):
+ """Get JSON schema
+
+        This method returns the response schema that matches the requested
+        microversion (self.api_microversion).
+        :param schema_versions_info: List of dicts, each mapping a range of
+                                     valid versions to its response schema.
+ Example -
+ schema_versions_info = [
+ {'min': None, 'max': '2.1', 'schema': schemav21},
+ {'min': '2.2', 'max': '2.9', 'schema': schemav22},
+ {'min': '2.10', 'max': None, 'schema': schemav210}]
+ """
+ schema = None
+ version = api_version_request.APIVersionRequest(self.api_microversion)
+ for items in schema_versions_info:
+ min_version = api_version_request.APIVersionRequest(items['min'])
+ max_version = api_version_request.APIVersionRequest(items['max'])
+            # This is the case where self.api_microversion is None, i.e. a
+            # request without a microversion, so select the base v2.1 schema.
+ if version.is_null() and items['min'] is None:
+ schema = items['schema']
+ break
+ # else select appropriate schema as per self.api_microversion
+ elif version.matches(min_version, max_version):
+ schema = items['schema']
+ break
+ if schema is None:
+ raise exceptions.JSONSchemaNotFound(
+ version=version.get_string(),
+ schema_versions_info=schema_versions_info)
+ return schema
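
To make the selection rule in get_schema() concrete, here is a standalone sketch that mirrors the same min/max matching with plain version tuples instead of APIVersionRequest objects (pick_schema is an illustrative helper, not part of Tempest, and it ignores the special 'latest' value):

    def pick_schema(requested, schema_versions_info):
        # None means "no microversion requested" -> base schema.
        def as_tuple(version):
            return tuple(int(p) for p in version.split('.')) if version else None

        req = as_tuple(requested)
        for item in schema_versions_info:
            low, high = as_tuple(item['min']), as_tuple(item['max'])
            if req is None and low is None:
                return item['schema']          # base (v2.1) schema
            if req is not None and (low is None or req >= low) \
                    and (high is None or req <= high):
                return item['schema']
        raise LookupError('no schema matches version %s' % requested)

    versions = [{'min': None, 'max': '2.1', 'schema': 'schema-v2.1'},
                {'min': '2.2', 'max': None, 'schema': 'schema-v2.2'}]
    assert pick_schema(None, versions) == 'schema-v2.1'
    assert pick_schema('2.1', versions) == 'schema-v2.1'
    assert pick_schema('2.2', versions) == 'schema-v2.2'
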
diff --git a/tempest/services/compute/json/floating_ips_client.py b/tempest/services/compute/json/floating_ips_client.py
deleted file mode 100644
index b3e2f2f..0000000
--- a/tempest/services/compute/json/floating_ips_client.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
-from tempest_lib import exceptions as lib_exc
-
-from tempest.api_schema.response.compute.v2_1 import floating_ips as schema
-from tempest.common import service_client
-
-
-class FloatingIPsClient(service_client.ServiceClient):
-
- def list_floating_ips(self, **params):
- """Returns a list of all floating IPs filtered by any parameters."""
- url = 'os-floating-ips'
- if params:
- url += '?%s' % urllib.urlencode(params)
-
- resp, body = self.get(url)
- body = json.loads(body)
- self.validate_response(schema.list_floating_ips, resp, body)
- return service_client.ResponseBody(resp, body)
-
- def show_floating_ip(self, floating_ip_id):
- """Get the details of a floating IP."""
- url = "os-floating-ips/%s" % floating_ip_id
- resp, body = self.get(url)
- body = json.loads(body)
- self.validate_response(schema.create_get_floating_ip, resp, body)
- return service_client.ResponseBody(resp, body)
-
- def create_floating_ip(self, **kwargs):
- """Allocate a floating IP to the project.
-
- Available params: see http://developer.openstack.org/
- api-ref-compute-v2.1.html#createFloatingIP
- """
- url = 'os-floating-ips'
- post_body = json.dumps(kwargs)
- resp, body = self.post(url, post_body)
- body = json.loads(body)
- self.validate_response(schema.create_get_floating_ip, resp, body)
- return service_client.ResponseBody(resp, body)
-
- def delete_floating_ip(self, floating_ip_id):
- """Deletes the provided floating IP from the project."""
- url = "os-floating-ips/%s" % floating_ip_id
- resp, body = self.delete(url)
- self.validate_response(schema.add_remove_floating_ip, resp, body)
- return service_client.ResponseBody(resp, body)
-
- def associate_floating_ip_to_server(self, floating_ip, server_id):
- """Associate the provided floating IP to a specific server."""
- url = "servers/%s/action" % server_id
- post_body = {
- 'addFloatingIp': {
- 'address': floating_ip,
- }
- }
-
- post_body = json.dumps(post_body)
- resp, body = self.post(url, post_body)
- self.validate_response(schema.add_remove_floating_ip, resp, body)
- return service_client.ResponseBody(resp, body)
-
- def disassociate_floating_ip_from_server(self, floating_ip, server_id):
- """Disassociate the provided floating IP from a specific server."""
- url = "servers/%s/action" % server_id
- post_body = {
- 'removeFloatingIp': {
- 'address': floating_ip,
- }
- }
-
- post_body = json.dumps(post_body)
- resp, body = self.post(url, post_body)
- self.validate_response(schema.add_remove_floating_ip, resp, body)
- return service_client.ResponseBody(resp, body)
-
- def is_resource_deleted(self, id):
- try:
- self.show_floating_ip(id)
- except lib_exc.NotFound:
- return True
- return False
-
- @property
- def resource_type(self):
- """Returns the primary type of resource this client works with."""
- return 'floating_ip'
diff --git a/tempest/services/compute/json/keypairs_client.py b/tempest/services/compute/json/keypairs_client.py
index 2e22bc6..ec9b1e0 100644
--- a/tempest/services/compute/json/keypairs_client.py
+++ b/tempest/services/compute/json/keypairs_client.py
@@ -15,21 +15,28 @@
from oslo_serialization import jsonutils as json
-from tempest.api_schema.response.compute.v2_1 import keypairs as schema
+from tempest.api_schema.response.compute.v2_1 import keypairs as schemav21
+from tempest.api_schema.response.compute.v2_2 import keypairs as schemav22
from tempest.common import service_client
+from tempest.services.compute.json import base_compute_client
-class KeyPairsClient(service_client.ServiceClient):
+class KeyPairsClient(base_compute_client.BaseComputeClient):
+
+ schema_versions_info = [{'min': None, 'max': '2.1', 'schema': schemav21},
+ {'min': '2.2', 'max': None, 'schema': schemav22}]
def list_keypairs(self):
resp, body = self.get("os-keypairs")
body = json.loads(body)
+ schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.list_keypairs, resp, body)
return service_client.ResponseBody(resp, body)
def show_keypair(self, keypair_name):
resp, body = self.get("os-keypairs/%s" % keypair_name)
body = json.loads(body)
+ schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.get_keypair, resp, body)
return service_client.ResponseBody(resp, body)
@@ -37,10 +44,12 @@
post_body = json.dumps({'keypair': kwargs})
resp, body = self.post("os-keypairs", body=post_body)
body = json.loads(body)
+ schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.create_keypair, resp, body)
return service_client.ResponseBody(resp, body)
def delete_keypair(self, keypair_name):
resp, body = self.delete("os-keypairs/%s" % keypair_name)
+ schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.delete_keypair, resp, body)
return service_client.ResponseBody(resp, body)
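
With the schema_versions_info table above, a short usage sketch (assuming a keypairs_client instance already built through the usual client manager) would behave as follows:

    # Responses are validated against the schema matching the microversion.
    keypairs_client.set_api_microversion('2.2')
    keypairs_client.list_keypairs()   # validated with the v2.2 keypairs schema
    keypairs_client.set_api_microversion(None)
    keypairs_client.list_keypairs()   # falls back to the base v2.1 schema
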
diff --git a/tempest/services/compute/json/security_group_rules_client.py b/tempest/services/compute/json/security_group_rules_client.py
deleted file mode 100644
index 314b1ed..0000000
--- a/tempest/services/compute/json/security_group_rules_client.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_serialization import jsonutils as json
-
-from tempest.api_schema.response.compute.v2_1 import security_groups as schema
-from tempest.common import service_client
-
-
-class SecurityGroupRulesClient(service_client.ServiceClient):
-
- def create_security_group_rule(self, **kwargs):
- """Creating a new security group rules.
-
- parent_group_id :ID of Security group
- ip_protocol : ip_proto (icmp, tcp, udp).
- from_port: Port at start of range.
- to_port : Port at end of range.
- Following optional keyword arguments are accepted:
- cidr : CIDR for address range.
- group_id : ID of the Source group
- """
- post_body = json.dumps({'security_group_rule': kwargs})
- url = 'os-security-group-rules'
- resp, body = self.post(url, post_body)
- body = json.loads(body)
- self.validate_response(schema.create_security_group_rule, resp, body)
- return service_client.ResponseBody(resp, body)
-
- def delete_security_group_rule(self, group_rule_id):
- """Deletes the provided Security Group rule."""
- resp, body = self.delete('os-security-group-rules/%s' %
- group_rule_id)
- self.validate_response(schema.delete_security_group_rule, resp, body)
- return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/compute/json/server_groups_client.py b/tempest/services/compute/json/server_groups_client.py
deleted file mode 100644
index 44ac015..0000000
--- a/tempest/services/compute/json/server_groups_client.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# Copyright 2013 Hewlett-Packard Development Company, L.P.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_serialization import jsonutils as json
-
-from tempest.api_schema.response.compute.v2_1 import servers as schema
-from tempest.common import service_client
-
-
-class ServerGroupsClient(service_client.ServiceClient):
-
- def create_server_group(self, **kwargs):
- """Create the server group
-
- name : Name of the server-group
- policies : List of the policies - affinity/anti-affinity)
- """
- post_body = json.dumps({'server_group': kwargs})
- resp, body = self.post('os-server-groups', post_body)
-
- body = json.loads(body)
- self.validate_response(schema.create_show_server_group, resp, body)
- return service_client.ResponseBody(resp, body)
-
- def delete_server_group(self, server_group_id):
- """Delete the given server-group."""
- resp, body = self.delete("os-server-groups/%s" % server_group_id)
- self.validate_response(schema.delete_server_group, resp, body)
- return service_client.ResponseBody(resp, body)
-
- def list_server_groups(self):
- """List the server-groups."""
- resp, body = self.get("os-server-groups")
- body = json.loads(body)
- self.validate_response(schema.list_server_groups, resp, body)
- return service_client.ResponseBody(resp, body)
-
- def show_server_group(self, server_group_id):
- """Get the details of given server_group."""
- resp, body = self.get("os-server-groups/%s" % server_group_id)
- body = json.loads(body)
- self.validate_response(schema.create_show_server_group, resp, body)
- return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
deleted file mode 100644
index c20295b..0000000
--- a/tempest/services/compute/json/servers_client.py
+++ /dev/null
@@ -1,561 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# Copyright 2013 Hewlett-Packard Development Company, L.P.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-
-from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
-
-from tempest.api_schema.response.compute.v2_1 import servers as schema
-from tempest.common import service_client
-
-
-class ServersClient(service_client.ServiceClient):
-
- def __init__(self, auth_provider, service, region,
- enable_instance_password=True, **kwargs):
- super(ServersClient, self).__init__(
- auth_provider, service, region, **kwargs)
- self.enable_instance_password = enable_instance_password
-
- def create_server(self, **kwargs):
- """Create server.
-
- Available params: see http://developer.openstack.org/
- api-ref-compute-v2.1.html#createServer
-
- Most parameters except the following are passed to the API without
- any changes.
- :param disk_config: The name is changed to OS-DCF:diskConfig
- :param scheduler_hints: The name is changed to os:scheduler_hints and
- the parameter is set in the same level as the parameter 'server'.
- """
- body = copy.deepcopy(kwargs)
- if body.get('disk_config'):
- body['OS-DCF:diskConfig'] = body.pop('disk_config')
-
- hints = None
- if body.get('scheduler_hints'):
- hints = {'os:scheduler_hints': body.pop('scheduler_hints')}
-
- post_body = {'server': body}
-
- if hints:
- post_body = dict(post_body.items() + hints.items())
-
- post_body = json.dumps(post_body)
- resp, body = self.post('servers', post_body)
-
- body = json.loads(body)
- # NOTE(maurosr): this deals with the case of multiple server create
- # with return reservation id set True
- if 'reservation_id' in body:
- return service_client.ResponseBody(resp, body)
- if self.enable_instance_password:
- create_schema = schema.create_server_with_admin_pass
- else:
- create_schema = schema.create_server
- self.validate_response(create_schema, resp, body)
- return service_client.ResponseBody(resp, body)
-
- def update_server(self, server_id, **kwargs):
- """Update server.
-
- Available params: see http://developer.openstack.org/
- api-ref-compute-v2.1.html#updateServer
-
- Most parameters except the following are passed to the API without
- any changes.
- :param disk_config: The name is changed to OS-DCF:diskConfig
- """
- if kwargs.get('disk_config'):
- kwargs['OS-DCF:diskConfig'] = kwargs.pop('disk_config')
-
- post_body = json.dumps({'server': kwargs})
- resp, body = self.put("servers/%s" % server_id, post_body)
- body = json.loads(body)
- self.validate_response(schema.update_server, resp, body)
- return service_client.ResponseBody(resp, body)
-
- def show_server(self, server_id):
- """Get server details."""
- resp, body = self.get("servers/%s" % server_id)
- body = json.loads(body)
- self.validate_response(schema.get_server, resp, body)
- return service_client.ResponseBody(resp, body)
-
- def delete_server(self, server_id):
- """Delete server."""
- resp, body = self.delete("servers/%s" % server_id)
- self.validate_response(schema.delete_server, resp, body)
- return service_client.ResponseBody(resp, body)
-
- def list_servers(self, detail=False, **params):
- """List servers.
-
- Available params: see http://developer.openstack.org/
- api-ref-compute-v2.1.html#listServers
- and http://developer.openstack.org/
- api-ref-compute-v2.1.html#listDetailServers
- """
-
- url = 'servers'
- _schema = schema.list_servers
-
- if detail:
- url += '/detail'
- _schema = schema.list_servers_detail
- if params:
- url += '?%s' % urllib.urlencode(params)
-
- resp, body = self.get(url)
- body = json.loads(body)
- self.validate_response(_schema, resp, body)
- return service_client.ResponseBody(resp, body)
-
- def list_addresses(self, server_id):
- """Lists all addresses for a server."""
- resp, body = self.get("servers/%s/ips" % server_id)
- body = json.loads(body)
- self.validate_response(schema.list_addresses, resp, body)
- return service_client.ResponseBody(resp, body)
-
- def list_addresses_by_network(self, server_id, network_id):
- """Lists all addresses of a specific network type for a server."""
- resp, body = self.get("servers/%s/ips/%s" %
- (server_id, network_id))
- body = json.loads(body)
- self.validate_response(schema.list_addresses_by_network, resp, body)
- return service_client.ResponseBody(resp, body)
-
- def action(self, server_id, action_name,
- schema=schema.server_actions_common_schema,
- **kwargs):
- post_body = json.dumps({action_name: kwargs})
- resp, body = self.post('servers/%s/action' % server_id,
- post_body)
- if body:
- body = json.loads(body)
- self.validate_response(schema, resp, body)
- return service_client.ResponseBody(resp, body)
-
- def create_backup(self, server_id, **kwargs):
- """Backup a server instance.
-
- Available params: see http://developer.openstack.org/
- api-ref-compute-v2.1.html#createBackup
- """
- return self.action(server_id, "createBackup", **kwargs)
-
- def change_password(self, server_id, **kwargs):
- """Change the root password for the server.
-
- Available params: see http://developer.openstack.org/
- api-ref-compute-v2.1.html#changePassword
- """
- return self.action(server_id, 'changePassword', **kwargs)
-
- def show_password(self, server_id):
- resp, body = self.get("servers/%s/os-server-password" %
- server_id)
- body = json.loads(body)
- self.validate_response(schema.show_password, resp, body)
- return service_client.ResponseBody(resp, body)
-
- def delete_password(self, server_id):
- """Removes the encrypted server password from the metadata server
-
- Note that this does not actually change the instance server
- password.
- """
- resp, body = self.delete("servers/%s/os-server-password" %
- server_id)
- self.validate_response(schema.server_actions_delete_password,
- resp, body)
- return service_client.ResponseBody(resp, body)
-
- def reboot_server(self, server_id, **kwargs):
- """Reboot a server.
-
- Available params: http://developer.openstack.org/
- api-ref-compute-v2.1.html#reboot
- """
- return self.action(server_id, 'reboot', **kwargs)
-
- def rebuild_server(self, server_id, image_ref, **kwargs):
- """Rebuild a server with a new image.
-
- Available params: http://developer.openstack.org/
- api-ref-compute-v2.1.html#rebuild
-
- Most parameters except the following are passed to the API without
- any changes.
- :param disk_config: The name is changed to OS-DCF:diskConfig
- """
- kwargs['imageRef'] = image_ref
- if 'disk_config' in kwargs:
- kwargs['OS-DCF:diskConfig'] = kwargs.pop('disk_config')
- if self.enable_instance_password:
- rebuild_schema = schema.rebuild_server_with_admin_pass
- else:
- rebuild_schema = schema.rebuild_server
- return self.action(server_id, 'rebuild',
- rebuild_schema, **kwargs)
-
- def resize_server(self, server_id, flavor_ref, **kwargs):
- """Change the flavor of a server.
-
- Available params: http://developer.openstack.org/
- api-ref-compute-v2.1.html#resize
-
- Most parameters except the following are passed to the API without
- any changes.
- :param disk_config: The name is changed to OS-DCF:diskConfig
- """
- kwargs['flavorRef'] = flavor_ref
- if 'disk_config' in kwargs:
- kwargs['OS-DCF:diskConfig'] = kwargs.pop('disk_config')
- return self.action(server_id, 'resize', **kwargs)
-
- def confirm_resize_server(self, server_id, **kwargs):
- """Confirm the flavor change for a server.
-
- Available params: see http://developer.openstack.org/
- api-ref-compute-v2.1.html#confirmResize
- """
- return self.action(server_id, 'confirmResize',
- schema.server_actions_confirm_resize,
- **kwargs)
-
- def revert_resize_server(self, server_id, **kwargs):
- """Revert a server back to its original flavor.
-
- Available params: see http://developer.openstack.org/
- api-ref-compute-v2.1.html#revertResize
- """
- return self.action(server_id, 'revertResize', **kwargs)
-
- def list_server_metadata(self, server_id):
- resp, body = self.get("servers/%s/metadata" % server_id)
- body = json.loads(body)
- self.validate_response(schema.list_server_metadata, resp, body)
- return service_client.ResponseBody(resp, body)
-
- def set_server_metadata(self, server_id, meta, no_metadata_field=False):
- if no_metadata_field:
- post_body = ""
- else:
- post_body = json.dumps({'metadata': meta})
- resp, body = self.put('servers/%s/metadata' % server_id,
- post_body)
- body = json.loads(body)
- self.validate_response(schema.set_server_metadata, resp, body)
- return service_client.ResponseBody(resp, body)
-
- def update_server_metadata(self, server_id, meta):
- post_body = json.dumps({'metadata': meta})
- resp, body = self.post('servers/%s/metadata' % server_id,
- post_body)
- body = json.loads(body)
- self.validate_response(schema.update_server_metadata,
- resp, body)
- return service_client.ResponseBody(resp, body)
-
- def show_server_metadata_item(self, server_id, key):
- resp, body = self.get("servers/%s/metadata/%s" % (server_id, key))
- body = json.loads(body)
- self.validate_response(schema.set_show_server_metadata_item,
- resp, body)
- return service_client.ResponseBody(resp, body)
-
- def set_server_metadata_item(self, server_id, key, meta):
- post_body = json.dumps({'meta': meta})
- resp, body = self.put('servers/%s/metadata/%s' % (server_id, key),
- post_body)
- body = json.loads(body)
- self.validate_response(schema.set_show_server_metadata_item,
- resp, body)
- return service_client.ResponseBody(resp, body)
-
- def delete_server_metadata_item(self, server_id, key):
- resp, body = self.delete("servers/%s/metadata/%s" %
- (server_id, key))
- self.validate_response(schema.delete_server_metadata_item,
- resp, body)
- return service_client.ResponseBody(resp, body)
-
- def stop_server(self, server_id, **kwargs):
- return self.action(server_id, 'os-stop', **kwargs)
-
- def start_server(self, server_id, **kwargs):
- return self.action(server_id, 'os-start', **kwargs)
-
- def attach_volume(self, server_id, **kwargs):
- """Attaches a volume to a server instance."""
- post_body = json.dumps({'volumeAttachment': kwargs})
- resp, body = self.post('servers/%s/os-volume_attachments' % server_id,
- post_body)
- body = json.loads(body)
- self.validate_response(schema.attach_volume, resp, body)
- return service_client.ResponseBody(resp, body)
-
- def detach_volume(self, server_id, volume_id): # noqa
- """Detaches a volume from a server instance."""
- resp, body = self.delete('servers/%s/os-volume_attachments/%s' %
- (server_id, volume_id))
- self.validate_response(schema.detach_volume, resp, body)
- return service_client.ResponseBody(resp, body)
-
- def show_volume_attachment(self, server_id, attach_id):
- """Return details about the given volume attachment."""
- resp, body = self.get('servers/%s/os-volume_attachments/%s' % (
- server_id, attach_id))
- body = json.loads(body)
- self.validate_response(schema.show_volume_attachment, resp, body)
- return service_client.ResponseBody(resp, body)
-
- def list_volume_attachments(self, server_id):
- """Returns the list of volume attachments for a given instance."""
- resp, body = self.get('servers/%s/os-volume_attachments' % (
- server_id))
- body = json.loads(body)
- self.validate_response(schema.list_volume_attachments, resp, body)
- return service_client.ResponseBody(resp, body)
-
- def add_security_group(self, server_id, **kwargs):
- """Add a security group to the server.
-
- Available params: TODO
- """
- # TODO(oomichi): The api-site doesn't contain this API description.
- # So the above should be changed to the api-site link after
- # adding the description on the api-site.
- # LP: https://bugs.launchpad.net/openstack-api-site/+bug/1524199
- return self.action(server_id, 'addSecurityGroup', **kwargs)
-
- def remove_security_group(self, server_id, **kwargs):
- """Remove a security group from the server.
-
- Available params: TODO
- """
- # TODO(oomichi): The api-site doesn't contain this API description.
- # So the above should be changed to the api-site link after
- # adding the description on the api-site.
- # LP: https://bugs.launchpad.net/openstack-api-site/+bug/1524199
- return self.action(server_id, 'removeSecurityGroup', **kwargs)
-
- def live_migrate_server(self, server_id, **kwargs):
- """This should be called with administrator privileges.
-
- Available params: http://developer.openstack.org/
- api-ref-compute-v2.1.html#migrateLive
- """
- return self.action(server_id, 'os-migrateLive', **kwargs)
-
- def migrate_server(self, server_id, **kwargs):
- """Migrate a server to a new host.
-
- Available params: http://developer.openstack.org/
- api-ref-compute-v2.1.html#migrate
- """
- return self.action(server_id, 'migrate', **kwargs)
-
- def lock_server(self, server_id, **kwargs):
- """Lock the given server.
-
- Available params: http://developer.openstack.org/
- api-ref-compute-v2.1.html#lock
- """
- return self.action(server_id, 'lock', **kwargs)
-
- def unlock_server(self, server_id, **kwargs):
- """UNlock the given server.
-
- Available params: http://developer.openstack.org/
- api-ref-compute-v2.1.html#unlock
- """
- return self.action(server_id, 'unlock', **kwargs)
-
- def suspend_server(self, server_id, **kwargs):
- """Suspend the provided server.
-
- Available params: http://developer.openstack.org/
- api-ref-compute-v2.1.html#suspend
- """
- return self.action(server_id, 'suspend', **kwargs)
-
- def resume_server(self, server_id, **kwargs):
- """Un-suspend the provided server.
-
- Available params: http://developer.openstack.org/
- api-ref-compute-v2.1.html#resume
- """
- return self.action(server_id, 'resume', **kwargs)
-
- def pause_server(self, server_id, **kwargs):
- """Pause the provided server.
-
- Available params: http://developer.openstack.org/
- api-ref-compute-v2.1.html#pause
- """
- return self.action(server_id, 'pause', **kwargs)
-
- def unpause_server(self, server_id, **kwargs):
- """Un-pause the provided server.
-
- Available params: http://developer.openstack.org/
- api-ref-compute-v2.1.html#unpause
- """
- return self.action(server_id, 'unpause', **kwargs)
-
- def reset_state(self, server_id, **kwargs):
- """Reset the state of a server to active/error.
-
- Available params: http://developer.openstack.org/
- api-ref-compute-v2.1.html#resetState
- """
- return self.action(server_id, 'os-resetState', **kwargs)
-
- def shelve_server(self, server_id, **kwargs):
- """Shelve the provided server.
-
- Available params: http://developer.openstack.org/
- api-ref-compute-v2.1.html#shelve
- """
- return self.action(server_id, 'shelve', **kwargs)
-
- def unshelve_server(self, server_id, **kwargs):
- """Un-shelve the provided server.
-
- Available params: http://developer.openstack.org/
- api-ref-compute-v2.1.html#unshelve
- """
- return self.action(server_id, 'unshelve', **kwargs)
-
- def shelve_offload_server(self, server_id, **kwargs):
- """Shelve-offload the provided server.
-
- Available params: http://developer.openstack.org/
- api-ref-compute-v2.1.html#shelveOffload
- """
- return self.action(server_id, 'shelveOffload', **kwargs)
-
- def get_console_output(self, server_id, **kwargs):
- """Get console output.
-
- Available params: http://developer.openstack.org/
- api-ref-compute-v2.1.html#getConsoleOutput
- """
- return self.action(server_id, 'os-getConsoleOutput',
- schema.get_console_output, **kwargs)
-
- def list_virtual_interfaces(self, server_id):
- """List the virtual interfaces used in an instance."""
- resp, body = self.get('/'.join(['servers', server_id,
- 'os-virtual-interfaces']))
- body = json.loads(body)
- self.validate_response(schema.list_virtual_interfaces, resp, body)
- return service_client.ResponseBody(resp, body)
-
- def rescue_server(self, server_id, **kwargs):
- """Rescue the provided server.
-
- Available params: http://developer.openstack.org/
- api-ref-compute-v2.1.html#rescue
- """
- return self.action(server_id, 'rescue', schema.rescue_server, **kwargs)
-
- def unrescue_server(self, server_id):
- """Unrescue the provided server."""
- return self.action(server_id, 'unrescue')
-
- def show_server_diagnostics(self, server_id):
- """Get the usage data for a server."""
- resp, body = self.get("servers/%s/diagnostics" % server_id)
- return service_client.ResponseBody(resp, json.loads(body))
-
- def list_instance_actions(self, server_id):
- """List the provided server action."""
- resp, body = self.get("servers/%s/os-instance-actions" %
- server_id)
- body = json.loads(body)
- self.validate_response(schema.list_instance_actions, resp, body)
- return service_client.ResponseBody(resp, body)
-
- def show_instance_action(self, server_id, request_id):
- """Returns the action details of the provided server."""
- resp, body = self.get("servers/%s/os-instance-actions/%s" %
- (server_id, request_id))
- body = json.loads(body)
- self.validate_response(schema.show_instance_action, resp, body)
- return service_client.ResponseBody(resp, body)
-
- def force_delete_server(self, server_id, **kwargs):
- """Force delete a server.
-
- Available params: http://developer.openstack.org/
- api-ref-compute-v2.1.html#forceDelete
- """
- return self.action(server_id, 'forceDelete', **kwargs)
-
- def restore_soft_deleted_server(self, server_id, **kwargs):
- """Restore a soft-deleted server.
-
- Available params: http://developer.openstack.org/
- api-ref-compute-v2.1.html#restore
- """
- return self.action(server_id, 'restore', **kwargs)
-
- def reset_network(self, server_id, **kwargs):
- """Reset the Network of a server.
-
- Available params: http://developer.openstack.org/
- api-ref-compute-v2.1.html#resetNetwork
- """
- return self.action(server_id, 'resetNetwork', **kwargs)
-
- def inject_network_info(self, server_id, **kwargs):
- """Inject the Network Info into server.
-
- Available params: http://developer.openstack.org/
- api-ref-compute-v2.1.html#injectNetworkInfo
- """
- return self.action(server_id, 'injectNetworkInfo', **kwargs)
-
- def get_vnc_console(self, server_id, **kwargs):
- """Get URL of VNC console.
-
- Available params: http://developer.openstack.org/
- api-ref-compute-v2.1.html#getVNCConsole
- """
- return self.action(server_id, "os-getVNCConsole",
- schema.get_vnc_console, **kwargs)
-
- def add_fixed_ip(self, server_id, **kwargs):
- """Add a fixed IP to server instance.
-
- Available params: http://developer.openstack.org/
- api-ref-compute-v2.1.html#addFixedIp
- """
- return self.action(server_id, 'addFixedIp', **kwargs)
-
- def remove_fixed_ip(self, server_id, **kwargs):
- """Remove input fixed IP from input server instance.
-
- Available params: http://developer.openstack.org/
- api-ref-compute-v2.1.html#removeFixedIp
- """
- return self.action(server_id, 'removeFixedIp', **kwargs)
diff --git a/tempest/services/database/json/flavors_client.py b/tempest/services/database/json/flavors_client.py
index 34a91ba..dbb5172 100644
--- a/tempest/services/database/json/flavors_client.py
+++ b/tempest/services/database/json/flavors_client.py
@@ -14,7 +14,7 @@
# under the License.
from oslo_serialization import jsonutils as json
-import urllib
+from six.moves import urllib
from tempest.common import service_client
@@ -24,7 +24,7 @@
def list_db_flavors(self, params=None):
url = 'flavors'
if params:
- url += '?%s' % urllib.urlencode(params)
+ url += '?%s' % urllib.parse.urlencode(params)
resp, body = self.get(url)
self.expected_success(200, resp.status)
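
For context, the six.moves import keeps the urlencode call working on both Python 2 and Python 3; a small self-contained illustration of the updated call:

    from six.moves import urllib

    params = {'limit': 10, 'marker': 'abc'}
    url = 'flavors'
    if params:
        url += '?%s' % urllib.parse.urlencode(params)
    # e.g. 'flavors?limit=10&marker=abc' on both Python 2 and Python 3
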
diff --git a/tempest/services/identity/v2/json/identity_client.py b/tempest/services/identity/v2/json/identity_client.py
index f80e22d..db334a6 100644
--- a/tempest/services/identity/v2/json/identity_client.py
+++ b/tempest/services/identity/v2/json/identity_client.py
@@ -26,116 +26,6 @@
body = json.loads(body)
return service_client.ResponseBody(resp, body)
- def create_role(self, name):
- """Create a role."""
- post_body = {
- 'name': name,
- }
- post_body = json.dumps({'role': post_body})
- resp, body = self.post('OS-KSADM/roles', post_body)
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
-
- def show_role(self, role_id):
- """Get a role by its id."""
- resp, body = self.get('OS-KSADM/roles/%s' % role_id)
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
-
- def delete_role(self, role_id):
- """Delete a role."""
- resp, body = self.delete('OS-KSADM/roles/%s' % str(role_id))
- self.expected_success(204, resp.status)
- return resp, body
-
- def list_user_roles(self, tenant_id, user_id):
- """Returns a list of roles assigned to a user for a tenant."""
- url = '/tenants/%s/users/%s/roles' % (tenant_id, user_id)
- resp, body = self.get(url)
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
-
- def assign_user_role(self, tenant_id, user_id, role_id):
- """Add roles to a user on a tenant."""
- resp, body = self.put('/tenants/%s/users/%s/roles/OS-KSADM/%s' %
- (tenant_id, user_id, role_id), "")
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
-
- def delete_user_role(self, tenant_id, user_id, role_id):
- """Removes a role assignment for a user on a tenant."""
- resp, body = self.delete('/tenants/%s/users/%s/roles/OS-KSADM/%s' %
- (tenant_id, user_id, role_id))
- self.expected_success(204, resp.status)
- return service_client.ResponseBody(resp, body)
-
- def list_roles(self):
- """Returns roles."""
- resp, body = self.get('OS-KSADM/roles')
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
-
- def create_user(self, name, password, tenant_id, email, **kwargs):
- """Create a user."""
- post_body = {
- 'name': name,
- 'password': password,
- 'email': email
- }
- if tenant_id is not None:
- post_body['tenantId'] = tenant_id
- if kwargs.get('enabled') is not None:
- post_body['enabled'] = kwargs.get('enabled')
- post_body = json.dumps({'user': post_body})
- resp, body = self.post('users', post_body)
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
-
- def update_user(self, user_id, **kwargs):
- """Updates a user."""
- put_body = json.dumps({'user': kwargs})
- resp, body = self.put('users/%s' % user_id, put_body)
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
-
- def show_user(self, user_id):
- """GET a user."""
- resp, body = self.get("users/%s" % user_id)
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
-
- def delete_user(self, user_id):
- """Delete a user."""
- resp, body = self.delete("users/%s" % user_id)
- self.expected_success(204, resp.status)
- return service_client.ResponseBody(resp, body)
-
- def list_users(self):
- """Get the list of users."""
- resp, body = self.get("users")
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
-
- def enable_disable_user(self, user_id, enabled):
- """Enables or disables a user."""
- put_body = {
- 'enabled': enabled
- }
- put_body = json.dumps({'user': put_body})
- resp, body = self.put('users/%s/enabled' % user_id, put_body)
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
-
def show_token(self, token_id):
"""Get token details."""
resp, body = self.get("tokens/%s" % token_id)
@@ -149,41 +39,6 @@
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp, body)
- def create_service(self, name, type, **kwargs):
- """Create a service."""
- post_body = {
- 'name': name,
- 'type': type,
- 'description': kwargs.get('description')
- }
- post_body = json.dumps({'OS-KSADM:service': post_body})
- resp, body = self.post('/OS-KSADM/services', post_body)
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
-
- def show_service(self, service_id):
- """Get Service."""
- url = '/OS-KSADM/services/%s' % service_id
- resp, body = self.get(url)
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
-
- def list_services(self):
- """List Service - Returns Services."""
- resp, body = self.get('/OS-KSADM/services')
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
-
- def delete_service(self, service_id):
- """Delete Service."""
- url = '/OS-KSADM/services/%s' % service_id
- resp, body = self.delete(url)
- self.expected_success(204, resp.status)
- return service_client.ResponseBody(resp, body)
-
def create_endpoint(self, service_id, region_id, **kwargs):
"""Create an endpoint for service."""
post_body = {
@@ -213,63 +68,9 @@
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp, body)
- def update_user_password(self, user_id, new_pass):
- """Update User Password."""
- put_body = {
- 'password': new_pass,
- 'id': user_id
- }
- put_body = json.dumps({'user': put_body})
- resp, body = self.put('users/%s/OS-KSADM/password' % user_id, put_body)
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
-
- def update_user_own_password(self, user_id, new_pass, old_pass):
- """User updates own password"""
- patch_body = {
- "password": new_pass,
- "original_password": old_pass
- }
- patch_body = json.dumps({'user': patch_body})
- resp, body = self.patch('OS-KSCRUD/users/%s' % user_id, patch_body)
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
-
def list_extensions(self):
"""List all the extensions."""
resp, body = self.get('/extensions')
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
-
- def create_user_ec2_credentials(self, user_id, **kwargs):
- # TODO(piyush): Current api-site doesn't contain this API description.
- # After fixing the api-site, we need to fix here also for putting the
- # link to api-site.
- post_body = json.dumps(kwargs)
- resp, body = self.post('/users/%s/credentials/OS-EC2' % user_id,
- post_body)
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
-
- def delete_user_ec2_credentials(self, user_id, access):
- resp, body = self.delete('/users/%s/credentials/OS-EC2/%s' %
- (user_id, access))
- self.expected_success(204, resp.status)
- return service_client.ResponseBody(resp, body)
-
- def list_user_ec2_credentials(self, user_id):
- resp, body = self.get('/users/%s/credentials/OS-EC2' % user_id)
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
-
- def show_user_ec2_credentials(self, user_id, access):
- resp, body = self.get('/users/%s/credentials/OS-EC2/%s' %
- (user_id, access))
- self.expected_success(200, resp.status)
- body = json.loads(body)
- return service_client.ResponseBody(resp, body)
diff --git a/tempest/services/identity/v2/json/roles_client.py b/tempest/services/identity/v2/json/roles_client.py
new file mode 100644
index 0000000..ef6dfe9
--- /dev/null
+++ b/tempest/services/identity/v2/json/roles_client.py
@@ -0,0 +1,74 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_serialization import jsonutils as json
+
+from tempest.common import service_client
+
+
+class RolesClient(service_client.ServiceClient):
+ api_version = "v2.0"
+
+ def create_role(self, **kwargs):
+ """Create a role.
+
+ Available params: see http://developer.openstack.org/
+ api-ref-identity-v2-ext.html#createRole
+ """
+ post_body = json.dumps({'role': kwargs})
+ resp, body = self.post('OS-KSADM/roles', post_body)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return service_client.ResponseBody(resp, body)
+
+ def show_role(self, role_id):
+ """Get a role by its id."""
+ resp, body = self.get('OS-KSADM/roles/%s' % role_id)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return service_client.ResponseBody(resp, body)
+
+ def delete_role(self, role_id):
+ """Delete a role."""
+ resp, body = self.delete('OS-KSADM/roles/%s' % str(role_id))
+ self.expected_success(204, resp.status)
+        return service_client.ResponseBody(resp, body)
+
+ def list_user_roles(self, tenant_id, user_id):
+ """Returns a list of roles assigned to a user for a tenant."""
+ url = '/tenants/%s/users/%s/roles' % (tenant_id, user_id)
+ resp, body = self.get(url)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return service_client.ResponseBody(resp, body)
+
+ def assign_user_role(self, tenant_id, user_id, role_id):
+ """Add roles to a user on a tenant."""
+ resp, body = self.put('/tenants/%s/users/%s/roles/OS-KSADM/%s' %
+ (tenant_id, user_id, role_id), "")
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return service_client.ResponseBody(resp, body)
+
+ def delete_user_role(self, tenant_id, user_id, role_id):
+ """Removes a role assignment for a user on a tenant."""
+ resp, body = self.delete('/tenants/%s/users/%s/roles/OS-KSADM/%s' %
+ (tenant_id, user_id, role_id))
+ self.expected_success(204, resp.status)
+ return service_client.ResponseBody(resp, body)
+
+ def list_roles(self):
+ """Returns roles."""
+ resp, body = self.get('OS-KSADM/roles')
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return service_client.ResponseBody(resp, body)
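
A hypothetical usage sketch for the new v2 RolesClient, not part of the patch: it assumes an already configured client instance and the usual keystone v2 'role'/'roles' response keys, with ResponseBody behaving like a dict as it does elsewhere in tempest, for example::

    def grant_role_on_tenant(roles_client, tenant_id, user_id, role_name):
        # role attributes are now passed as keyword arguments
        role = roles_client.create_role(name=role_name)['role']
        roles_client.assign_user_role(tenant_id, user_id, role['id'])
        # confirm the assignment through the per-tenant listing
        assigned = roles_client.list_user_roles(tenant_id, user_id)['roles']
        assert any(r['id'] == role['id'] for r in assigned)
        return role['id']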
diff --git a/tempest/services/identity/v2/json/services_client.py b/tempest/services/identity/v2/json/services_client.py
new file mode 100644
index 0000000..436d00d
--- /dev/null
+++ b/tempest/services/identity/v2/json/services_client.py
@@ -0,0 +1,56 @@
+# Copyright 2015 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from oslo_serialization import jsonutils as json
+
+from tempest.common import service_client
+
+
+class ServicesClient(service_client.ServiceClient):
+ api_version = "v2.0"
+
+ def create_service(self, name, type, **kwargs):
+ """Create a service."""
+ post_body = {
+ 'name': name,
+ 'type': type,
+ 'description': kwargs.get('description')
+ }
+ post_body = json.dumps({'OS-KSADM:service': post_body})
+ resp, body = self.post('/OS-KSADM/services', post_body)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return service_client.ResponseBody(resp, body)
+
+ def show_service(self, service_id):
+ """Get Service."""
+ url = '/OS-KSADM/services/%s' % service_id
+ resp, body = self.get(url)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return service_client.ResponseBody(resp, body)
+
+ def list_services(self):
+ """List Service - Returns Services."""
+ resp, body = self.get('/OS-KSADM/services')
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return service_client.ResponseBody(resp, body)
+
+ def delete_service(self, service_id):
+ """Delete Service."""
+ url = '/OS-KSADM/services/%s' % service_id
+ resp, body = self.delete(url)
+ self.expected_success(204, resp.status)
+ return service_client.ResponseBody(resp, body)
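
A minimal sketch of how the relocated v2 ServicesClient would be exercised; the 'OS-KSADM:service(s)' response keys follow the keystone OS-KSADM extension and are an assumption here, as is the pre-configured client instance::

    def recreate_service(services_client, name, type, description=None):
        # name and type stay positional; description remains a kwarg
        service = services_client.create_service(
            name, type, description=description)['OS-KSADM:service']
        services = services_client.list_services()['OS-KSADM:services']
        assert service['id'] in [s['id'] for s in services]
        services_client.delete_service(service['id'])
        return service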
diff --git a/tempest/services/identity/v2/json/users_client.py b/tempest/services/identity/v2/json/users_client.py
new file mode 100644
index 0000000..5327638
--- /dev/null
+++ b/tempest/services/identity/v2/json/users_client.py
@@ -0,0 +1,137 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_serialization import jsonutils as json
+
+from tempest.common import service_client
+
+
+class UsersClient(service_client.ServiceClient):
+ api_version = "v2.0"
+
+ def create_user(self, name, password, tenant_id, email, **kwargs):
+ """Create a user."""
+ post_body = {
+ 'name': name,
+ 'password': password,
+ 'email': email
+ }
+ if tenant_id is not None:
+ post_body['tenantId'] = tenant_id
+ if kwargs.get('enabled') is not None:
+ post_body['enabled'] = kwargs.get('enabled')
+ post_body = json.dumps({'user': post_body})
+ resp, body = self.post('users', post_body)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return service_client.ResponseBody(resp, body)
+
+ def update_user(self, user_id, **kwargs):
+ """Updates a user."""
+ put_body = json.dumps({'user': kwargs})
+ resp, body = self.put('users/%s' % user_id, put_body)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return service_client.ResponseBody(resp, body)
+
+ def show_user(self, user_id):
+ """GET a user."""
+ resp, body = self.get("users/%s" % user_id)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return service_client.ResponseBody(resp, body)
+
+ def delete_user(self, user_id):
+ """Delete a user."""
+ resp, body = self.delete("users/%s" % user_id)
+ self.expected_success(204, resp.status)
+ return service_client.ResponseBody(resp, body)
+
+ def list_users(self):
+ """Get the list of users."""
+ resp, body = self.get("users")
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return service_client.ResponseBody(resp, body)
+
+ def enable_disable_user(self, user_id, **kwargs):
+ """Enables or disables a user.
+
+ Available params: see http://developer.openstack.org/
+ api-ref-identity-v2-ext.html#enableUser
+ """
+ # NOTE: The URL (users/<id>/enabled) is different from the api-site
+        # one (users/<id>/OS-KSADM/enabled), but they are the same API
+        # because in keystone/contrib/admin_crud/core.py both APIs use the
+        # same action='set_user_enabled'
+ put_body = json.dumps({'user': kwargs})
+ resp, body = self.put('users/%s/enabled' % user_id, put_body)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return service_client.ResponseBody(resp, body)
+
+ def update_user_password(self, user_id, **kwargs):
+ """Update User Password."""
+ # TODO(piyush): Current api-site doesn't contain this API description.
+ # After fixing the api-site, we need to fix here also for putting the
+ # link to api-site.
+ # LP: https://bugs.launchpad.net/openstack-api-site/+bug/1524147
+ put_body = json.dumps({'user': kwargs})
+ resp, body = self.put('users/%s/OS-KSADM/password' % user_id, put_body)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return service_client.ResponseBody(resp, body)
+
+ def update_user_own_password(self, user_id, **kwargs):
+ """User updates own password"""
+ # TODO(piyush): Current api-site doesn't contain this API description.
+ # After fixing the api-site, we need to fix here also for putting the
+ # link to api-site.
+ # LP: https://bugs.launchpad.net/openstack-api-site/+bug/1524153
+ # NOTE: This API is used for updating user password by itself.
+ # Ref: http://lists.openstack.org/pipermail/openstack-dev/2015-December
+ # /081803.html
+ patch_body = json.dumps({'user': kwargs})
+ resp, body = self.patch('OS-KSCRUD/users/%s' % user_id, patch_body)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return service_client.ResponseBody(resp, body)
+
+ def create_user_ec2_credentials(self, user_id, **kwargs):
+ # TODO(piyush): Current api-site doesn't contain this API description.
+ # After fixing the api-site, we need to fix here also for putting the
+ # link to api-site.
+ post_body = json.dumps(kwargs)
+ resp, body = self.post('/users/%s/credentials/OS-EC2' % user_id,
+ post_body)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return service_client.ResponseBody(resp, body)
+
+ def delete_user_ec2_credentials(self, user_id, access):
+ resp, body = self.delete('/users/%s/credentials/OS-EC2/%s' %
+ (user_id, access))
+ self.expected_success(204, resp.status)
+ return service_client.ResponseBody(resp, body)
+
+ def list_user_ec2_credentials(self, user_id):
+ resp, body = self.get('/users/%s/credentials/OS-EC2' % user_id)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return service_client.ResponseBody(resp, body)
+
+ def show_user_ec2_credentials(self, user_id, access):
+ resp, body = self.get('/users/%s/credentials/OS-EC2/%s' %
+ (user_id, access))
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return service_client.ResponseBody(resp, body)
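
Callers of the removed identity_client methods now pass the request body fields as keyword arguments. A hedged sketch, assuming a configured UsersClient instance and illustrative helper names::

    def set_enabled(users_client, user_id, enabled):
        # old form: enable_disable_user(user_id, enabled)
        return users_client.enable_disable_user(user_id, enabled=enabled)

    def rotate_password(users_client, user_id, new_pass, old_pass):
        # admin reset; the removed version also sent 'id' in the body,
        # pass id=user_id as well to reproduce it exactly
        users_client.update_user_password(user_id, password=new_pass)
        # self-service update through the OS-KSCRUD extension
        users_client.update_user_own_password(
            user_id, password=new_pass, original_password=old_pass)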
diff --git a/tempest/services/identity/v3/json/identity_client.py b/tempest/services/identity/v3/json/identity_client.py
index 54d7723..fba26d4 100644
--- a/tempest/services/identity/v3/json/identity_client.py
+++ b/tempest/services/identity/v3/json/identity_client.py
@@ -180,12 +180,13 @@
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp, body)
- def create_role(self, name):
- """Create a Role."""
- post_body = {
- 'name': name
- }
- post_body = json.dumps({'role': post_body})
+ def create_role(self, **kwargs):
+ """Create a Role.
+
+ Available params: see http://developer.openstack.org/
+ api-ref-identity-v3.html#createRole
+ """
+ post_body = json.dumps({'role': kwargs})
resp, body = self.post('roles', post_body)
self.expected_success(201, resp.status)
body = json.loads(body)
@@ -205,12 +206,13 @@
body = json.loads(body)
return service_client.ResponseBody(resp, body)
- def update_role(self, name, role_id):
- """Create a Role."""
- post_body = {
- 'name': name
- }
- post_body = json.dumps({'role': post_body})
+ def update_role(self, role_id, **kwargs):
+ """Update a Role.
+
+ Available params: see http://developer.openstack.org/
+ api-ref-identity-v3.html#updateRole
+ """
+ post_body = json.dumps({'role': kwargs})
resp, body = self.patch('roles/%s' % str(role_id), post_body)
self.expected_success(200, resp.status)
body = json.loads(body)
diff --git a/tempest/services/identity/v3/json/policy_client.py b/tempest/services/identity/v3/json/policies_client.py
similarity index 97%
rename from tempest/services/identity/v3/json/policy_client.py
rename to tempest/services/identity/v3/json/policies_client.py
index 7927ed5..639ed6d 100644
--- a/tempest/services/identity/v3/json/policy_client.py
+++ b/tempest/services/identity/v3/json/policies_client.py
@@ -22,7 +22,7 @@
from tempest.common import service_client
-class PolicyClient(service_client.ServiceClient):
+class PoliciesClient(service_client.ServiceClient):
api_version = "v3"
def create_policy(self, **kwargs):
diff --git a/tempest/services/identity/v3/json/region_client.py b/tempest/services/identity/v3/json/regions_client.py
similarity index 98%
rename from tempest/services/identity/v3/json/region_client.py
rename to tempest/services/identity/v3/json/regions_client.py
index 3595391..bc4b7a1 100644
--- a/tempest/services/identity/v3/json/region_client.py
+++ b/tempest/services/identity/v3/json/regions_client.py
@@ -23,7 +23,7 @@
from tempest.common import service_client
-class RegionClient(service_client.ServiceClient):
+class RegionsClient(service_client.ServiceClient):
api_version = "v3"
def create_region(self, region_id=None, **kwargs):
diff --git a/tempest/services/identity/v3/json/service_client.py b/tempest/services/identity/v3/json/services_client.py
similarity index 97%
rename from tempest/services/identity/v3/json/service_client.py
rename to tempest/services/identity/v3/json/services_client.py
index 3dbfe5e..dd65f1d 100644
--- a/tempest/services/identity/v3/json/service_client.py
+++ b/tempest/services/identity/v3/json/services_client.py
@@ -22,7 +22,7 @@
from tempest.common import service_client
-class ServiceClient(service_client.ServiceClient):
+class ServicesClient(service_client.ServiceClient):
api_version = "v3"
def update_service(self, service_id, **kwargs):
diff --git a/tempest/services/image/v1/json/images_client.py b/tempest/services/image/v1/json/images_client.py
index 3406db8..af2e68c 100644
--- a/tempest/services/image/v1/json/images_client.py
+++ b/tempest/services/image/v1/json/images_client.py
@@ -147,50 +147,29 @@
self._http = self._get_http()
return self._http
- def create_image(self, name, container_format, disk_format, **kwargs):
- params = {
- "name": name,
- "container_format": container_format,
- "disk_format": disk_format,
- }
-
+ def create_image(self, **kwargs):
headers = {}
+ data = kwargs.pop('data', None)
+ headers.update(self._image_meta_to_headers(kwargs))
- for option in ['is_public', 'location', 'properties',
- 'copy_from', 'min_ram']:
- if option in kwargs:
- params[option] = kwargs.get(option)
-
- headers.update(self._image_meta_to_headers(params))
-
- if 'data' in kwargs:
- return self._create_with_data(headers, kwargs.get('data'))
+ if data is not None:
+ return self._create_with_data(headers, data)
resp, body = self.post('v1/images', None, headers)
self.expected_success(201, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
- def update_image(self, image_id, name=None, container_format=None,
- data=None, properties=None):
- params = {}
+ def update_image(self, image_id, **kwargs):
headers = {}
- if name is not None:
- params['name'] = name
-
- if container_format is not None:
- params['container_format'] = container_format
-
- if properties is not None:
- params['properties'] = properties
-
- headers.update(self._image_meta_to_headers(params))
+ data = kwargs.pop('data', None)
+ headers.update(self._image_meta_to_headers(kwargs))
if data is not None:
return self._update_with_data(image_id, headers, data)
url = 'v1/images/%s' % image_id
- resp, body = self.put(url, data, headers)
+ resp, body = self.put(url, None, headers)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
@@ -201,21 +180,27 @@
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
- def list_images(self, detail=False, properties=dict(),
- changes_since=None, **kwargs):
+ def list_images(self, detail=False, **kwargs):
+ """Return a list of all images filtered by input parameters.
+
+ Available params: see http://developer.openstack.org/
+ api-ref-image-v1.html#listImage-v1
+
+ Most parameters except the following are passed to the API without
+ any changes.
+ :param changes_since: The name is changed to changes-since
+ """
url = 'v1/images'
if detail:
url += '/detail'
- params = {}
- for key, value in properties.items():
- params['property-%s' % key] = value
+ properties = kwargs.pop('properties', {})
+ for key, value in six.iteritems(properties):
+ kwargs['property-%s' % key] = value
- kwargs.update(params)
-
- if changes_since is not None:
- kwargs['changes-since'] = changes_since
+ if kwargs.get('changes_since'):
+ kwargs['changes-since'] = kwargs.pop('changes_since')
if len(kwargs) > 0:
url += '?%s' % urllib.urlencode(kwargs)
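
A sketch of the adapted glance v1 calls after the **kwargs refactor; the client instance, the image payload and the 'image'/'images' body keys are assumptions based on the glance v1 API, not part of the patch::

    def upload_and_list(images_client, name, image_bytes):
        # name/container_format/disk_format are now ordinary kwargs and
        # raw bytes still travel through the 'data' keyword
        body = images_client.create_image(name=name,
                                          container_format='bare',
                                          disk_format='raw',
                                          is_public=False,
                                          data=image_bytes)
        image_id = body['image']['id']
        # 'properties' is expanded to property-<key> filters and
        # changes_since is translated to the changes-since parameter
        listed = images_client.list_images(detail=True,
                                           properties={'key1': 'value1'},
                                           changes_since='2016-01-01')
        return image_id, listed['images']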
diff --git a/tempest/services/image/v2/json/images_client.py b/tempest/services/image/v2/json/images_client.py
index 44062ea..72b203a 100644
--- a/tempest/services/image/v2/json/images_client.py
+++ b/tempest/services/image/v2/json/images_client.py
@@ -55,6 +55,11 @@
return self._http
def update_image(self, image_id, patch):
+ """Update an image.
+
+ Available params: see http://developer.openstack.org/
+ api-ref-image-v2.html#updateImage-v2
+ """
data = json.dumps(patch)
headers = {"Content-Type": "application/openstack-images-v2.0"
"-json-patch"}
@@ -63,21 +68,13 @@
body = json.loads(body)
return service_client.ResponseBody(resp, body)
- def create_image(self, name, container_format, disk_format, **kwargs):
- params = {
- "name": name,
- "container_format": container_format,
- "disk_format": disk_format,
- }
+ def create_image(self, **kwargs):
+ """Create an image.
- for option in kwargs:
- value = kwargs.get(option)
- if isinstance(value, dict) or isinstance(value, tuple):
- params.update(value)
- else:
- params[option] = value
-
- data = json.dumps(params)
+ Available params: see http://developer.openstack.org/
+ api-ref-image-v2.html#createImage-v2
+ """
+ data = json.dumps(kwargs)
resp, body = self.post('v2/images', data)
self.expected_success(201, resp.status)
body = json.loads(body)
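
For the v2 client, create_image() now serializes its keyword arguments directly, so any extra attribute becomes a top-level image property. An illustrative sketch, assuming a configured instance and that glance v2 returns the image document itself as the body::

    def create_private_image(images_client):
        body = images_client.create_image(name='test-image',
                                          container_format='bare',
                                          disk_format='qcow2',
                                          visibility='private')
        return body['id']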
diff --git a/tempest/services/network/json/agents_client.py b/tempest/services/network/json/agents_client.py
new file mode 100644
index 0000000..8bec847
--- /dev/null
+++ b/tempest/services/network/json/agents_client.py
@@ -0,0 +1,68 @@
+# Copyright 2015 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.services.network.json import base
+
+
+class AgentsClient(base.BaseNetworkClient):
+
+ def update_agent(self, agent_id, **kwargs):
+ """Update agent."""
+ # TODO(piyush): Current api-site doesn't contain this API description.
+ # After fixing the api-site, we need to fix here also for putting the
+ # link to api-site.
+ # LP: https://bugs.launchpad.net/openstack-api-site/+bug/1526673
+ uri = '/agents/%s' % agent_id
+ return self.update_resource(uri, kwargs)
+
+ def show_agent(self, agent_id, **fields):
+ uri = '/agents/%s' % agent_id
+ return self.show_resource(uri, **fields)
+
+ def list_agents(self, **filters):
+ uri = '/agents'
+ return self.list_resources(uri, **filters)
+
+ def list_routers_on_l3_agent(self, agent_id):
+ uri = '/agents/%s/l3-routers' % agent_id
+ return self.list_resources(uri)
+
+ def create_router_on_l3_agent(self, agent_id, **kwargs):
+ # TODO(piyush): Current api-site doesn't contain this API description.
+ # After fixing the api-site, we need to fix here also for putting the
+ # link to api-site.
+ # LP: https://bugs.launchpad.net/openstack-api-site/+bug/1526670
+ uri = '/agents/%s/l3-routers' % agent_id
+ return self.create_resource(uri, kwargs)
+
+ def delete_router_from_l3_agent(self, agent_id, router_id):
+ uri = '/agents/%s/l3-routers/%s' % (agent_id, router_id)
+ return self.delete_resource(uri)
+
+ def list_networks_hosted_by_one_dhcp_agent(self, agent_id):
+ uri = '/agents/%s/dhcp-networks' % agent_id
+ return self.list_resources(uri)
+
+ def delete_network_from_dhcp_agent(self, agent_id, network_id):
+ uri = '/agents/%s/dhcp-networks/%s' % (agent_id,
+ network_id)
+ return self.delete_resource(uri)
+
+ def add_dhcp_agent_to_network(self, agent_id, **kwargs):
+ # TODO(piyush): Current api-site doesn't contain this API description.
+ # After fixing the api-site, we need to fix here also for putting the
+ # link to api-site.
+ # LP: https://bugs.launchpad.net/openstack-api-site/+bug/1526212
+ uri = '/agents/%s/dhcp-networks' % agent_id
+ return self.create_resource(uri, kwargs)
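
The new AgentsClient wraps its **kwargs into the request body verbatim, so callers supply the full document themselves. A hedged sketch with an assumed, configured client instance::

    def pin_resources_to_agents(agents_client, l3_agent_id, dhcp_agent_id,
                                router_id, network_id):
        # replaces network_client.add_router_to_l3_agent(agent_id, **kwargs)
        agents_client.create_router_on_l3_agent(l3_agent_id,
                                                router_id=router_id)
        # replaces add_dhcp_agent_to_network(agent_id, network_id)
        agents_client.add_dhcp_agent_to_network(dhcp_agent_id,
                                                network_id=network_id)
        # the listing calls keep their previous shape
        routers = agents_client.list_routers_on_l3_agent(l3_agent_id)
        networks = agents_client.list_networks_hosted_by_one_dhcp_agent(
            dhcp_agent_id)
        return routers, networks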
diff --git a/tempest/services/network/json/extensions_client.py b/tempest/services/network/json/extensions_client.py
new file mode 100644
index 0000000..64d3a4f
--- /dev/null
+++ b/tempest/services/network/json/extensions_client.py
@@ -0,0 +1,24 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.services.network.json import base
+
+
+class ExtensionsClient(base.BaseNetworkClient):
+
+ def show_extension(self, ext_alias, **fields):
+ uri = '/extensions/%s' % ext_alias
+ return self.show_resource(uri, **fields)
+
+ def list_extensions(self, **filters):
+ uri = '/extensions'
+ return self.list_resources(uri, **filters)
diff --git a/tempest/services/network/json/floating_ips_client.py b/tempest/services/network/json/floating_ips_client.py
deleted file mode 100644
index 5c490ed..0000000
--- a/tempest/services/network/json/floating_ips_client.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.services.network.json import base
-
-
-class FloatingIPsClient(base.BaseNetworkClient):
-
- def create_floatingip(self, **kwargs):
- uri = '/floatingips'
- post_data = {'floatingip': kwargs}
- return self.create_resource(uri, post_data)
-
- def update_floatingip(self, floatingip_id, **kwargs):
- uri = '/floatingips/%s' % floatingip_id
- post_data = {'floatingip': kwargs}
- return self.update_resource(uri, post_data)
-
- def show_floatingip(self, floatingip_id, **fields):
- uri = '/floatingips/%s' % floatingip_id
- return self.show_resource(uri, **fields)
-
- def delete_floatingip(self, floatingip_id):
- uri = '/floatingips/%s' % floatingip_id
- return self.delete_resource(uri)
-
- def list_floatingips(self, **filters):
- uri = '/floatingips'
- return self.list_resources(uri, **filters)
diff --git a/tempest/services/network/json/metering_label_rules_client.py b/tempest/services/network/json/metering_label_rules_client.py
deleted file mode 100644
index 374a89c..0000000
--- a/tempest/services/network/json/metering_label_rules_client.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.services.network.json import base
-
-
-class MeteringLabelRulesClient(base.BaseNetworkClient):
-
- def create_metering_label_rule(self, **kwargs):
- uri = '/metering/metering-label-rules'
- post_data = {'metering_label_rule': kwargs}
- return self.create_resource(uri, post_data)
-
- def show_metering_label_rule(self, metering_label_rule_id, **fields):
- uri = '/metering/metering-label-rules/%s' % metering_label_rule_id
- return self.show_resource(uri, **fields)
-
- def delete_metering_label_rule(self, metering_label_rule_id):
- uri = '/metering/metering-label-rules/%s' % metering_label_rule_id
- return self.delete_resource(uri)
-
- def list_metering_label_rules(self, **filters):
- uri = '/metering/metering-label-rules'
- return self.list_resources(uri, **filters)
diff --git a/tempest/services/network/json/metering_labels_client.py b/tempest/services/network/json/metering_labels_client.py
deleted file mode 100644
index 2e5cdae..0000000
--- a/tempest/services/network/json/metering_labels_client.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.services.network.json import base
-
-
-class MeteringLabelsClient(base.BaseNetworkClient):
-
- def create_metering_label(self, **kwargs):
- uri = '/metering/metering-labels'
- post_data = {'metering_label': kwargs}
- return self.create_resource(uri, post_data)
-
- def show_metering_label(self, metering_label_id, **fields):
- uri = '/metering/metering-labels/%s' % metering_label_id
- return self.show_resource(uri, **fields)
-
- def delete_metering_label(self, metering_label_id):
- uri = '/metering/metering-labels/%s' % metering_label_id
- return self.delete_resource(uri)
-
- def list_metering_labels(self, **filters):
- uri = '/metering/metering-labels'
- return self.list_resources(uri, **filters)
diff --git a/tempest/services/network/json/network_client.py b/tempest/services/network/json/network_client.py
index 459891f..6b0b1f9 100644
--- a/tempest/services/network/json/network_client.py
+++ b/tempest/services/network/json/network_client.py
@@ -35,68 +35,32 @@
quotas
"""
- def create_security_group(self, **kwargs):
- uri = '/security-groups'
- post_data = {'security_group': kwargs}
- return self.create_resource(uri, post_data)
+ def create_bulk_network(self, **kwargs):
+ """create bulk network
- def update_security_group(self, security_group_id, **kwargs):
- uri = '/security-groups/%s' % security_group_id
- post_data = {'security_group': kwargs}
- return self.update_resource(uri, post_data)
-
- def show_security_group(self, security_group_id, **fields):
- uri = '/security-groups/%s' % security_group_id
- return self.show_resource(uri, **fields)
-
- def delete_security_group(self, security_group_id):
- uri = '/security-groups/%s' % security_group_id
- return self.delete_resource(uri)
-
- def list_security_groups(self, **filters):
- uri = '/security-groups'
- return self.list_resources(uri, **filters)
-
- def create_security_group_rule(self, **kwargs):
- uri = '/security-group-rules'
- post_data = {'security_group_rule': kwargs}
- return self.create_resource(uri, post_data)
-
- def show_security_group_rule(self, security_group_rule_id, **fields):
- uri = '/security-group-rules/%s' % security_group_rule_id
- return self.show_resource(uri, **fields)
-
- def delete_security_group_rule(self, security_group_rule_id):
- uri = '/security-group-rules/%s' % security_group_rule_id
- return self.delete_resource(uri)
-
- def list_security_group_rules(self, **filters):
- uri = '/security-group-rules'
- return self.list_resources(uri, **filters)
-
- def show_extension(self, ext_alias, **fields):
- uri = '/extensions/%s' % ext_alias
- return self.show_resource(uri, **fields)
-
- def list_extensions(self, **filters):
- uri = '/extensions'
- return self.list_resources(uri, **filters)
-
- def create_bulk_network(self, names):
- network_list = [{'name': name} for name in names]
- post_data = {'networks': network_list}
+ Available params: see http://developer.openstack.org/
+ api-ref-networking-v2.html#bulkCreateNetwork
+ """
uri = '/networks'
- return self.create_resource(uri, post_data)
+ return self.create_resource(uri, kwargs)
- def create_bulk_subnet(self, subnet_list):
- post_data = {'subnets': subnet_list}
+ def create_bulk_subnet(self, **kwargs):
+ """create bulk subnet
+
+ Available params: see http://developer.openstack.org/
+ api-ref-networking-v2.html#bulkCreateSubnet
+ """
uri = '/subnets'
- return self.create_resource(uri, post_data)
+ return self.create_resource(uri, kwargs)
- def create_bulk_port(self, port_list):
- post_data = {'ports': port_list}
+ def create_bulk_port(self, **kwargs):
+ """create bulk port
+
+ Available params: see http://developer.openstack.org/
+ api-ref-networking-v2.html#bulkCreatePorts
+ """
uri = '/ports'
- return self.create_resource(uri, post_data)
+ return self.create_resource(uri, kwargs)
def wait_for_resource_deletion(self, resource_type, id, client=None):
"""Waits for a resource to be deleted."""
@@ -240,63 +204,22 @@
uri = '/ports?device_id=%s' % uuid
return self.list_resources(uri)
- def update_agent(self, agent_id, agent_info):
- """Update agent
-
- :param agent_info: Agent update information.
- E.g {"admin_state_up": True}
- """
- uri = '/agents/%s' % agent_id
- agent = {"agent": agent_info}
- return self.update_resource(uri, agent)
-
- def show_agent(self, agent_id, **fields):
- uri = '/agents/%s' % agent_id
- return self.show_resource(uri, **fields)
-
- def list_agents(self, **filters):
- uri = '/agents'
- return self.list_resources(uri, **filters)
-
- def list_routers_on_l3_agent(self, agent_id):
- uri = '/agents/%s/l3-routers' % agent_id
- return self.list_resources(uri)
-
def list_l3_agents_hosting_router(self, router_id):
uri = '/routers/%s/l3-agents' % router_id
return self.list_resources(uri)
- def add_router_to_l3_agent(self, agent_id, **kwargs):
- # TODO(piyush): Current api-site doesn't contain this API description.
- # After fixing the api-site, we need to fix here also for putting the
- # link to api-site.
- uri = '/agents/%s/l3-routers' % agent_id
- return self.create_resource(uri, kwargs)
-
- def remove_router_from_l3_agent(self, agent_id, router_id):
- uri = '/agents/%s/l3-routers/%s' % (agent_id, router_id)
- return self.delete_resource(uri)
-
def list_dhcp_agent_hosting_network(self, network_id):
uri = '/networks/%s/dhcp-agents' % network_id
return self.list_resources(uri)
- def list_networks_hosted_by_one_dhcp_agent(self, agent_id):
- uri = '/agents/%s/dhcp-networks' % agent_id
- return self.list_resources(uri)
+ def update_extra_routes(self, router_id, **kwargs):
+ """Update Extra routes.
- def remove_network_from_dhcp_agent(self, agent_id, network_id):
- uri = '/agents/%s/dhcp-networks/%s' % (agent_id,
- network_id)
- return self.delete_resource(uri)
-
- def update_extra_routes(self, router_id, routes):
+ Available params: see http://developer.openstack.org/
+ api-ref-networking-v2-ext.html#updateExtraRoutes
+ """
uri = '/routers/%s' % router_id
- put_body = {
- 'router': {
- 'routes': routes
- }
- }
+ put_body = {'router': kwargs}
return self.update_resource(uri, put_body)
def delete_extra_routes(self, router_id):
@@ -307,30 +230,3 @@
}
}
return self.update_resource(uri, put_body)
-
- def add_dhcp_agent_to_network(self, agent_id, network_id):
- post_body = {'network_id': network_id}
- uri = '/agents/%s/dhcp-networks' % agent_id
- return self.create_resource(uri, post_body)
-
- def list_subnetpools(self, **filters):
- uri = '/subnetpools'
- return self.list_resources(uri, **filters)
-
- def create_subnetpools(self, **kwargs):
- uri = '/subnetpools'
- post_data = {'subnetpool': kwargs}
- return self.create_resource(uri, post_data)
-
- def show_subnetpools(self, subnetpool_id, **fields):
- uri = '/subnetpools/%s' % subnetpool_id
- return self.show_resource(uri, **fields)
-
- def update_subnetpools(self, subnetpool_id, **kwargs):
- uri = '/subnetpools/%s' % subnetpool_id
- post_data = {'subnetpool': kwargs}
- return self.update_resource(uri, post_data)
-
- def delete_subnetpools(self, subnetpool_id):
- uri = '/subnetpools/%s' % subnetpool_id
- return self.delete_resource(uri)
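
The bulk-create helpers no longer build the request body, so the caller now passes the resource list as a keyword argument. A sketch of the adapted calls, assuming a configured network_client instance::

    def create_test_networks(network_client, names):
        # old form: network_client.create_bulk_network(names)
        networks = [{'name': name} for name in names]
        body = network_client.create_bulk_network(networks=networks)
        return body['networks']

    def create_test_subnets(network_client, subnet_list):
        # old form: create_bulk_subnet(subnet_list); the wrapping
        # 'subnets' key is now supplied by the caller
        return network_client.create_bulk_subnet(subnets=subnet_list)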
diff --git a/tempest/services/network/json/networks_client.py b/tempest/services/network/json/networks_client.py
deleted file mode 100644
index 2907d44..0000000
--- a/tempest/services/network/json/networks_client.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.services.network.json import base
-
-
-class NetworksClient(base.BaseNetworkClient):
-
- def create_network(self, **kwargs):
- uri = '/networks'
- post_data = {'network': kwargs}
- return self.create_resource(uri, post_data)
-
- def update_network(self, network_id, **kwargs):
- uri = '/networks/%s' % network_id
- post_data = {'network': kwargs}
- return self.update_resource(uri, post_data)
-
- def show_network(self, network_id, **fields):
- uri = '/networks/%s' % network_id
- return self.show_resource(uri, **fields)
-
- def delete_network(self, network_id):
- uri = '/networks/%s' % network_id
- return self.delete_resource(uri)
-
- def list_networks(self, **filters):
- uri = '/networks'
- return self.list_resources(uri, **filters)
diff --git a/tempest/services/network/json/ports_client.py b/tempest/services/network/json/ports_client.py
deleted file mode 100644
index d52d65e..0000000
--- a/tempest/services/network/json/ports_client.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.services.network.json import base
-
-
-class PortsClient(base.BaseNetworkClient):
-
- def create_port(self, **kwargs):
- uri = '/ports'
- post_data = {'port': kwargs}
- return self.create_resource(uri, post_data)
-
- def update_port(self, port_id, **kwargs):
- uri = '/ports/%s' % port_id
- post_data = {'port': kwargs}
- return self.update_resource(uri, post_data)
-
- def show_port(self, port_id, **fields):
- uri = '/ports/%s' % port_id
- return self.show_resource(uri, **fields)
-
- def delete_port(self, port_id):
- uri = '/ports/%s' % port_id
- return self.delete_resource(uri)
-
- def list_ports(self, **filters):
- uri = '/ports'
- return self.list_resources(uri, **filters)
diff --git a/tempest/services/network/json/security_group_rules_client.py b/tempest/services/network/json/security_group_rules_client.py
new file mode 100644
index 0000000..b2ba5b2
--- /dev/null
+++ b/tempest/services/network/json/security_group_rules_client.py
@@ -0,0 +1,33 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.services.network.json import base
+
+
+class SecurityGroupRulesClient(base.BaseNetworkClient):
+
+ def create_security_group_rule(self, **kwargs):
+ uri = '/security-group-rules'
+ post_data = {'security_group_rule': kwargs}
+ return self.create_resource(uri, post_data)
+
+ def show_security_group_rule(self, security_group_rule_id, **fields):
+ uri = '/security-group-rules/%s' % security_group_rule_id
+ return self.show_resource(uri, **fields)
+
+ def delete_security_group_rule(self, security_group_rule_id):
+ uri = '/security-group-rules/%s' % security_group_rule_id
+ return self.delete_resource(uri)
+
+ def list_security_group_rules(self, **filters):
+ uri = '/security-group-rules'
+ return self.list_resources(uri, **filters)
diff --git a/tempest/services/network/json/security_groups_client.py b/tempest/services/network/json/security_groups_client.py
new file mode 100644
index 0000000..a60d2a6
--- /dev/null
+++ b/tempest/services/network/json/security_groups_client.py
@@ -0,0 +1,38 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.services.network.json import base
+
+
+class SecurityGroupsClient(base.BaseNetworkClient):
+
+ def create_security_group(self, **kwargs):
+ uri = '/security-groups'
+ post_data = {'security_group': kwargs}
+ return self.create_resource(uri, post_data)
+
+ def update_security_group(self, security_group_id, **kwargs):
+ uri = '/security-groups/%s' % security_group_id
+ post_data = {'security_group': kwargs}
+ return self.update_resource(uri, post_data)
+
+ def show_security_group(self, security_group_id, **fields):
+ uri = '/security-groups/%s' % security_group_id
+ return self.show_resource(uri, **fields)
+
+ def delete_security_group(self, security_group_id):
+ uri = '/security-groups/%s' % security_group_id
+ return self.delete_resource(uri)
+
+ def list_security_groups(self, **filters):
+ uri = '/security-groups'
+ return self.list_resources(uri, **filters)
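
A short, hypothetical sketch combining the two relocated security-group clients; the client instances and the neutron 'security_group' response key are assumptions, not part of the patch::

    def open_ssh(security_groups_client, security_group_rules_client):
        sg = security_groups_client.create_security_group(
            name='test-secgroup')['security_group']
        security_group_rules_client.create_security_group_rule(
            security_group_id=sg['id'], direction='ingress',
            protocol='tcp', port_range_min=22, port_range_max=22)
        return sg['id']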
diff --git a/tempest/services/network/json/subnetpools_client.py b/tempest/services/network/json/subnetpools_client.py
new file mode 100644
index 0000000..f921bb0
--- /dev/null
+++ b/tempest/services/network/json/subnetpools_client.py
@@ -0,0 +1,40 @@
+# Copyright 2015 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.services.network.json import base
+
+
+class SubnetpoolsClient(base.BaseNetworkClient):
+
+ def list_subnetpools(self, **filters):
+ uri = '/subnetpools'
+ return self.list_resources(uri, **filters)
+
+ def create_subnetpool(self, **kwargs):
+ uri = '/subnetpools'
+ post_data = {'subnetpool': kwargs}
+ return self.create_resource(uri, post_data)
+
+ def show_subnetpool(self, subnetpool_id, **fields):
+ uri = '/subnetpools/%s' % subnetpool_id
+ return self.show_resource(uri, **fields)
+
+ def update_subnetpool(self, subnetpool_id, **kwargs):
+ uri = '/subnetpools/%s' % subnetpool_id
+ post_data = {'subnetpool': kwargs}
+ return self.update_resource(uri, post_data)
+
+ def delete_subnetpool(self, subnetpool_id):
+ uri = '/subnetpools/%s' % subnetpool_id
+ return self.delete_resource(uri)
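
An illustrative sketch of the relocated subnet pool calls, assuming a configured client instance and a neutron-style 'subnetpool' response key::

    def resize_pool(subnetpools_client, name):
        body = subnetpools_client.create_subnetpool(
            name=name, prefixes=['10.10.0.0/16'])
        pool_id = body['subnetpool']['id']
        # methods drop the plural suffix used by the removed
        # network_client versions (create_subnetpools -> create_subnetpool)
        subnetpools_client.update_subnetpool(
            pool_id, prefixes=['10.10.0.0/16', '10.11.0.0/16'])
        return subnetpools_client.show_subnetpool(pool_id)['subnetpool']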
diff --git a/tempest/services/network/json/subnets_client.py b/tempest/services/network/json/subnets_client.py
deleted file mode 100644
index 957b606..0000000
--- a/tempest/services/network/json/subnets_client.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.services.network.json import base
-
-
-class SubnetsClient(base.BaseNetworkClient):
-
- def create_subnet(self, **kwargs):
- uri = '/subnets'
- post_data = {'subnet': kwargs}
- return self.create_resource(uri, post_data)
-
- def update_subnet(self, subnet_id, **kwargs):
- uri = '/subnets/%s' % subnet_id
- post_data = {'subnet': kwargs}
- return self.update_resource(uri, post_data)
-
- def show_subnet(self, subnet_id, **fields):
- uri = '/subnets/%s' % subnet_id
- return self.show_resource(uri, **fields)
-
- def delete_subnet(self, subnet_id):
- uri = '/subnets/%s' % subnet_id
- return self.delete_resource(uri)
-
- def list_subnets(self, **filters):
- uri = '/subnets'
- return self.list_resources(uri, **filters)
diff --git a/tempest/services/volume/base/admin/base_types_client.py b/tempest/services/volume/base/admin/base_types_client.py
index de6ea8a..867273e 100644
--- a/tempest/services/volume/base/admin/base_types_client.py
+++ b/tempest/services/volume/base/admin/base_types_client.py
@@ -47,10 +47,10 @@
"""Returns the primary type of resource this client works with."""
return 'volume-type/encryption-type'
- def list_volume_types(self, params=None):
+ def list_volume_types(self, **params):
"""List all the volume_types created."""
url = 'types'
- if params is not None:
+ if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
@@ -66,19 +66,13 @@
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
- def create_volume_type(self, name, **kwargs):
- """Creates a new Volume_type.
+ def create_volume_type(self, **kwargs):
+ """Create volume type.
- name(Required): Name of volume_type.
- Following optional keyword arguments are accepted:
- extra_specs: A dictionary of values to be used as extra_specs.
+ Available params: see http://developer.openstack.org/
+ api-ref-blockstorage-v2.html#createVolumeType
"""
- post_body = {
- 'name': name,
- 'extra_specs': kwargs.get('extra_specs'),
- }
-
- post_body = json.dumps({'volume_type': post_body})
+ post_body = json.dumps({'volume_type': kwargs})
resp, body = self.post('types', post_body)
body = json.loads(body)
self.expected_success(200, resp.status)
@@ -90,10 +84,17 @@
self.expected_success(202, resp.status)
return service_client.ResponseBody(resp, body)
- def list_volume_types_extra_specs(self, vol_type_id, params=None):
- """List all the volume_types extra specs created."""
+ def list_volume_types_extra_specs(self, vol_type_id, **params):
+ """List all the volume_types extra specs created.
+
+ TODO: Current api-site doesn't contain this API description.
+ After fixing the api-site, we need to fix here also for putting
+ the link to api-site.
+
+ """
url = 'types/%s/extra_specs' % str(vol_type_id)
- if params is not None:
+ if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
@@ -101,23 +102,23 @@
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
- def show_volume_type_extra_specs(self, vol_type_id, extra_spec_name):
+ def show_volume_type_extra_specs(self, vol_type_id, extra_specs_name):
"""Returns the details of a single volume_type extra spec."""
url = "types/%s/extra_specs/%s" % (str(vol_type_id),
- str(extra_spec_name))
+ str(extra_specs_name))
resp, body = self.get(url)
body = json.loads(body)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
- def create_volume_type_extra_specs(self, vol_type_id, extra_spec):
+ def create_volume_type_extra_specs(self, vol_type_id, extra_specs):
"""Creates a new Volume_type extra spec.
vol_type_id: Id of volume_type.
extra_specs: A dictionary of values to be used as extra_specs.
"""
url = "types/%s/extra_specs" % str(vol_type_id)
- post_body = json.dumps({'extra_specs': extra_spec})
+ post_body = json.dumps({'extra_specs': extra_specs})
resp, body = self.post(url, post_body)
body = json.loads(body)
self.expected_success(200, resp.status)
@@ -131,7 +132,7 @@
return service_client.ResponseBody(resp, body)
def update_volume_type_extra_specs(self, vol_type_id, extra_spec_name,
- extra_spec):
+ extra_specs):
"""Update a volume_type extra spec.
vol_type_id: Id of volume_type.
@@ -141,7 +142,7 @@
"""
url = "types/%s/extra_specs/%s" % (str(vol_type_id),
str(extra_spec_name))
- put_body = json.dumps(extra_spec)
+ put_body = json.dumps(extra_specs)
resp, body = self.put(url, put_body)
body = json.loads(body)
self.expected_success(200, resp.status)
@@ -159,18 +160,14 @@
return service_client.ResponseBody(resp, body)
def create_encryption_type(self, vol_type_id, **kwargs):
- """Create a new encryption type for the specified volume type.
+ """Create encryption type.
- vol_type_id: Id of volume_type.
- provider: Class providing encryption support.
- cipher: Encryption algorithm/mode to use.
- key_size: Size of the encryption key, in bits.
- control_location: Notional service where encryption is performed.
+ TODO: Current api-site doesn't contain this API description.
+ After fixing the api-site, we need to fix here also for putting
+ the link to api-site.
"""
url = "/types/%s/encryption" % str(vol_type_id)
- post_body = {}
- post_body.update(kwargs)
- post_body = json.dumps({'encryption': post_body})
+ post_body = json.dumps({'encryption': kwargs})
resp, body = self.post(url, post_body)
body = json.loads(body)
self.expected_success(200, resp.status)
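
A hedged sketch of the refactored volume-types admin calls; the client instance, the 'volume_type' response key, and the provider/backend values are illustrative assumptions::

    def make_encrypted_type(types_client, type_name):
        # name moves into **kwargs and extra_specs is forwarded as given
        vol_type = types_client.create_volume_type(
            name=type_name,
            extra_specs={'volume_backend_name': 'lvm'})['volume_type']
        # encryption attributes are likewise passed straight through
        types_client.create_encryption_type(vol_type['id'],
                                            provider='LuksEncryptor',
                                            control_location='front-end')
        return vol_type['id']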
diff --git a/tempest/services/volume/base/base_qos_client.py b/tempest/services/volume/base/base_qos_client.py
index c7f6c6e..697e902 100644
--- a/tempest/services/volume/base/base_qos_client.py
+++ b/tempest/services/volume/base/base_qos_client.py
@@ -67,15 +67,13 @@
raise exceptions.TimeoutException
time.sleep(self.build_interval)
- def create_qos(self, name, consumer, **kwargs):
+ def create_qos(self, **kwargs):
"""Create a QoS Specification.
- name : name of the QoS specifications
- consumer : conumer of Qos ( front-end / back-end / both )
+ Available params: see http://developer.openstack.org/
+ api-ref-blockstorage-v2.html#createQoSSpec
"""
- post_body = {'name': name, 'consumer': consumer}
- post_body.update(kwargs)
- post_body = json.dumps({'qos_specs': post_body})
+ post_body = json.dumps({'qos_specs': kwargs})
resp, body = self.post('qos-specs', post_body)
self.expected_success(200, resp.status)
body = json.loads(body)
@@ -107,7 +105,8 @@
def set_qos_key(self, qos_id, **kwargs):
"""Set the specified keys/values of QoS specification.
- kwargs : it is the dictionary of the key=value pairs to set
+ Available params: see http://developer.openstack.org/
+ api-ref-blockstorage-v2.html#setQoSKey
"""
put_body = json.dumps({"qos_specs": kwargs})
resp, body = self.put('qos-specs/%s' % qos_id, put_body)
@@ -118,7 +117,9 @@
def unset_qos_key(self, qos_id, keys):
"""Unset the specified keys of QoS specification.
- keys : it is the array of the keys to unset
+ :param keys: keys to delete from the QoS specification.
+
+ TODO(jordanP): Add a link once LP #1524877 is fixed.
"""
put_body = json.dumps({'keys': keys})
resp, body = self.put('qos-specs/%s/delete_keys' % qos_id, put_body)
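
With create_qos() taking only **kwargs, name and consumer are passed alongside any spec keys. A minimal sketch, assuming a configured QoS client and a cinder-style 'qos_specs' response key::

    def create_read_limited_qos(qos_client):
        body = qos_client.create_qos(name='iops-limit',
                                     consumer='back-end',
                                     read_iops_sec='2000')
        qos_id = body['qos_specs']['id']
        # individual keys can still be adjusted afterwards
        qos_client.set_qos_key(qos_id, read_iops_sec='4000')
        return qos_id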
diff --git a/tempest/services/volume/base/base_snapshots_client.py b/tempest/services/volume/base/base_snapshots_client.py
index fac90e4..1388e9c 100644
--- a/tempest/services/volume/base/base_snapshots_client.py
+++ b/tempest/services/volume/base/base_snapshots_client.py
@@ -29,7 +29,7 @@
create_resp = 200
- def list_snapshots(self, detail=False, params=None):
+ def list_snapshots(self, detail=False, **params):
"""List all the snapshot."""
url = 'snapshots'
if detail:
@@ -50,17 +50,13 @@
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
- def create_snapshot(self, volume_id, **kwargs):
+ def create_snapshot(self, **kwargs):
"""Creates a new snapshot.
- volume_id(Required): id of the volume.
- force: Create a snapshot even if the volume attached (Default=False)
- display_name: Optional snapshot Name.
- display_description: User friendly snapshot description.
+ Available params: see http://developer.openstack.org/
+ api-ref-blockstorage-v2.html#createSnapshot
"""
- post_body = {'volume_id': volume_id}
- post_body.update(kwargs)
- post_body = json.dumps({'snapshot': post_body})
+ post_body = json.dumps({'snapshot': kwargs})
resp, body = self.post('snapshots', post_body)
body = json.loads(body)
self.expected_success(self.create_resp, resp.status)
@@ -136,13 +132,14 @@
self.expected_success(202, resp.status)
return service_client.ResponseBody(resp, body)
- def update_snapshot_status(self, snapshot_id, status, progress):
+ def update_snapshot_status(self, snapshot_id, **kwargs):
"""Update the specified snapshot's status."""
- post_body = {
- 'status': status,
- 'progress': progress
- }
- post_body = json.dumps({'os-update_snapshot_status': post_body})
+ # TODO(gmann): api-site doesn't contain doc ref
+ # for this API. After fixing the api-site, we need to
+ # add the link here.
+ # Bug https://bugs.launchpad.net/openstack-api-site/+bug/1532645
+
+ post_body = json.dumps({'os-update_snapshot_status': kwargs})
url = 'snapshots/%s/action' % str(snapshot_id)
resp, body = self.post(url, post_body)
self.expected_success(202, resp.status)
@@ -165,18 +162,26 @@
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
- def update_snapshot_metadata(self, snapshot_id, metadata):
+ def update_snapshot_metadata(self, snapshot_id, **kwargs):
"""Update metadata for the snapshot."""
- put_body = json.dumps({'metadata': metadata})
+ # TODO(piyush): Current api-site doesn't contain this API description.
+ # After fixing the api-site, we need to fix here also for putting the
+ # link to api-site.
+ # LP: https://bugs.launchpad.net/openstack-api-site/+bug/1529063
+ put_body = json.dumps(kwargs)
url = "snapshots/%s/metadata" % str(snapshot_id)
resp, body = self.put(url, put_body)
body = json.loads(body)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
- def update_snapshot_metadata_item(self, snapshot_id, id, meta_item):
+ def update_snapshot_metadata_item(self, snapshot_id, id, **kwargs):
"""Update metadata item for the snapshot."""
- put_body = json.dumps({'meta': meta_item})
+ # TODO(piyush): Current api-site doesn't contain this API description.
+ # After fixing the api-site, we need to fix here also for putting the
+ # link to api-site.
+ # LP: https://bugs.launchpad.net/openstack-api-site/+bug/1529064
+ put_body = json.dumps(kwargs)
url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id))
resp, body = self.put(url, put_body)
body = json.loads(body)
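
A sketch of the adapted snapshot calls: volume_id moves into **kwargs for create_snapshot(), and the metadata/status updates now take the body fields as keyword arguments. The client instance and the 'snapshot' response key are assumed::

    def snapshot_attached_volume(snapshots_client, volume_id):
        # old form: create_snapshot(volume_id, force=True)
        snap = snapshots_client.create_snapshot(volume_id=volume_id,
                                                force=True)['snapshot']
        snapshots_client.update_snapshot_metadata(
            snap['id'], metadata={'purpose': 'test'})
        # old form: update_snapshot_status(id, status, progress)
        snapshots_client.update_snapshot_status(snap['id'],
                                                status='available',
                                                progress='100%')
        return snap['id']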
diff --git a/tempest/services/volume/base/base_volumes_client.py b/tempest/services/volume/base/base_volumes_client.py
index c7302e8..d4435bc 100644
--- a/tempest/services/volume/base/base_volumes_client.py
+++ b/tempest/services/volume/base/base_volumes_client.py
@@ -71,23 +71,15 @@
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
- def create_volume(self, size=None, **kwargs):
+ def create_volume(self, **kwargs):
"""Creates a new Volume.
- size: Size of volume in GB.
- Following optional keyword arguments are accepted:
- display_name: Optional Volume Name(only for V1).
- name: Optional Volume Name(only for V2).
- metadata: A dictionary of values to be used as metadata.
- volume_type: Optional Name of volume_type for the volume
- snapshot_id: When specified the volume is created from this snapshot
- imageRef: When specified the volume is created from this image
+ Available params: see http://developer.openstack.org/
+ api-ref-blockstorage-v2.html#createVolume
"""
- if size is None:
- size = self.default_volume_size
- post_body = {'size': size}
- post_body.update(kwargs)
- post_body = json.dumps({'volume': post_body})
+ if 'size' not in kwargs:
+ kwargs['size'] = self.default_volume_size
+ post_body = json.dumps({'volume': kwargs})
resp, body = self.post('volumes', post_body)
body = json.loads(body)
self.expected_success(self.create_resp, resp.status)
@@ -107,35 +99,26 @@
self.expected_success(202, resp.status)
return service_client.ResponseBody(resp, body)
- def upload_volume(self, volume_id, image_name, disk_format):
+ def upload_volume(self, volume_id, **kwargs):
"""Uploads a volume in Glance."""
- post_body = {
- 'image_name': image_name,
- 'disk_format': disk_format
- }
- post_body = json.dumps({'os-volume_upload_image': post_body})
+ post_body = json.dumps({'os-volume_upload_image': kwargs})
url = 'volumes/%s/action' % (volume_id)
resp, body = self.post(url, post_body)
body = json.loads(body)
self.expected_success(202, resp.status)
return service_client.ResponseBody(resp, body)
- def attach_volume(self, volume_id, instance_uuid, mountpoint):
+ def attach_volume(self, volume_id, **kwargs):
"""Attaches a volume to a given instance on a given mountpoint."""
- post_body = {
- 'instance_uuid': instance_uuid,
- 'mountpoint': mountpoint,
- }
- post_body = json.dumps({'os-attach': post_body})
+ post_body = json.dumps({'os-attach': kwargs})
url = 'volumes/%s/action' % (volume_id)
resp, body = self.post(url, post_body)
self.expected_success(202, resp.status)
return service_client.ResponseBody(resp, body)
- def set_bootable_volume(self, volume_id, bootable):
+ def set_bootable_volume(self, volume_id, **kwargs):
"""set a bootable flag for a volume - true or false."""
- post_body = {"bootable": bootable}
- post_body = json.dumps({'os-set_bootable': post_body})
+ post_body = json.dumps({'os-set_bootable': kwargs})
url = 'volumes/%s/action' % (volume_id)
resp, body = self.post(url, post_body)
self.expected_success(200, resp.status)
@@ -143,8 +126,7 @@
def detach_volume(self, volume_id):
"""Detaches a volume from an instance."""
- post_body = {}
- post_body = json.dumps({'os-detach': post_body})
+ post_body = json.dumps({'os-detach': {}})
url = 'volumes/%s/action' % (volume_id)
resp, body = self.post(url, post_body)
self.expected_success(202, resp.status)
@@ -152,8 +134,7 @@
def reserve_volume(self, volume_id):
"""Reserves a volume."""
- post_body = {}
- post_body = json.dumps({'os-reserve': post_body})
+ post_body = json.dumps({'os-reserve': {}})
url = 'volumes/%s/action' % (volume_id)
resp, body = self.post(url, post_body)
self.expected_success(202, resp.status)
@@ -161,8 +142,7 @@
def unreserve_volume(self, volume_id):
"""Restore a reserved volume ."""
- post_body = {}
- post_body = json.dumps({'os-unreserve': post_body})
+ post_body = json.dumps({'os-unreserve': {}})
url = 'volumes/%s/action' % (volume_id)
resp, body = self.post(url, post_body)
self.expected_success(202, resp.status)
@@ -184,20 +164,17 @@
"""Returns the primary type of resource this client works with."""
return 'volume'
- def extend_volume(self, volume_id, extend_size):
+ def extend_volume(self, volume_id, **kwargs):
"""Extend a volume."""
- post_body = {
- 'new_size': extend_size
- }
- post_body = json.dumps({'os-extend': post_body})
+ post_body = json.dumps({'os-extend': kwargs})
url = 'volumes/%s/action' % (volume_id)
resp, body = self.post(url, post_body)
self.expected_success(202, resp.status)
return service_client.ResponseBody(resp, body)
- def reset_volume_status(self, volume_id, status):
+ def reset_volume_status(self, volume_id, **kwargs):
"""Reset the Specified Volume's Status."""
- post_body = json.dumps({'os-reset_status': {"status": status}})
+ post_body = json.dumps({'os-reset_status': kwargs})
resp, body = self.post('volumes/%s/action' % volume_id, post_body)
self.expected_success(202, resp.status)
return service_client.ResponseBody(resp, body)
@@ -218,14 +195,9 @@
self.expected_success(202, resp.status)
return service_client.ResponseBody(resp, body)
- def create_volume_transfer(self, vol_id, display_name=None):
+ def create_volume_transfer(self, **kwargs):
"""Create a volume transfer."""
- post_body = {
- 'volume_id': vol_id
- }
- if display_name:
- post_body['name'] = display_name
- post_body = json.dumps({'transfer': post_body})
+ post_body = json.dumps({'transfer': kwargs})
resp, body = self.post('os-volume-transfer', post_body)
body = json.loads(body)
self.expected_success(202, resp.status)
@@ -239,7 +211,7 @@
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
- def list_volume_transfers(self, params=None):
+ def list_volume_transfers(self, **params):
"""List all the volume transfers created."""
url = 'os-volume-transfer'
if params:
@@ -255,24 +227,18 @@
self.expected_success(202, resp.status)
return service_client.ResponseBody(resp, body)
- def accept_volume_transfer(self, transfer_id, transfer_auth_key):
+ def accept_volume_transfer(self, transfer_id, **kwargs):
"""Accept a volume transfer."""
- post_body = {
- 'auth_key': transfer_auth_key,
- }
url = 'os-volume-transfer/%s/accept' % transfer_id
- post_body = json.dumps({'accept': post_body})
+ post_body = json.dumps({'accept': kwargs})
resp, body = self.post(url, post_body)
body = json.loads(body)
self.expected_success(202, resp.status)
return service_client.ResponseBody(resp, body)
- def update_volume_readonly(self, volume_id, readonly):
+ def update_volume_readonly(self, volume_id, **kwargs):
"""Update the Specified Volume readonly."""
- post_body = {
- 'readonly': readonly
- }
- post_body = json.dumps({'os-update_readonly_flag': post_body})
+ post_body = json.dumps({'os-update_readonly_flag': kwargs})
url = 'volumes/%s/action' % (volume_id)
resp, body = self.post(url, post_body)
self.expected_success(202, resp.status)
@@ -327,10 +293,8 @@
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
- def retype_volume(self, volume_id, volume_type, **kwargs):
+ def retype_volume(self, volume_id, **kwargs):
"""Updates volume with new volume type."""
- post_body = {'new_type': volume_type}
- post_body.update(kwargs)
- post_body = json.dumps({'os-retype': post_body})
+ post_body = json.dumps({'os-retype': kwargs})
resp, body = self.post('volumes/%s/action' % volume_id, post_body)
self.expected_success(202, resp.status)
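A usage sketch, not part of the diff above: the volume client methods now pass
keyword arguments straight through as the request body, so call sites supply
the same keys the removed explicit parameters used to build. The names
volumes_client, volume_id, server_id and the sample values are placeholders.

    def _example_volume_calls(volumes_client, volume_id, server_id):
        volumes_client.create_volume(size=1, name='vol-1')
        volumes_client.attach_volume(volume_id,
                                     instance_uuid=server_id,
                                     mountpoint='/dev/vdb')
        volumes_client.upload_volume(volume_id,
                                     image_name='vol-1-image',
                                     disk_format='raw')
        volumes_client.extend_volume(volume_id, new_size=2)
        volumes_client.create_volume_transfer(volume_id=volume_id,
                                              name='transfer-1')
        volumes_client.retype_volume(volume_id, new_type='other-type')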
diff --git a/tempest/stress/actions/volume_attach_verify.py b/tempest/stress/actions/volume_attach_verify.py
index 2a23291..8bbbfc4 100644
--- a/tempest/stress/actions/volume_attach_verify.py
+++ b/tempest/stress/actions/volume_attach_verify.py
@@ -161,7 +161,7 @@
self._create_sec_group()
self._create_keypair()
private_key = self.key['private_key']
- username = CONF.compute.image_ssh_user
+ username = CONF.validation.image_ssh_user
self.remote_client = remote_client.RemoteClient(self.floating['ip'],
username,
pkey=private_key)
diff --git a/tempest/stress/cleanup.py b/tempest/stress/cleanup.py
index 5c25e32..1c1fb46 100644
--- a/tempest/stress/cleanup.py
+++ b/tempest/stress/cleanup.py
@@ -69,11 +69,11 @@
except Exception:
pass
- users = admin_manager.identity_client.list_users()['users']
+ users = admin_manager.users_client.list_users()['users']
LOG.info("Cleanup::remove %s users" % len(users))
for user in users:
if user['name'].startswith("stress_user"):
- admin_manager.identity_client.delete_user(user['id'])
+ admin_manager.users_client.delete_user(user['id'])
tenants = admin_manager.tenants_client.list_tenants()['tenants']
LOG.info("Cleanup::remove %s tenants" % len(tenants))
for tenant in tenants:
@@ -84,7 +84,7 @@
# volume deletion may block
_, snaps = admin_manager.snapshots_client.list_snapshots(
- params={"all_tenants": True})['snapshots']
+ all_tenants=True)['snapshots']
LOG.info("Cleanup::remove %s snapshots" % len(snaps))
for v in snaps:
try:
diff --git a/tempest/stress/driver.py b/tempest/stress/driver.py
index cac848b..6531059 100644
--- a/tempest/stress/driver.py
+++ b/tempest/stress/driver.py
@@ -106,7 +106,7 @@
if process['process'].is_alive():
try:
pid = process['process'].pid
- LOG.warn("Process %d hangs. Send SIGKILL." % pid)
+ LOG.warning("Process %d hangs. Send SIGKILL." % pid)
os.kill(pid, signal.SIGKILL)
except Exception:
pass
@@ -147,11 +147,16 @@
if CONF.identity.auth_version == 'v2':
identity_client = admin_manager.identity_client
projects_client = admin_manager.tenants_client
+ roles_client = admin_manager.roles_client
+ users_client = admin_manager.users_client
else:
identity_client = admin_manager.identity_v3_client
projects_client = None
+ roles_client = None
+ users_client = None
credentials_client = cred_client.get_creds_client(
- identity_client, projects_client)
+ identity_client, projects_client, roles_client,
+ users_client)
project = credentials_client.create_project(
name=tenant_name, description=tenant_name)
user = credentials_client.create_user(username, password,
@@ -237,14 +242,13 @@
had_errors = True
sum_runs += process['statistic']['runs']
sum_fails += process['statistic']['fails']
- LOG.info(" Process %d (%s): Run %d actions (%d failed)" %
- (process['p_number'],
- process['action'],
- process['statistic']['runs'],
- process['statistic']['fails']))
- LOG.info("Summary:")
- LOG.info("Run %d actions (%d failed)" %
- (sum_runs, sum_fails))
+ print ("Process %d (%s): Run %d actions (%d failed)" % (
+ process['p_number'],
+ process['action'],
+ process['statistic']['runs'],
+ process['statistic']['fails']))
+ print ("Summary:")
+ print ("Run %d actions (%d failed)" % (sum_runs, sum_fails))
if not had_errors and CONF.stress.full_clean_stack:
LOG.info("cleaning up")
diff --git a/tempest/test.py b/tempest/test.py
index 8e961f4..dfed947 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -19,7 +19,6 @@
import re
import sys
import time
-import urllib
import uuid
import fixtures
@@ -27,6 +26,7 @@
from oslo_serialization import jsonutils as json
from oslo_utils import importutils
import six
+from six.moves import urllib
from tempest_lib import decorators
import testscenarios
import testtools
@@ -226,6 +226,7 @@
# Resources required to validate a server using ssh
validation_resources = {}
network_resources = {}
+ services_microversion = {}
# NOTE(sdague): log_format is defined inline here instead of using the oslo
# default because going through the config path recouples config to the
@@ -375,8 +376,8 @@
cls.validation_resources = vresources.create_validation_resources(
cls.os, cls.validation_resources)
else:
- LOG.warn("Client manager not found, validation resources not"
- " created")
+ LOG.warning("Client manager not found, validation resources not"
+ " created")
@classmethod
def resource_cleanup(cls):
@@ -391,8 +392,8 @@
cls.validation_resources)
cls.validation_resources = {}
else:
- LOG.warn("Client manager not found, validation resources not"
- " deleted")
+ LOG.warning("Client manager not found, validation resources "
+ "not deleted")
def setUp(self):
super(BaseTestCase, self).setUp()
@@ -437,9 +438,13 @@
if CONF.identity.auth_version == 'v2':
client = self.os_admin.identity_client
project_client = self.os_admin.tenants_client
+ roles_client = self.os_admin.roles_client
+ users_client = self.os_admin.users_client
else:
client = self.os_admin.identity_v3_client
project_client = None
+ roles_client = None
+ users_client = None
try:
domain = client.auth_provider.credentials.project_domain_name
@@ -447,6 +452,8 @@
domain = 'Default'
return cred_client.get_creds_client(client, project_client,
+ roles_client,
+ users_client,
project_domain_name=domain)
@classmethod
@@ -512,7 +519,8 @@
else:
raise exceptions.InvalidCredentials(
"Invalid credentials type %s" % credential_type)
- return clients.Manager(credentials=creds, service=cls._service)
+ return clients.Manager(credentials=creds, service=cls._service,
+ api_microversions=cls.services_microversion)
@classmethod
def clear_credentials(cls):
@@ -599,7 +607,8 @@
credentials.is_admin_available(
identity_version=cls.get_identity_version())):
admin_creds = cred_provider.get_admin_creds()
- admin_manager = clients.Manager(admin_creds)
+ admin_manager = clients.Manager(
+ admin_creds, api_microversions=cls.services_microversion)
networks_client = admin_manager.compute_networks_client
return fixed_network.get_tenant_network(
cred_provider, networks_client, CONF.compute.fixed_network_name)
@@ -767,7 +776,7 @@
if not json_dict:
return url, None
elif method in ["GET", "HEAD", "PUT", "DELETE"]:
- return "%s?%s" % (url, urllib.urlencode(json_dict)), None
+ return "%s?%s" % (url, urllib.parse.urlencode(json_dict)), None
else:
return url, json.dumps(json_dict)
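A small illustration of the six.moves change above, not part of the diff:
importing urllib from six.moves gives the Python 3 module layout on both
interpreters, so the query-string helper is reached as urllib.parse.urlencode.
The parameter values below are placeholders.

    from six.moves import urllib

    query = urllib.parse.urlencode({'name': 'vm-1', 'status': 'ACTIVE'})
    url = 'servers?%s' % query   # e.g. servers?name=vm-1&status=ACTIVE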
diff --git a/tempest/test_discover/test_discover.py b/tempest/test_discover/test_discover.py
index dac7d91..330f370 100644
--- a/tempest/test_discover/test_discover.py
+++ b/tempest/test_discover/test_discover.py
@@ -30,8 +30,7 @@
base_path = os.path.split(os.path.dirname(os.path.abspath(__file__)))[0]
base_path = os.path.split(base_path)[0]
# Load local tempest tests
- for test_dir in ['tempest/api', 'tempest/scenario',
- 'tempest/thirdparty']:
+ for test_dir in ['tempest/api', 'tempest/scenario']:
full_test_dir = os.path.join(base_path, test_dir)
if not pattern:
suite.addTests(loader.discover(full_test_dir,
diff --git a/tempest/tests/cmd/test_javelin.py b/tempest/tests/cmd/test_javelin.py
index 56bc96c..ab6a7a0 100644
--- a/tempest/tests/cmd/test_javelin.py
+++ b/tempest/tests/cmd/test_javelin.py
@@ -24,7 +24,7 @@
def setUp(self):
super(JavelinUnitTest, self).setUp()
- javelin.setup_logging()
+ javelin.LOG = mock.MagicMock()
self.fake_client = mock.MagicMock()
self.fake_object = mock.MagicMock()
@@ -78,8 +78,8 @@
mocked_function = self.fake_client.volumes.attach_volume
mocked_function.assert_called_once_with(
self.fake_object.volume['id'],
- self.fake_object.server['id'],
- self.fake_object['device'])
+ instance_uuid=self.fake_object.server['id'],
+ mountpoint=self.fake_object['device'])
class TestCreateResources(JavelinUnitTest):
@@ -119,7 +119,7 @@
fake_tenant_id = self.fake_object['tenant']['id']
fake_email = "%s@%s" % (self.fake_object['user'], fake_tenant_id)
- mocked_function = self.fake_client.identity.create_user
+ mocked_function = self.fake_client.users.create_user
mocked_function.assert_called_once_with(self.fake_object['name'],
self.fake_object['password'],
fake_tenant_id,
@@ -135,7 +135,7 @@
javelin.create_users([self.fake_object])
- mocked_function = self.fake_client.identity.create_user
+ mocked_function = self.fake_client.users.create_user
self.assertFalse(mocked_function.called)
def test_create_objects(self):
@@ -310,7 +310,7 @@
fake_auth = self.fake_client
fake_auth.tenants.list_tenants.return_value = \
{'tenants': [fake_tenant]}
- fake_auth.identity.list_users.return_value = {'users': [fake_user]}
+ fake_auth.users.list_users.return_value = {'users': [fake_user]}
self.useFixture(mockpatch.Patch(
'tempest.common.identity.get_user_by_username',
@@ -320,7 +320,7 @@
javelin.destroy_users([fake_user])
- mocked_function = fake_auth.identity.delete_user
+ mocked_function = fake_auth.users.delete_user
mocked_function.assert_called_once_with(fake_user['id'])
def test_destroy_objects(self):
diff --git a/tempest/tests/cmd/test_list_plugins.py b/tempest/tests/cmd/test_list_plugins.py
new file mode 100644
index 0000000..17ddb18
--- /dev/null
+++ b/tempest/tests/cmd/test_list_plugins.py
@@ -0,0 +1,24 @@
+# Copyright 2015 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import subprocess
+
+from tempest.tests import base
+
+
+class TestTempestListPlugins(base.TestCase):
+ def test_run_list_plugins(self):
+ return_code = subprocess.call(
+ ['tempest', 'list-plugins'], stdout=subprocess.PIPE)
+ self.assertEqual(return_code, 0)
diff --git a/tempest/tests/cmd/test_verify_tempest_config.py b/tempest/tests/cmd/test_verify_tempest_config.py
index a5dea54..193abc7 100644
--- a/tempest/tests/cmd/test_verify_tempest_config.py
+++ b/tempest/tests/cmd/test_verify_tempest_config.py
@@ -201,7 +201,8 @@
{'alias': 'fake2'},
{'alias': 'not_fake'}]}
fake_os = mock.MagicMock()
- fake_os.network_client.list_extensions = fake_list_extensions
+ fake_os.network_extensions_client.list_extensions = (
+ fake_list_extensions)
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['fake1', 'fake2', 'fake3'])))
@@ -223,7 +224,8 @@
{'alias': 'fake2'},
{'alias': 'not_fake'}]}
fake_os = mock.MagicMock()
- fake_os.network_client.list_extensions = fake_list_extensions
+ fake_os.network_extensions_client.list_extensions = (
+ fake_list_extensions)
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['all'])))
diff --git a/tempest/tests/common/test_api_version_utils.py b/tempest/tests/common/test_api_version_utils.py
index 33024b6..babf965 100644
--- a/tempest/tests/common/test_api_version_utils.py
+++ b/tempest/tests/common/test_api_version_utils.py
@@ -192,3 +192,58 @@
def test_cfg_version_min_greater_than_max(self):
self.assertRaises(exceptions.InvalidConfiguration,
self._test_version, '2.2', '2.7', '2.9', '2.7')
+
+
+class TestSelectRequestMicroversion(base.TestCase):
+
+ def _test_request_version(self, test_min_version,
+ cfg_min_version, expected_version):
+ selected_version = api_version_utils.select_request_microversion(
+ test_min_version, cfg_min_version)
+ self.assertEqual(expected_version, selected_version)
+
+ def test_cfg_min_version_greater(self):
+ self._test_request_version('2.1', '2.3', expected_version='2.3')
+
+ def test_class_min_version_greater(self):
+ self._test_request_version('2.5', '2.3', expected_version='2.5')
+
+ def test_cfg_min_version_none(self):
+ self._test_request_version('2.5', None, expected_version='2.5')
+
+ def test_class_min_version_none(self):
+ self._test_request_version(None, '2.3', expected_version='2.3')
+
+ def test_both_min_version_none(self):
+ self._test_request_version(None, None, expected_version=None)
+
+ def test_both_min_version_equal(self):
+ self._test_request_version('2.3', '2.3', expected_version='2.3')
+
+
+class TestMicroversionHeaderMatches(base.TestCase):
+
+ def test_header_matches(self):
+ microversion_header_name = 'x-openstack-xyz-api-version'
+ request_microversion = '2.1'
+ test_response = {microversion_header_name: request_microversion}
+ api_version_utils.assert_version_header_matches_request(
+ microversion_header_name, request_microversion, test_response)
+
+ def test_header_does_not_match(self):
+ microversion_header_name = 'x-openstack-xyz-api-version'
+ request_microversion = '2.1'
+ test_response = {microversion_header_name: '2.2'}
+ self.assertRaises(
+ exceptions.InvalidHTTPResponseHeader,
+ api_version_utils.assert_version_header_matches_request,
+ microversion_header_name, request_microversion, test_response)
+
+ def test_header_not_present(self):
+ microversion_header_name = 'x-openstack-xyz-api-version'
+ request_microversion = '2.1'
+ test_response = {}
+ self.assertRaises(
+ exceptions.InvalidHTTPResponseHeader,
+ api_version_utils.assert_version_header_matches_request,
+ microversion_header_name, request_microversion, test_response)
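A sketch restating what the new tests above exercise, illustrative only:
select_request_microversion returns the larger of the test-class minimum and
the configured minimum, and assert_version_header_matches_request raises
InvalidHTTPResponseHeader when the response header is missing or different.
The import path is assumed from the test module layout.

    from tempest.common import api_version_utils

    selected = api_version_utils.select_request_microversion('2.5', '2.3')
    assert selected == '2.5'   # the larger of the two minima wins
    api_version_utils.assert_version_header_matches_request(
        'x-openstack-xyz-api-version', selected,
        {'x-openstack-xyz-api-version': '2.5'})   # matches, so no exception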
diff --git a/tempest/tests/common/test_dynamic_creds.py b/tempest/tests/common/test_dynamic_creds.py
index 10acd14..4379756 100644
--- a/tempest/tests/common/test_dynamic_creds.py
+++ b/tempest/tests/common/test_dynamic_creds.py
@@ -24,8 +24,12 @@
from tempest import exceptions
from tempest.services.identity.v2.json import identity_client as \
json_iden_client
+from tempest.services.identity.v2.json import roles_client as \
+ json_roles_client
from tempest.services.identity.v2.json import tenants_client as \
json_tenants_client
+from tempest.services.identity.v2.json import users_client as \
+ json_users_client
from tempest.services.network.json import network_client as json_network_client
from tempest.tests import base
from tempest.tests import fake_config
@@ -68,7 +72,7 @@
def _mock_user_create(self, id, name):
user_fix = self.useFixture(mockpatch.PatchObject(
- json_iden_client.IdentityClient,
+ json_users_client.UsersClient,
'create_user',
return_value=(service_client.ResponseBody
(200, {'user': {'id': id, 'name': name}}))))
@@ -84,7 +88,7 @@
def _mock_list_roles(self, id, name):
roles_fix = self.useFixture(mockpatch.PatchObject(
- json_iden_client.IdentityClient,
+ json_roles_client.RolesClient,
'list_roles',
return_value=(service_client.ResponseBody
(200,
@@ -95,7 +99,7 @@
def _mock_list_2_roles(self):
roles_fix = self.useFixture(mockpatch.PatchObject(
- json_iden_client.IdentityClient,
+ json_roles_client.RolesClient,
'list_roles',
return_value=(service_client.ResponseBody
(200,
@@ -106,7 +110,7 @@
def _mock_assign_user_role(self):
tenant_fix = self.useFixture(mockpatch.PatchObject(
- json_iden_client.IdentityClient,
+ json_roles_client.RolesClient,
'assign_user_role',
return_value=(service_client.ResponseBody
(200, {}))))
@@ -114,7 +118,7 @@
def _mock_list_role(self):
roles_fix = self.useFixture(mockpatch.PatchObject(
- json_iden_client.IdentityClient,
+ json_roles_client.RolesClient,
'list_roles',
return_value=(service_client.ResponseBody
(200, {'roles': [{'id': '1',
@@ -123,7 +127,7 @@
def _mock_list_ec2_credentials(self, user_id, tenant_id):
ec2_creds_fix = self.useFixture(mockpatch.PatchObject(
- json_iden_client.IdentityClient,
+ json_users_client.UsersClient,
'list_user_ec2_credentials',
return_value=(service_client.ResponseBody
(200, {'credentials': [{
@@ -178,11 +182,11 @@
self._mock_user_create('1234', 'fake_admin_user')
self._mock_tenant_create('1234', 'fake_admin_tenant')
- user_mock = mock.patch.object(json_iden_client.IdentityClient,
+ user_mock = mock.patch.object(json_roles_client.RolesClient,
'assign_user_role')
user_mock.start()
self.addCleanup(user_mock.stop)
- with mock.patch.object(json_iden_client.IdentityClient,
+ with mock.patch.object(json_roles_client.RolesClient,
'assign_user_role') as user_mock:
admin_creds = creds.get_admin_creds()
user_mock.assert_has_calls([
@@ -201,11 +205,11 @@
self._mock_user_create('1234', 'fake_role_user')
self._mock_tenant_create('1234', 'fake_role_tenant')
- user_mock = mock.patch.object(json_iden_client.IdentityClient,
+ user_mock = mock.patch.object(json_roles_client.RolesClient,
'assign_user_role')
user_mock.start()
self.addCleanup(user_mock.stop)
- with mock.patch.object(json_iden_client.IdentityClient,
+ with mock.patch.object(json_roles_client.RolesClient,
'assign_user_role') as user_mock:
role_creds = creds.get_creds_by_roles(
roles=['role1', 'role2'])
@@ -239,8 +243,8 @@
self._mock_list_roles('123456', 'admin')
creds.get_admin_creds()
user_mock = self.patch(
- 'tempest.services.identity.v2.json.identity_client.'
- 'IdentityClient.delete_user')
+ 'tempest.services.identity.v2.json.users_client.'
+ 'UsersClient.delete_user')
tenant_mock = self.patch(
'tempest.services.identity.v2.json.tenants_client.'
'TenantsClient.delete_tenant')
@@ -320,7 +324,7 @@
'tempest.services.network.json.network_client.NetworkClient.'
'add_router_interface_with_subnet_id')
primary_creds = creds.get_primary_creds()
- router_interface_mock.called_once_with('1234', '1234')
+ router_interface_mock.assert_called_once_with('1234', '1234')
network = primary_creds.network
subnet = primary_creds.subnet
router = primary_creds.router
@@ -352,7 +356,7 @@
'tempest.services.network.json.network_client.NetworkClient.'
'add_router_interface_with_subnet_id')
creds.get_primary_creds()
- router_interface_mock.called_once_with('1234', '1234')
+ router_interface_mock.assert_called_once_with('1234', '1234')
router_interface_mock.reset_mock()
# Create alternate tenant and network
self._mock_user_create('12345', 'fake_alt_user')
@@ -361,7 +365,7 @@
self._mock_subnet_create(creds, '12345', 'fake_alt_subnet')
self._mock_router_create('12345', 'fake_alt_router')
creds.get_alt_creds()
- router_interface_mock.called_once_with('12345', '12345')
+ router_interface_mock.assert_called_once_with('12345', '12345')
router_interface_mock.reset_mock()
# Create admin tenant and networks
self._mock_user_create('123456', 'fake_admin_user')
@@ -371,8 +375,8 @@
self._mock_router_create('123456', 'fake_admin_router')
self._mock_list_roles('123456', 'admin')
creds.get_admin_creds()
- self.patch('tempest.services.identity.v2.json.identity_client.'
- 'IdentityClient.delete_user')
+ self.patch('tempest.services.identity.v2.json.users_client.'
+ 'UsersClient.delete_user')
self.patch('tempest.services.identity.v2.json.tenants_client.'
'TenantsClient.delete_tenant')
net = mock.patch.object(creds.networks_admin_client,
@@ -394,15 +398,15 @@
port_list_mock.start()
secgroup_list_mock = mock.patch.object(
- creds.network_admin_client,
+ creds.security_groups_admin_client,
'list_security_groups',
side_effect=side_effect)
secgroup_list_mock.start()
return_values = (fake_http.fake_httplib({}, status=204), {})
remove_secgroup_mock = self.patch(
- 'tempest.services.network.json.network_client.'
- 'NetworkClient.delete', return_value=return_values)
+ 'tempest.services.network.json.security_groups_client.'
+ 'SecurityGroupsClient.delete', return_value=return_values)
creds.clear_creds()
# Verify default security group delete
calls = remove_secgroup_mock.mock_calls
@@ -459,7 +463,7 @@
'tempest.services.network.json.network_client.NetworkClient.'
'add_router_interface_with_subnet_id')
alt_creds = creds.get_alt_creds()
- router_interface_mock.called_once_with('1234', '1234')
+ router_interface_mock.assert_called_once_with('1234', '1234')
network = alt_creds.network
subnet = alt_creds.subnet
router = alt_creds.router
@@ -484,7 +488,7 @@
'add_router_interface_with_subnet_id')
self._mock_list_roles('123456', 'admin')
admin_creds = creds.get_admin_creds()
- router_interface_mock.called_once_with('1234', '1234')
+ router_interface_mock.assert_called_once_with('1234', '1234')
network = admin_creds.network
subnet = admin_creds.subnet
router = admin_creds.router
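An illustration, not part of the diff above, of why called_once_with had to
become assert_called_once_with: accessing an arbitrary attribute on a Mock
just returns a new child mock, so the old spelling never verified anything.

    import mock

    m = mock.Mock()
    m('1234', '1234')
    m.called_once_with('wrong-args')           # no such assert; passes silently
    m.assert_called_once_with('1234', '1234')  # actually checks the call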
diff --git a/tempest/tests/common/test_service_clients.py b/tempest/tests/common/test_service_clients.py
index c313071..f248957 100644
--- a/tempest/tests/common/test_service_clients.py
+++ b/tempest/tests/common/test_service_clients.py
@@ -17,10 +17,6 @@
import six
from tempest.services.baremetal.v1.json import baremetal_client
-from tempest.services.compute.json import floating_ips_client
-from tempest.services.compute.json import security_group_rules_client
-from tempest.services.compute.json import server_groups_client
-from tempest.services.compute.json import servers_client
from tempest.services.data_processing.v1_1 import data_processing_client
from tempest.services.database.json import flavors_client as db_flavor_client
from tempest.services.database.json import versions_client as db_version_client
@@ -30,9 +26,9 @@
from tempest.services.identity.v3.json import endpoints_client
from tempest.services.identity.v3.json import identity_client as \
identity_v3_identity_client
-from tempest.services.identity.v3.json import policy_client
-from tempest.services.identity.v3.json import region_client
-from tempest.services.identity.v3.json import service_client
+from tempest.services.identity.v3.json import policies_client
+from tempest.services.identity.v3.json import regions_client
+from tempest.services.identity.v3.json import services_client
from tempest.services.image.v1.json import images_client
from tempest.services.image.v2.json import images_client as images_v2_client
from tempest.services.messaging.json import messaging_client
@@ -87,10 +83,6 @@
def test_service_client_creations_with_specified_args(self, mock_init):
test_clients = [
baremetal_client.BaremetalClient,
- floating_ips_client.FloatingIPsClient,
- security_group_rules_client.SecurityGroupRulesClient,
- server_groups_client.ServerGroupsClient,
- servers_client.ServersClient,
data_processing_client.DataProcessingClient,
db_flavor_client.DatabaseFlavorsClient,
db_version_client.DatabaseVersionsClient,
@@ -126,9 +118,9 @@
credentials_client.CredentialsClient,
endpoints_client.EndPointClient,
identity_v3_identity_client.IdentityV3Client,
- policy_client.PolicyClient,
- region_client.RegionClient,
- service_client.ServiceClient,
+ policies_client.PoliciesClient,
+ regions_client.RegionsClient,
+ services_client.ServicesClient,
images_client.ImagesClient,
images_v2_client.ImagesClientV2
]
diff --git a/tempest/tests/common/utils/linux/test_remote_client.py b/tempest/tests/common/utils/linux/test_remote_client.py
index e596aab..9c2b99e 100644
--- a/tempest/tests/common/utils/linux/test_remote_client.py
+++ b/tempest/tests/common/utils/linux/test_remote_client.py
@@ -29,7 +29,7 @@
self.useFixture(fake_config.ConfigFixture())
self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
cfg.CONF.set_default('ip_version_for_ssh', 4, group='validation')
- cfg.CONF.set_default('network_for_ssh', 'public', group='compute')
+ cfg.CONF.set_default('network_for_ssh', 'public', group='validation')
cfg.CONF.set_default('connect_timeout', 1, group='validation')
self.conn = remote_client.RemoteClient('127.0.0.1', 'user', 'pass')
@@ -79,7 +79,7 @@
def test_get_number_of_vcpus(self):
self.ssh_mock.mock.exec_command.return_value = '16'
self.assertEqual(self.conn.get_number_of_vcpus(), 16)
- self._assert_exec_called_with('grep -c processor /proc/cpuinfo')
+ self._assert_exec_called_with('grep -c ^processor /proc/cpuinfo')
def test_get_partitions(self):
proc_partitions = """major minor #blocks name
diff --git a/tempest/tests/fake_config.py b/tempest/tests/fake_config.py
index ca8bc3e..c45f6da 100644
--- a/tempest/tests/fake_config.py
+++ b/tempest/tests/fake_config.py
@@ -24,6 +24,7 @@
class ConfigFixture(conf_fixture.Config):
def __init__(self):
+ cfg.CONF([], default_config_files=[])
config.register_opts()
super(ConfigFixture, self).__init__()
@@ -59,6 +60,5 @@
class FakePrivate(config.TempestConfigPrivate):
def __init__(self, parse_conf=True, config_path=None):
- cfg.CONF([], default_config_files=[])
self._set_attrs()
- self.lock_path = cfg.CONF.lock_path
+ self.lock_path = cfg.CONF.oslo_concurrency.lock_path
diff --git a/tempest/tests/services/compute/test_base_compute_client.py b/tempest/tests/services/compute/test_base_compute_client.py
index 13461e4..7a55cdb 100644
--- a/tempest/tests/services/compute/test_base_compute_client.py
+++ b/tempest/tests/services/compute/test_base_compute_client.py
@@ -13,60 +13,124 @@
# under the License.
import httplib2
-import mock
+from oslotest import mockpatch
from tempest_lib.common import rest_client
-from tempest.services.compute.json import base as base_compute_client
+from tempest import exceptions
+from tempest.services.compute.json import base_compute_client
from tempest.tests import fake_auth_provider
from tempest.tests.services.compute import base
-class TestClientWithoutMicroversionHeader(base.BaseComputeServiceTest):
+class TestMicroversionHeaderCheck(base.BaseComputeServiceTest):
def setUp(self):
- super(TestClientWithoutMicroversionHeader, self).setUp()
+ super(TestMicroversionHeaderCheck, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = base_compute_client.BaseComputeClient(
fake_auth, 'compute', 'regionOne')
+ self.client.set_api_microversion('2.2')
- def test_no_microverion_header(self):
- header = self.client.get_headers()
- self.assertNotIn('X-OpenStack-Nova-API-Version', header)
+ def _check_microversion_header_in_response(self, fake_response):
+ def request(*args, **kwargs):
+ return (httplib2.Response(fake_response), {})
- def test_no_microverion_header_in_raw_request(self):
- def raw_request(*args, **kwargs):
- self.assertNotIn('X-OpenStack-Nova-API-Version', kwargs['headers'])
- return (httplib2.Response({'status': 200}), {})
+ self.useFixture(mockpatch.PatchObject(
+ rest_client.RestClient,
+ 'request',
+ side_effect=request))
- with mock.patch.object(rest_client.RestClient,
- 'raw_request') as mock_get:
- mock_get.side_effect = raw_request
- self.client.get('fake_url')
+ def test_correct_microversion_in_response(self):
+ fake_response = {self.client.api_microversion_header_name: '2.2'}
+ self._check_microversion_header_in_response(fake_response)
+ self.client.get('fake_url')
+
+ def test_incorrect_microversion_in_response(self):
+ fake_response = {self.client.api_microversion_header_name: '2.3'}
+ self._check_microversion_header_in_response(fake_response)
+ self.assertRaises(exceptions.InvalidHTTPResponseHeader,
+ self.client.get, 'fake_url')
+
+ def test_no_microversion_header_in_response(self):
+ self._check_microversion_header_in_response({})
+ self.assertRaises(exceptions.InvalidHTTPResponseHeader,
+ self.client.get, 'fake_url')
-class TestClientWithMicroversionHeader(base.BaseComputeServiceTest):
+class DummyServiceClient1(base_compute_client.BaseComputeClient):
+ schema_versions_info = [
+ {'min': None, 'max': '2.1', 'schema': 'schemav21'},
+ {'min': '2.2', 'max': '2.9', 'schema': 'schemav22'},
+ {'min': '2.10', 'max': None, 'schema': 'schemav210'}]
+
+ def return_selected_schema(self):
+ return self.get_schema(self.schema_versions_info)
+
+
+class TestSchemaVersionsNone(base.BaseComputeServiceTest):
+ api_microversion = None
+ expected_schema = 'schemav21'
def setUp(self):
- super(TestClientWithMicroversionHeader, self).setUp()
+ super(TestSchemaVersionsNone, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
- self.client = base_compute_client.BaseComputeClient(
- fake_auth, 'compute', 'regionOne')
- self.client.api_microversion = '2.2'
+ self.client = DummyServiceClient1(fake_auth, 'compute', 'regionOne')
+ self.client.api_microversion = self.api_microversion
- def test_microverion_header(self):
- header = self.client.get_headers()
- self.assertIn('X-OpenStack-Nova-API-Version', header)
- self.assertEqual(self.client.api_microversion,
- header['X-OpenStack-Nova-API-Version'])
+ def test_schema(self):
+ self.assertEqual(self.expected_schema,
+ self.client.return_selected_schema())
- def test_microverion_header_in_raw_request(self):
- def raw_request(*args, **kwargs):
- self.assertIn('X-OpenStack-Nova-API-Version', kwargs['headers'])
- self.assertEqual(self.client.api_microversion,
- kwargs['headers']['X-OpenStack-Nova-API-Version'])
- return (httplib2.Response({'status': 200}), {})
- with mock.patch.object(rest_client.RestClient,
- 'raw_request') as mock_get:
- mock_get.side_effect = raw_request
- self.client.get('fake_url')
+class TestSchemaVersionsV21(TestSchemaVersionsNone):
+ api_microversion = '2.1'
+ expected_schema = 'schemav21'
+
+
+class TestSchemaVersionsV22(TestSchemaVersionsNone):
+ api_microversion = '2.2'
+ expected_schema = 'schemav22'
+
+
+class TestSchemaVersionsV25(TestSchemaVersionsNone):
+ api_microversion = '2.5'
+ expected_schema = 'schemav22'
+
+
+class TestSchemaVersionsV29(TestSchemaVersionsNone):
+ api_microversion = '2.9'
+ expected_schema = 'schemav22'
+
+
+class TestSchemaVersionsV210(TestSchemaVersionsNone):
+ api_microversion = '2.10'
+ expected_schema = 'schemav210'
+
+
+class TestSchemaVersionsLatest(TestSchemaVersionsNone):
+ api_microversion = 'latest'
+ expected_schema = 'schemav210'
+
+
+class DummyServiceClient2(base_compute_client.BaseComputeClient):
+ schema_versions_info = [
+ {'min': None, 'max': '2.1', 'schema': 'schemav21'},
+ {'min': '2.2', 'max': '2.9', 'schema': 'schemav22'}]
+
+ def return_selected_schema(self):
+ return self.get_schema(self.schema_versions_info)
+
+
+class TestSchemaVersionsNotFound(base.BaseComputeServiceTest):
+ api_microversion = '2.10'
+ expected_schema = 'schemav210'
+
+ def setUp(self):
+ super(TestSchemaVersionsNotFound, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.client = DummyServiceClient2(fake_auth, 'compute', 'regionOne')
+ self.client.api_microversion = self.api_microversion
+
+ def test_schema(self):
+ self.assertRaises(exceptions.JSONSchemaNotFound,
+ self.client.return_selected_schema)
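A sketch restating the behaviour the new schema tests above pin down,
illustrative only: a compute client subclass lists its JSON schemas in
schema_versions_info, and get_schema picks the entry whose min/max range
contains the requested microversion; a version outside every range raises
JSONSchemaNotFound. DummyServiceClient1 and fake_auth_provider are the helpers
defined and imported in the test module above.

    fake_auth = fake_auth_provider.FakeAuthProvider()
    client = DummyServiceClient1(fake_auth, 'compute', 'regionOne')
    client.api_microversion = '2.5'
    assert client.return_selected_schema() == 'schemav22'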
diff --git a/tempest/tests/services/compute/test_floating_ips_client.py b/tempest/tests/services/compute/test_floating_ips_client.py
deleted file mode 100644
index ee22004..0000000
--- a/tempest/tests/services/compute/test_floating_ips_client.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# Copyright 2015 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslotest import mockpatch
-from tempest_lib import exceptions as lib_exc
-
-from tempest.services.compute.json import floating_ips_client
-from tempest.tests import fake_auth_provider
-from tempest.tests.services.compute import base
-
-
-class TestFloatingIpsClient(base.BaseComputeServiceTest):
-
- floating_ip = {"fixed_ip": None,
- "id": "46d61064-13ba-4bf0-9557-69de824c3d6f",
- "instance_id": "a1daa443-a6bb-463e-aea2-104b7d912eb8",
- "ip": "10.10.10.1",
- "pool": "nova"}
-
- def setUp(self):
- super(TestFloatingIpsClient, self).setUp()
- fake_auth = fake_auth_provider.FakeAuthProvider()
- self.client = floating_ips_client.FloatingIPsClient(
- fake_auth, 'compute', 'regionOne')
-
- def _test_list_floating_ips(self, bytes_body=False):
- expected = {'floating_ips': [TestFloatingIpsClient.floating_ip]}
- self.check_service_client_function(
- self.client.list_floating_ips,
- 'tempest.common.service_client.ServiceClient.get',
- expected,
- bytes_body)
-
- def test_list_floating_ips_str_body(self):
- self._test_list_floating_ips(bytes_body=False)
-
- def test_list_floating_ips_byte_body(self):
- self._test_list_floating_ips(bytes_body=True)
-
- def _test_show_floating_ip(self, bytes_body=False):
- expected = {"floating_ip": TestFloatingIpsClient.floating_ip}
- self.check_service_client_function(
- self.client.show_floating_ip,
- 'tempest.common.service_client.ServiceClient.get',
- expected,
- bytes_body,
- floating_ip_id='a1daa443-a6bb-463e-aea2-104b7d912eb8')
-
- def test_show_floating_ip_str_body(self):
- self._test_show_floating_ip(bytes_body=False)
-
- def test_show_floating_ip_byte_body(self):
- self._test_show_floating_ip(bytes_body=True)
-
- def _test_create_floating_ip(self, bytes_body=False):
- expected = {"floating_ip": TestFloatingIpsClient.floating_ip}
- self.check_service_client_function(
- self.client.create_floating_ip,
- 'tempest.common.service_client.ServiceClient.post',
- expected,
- bytes_body,
- pool_name='nova')
-
- def test_create_floating_ip_str_body(self):
- self._test_create_floating_ip(bytes_body=False)
-
- def test_create_floating_ip_byte_body(self):
- self._test_create_floating_ip(bytes_body=True)
-
- def test_delete_floating_ip(self):
- self.check_service_client_function(
- self.client.delete_floating_ip,
- 'tempest.common.service_client.ServiceClient.delete',
- {}, status=202, floating_ip_id='fake-id')
-
- def test_associate_floating_ip_to_server(self):
- self.check_service_client_function(
- self.client.associate_floating_ip_to_server,
- 'tempest.common.service_client.ServiceClient.post',
- {}, status=202, floating_ip='10.10.10.1',
- server_id='c782b7a9-33cd-45f0-b795-7f87f456408b')
-
- def test_disassociate_floating_ip_from_server(self):
- self.check_service_client_function(
- self.client.disassociate_floating_ip_from_server,
- 'tempest.common.service_client.ServiceClient.post',
- {}, status=202, floating_ip='10.10.10.1',
- server_id='c782b7a9-33cd-45f0-b795-7f87f456408b')
-
- def test_is_resource_deleted_true(self):
- self.useFixture(mockpatch.Patch(
- 'tempest.services.compute.json.floating_ips_client.'
- 'FloatingIPsClient.show_floating_ip',
- side_effect=lib_exc.NotFound()))
- self.assertTrue(self.client.is_resource_deleted('fake-id'))
-
- def test_is_resource_deleted_false(self):
- self.useFixture(mockpatch.Patch(
- 'tempest.services.compute.json.floating_ips_client.'
- 'FloatingIPsClient.show_floating_ip',
- return_value={"floating_ip": TestFloatingIpsClient.floating_ip}))
- self.assertFalse(self.client.is_resource_deleted('fake-id'))
diff --git a/tempest/tests/services/compute/test_keypairs_client.py b/tempest/tests/services/compute/test_keypairs_client.py
index 8b1a9a8..03aee53 100644
--- a/tempest/tests/services/compute/test_keypairs_client.py
+++ b/tempest/tests/services/compute/test_keypairs_client.py
@@ -38,7 +38,7 @@
def _test_list_keypairs(self, bytes_body=False):
self.check_service_client_function(
self.client.list_keypairs,
- 'tempest.common.service_client.ServiceClient.get',
+ 'tempest_lib.common.rest_client.RestClient.get',
{"keypairs": []},
bytes_body)
@@ -60,7 +60,7 @@
self.check_service_client_function(
self.client.show_keypair,
- 'tempest.common.service_client.ServiceClient.get',
+ 'tempest_lib.common.rest_client.RestClient.get',
fake_keypair,
bytes_body,
keypair_name="test")
@@ -77,7 +77,7 @@
self.check_service_client_function(
self.client.create_keypair,
- 'tempest.common.service_client.ServiceClient.post',
+ 'tempest_lib.common.rest_client.RestClient.post',
fake_keypair,
bytes_body,
name="test")
@@ -91,5 +91,5 @@
def test_delete_keypair(self):
self.check_service_client_function(
self.client.delete_keypair,
- 'tempest.common.service_client.ServiceClient.delete',
+ 'tempest_lib.common.rest_client.RestClient.delete',
{}, status=202, keypair_name='test')
diff --git a/tempest/tests/services/compute/test_security_group_rules_client.py b/tempest/tests/services/compute/test_security_group_rules_client.py
deleted file mode 100644
index c182742..0000000
--- a/tempest/tests/services/compute/test_security_group_rules_client.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright 2015 NEC Corporation. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.services.compute.json import security_group_rules_client
-from tempest.tests import fake_auth_provider
-from tempest.tests.services.compute import base
-
-
-class TestSecurityGroupRulesClient(base.BaseComputeServiceTest):
-
- FAKE_SECURITY_GROUP_RULE = {
- "security_group_rule": {
- "id": "2d021cf1-ce4b-4292-994f-7a785d62a144",
- "ip_range": {
- "cidr": "0.0.0.0/0"
- },
- "parent_group_id": "48700ff3-30b8-4e63-845f-a79c9633e9fb",
- "to_port": 443,
- "ip_protocol": "tcp",
- "group": {},
- "from_port": 443
- }
- }
-
- def setUp(self):
- super(TestSecurityGroupRulesClient, self).setUp()
- fake_auth = fake_auth_provider.FakeAuthProvider()
- self.client = security_group_rules_client.SecurityGroupRulesClient(
- fake_auth, 'compute', 'regionOne')
-
- def _test_create_security_group_rule(self, bytes_body=False):
- req_body = {
- "from_port": "443",
- "ip_protocol": "tcp",
- "to_port": "443",
- "cidr": "0.0.0.0/0",
- "parent_group_id": "48700ff3-30b8-4e63-845f-a79c9633e9fb"
- }
- self.check_service_client_function(
- self.client.create_security_group_rule,
- 'tempest.common.service_client.ServiceClient.post',
- self.FAKE_SECURITY_GROUP_RULE,
- to_utf=bytes_body, **req_body)
-
- def test_create_security_group_rule_with_str_body(self):
- self._test_create_security_group_rule()
-
- def test_create_security_group_rule_with_bytes_body(self):
- self._test_create_security_group_rule(bytes_body=True)
-
- def test_delete_security_group_rule(self):
- self.check_service_client_function(
- self.client.delete_security_group_rule,
- 'tempest.common.service_client.ServiceClient.delete',
- {}, status=202, group_rule_id='group-id')
diff --git a/tempest/tests/services/compute/test_server_groups_client.py b/tempest/tests/services/compute/test_server_groups_client.py
deleted file mode 100644
index e531e2f..0000000
--- a/tempest/tests/services/compute/test_server_groups_client.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright 2015 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import httplib2
-
-from oslotest import mockpatch
-from tempest_lib.tests import fake_auth_provider
-
-from tempest.services.compute.json import server_groups_client
-from tempest.tests.services.compute import base
-
-
-class TestServerGroupsClient(base.BaseComputeServiceTest):
-
- server_group = {
- "id": "5bbcc3c4-1da2-4437-a48a-66f15b1b13f9",
- "name": "test",
- "policies": ["anti-affinity"],
- "members": [],
- "metadata": {}}
-
- def setUp(self):
- super(TestServerGroupsClient, self).setUp()
- fake_auth = fake_auth_provider.FakeAuthProvider()
- self.client = server_groups_client.ServerGroupsClient(
- fake_auth, 'compute', 'regionOne')
-
- def _test_create_server_group(self, bytes_body=False):
- expected = {"server_group": TestServerGroupsClient.server_group}
- self.check_service_client_function(
- self.client.create_server_group,
- 'tempest.common.service_client.ServiceClient.post', expected,
- bytes_body, name='fake-group', policies=['affinity'])
-
- def test_create_server_group_str_body(self):
- self._test_create_server_group(bytes_body=False)
-
- def test_create_server_group_byte_body(self):
- self._test_create_server_group(bytes_body=True)
-
- def test_delete_server_group(self):
- response = (httplib2.Response({'status': 204}), None)
- self.useFixture(mockpatch.Patch(
- 'tempest.common.service_client.ServiceClient.delete',
- return_value=response))
- self.client.delete_server_group('fake-group')
-
- def _test_list_server_groups(self, bytes_body=False):
- expected = {"server_groups": [TestServerGroupsClient.server_group]}
- self.check_service_client_function(
- self.client.list_server_groups,
- 'tempest.common.service_client.ServiceClient.get',
- expected, bytes_body)
-
- def test_list_server_groups_str_body(self):
- self._test_list_server_groups(bytes_body=False)
-
- def test_list_server_groups_byte_body(self):
- self._test_list_server_groups(bytes_body=True)
-
- def _test_show_server_group(self, bytes_body=False):
- expected = {"server_group": TestServerGroupsClient.server_group}
- self.check_service_client_function(
- self.client.show_server_group,
- 'tempest.common.service_client.ServiceClient.get',
- expected, bytes_body,
- server_group_id='5bbcc3c4-1da2-4437-a48a-66f15b1b13f9')
-
- def test_show_server_group_str_body(self):
- self._test_show_server_group(bytes_body=False)
-
- def test_show_server_group_byte_body(self):
- self._test_show_server_group(bytes_body=True)
diff --git a/tempest/tests/services/compute/test_servers_client.py b/tempest/tests/services/compute/test_servers_client.py
deleted file mode 100644
index 1fd0740..0000000
--- a/tempest/tests/services/compute/test_servers_client.py
+++ /dev/null
@@ -1,999 +0,0 @@
-# Copyright 2015 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-from tempest.services.compute.json import servers_client
-from tempest.tests import fake_auth_provider
-from tempest.tests.services.compute import base
-
-
-class TestServersClient(base.BaseComputeServiceTest):
-
- FAKE_SERVERS = {'servers': [{
- "id": "616fb98f-46ca-475e-917e-2563e5a8cd19",
- "links": [
- {
- "href": "http://os.co/v2/616fb98f-46ca-475e-917e-2563e5a8cd19",
- "rel": "self"
- },
- {
- "href": "http://os.co/616fb98f-46ca-475e-917e-2563e5a8cd19",
- "rel": "bookmark"
- }
- ],
- "name": u"new\u1234-server-test"}]
- }
-
- FAKE_SERVER_DIAGNOSTICS = {
- "cpu0_time": 17300000000,
- "memory": 524288,
- "vda_errors": -1,
- "vda_read": 262144,
- "vda_read_req": 112,
- "vda_write": 5778432,
- "vda_write_req": 488,
- "vnet1_rx": 2070139,
- "vnet1_rx_drop": 0,
- "vnet1_rx_errors": 0,
- "vnet1_rx_packets": 26701,
- "vnet1_tx": 140208,
- "vnet1_tx_drop": 0,
- "vnet1_tx_errors": 0,
- "vnet1_tx_packets": 662
- }
-
- FAKE_SERVER_GET = {'server': {
- "accessIPv4": "",
- "accessIPv6": "",
- "addresses": {
- "private": [
- {
- "addr": "192.168.0.3",
- "version": 4
- }
- ]
- },
- "created": "2012-08-20T21:11:09Z",
- "flavor": {
- "id": "1",
- "links": [
- {
- "href": "http://os.com/openstack/flavors/1",
- "rel": "bookmark"
- }
- ]
- },
- "hostId": "65201c14a29663e06d0748e561207d998b343e1d164bfa0aafa9c45d",
- "id": "893c7791-f1df-4c3d-8383-3caae9656c62",
- "image": {
- "id": "70a599e0-31e7-49b7-b260-868f441e862b",
- "links": [
- {
- "href": "http://imgs/70a599e0-31e7-49b7-b260-868f441e862b",
- "rel": "bookmark"
- }
- ]
- },
- "links": [
- {
- "href": "http://v2/srvs/893c7791-f1df-4c3d-8383-3caae9656c62",
- "rel": "self"
- },
- {
- "href": "http://srvs/893c7791-f1df-4c3d-8383-3caae9656c62",
- "rel": "bookmark"
- }
- ],
- "metadata": {
- u"My Server N\u1234me": u"Apa\u1234che1"
- },
- "name": u"new\u1234-server-test",
- "progress": 0,
- "status": "ACTIVE",
- "tenant_id": "openstack",
- "updated": "2012-08-20T21:11:09Z",
- "user_id": "fake"}
- }
-
- FAKE_SERVER_POST = {"server": {
- "id": "616fb98f-46ca-475e-917e-2563e5a8cd19",
- "adminPass": "fake-admin-pass",
- "security_groups": [
- 'fake-security-group-1',
- 'fake-security-group-2'
- ],
- "links": [
- {
- "href": "http://os.co/v2/616fb98f-46ca-475e-917e-2563e5a8cd19",
- "rel": "self"
- },
- {
- "href": "http://os.co/616fb98f-46ca-475e-917e-2563e5a8cd19",
- "rel": "bookmark"
- }
- ],
- "OS-DCF:diskConfig": "fake-disk-config"}
- }
-
- FAKE_ADDRESS = {"addresses": {
- "private": [
- {
- "addr": "192.168.0.3",
- "version": 4
- }
- ]}
- }
-
- FAKE_COMMON_VOLUME = {
- "id": "a6b0875b-6b5d-4a5a-81eb-0c3aa62e5fdb",
- "device": "fake-device",
- "volumeId": "a6b0875b-46ca-475e-917e-0c3aa62e5fdb",
- "serverId": "616fb98f-46ca-475e-917e-2563e5a8cd19"
- }
-
- FAKE_VIRTUAL_INTERFACES = {
- "id": "a6b0875b-46ca-475e-917e-0c3aa62e5fdb",
- "mac_address": "00:25:90:5b:f8:c3",
- "OS-EXT-VIF-NET:net_id": "fake-os-net-id"
- }
-
- FAKE_INSTANCE_ACTIONS = {
- "action": "fake-action",
- "request_id": "16fb98f-46ca-475e-917e-2563e5a8cd19",
- "user_id": "16fb98f-46ca-475e-917e-2563e5a8cd12",
- "project_id": "16fb98f-46ca-475e-917e-2563e5a8cd34",
- "start_time": "09MAR2015 11:15",
- "message": "fake-msg",
- "instance_uuid": "16fb98f-46ca-475e-917e-2563e5a8cd12"
- }
-
- FAKE_VNC_CONSOLE = {
- "type": "fake-type",
- "url": "http://os.co/v2/616fb98f-46ca-475e-917e-2563e5a8cd19"
- }
-
- FAKE_INSTANCE_ACTION_EVENTS = {
- "event": "fake-event",
- "start_time": "09MAR2015 11:15",
- "finish_time": "09MAR2015 11:15",
- "result": "fake-result",
- "traceback": "fake-trace-back"
- }
-
- FAKE_INSTANCE_WITH_EVENTS = copy.deepcopy(FAKE_INSTANCE_ACTIONS)
- FAKE_INSTANCE_WITH_EVENTS['events'] = [FAKE_INSTANCE_ACTION_EVENTS]
-
- FAKE_REBUILD_SERVER = copy.deepcopy(FAKE_SERVER_GET)
- FAKE_REBUILD_SERVER['server']['adminPass'] = 'fake-admin-pass'
-
- server_id = FAKE_SERVER_GET['server']['id']
- network_id = 'a6b0875b-6b5d-4a5a-81eb-0c3aa62e5fdb'
-
- def setUp(self):
- super(TestServersClient, self).setUp()
- fake_auth = fake_auth_provider.FakeAuthProvider()
- self.client = servers_client.ServersClient(
- fake_auth, 'compute', 'regionOne')
-
- def test_list_servers_with_str_body(self):
- self._test_list_servers()
-
- def test_list_servers_with_bytes_body(self):
- self._test_list_servers(bytes_body=True)
-
- def _test_list_servers(self, bytes_body=False):
- self.check_service_client_function(
- self.client.list_servers,
- 'tempest.common.service_client.ServiceClient.get',
- self.FAKE_SERVERS,
- bytes_body)
-
- def test_show_server_with_str_body(self):
- self._test_show_server()
-
- def test_show_server_with_bytes_body(self):
- self._test_show_server(bytes_body=True)
-
- def _test_show_server(self, bytes_body=False):
- self.check_service_client_function(
- self.client.show_server,
- 'tempest.common.service_client.ServiceClient.get',
- self.FAKE_SERVER_GET,
- bytes_body,
- server_id=self.server_id
- )
-
- def test_delete_server(self, bytes_body=False):
- self.check_service_client_function(
- self.client.delete_server,
- 'tempest.common.service_client.ServiceClient.delete',
- {},
- status=204,
- server_id=self.server_id
- )
-
- def test_create_server_with_str_body(self):
- self._test_create_server()
-
- def test_create_server_with_bytes_body(self):
- self._test_create_server(True)
-
- def _test_create_server(self, bytes_body=False):
- self.check_service_client_function(
- self.client.create_server,
- 'tempest.common.service_client.ServiceClient.post',
- self.FAKE_SERVER_POST,
- bytes_body,
- status=202,
- name='fake-name',
- imageRef='fake-image-ref',
- flavorRef='fake-flavor-ref'
- )
-
- def test_list_addresses_with_str_body(self):
- self._test_list_addresses()
-
- def test_list_addresses_with_bytes_body(self):
- self._test_list_addresses(True)
-
- def _test_list_addresses(self, bytes_body=False):
- self.check_service_client_function(
- self.client.list_addresses,
- 'tempest.common.service_client.ServiceClient.get',
- self.FAKE_ADDRESS,
- bytes_body,
- server_id=self.server_id
- )
-
- def test_list_addresses_by_network_with_str_body(self):
- self._test_list_addresses_by_network()
-
- def test_list_addresses_by_network_with_bytes_body(self):
- self._test_list_addresses_by_network(True)
-
- def _test_list_addresses_by_network(self, bytes_body=False):
- self.check_service_client_function(
- self.client.list_addresses_by_network,
- 'tempest.common.service_client.ServiceClient.get',
- self.FAKE_ADDRESS['addresses'],
- server_id=self.server_id,
- network_id=self.network_id
- )
-
- def test_action_with_str_body(self):
- self._test_action()
-
- def test_action_with_bytes_body(self):
- self._test_action(True)
-
- def _test_action(self, bytes_body=False):
- self.check_service_client_function(
- self.client.action,
- 'tempest.common.service_client.ServiceClient.post',
- {},
- server_id=self.server_id,
- action_name='fake-action-name',
- schema={'status_code': 200}
- )
-
- def test_create_backup_with_str_body(self):
- self._test_create_backup()
-
- def test_create_backup_with_bytes_body(self):
- self._test_create_backup(True)
-
- def _test_create_backup(self, bytes_body=False):
- self.check_service_client_function(
- self.client.create_backup,
- 'tempest.common.service_client.ServiceClient.post',
- {},
- status=202,
- server_id=self.server_id,
- backup_type='fake-backup',
- rotation='fake-rotation',
- name='fake-name'
- )
-
- def test_change_password_with_str_body(self):
- self._test_change_password()
-
- def test_change_password_with_bytes_body(self):
- self._test_change_password(True)
-
- def _test_change_password(self, bytes_body=False):
- self.check_service_client_function(
- self.client.change_password,
- 'tempest.common.service_client.ServiceClient.post',
- {},
- status=202,
- server_id=self.server_id,
- adminPass='fake-admin-pass'
- )
-
- def test_show_password_with_str_body(self):
- self._test_show_password()
-
- def test_show_password_with_bytes_body(self):
- self._test_show_password(True)
-
- def _test_show_password(self, bytes_body=False):
- self.check_service_client_function(
- self.client.show_password,
- 'tempest.common.service_client.ServiceClient.get',
- {'password': 'fake-password'},
- server_id=self.server_id
- )
-
- def test_delete_password_with_str_body(self):
- self._test_delete_password()
-
- def test_delete_password_with_bytes_body(self):
- self._test_delete_password(True)
-
- def _test_delete_password(self, bytes_body=False):
- self.check_service_client_function(
- self.client.delete_password,
- 'tempest.common.service_client.ServiceClient.delete',
- {},
- status=204,
- server_id=self.server_id
- )
-
- def test_reboot_server_with_str_body(self):
- self._test_reboot_server()
-
- def test_reboot_server_with_bytes_body(self):
- self._test_reboot_server(True)
-
- def _test_reboot_server(self, bytes_body=False):
- self.check_service_client_function(
- self.client.reboot_server,
- 'tempest.common.service_client.ServiceClient.post',
- {},
- status=202,
- server_id=self.server_id,
- type='fake-reboot-type'
- )
-
- def test_rebuild_server_with_str_body(self):
- self._test_rebuild_server()
-
- def test_rebuild_server_with_bytes_body(self):
- self._test_rebuild_server(True)
-
- def _test_rebuild_server(self, bytes_body=False):
- self.check_service_client_function(
- self.client.rebuild_server,
- 'tempest.common.service_client.ServiceClient.post',
- self.FAKE_REBUILD_SERVER,
- status=202,
- server_id=self.server_id,
- image_ref='fake-image-ref'
- )
-
- def test_resize_server_with_str_body(self):
- self._test_resize_server()
-
- def test_resize_server_with_bytes_body(self):
- self._test_resize_server(True)
-
- def _test_resize_server(self, bytes_body=False):
- self.check_service_client_function(
- self.client.resize_server,
- 'tempest.common.service_client.ServiceClient.post',
- {},
- status=202,
- server_id=self.server_id,
- flavor_ref='fake-flavor-ref'
- )
-
- def test_confirm_resize_server_with_str_body(self):
- self._test_confirm_resize_server()
-
- def test_confirm_resize_server_with_bytes_body(self):
- self._test_confirm_resize_server(True)
-
- def _test_confirm_resize_server(self, bytes_body=False):
- self.check_service_client_function(
- self.client.confirm_resize_server,
- 'tempest.common.service_client.ServiceClient.post',
- {},
- status=204,
- server_id=self.server_id
- )
-
- def test_revert_resize_server_with_str_body(self):
- self._test_revert_resize()
-
- def test_revert_resize_server_with_bytes_body(self):
- self._test_revert_resize(True)
-
- def _test_revert_resize(self, bytes_body=False):
- self.check_service_client_function(
- self.client.revert_resize_server,
- 'tempest.common.service_client.ServiceClient.post',
- {},
- status=202,
- server_id=self.server_id
- )
-
- def test_list_server_metadata_with_str_body(self):
- self._test_list_server_metadata()
-
- def test_list_server_metadata_with_bytes_body(self):
- self._test_list_server_metadata(True)
-
- def _test_list_server_metadata(self, bytes_body=False):
- self.check_service_client_function(
- self.client.list_server_metadata,
- 'tempest.common.service_client.ServiceClient.get',
- {'metadata': {'fake-key': 'fake-meta-data'}},
- server_id=self.server_id
- )
-
- def test_set_server_metadata_with_str_body(self):
- self._test_set_server_metadata()
-
- def test_set_server_metadata_with_bytes_body(self):
- self._test_set_server_metadata(True)
-
- def _test_set_server_metadata(self, bytes_body=False):
- self.check_service_client_function(
- self.client.set_server_metadata,
- 'tempest.common.service_client.ServiceClient.put',
- {'metadata': {'fake-key': 'fake-meta-data'}},
- server_id=self.server_id,
- meta='fake-meta'
- )
-
- def test_update_server_metadata_with_str_body(self):
- self._test_update_server_metadata()
-
- def test_update_server_metadata_with_bytes_body(self):
- self._test_update_server_metadata(True)
-
- def _test_update_server_metadata(self, bytes_body=False):
- self.check_service_client_function(
- self.client.update_server_metadata,
- 'tempest.common.service_client.ServiceClient.post',
- {'metadata': {'fake-key': 'fake-meta-data'}},
- server_id=self.server_id,
- meta='fake-meta'
- )
-
- def test_show_server_metadata_item_with_str_body(self):
- self._test_show_server_metadata()
-
- def test_show_server_metadata_item_with_bytes_body(self):
- self._test_show_server_metadata(True)
-
- def _test_show_server_metadata(self, bytes_body=False):
- self.check_service_client_function(
- self.client.show_server_metadata_item,
- 'tempest.common.service_client.ServiceClient.get',
- {'meta': {'fake-key': 'fake-meta-data'}},
- server_id=self.server_id,
- key='fake-key'
- )
-
- def test_set_server_metadata_item_with_str_body(self):
- self._test_set_server_metadata_item()
-
- def test_set_server_metadata_item_with_bytes_body(self):
- self._test_set_server_metadata_item(True)
-
- def _test_set_server_metadata_item(self, bytes_body=False):
- self.check_service_client_function(
- self.client.set_server_metadata_item,
- 'tempest.common.service_client.ServiceClient.put',
- {'meta': {'fake-key': 'fake-meta-data'}},
- server_id=self.server_id,
- key='fake-key',
- meta='fake-meta'
- )
-
- def test_delete_server_metadata_item_with_str_body(self):
- self._test_delete_server_metadata()
-
- def test_delete_server_metadata_item_with_bytes_body(self):
- self._test_delete_server_metadata(True)
-
- def _test_delete_server_metadata(self, bytes_body=False):
- self.check_service_client_function(
- self.client.delete_server_metadata_item,
- 'tempest.common.service_client.ServiceClient.delete',
- {},
- status=204,
- server_id=self.server_id,
- key='fake-key'
- )
-
- def test_stop_server_with_str_body(self):
- self._test_stop_server()
-
- def test_stop_server_with_bytes_body(self):
- self._test_stop_server(True)
-
- def _test_stop_server(self, bytes_body=False):
- self.check_service_client_function(
- self.client.stop_server,
- 'tempest.common.service_client.ServiceClient.post',
- {},
- status=202,
- server_id=self.server_id
- )
-
- def test_start_server_with_str_body(self):
- self._test_start_server()
-
- def test_start_server_with_bytes_body(self):
- self._test_start_server(True)
-
- def _test_start_server(self, bytes_body=False):
- self.check_service_client_function(
- self.client.start_server,
- 'tempest.common.service_client.ServiceClient.post',
- {},
- status=202,
- server_id=self.server_id
- )
-
- def test_attach_volume_with_str_body(self):
- self._test_attach_volume_server()
-
- def test_attach_volume_with_bytes_body(self):
- self._test_attach_volume_server(True)
-
- def _test_attach_volume_server(self, bytes_body=False):
- self.check_service_client_function(
- self.client.attach_volume,
- 'tempest.common.service_client.ServiceClient.post',
- {'volumeAttachment': self.FAKE_COMMON_VOLUME},
- server_id=self.server_id
- )
-
- def test_detach_volume_with_str_body(self):
- self._test_detach_volume_server()
-
- def test_detach_volume_with_bytes_body(self):
- self._test_detach_volume_server(True)
-
- def _test_detach_volume_server(self, bytes_body=False):
- self.check_service_client_function(
- self.client.detach_volume,
- 'tempest.common.service_client.ServiceClient.delete',
- {},
- status=202,
- server_id=self.server_id,
- volume_id=self.FAKE_COMMON_VOLUME['volumeId']
- )
-
- def test_show_volume_attachment_with_str_body(self):
- self._test_show_volume_attachment()
-
- def test_show_volume_attachment_with_bytes_body(self):
- self._test_show_volume_attachment(True)
-
- def _test_show_volume_attachment(self, bytes_body=False):
- self.check_service_client_function(
- self.client.show_volume_attachment,
- 'tempest.common.service_client.ServiceClient.get',
- {'volumeAttachment': self.FAKE_COMMON_VOLUME},
- server_id=self.server_id,
- attach_id='fake-attach-id'
- )
-
- def test_list_volume_attachments_with_str_body(self):
- self._test_list_volume_attachments()
-
- def test_list_volume_attachments_with_bytes_body(self):
- self._test_list_volume_attachments(True)
-
- def _test_list_volume_attachments(self, bytes_body=False):
- self.check_service_client_function(
- self.client.list_volume_attachments,
- 'tempest.common.service_client.ServiceClient.get',
- {'volumeAttachments': [self.FAKE_COMMON_VOLUME]},
- server_id=self.server_id
- )
-
- def test_add_security_group_with_str_body(self):
- self._test_add_security_group()
-
- def test_add_security_group_with_bytes_body(self):
- self._test_add_security_group(True)
-
- def _test_add_security_group(self, bytes_body=False):
- self.check_service_client_function(
- self.client.add_security_group,
- 'tempest.common.service_client.ServiceClient.post',
- {},
- status=202,
- server_id=self.server_id,
- name='fake-name'
- )
-
- def test_remove_security_group_with_str_body(self):
- self._test_remove_security_group()
-
- def test_remove_security_group_with_bytes_body(self):
- self._test_remove_security_group(True)
-
- def _test_remove_security_group(self, bytes_body=False):
- self.check_service_client_function(
- self.client.remove_security_group,
- 'tempest.common.service_client.ServiceClient.post',
- {},
- status=202,
- server_id=self.server_id,
- name='fake-name'
- )
-
- def test_live_migrate_server_with_str_body(self):
- self._test_live_migrate_server()
-
- def test_live_migrate_server_with_bytes_body(self):
- self._test_live_migrate_server(True)
-
- def _test_live_migrate_server(self, bytes_body=False):
- self.check_service_client_function(
- self.client.live_migrate_server,
- 'tempest.common.service_client.ServiceClient.post',
- {},
- status=202,
- server_id=self.server_id
- )
-
- def test_migrate_server_with_str_body(self):
- self._test_migrate_server()
-
- def test_migrate_server_with_bytes_body(self):
- self._test_migrate_server(True)
-
- def _test_migrate_server(self, bytes_body=False):
- self.check_service_client_function(
- self.client.migrate_server,
- 'tempest.common.service_client.ServiceClient.post',
- {},
- status=202,
- server_id=self.server_id
- )
-
- def test_lock_server_with_str_body(self):
- self._test_lock_server()
-
- def test_lock_server_with_bytes_body(self):
- self._test_lock_server(True)
-
- def _test_lock_server(self, bytes_body=False):
- self.check_service_client_function(
- self.client.lock_server,
- 'tempest.common.service_client.ServiceClient.post',
- {},
- status=202,
- server_id=self.server_id
- )
-
- def test_unlock_server_with_str_body(self):
- self._test_unlock_server()
-
- def test_unlock_server_with_bytes_body(self):
- self._test_unlock_server(True)
-
- def _test_unlock_server(self, bytes_body=False):
- self.check_service_client_function(
- self.client.unlock_server,
- 'tempest.common.service_client.ServiceClient.post',
- {},
- status=202,
- server_id=self.server_id
- )
-
- def test_suspend_server_with_str_body(self):
- self._test_suspend_server()
-
- def test_suspend_server_with_bytes_body(self):
- self._test_suspend_server(True)
-
- def _test_suspend_server(self, bytes_body=False):
- self.check_service_client_function(
- self.client.suspend_server,
- 'tempest.common.service_client.ServiceClient.post',
- {},
- status=202,
- server_id=self.server_id
- )
-
- def test_resume_server_with_str_body(self):
- self._test_resume_server()
-
- def test_resume_server_with_bytes_body(self):
- self._test_resume_server(True)
-
- def _test_resume_server(self, bytes_body=False):
- self.check_service_client_function(
- self.client.resume_server,
- 'tempest.common.service_client.ServiceClient.post',
- {},
- status=202,
- server_id=self.server_id
- )
-
- def test_pause_server_with_str_body(self):
- self._test_pause_server()
-
- def test_pause_server_with_bytes_body(self):
- self._test_pause_server(True)
-
- def _test_pause_server(self, bytes_body=False):
- self.check_service_client_function(
- self.client.pause_server,
- 'tempest.common.service_client.ServiceClient.post',
- {},
- status=202,
- server_id=self.server_id
- )
-
- def test_unpause_server_with_str_body(self):
- self._test_unpause_server()
-
- def test_unpause_server_with_bytes_body(self):
- self._test_unpause_server(True)
-
- def _test_unpause_server(self, bytes_body=False):
- self.check_service_client_function(
- self.client.unpause_server,
- 'tempest.common.service_client.ServiceClient.post',
- {},
- status=202,
- server_id=self.server_id
- )
-
- def test_reset_state_with_str_body(self):
- self._test_reset_state()
-
- def test_reset_state_with_bytes_body(self):
- self._test_reset_state(True)
-
- def _test_reset_state(self, bytes_body=False):
- self.check_service_client_function(
- self.client.reset_state,
- 'tempest.common.service_client.ServiceClient.post',
- {},
- status=202,
- server_id=self.server_id,
- state='fake-state'
- )
-
- def test_shelve_server_with_str_body(self):
- self._test_shelve_server()
-
- def test_shelve_server_with_bytes_body(self):
- self._test_shelve_server(True)
-
- def _test_shelve_server(self, bytes_body=False):
- self.check_service_client_function(
- self.client.shelve_server,
- 'tempest.common.service_client.ServiceClient.post',
- {},
- status=202,
- server_id=self.server_id
- )
-
- def test_unshelve_server_with_str_body(self):
- self._test_unshelve_server()
-
- def test_unshelve_server_with_bytes_body(self):
- self._test_unshelve_server(True)
-
- def _test_unshelve_server(self, bytes_body=False):
- self.check_service_client_function(
- self.client.unshelve_server,
- 'tempest.common.service_client.ServiceClient.post',
- {},
- status=202,
- server_id=self.server_id
- )
-
- def test_shelve_offload_server_with_str_body(self):
- self._test_shelve_offload_server()
-
- def test_shelve_offload_server_with_bytes_body(self):
- self._test_shelve_offload_server(True)
-
- def _test_shelve_offload_server(self, bytes_body=False):
- self.check_service_client_function(
- self.client.shelve_offload_server,
- 'tempest.common.service_client.ServiceClient.post',
- {},
- status=202,
- server_id=self.server_id
- )
-
- def test_get_console_output_with_str_body(self):
- self._test_get_console_output()
-
- def test_get_console_output_with_bytes_body(self):
- self._test_get_console_output(True)
-
- def _test_get_console_output(self, bytes_body=False):
- self.check_service_client_function(
- self.client.get_console_output,
- 'tempest.common.service_client.ServiceClient.post',
- {'output': 'fake-output'},
- server_id=self.server_id,
- length='fake-length'
- )
-
- def test_list_virtual_interfaces_with_str_body(self):
- self._test_list_virtual_interfaces()
-
- def test_list_virtual_interfaces_with_bytes_body(self):
- self._test_list_virtual_interfaces(True)
-
- def _test_list_virtual_interfaces(self, bytes_body=False):
- self.check_service_client_function(
- self.client.list_virtual_interfaces,
- 'tempest.common.service_client.ServiceClient.get',
- {'virtual_interfaces': [self.FAKE_VIRTUAL_INTERFACES]},
- server_id=self.server_id
- )
-
- def test_rescue_server_with_str_body(self):
- self._test_rescue_server()
-
- def test_rescue_server_with_bytes_body(self):
- self._test_rescue_server(True)
-
- def _test_rescue_server(self, bytes_body=False):
- self.check_service_client_function(
- self.client.rescue_server,
- 'tempest.common.service_client.ServiceClient.post',
- {'adminPass': 'fake-admin-pass'},
- server_id=self.server_id
- )
-
- def test_unrescue_server_with_str_body(self):
- self._test_unrescue_server()
-
- def test_unrescue_server_with_bytes_body(self):
- self._test_unrescue_server(True)
-
- def _test_unrescue_server(self, bytes_body=False):
- self.check_service_client_function(
- self.client.unrescue_server,
- 'tempest.common.service_client.ServiceClient.post',
- {},
- status=202,
- server_id=self.server_id
- )
-
- def test_show_server_diagnostics_with_str_body(self):
- self._test_show_server_diagnostics()
-
- def test_show_server_diagnostics_with_bytes_body(self):
- self._test_show_server_diagnostics(True)
-
- def _test_show_server_diagnostics(self, bytes_body=False):
- self.check_service_client_function(
- self.client.show_server_diagnostics,
- 'tempest.common.service_client.ServiceClient.get',
- self.FAKE_SERVER_DIAGNOSTICS,
- status=200,
- server_id=self.server_id
- )
-
- def test_list_instance_actions_with_str_body(self):
- self._test_list_instance_actions()
-
- def test_list_instance_actions_with_bytes_body(self):
- self._test_list_instance_actions(True)
-
- def _test_list_instance_actions(self, bytes_body=False):
- self.check_service_client_function(
- self.client.list_instance_actions,
- 'tempest.common.service_client.ServiceClient.get',
- {'instanceActions': [self.FAKE_INSTANCE_ACTIONS]},
- server_id=self.server_id
- )
-
- def test_show_instance_action_with_str_body(self):
- self._test_show_instance_action()
-
- def test_show_instance_action_with_bytes_body(self):
- self._test_show_instance_action(True)
-
- def _test_show_instance_action(self, bytes_body=False):
- self.check_service_client_function(
- self.client.show_instance_action,
- 'tempest.common.service_client.ServiceClient.get',
- {'instanceAction': self.FAKE_INSTANCE_WITH_EVENTS},
- server_id=self.server_id,
- request_id='fake-request-id'
- )
-
- def test_force_delete_server_with_str_body(self):
- self._test_force_delete_server()
-
- def test_force_delete_server_with_bytes_body(self):
- self._test_force_delete_server(True)
-
- def _test_force_delete_server(self, bytes_body=False):
- self.check_service_client_function(
- self.client.force_delete_server,
- 'tempest.common.service_client.ServiceClient.post',
- {},
- status=202,
- server_id=self.server_id
- )
-
- def test_restore_soft_deleted_server_with_str_body(self):
- self._test_restore_soft_deleted_server()
-
- def test_restore_soft_deleted_server_with_bytes_body(self):
- self._test_restore_soft_deleted_server(True)
-
- def _test_restore_soft_deleted_server(self, bytes_body=False):
- self.check_service_client_function(
- self.client.restore_soft_deleted_server,
- 'tempest.common.service_client.ServiceClient.post',
- {},
- status=202,
- server_id=self.server_id
- )
-
- def test_reset_network_with_str_body(self):
- self._test_reset_network()
-
- def test_reset_network_with_bytes_body(self):
- self._test_reset_network(True)
-
- def _test_reset_network(self, bytes_body=False):
- self.check_service_client_function(
- self.client.reset_network,
- 'tempest.common.service_client.ServiceClient.post',
- {},
- status=202,
- server_id=self.server_id
- )
-
- def test_inject_network_info_with_str_body(self):
- self._test_inject_network_info()
-
- def test_inject_network_info_with_bytes_body(self):
- self._test_inject_network_info(True)
-
- def _test_inject_network_info(self, bytes_body=False):
- self.check_service_client_function(
- self.client.inject_network_info,
- 'tempest.common.service_client.ServiceClient.post',
- {},
- status=202,
- server_id=self.server_id
- )
-
- def test_get_vnc_console_with_str_body(self):
- self._test_get_vnc_console()
-
- def test_get_vnc_console_with_bytes_body(self):
- self._test_get_vnc_console(True)
-
- def _test_get_vnc_console(self, bytes_body=False):
- self.check_service_client_function(
- self.client.get_vnc_console,
- 'tempest.common.service_client.ServiceClient.post',
- {'console': self.FAKE_VNC_CONSOLE},
- server_id=self.server_id,
- type='fake-console-type'
- )
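
The unit tests removed above all follow one pattern: patch the underlying ServiceClient HTTP method, invoke the compute servers client method, and let check_service_client_function compare the response against a fake body for both str and bytes payloads. A minimal sketch of that pattern (the show_server call, the fake body, and the omitted setUp wiring are illustrative placeholders, not code from the deleted file)::

    from tempest.tests.services.compute import base


    class ServersClientPatternSketch(base.BaseComputeServiceTest):
        # Hypothetical fake response body; the real tests define FAKE_*
        # constants for each API call they cover.
        FAKE_SERVER = {'server': {'id': 'fake-server-id'}}

        # setUp() creating self.client from a fake auth provider is omitted
        # here for brevity.

        def test_show_server_with_str_body(self):
            self._test_show_server()

        def test_show_server_with_bytes_body(self):
            self._test_show_server(True)

        def _test_show_server(self, bytes_body=False):
            # Patch ServiceClient.get, call the client method, and verify
            # the decoded body equals FAKE_SERVER for str and bytes bodies.
            self.check_service_client_function(
                self.client.show_server,
                'tempest.common.service_client.ServiceClient.get',
                self.FAKE_SERVER,
                bytes_body,
                server_id='fake-server-id')
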
diff --git a/tempest/tests/services/test_base_microversion_client.py b/tempest/tests/services/test_base_microversion_client.py
new file mode 100644
index 0000000..11b8170
--- /dev/null
+++ b/tempest/tests/services/test_base_microversion_client.py
@@ -0,0 +1,75 @@
+# Copyright 2016 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import httplib2
+import mock
+from tempest_lib.common import rest_client
+
+from tempest.services import base_microversion_client
+from tempest.tests import fake_auth_provider
+from tempest.tests.services.compute import base
+
+
+class TestClientWithoutMicroversionHeader(base.BaseComputeServiceTest):
+
+ def setUp(self):
+ super(TestClientWithoutMicroversionHeader, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.client = base_microversion_client.BaseMicroversionClient(
+ fake_auth, 'compute', 'regionOne', 'X-OpenStack-Nova-API-Version')
+
+ def test_no_microversion_header(self):
+ header = self.client.get_headers()
+ self.assertNotIn(self.client.api_microversion_header_name, header)
+
+ def test_no_microversion_header_in_raw_request(self):
+ def raw_request(*args, **kwargs):
+ self.assertNotIn(self.client.api_microversion_header_name,
+ kwargs['headers'])
+ return (httplib2.Response({'status': 200}), {})
+
+ with mock.patch.object(rest_client.RestClient,
+ 'raw_request') as mock_get:
+ mock_get.side_effect = raw_request
+ self.client.get('fake_url')
+
+
+class TestClientWithMicroversionHeader(base.BaseComputeServiceTest):
+
+ def setUp(self):
+ super(TestClientWithMicroversionHeader, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.client = base_microversion_client.BaseMicroversionClient(
+ fake_auth, 'compute', 'regionOne', 'X-OpenStack-Nova-API-Version')
+ self.client.set_api_microversion('2.2')
+
+ def test_microversion_header(self):
+ header = self.client.get_headers()
+ self.assertIn(self.client.api_microversion_header_name, header)
+ self.assertEqual(self.client.api_microversion,
+ header[self.client.api_microversion_header_name])
+
+ def test_microversion_header_in_raw_request(self):
+ def raw_request(*args, **kwargs):
+ self.assertIn(self.client.api_microversion_header_name,
+ kwargs['headers'])
+ self.assertEqual(
+ self.client.api_microversion,
+ kwargs['headers'][self.client.api_microversion_header_name])
+ return (httplib2.Response({'status': 200}), {})
+
+ with mock.patch.object(rest_client.RestClient,
+ 'raw_request') as mock_get:
+ mock_get.side_effect = raw_request
+ self.client.get('fake_url')
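
The two test classes added here only assert on behaviour observable through get_headers() and raw_request(): the microversion header is absent until set_api_microversion() is called, after which it carries the requested version. A rough sketch of that contract (illustrative only; the actual BaseMicroversionClient lives in tempest/services/base_microversion_client.py and is not part of this hunk)::

    class MicroversionHeaderSketch(object):
        """Minimal stand-in mirroring what the tests above check."""

        def __init__(self, api_microversion_header_name):
            self.api_microversion_header_name = api_microversion_header_name
            self.api_microversion = None

        def set_api_microversion(self, version):
            self.api_microversion = version

        def get_headers(self):
            headers = {}
            # No header until a microversion has been set
            # (TestClientWithoutMicroversionHeader); afterwards the header
            # carries the requested version, e.g. '2.2'
            # (TestClientWithMicroversionHeader).
            if self.api_microversion is not None:
                headers[self.api_microversion_header_name] = (
                    self.api_microversion)
            return headers
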
diff --git a/tempest/tests/test_decorators.py b/tempest/tests/test_decorators.py
index ce3eb7e..98b045a 100644
--- a/tempest/tests/test_decorators.py
+++ b/tempest/tests/test_decorators.py
@@ -140,7 +140,7 @@
self.fail('%s is not listed in the valid service tag list'
% service)
except KeyError:
- # NOTE(mtreinish): This condition is to test for a entry in
+ # NOTE(mtreinish): This condition is to test for an entry in
# the outer decorator list but not in the service_list dict.
# However, because we're looping over the service_list dict
# it's unlikely we'll trigger this. So manual review is still
diff --git a/tempest/thirdparty/README.rst b/tempest/thirdparty/README.rst
deleted file mode 100644
index b0bfdf7..0000000
--- a/tempest/thirdparty/README.rst
+++ /dev/null
@@ -1,35 +0,0 @@
-.. _third_party_field_guide:
-
-Tempest Field Guide to Third Party API tests
-============================================
-
-
-What are these tests?
----------------------
-
-Third party tests are tests for non-native OpenStack APIs that are
-part of OpenStack projects. If we ship an API, we are required
-to ensure that it works.
-
-An example is that Nova Compute currently has EC2 API support in tree,
-which should be tested as part of the normal process.
-
-
-Why are these tests in tempest?
--------------------------------
-
-If we ship an API in an OpenStack component, there should be tests in
-tempest to exercise it in some way.
-
-
-Scope of these tests
---------------------
-
-Third party API testing should be limited to the functional testing of
-third party API compliance. Complex scenarios should be avoided, and
-instead exercised with the OpenStack API, unless the third party API
-can't be tested without those scenarios.
-
-Whenever possible third party API testing should use a client as close
-to the third party API as possible. The point of these tests is API
-validation.
diff --git a/tempest/thirdparty/boto/__init__.py b/tempest/thirdparty/boto/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tempest/thirdparty/boto/__init__.py
+++ /dev/null
diff --git a/tempest/thirdparty/boto/test.py b/tempest/thirdparty/boto/test.py
deleted file mode 100644
index cfd3747..0000000
--- a/tempest/thirdparty/boto/test.py
+++ /dev/null
@@ -1,694 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import contextlib
-import logging as orig_logging
-import os
-import re
-
-import boto
-from boto import ec2
-from boto import exception
-from boto import s3
-from oslo_log import log as logging
-import six
-from six.moves.urllib import parse as urlparse
-from tempest_lib import exceptions as lib_exc
-
-from tempest.common import credentials_factory as credentials
-from tempest.common.utils import file_utils
-from tempest import config
-from tempest import exceptions
-import tempest.test
-from tempest.thirdparty.boto.utils import wait
-
-CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
-
-def decision_maker():
- A_I_IMAGES_READY = True # ari,ami,aki
- S3_CAN_CONNECT_ERROR = None
- EC2_CAN_CONNECT_ERROR = None
- secret_matcher = re.compile("[A-Za-z0-9+/]{32,}") # 40 in other system
- id_matcher = re.compile("[A-Za-z0-9]{20,}")
-
- def all_read(*args):
- return all(map(file_utils.have_effective_read_access, args))
-
- materials_path = CONF.boto.s3_materials_path
- ami_path = materials_path + os.sep + CONF.boto.ami_manifest
- aki_path = materials_path + os.sep + CONF.boto.aki_manifest
- ari_path = materials_path + os.sep + CONF.boto.ari_manifest
-
- A_I_IMAGES_READY = all_read(ami_path, aki_path, ari_path)
- boto_logger = logging.getLogger('boto')
- level = boto_logger.logger.level
- # suppress logging for boto
- boto_logger.logger.setLevel(orig_logging.CRITICAL)
-
- def _cred_sub_check(connection_data):
- if not id_matcher.match(connection_data["aws_access_key_id"]):
- raise Exception("Invalid AWS access Key")
- if not secret_matcher.match(connection_data["aws_secret_access_key"]):
- raise Exception("Invalid AWS secret Key")
- raise Exception("Unknown (Authentication?) Error")
- # NOTE(andreaf) Setting up an extra manager here is redundant,
- # and should be removed.
- openstack = credentials.ConfiguredUserManager()
- try:
- if urlparse.urlparse(CONF.boto.ec2_url).hostname is None:
- raise Exception("Failed to get hostname from the ec2_url")
- ec2client = openstack.ec2api_client
- try:
- ec2client.get_all_regions()
- except exception.BotoServerError as exc:
- if exc.error_code is None:
- raise Exception("EC2 target does not look like an EC2 service")
- _cred_sub_check(ec2client.connection_data)
-
- except lib_exc.Unauthorized:
- EC2_CAN_CONNECT_ERROR = "AWS credentials not set," +\
- " also failed to get them from keystone"
- except Exception as exc:
- EC2_CAN_CONNECT_ERROR = str(exc)
-
- try:
- if urlparse.urlparse(CONF.boto.s3_url).hostname is None:
- raise Exception("Failed to get hostname from the s3_url")
- s3client = openstack.s3_client
- try:
- s3client.get_bucket("^INVALID*#()@INVALID.")
- except exception.BotoServerError as exc:
- if exc.status == 403:
- _cred_sub_check(s3client.connection_data)
- except Exception as exc:
- S3_CAN_CONNECT_ERROR = str(exc)
- except lib_exc.Unauthorized:
- S3_CAN_CONNECT_ERROR = "AWS credentials not set," +\
- " failed to get them even by keystoneclient"
- boto_logger.logger.setLevel(level)
- return {'A_I_IMAGES_READY': A_I_IMAGES_READY,
- 'S3_CAN_CONNECT_ERROR': S3_CAN_CONNECT_ERROR,
- 'EC2_CAN_CONNECT_ERROR': EC2_CAN_CONNECT_ERROR}
-
-
-class BotoExceptionMatcher(object):
- STATUS_RE = r'[45]\d\d'
- CODE_RE = '.*' # regexp makes sense in group match
-
- def match(self, exc):
- """Check boto exception
-
- :returns: An error string if it does not match,
- or None when it matches.
- """
- if not isinstance(exc, exception.BotoServerError):
- return "%r is not a BotoServerError instance" % exc
- LOG.info("Status: %s , error_code: %s", exc.status, exc.error_code)
- if re.match(self.STATUS_RE, str(exc.status)) is None:
- return ("Status code (%s) does not match "
- "the expected re pattern \"%s\""
- % (exc.status, self.STATUS_RE))
- if re.match(self.CODE_RE, str(exc.error_code)) is None:
- return ("Error code (%s) does not match "
- "the expected re pattern \"%s\""
- % (exc.error_code, self.CODE_RE))
- return None
-
-
-class ClientError(BotoExceptionMatcher):
- STATUS_RE = r'4\d\d'
-
-
-class ServerError(BotoExceptionMatcher):
- STATUS_RE = r'5\d\d'
-
-
-def _add_matcher_class(error_cls, error_data, base=BotoExceptionMatcher):
- """Usable for adding ExceptionMatcher(s) into the exception tree.
-
- Non-leaf elements do a wildcard match.
- """
- # only literal characters and '.' are expected in error_code
- if not isinstance(error_data, six.string_types):
- (error_code, status_code) = map(str, error_data)
- else:
- status_code = None
- error_code = error_data
- parts = error_code.split('.')
- basematch = ""
- num_parts = len(parts)
- max_index = num_parts - 1
- add_cls = error_cls
- for i_part in six.moves.xrange(num_parts):
- part = parts[i_part]
- leaf = i_part == max_index
- if not leaf:
- match = basematch + part + "[.].*"
- else:
- match = basematch + part
-
- basematch += part + "[.]"
- if not hasattr(add_cls, part):
- cls_dict = {"CODE_RE": match}
- if leaf and status_code is not None:
- cls_dict["STATUS_RE"] = status_code
- cls = type(part, (base, ), cls_dict)
- setattr(add_cls, part, cls())
- add_cls = cls
- elif leaf:
- raise LookupError("Tries to redefine an error code \"%s\"" % part)
- else:
- add_cls = getattr(add_cls, part)
-
-
-# TODO(afazekas): classmethod handling
-def friendly_function_name_simple(call_able):
- name = ""
- if hasattr(call_able, "im_class"):
- name += call_able.im_class.__name__ + "."
- name += call_able.__name__
- return name
-
-
-def friendly_function_call_str(call_able, *args, **kwargs):
- string = friendly_function_name_simple(call_able)
- string += "(" + ", ".join(map(str, args))
- if len(kwargs):
- if len(args):
- string += ", "
- string += ", ".join("=".join(map(str, (key, value)))
- for (key, value) in kwargs.items())
- return string + ")"
-
-
-class BotoTestCase(tempest.test.BaseTestCase):
- """Recommended base class for boto-related tests."""
-
- credentials = ['primary']
-
- @classmethod
- def skip_checks(cls):
- super(BotoTestCase, cls).skip_checks()
- if not CONF.compute_feature_enabled.ec2_api:
- raise cls.skipException("The EC2 API is not available")
- if not CONF.identity_feature_enabled.api_v2 or \
- not CONF.identity.auth_version == 'v2':
- raise cls.skipException("Identity v2 is not available")
-
- @classmethod
- def resource_setup(cls):
- super(BotoTestCase, cls).resource_setup()
- cls.conclusion = decision_maker()
- # The trash contains cleanup functions and parameters in tuples
- # (function, *args, **kwargs)
- cls._resource_trash_bin = {}
- cls._sequence = -1
- if (hasattr(cls, "EC2") and
- cls.conclusion['EC2_CAN_CONNECT_ERROR'] is not None):
- raise cls.skipException("EC2 " + cls.__name__ + ": " +
- cls.conclusion['EC2_CAN_CONNECT_ERROR'])
- if (hasattr(cls, "S3") and
- cls.conclusion['S3_CAN_CONNECT_ERROR'] is not None):
- raise cls.skipException("S3 " + cls.__name__ + ": " +
- cls.conclusion['S3_CAN_CONNECT_ERROR'])
-
- @classmethod
- def addResourceCleanUp(cls, function, *args, **kwargs):
- """Adds CleanUp callable, used by tearDownClass.
-
- It is recommended to use a (deep)copy of any mutable args.
- """
- cls._sequence = cls._sequence + 1
- cls._resource_trash_bin[cls._sequence] = (function, args, kwargs)
- return cls._sequence
-
- @classmethod
- def cancelResourceCleanUp(cls, key):
- """Cancel Clean up request."""
- del cls._resource_trash_bin[key]
-
- # TODO(afazekas): Add "with" context handling
- def assertBotoError(self, excMatcher, callableObj,
- *args, **kwargs):
- """Example usage:
-
- self.assertBotoError(self.ec2_error_code.client.
- InvalidKeyPair.Duplicate,
- self.client.create_keypair,
- key_name)
- """
- try:
- callableObj(*args, **kwargs)
- except exception.BotoServerError as exc:
- error_msg = excMatcher.match(exc)
- if error_msg is not None:
- raise self.failureException(error_msg)
- else:
- raise self.failureException("BotoServerError not raised")
-
- @classmethod
- def resource_cleanup(cls):
- """Calls the callables added by addResourceCleanUp
-
- When you override this method, remember to call this base implementation too.
- """
- fail_count = 0
- trash_keys = sorted(cls._resource_trash_bin, reverse=True)
- for key in trash_keys:
- (function, pos_args, kw_args) = cls._resource_trash_bin[key]
- try:
- func_name = friendly_function_call_str(function, *pos_args,
- **kw_args)
- LOG.debug("Cleaning up: %s" % func_name)
- function(*pos_args, **kw_args)
- except BaseException:
- fail_count += 1
- LOG.exception("Cleanup failed %s" % func_name)
- finally:
- del cls._resource_trash_bin[key]
- super(BotoTestCase, cls).resource_cleanup()
- # NOTE(afazekas): let the super be called even on exceptions.
- # The real exceptions are already logged, so if the super throws
- # another one, it does not cause hidden issues
- if fail_count:
- raise exceptions.TearDownException(num=fail_count)
-
- ec2_error_code = BotoExceptionMatcher()
- # InsufficientInstanceCapacity can be both server and client error
- ec2_error_code.server = ServerError()
- ec2_error_code.client = ClientError()
- s3_error_code = BotoExceptionMatcher()
- s3_error_code.server = ServerError()
- s3_error_code.client = ClientError()
- valid_image_state = set(('available', 'pending', 'failed'))
- # NOTE(afazekas): 'paused' is not a valid status in EC2, but it does not
- # have a good mapping: the instance still uses memory but is not really
- # a running machine
- valid_instance_state = set(('pending', 'running', 'shutting-down',
- 'terminated', 'stopping', 'stopped', 'paused'))
- valid_volume_status = set(('creating', 'available', 'in-use',
- 'deleting', 'deleted', 'error'))
- valid_snapshot_status = set(('pending', 'completed', 'error'))
-
- gone_set = set(('_GONE',))
-
- @classmethod
- def get_lfunction_gone(cls, obj):
- # NOTE: If the object is an instance of a well-known type, return the
- # corresponding status function; otherwise assume the obj itself is
- # the function.
- ec = cls.ec2_error_code
- if isinstance(obj, ec2.instance.Instance):
- colusure_matcher = ec.client.InvalidInstanceID.NotFound
- status_attr = "state"
- elif isinstance(obj, ec2.image.Image):
- colusure_matcher = ec.client.InvalidAMIID.NotFound
- status_attr = "state"
- elif isinstance(obj, ec2.snapshot.Snapshot):
- colusure_matcher = ec.client.InvalidSnapshot.NotFound
- status_attr = "status"
- elif isinstance(obj, ec2.volume.Volume):
- colusure_matcher = ec.client.InvalidVolume.NotFound
- status_attr = "status"
- else:
- return obj
-
- def _status():
- try:
- obj.update(validate=True)
- except ValueError:
- return "_GONE"
- except exception.EC2ResponseError as exc:
- if colusure_matcher.match(exc) is None:
- return "_GONE"
- else:
- raise
- return getattr(obj, status_attr)
-
- return _status
-
- def state_wait_gone(self, lfunction, final_set, valid_set):
- if not isinstance(final_set, set):
- final_set = set((final_set,))
- final_set |= self.gone_set
- lfunction = self.get_lfunction_gone(lfunction)
- state = wait.state_wait(lfunction, final_set, valid_set)
- self.assertIn(state, valid_set | self.gone_set)
- return state
-
- def waitImageState(self, lfunction, wait_for):
- return self.state_wait_gone(lfunction, wait_for,
- self.valid_image_state)
-
- def waitInstanceState(self, lfunction, wait_for):
- return self.state_wait_gone(lfunction, wait_for,
- self.valid_instance_state)
-
- def waitSnapshotStatus(self, lfunction, wait_for):
- return self.state_wait_gone(lfunction, wait_for,
- self.valid_snapshot_status)
-
- def waitVolumeStatus(self, lfunction, wait_for):
- return self.state_wait_gone(lfunction, wait_for,
- self.valid_volume_status)
-
- def assertImageStateWait(self, lfunction, wait_for):
- state = self.waitImageState(lfunction, wait_for)
- self.assertIn(state, wait_for)
-
- def assertInstanceStateWait(self, lfunction, wait_for):
- state = self.waitInstanceState(lfunction, wait_for)
- self.assertIn(state, wait_for)
-
- def assertVolumeStatusWait(self, lfunction, wait_for):
- state = self.waitVolumeStatus(lfunction, wait_for)
- self.assertIn(state, wait_for)
-
- def assertSnapshotStatusWait(self, lfunction, wait_for):
- state = self.waitSnapshotStatus(lfunction, wait_for)
- self.assertIn(state, wait_for)
-
- def assertAddressDisassociatedWait(self, address):
-
- def _disassociate():
- cli = self.ec2_client
- addresses = cli.get_all_addresses(addresses=(address.public_ip,))
- if len(addresses) != 1:
- return "INVALID"
- if addresses[0].instance_id:
- LOG.info("%s associated to %s",
- address.public_ip,
- addresses[0].instance_id)
- return "ASSOCIATED"
- return "DISASSOCIATED"
-
- state = wait.state_wait(_disassociate, "DISASSOCIATED",
- set(("ASSOCIATED", "DISASSOCIATED")))
- self.assertEqual(state, "DISASSOCIATED")
-
- def assertAddressReleasedWait(self, address):
-
- def _address_delete():
- # NOTE(afazekas): the filter gives back the IP
- # even if it is not associated with my tenant
- if (address.public_ip not in map(lambda a: a.public_ip,
- self.ec2_client.get_all_addresses())):
- return "DELETED"
- return "NOTDELETED"
-
- state = wait.state_wait(_address_delete, "DELETED")
- self.assertEqual(state, "DELETED")
-
- def assertReSearch(self, regexp, string):
- if re.search(regexp, string) is None:
- raise self.failureException("regexp: '%s' not found in '%s'" %
- (regexp, string))
-
- def assertNotReSearch(self, regexp, string):
- if re.search(regexp, string) is not None:
- raise self.failureException("regexp: '%s' found in '%s'" %
- (regexp, string))
-
- def assertReMatch(self, regexp, string):
- if re.match(regexp, string) is None:
- raise self.failureException("regexp: '%s' not matches on '%s'" %
- (regexp, string))
-
- def assertNotReMatch(self, regexp, string):
- if re.match(regexp, string) is not None:
- raise self.failureException("regexp: '%s' matches on '%s'" %
- (regexp, string))
-
- @classmethod
- def destroy_bucket(cls, connection_data, bucket):
- """Destroys the bucket and its content, just for teardown."""
- exc_num = 0
- try:
- with contextlib.closing(
- boto.connect_s3(**connection_data)) as conn:
- if isinstance(bucket, basestring):
- bucket = conn.lookup(bucket)
- assert isinstance(bucket, s3.bucket.Bucket)
- for obj in bucket.list():
- try:
- bucket.delete_key(obj.key)
- obj.close()
- except BaseException:
- LOG.exception("Failed to delete key %s " % obj.key)
- exc_num += 1
- conn.delete_bucket(bucket)
- except BaseException:
- LOG.exception("Failed to destroy bucket %s " % bucket)
- exc_num += 1
- if exc_num:
- raise exceptions.TearDownException(num=exc_num)
-
- @classmethod
- def destroy_reservation(cls, reservation):
- """Terminate instances in a reservation, just for teardown."""
- exc_num = 0
-
- def _instance_state():
- try:
- instance.update(validate=True)
- except ValueError:
- return "_GONE"
- except exception.EC2ResponseError as exc:
- if cls.ec2_error_code.\
- client.InvalidInstanceID.NotFound.match(exc) is None:
- return "_GONE"
- # NOTE(afazekas): incorrect code,
- # but the resource must be destroyed
- if exc.error_code == "InstanceNotFound":
- return "_GONE"
-
- return instance.state
-
- for instance in reservation.instances:
- try:
- instance.terminate()
- wait.re_search_wait(_instance_state, "_GONE")
- except BaseException:
- LOG.exception("Failed to terminate instance %s " % instance)
- exc_num += 1
- if exc_num:
- raise exceptions.TearDownException(num=exc_num)
-
- # NOTE(afazekas): The incorrect ErrorCodes make it very difficult
- # to write a better teardown
-
- @classmethod
- def destroy_security_group_wait(cls, group):
- """Delete group.
-
- Use just for teardown!
- """
- # NOTE(afazekas): should wait/try until all related instances terminate
- group.delete()
-
- @classmethod
- def destroy_volume_wait(cls, volume):
- """Delete volume, tries to detach first.
-
- Use just for teardown!
- """
- exc_num = 0
- snaps = volume.snapshots()
- if len(snaps):
- LOG.critical("%s Volume has %s snapshot(s)", volume.id,
- [snap.id for snap in snaps])
-
- # NOTE(afazekas): detaching/attaching are not valid EC2 statuses
- def _volume_state():
- volume.update(validate=True)
- try:
- # NOTE(gmann): Make sure volume is attached.
- # Checking status as 'not "available"' is not enough to make
- # sure volume is attached as it can be in "error" state
- if volume.status == "in-use":
- volume.detach(force=True)
- except BaseException:
- LOG.exception("Failed to detach volume %s" % volume)
- # exc_num += 1 "nonlocal" not in python2
- return volume.status
-
- try:
- wait.re_search_wait(_volume_state, "available")
- # does not validate the status
- LOG.info(_volume_state())
- volume.delete()
- except BaseException:
- LOG.exception("Failed to delete volume %s" % volume)
- exc_num += 1
- if exc_num:
- raise exceptions.TearDownException(num=exc_num)
-
- @classmethod
- def destroy_snapshot_wait(cls, snapshot):
- """delete snapshot, wait until it ceases to exist."""
- snapshot.delete()
-
- def _update():
- snapshot.update(validate=True)
-
- wait.wait_exception(_update)
-
-
-# you can specify tuples if you want to specify the status pattern
-for code in ('AddressLimitExceeded', 'AttachmentLimitExceeded', 'AuthFailure',
- 'Blocked', 'CustomerGatewayLimitExceeded', 'DependencyViolation',
- 'DiskImageSizeTooLarge', 'FilterLimitExceeded',
- 'Gateway.NotAttached', 'IdempotentParameterMismatch',
- 'IncorrectInstanceState', 'IncorrectState',
- 'InstanceLimitExceeded', 'InsufficientInstanceCapacity',
- 'InsufficientReservedInstancesCapacity',
- 'InternetGatewayLimitExceeded', 'InvalidAMIAttributeItemValue',
- 'InvalidAMIID.Malformed', 'InvalidAMIID.NotFound',
- 'InvalidAMIID.Unavailable', 'InvalidAssociationID.NotFound',
- 'InvalidAttachment.NotFound', 'InvalidConversionTaskId',
- 'InvalidCustomerGateway.DuplicateIpAddress',
- 'InvalidCustomerGatewayID.NotFound', 'InvalidDevice.InUse',
- 'InvalidDhcpOptionsID.NotFound', 'InvalidFormat',
- 'InvalidFilter', 'InvalidGatewayID.NotFound',
- 'InvalidGroup.Duplicate', 'InvalidGroupId.Malformed',
- 'InvalidGroup.InUse', 'InvalidGroup.NotFound',
- 'InvalidGroup.Reserved', 'InvalidInstanceID.Malformed',
- 'InvalidInstanceID.NotFound',
- 'InvalidInternetGatewayID.NotFound', 'InvalidIPAddress.InUse',
- 'InvalidKeyPair.Duplicate', 'InvalidKeyPair.Format',
- 'InvalidKeyPair.NotFound', 'InvalidManifest',
- 'InvalidNetworkAclEntry.NotFound',
- 'InvalidNetworkAclID.NotFound', 'InvalidParameterCombination',
- 'InvalidParameterValue', 'InvalidPermission.Duplicate',
- 'InvalidPermission.Malformed', 'InvalidReservationID.Malformed',
- 'InvalidReservationID.NotFound', 'InvalidRoute.NotFound',
- 'InvalidRouteTableID.NotFound',
- 'InvalidSecurity.RequestHasExpired',
- 'InvalidSnapshotID.Malformed', 'InvalidSnapshot.NotFound',
- 'InvalidUserID.Malformed', 'InvalidReservedInstancesId',
- 'InvalidReservedInstancesOfferingId',
- 'InvalidSubnetID.NotFound', 'InvalidVolumeID.Duplicate',
- 'InvalidVolumeID.Malformed', 'InvalidVolumeID.ZoneMismatch',
- 'InvalidVolume.NotFound', 'InvalidVpcID.NotFound',
- 'InvalidVpnConnectionID.NotFound',
- 'InvalidVpnGatewayID.NotFound',
- 'InvalidZone.NotFound', 'LegacySecurityGroup',
- 'MissingParameter', 'NetworkAclEntryAlreadyExists',
- 'NetworkAclEntryLimitExceeded', 'NetworkAclLimitExceeded',
- 'NonEBSInstance', 'PendingSnapshotLimitExceeded',
- 'PendingVerification', 'OptInRequired', 'RequestLimitExceeded',
- 'ReservedInstancesLimitExceeded', 'Resource.AlreadyAssociated',
- 'ResourceLimitExceeded', 'RouteAlreadyExists',
- 'RouteLimitExceeded', 'RouteTableLimitExceeded',
- 'RulesPerSecurityGroupLimitExceeded',
- 'SecurityGroupLimitExceeded',
- 'SecurityGroupsPerInstanceLimitExceeded',
- 'SnapshotLimitExceeded', 'SubnetLimitExceeded',
- 'UnknownParameter', 'UnsupportedOperation',
- 'VolumeLimitExceeded', 'VpcLimitExceeded',
- 'VpnConnectionLimitExceeded',
- 'VpnGatewayAttachmentLimitExceeded', 'VpnGatewayLimitExceeded'):
- _add_matcher_class(BotoTestCase.ec2_error_code.client,
- code, base=ClientError)
-
-for code in ('InsufficientAddressCapacity', 'InsufficientInstanceCapacity',
- 'InsufficientReservedInstanceCapacity', 'InternalError',
- 'Unavailable'):
- _add_matcher_class(BotoTestCase.ec2_error_code.server,
- code, base=ServerError)
-
-
-for code in (('AccessDenied', 403),
- ('AccountProblem', 403),
- ('AmbiguousGrantByEmailAddress', 400),
- ('BadDigest', 400),
- ('BucketAlreadyExists', 409),
- ('BucketAlreadyOwnedByYou', 409),
- ('BucketNotEmpty', 409),
- ('CredentialsNotSupported', 400),
- ('CrossLocationLoggingProhibited', 403),
- ('EntityTooSmall', 400),
- ('EntityTooLarge', 400),
- ('ExpiredToken', 400),
- ('IllegalVersioningConfigurationException', 400),
- ('IncompleteBody', 400),
- ('IncorrectNumberOfFilesInPostRequest', 400),
- ('InlineDataTooLarge', 400),
- ('InvalidAccessKeyId', 403),
- 'InvalidAddressingHeader',
- ('InvalidArgument', 400),
- ('InvalidBucketName', 400),
- ('InvalidBucketState', 409),
- ('InvalidDigest', 400),
- ('InvalidLocationConstraint', 400),
- ('InvalidPart', 400),
- ('InvalidPartOrder', 400),
- ('InvalidPayer', 403),
- ('InvalidPolicyDocument', 400),
- ('InvalidRange', 416),
- ('InvalidRequest', 400),
- ('InvalidSecurity', 403),
- ('InvalidSOAPRequest', 400),
- ('InvalidStorageClass', 400),
- ('InvalidTargetBucketForLogging', 400),
- ('InvalidToken', 400),
- ('InvalidURI', 400),
- ('KeyTooLong', 400),
- ('MalformedACLError', 400),
- ('MalformedPOSTRequest', 400),
- ('MalformedXML', 400),
- ('MaxMessageLengthExceeded', 400),
- ('MaxPostPreDataLengthExceededError', 400),
- ('MetadataTooLarge', 400),
- ('MethodNotAllowed', 405),
- ('MissingAttachment'),
- ('MissingContentLength', 411),
- ('MissingRequestBodyError', 400),
- ('MissingSecurityElement', 400),
- ('MissingSecurityHeader', 400),
- ('NoLoggingStatusForKey', 400),
- ('NoSuchBucket', 404),
- ('NoSuchKey', 404),
- ('NoSuchLifecycleConfiguration', 404),
- ('NoSuchUpload', 404),
- ('NoSuchVersion', 404),
- ('NotSignedUp', 403),
- ('NotSuchBucketPolicy', 404),
- ('OperationAborted', 409),
- ('PermanentRedirect', 301),
- ('PreconditionFailed', 412),
- ('Redirect', 307),
- ('RequestIsNotMultiPartContent', 400),
- ('RequestTimeout', 400),
- ('RequestTimeTooSkewed', 403),
- ('RequestTorrentOfBucketError', 400),
- ('SignatureDoesNotMatch', 403),
- ('TemporaryRedirect', 307),
- ('TokenRefreshRequired', 400),
- ('TooManyBuckets', 400),
- ('UnexpectedContent', 400),
- ('UnresolvableGrantByEmailAddress', 400),
- ('UserKeyMustBeSpecified', 400)):
- _add_matcher_class(BotoTestCase.s3_error_code.client,
- code, base=ClientError)
-
-
-for code in (('InternalError', 500),
- ('NotImplemented', 501),
- ('ServiceUnavailable', 503),
- ('SlowDown', 503)):
- _add_matcher_class(BotoTestCase.s3_error_code.server,
- code, base=ServerError)
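
To see what the _add_matcher_class() loops above produce: for a plain string code such as 'InvalidKeyPair.Duplicate' the non-leaf part gets a wildcard CODE_RE and the leaf gets an exact one, so assertBotoError() can match either the whole family or one specific EC2 error. A sketch of the resulting attributes, derived from the removed code above and shown only as an illustration::

    # After the loops run, the matcher tree contains (among many others):
    #   ec2_error_code.client.InvalidKeyPair            CODE_RE = 'InvalidKeyPair[.].*'
    #   ec2_error_code.client.InvalidKeyPair.Duplicate  CODE_RE = 'InvalidKeyPair[.]Duplicate'
    # Both inherit STATUS_RE = r'4\d\d' from ClientError.
    matcher = BotoTestCase.ec2_error_code.client.InvalidKeyPair.Duplicate
    # matcher.match(exc) returns None for a BotoServerError with a 4xx
    # status and error_code 'InvalidKeyPair.Duplicate', and an error string
    # otherwise -- which is exactly what assertBotoError() relies on.
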
diff --git a/tempest/thirdparty/boto/test_ec2_instance_run.py b/tempest/thirdparty/boto/test_ec2_instance_run.py
deleted file mode 100644
index 6c1b362..0000000
--- a/tempest/thirdparty/boto/test_ec2_instance_run.py
+++ /dev/null
@@ -1,363 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import log as logging
-
-from tempest.common.utils import data_utils
-from tempest.common.utils.linux import remote_client
-from tempest import config
-from tempest import exceptions
-from tempest import test
-from tempest.thirdparty.boto import test as boto_test
-from tempest.thirdparty.boto.utils import s3
-from tempest.thirdparty.boto.utils import wait
-
-CONF = config.CONF
-
-LOG = logging.getLogger(__name__)
-
-
-class InstanceRunTest(boto_test.BotoTestCase):
-
- @classmethod
- def setup_clients(cls):
- super(InstanceRunTest, cls).setup_clients()
- cls.s3_client = cls.os.s3_client
- cls.ec2_client = cls.os.ec2api_client
-
- @classmethod
- def resource_setup(cls):
- super(InstanceRunTest, cls).resource_setup()
- if not cls.conclusion['A_I_IMAGES_READY']:
- raise cls.skipException("".join(("EC2 ", cls.__name__,
- ": requires ami/aki/ari manifest")))
- cls.zone = CONF.boto.aws_zone
- cls.materials_path = CONF.boto.s3_materials_path
- ami_manifest = CONF.boto.ami_manifest
- aki_manifest = CONF.boto.aki_manifest
- ari_manifest = CONF.boto.ari_manifest
- cls.instance_type = CONF.boto.instance_type
- cls.bucket_name = data_utils.rand_name("s3bucket")
- cls.keypair_name = data_utils.rand_name("keypair")
- cls.keypair = cls.ec2_client.create_key_pair(cls.keypair_name)
- cls.addResourceCleanUp(cls.ec2_client.delete_key_pair,
- cls.keypair_name)
- bucket = cls.s3_client.create_bucket(cls.bucket_name)
- cls.addResourceCleanUp(cls.destroy_bucket,
- cls.s3_client.connection_data,
- cls.bucket_name)
- s3.s3_upload_dir(bucket, cls.materials_path)
- cls.images = {"ami":
- {"name": data_utils.rand_name("ami-name"),
- "location": cls.bucket_name + "/" + ami_manifest},
- "aki":
- {"name": data_utils.rand_name("aki-name"),
- "location": cls.bucket_name + "/" + aki_manifest},
- "ari":
- {"name": data_utils.rand_name("ari-name"),
- "location": cls.bucket_name + "/" + ari_manifest}}
- for image_type in ("aki", "ari"):
- image = cls.images[image_type]
- image["image_id"] = cls.ec2_client.register_image(
- name=image["name"],
- image_location=image["location"])
- cls.addResourceCleanUp(cls.ec2_client.deregister_image,
- image["image_id"])
- image = cls.images["ami"]
- image["image_id"] = cls.ec2_client.register_image(
- name=image["name"],
- image_location=image["location"],
- kernel_id=cls.images["aki"]["image_id"],
- ramdisk_id=cls.images["ari"]["image_id"])
- cls.addResourceCleanUp(cls.ec2_client.deregister_image,
- image["image_id"])
-
- for image in cls.images.itervalues():
- def _state():
- retr = cls.ec2_client.get_image(image["image_id"])
- return retr.state
- state = wait.state_wait(_state, "available")
- if state != "available":
- for _image in cls.images.itervalues():
- cls.ec2_client.deregister_image(_image["image_id"])
- raise exceptions.EC2RegisterImageException(
- image_id=image["image_id"])
-
- def _terminate_reservation(self, reservation, rcuk):
- for instance in reservation.instances:
- instance.terminate()
- for instance in reservation.instances:
- self.assertInstanceStateWait(instance, '_GONE')
- self.cancelResourceCleanUp(rcuk)
-
- @test.idempotent_id('c881fbb7-d56e-4054-9d76-1c3a60a207b0')
- def test_run_idempotent_instances(self):
- # EC2 run instances idempotently
-
- def _run_instance(client_token):
- reservation = self.ec2_client.run_instances(
- image_id=self.images["ami"]["image_id"],
- kernel_id=self.images["aki"]["image_id"],
- ramdisk_id=self.images["ari"]["image_id"],
- instance_type=self.instance_type,
- client_token=client_token)
- rcuk = self.addResourceCleanUp(self.destroy_reservation,
- reservation)
- return (reservation, rcuk)
-
- reservation_1, rcuk_1 = _run_instance('token_1')
- reservation_2, rcuk_2 = _run_instance('token_2')
- reservation_1a, rcuk_1a = _run_instance('token_1')
-
- self.assertIsNotNone(reservation_1)
- self.assertIsNotNone(reservation_2)
- self.assertIsNotNone(reservation_1a)
-
- # same reservation for token_1
- self.assertEqual(reservation_1.id, reservation_1a.id)
-
- # Cancel cleanup -- since it's a duplicate, it's
- # handled by rcuk1
- self.cancelResourceCleanUp(rcuk_1a)
-
- self._terminate_reservation(reservation_1, rcuk_1)
- self._terminate_reservation(reservation_2, rcuk_2)
-
- @test.idempotent_id('2ea26a39-f96c-48fc-8374-5c10ec184c67')
- def test_run_stop_terminate_instance(self):
- # EC2 run, stop and terminate instance
- image_ami = self.ec2_client.get_image(self.images["ami"]
- ["image_id"])
- reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
- ramdisk_id=self.images["ari"]["image_id"],
- instance_type=self.instance_type)
- rcuk = self.addResourceCleanUp(self.destroy_reservation, reservation)
-
- for instance in reservation.instances:
- LOG.info("state: %s", instance.state)
- if instance.state != "running":
- self.assertInstanceStateWait(instance, "running")
-
- for instance in reservation.instances:
- instance.stop()
- LOG.info("state: %s", instance.state)
- if instance.state != "stopped":
- self.assertInstanceStateWait(instance, "stopped")
-
- self._terminate_reservation(reservation, rcuk)
-
- @test.idempotent_id('3d77225a-5cec-4e54-a017-9ebf11a266e6')
- def test_run_stop_terminate_instance_with_tags(self):
- # EC2 run, stop and terminate instance with tags
- image_ami = self.ec2_client.get_image(self.images["ami"]
- ["image_id"])
- reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
- ramdisk_id=self.images["ari"]["image_id"],
- instance_type=self.instance_type)
- rcuk = self.addResourceCleanUp(self.destroy_reservation, reservation)
-
- for instance in reservation.instances:
- LOG.info("state: %s", instance.state)
- if instance.state != "running":
- self.assertInstanceStateWait(instance, "running")
- instance.add_tag('key1', value='value1')
-
- tags = self.ec2_client.get_all_tags()
- td = dict((item.name, item.value) for item in tags)
-
- self.assertIn('key1', td)
- self.assertEqual('value1', td['key1'])
-
- tags = self.ec2_client.get_all_tags(filters={'key': 'key1'})
- td = dict((item.name, item.value) for item in tags)
- self.assertIn('key1', td)
- self.assertEqual('value1', td['key1'])
-
- tags = self.ec2_client.get_all_tags(filters={'value': 'value1'})
- td = dict((item.name, item.value) for item in tags)
- self.assertIn('key1', td)
- self.assertEqual('value1', td['key1'])
-
- tags = self.ec2_client.get_all_tags(filters={'key': 'value2'})
- td = dict((item.name, item.value) for item in tags)
- self.assertNotIn('key1', td)
-
- for instance in reservation.instances:
- instance.remove_tag('key1', value='value1')
-
- tags = self.ec2_client.get_all_tags()
-
- # NOTE: Volume-attach and detach cause metadata (tags) to be created
- # for the volume. So exclude them while asserting.
- self.assertNotIn('key1', tags)
-
- for instance in reservation.instances:
- instance.stop()
- LOG.info("state: %s", instance.state)
- if instance.state != "stopped":
- self.assertInstanceStateWait(instance, "stopped")
-
- self._terminate_reservation(reservation, rcuk)
-
- @test.idempotent_id('252945b5-3294-4fda-ae21-928a42f63f76')
- def test_run_terminate_instance(self):
- # EC2 run, terminate immediately
- image_ami = self.ec2_client.get_image(self.images["ami"]
- ["image_id"])
- reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
- ramdisk_id=self.images["ari"]["image_id"],
- instance_type=self.instance_type)
-
- for instance in reservation.instances:
- instance.terminate()
- self.assertInstanceStateWait(instance, '_GONE')
-
- @test.idempotent_id('ab836c29-737b-4101-9fb9-87045eaf89e9')
- def test_compute_with_volumes(self):
- # EC2 1. integration test (not strict)
- image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
- sec_group_name = data_utils.rand_name("securitygroup")
- group_desc = sec_group_name + " security group description "
- security_group = self.ec2_client.create_security_group(sec_group_name,
- group_desc)
- self.addResourceCleanUp(self.destroy_security_group_wait,
- security_group)
- self.assertTrue(
- self.ec2_client.authorize_security_group(
- sec_group_name,
- ip_protocol="icmp",
- cidr_ip="0.0.0.0/0",
- from_port=-1,
- to_port=-1))
- self.assertTrue(
- self.ec2_client.authorize_security_group(
- sec_group_name,
- ip_protocol="tcp",
- cidr_ip="0.0.0.0/0",
- from_port=22,
- to_port=22))
- reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
- ramdisk_id=self.images["ari"]["image_id"],
- instance_type=self.instance_type,
- key_name=self.keypair_name,
- security_groups=(sec_group_name,))
-
- LOG.debug("Instance booted - state: %s",
- reservation.instances[0].state)
-
- self.addResourceCleanUp(self.destroy_reservation,
- reservation)
- volume = self.ec2_client.create_volume(CONF.volume.volume_size,
- self.zone)
- LOG.debug("Volume created - status: %s", volume.status)
-
- self.addResourceCleanUp(self.destroy_volume_wait, volume)
- instance = reservation.instances[0]
- if instance.state != "running":
- self.assertInstanceStateWait(instance, "running")
- LOG.debug("Instance now running - state: %s", instance.state)
-
- address = self.ec2_client.allocate_address()
- rcuk_a = self.addResourceCleanUp(address.delete)
- self.assertTrue(address.associate(instance.id))
-
- rcuk_da = self.addResourceCleanUp(address.disassociate)
- # TODO(afazekas): ping test. dependency/permission?
-
- self.assertVolumeStatusWait(volume, "available")
- # NOTE(afazekas): it may report 'available' before it is actually available
-
- ssh = remote_client.RemoteClient(address.public_ip,
- CONF.compute.ssh_user,
- pkey=self.keypair.material)
- text = data_utils.rand_name("Pattern text for console output")
- try:
- resp = ssh.write_to_console(text)
- except Exception:
- if not CONF.compute_feature_enabled.console_output:
- LOG.debug('Console output not supported, cannot log')
- else:
- console_output = instance.get_console_output().output
- LOG.debug('Console output for %s\nbody=\n%s',
- instance.id, console_output)
- raise
-
- self.assertFalse(resp)
-
- def _output():
- output = instance.get_console_output()
- return output.output
-
- wait.re_search_wait(_output, text)
- part_lines = ssh.get_partitions().split('\n')
- volume.attach(instance.id, "/dev/vdh")
-
- def _volume_state():
- """Return volume state realizing that 'in-use' is overloaded."""
- volume.update(validate=True)
- status = volume.status
- attached = volume.attach_data.status
- LOG.debug("Volume %s is in status: %s, attach_status: %s",
- volume.id, status, attached)
- # Nova reports 'in-use' on 'attaching' volumes because we
- # have a single volume status, and EC2 has 2. Ensure that
- # if we aren't attached yet we return something other than
- # 'in-use'
- if status == 'in-use' and attached != 'attached':
- return 'attaching'
- else:
- return status
-
- wait.re_search_wait(_volume_state, "in-use")
-
- # NOTE(afazekas): Different hypervisor backends name the devices
- # differently, so we just test whether the partition listing
- # increased or decreased
-
- def _part_state():
- current = ssh.get_partitions().split('\n')
- LOG.debug("Partition map for instance: %s", current)
- if current > part_lines:
- return 'INCREASE'
- if current < part_lines:
- return 'DECREASE'
- return 'EQUAL'
-
- wait.state_wait(_part_state, 'INCREASE')
- part_lines = ssh.get_partitions().split('\n')
-
- # TODO(afazekas): Resource compare to the flavor settings
-
- volume.detach()
-
- self.assertVolumeStatusWait(volume, "available")
-
- wait.state_wait(_part_state, 'DECREASE')
-
- instance.stop()
- address.disassociate()
- self.assertAddressDisassociatedWait(address)
- self.cancelResourceCleanUp(rcuk_da)
- address.release()
- self.assertAddressReleasedWait(address)
- self.cancelResourceCleanUp(rcuk_a)
-
- LOG.debug("Instance %s state: %s", instance.id, instance.state)
- if instance.state != "stopped":
- self.assertInstanceStateWait(instance, "stopped")
- # TODO(afazekas): move steps from teardown to the test case
-
-
-# TODO(afazekas): Snapshot/volume read/write test case
diff --git a/tempest/thirdparty/boto/test_ec2_keys.py b/tempest/thirdparty/boto/test_ec2_keys.py
deleted file mode 100644
index 1b58cb4..0000000
--- a/tempest/thirdparty/boto/test_ec2_keys.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.common.utils import data_utils
-from tempest import test
-from tempest.thirdparty.boto import test as boto_test
-
-
-def compare_key_pairs(a, b):
- return (a.name == b.name and
- a.fingerprint == b.fingerprint)
-
-
-class EC2KeysTest(boto_test.BotoTestCase):
-
- @classmethod
- def setup_clients(cls):
- super(EC2KeysTest, cls).setup_clients()
- cls.client = cls.os.ec2api_client
-
- @classmethod
- def resource_setup(cls):
- super(EC2KeysTest, cls).resource_setup()
- cls.ec = cls.ec2_error_code
-
-# TODO(afazekas): merge create, delete, get test cases
- @test.idempotent_id('54236804-01b7-4cfe-a6f9-bce1340feec8')
- def test_create_ec2_keypair(self):
- # EC2 create KeyPair
- key_name = data_utils.rand_name("keypair")
- self.addResourceCleanUp(self.client.delete_key_pair, key_name)
- keypair = self.client.create_key_pair(key_name)
- self.assertTrue(compare_key_pairs(keypair,
- self.client.get_key_pair(key_name)))
-
- @test.idempotent_id('3283b898-f90c-4952-b238-3e42b8c3f34f')
- def test_delete_ec2_keypair(self):
- # EC2 delete KeyPair
- key_name = data_utils.rand_name("keypair")
- self.client.create_key_pair(key_name)
- self.client.delete_key_pair(key_name)
- self.assertIsNone(self.client.get_key_pair(key_name))
-
- @test.idempotent_id('fd89bd26-4d4d-4cf3-a303-65dd9158fcdc')
- def test_get_ec2_keypair(self):
- # EC2 get KeyPair
- key_name = data_utils.rand_name("keypair")
- self.addResourceCleanUp(self.client.delete_key_pair, key_name)
- keypair = self.client.create_key_pair(key_name)
- self.assertTrue(compare_key_pairs(keypair,
- self.client.get_key_pair(key_name)))
-
- @test.idempotent_id('daa73da1-e11c-4558-8d76-a716be79a401')
- def test_duplicate_ec2_keypair(self):
- # EC2 duplicate KeyPair
- key_name = data_utils.rand_name("keypair")
- self.addResourceCleanUp(self.client.delete_key_pair, key_name)
- keypair = self.client.create_key_pair(key_name)
- self.assertBotoError(self.ec.client.InvalidKeyPair.Duplicate,
- self.client.create_key_pair,
- key_name)
- self.assertTrue(compare_key_pairs(keypair,
- self.client.get_key_pair(key_name)))
diff --git a/tempest/thirdparty/boto/test_ec2_security_groups.py b/tempest/thirdparty/boto/test_ec2_security_groups.py
deleted file mode 100644
index 594dc8b..0000000
--- a/tempest/thirdparty/boto/test_ec2_security_groups.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.common.utils import data_utils
-from tempest import test
-from tempest.thirdparty.boto import test as boto_test
-
-
-class EC2SecurityGroupTest(boto_test.BotoTestCase):
-
- @classmethod
- def setup_clients(cls):
- super(EC2SecurityGroupTest, cls).setup_clients()
- cls.client = cls.os.ec2api_client
-
- @test.idempotent_id('519b566e-0c38-4629-905e-7d6b6355f524')
- def test_create_authorize_security_group(self):
- # EC2 Create, authorize/revoke security group
- group_name = data_utils.rand_name("security_group")
- group_description = group_name + " security group description "
- group = self.client.create_security_group(group_name,
- group_description)
- self.addResourceCleanUp(self.client.delete_security_group, group_name)
- groups_get = self.client.get_all_security_groups(
- groupnames=(group_name,))
- self.assertEqual(len(groups_get), 1)
- group_get = groups_get[0]
- self.assertEqual(group.name, group_get.name)
- # ping (icmp_echo) and other ICMP traffic allowed from everywhere;
- # from_port and to_port act as the ICMP type
- success = self.client.authorize_security_group(group_name,
- ip_protocol="icmp",
- cidr_ip="0.0.0.0/0",
- from_port=-1,
- to_port=-1)
- self.assertTrue(success)
- # allow standard ssh port from anywhere
- success = self.client.authorize_security_group(group_name,
- ip_protocol="tcp",
- cidr_ip="0.0.0.0/0",
- from_port=22,
- to_port=22)
- self.assertTrue(success)
- # TODO(afazekas): Duplicate tests
- group_get = self.client.get_all_security_groups(
- groupnames=(group_name,))[0]
- # remove listed rules
- for ip_permission in group_get.rules:
- for cidr in ip_permission.grants:
- self.assertTrue(self.client.revoke_security_group(group_name,
- ip_protocol=ip_permission.ip_protocol,
- cidr_ip=cidr,
- from_port=ip_permission.from_port,
- to_port=ip_permission.to_port))
-
- group_get = self.client.get_all_security_groups(
- groupnames=(group_name,))[0]
- # all rules should be removed now
- self.assertEqual(0, len(group_get.rules))
diff --git a/tempest/thirdparty/boto/test_ec2_volumes.py b/tempest/thirdparty/boto/test_ec2_volumes.py
deleted file mode 100644
index 483d4c3..0000000
--- a/tempest/thirdparty/boto/test_ec2_volumes.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import log as logging
-
-from tempest import config
-from tempest import test
-from tempest.thirdparty.boto import test as boto_test
-
-CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
-
-def compare_volumes(a, b):
- return (a.id == b.id and
- a.size == b.size)
-
-
-class EC2VolumesTest(boto_test.BotoTestCase):
-
- @classmethod
- def skip_checks(cls):
- super(EC2VolumesTest, cls).skip_checks()
- if not CONF.service_available.cinder:
- skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
- raise cls.skipException(skip_msg)
-
- @classmethod
- def setup_clients(cls):
- super(EC2VolumesTest, cls).setup_clients()
- cls.client = cls.os.ec2api_client
-
- @classmethod
- def resource_setup(cls):
- super(EC2VolumesTest, cls).resource_setup()
- cls.zone = CONF.boto.aws_zone
-
- @test.idempotent_id('663f0077-c743-48ad-8ae0-46821cbc0918')
- def test_create_get_delete(self):
- # EC2 Create, get, delete Volume
- volume = self.client.create_volume(CONF.volume.volume_size, self.zone)
- cuk = self.addResourceCleanUp(self.client.delete_volume, volume.id)
- self.assertIn(volume.status, self.valid_volume_status)
- retrieved = self.client.get_all_volumes((volume.id,))
- self.assertEqual(1, len(retrieved))
- self.assertTrue(compare_volumes(volume, retrieved[0]))
- self.assertVolumeStatusWait(volume, "available")
- self.client.delete_volume(volume.id)
- self.cancelResourceCleanUp(cuk)
-
- @test.idempotent_id('c6b60d7a-1af7-4f8e-af21-d539d9496149')
- def test_create_volume_from_snapshot(self):
- # EC2 Create volume from snapshot
- volume = self.client.create_volume(CONF.volume.volume_size, self.zone)
- self.addResourceCleanUp(self.client.delete_volume, volume.id)
- self.assertVolumeStatusWait(volume, "available")
- snap = self.client.create_snapshot(volume.id)
- self.addResourceCleanUp(self.destroy_snapshot_wait, snap)
- self.assertSnapshotStatusWait(snap, "completed")
-
- svol = self.client.create_volume(CONF.volume.volume_size, self.zone,
- snapshot=snap)
- cuk = self.addResourceCleanUp(svol.delete)
- self.assertVolumeStatusWait(svol, "available")
- svol.delete()
- self.cancelResourceCleanUp(cuk)
diff --git a/tempest/thirdparty/boto/test_s3_buckets.py b/tempest/thirdparty/boto/test_s3_buckets.py
deleted file mode 100644
index f008973..0000000
--- a/tempest/thirdparty/boto/test_s3_buckets.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.common.utils import data_utils
-from tempest import test
-from tempest.thirdparty.boto import test as boto_test
-
-
-class S3BucketsTest(boto_test.BotoTestCase):
-
- @classmethod
- def setup_clients(cls):
- super(S3BucketsTest, cls).setup_clients()
- cls.client = cls.os.s3_client
-
- @test.idempotent_id('4678525d-8da0-4518-81c1-f1f67d595b00')
- def test_create_and_get_delete_bucket(self):
- # S3 Create, get and delete bucket
- bucket_name = data_utils.rand_name("s3bucket")
- cleanup_key = self.addResourceCleanUp(self.client.delete_bucket,
- bucket_name)
- bucket = self.client.create_bucket(bucket_name)
- self.assertTrue(bucket.name == bucket_name)
- bucket = self.client.get_bucket(bucket_name)
- self.assertTrue(bucket.name == bucket_name)
- self.client.delete_bucket(bucket_name)
- self.assertBotoError(self.s3_error_code.client.NoSuchBucket,
- self.client.get_bucket, bucket_name)
- self.cancelResourceCleanUp(cleanup_key)
diff --git a/tempest/thirdparty/boto/test_s3_ec2_images.py b/tempest/thirdparty/boto/test_s3_ec2_images.py
deleted file mode 100644
index c41c7ac..0000000
--- a/tempest/thirdparty/boto/test_s3_ec2_images.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-
-from tempest.common.utils import data_utils
-from tempest import config
-from tempest import test
-from tempest.thirdparty.boto import test as boto_test
-from tempest.thirdparty.boto.utils import s3
-
-CONF = config.CONF
-
-
-class S3ImagesTest(boto_test.BotoTestCase):
-
- @classmethod
- def setup_clients(cls):
- super(S3ImagesTest, cls).setup_clients()
- cls.s3_client = cls.os.s3_client
- cls.images_client = cls.os.ec2api_client
-
- @classmethod
- def resource_setup(cls):
- super(S3ImagesTest, cls).resource_setup()
- if not cls.conclusion['A_I_IMAGES_READY']:
- raise cls.skipException("".join(("EC2 ", cls.__name__,
- ": requires ami/aki/ari manifest")))
- cls.materials_path = CONF.boto.s3_materials_path
- cls.ami_manifest = CONF.boto.ami_manifest
- cls.aki_manifest = CONF.boto.aki_manifest
- cls.ari_manifest = CONF.boto.ari_manifest
- cls.ami_path = cls.materials_path + os.sep + cls.ami_manifest
- cls.aki_path = cls.materials_path + os.sep + cls.aki_manifest
- cls.ari_path = cls.materials_path + os.sep + cls.ari_manifest
- cls.bucket_name = data_utils.rand_name("bucket")
- bucket = cls.s3_client.create_bucket(cls.bucket_name)
- cls.addResourceCleanUp(cls.destroy_bucket,
- cls.s3_client.connection_data,
- cls.bucket_name)
- s3.s3_upload_dir(bucket, cls.materials_path)
-
- @test.idempotent_id('f9d360a5-0188-4c77-9db2-4c34c28d12a5')
- def test_register_get_deregister_ami_image(self):
- # Register and deregister ami image
- image = {"name": data_utils.rand_name("ami-name"),
- "location": self.bucket_name + "/" + self.ami_manifest,
- "type": "ami"}
- image["image_id"] = self.images_client.register_image(
- name=image["name"],
- image_location=image["location"])
- # NOTE(afazekas): delete_snapshot=True might trigger a boto lib bug
- image["cleanUp"] = self.addResourceCleanUp(
- self.images_client.deregister_image,
- image["image_id"])
- self.assertEqual(image["image_id"][0:3], image["type"])
- retrieved_image = self.images_client.get_image(image["image_id"])
- self.assertTrue(retrieved_image.name == image["name"])
- self.assertTrue(retrieved_image.id == image["image_id"])
- if retrieved_image.state != "available":
- self.assertImageStateWait(retrieved_image, "available")
- self.images_client.deregister_image(image["image_id"])
- self.assertNotIn(image["image_id"], str(
- self.images_client.get_all_images()))
- self.cancelResourceCleanUp(image["cleanUp"])
-
- @test.idempotent_id('42cca5b0-453b-4618-b99f-dbc039db426f')
- def test_register_get_deregister_aki_image(self):
- # Register and deregister aki image
- image = {"name": data_utils.rand_name("aki-name"),
- "location": self.bucket_name + "/" + self.aki_manifest,
- "type": "aki"}
- image["image_id"] = self.images_client.register_image(
- name=image["name"],
- image_location=image["location"])
- image["cleanUp"] = self.addResourceCleanUp(
- self.images_client.deregister_image,
- image["image_id"])
- self.assertEqual(image["image_id"][0:3], image["type"])
- retrieved_image = self.images_client.get_image(image["image_id"])
- self.assertTrue(retrieved_image.name == image["name"])
- self.assertTrue(retrieved_image.id == image["image_id"])
- self.assertIn(retrieved_image.state, self.valid_image_state)
- if retrieved_image.state != "available":
- self.assertImageStateWait(retrieved_image, "available")
- self.images_client.deregister_image(image["image_id"])
- self.assertNotIn(image["image_id"], str(
- self.images_client.get_all_images()))
- self.cancelResourceCleanUp(image["cleanUp"])
-
- @test.idempotent_id('1359e860-841c-43bb-80f3-bb389cbfd81d')
- def test_register_get_deregister_ari_image(self):
- # Register and deregister ari image
- image = {"name": data_utils.rand_name("ari-name"),
- "location": "/" + self.bucket_name + "/" + self.ari_manifest,
- "type": "ari"}
- image["image_id"] = self.images_client.register_image(
- name=image["name"],
- image_location=image["location"])
- image["cleanUp"] = self.addResourceCleanUp(
- self.images_client.deregister_image,
- image["image_id"])
- self.assertEqual(image["image_id"][0:3], image["type"])
- retrieved_image = self.images_client.get_image(image["image_id"])
- self.assertIn(retrieved_image.state, self.valid_image_state)
- if retrieved_image.state != "available":
- self.assertImageStateWait(retrieved_image, "available")
- self.assertIn(retrieved_image.state, self.valid_image_state)
- self.assertTrue(retrieved_image.name == image["name"])
- self.assertTrue(retrieved_image.id == image["image_id"])
- self.images_client.deregister_image(image["image_id"])
- self.cancelResourceCleanUp(image["cleanUp"])
-
-# TODO(afazekas): less copy-paste style
diff --git a/tempest/thirdparty/boto/test_s3_objects.py b/tempest/thirdparty/boto/test_s3_objects.py
deleted file mode 100644
index c42d85c..0000000
--- a/tempest/thirdparty/boto/test_s3_objects.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import contextlib
-
-import boto.s3.key
-
-from tempest.common.utils import data_utils
-from tempest import test
-from tempest.thirdparty.boto import test as boto_test
-
-
-class S3BucketsTest(boto_test.BotoTestCase):
-
- @classmethod
- def setup_clients(cls):
- super(S3BucketsTest, cls).setup_clients()
- cls.client = cls.os.s3_client
-
- @test.idempotent_id('4eea567a-b46a-405b-a475-6097e1faebde')
- def test_create_get_delete_object(self):
- # S3 Create, get and delete object
- bucket_name = data_utils.rand_name("s3bucket")
- object_name = data_utils.rand_name("s3object")
- content = 'x' * 42
- bucket = self.client.create_bucket(bucket_name)
- self.addResourceCleanUp(self.destroy_bucket,
- self.client.connection_data,
- bucket_name)
-
- self.assertTrue(bucket.name == bucket_name)
- with contextlib.closing(boto.s3.key.Key(bucket)) as key:
- key.key = object_name
- key.set_contents_from_string(content)
- readback = key.get_contents_as_string()
- self.assertTrue(readback == content)
- bucket.delete_key(key)
- self.assertBotoError(self.s3_error_code.client.NoSuchKey,
- key.get_contents_as_string)
diff --git a/tempest/thirdparty/boto/utils/__init__.py b/tempest/thirdparty/boto/utils/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tempest/thirdparty/boto/utils/__init__.py
+++ /dev/null
diff --git a/tempest/thirdparty/boto/utils/s3.py b/tempest/thirdparty/boto/utils/s3.py
deleted file mode 100644
index 55c1b0a..0000000
--- a/tempest/thirdparty/boto/utils/s3.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import contextlib
-import os
-import re
-
-import boto
-import boto.s3.key
-
-from oslo_log import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def s3_upload_dir(bucket, path, prefix="", connection_data=None):
- if isinstance(bucket, basestring):
- with contextlib.closing(boto.connect_s3(**connection_data)) as conn:
- bucket = conn.lookup(bucket)
- for root, dirs, files in os.walk(path):
- for fil in files:
- with contextlib.closing(boto.s3.key.Key(bucket)) as key:
- source = root + os.sep + fil
- target = re.sub("^" + re.escape(path) + "?/", prefix, source)
- if os.sep != '/':
- target = re.sub(re.escape(os.sep), '/', target)
- key.key = target
- LOG.info("Uploading %s to %s/%s", source, bucket.name, target)
- key.set_contents_from_filename(source)
diff --git a/tempest/thirdparty/boto/utils/wait.py b/tempest/thirdparty/boto/utils/wait.py
deleted file mode 100644
index 8771ed7..0000000
--- a/tempest/thirdparty/boto/utils/wait.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import re
-import time
-
-import boto.exception
-from oslo_log import log as logging
-import testtools
-
-from tempest import config
-
-CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
-
-def state_wait(lfunction, final_set=set(), valid_set=None):
- # TODO(afazekas): evaluate using ABC here
- if not isinstance(final_set, set):
- final_set = set((final_set,))
- if not isinstance(valid_set, set) and valid_set is not None:
- valid_set = set((valid_set,))
- start_time = time.time()
- old_status = status = lfunction()
- while True:
- if status != old_status:
- LOG.info('State transition "%s" ==> "%s" in %d seconds',
- old_status, status, time.time() - start_time)
- if status in final_set:
- return status
- if valid_set is not None and status not in valid_set:
- return status
- dtime = time.time() - start_time
- if dtime > CONF.boto.build_timeout:
- raise testtools.TestCase\
- .failureException("State change timeout exceeded! "
- '(%ds) While waiting '
- 'for %s at "%s"' %
- (dtime, final_set, status))
- time.sleep(CONF.boto.build_interval)
- old_status = status
- status = lfunction()
-
-
-def re_search_wait(lfunction, regexp):
- """Stops waiting on success."""
- start_time = time.time()
- while True:
- text = lfunction()
- result = re.search(regexp, text)
- if result is not None:
- LOG.info('Pattern "%s" found in %d seconds in "%s"',
- regexp,
- time.time() - start_time,
- text)
- return result
- dtime = time.time() - start_time
- if dtime > CONF.boto.build_timeout:
- raise testtools.TestCase\
- .failureException('Pattern find timeout exceeded! '
- '(%ds) While waiting for '
- '"%s" pattern in "%s"' %
- (dtime, regexp, text))
- time.sleep(CONF.boto.build_interval)
-
-
-def wait_no_exception(lfunction, exc_class=None, exc_matcher=None):
- """Stops waiting on success."""
- start_time = time.time()
- if exc_matcher is not None:
- exc_class = boto.exception.BotoServerError
-
- if exc_class is None:
- exc_class = BaseException
- while True:
- result = None
- try:
- result = lfunction()
- LOG.info('No Exception in %d seconds',
- time.time() - start_time)
- return result
- except exc_class as exc:
- if exc_matcher is not None:
- res = exc_matcher.match(exc)
- if res is not None:
- LOG.info(res)
- raise exc
- # Let the other exceptions propagate
- dtime = time.time() - start_time
- if dtime > CONF.boto.build_timeout:
- raise testtools.TestCase\
- .failureException("Wait timeout exceeded! (%ds)" % dtime)
- time.sleep(CONF.boto.build_interval)
-
-
-# NOTE(afazekas): EC2/boto normally raises an exception, not an empty list
-def wait_exception(lfunction):
- """Returns with the exception or raises one."""
- start_time = time.time()
- while True:
- try:
- lfunction()
- except BaseException as exc:
- LOG.info('Exception in %d seconds',
- time.time() - start_time)
- return exc
- dtime = time.time() - start_time
- if dtime > CONF.boto.build_timeout:
- raise testtools.TestCase\
- .failureException("Wait timeout exceeded! (%ds)" % dtime)
- time.sleep(CONF.boto.build_interval)
-
-# TODO(afazekas): consider strategy design pattern..
diff --git a/test-requirements.txt b/test-requirements.txt
index 5b01ea9..eb43f31 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -3,10 +3,10 @@
# process, which may cause wedges in the gate later.
hacking<0.11,>=0.10.0
# needed for doc build
-sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2
-python-subunit>=0.0.18
+sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 # BSD
+python-subunit>=0.0.18 # Apache-2.0/BSD
oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0
-mox>=0.5.3
-mock>=1.2
-coverage>=3.6
+mox>=0.5.3 # Apache-2.0
+mock>=1.2 # BSD
+coverage>=3.6 # Apache-2.0
oslotest>=1.10.0 # Apache-2.0
diff --git a/tools/check_logs.py b/tools/check_logs.py
index c8d3a1a..fa7129d 100755
--- a/tools/check_logs.py
+++ b/tools/check_logs.py
@@ -19,7 +19,7 @@
import gzip
import os
import re
-import StringIO
+import six
import sys
import urllib2
@@ -71,7 +71,7 @@
req = urllib2.Request(url)
req.add_header('Accept-Encoding', 'gzip')
page = urllib2.urlopen(req)
- buf = StringIO.StringIO(page.read())
+ buf = six.StringIO(page.read())
f = gzip.GzipFile(fileobj=buf)
if scan_content(name, f.read().splitlines(), regexp, whitelist):
logs_with_errors.append(name)
@@ -105,7 +105,7 @@
def main(opts):
if opts.directory and opts.url or not (opts.directory or opts.url):
print("Must provide exactly one of -d or -u")
- exit(1)
+ return 1
print("Checking logs...")
WHITELIST_FILE = os.path.join(
os.path.abspath(os.path.dirname(os.path.dirname(__file__))),
diff --git a/tools/find_stack_traces.py b/tools/find_stack_traces.py
index 4862d01..49a42fe 100755
--- a/tools/find_stack_traces.py
+++ b/tools/find_stack_traces.py
@@ -18,7 +18,7 @@
import gzip
import pprint
import re
-import StringIO
+import six
import sys
import urllib2
@@ -68,7 +68,7 @@
req = urllib2.Request(url)
req.add_header('Accept-Encoding', 'gzip')
page = urllib2.urlopen(req)
- buf = StringIO.StringIO(page.read())
+ buf = six.StringIO(page.read())
f = gzip.GzipFile(fileobj=buf)
content = f.read()
diff --git a/tools/install_venv_common.py b/tools/install_venv_common.py
index e279159..d1643f8 100644
--- a/tools/install_venv_common.py
+++ b/tools/install_venv_common.py
@@ -113,7 +113,7 @@
# First things first, make sure our venv has the latest pip and
# setuptools and pbr
- self.pip_install('pip>=1.4')
+ self.pip_install('pip<8,>=1.4')
self.pip_install('setuptools')
self.pip_install('pbr')
diff --git a/tox.ini b/tox.ini
index fedd04c..95f2cf1 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,6 +1,6 @@
[tox]
envlist = pep8,py34,py27
-minversion = 1.6
+minversion = 2.3.1
skipsdist = True
[tempestenv]
@@ -34,7 +34,7 @@
sitepackages = {[tempestenv]sitepackages}
# 'all' includes slow tests
setenv = {[tempestenv]setenv}
- OS_TEST_TIMEOUT=1200
+ OS_TEST_TIMEOUT={env:OS_TEST_TIMEOUT:1200}
deps = {[tempestenv]deps}
commands =
find . -type f -name "*.pyc" -delete
@@ -44,7 +44,7 @@
sitepackages = True
# 'all' includes slow tests
setenv = {[tempestenv]setenv}
- OS_TEST_TIMEOUT=1200
+ OS_TEST_TIMEOUT={env:OS_TEST_TIMEOUT:1200}
deps = {[tempestenv]deps}
commands =
find . -type f -name "*.pyc" -delete