Merge "Use Cinder v2 by default in scenario tests"
diff --git a/.coveragerc b/.coveragerc
index 51482d3..449e62c 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -1,4 +1,4 @@
[run]
branch = True
source = tempest
-omit = tempest/tests/*,tempest/scenario/test_*.py,tempest/api_schema/*,tempest/api/*
+omit = tempest/tests/*,tempest/scenario/test_*.py,tempest/api/*
diff --git a/README.rst b/README.rst
index f1dac1c..fc4de5e 100644
--- a/README.rst
+++ b/README.rst
@@ -196,18 +196,18 @@
from a remote system running python 2.7. (or deploy a cloud guest in your cloud
that has python 2.7)
-Python 3.4
+Python 3.x
----------
Starting during the Liberty release development cycle work began on enabling
Tempest to run under both Python 2.7 and Python 3.4. Tempest strives to fully
-support running with Python 3.4. A gating unit test job was added to also run
-Tempest's unit tests under Python 3.4. This means that the Tempest code at
-least imports under Python 3.4 and things that have unit test coverage will
-work on Python 3.4. However, because large parts of Tempest are self-verifying
-there might be uncaught issues running on Python 3.4. So until there is a gating
-job which does a full Tempest run using Python 3.4 there isn't any guarantee
-that running Tempest under Python 3.4 is bug free.
+support running with Python 3.4 and newer. A gating unit test job was added to
+also run Tempest's unit tests under Python 3. This means that the Tempest
+code at least imports under Python 3.4 and things that have unit test coverage
+will work on Python 3.4. However, because large parts of Tempest are
+self-verifying, there might be uncaught issues when running on Python 3. So
+until there is a gating job which does a full Tempest run using Python 3,
+there is no guarantee that running Tempest under Python 3 is bug-free.
Legacy run method
-----------------
diff --git a/REVIEWING.rst b/REVIEWING.rst
index 676a217..cfe7f4c 100644
--- a/REVIEWING.rst
+++ b/REVIEWING.rst
@@ -13,6 +13,13 @@
it. Tests which aren't executed either because of configuration or skips should
not be accepted.
+If a new test is added that depends on a new config option (like a feature
+flag), the commit message must reference a change in DevStack or DevStack-Gate
+that enables the execution of the newly introduced test. This reference can
+either be a `Cross-Repository Dependency <http://docs.openstack.org/infra/
+manual/developers.html#cross-repository-dependencies>`_ or a simple link
+to a Gerrit review.
+
Unit Tests
----------
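In practice, the cross-repository reference described above is expressed with Gerrit's `Depends-On` footer in the commit message. A hypothetical example (the subject, option name, and change ID below are placeholders, not taken from this change):

```
Add API test for the widget-sync feature flag

The new test only runs when [compute-feature-enabled]/widget_sync
is true, which the referenced DevStack change enables in the gate.

Depends-On: I0123456789abcdef0123456789abcdef01234567
```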
diff --git a/releasenotes/notes/13.1.0-volume-clients-as-library-309030c7a16e62ab.yaml b/releasenotes/notes/13.1.0-volume-clients-as-library-309030c7a16e62ab.yaml
new file mode 100644
index 0000000..056e199
--- /dev/null
+++ b/releasenotes/notes/13.1.0-volume-clients-as-library-309030c7a16e62ab.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+ Define volume service clients as libraries.
+ The following volume service clients are now defined as stable library
+ interfaces, so other projects can use these modules without any
+ maintenance changes.
+
+ * volumes_client(v1)
+ * volumes_client(v2)
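As a rough sketch of what consuming these stable clients from another project could look like (the module path and constructor arguments below are assumptions based on the usual tempest.lib service client layout, not part of this change):

```python
# Hypothetical helper: build a Cinder v2 volumes client from an existing
# tempest.lib auth provider. 'volumev2' is the usual catalog service type.
from tempest.lib.services.volume.v2 import volumes_client


def make_volumes_client(auth_provider, region='RegionOne'):
    return volumes_client.VolumesClient(auth_provider, 'volumev2', region)
```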
diff --git a/releasenotes/notes/add-cred-provider-abstract-class-to-lib-70ff513221f8a871.yaml b/releasenotes/notes/add-cred-provider-abstract-class-to-lib-70ff513221f8a871.yaml
new file mode 100644
index 0000000..6f7a411
--- /dev/null
+++ b/releasenotes/notes/add-cred-provider-abstract-class-to-lib-70ff513221f8a871.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - The cred_provider abstract class, which serves as the basis for both
+ of Tempest's credential providers (pre-provisioned credentials and
+ dynamic credentials), is now a library interface. It provides the
+ common signature required for building a credential provider.
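A minimal sketch of building on the new interface (the method set below mirrors what the in-tree providers implement and is an assumption, not an excerpt from this change):

```python
from tempest.lib.common import cred_provider


class StaticCredentialProvider(cred_provider.CredentialProvider):
    """Hypothetical provider that hands back one pre-built credential."""

    def __init__(self, identity_version, creds, **kwargs):
        super(StaticCredentialProvider, self).__init__(
            identity_version, **kwargs)
        self._creds = creds

    def get_primary_creds(self):
        return self._creds

    def get_admin_creds(self):
        return self._creds

    def get_alt_creds(self):
        return self._creds

    def get_creds_by_roles(self, roles, force_new=False):
        return self._creds

    def clear_creds(self):
        # Nothing to tear down for static credentials.
        pass

    def is_multi_user(self):
        return False

    def is_multi_tenant(self):
        return False

    def is_role_available(self, role):
        return False
```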
diff --git a/releasenotes/notes/add-ssh-port-parameter-to-client-6d16c374ac4456c1.yaml b/releasenotes/notes/add-ssh-port-parameter-to-client-6d16c374ac4456c1.yaml
new file mode 100644
index 0000000..b2ad199
--- /dev/null
+++ b/releasenotes/notes/add-ssh-port-parameter-to-client-6d16c374ac4456c1.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - A new optional parameter `port` was added to the ssh client
+ (`tempest.lib.common.ssh.Client`) to specify the destination port for a
+ host. The default value is 22.
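For illustration, the new parameter might be used like this (the host and credentials are placeholders):

```python
from tempest.lib.common import ssh

# Reach a guest whose sshd listens on a non-default port.
client = ssh.Client('192.0.2.10', 'cirros', password='secret', port=2222)
print(client.exec_command('hostname'))
```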
diff --git a/releasenotes/notes/deprecate-nova-api-extensions-df16b02485dae203.yaml b/releasenotes/notes/deprecate-nova-api-extensions-df16b02485dae203.yaml
new file mode 100644
index 0000000..c2d9a9b
--- /dev/null
+++ b/releasenotes/notes/deprecate-nova-api-extensions-df16b02485dae203.yaml
@@ -0,0 +1,7 @@
+---
+deprecations:
+ - The *api_extensions* config option in the *compute-feature-enabled* group is
+ now deprecated. This option will be removed from tempest when all the
+ OpenStack releases supported by tempest no longer support the API extensions
+ mechanism. The mechanism was removed from Nova during the Newton cycle, so
+ the option will be removed when Mitaka reaches EOL.
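For reference, the deprecated option lives in the *compute-feature-enabled* section of tempest.conf; a sketch (the value shown is only the common default):

```ini
[compute-feature-enabled]
# Deprecated: list of enabled compute API extensions.
# Scheduled for removal once Mitaka reaches EOL.
api_extensions = all
```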
diff --git a/releasenotes/notes/remove-sahara-tests-1532c47c7df80e3a.yaml b/releasenotes/notes/remove-sahara-tests-1532c47c7df80e3a.yaml
new file mode 100644
index 0000000..b541cf9
--- /dev/null
+++ b/releasenotes/notes/remove-sahara-tests-1532c47c7df80e3a.yaml
@@ -0,0 +1,4 @@
+---
+upgrade:
+ - All tests for the Sahara project have been removed from Tempest. They now
+ live as a Tempest plugin in the ``openstack/sahara-tests`` repository.
diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py
index 4522a17..140263c 100644
--- a/releasenotes/source/conf.py
+++ b/releasenotes/source/conf.py
@@ -275,3 +275,6 @@
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
+
+# -- Options for Internationalization output ------------------------------
+locale_dirs = ['locale/']
diff --git a/requirements.txt b/requirements.txt
index 4af8bb3..fa6c413 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -15,7 +15,6 @@
oslo.utils>=3.16.0 # Apache-2.0
six>=1.9.0 # MIT
fixtures>=3.0.0 # Apache-2.0/BSD
-testscenarios>=0.4 # Apache-2.0/BSD
PyYAML>=3.10.0 # MIT
python-subunit>=0.0.18 # Apache-2.0/BSD
stevedore>=1.17.1 # Apache-2.0
diff --git a/tempest/api/compute/admin/test_floating_ips_bulk.py b/tempest/api/compute/admin/test_floating_ips_bulk.py
index 456363c..e207aed 100644
--- a/tempest/api/compute/admin/test_floating_ips_bulk.py
+++ b/tempest/api/compute/admin/test_floating_ips_bulk.py
@@ -17,7 +17,7 @@
from tempest.api.compute import base
from tempest import config
-from tempest import exceptions
+from tempest.lib import exceptions
from tempest import test
CONF = config.CONF
diff --git a/tempest/api/compute/admin/test_live_migration.py b/tempest/api/compute/admin/test_live_migration.py
index 4e9bb88..72d5b18 100644
--- a/tempest/api/compute/admin/test_live_migration.py
+++ b/tempest/api/compute/admin/test_live_migration.py
@@ -128,8 +128,7 @@
def test_live_block_migration_paused(self):
self._test_live_migration(state='PAUSED')
- @decorators.skip_because(bug="1549511",
- condition=CONF.service_available.neutron)
+ @decorators.skip_because(bug="1524898")
@test.idempotent_id('5071cf17-3004-4257-ae61-73a84e28badd')
@test.services('volume')
def test_volume_backed_live_migration(self):
diff --git a/tempest/api/compute/flavors/test_flavors_negative.py b/tempest/api/compute/flavors/test_flavors_negative.py
deleted file mode 100644
index 83f8e19..0000000
--- a/tempest/api/compute/flavors/test_flavors_negative.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api.compute import base
-from tempest.api_schema.request.compute.v2 import flavors
-from tempest import config
-from tempest import test
-
-
-CONF = config.CONF
-
-load_tests = test.NegativeAutoTest.load_tests
-
-
-@test.SimpleNegativeAutoTest
-class FlavorsListWithDetailsNegativeTestJSON(base.BaseV2ComputeTest,
- test.NegativeAutoTest):
- _service = CONF.compute.catalog_type
- _schema = flavors.flavor_list
-
-
-@test.SimpleNegativeAutoTest
-class FlavorDetailsNegativeTestJSON(base.BaseV2ComputeTest,
- test.NegativeAutoTest):
- _service = CONF.compute.catalog_type
- _schema = flavors.flavors_details
-
- @classmethod
- def resource_setup(cls):
- super(FlavorDetailsNegativeTestJSON, cls).resource_setup()
- cls.set_resource("flavor", cls.flavor_ref)
diff --git a/tempest/api/compute/floating_ips/test_list_floating_ips.py b/tempest/api/compute/floating_ips/test_list_floating_ips.py
index 5738293..222bf18 100644
--- a/tempest/api/compute/floating_ips/test_list_floating_ips.py
+++ b/tempest/api/compute/floating_ips/test_list_floating_ips.py
@@ -41,8 +41,8 @@
@classmethod
def resource_cleanup(cls):
- for i in range(3):
- cls.client.delete_floating_ip(cls.floating_ip_id[i])
+ for f_id in cls.floating_ip_id:
+ cls.client.delete_floating_ip(f_id)
super(FloatingIPDetailsTestJSON, cls).resource_cleanup()
@test.idempotent_id('16db31c3-fb85-40c9-bbe2-8cf7b67ff99f')
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index 7b978ab..7fd23fc 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -27,26 +27,6 @@
class ImagesOneServerTestJSON(base.BaseV2ComputeTest):
- def tearDown(self):
- """Terminate test instances created after a test is executed."""
- self.server_check_teardown()
- super(ImagesOneServerTestJSON, self).tearDown()
-
- def setUp(self):
- # NOTE(afazekas): Normally we use the same server with all test cases,
- # but if it has an issue, we build a new one
- super(ImagesOneServerTestJSON, self).setUp()
- # Check if the server is in a clean state after test
- try:
- waiters.wait_for_server_status(self.servers_client,
- self.server_id, 'ACTIVE')
- except Exception:
- LOG.exception('server %s timed out to become ACTIVE. rebuilding'
- % self.server_id)
- # Rebuild server if cannot reach the ACTIVE state
- # Usually it means the server had a serious accident
- self.__class__.server_id = self.rebuild_server(self.server_id)
-
@classmethod
def skip_checks(cls):
super(ImagesOneServerTestJSON, cls).skip_checks()
@@ -74,6 +54,18 @@
flavor = self.flavors_client.show_flavor(flavor_id)['flavor']
return flavor['disk']
+ @classmethod
+ def _rebuild_server_when_fails(cls, server_id):
+ try:
+ waiters.wait_for_server_status(cls.servers_client,
+ server_id, 'ACTIVE')
+ except Exception:
+ LOG.exception('server %s failed to become ACTIVE in time; '
+ 'rebuilding' % server_id)
+ # Rebuild server if cannot reach the ACTIVE state
+ # Usually it means the server had a serious accident
+ cls.server_id = cls.rebuild_server(server_id)
+
@test.idempotent_id('3731d080-d4c5-4872-b41a-64d0d0021314')
def test_create_delete_image(self):
@@ -103,6 +95,7 @@
# Verify the image was deleted correctly
self.client.delete_image(image_id)
self.client.wait_for_resource_deletion(image_id)
+ self.addCleanup(self._rebuild_server_when_fails, self.server_id)
@test.idempotent_id('3b7c6fe4-dfe7-477c-9243-b06359db51e6')
def test_create_image_specify_multibyte_character_image_name(self):
@@ -116,3 +109,4 @@
body = self.client.create_image(self.server_id, name=utf8_name)
image_id = data_utils.parse_image_id(body.response['location'])
self.addCleanup(self.client.delete_image, image_id)
+ self.addCleanup(self._rebuild_server_when_fails, self.server_id)
diff --git a/tempest/api/compute/images/test_list_image_filters.py b/tempest/api/compute/images/test_list_image_filters.py
index f340658..a9c2f7a 100644
--- a/tempest/api/compute/images/test_list_image_filters.py
+++ b/tempest/api/compute/images/test_list_image_filters.py
@@ -23,7 +23,7 @@
from tempest.common.utils import data_utils
from tempest.common import waiters
from tempest import config
-from tempest import exceptions
+from tempest.lib import exceptions
from tempest import test
CONF = config.CONF
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 28ee739..9077801 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -24,7 +24,6 @@
from tempest.common.utils.linux import remote_client
from tempest.common import waiters
from tempest import config
-from tempest import exceptions
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
from tempest import test
@@ -81,14 +80,19 @@
@testtools.skipUnless(CONF.compute_feature_enabled.change_password,
'Change password not available.')
def test_change_server_password(self):
+ # Since this test messes with the password and makes the
+ # server unreachable, it should create its own server
+ newserver = self.create_test_server(
+ validatable=True,
+ wait_until='ACTIVE')
# The server's password should be set to the provided password
new_password = 'Newpass1234'
- self.client.change_password(self.server_id, adminPass=new_password)
- waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
+ self.client.change_password(newserver['id'], adminPass=new_password)
+ waiters.wait_for_server_status(self.client, newserver['id'], 'ACTIVE')
if CONF.validation.run_validation:
# Verify that the user can authenticate with the new password
- server = self.client.show_server(self.server_id)['server']
+ server = self.client.show_server(newserver['id'])['server']
linux_client = remote_client.RemoteClient(
self.get_server_ip(server),
self.ssh_user,
@@ -326,7 +330,7 @@
elif CONF.image_feature_enabled.api_v2:
glance_client = self.os.image_client_v2
else:
- raise exceptions.InvalidConfiguration(
+ raise lib_exc.InvalidConfiguration(
'Either api_v1 or api_v2 must be True in '
'[image-feature-enabled].')
diff --git a/tempest/api/compute/volumes/test_attach_volume_negative.py b/tempest/api/compute/volumes/test_attach_volume_negative.py
new file mode 100644
index 0000000..b7fa0fe
--- /dev/null
+++ b/tempest/api/compute/volumes/test_attach_volume_negative.py
@@ -0,0 +1,41 @@
+# Copyright 2016 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.compute import base
+from tempest import config
+from tempest.lib import exceptions as lib_exc
+from tempest import test
+
+CONF = config.CONF
+
+
+class AttachVolumeNegativeTest(base.BaseV2ComputeTest):
+
+ @classmethod
+ def skip_checks(cls):
+ super(AttachVolumeNegativeTest, cls).skip_checks()
+ if not CONF.service_available.cinder:
+ skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
+ raise cls.skipException(skip_msg)
+
+ @test.idempotent_id('a313b5cd-fbd0-49cc-94de-870e99f763c7')
+ def test_delete_attached_volume(self):
+ server = self.create_test_server(wait_until='ACTIVE')
+ volume = self.create_volume()
+
+ path = "/dev/%s" % CONF.compute.volume_device_name
+ self.attach_volume(server, volume, device=path)
+
+ self.assertRaises(lib_exc.BadRequest,
+ self.delete_volume, volume['id'])
diff --git a/tempest/api/data_processing/__init__.py b/tempest/api/data_processing/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tempest/api/data_processing/__init__.py
+++ /dev/null
diff --git a/tempest/api/data_processing/base.py b/tempest/api/data_processing/base.py
deleted file mode 100644
index c8506ae..0000000
--- a/tempest/api/data_processing/base.py
+++ /dev/null
@@ -1,442 +0,0 @@
-# Copyright (c) 2014 Mirantis Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import collections
-import copy
-
-import six
-
-from tempest import config
-from tempest import exceptions
-from tempest.lib.common.utils import test_utils
-import tempest.test
-
-
-CONF = config.CONF
-
-"""Default templates.
-There should always be at least a master1 and a worker1 node
-group template."""
-BASE_VANILLA_DESC = {
- 'NODES': {
- 'master1': {
- 'count': 1,
- 'node_processes': ['namenode', 'resourcemanager',
- 'hiveserver']
- },
- 'master2': {
- 'count': 1,
- 'node_processes': ['oozie', 'historyserver',
- 'secondarynamenode']
- },
- 'worker1': {
- 'count': 1,
- 'node_processes': ['datanode', 'nodemanager'],
- 'node_configs': {
- 'MapReduce': {
- 'yarn.app.mapreduce.am.resource.mb': 256,
- 'yarn.app.mapreduce.am.command-opts': '-Xmx256m'
- },
- 'YARN': {
- 'yarn.scheduler.minimum-allocation-mb': 256,
- 'yarn.scheduler.maximum-allocation-mb': 1024,
- 'yarn.nodemanager.vmem-check-enabled': False
- }
- }
- }
- },
- 'cluster_configs': {
- 'HDFS': {
- 'dfs.replication': 1
- }
- }
-}
-
-BASE_SPARK_DESC = {
- 'NODES': {
- 'master1': {
- 'count': 1,
- 'node_processes': ['namenode', 'master']
- },
- 'worker1': {
- 'count': 1,
- 'node_processes': ['datanode', 'slave']
- }
- },
- 'cluster_configs': {
- 'HDFS': {
- 'dfs.replication': 1
- }
- }
-}
-
-BASE_CDH_DESC = {
- 'NODES': {
- 'master1': {
- 'count': 1,
- 'node_processes': ['CLOUDERA_MANAGER']
- },
- 'master2': {
- 'count': 1,
- 'node_processes': ['HDFS_NAMENODE',
- 'YARN_RESOURCEMANAGER']
- },
- 'master3': {
- 'count': 1,
- 'node_processes': ['OOZIE_SERVER', 'YARN_JOBHISTORY',
- 'HDFS_SECONDARYNAMENODE',
- 'HIVE_METASTORE', 'HIVE_SERVER2']
- },
- 'worker1': {
- 'count': 1,
- 'node_processes': ['YARN_NODEMANAGER', 'HDFS_DATANODE']
- }
- },
- 'cluster_configs': {
- 'HDFS': {
- 'dfs_replication': 1
- }
- }
-}
-
-
-DEFAULT_TEMPLATES = {
- 'vanilla': collections.OrderedDict([
- ('2.6.0', copy.deepcopy(BASE_VANILLA_DESC)),
- ('2.7.1', copy.deepcopy(BASE_VANILLA_DESC)),
- ('1.2.1', {
- 'NODES': {
- 'master1': {
- 'count': 1,
- 'node_processes': ['namenode', 'jobtracker']
- },
- 'worker1': {
- 'count': 1,
- 'node_processes': ['datanode', 'tasktracker'],
- 'node_configs': {
- 'HDFS': {
- 'Data Node Heap Size': 1024
- },
- 'MapReduce': {
- 'Task Tracker Heap Size': 1024
- }
- }
- }
- },
- 'cluster_configs': {
- 'HDFS': {
- 'dfs.replication': 1
- },
- 'MapReduce': {
- 'mapred.map.tasks.speculative.execution': False,
- 'mapred.child.java.opts': '-Xmx500m'
- },
- 'general': {
- 'Enable Swift': False
- }
- }
- })
- ]),
- 'hdp': collections.OrderedDict([
- ('2.0.6', {
- 'NODES': {
- 'master1': {
- 'count': 1,
- 'node_processes': ['NAMENODE', 'SECONDARY_NAMENODE',
- 'ZOOKEEPER_SERVER', 'AMBARI_SERVER',
- 'HISTORYSERVER', 'RESOURCEMANAGER',
- 'GANGLIA_SERVER', 'NAGIOS_SERVER',
- 'OOZIE_SERVER']
- },
- 'worker1': {
- 'count': 1,
- 'node_processes': ['HDFS_CLIENT', 'DATANODE',
- 'YARN_CLIENT', 'ZOOKEEPER_CLIENT',
- 'MAPREDUCE2_CLIENT', 'NODEMANAGER',
- 'PIG', 'OOZIE_CLIENT']
- }
- },
- 'cluster_configs': {
- 'HDFS': {
- 'dfs.replication': 1
- }
- }
- })
- ]),
- 'spark': collections.OrderedDict([
- ('1.0.0', copy.deepcopy(BASE_SPARK_DESC)),
- ('1.3.1', copy.deepcopy(BASE_SPARK_DESC))
- ]),
- 'cdh': collections.OrderedDict([
- ('5.4.0', copy.deepcopy(BASE_CDH_DESC)),
- ('5.3.0', copy.deepcopy(BASE_CDH_DESC)),
- ('5', copy.deepcopy(BASE_CDH_DESC))
- ]),
-}
-
-
-class BaseDataProcessingTest(tempest.test.BaseTestCase):
-
- credentials = ['primary']
-
- @classmethod
- def skip_checks(cls):
- super(BaseDataProcessingTest, cls).skip_checks()
- if not CONF.service_available.sahara:
- raise cls.skipException('Sahara support is required')
- cls.default_plugin = cls._get_default_plugin()
-
- @classmethod
- def setup_clients(cls):
- super(BaseDataProcessingTest, cls).setup_clients()
- cls.client = cls.os.data_processing_client
-
- @classmethod
- def resource_setup(cls):
- super(BaseDataProcessingTest, cls).resource_setup()
-
- cls.default_version = cls._get_default_version()
- if cls.default_plugin is not None and cls.default_version is None:
- raise exceptions.InvalidConfiguration(
- message="No known Sahara plugin version was found")
- cls.flavor_ref = CONF.compute.flavor_ref
-
- # add lists for watched resources
- cls._node_group_templates = []
- cls._cluster_templates = []
- cls._data_sources = []
- cls._job_binary_internals = []
- cls._job_binaries = []
- cls._jobs = []
-
- @classmethod
- def resource_cleanup(cls):
- cls.cleanup_resources(getattr(cls, '_cluster_templates', []),
- cls.client.delete_cluster_template)
- cls.cleanup_resources(getattr(cls, '_node_group_templates', []),
- cls.client.delete_node_group_template)
- cls.cleanup_resources(getattr(cls, '_jobs', []), cls.client.delete_job)
- cls.cleanup_resources(getattr(cls, '_job_binaries', []),
- cls.client.delete_job_binary)
- cls.cleanup_resources(getattr(cls, '_job_binary_internals', []),
- cls.client.delete_job_binary_internal)
- cls.cleanup_resources(getattr(cls, '_data_sources', []),
- cls.client.delete_data_source)
- super(BaseDataProcessingTest, cls).resource_cleanup()
-
- @staticmethod
- def cleanup_resources(resource_id_list, method):
- for resource_id in resource_id_list:
- test_utils.call_and_ignore_notfound_exc(method, resource_id)
-
- @classmethod
- def create_node_group_template(cls, name, plugin_name, hadoop_version,
- node_processes, flavor_id,
- node_configs=None, **kwargs):
- """Creates watched node group template with specified params.
-
- It supports passing additional params using kwargs and returns created
- object. All resources created in this method will be automatically
- removed in tearDownClass method.
- """
- resp_body = cls.client.create_node_group_template(name, plugin_name,
- hadoop_version,
- node_processes,
- flavor_id,
- node_configs,
- **kwargs)
- resp_body = resp_body['node_group_template']
- # store id of created node group template
- cls._node_group_templates.append(resp_body['id'])
-
- return resp_body
-
- @classmethod
- def create_cluster_template(cls, name, plugin_name, hadoop_version,
- node_groups, cluster_configs=None, **kwargs):
- """Creates watched cluster template with specified params.
-
- It supports passing additional params using kwargs and returns created
- object. All resources created in this method will be automatically
- removed in tearDownClass method.
- """
- resp_body = cls.client.create_cluster_template(name, plugin_name,
- hadoop_version,
- node_groups,
- cluster_configs,
- **kwargs)
- resp_body = resp_body['cluster_template']
- # store id of created cluster template
- cls._cluster_templates.append(resp_body['id'])
-
- return resp_body
-
- @classmethod
- def create_data_source(cls, name, type, url, **kwargs):
- """Creates watched data source with specified params.
-
- It supports passing additional params using kwargs and returns created
- object. All resources created in this method will be automatically
- removed in tearDownClass method.
- """
- resp_body = cls.client.create_data_source(name, type, url, **kwargs)
- resp_body = resp_body['data_source']
- # store id of created data source
- cls._data_sources.append(resp_body['id'])
-
- return resp_body
-
- @classmethod
- def create_job_binary_internal(cls, name, data):
- """Creates watched job binary internal with specified params.
-
- It returns created object. All resources created in this method will
- be automatically removed in tearDownClass method.
- """
- resp_body = cls.client.create_job_binary_internal(name, data)
- resp_body = resp_body['job_binary_internal']
- # store id of created job binary internal
- cls._job_binary_internals.append(resp_body['id'])
-
- return resp_body
-
- @classmethod
- def create_job_binary(cls, name, url, extra=None, **kwargs):
- """Creates watched job binary with specified params.
-
- It supports passing additional params using kwargs and returns created
- object. All resources created in this method will be automatically
- removed in tearDownClass method.
- """
- resp_body = cls.client.create_job_binary(name, url, extra, **kwargs)
- resp_body = resp_body['job_binary']
- # store id of created job binary
- cls._job_binaries.append(resp_body['id'])
-
- return resp_body
-
- @classmethod
- def create_job(cls, name, job_type, mains, libs=None, **kwargs):
- """Creates watched job with specified params.
-
- It supports passing additional params using kwargs and returns created
- object. All resources created in this method will be automatically
- removed in tearDownClass method.
- """
- resp_body = cls.client.create_job(name,
- job_type, mains, libs, **kwargs)
- resp_body = resp_body['job']
- # store id of created job
- cls._jobs.append(resp_body['id'])
-
- return resp_body
-
- @classmethod
- def _get_default_plugin(cls):
- """Returns the default plugin used for testing."""
- if len(CONF.data_processing_feature_enabled.plugins) == 0:
- return None
-
- for plugin in CONF.data_processing_feature_enabled.plugins:
- if plugin in DEFAULT_TEMPLATES:
- break
- else:
- plugin = ''
- return plugin
-
- @classmethod
- def _get_default_version(cls):
- """Returns the default plugin version used for testing.
-
- This is gathered separately from the plugin to allow
- the usage of plugin name in skip_checks. This method is
- rather invoked into resource_setup, which allows API calls
- and exceptions.
- """
- if not cls.default_plugin:
- return None
- plugin = cls.client.get_plugin(cls.default_plugin)['plugin']
-
- for version in DEFAULT_TEMPLATES[cls.default_plugin].keys():
- if version in plugin['versions']:
- break
- else:
- version = None
-
- return version
-
- @classmethod
- def get_node_group_template(cls, nodegroup='worker1'):
- """Returns a node group template for the default plugin."""
- try:
- plugin_data = (
- DEFAULT_TEMPLATES[cls.default_plugin][cls.default_version]
- )
- nodegroup_data = plugin_data['NODES'][nodegroup]
- node_group_template = {
- 'description': 'Test node group template',
- 'plugin_name': cls.default_plugin,
- 'hadoop_version': cls.default_version,
- 'node_processes': nodegroup_data['node_processes'],
- 'flavor_id': cls.flavor_ref,
- 'node_configs': nodegroup_data.get('node_configs', {}),
- }
- return node_group_template
- except (IndexError, KeyError):
- return None
-
- @classmethod
- def get_cluster_template(cls, node_group_template_ids=None):
- """Returns a cluster template for the default plugin.
-
- node_group_template_defined contains the type and ID of pre-defined
- node group templates that have to be used in the cluster template
- (instead of dynamically defining them with 'node_processes').
- """
- if node_group_template_ids is None:
- node_group_template_ids = {}
- try:
- plugin_data = (
- DEFAULT_TEMPLATES[cls.default_plugin][cls.default_version]
- )
-
- all_node_groups = []
- for ng_name, ng_data in six.iteritems(plugin_data['NODES']):
- node_group = {
- 'name': '%s-node' % (ng_name),
- 'flavor_id': cls.flavor_ref,
- 'count': ng_data['count']
- }
- if ng_name in node_group_template_ids.keys():
- # node group already defined, use it
- node_group['node_group_template_id'] = (
- node_group_template_ids[ng_name]
- )
- else:
- # node_processes list defined on-the-fly
- node_group['node_processes'] = ng_data['node_processes']
- if 'node_configs' in ng_data:
- node_group['node_configs'] = ng_data['node_configs']
- all_node_groups.append(node_group)
-
- cluster_template = {
- 'description': 'Test cluster template',
- 'plugin_name': cls.default_plugin,
- 'hadoop_version': cls.default_version,
- 'cluster_configs': plugin_data.get('cluster_configs', {}),
- 'node_groups': all_node_groups,
- }
- return cluster_template
- except (IndexError, KeyError):
- return None
diff --git a/tempest/api/data_processing/test_cluster_templates.py b/tempest/api/data_processing/test_cluster_templates.py
deleted file mode 100644
index dfd8e27..0000000
--- a/tempest/api/data_processing/test_cluster_templates.py
+++ /dev/null
@@ -1,124 +0,0 @@
-# Copyright (c) 2014 Mirantis Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api.data_processing import base as dp_base
-from tempest.common.utils import data_utils
-from tempest import exceptions
-from tempest import test
-
-
-class ClusterTemplateTest(dp_base.BaseDataProcessingTest):
- # Link to the API documentation is http://docs.openstack.org/developer/
- # sahara/restapi/rest_api_v1.0.html#cluster-templates
-
- @classmethod
- def skip_checks(cls):
- super(ClusterTemplateTest, cls).skip_checks()
- if cls.default_plugin is None:
- raise cls.skipException("No Sahara plugins configured")
-
- @classmethod
- def resource_setup(cls):
- super(ClusterTemplateTest, cls).resource_setup()
-
- # pre-define a node group templates
- node_group_template_w = cls.get_node_group_template('worker1')
- if node_group_template_w is None:
- raise exceptions.InvalidConfiguration(
- message="No known Sahara plugin was found")
-
- node_group_template_w['name'] = data_utils.rand_name(
- 'sahara-ng-template')
- resp_body = cls.create_node_group_template(**node_group_template_w)
- node_group_template_id = resp_body['id']
- configured_node_group_templates = {'worker1': node_group_template_id}
-
- cls.full_cluster_template = cls.get_cluster_template(
- configured_node_group_templates)
-
- # create cls.cluster_template variable to use for comparison to cluster
- # template response body. The 'node_groups' field in the response body
- # has some extra info that post body does not have. The 'node_groups'
- # field in the response body is something like this
- #
- # 'node_groups': [
- # {
- # 'count': 3,
- # 'name': 'worker-node',
- # 'volume_mount_prefix': '/volumes/disk',
- # 'created_at': '2014-05-21 14:31:37',
- # 'updated_at': None,
- # 'floating_ip_pool': None,
- # ...
- # },
- # ...
- # ]
- cls.cluster_template = cls.full_cluster_template.copy()
- del cls.cluster_template['node_groups']
-
- def _create_cluster_template(self, template_name=None):
- """Creates Cluster Template with optional name specified.
-
- It creates template, ensures template name and response body.
- Returns id and name of created template.
- """
- if not template_name:
- # generate random name if it's not specified
- template_name = data_utils.rand_name('sahara-cluster-template')
-
- # create cluster template
- resp_body = self.create_cluster_template(template_name,
- **self.full_cluster_template)
-
- # ensure that template created successfully
- self.assertEqual(template_name, resp_body['name'])
- self.assertDictContainsSubset(self.cluster_template, resp_body)
-
- return resp_body['id'], template_name
-
- @test.attr(type='smoke')
- @test.idempotent_id('3525f1f1-3f9c-407d-891a-a996237e728b')
- def test_cluster_template_create(self):
- self._create_cluster_template()
-
- @test.attr(type='smoke')
- @test.idempotent_id('7a161882-e430-4840-a1c6-1d928201fab2')
- def test_cluster_template_list(self):
- template_info = self._create_cluster_template()
-
- # check for cluster template in list
- templates = self.client.list_cluster_templates()['cluster_templates']
- templates_info = [(template['id'], template['name'])
- for template in templates]
- self.assertIn(template_info, templates_info)
-
- @test.attr(type='smoke')
- @test.idempotent_id('2b75fe22-f731-4b0f-84f1-89ab25f86637')
- def test_cluster_template_get(self):
- template_id, template_name = self._create_cluster_template()
-
- # check cluster template fetch by id
- template = self.client.get_cluster_template(template_id)
- template = template['cluster_template']
- self.assertEqual(template_name, template['name'])
- self.assertDictContainsSubset(self.cluster_template, template)
-
- @test.attr(type='smoke')
- @test.idempotent_id('ff1fd989-171c-4dd7-91fd-9fbc71b09675')
- def test_cluster_template_delete(self):
- template_id, _ = self._create_cluster_template()
-
- # delete the cluster template by id
- self.client.delete_cluster_template(template_id)
- # TODO(ylobankov): check that cluster template is really deleted
diff --git a/tempest/api/data_processing/test_data_sources.py b/tempest/api/data_processing/test_data_sources.py
deleted file mode 100644
index 67d09a0..0000000
--- a/tempest/api/data_processing/test_data_sources.py
+++ /dev/null
@@ -1,161 +0,0 @@
-# Copyright (c) 2014 Mirantis Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api.data_processing import base as dp_base
-from tempest.common.utils import data_utils
-from tempest import test
-
-
-class DataSourceTest(dp_base.BaseDataProcessingTest):
- @classmethod
- def resource_setup(cls):
- super(DataSourceTest, cls).resource_setup()
- cls.swift_data_source_with_creds = {
- 'url': 'swift://sahara-container.sahara/input-source',
- 'description': 'Test data source',
- 'credentials': {
- 'user': cls.os.credentials.username,
- 'password': cls.os.credentials.password
- },
- 'type': 'swift'
- }
- cls.swift_data_source = cls.swift_data_source_with_creds.copy()
- del cls.swift_data_source['credentials']
-
- cls.local_hdfs_data_source = {
- 'url': 'input-source',
- 'description': 'Test data source',
- 'type': 'hdfs'
- }
-
- cls.external_hdfs_data_source = {
- 'url': 'hdfs://172.18.168.2:8020/usr/hadoop/input-source',
- 'description': 'Test data source',
- 'type': 'hdfs'
- }
-
- def _create_data_source(self, source_body, source_name=None):
- """Creates Data Source with optional name specified.
-
- It creates a link to input-source file (it may not exist), ensures
- source name and response body. Returns id and name of created source.
- """
- if not source_name:
- # generate random name if it's not specified
- source_name = data_utils.rand_name('sahara-data-source')
-
- # create data source
- resp_body = self.create_data_source(source_name, **source_body)
-
- # ensure that source created successfully
- self.assertEqual(source_name, resp_body['name'])
- if source_body['type'] == 'swift':
- source_body = self.swift_data_source
- self.assertDictContainsSubset(source_body, resp_body)
-
- return resp_body['id'], source_name
-
- def _list_data_sources(self, source_info):
- # check for data source in list
- sources = self.client.list_data_sources()['data_sources']
- sources_info = [(source['id'], source['name']) for source in sources]
- self.assertIn(source_info, sources_info)
-
- def _get_data_source(self, source_id, source_name, source_body):
- # check data source fetch by id
- source = self.client.get_data_source(source_id)['data_source']
- self.assertEqual(source_name, source['name'])
- self.assertDictContainsSubset(source_body, source)
-
- @test.attr(type='smoke')
- @test.idempotent_id('9e0e836d-c372-4fca-91b7-b66c3e9646c8')
- def test_swift_data_source_create(self):
- self._create_data_source(self.swift_data_source_with_creds)
-
- @test.attr(type='smoke')
- @test.idempotent_id('3cb87a4a-0534-4b97-9edc-8bbc822b68a0')
- def test_swift_data_source_list(self):
- source_info = (
- self._create_data_source(self.swift_data_source_with_creds))
- self._list_data_sources(source_info)
-
- @test.attr(type='smoke')
- @test.idempotent_id('fc07409b-6477-4cb3-9168-e633c46b227f')
- def test_swift_data_source_get(self):
- source_id, source_name = (
- self._create_data_source(self.swift_data_source_with_creds))
- self._get_data_source(source_id, source_name, self.swift_data_source)
-
- @test.attr(type='smoke')
- @test.idempotent_id('df53669c-0cd1-4cf7-b408-4cf215d8beb8')
- def test_swift_data_source_delete(self):
- source_id, _ = (
- self._create_data_source(self.swift_data_source_with_creds))
-
- # delete the data source by id
- self.client.delete_data_source(source_id)
-
- @test.attr(type='smoke')
- @test.idempotent_id('88505d52-db01-4229-8f1d-a1137da5fe2d')
- def test_local_hdfs_data_source_create(self):
- self._create_data_source(self.local_hdfs_data_source)
-
- @test.attr(type='smoke')
- @test.idempotent_id('81d7d42a-d7f6-4d9b-b38c-0801a4dfe3c2')
- def test_local_hdfs_data_source_list(self):
- source_info = self._create_data_source(self.local_hdfs_data_source)
- self._list_data_sources(source_info)
-
- @test.attr(type='smoke')
- @test.idempotent_id('ec0144c6-db1e-4169-bb06-7abae14a8443')
- def test_local_hdfs_data_source_get(self):
- source_id, source_name = (
- self._create_data_source(self.local_hdfs_data_source))
- self._get_data_source(
- source_id, source_name, self.local_hdfs_data_source)
-
- @test.attr(type='smoke')
- @test.idempotent_id('e398308b-4230-4f86-ba10-9b0b60a59c8d')
- def test_local_hdfs_data_source_delete(self):
- source_id, _ = self._create_data_source(self.local_hdfs_data_source)
-
- # delete the data source by id
- self.client.delete_data_source(source_id)
-
- @test.attr(type='smoke')
- @test.idempotent_id('bfd91128-e642-4d95-a973-3e536962180c')
- def test_external_hdfs_data_source_create(self):
- self._create_data_source(self.external_hdfs_data_source)
-
- @test.attr(type='smoke')
- @test.idempotent_id('92e2be72-f7ab-499d-ae01-fb9943c90d8e')
- def test_external_hdfs_data_source_list(self):
- source_info = self._create_data_source(self.external_hdfs_data_source)
- self._list_data_sources(source_info)
-
- @test.attr(type='smoke')
- @test.idempotent_id('a31edb1b-6bc6-4f42-871f-70cd243184ac')
- def test_external_hdfs_data_source_get(self):
- source_id, source_name = (
- self._create_data_source(self.external_hdfs_data_source))
- self._get_data_source(
- source_id, source_name, self.external_hdfs_data_source)
-
- @test.attr(type='smoke')
- @test.idempotent_id('295924cd-a085-4b45-aea8-0707cdb2da7e')
- def test_external_hdfs_data_source_delete(self):
- source_id, _ = self._create_data_source(self.external_hdfs_data_source)
-
- # delete the data source by id
- self.client.delete_data_source(source_id)
diff --git a/tempest/api/data_processing/test_job_binaries.py b/tempest/api/data_processing/test_job_binaries.py
deleted file mode 100644
index a47ddbc..0000000
--- a/tempest/api/data_processing/test_job_binaries.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# Copyright (c) 2014 Mirantis Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api.data_processing import base as dp_base
-from tempest.common.utils import data_utils
-from tempest import test
-
-
-class JobBinaryTest(dp_base.BaseDataProcessingTest):
- # Link to the API documentation is http://docs.openstack.org/developer/
- # sahara/restapi/rest_api_v1.1_EDP.html#job-binaries
-
- @classmethod
- def resource_setup(cls):
- super(JobBinaryTest, cls).resource_setup()
- cls.swift_job_binary_with_extra = {
- 'url': 'swift://sahara-container.sahara/example.jar',
- 'description': 'Test job binary',
- 'extra': {
- 'user': cls.os.credentials.username,
- 'password': cls.os.credentials.password
- }
- }
- # Create extra cls.swift_job_binary variable to use for comparison to
- # job binary response body because response body has no 'extra' field.
- cls.swift_job_binary = cls.swift_job_binary_with_extra.copy()
- del cls.swift_job_binary['extra']
-
- name = data_utils.rand_name('sahara-internal-job-binary')
- cls.job_binary_data = 'Some script may be data'
- job_binary_internal = (
- cls.create_job_binary_internal(name, cls.job_binary_data))
- cls.internal_db_job_binary = {
- 'url': 'internal-db://%s' % job_binary_internal['id'],
- 'description': 'Test job binary',
- }
-
- def _create_job_binary(self, binary_body, binary_name=None):
- """Creates Job Binary with optional name specified.
-
- It creates a link to data (jar, pig files, etc.), ensures job binary
- name and response body. Returns id and name of created job binary.
- Data may not exist when using Swift as data storage.
- In other cases data must exist in storage.
- """
- if not binary_name:
- # generate random name if it's not specified
- binary_name = data_utils.rand_name('sahara-job-binary')
-
- # create job binary
- resp_body = self.create_job_binary(binary_name, **binary_body)
-
- # ensure that binary created successfully
- self.assertEqual(binary_name, resp_body['name'])
- if 'swift' in binary_body['url']:
- binary_body = self.swift_job_binary
- self.assertDictContainsSubset(binary_body, resp_body)
-
- return resp_body['id'], binary_name
-
- @test.attr(type='smoke')
- @test.idempotent_id('c00d43f8-4360-45f8-b280-af1a201b12d3')
- def test_swift_job_binary_create(self):
- self._create_job_binary(self.swift_job_binary_with_extra)
-
- @test.attr(type='smoke')
- @test.idempotent_id('f8809352-e79d-4748-9359-ce1efce89f2a')
- def test_swift_job_binary_list(self):
- binary_info = self._create_job_binary(self.swift_job_binary_with_extra)
-
- # check for job binary in list
- binaries = self.client.list_job_binaries()['binaries']
- binaries_info = [(binary['id'], binary['name']) for binary in binaries]
- self.assertIn(binary_info, binaries_info)
-
- @test.attr(type='smoke')
- @test.idempotent_id('2d4a670f-e8f1-413c-b5ac-50c1bfe9e1b1')
- def test_swift_job_binary_get(self):
- binary_id, binary_name = (
- self._create_job_binary(self.swift_job_binary_with_extra))
-
- # check job binary fetch by id
- binary = self.client.get_job_binary(binary_id)['job_binary']
- self.assertEqual(binary_name, binary['name'])
- self.assertDictContainsSubset(self.swift_job_binary, binary)
-
- @test.attr(type='smoke')
- @test.idempotent_id('9b0e8f38-04f3-4616-b399-cfa7eb2677ed')
- def test_swift_job_binary_delete(self):
- binary_id, _ = (
- self._create_job_binary(self.swift_job_binary_with_extra))
-
- # delete the job binary by id
- self.client.delete_job_binary(binary_id)
-
- @test.attr(type='smoke')
- @test.idempotent_id('63662f6d-8291-407e-a6fc-f654522ebab6')
- def test_internal_db_job_binary_create(self):
- self._create_job_binary(self.internal_db_job_binary)
-
- @test.attr(type='smoke')
- @test.idempotent_id('38731e7b-6d9d-4ffa-8fd1-193c453e88b1')
- def test_internal_db_job_binary_list(self):
- binary_info = self._create_job_binary(self.internal_db_job_binary)
-
- # check for job binary in list
- binaries = self.client.list_job_binaries()['binaries']
- binaries_info = [(binary['id'], binary['name']) for binary in binaries]
- self.assertIn(binary_info, binaries_info)
-
- @test.attr(type='smoke')
- @test.idempotent_id('1b32199b-c3f5-43e1-a37a-3797e57b7066')
- def test_internal_db_job_binary_get(self):
- binary_id, binary_name = (
- self._create_job_binary(self.internal_db_job_binary))
-
- # check job binary fetch by id
- binary = self.client.get_job_binary(binary_id)['job_binary']
- self.assertEqual(binary_name, binary['name'])
- self.assertDictContainsSubset(self.internal_db_job_binary, binary)
-
- @test.attr(type='smoke')
- @test.idempotent_id('3c42b0c3-3e03-46a5-adf0-df0650271a4e')
- def test_internal_db_job_binary_delete(self):
- binary_id, _ = self._create_job_binary(self.internal_db_job_binary)
-
- # delete the job binary by id
- self.client.delete_job_binary(binary_id)
-
- @test.attr(type='smoke')
- @test.idempotent_id('d5d47659-7e2c-4ea7-b292-5b3e559e8587')
- def test_job_binary_get_data(self):
- binary_id, _ = self._create_job_binary(self.internal_db_job_binary)
-
- # get data of job binary by id
- _, data = self.client.get_job_binary_data(binary_id)
- self.assertEqual(data, self.job_binary_data)
diff --git a/tempest/api/data_processing/test_job_binary_internals.py b/tempest/api/data_processing/test_job_binary_internals.py
deleted file mode 100644
index b4f0769..0000000
--- a/tempest/api/data_processing/test_job_binary_internals.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright (c) 2014 Mirantis Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api.data_processing import base as dp_base
-from tempest.common.utils import data_utils
-from tempest import test
-
-
-class JobBinaryInternalTest(dp_base.BaseDataProcessingTest):
- # Link to the API documentation is http://docs.openstack.org/developer/
- # sahara/restapi/rest_api_v1.1_EDP.html#job-binary-internals
-
- @classmethod
- def resource_setup(cls):
- super(JobBinaryInternalTest, cls).resource_setup()
- cls.job_binary_internal_data = 'Some script may be data'
-
- def _create_job_binary_internal(self, binary_name=None):
- """Creates Job Binary Internal with optional name specified.
-
- It puts data into Sahara database and ensures job binary internal name.
- Returns id and name of created job binary internal.
- """
- if not binary_name:
- # generate random name if it's not specified
- binary_name = data_utils.rand_name('sahara-job-binary-internal')
-
- # create job binary internal
- resp_body = (
- self.create_job_binary_internal(binary_name,
- self.job_binary_internal_data))
-
- # ensure that job binary internal created successfully
- self.assertEqual(binary_name, resp_body['name'])
-
- return resp_body['id'], binary_name
-
- @test.attr(type='smoke')
- @test.idempotent_id('249c4dc2-946f-4939-83e6-212ddb6ea0be')
- def test_job_binary_internal_create(self):
- self._create_job_binary_internal()
-
- @test.attr(type='smoke')
- @test.idempotent_id('1e3c2ecd-5673-499d-babe-4fe2fcdf64ee')
- def test_job_binary_internal_list(self):
- binary_info = self._create_job_binary_internal()
-
- # check for job binary internal in list
- binaries = self.client.list_job_binary_internals()['binaries']
- binaries_info = [(binary['id'], binary['name']) for binary in binaries]
- self.assertIn(binary_info, binaries_info)
-
- @test.attr(type='smoke')
- @test.idempotent_id('a2046a53-386c-43ab-be35-df54b19db776')
- def test_job_binary_internal_get(self):
- binary_id, binary_name = self._create_job_binary_internal()
-
- # check job binary internal fetch by id
- binary = self.client.get_job_binary_internal(binary_id)
- self.assertEqual(binary_name, binary['job_binary_internal']['name'])
-
- @test.attr(type='smoke')
- @test.idempotent_id('b3568c33-4eed-40d5-aae4-6ff3b2ac58f5')
- def test_job_binary_internal_delete(self):
- binary_id, _ = self._create_job_binary_internal()
-
- # delete the job binary internal by id
- self.client.delete_job_binary_internal(binary_id)
-
- @test.attr(type='smoke')
- @test.idempotent_id('8871f2b0-5782-4d66-9bb9-6f95bcb839ea')
- def test_job_binary_internal_get_data(self):
- binary_id, _ = self._create_job_binary_internal()
-
- # get data of job binary internal by id
- _, data = self.client.get_job_binary_internal_data(binary_id)
- self.assertEqual(data, self.job_binary_internal_data)
diff --git a/tempest/api/data_processing/test_jobs.py b/tempest/api/data_processing/test_jobs.py
deleted file mode 100644
index 8503320..0000000
--- a/tempest/api/data_processing/test_jobs.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright (c) 2014 Mirantis Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api.data_processing import base as dp_base
-from tempest.common.utils import data_utils
-from tempest import test
-
-
-class JobTest(dp_base.BaseDataProcessingTest):
- # NOTE: Link to the API documentation: http://docs.openstack.org/developer/
- # sahara/restapi/rest_api_v1.1_EDP.html#jobs
-
- @classmethod
- def resource_setup(cls):
- super(JobTest, cls).resource_setup()
- # create job binary
- job_binary = {
- 'name': data_utils.rand_name('sahara-job-binary'),
- 'url': 'swift://sahara-container.sahara/example.jar',
- 'description': 'Test job binary',
- 'extra': {
- 'user': cls.os.credentials.username,
- 'password': cls.os.credentials.password
- }
- }
- resp_body = cls.create_job_binary(**job_binary)
- job_binary_id = resp_body['id']
-
- cls.job = {
- 'job_type': 'Pig',
- 'mains': [job_binary_id]
- }
-
- def _create_job(self, job_name=None):
- """Creates Job with optional name specified.
-
- It creates job and ensures job name. Returns id and name of created
- job.
- """
- if not job_name:
- # generate random name if it's not specified
- job_name = data_utils.rand_name('sahara-job')
-
- # create job
- resp_body = self.create_job(job_name, **self.job)
-
- # ensure that job created successfully
- self.assertEqual(job_name, resp_body['name'])
-
- return resp_body['id'], job_name
-
- @test.attr(type='smoke')
- @test.idempotent_id('8cf785ca-adf4-473d-8281-fb9a5efa3073')
- def test_job_create(self):
- self._create_job()
-
- @test.attr(type='smoke')
- @test.idempotent_id('41e253fe-b02a-41a0-b186-5ff1f0463ba3')
- def test_job_list(self):
- job_info = self._create_job()
-
- # check for job in list
- jobs = self.client.list_jobs()['jobs']
- jobs_info = [(job['id'], job['name']) for job in jobs]
- self.assertIn(job_info, jobs_info)
-
- @test.attr(type='smoke')
- @test.idempotent_id('3faf17fa-bc94-4a60-b1c3-79e53674c16c')
- def test_job_get(self):
- job_id, job_name = self._create_job()
-
- # check job fetch by id
- job = self.client.get_job(job_id)['job']
- self.assertEqual(job_name, job['name'])
-
- @test.attr(type='smoke')
- @test.idempotent_id('dff85e62-7dda-4ad8-b1ee-850adecb0c6e')
- def test_job_delete(self):
- job_id, _ = self._create_job()
-
- # delete the job by id
- self.client.delete_job(job_id)
diff --git a/tempest/api/data_processing/test_node_group_templates.py b/tempest/api/data_processing/test_node_group_templates.py
deleted file mode 100644
index c2dae85..0000000
--- a/tempest/api/data_processing/test_node_group_templates.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# Copyright (c) 2014 Mirantis Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api.data_processing import base as dp_base
-from tempest.common.utils import data_utils
-from tempest import test
-
-
-class NodeGroupTemplateTest(dp_base.BaseDataProcessingTest):
-
- @classmethod
- def skip_checks(cls):
- super(NodeGroupTemplateTest, cls).skip_checks()
- if cls.default_plugin is None:
- raise cls.skipException("No Sahara plugins configured")
-
- def _create_node_group_template(self, template_name=None):
- """Creates Node Group Template with optional name specified.
-
- It creates template, ensures template name and response body.
- Returns id and name of created template.
- """
- self.node_group_template = self.get_node_group_template()
- self.assertIsNotNone(self.node_group_template,
- "No known Sahara plugin was found")
-
- if not template_name:
- # generate random name if it's not specified
- template_name = data_utils.rand_name('sahara-ng-template')
-
- # create node group template
- resp_body = self.create_node_group_template(template_name,
- **self.node_group_template)
-
- # ensure that template created successfully
- self.assertEqual(template_name, resp_body['name'])
- self.assertDictContainsSubset(self.node_group_template, resp_body)
-
- return resp_body['id'], template_name
-
- @test.attr(type='smoke')
- @test.idempotent_id('63164051-e46d-4387-9741-302ef4791cbd')
- def test_node_group_template_create(self):
- self._create_node_group_template()
-
- @test.attr(type='smoke')
- @test.idempotent_id('eb39801d-2612-45e5-88b1-b5d70b329185')
- def test_node_group_template_list(self):
- template_info = self._create_node_group_template()
-
- # check for node group template in list
- templates = self.client.list_node_group_templates()
- templates = templates['node_group_templates']
- templates_info = [(template['id'], template['name'])
- for template in templates]
- self.assertIn(template_info, templates_info)
-
- @test.attr(type='smoke')
- @test.idempotent_id('6ee31539-a708-466f-9c26-4093ce09a836')
- def test_node_group_template_get(self):
- template_id, template_name = self._create_node_group_template()
-
- # check node group template fetch by id
- template = self.client.get_node_group_template(template_id)
- template = template['node_group_template']
- self.assertEqual(template_name, template['name'])
- self.assertDictContainsSubset(self.node_group_template, template)
-
- @test.attr(type='smoke')
- @test.idempotent_id('f4f5cb82-708d-4031-81c4-b0618a706a2f')
- def test_node_group_template_delete(self):
- template_id, _ = self._create_node_group_template()
-
- # delete the node group template by id
- self.client.delete_node_group_template(template_id)
diff --git a/tempest/api/data_processing/test_plugins.py b/tempest/api/data_processing/test_plugins.py
deleted file mode 100644
index 14594e4..0000000
--- a/tempest/api/data_processing/test_plugins.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright (c) 2014 Mirantis Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api.data_processing import base as dp_base
-from tempest import config
-from tempest import test
-
-CONF = config.CONF
-
-
-class PluginsTest(dp_base.BaseDataProcessingTest):
- def _list_all_plugin_names(self):
- """Returns all enabled plugin names.
-
- It ensures main plugins availability.
- """
- plugins = self.client.list_plugins()['plugins']
- plugins_names = [plugin['name'] for plugin in plugins]
- for enabled_plugin in CONF.data_processing_feature_enabled.plugins:
- self.assertIn(enabled_plugin, plugins_names)
-
- return plugins_names
-
- @test.attr(type='smoke')
- @test.idempotent_id('01a005a3-426c-4c0b-9617-d09475403e09')
- def test_plugin_list(self):
- self._list_all_plugin_names()
-
- @test.attr(type='smoke')
- @test.idempotent_id('53cf6487-2cfb-4a6f-8671-97c542c6e901')
- def test_plugin_get(self):
- for plugin_name in self._list_all_plugin_names():
- plugin = self.client.get_plugin(plugin_name)['plugin']
- self.assertEqual(plugin_name, plugin['name'])
-
- for plugin_version in plugin['versions']:
- detailed_plugin = self.client.get_plugin(plugin_name,
- plugin_version)
- detailed_plugin = detailed_plugin['plugin']
- self.assertEqual(plugin_name, detailed_plugin['name'])
-
- # check that required image tags contains name and version
- image_tags = detailed_plugin['required_image_tags']
- self.assertIn(plugin_name, image_tags)
- self.assertIn(plugin_version, image_tags)
diff --git a/tempest/api/image/v1/test_images.py b/tempest/api/image/v1/test_images.py
index 695efb5..269e297 100644
--- a/tempest/api/image/v1/test_images.py
+++ b/tempest/api/image/v1/test_images.py
@@ -20,7 +20,7 @@
from tempest.common.utils import data_utils
from tempest.common import waiters
from tempest import config
-from tempest import exceptions
+from tempest.lib import exceptions
from tempest import test
CONF = config.CONF
diff --git a/tempest/api/image/v2/test_images_metadefs_namespaces.py b/tempest/api/image/v2/test_images_metadefs_namespaces.py
index 6fced00..a80a0cf 100644
--- a/tempest/api/image/v2/test_images_metadefs_namespaces.py
+++ b/tempest/api/image/v2/test_images_metadefs_namespaces.py
@@ -40,6 +40,10 @@
protected=True)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self._cleanup_namespace, namespace_name)
+ # list namespaces
+ bodies = self.namespaces_client.list_namespaces()['namespaces']
+ namespace_names = [namespace['namespace'] for namespace in bodies]
+ self.assertIn(namespace_name, namespace_names)
# get namespace details
body = self.namespaces_client.show_namespace(namespace_name)
self.assertEqual(namespace_name, body['namespace'])
diff --git a/tempest/api/network/admin/test_dhcp_agent_scheduler.py b/tempest/api/network/admin/test_dhcp_agent_scheduler.py
index d2ab237..b3555b6 100644
--- a/tempest/api/network/admin/test_dhcp_agent_scheduler.py
+++ b/tempest/api/network/admin/test_dhcp_agent_scheduler.py
@@ -44,7 +44,7 @@
body = self.admin_networks_client.list_dhcp_agents_on_hosting_network(
self.network['id'])
agents = body['agents']
- self.assertIsNotNone(agents)
+ self.assertNotEmpty(agents, "no dhcp agent")
agent = agents[0]
self.assertTrue(self._check_network_in_dhcp_agent(
self.network['id'], agent))
diff --git a/tempest/api/network/admin/test_l3_agent_scheduler.py b/tempest/api/network/admin/test_l3_agent_scheduler.py
index d2e1492..dca4523 100644
--- a/tempest/api/network/admin/test_l3_agent_scheduler.py
+++ b/tempest/api/network/admin/test_l3_agent_scheduler.py
@@ -15,7 +15,7 @@
from tempest.api.network import base
from tempest.common.utils import data_utils
from tempest import config
-from tempest import exceptions
+from tempest.lib import exceptions
from tempest import test
CONF = config.CONF
diff --git a/tempest/api/network/test_dhcp_ipv6.py b/tempest/api/network/test_dhcp_ipv6.py
index c1462db..4bc4262 100644
--- a/tempest/api/network/test_dhcp_ipv6.py
+++ b/tempest/api/network/test_dhcp_ipv6.py
@@ -20,6 +20,7 @@
from tempest.api.network import base
from tempest.common.utils import data_utils
+from tempest.common.utils import net_info
from tempest import config
from tempest.lib import exceptions as lib_exc
from tempest import test
@@ -66,7 +67,7 @@
body = self.ports_client.list_ports()
ports = body['ports']
for port in ports:
- if (port['device_owner'].startswith('network:router_interface') and
+ if (net_info.is_router_interface_port(port) and
port['device_id'] in [r['id'] for r in self.routers]):
self.routers_client.remove_router_interface(port['device_id'],
port_id=port['id'])
diff --git a/tempest/api/network/test_extensions.py b/tempest/api/network/test_extensions.py
index 2c981a1..84150b4 100644
--- a/tempest/api/network/test_extensions.py
+++ b/tempest/api/network/test_extensions.py
@@ -23,9 +23,9 @@
List all available extensions
- v2.0 of the Neutron API is assumed. It is also assumed that the following
- options are defined in the [network] section of etc/tempest.conf:
-
+ v2.0 of the Neutron API is assumed. It is also assumed that api-extensions
+ option is defined in the [network-feature-enabled] section of
+ etc/tempest.conf.
"""
@test.attr(type='smoke')
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index 3825f84..819ef90 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -296,7 +296,7 @@
subnet_id)
# Since create_subnet adds the subnet to the delete list, and it is
- # is actually deleted here - this will create and issue, hence remove
+ # actually deleted here - this will create an issue, hence remove
# it from the list.
self.subnets.pop()
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index e5972a9..12539ba 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -23,7 +23,7 @@
from tempest.common import custom_matchers
from tempest.common.utils import data_utils
from tempest import config
-from tempest import exceptions
+from tempest.lib import exceptions
from tempest import test
CONF = config.CONF
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
index ba416e4..98a4f63 100644
--- a/tempest/api/network/test_routers.py
+++ b/tempest/api/network/test_routers.py
@@ -190,11 +190,14 @@
gw_port = list_body['ports'][0]
fixed_ips = gw_port['fixed_ips']
self.assertGreaterEqual(len(fixed_ips), 1)
+ # Assert that all of the IPs from the router gateway port
+ # are allocated from a valid public subnet.
public_net_body = self.admin_networks_client.show_network(
CONF.network.public_network_id)
- public_subnet_id = public_net_body['network']['subnets'][0]
- self.assertIn(public_subnet_id,
- map(lambda x: x['subnet_id'], fixed_ips))
+ public_subnet_ids = public_net_body['network']['subnets']
+ for fixed_ip in fixed_ips:
+ subnet_id = fixed_ip['subnet_id']
+ self.assertIn(subnet_id, public_subnet_ids)
@test.idempotent_id('6cc285d8-46bf-4f36-9b1a-783e3008ba79')
def test_update_router_set_gateway(self):
diff --git a/tempest/api/network/test_security_groups.py b/tempest/api/network/test_security_groups.py
index 5312979..1031ab8 100644
--- a/tempest/api/network/test_security_groups.py
+++ b/tempest/api/network/test_security_groups.py
@@ -71,7 +71,7 @@
@test.attr(type='smoke')
@test.idempotent_id('e30abd17-fef9-4739-8617-dc26da88e686')
def test_list_security_groups(self):
- # Verify the that security group belonging to project exist in list
+ # Verify that the security group belonging to the project exists in the list
body = self.security_groups_client.list_security_groups()
security_groups = body['security_groups']
found = None
diff --git a/tempest/api/volume/admin/test_qos.py b/tempest/api/volume/admin/test_qos.py
index 9f2d453..9275d2b 100644
--- a/tempest/api/volume/admin/test_qos.py
+++ b/tempest/api/volume/admin/test_qos.py
@@ -55,16 +55,6 @@
self.admin_volume_qos_client.associate_qos(
self.created_qos['id'], vol_type_id)
- def _test_get_association_qos(self):
- body = self.admin_volume_qos_client.show_association_qos(
- self.created_qos['id'])['qos_associations']
-
- associations = []
- for association in body:
- associations.append(association['id'])
-
- return associations
-
@test.idempotent_id('7e15f883-4bef-49a9-95eb-f94209a1ced1')
def test_create_delete_qos_with_front_end_consumer(self):
"""Tests the creation and deletion of QoS specs
@@ -147,8 +137,9 @@
self._test_associate_qos(vol_type[i]['id'])
# get the association of the qos-specs
- associations = self._test_get_association_qos()
-
+ body = self.admin_volume_qos_client.show_association_qos(
+ self.created_qos['id'])['qos_associations']
+ associations = [association['id'] for association in body]
for i in range(0, 3):
self.assertIn(vol_type[i]['id'], associations)
@@ -159,8 +150,6 @@
waiters.wait_for_qos_operations(self.admin_volume_qos_client,
self.created_qos['id'], operation,
vol_type[0]['id'])
- associations = self._test_get_association_qos()
- self.assertNotIn(vol_type[0]['id'], associations)
# disassociate all volume-types from qos-specs
self.admin_volume_qos_client.disassociate_all_qos(
@@ -168,8 +157,6 @@
operation = 'disassociate-all'
waiters.wait_for_qos_operations(self.admin_volume_qos_client,
self.created_qos['id'], operation)
- associations = self._test_get_association_qos()
- self.assertEmpty(associations)
class QosSpecsV1TestJSON(QosSpecsV2TestJSON):
diff --git a/tempest/api/volume/admin/test_volume_types.py b/tempest/api/volume/admin/test_volume_types.py
index 646bc68..99f0a6b 100644
--- a/tempest/api/volume/admin/test_volume_types.py
+++ b/tempest/api/volume/admin/test_volume_types.py
@@ -43,10 +43,7 @@
"vendor_name": vendor}
# Create two volume_types
for i in range(2):
- vol_type_name = data_utils.rand_name(
- self.__class__.__name__ + '-volume-type')
vol_type = self.create_volume_type(
- name=vol_type_name,
extra_specs=extra_specs)
volume_types.append(vol_type)
params = {self.name_field: vol_name,
@@ -124,8 +121,7 @@
# Create/get/delete encryption type.
provider = "LuksEncryptor"
control_location = "front-end"
- name = data_utils.rand_name(self.__class__.__name__ + '-volume-type')
- body = self.create_volume_type(name=name)
+ body = self.create_volume_type()
# Create encryption type
encryption_type = \
self.admin_encryption_types_client.create_encryption_type(
diff --git a/tempest/api/volume/admin/test_volume_types_extra_specs.py b/tempest/api/volume/admin/test_volume_types_extra_specs.py
index 8b7ceff..fdff2df 100644
--- a/tempest/api/volume/admin/test_volume_types_extra_specs.py
+++ b/tempest/api/volume/admin/test_volume_types_extra_specs.py
@@ -14,7 +14,6 @@
# under the License.
from tempest.api.volume import base
-from tempest.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
from tempest import test
@@ -24,8 +23,7 @@
@classmethod
def resource_setup(cls):
super(VolumeTypesExtraSpecsV2Test, cls).resource_setup()
- vol_type_name = data_utils.rand_name(cls.__name__ + '-Volume-type')
- cls.volume_type = cls.create_volume_type(name=vol_type_name)
+ cls.volume_type = cls.create_volume_type()
@test.idempotent_id('b42923e9-0452-4945-be5b-d362ae533e60')
def test_volume_type_extra_specs_list(self):
diff --git a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
index 2e07457..8040322 100644
--- a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
+++ b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
@@ -24,10 +24,8 @@
@classmethod
def resource_setup(cls):
super(ExtraSpecsNegativeV2Test, cls).resource_setup()
- vol_type_name = data_utils.rand_name(cls.__name__ + '-Volume-type')
cls.extra_specs = {"spec1": "val1"}
- cls.volume_type = cls.create_volume_type(name=vol_type_name,
- extra_specs=cls.extra_specs)
+ cls.volume_type = cls.create_volume_type(extra_specs=cls.extra_specs)
@test.idempotent_id('08961d20-5cbb-4910-ac0f-89ad6dbb2da1')
def test_update_no_body(self):
diff --git a/tempest/api/volume/admin/test_volumes_actions.py b/tempest/api/volume/admin/test_volumes_actions.py
index 9686473..e7a3f62 100644
--- a/tempest/api/volume/admin/test_volumes_actions.py
+++ b/tempest/api/volume/admin/test_volumes_actions.py
@@ -14,8 +14,6 @@
# under the License.
from tempest.api.volume import base
-from tempest.common.utils import data_utils as utils
-from tempest.common import waiters
from tempest import config
from tempest import test
@@ -34,54 +32,28 @@
super(VolumesActionsV2Test, cls).resource_setup()
# Create a test shared volume for tests
- vol_name = utils.rand_name(cls.__name__ + '-Volume')
- cls.name_field = cls.special_fields['name_field']
- params = {cls.name_field: vol_name, 'size': CONF.volume.volume_size}
-
- cls.volume = cls.client.create_volume(**params)['volume']
- waiters.wait_for_volume_status(cls.client,
- cls.volume['id'], 'available')
-
- @classmethod
- def resource_cleanup(cls):
- # Delete the test volume
- cls.delete_volume(cls.client, cls.volume['id'])
-
- super(VolumesActionsV2Test, cls).resource_cleanup()
-
- def _reset_volume_status(self, volume_id, status):
- # Reset the volume status
- body = self.admin_volume_client.reset_volume_status(volume_id,
- status=status)
- return body
+ cls.volume = cls.create_volume()
def tearDown(self):
# Set volume's status to available after test
- self._reset_volume_status(self.volume['id'], status='available')
+ self.admin_volume_client.reset_volume_status(
+ self.volume['id'], status='available')
super(VolumesActionsV2Test, self).tearDown()
- def _create_temp_volume(self):
- # Create a temp volume for force delete tests
- vol_name = utils.rand_name(self.__class__.__name__ + '-Volume')
- params = {self.name_field: vol_name, 'size': CONF.volume.volume_size}
- temp_volume = self.client.create_volume(**params)['volume']
- waiters.wait_for_volume_status(self.client,
- temp_volume['id'], 'available')
-
- return temp_volume
-
def _create_reset_and_force_delete_temp_volume(self, status=None):
# Create volume, reset volume status, and force delete temp volume
- temp_volume = self._create_temp_volume()
+ temp_volume = self.create_volume()
if status:
- self._reset_volume_status(temp_volume['id'], status)
+ self.admin_volume_client.reset_volume_status(
+ temp_volume['id'], status=status)
self.admin_volume_client.force_delete_volume(temp_volume['id'])
self.client.wait_for_resource_deletion(temp_volume['id'])
@test.idempotent_id('d063f96e-a2e0-4f34-8b8a-395c42de1845')
def test_volume_reset_status(self):
# test volume reset status : available->error->available
- self._reset_volume_status(self.volume['id'], 'error')
+ self.admin_volume_client.reset_volume_status(
+ self.volume['id'], status='error')
volume_get = self.admin_volume_client.show_volume(
self.volume['id'])['volume']
self.assertEqual('error', volume_get['status'])
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index b49a126..e6b8234 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -17,8 +17,8 @@
from tempest.common.utils import data_utils
from tempest.common import waiters
from tempest import config
-from tempest import exceptions
from tempest.lib.common.utils import test_utils
+from tempest.lib import exceptions
import tempest.test
CONF = config.CONF
diff --git a/tempest/api/volume/test_volumes_actions.py b/tempest/api/volume/test_volumes_actions.py
index 5586e02..83844e3 100644
--- a/tempest/api/volume/test_volumes_actions.py
+++ b/tempest/api/volume/test_volumes_actions.py
@@ -18,8 +18,8 @@
from tempest.common.utils import data_utils
from tempest.common import waiters
from tempest import config
-from tempest import exceptions
from tempest.lib.common.utils import test_utils
+from tempest.lib import exceptions
from tempest import test
CONF = config.CONF
@@ -152,24 +152,15 @@
@test.idempotent_id('fff74e1e-5bd3-4b33-9ea9-24c103bc3f59')
def test_volume_readonly_update(self):
- # Update volume readonly true
- readonly = True
- self.client.update_volume_readonly(self.volume['id'],
- readonly=readonly)
- # Get Volume information
- fetched_volume = self.client.show_volume(self.volume['id'])['volume']
- bool_flag = self._is_true(fetched_volume['metadata']['readonly'])
- self.assertEqual(True, bool_flag)
-
- # Update volume readonly false
- readonly = False
- self.client.update_volume_readonly(self.volume['id'],
- readonly=readonly)
-
- # Get Volume information
- fetched_volume = self.client.show_volume(self.volume['id'])['volume']
- bool_flag = self._is_true(fetched_volume['metadata']['readonly'])
- self.assertEqual(False, bool_flag)
+ for readonly in [True, False]:
+ # Update volume readonly
+ self.client.update_volume_readonly(self.volume['id'],
+ readonly=readonly)
+ # Get Volume information
+ fetched_volume = self.client.show_volume(
+ self.volume['id'])['volume']
+ bool_flag = self._is_true(fetched_volume['metadata']['readonly'])
+ self.assertEqual(readonly, bool_flag)
class VolumesV1ActionsTest(VolumesV2ActionsTest):
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index 6be569c..f971eca 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -34,11 +34,11 @@
cls.name_field = cls.special_fields['name_field']
cls.descrip_field = cls.special_fields['descrip_field']
- def _detach(self, volume_id):
- """Detach volume."""
- self.volumes_client.detach_volume(volume_id)
- waiters.wait_for_volume_status(self.volumes_client,
- volume_id, 'available')
+ def cleanup_snapshot(self, snapshot):
+ # Delete the snapshot
+ self.snapshots_client.delete_snapshot(snapshot['id'])
+ self.snapshots_client.wait_for_resource_deletion(snapshot['id'])
+ self.snapshots.remove(snapshot)
@test.idempotent_id('b467b54c-07a4-446d-a1cf-651dedcc3ff1')
@test.services('compute')
@@ -121,12 +121,6 @@
self.assertEqual(volume['snapshot_id'], src_snap['id'])
self.assertEqual(int(volume['size']), src_size + 1)
- def cleanup_snapshot(self, snapshot):
- # Delete the snapshot
- self.snapshots_client.delete_snapshot(snapshot['id'])
- self.snapshots_client.wait_for_resource_deletion(snapshot['id'])
- self.snapshots.remove(snapshot)
-
class VolumesV1SnapshotTestJSON(VolumesV2SnapshotTestJSON):
_api_version = 1
diff --git a/tempest/api_schema/__init__.py b/tempest/api_schema/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tempest/api_schema/__init__.py
+++ /dev/null
diff --git a/tempest/api_schema/request/__init__.py b/tempest/api_schema/request/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tempest/api_schema/request/__init__.py
+++ /dev/null
diff --git a/tempest/api_schema/request/compute/__init__.py b/tempest/api_schema/request/compute/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tempest/api_schema/request/compute/__init__.py
+++ /dev/null
diff --git a/tempest/api_schema/request/compute/flavors.py b/tempest/api_schema/request/compute/flavors.py
deleted file mode 100644
index adaaf27..0000000
--- a/tempest/api_schema/request/compute/flavors.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# (c) 2014 Deutsche Telekom AG
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-common_flavor_details = {
- "name": "get-flavor-details",
- "http-method": "GET",
- "url": "flavors/%s",
- "resources": [
- {"name": "flavor", "expected_result": 404}
- ]
-}
-
-common_flavor_list = {
- "name": "list-flavors-with-detail",
- "http-method": "GET",
- "url": "flavors/detail",
- "json-schema": {
- "type": "object",
- "properties": {
- }
- }
-}
-
-common_admin_flavor_create = {
- "name": "flavor-create",
- "http-method": "POST",
- "admin_client": True,
- "url": "flavors",
- "default_result_code": 400,
- "json-schema": {
- "type": "object",
- "properties": {
- "flavor": {
- "type": "object",
- "properties": {
- "name": {"type": "string",
- "exclude_tests": ["gen_str_min_length"]},
- "ram": {"type": "integer", "minimum": 1},
- "vcpus": {"type": "integer", "minimum": 1},
- "disk": {"type": "integer"},
- "id": {"type": "integer",
- "exclude_tests": ["gen_none", "gen_string"]
- },
- }
- }
- }
- }
-}
diff --git a/tempest/api_schema/request/compute/v2/__init__.py b/tempest/api_schema/request/compute/v2/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tempest/api_schema/request/compute/v2/__init__.py
+++ /dev/null
diff --git a/tempest/api_schema/request/compute/v2/flavors.py b/tempest/api_schema/request/compute/v2/flavors.py
deleted file mode 100644
index bc459ad..0000000
--- a/tempest/api_schema/request/compute/v2/flavors.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# (c) 2014 Deutsche Telekom AG
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import copy
-
-from tempest.api_schema.request.compute import flavors
-
-flavors_details = copy.deepcopy(flavors.common_flavor_details)
-
-flavor_list = copy.deepcopy(flavors.common_flavor_list)
-
-flavor_create = copy.deepcopy(flavors.common_admin_flavor_create)
-
-flavor_list["json-schema"]["properties"] = {
- "minRam": {
- "type": "integer",
- "results": {
- "gen_none": 400,
- "gen_string": 400
- }
- },
- "minDisk": {
- "type": "integer",
- "results": {
- "gen_none": 400,
- "gen_string": 400
- }
- }
-}
diff --git a/tempest/clients.py b/tempest/clients.py
index 765a526..be6bc02 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -24,7 +24,6 @@
from tempest.lib import exceptions as lib_exc
from tempest.lib.services import clients
from tempest.services import baremetal
-from tempest.services import data_processing
from tempest.services import identity
from tempest.services import object_storage
from tempest.services import orchestration
@@ -39,7 +38,7 @@
default_params = config.service_client_config()
- # TODO(andreaf) This is only used by data_processing and baremetal clients,
+ # TODO(andreaf) This is only used by baremetal clients,
# and should be removed once they are out of Tempest
default_params_with_timeout_values = {
'build_interval': CONF.compute.build_interval,
@@ -84,12 +83,6 @@
build_interval=CONF.orchestration.build_interval,
build_timeout=CONF.orchestration.build_timeout,
**self.default_params)
- self.data_processing_client = data_processing.DataProcessingClient(
- self.auth_provider,
- CONF.data_processing.catalog_type,
- CONF.identity.region,
- endpoint_type=CONF.data_processing.endpoint_type,
- **self.default_params_with_timeout_values)
self.negative_client = negative_rest_client.NegativeRestClient(
self.auth_provider, service, **self.default_params)
@@ -274,14 +267,14 @@
CONF.identity.uri, **self.default_params)
else:
msg = 'Identity v2 API enabled, but no identity.uri set'
- raise exceptions.InvalidConfiguration(msg)
+ raise lib_exc.InvalidConfiguration(msg)
if CONF.identity_feature_enabled.api_v3:
if CONF.identity.uri_v3:
self.token_v3_client = identity.v3.V3TokenClient(
CONF.identity.uri_v3, **self.default_params)
else:
msg = 'Identity v3 API enabled, but no identity.uri_v3 set'
- raise exceptions.InvalidConfiguration(msg)
+ raise lib_exc.InvalidConfiguration(msg)
def _set_volume_clients(self):
# Mandatory parameters (always defined)
diff --git a/tempest/cmd/account_generator.py b/tempest/cmd/account_generator.py
index f9d7a9b..1779252 100755
--- a/tempest/cmd/account_generator.py
+++ b/tempest/cmd/account_generator.py
@@ -144,6 +144,13 @@
identity_version=identity_version,
name=opts.tag,
network_resources=network_resources,
+ neutron_available=CONF.service_available.neutron,
+ create_networks=CONF.auth.create_isolated_networks,
+ identity_admin_role=CONF.identity.admin_role,
+ identity_admin_domain_scope=CONF.identity.admin_domain_scope,
+ project_network_cidr=CONF.network.project_network_cidr,
+ project_network_mask_bits=CONF.network.project_network_mask_bits,
+ public_network_id=CONF.network.public_network_id,
admin_creds=admin_creds,
**credentials_factory.get_dynamic_provider_params())
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
index 9758061..32b0ebb 100644
--- a/tempest/cmd/cleanup_service.py
+++ b/tempest/cmd/cleanup_service.py
@@ -18,6 +18,7 @@
from tempest.common import credentials_factory as credentials
from tempest.common import identity
+from tempest.common.utils import net_info
from tempest import config
from tempest import test
@@ -463,7 +464,7 @@
rid = router['id']
ports = [port for port
in ports_client.list_ports(device_id=rid)['ports']
- if port["device_owner"] == "network:router_interface"]
+ if net_info.is_router_interface_port(port)]
for port in ports:
client.remove_router_interface(rid, port_id=port['id'])
client.delete_router(rid)
diff --git a/tempest/cmd/init.py b/tempest/cmd/init.py
index baa36a2..99185d2 100644
--- a/tempest/cmd/init.py
+++ b/tempest/cmd/init.py
@@ -14,13 +14,13 @@
import os
import shutil
-import subprocess
import sys
from cliff import command
from oslo_config import generator
from oslo_log import log as logging
from six import moves
+from testrepository import commands
from tempest.cmd import workspace
@@ -167,7 +167,8 @@
self.generate_testr_conf(local_dir)
# setup local testr working dir
if not os.path.isdir(testr_dir):
- subprocess.call(['testr', 'init'], cwd=local_dir)
+ commands.run_argv(['testr', 'init', '-d', local_dir], sys.stdin,
+ sys.stdout, sys.stderr)
def take_action(self, parsed_args):
workspace_manager = workspace.WorkspaceManager(
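With this change `tempest init` drives testrepository in-process rather than
shelling out to a `testr` binary, so initialization also works when the
console script isn't on PATH. A sketch of the equivalent call; the workspace
directory path is a placeholder:

    import sys

    from testrepository import commands

    # Same effect as running `testr init` inside the workspace directory;
    # run_argv takes the argv list plus explicit stdio streams and returns
    # the command's exit code.
    ret = commands.run_argv(['testr', 'init', '-d', '/path/to/workspace'],
                            sys.stdin, sys.stdout, sys.stderr)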
diff --git a/tempest/cmd/run.py b/tempest/cmd/run.py
index 236953c..5fa8b74 100644
--- a/tempest/cmd/run.py
+++ b/tempest/cmd/run.py
@@ -135,6 +135,12 @@
workspace_mgr = workspace.WorkspaceManager(
parsed_args.workspace_path)
path = workspace_mgr.get_workspace(parsed_args.workspace)
+ if not path:
+ sys.exit(
+ "The %r workspace isn't registered in "
+ "%r. Use 'tempest init' to "
+ "register the workspace." %
+ (parsed_args.workspace, workspace_mgr.path))
os.chdir(path)
# NOTE(mtreinish): tempest init should create a .testrepository dir
# but since workspaces can be imported let's sanity check and
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
index b2e72c5..381f3df 100644
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -286,7 +286,6 @@
'object_storage': 'swift',
'compute': 'nova',
'orchestration': 'heat',
- 'data_processing': 'sahara',
'baremetal': 'ironic',
'identity': 'keystone',
}
diff --git a/tempest/cmd/workspace.py b/tempest/cmd/workspace.py
index b36cf4e..3c58648 100644
--- a/tempest/cmd/workspace.py
+++ b/tempest/cmd/workspace.py
@@ -72,7 +72,10 @@
@lockutils.synchronized('workspaces', external=True)
def get_workspace(self, name):
- """Returns the workspace that has the given name"""
+ """Returns the workspace that has the given name
+
+ If the workspace isn't registered, `None` is returned.
+ """
self._populate()
return self.workspaces.get(name)
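Since `get_workspace()` now explicitly returns `None` for an unregistered
name, callers are expected to check the result before using it, as the
`tempest run` change above does. A minimal caller-side sketch, assuming a
hypothetical workspace name:

    import sys

    from tempest.cmd import workspace

    mgr = workspace.WorkspaceManager()
    path = mgr.get_workspace('my-cloud')  # 'my-cloud' is a placeholder
    if not path:
        sys.exit("Workspace 'my-cloud' isn't registered; run 'tempest init'.")
    print('Using workspace at %s' % path)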
diff --git a/tempest/common/credentials_factory.py b/tempest/common/credentials_factory.py
index c22afc1..5634958 100644
--- a/tempest/common/credentials_factory.py
+++ b/tempest/common/credentials_factory.py
@@ -17,8 +17,8 @@
from tempest.common import dynamic_creds
from tempest.common import preprov_creds
from tempest import config
-from tempest import exceptions
from tempest.lib import auth
+from tempest.lib import exceptions
CONF = config.CONF
@@ -80,6 +80,16 @@
network_resources=network_resources,
identity_version=identity_version,
admin_creds=admin_creds,
+ identity_admin_domain_scope=CONF.identity.admin_domain_scope,
+ identity_admin_role=CONF.identity.admin_role,
+ extra_roles=CONF.auth.tempest_roles,
+ neutron_available=CONF.service_available.neutron,
+ project_network_cidr=CONF.network.project_network_cidr,
+ project_network_mask_bits=CONF.network.project_network_mask_bits,
+ public_network_id=CONF.network.public_network_id,
+ create_networks=(CONF.auth.create_isolated_networks and not
+ CONF.baremetal.driver_enabled),
+ resource_prefix=CONF.resources_prefix,
**get_dynamic_provider_params())
else:
if CONF.auth.test_accounts_file:
diff --git a/tempest/common/dynamic_creds.py b/tempest/common/dynamic_creds.py
index b96b1c0..5c12fd8 100644
--- a/tempest/common/dynamic_creds.py
+++ b/tempest/common/dynamic_creds.py
@@ -18,20 +18,22 @@
from tempest import clients
from tempest.common import cred_client
-from tempest.common import cred_provider
-from tempest.common.utils import data_utils
-from tempest import config
-from tempest import exceptions
+from tempest.lib.common import cred_provider
+from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
-CONF = config.CONF
LOG = logging.getLogger(__name__)
class DynamicCredentialProvider(cred_provider.CredentialProvider):
def __init__(self, identity_version, name=None, network_resources=None,
- credentials_domain=None, admin_role=None, admin_creds=None):
+ credentials_domain=None, admin_role=None, admin_creds=None,
+ identity_admin_domain_scope=False,
+ identity_admin_role='admin', extra_roles=None,
+ neutron_available=False, create_networks=True,
+ project_network_cidr=None, project_network_mask_bits=None,
+ public_network_id=None, resource_prefix=None):
"""Creates credentials dynamically for tests
A credential provider that, based on an initial set of
@@ -48,6 +50,23 @@
:param dict network_resources: network resources to be created for
the created credentials
:param Credentials admin_creds: initial admin credentials
+ :param bool identity_admin_domain_scope: Set to true if admin should be
+ scoped to the domain. By
+ default this is False and the
+ admin role is scoped to the
+ project.
+ :param str identity_admin_role: The role name to use for admin
+ :param list extra_roles: A list of strings for extra roles that should
+ be assigned to all created users
+ :param bool neutron_available: Whether we are running in an environment
+ with neutron
+ :param bool create_networks: Whether dynamic project networks should be
+ created or not
+ :param project_network_cidr: The CIDR to use for created project
+ networks
+ :param project_network_mask_bits: The network mask bits to use for
+ created project networks
+ :param public_network_id: The id for the public network to use
"""
super(DynamicCredentialProvider, self).__init__(
identity_version=identity_version, admin_role=admin_role,
@@ -56,7 +75,16 @@
self.network_resources = network_resources
self._creds = {}
self.ports = []
+ self.resource_prefix = resource_prefix or ''
+ self.neutron_available = neutron_available
+ self.create_networks = create_networks
+ self.project_network_cidr = project_network_cidr
+ self.project_network_mask_bits = project_network_mask_bits
+ self.public_network_id = public_network_id
self.default_admin_creds = admin_creds
+ self.identity_admin_domain_scope = identity_admin_domain_scope
+ self.identity_admin_role = identity_admin_role or 'admin'
+ self.extra_roles = extra_roles or []
(self.identity_admin_client,
self.tenants_admin_client,
self.users_admin_client,
@@ -98,7 +126,7 @@
else:
# We use a dedicated client manager for identity client in case we
# need a different token scope for them.
- scope = 'domain' if CONF.identity.admin_domain_scope else 'project'
+ scope = 'domain' if self.identity_admin_domain_scope else 'project'
identity_os = clients.Manager(self.default_admin_creds,
scope=scope)
return (identity_os.identity_v3_client,
@@ -124,7 +152,7 @@
"""
root = self.name
- project_name = data_utils.rand_name(root)
+ project_name = data_utils.rand_name(root, prefix=self.resource_prefix)
project_desc = project_name + "-desc"
project = self.creds_client.create_project(
name=project_name, description=project_desc)
@@ -133,7 +161,8 @@
# having the same ID in both makes it easier to match them and debug.
username = project_name
user_password = data_utils.rand_password()
- email = data_utils.rand_name(root) + "@example.com"
+ email = data_utils.rand_name(
+ root, prefix=self.resource_prefix) + "@example.com"
user = self.creds_client.create_user(
username, user_password, project, email)
role_assigned = False
@@ -141,11 +170,11 @@
self.creds_client.assign_user_role(user, project, self.admin_role)
role_assigned = True
if (self.identity_version == 'v3' and
- CONF.identity.admin_domain_scope):
+ self.identity_admin_domain_scope):
self.creds_client.assign_user_role_on_domain(
- user, CONF.identity.admin_role)
+ user, self.identity_admin_role)
# Add roles specified in config file
- for conf_role in CONF.auth.tempest_roles:
+ for conf_role in self.extra_roles:
self.creds_client.assign_user_role(user, project, conf_role)
role_assigned = True
# Add roles requested by caller
@@ -189,26 +218,27 @@
if self.network_resources['router']:
if (not self.network_resources['subnet'] or
not self.network_resources['network']):
- raise exceptions.InvalidConfiguration(
+ raise lib_exc.InvalidConfiguration(
'A router requires a subnet and network')
elif self.network_resources['subnet']:
if not self.network_resources['network']:
- raise exceptions.InvalidConfiguration(
+ raise lib_exc.InvalidConfiguration(
'A subnet requires a network')
elif self.network_resources['dhcp']:
- raise exceptions.InvalidConfiguration('DHCP requires a subnet')
+ raise lib_exc.InvalidConfiguration('DHCP requires a subnet')
- data_utils.rand_name_root = data_utils.rand_name(self.name)
+ rand_name_root = data_utils.rand_name(
+ self.name, prefix=self.resource_prefix)
if not self.network_resources or self.network_resources['network']:
- network_name = data_utils.rand_name_root + "-network"
+ network_name = rand_name_root + "-network"
network = self._create_network(network_name, tenant_id)
try:
if not self.network_resources or self.network_resources['subnet']:
- subnet_name = data_utils.rand_name_root + "-subnet"
+ subnet_name = rand_name_root + "-subnet"
subnet = self._create_subnet(subnet_name, tenant_id,
network['id'])
if not self.network_resources or self.network_resources['router']:
- router_name = data_utils.rand_name_root + "-router"
+ router_name = rand_name_root + "-router"
router = self._create_router(router_name, tenant_id)
self._add_router_interface(router['id'], subnet['id'])
except Exception:
@@ -234,8 +264,8 @@
return resp_body['network']
def _create_subnet(self, subnet_name, tenant_id, network_id):
- base_cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
- mask_bits = CONF.network.project_network_mask_bits
+ base_cidr = netaddr.IPNetwork(self.project_network_cidr)
+ mask_bits = self.project_network_mask_bits
for subnet_cidr in base_cidr.subnet(mask_bits):
try:
if self.network_resources:
@@ -264,7 +294,7 @@
def _create_router(self, router_name, tenant_id):
external_net_id = dict(
- network_id=CONF.network.public_network_id)
+ network_id=self.public_network_id)
resp_body = self.routers_admin_client.create_router(
name=router_name,
external_gateway_info=external_net_id,
@@ -288,9 +318,8 @@
# Maintained until tests are ported
LOG.info("Acquired dynamic creds:\n credentials: %s"
% credentials)
- if (CONF.service_available.neutron and
- not CONF.baremetal.driver_enabled and
- CONF.auth.create_isolated_networks):
+ if (self.neutron_available and
+ self.create_networks):
network, subnet, router = self._create_network_resources(
credentials.tenant_id)
credentials.set_resources(network=network, subnet=subnet,
@@ -405,7 +434,7 @@
# "circular dependency". So here just use try...except to
# ensure tenant deletion without big changes.
try:
- if CONF.service_available.neutron:
+ if self.neutron_available:
self._cleanup_default_secgroup(creds.tenant_id)
except lib_exc.NotFound:
LOG.warning("failed to cleanup tenant %s's secgroup" %
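The net effect of this refactor is that DynamicCredentialProvider no longer
reads CONF at all; every deployment-specific value is injected through the
constructor, which is what allows the class to move toward tempest.lib. A
hedged construction sketch, where all literal values (keystone URL, admin
credentials, CIDR, network UUID) are placeholders:

    from tempest.common import dynamic_creds
    from tempest.lib import auth

    # fill_in=False builds the credentials object without contacting
    # keystone; all values below are illustrative assumptions.
    admin_creds = auth.get_credentials(
        'http://keystone.example.com:5000/v2.0', fill_in=False,
        identity_version='v2', username='admin', password='secret',
        tenant_name='admin')

    provider = dynamic_creds.DynamicCredentialProvider(
        identity_version='v2', name='demo', admin_creds=admin_creds,
        neutron_available=True, create_networks=True,
        project_network_cidr='10.100.0.0/16', project_network_mask_bits=28,
        public_network_id='<public-network-uuid>', extra_roles=['member'])

    # provider.get_primary_creds() would then create a project, user and
    # network against the live cloud described by admin_creds.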
diff --git a/tempest/common/preprov_creds.py b/tempest/common/preprov_creds.py
index 5992d24..5e23696 100644
--- a/tempest/common/preprov_creds.py
+++ b/tempest/common/preprov_creds.py
@@ -21,10 +21,10 @@
import yaml
from tempest import clients
-from tempest.common import cred_provider
from tempest.common import fixed_network
from tempest import exceptions
from tempest.lib import auth
+from tempest.lib.common import cred_provider
from tempest.lib import exceptions as lib_exc
LOG = logging.getLogger(__name__)
@@ -35,7 +35,7 @@
with open(path, 'r') as yaml_file:
accounts = yaml.load(yaml_file)
except IOError:
- raise exceptions.InvalidConfiguration(
+ raise lib_exc.InvalidConfiguration(
'The path for the test accounts file: %s '
'could not be found' % path)
return accounts
diff --git a/tempest/common/utils/linux/remote_client.py b/tempest/common/utils/linux/remote_client.py
index 7cb9ebe..9ec217f 100644
--- a/tempest/common/utils/linux/remote_client.py
+++ b/tempest/common/utils/linux/remote_client.py
@@ -19,7 +19,6 @@
from oslo_log import log as logging
from tempest import config
-from tempest import exceptions
from tempest.lib.common import ssh
from tempest.lib.common.utils import test_utils
import tempest.lib.exceptions
@@ -218,8 +217,8 @@
supported_clients = ['udhcpc', 'dhclient']
dhcp_client = CONF.scenario.dhcp_client
if dhcp_client not in supported_clients:
- raise exceptions.InvalidConfiguration('%s DHCP client unsupported'
- % dhcp_client)
+ raise tempest.lib.exceptions.InvalidConfiguration(
+ '%s DHCP client unsupported' % dhcp_client)
if dhcp_client == 'udhcpc' and not fixed_ip:
raise ValueError("need to set 'fixed_ip' for udhcpc client")
return getattr(self, '_renew_lease_' + dhcp_client)(fixed_ip=fixed_ip)
diff --git a/tempest/services/volume/v1/json/volumes_client.py b/tempest/common/utils/net_info.py
similarity index 64%
rename from tempest/services/volume/v1/json/volumes_client.py
rename to tempest/common/utils/net_info.py
index 7782043..9b0a083 100644
--- a/tempest/services/volume/v1/json/volumes_client.py
+++ b/tempest/common/utils/net_info.py
@@ -1,4 +1,3 @@
-# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -12,9 +11,15 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import re
-from tempest.services.volume.base import base_volumes_client
+RE_OWNER = re.compile('^network:.*router_.*interface.*')
-class VolumesClient(base_volumes_client.BaseVolumesClient):
- """Client class to send CRUD Volume V1 API requests"""
+def _is_owner_router_interface(owner):
+ return bool(RE_OWNER.match(owner))
+
+
+def is_router_interface_port(port):
+ """Based on the port attributes determines is it a router interface."""
+ return _is_owner_router_interface(port['device_owner'])
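The regex covers both the legacy 'network:router_interface' owner and the
distributed/HA variants, which is why the callers above no longer need their
own startswith() checks. A quick usage sketch with hypothetical port dicts
shaped like Neutron's GET /v2.0/ports entries:

    from tempest.common.utils import net_info

    ports = [
        {'id': 'p1', 'device_owner': 'network:router_interface'},
        {'id': 'p2', 'device_owner': 'network:router_interface_distributed'},
        {'id': 'p3', 'device_owner': 'compute:nova'},
    ]
    router_ports = [p for p in ports if net_info.is_router_interface_port(p)]
    assert [p['id'] for p in router_ports] == ['p1', 'p2']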
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index fa951b5..c1942d6 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -221,7 +221,7 @@
body = client.show_backup(backup_id)['backup']
backup_status = body['status']
if backup_status == 'error' and backup_status != status:
- raise exceptions.VolumeBackupException(backup_id=backup_id)
+ raise lib_exc.VolumeBackupException(backup_id=backup_id)
if int(time.time()) - start >= client.build_timeout:
message = ('Volume backup %s failed to reach %s status '
diff --git a/tempest/config.py b/tempest/config.py
index d29e03a..8ce38f9 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -245,7 +245,7 @@
"projects. If multiple networks are available for a "
"project, this is the network which will be used for "
"creating servers if tempest does not create a network or "
- "s network is not specified elsewhere. It may be used for "
+ "a network is not specified elsewhere. It may be used for "
"ssh validation only if floating IPs are disabled."),
cfg.StrOpt('catalog_type',
default='compute',
@@ -318,7 +318,13 @@
help='A list of enabled compute extensions with a special '
'entry all which indicates every extension is enabled. '
'Each extension should be specified with alias name. '
- 'Empty list indicates all extensions are disabled'),
+ 'Empty list indicates all extensions are disabled',
+ deprecated_for_removal=True,
+ deprecated_reason='The Nova extensions API and mechanism '
+ 'is deprecated. This option will be '
+ 'removed when all releases supported '
+ 'by tempest no longer contain the Nova '
+ 'extensions API and mechanism.'),
cfg.BoolOpt('change_password',
default=False,
help="Does the test environment support changing the admin "
@@ -341,12 +347,10 @@
help="Does the test environment support suspend/resume?"),
cfg.BoolOpt('cold_migration',
default=True,
- help="Does the test environment support cold migration "
- "available?"),
+ help="Does the test environment support cold migration?"),
cfg.BoolOpt('live_migration',
default=True,
- help="Does the test environment support live migration "
- "available?"),
+ help="Does the test environment support live migration?"),
cfg.BoolOpt('metadata_service',
default=True,
help="Does the test environment support metadata service? "
@@ -895,34 +899,6 @@
help="Value must match heat configuration of the same name."),
]
-data_processing_group = cfg.OptGroup(name="data-processing",
- title="Data Processing options")
-
-DataProcessingGroup = [
- cfg.StrOpt('catalog_type',
- default='data-processing',
- deprecated_group="data_processing",
- help="Catalog type of the data processing service."),
- cfg.StrOpt('endpoint_type',
- default='publicURL',
- choices=['public', 'admin', 'internal',
- 'publicURL', 'adminURL', 'internalURL'],
- deprecated_group="data_processing",
- help="The endpoint type to use for the data processing "
- "service."),
-]
-
-
-data_processing_feature_group = cfg.OptGroup(
- name="data-processing-feature-enabled",
- title="Enabled Data Processing features")
-
-DataProcessingFeaturesGroup = [
- cfg.ListOpt('plugins',
- default=["vanilla", "cdh"],
- deprecated_group="data_processing-feature-enabled",
- help="List of enabled data processing plugins")
-]
stress_group = cfg.OptGroup(name='stress', title='Stress Test Options')
@@ -1169,8 +1145,6 @@
(object_storage_group, ObjectStoreGroup),
(object_storage_feature_group, ObjectStoreFeaturesGroup),
(orchestration_group, OrchestrationGroup),
- (data_processing_group, DataProcessingGroup),
- (data_processing_feature_group, DataProcessingFeaturesGroup),
(stress_group, StressGroup),
(scenario_group, ScenarioGroup),
(service_available_group, ServiceAvailableGroup),
@@ -1236,9 +1210,6 @@
self.object_storage_feature_enabled = _CONF[
'object-storage-feature-enabled']
self.orchestration = _CONF.orchestration
- self.data_processing = _CONF['data-processing']
- self.data_processing_feature_enabled = _CONF[
- 'data-processing-feature-enabled']
self.stress = _CONF.stress
self.scenario = _CONF.scenario
self.service_available = _CONF.service_available
diff --git a/tempest/exceptions.py b/tempest/exceptions.py
index da32693..727d54e 100644
--- a/tempest/exceptions.py
+++ b/tempest/exceptions.py
@@ -17,10 +17,6 @@
from tempest.lib import exceptions
-class InvalidConfiguration(exceptions.TempestException):
- message = "Invalid Configuration"
-
-
class InvalidServiceTag(exceptions.TempestException):
message = "Invalid service tag"
diff --git a/tempest/common/cred_provider.py b/tempest/lib/common/cred_provider.py
similarity index 100%
rename from tempest/common/cred_provider.py
rename to tempest/lib/common/cred_provider.py
diff --git a/tempest/lib/common/ssh.py b/tempest/lib/common/ssh.py
index c13f41a..4226cd6 100644
--- a/tempest/lib/common/ssh.py
+++ b/tempest/lib/common/ssh.py
@@ -36,9 +36,11 @@
class Client(object):
def __init__(self, host, username, password=None, timeout=300, pkey=None,
- channel_timeout=10, look_for_keys=False, key_filename=None):
+ channel_timeout=10, look_for_keys=False, key_filename=None,
+ port=22):
self.host = host
self.username = username
+ self.port = port
self.password = password
if isinstance(pkey, six.string_types):
pkey = paramiko.RSAKey.from_private_key(
@@ -58,17 +60,17 @@
paramiko.AutoAddPolicy())
_start_time = time.time()
if self.pkey is not None:
- LOG.info("Creating ssh connection to '%s' as '%s'"
+ LOG.info("Creating ssh connection to '%s:%d' as '%s'"
" with public key authentication",
- self.host, self.username)
+ self.host, self.port, self.username)
else:
- LOG.info("Creating ssh connection to '%s' as '%s'"
+ LOG.info("Creating ssh connection to '%s:%d' as '%s'"
" with password %s",
- self.host, self.username, str(self.password))
+ self.host, self.port, self.username, str(self.password))
attempts = 0
while True:
try:
- ssh.connect(self.host, username=self.username,
+ ssh.connect(self.host, port=self.port, username=self.username,
password=self.password,
look_for_keys=self.look_for_keys,
key_filename=self.key_filename,
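Because the new `port` keyword defaults to 22, existing callers are
unaffected. A sketch of connecting on a non-standard port, where the host,
user and key path are placeholders:

    from tempest.lib.common import ssh

    # Only the `port` keyword is new relative to the old constructor.
    client = ssh.Client('192.0.2.10', 'cirros',
                        key_filename='/path/to/key.pem', port=2222)
    output = client.exec_command('uname -a')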
diff --git a/tempest/lib/exceptions.py b/tempest/lib/exceptions.py
index a5c6b1b..a6c01bb 100644
--- a/tempest/lib/exceptions.py
+++ b/tempest/lib/exceptions.py
@@ -149,6 +149,10 @@
message = "Unexpected response code received"
+class InvalidConfiguration(TempestException):
+ message = "Invalid Configuration"
+
+
class InvalidIdentityVersion(TempestException):
message = "Invalid version %(identity_version)s of the identity service"
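With InvalidConfiguration now living in tempest.lib.exceptions (and removed
from tempest.exceptions above), out-of-tree consumers should import it from
the lib namespace. A minimal sketch of the new import path:

    from tempest.lib import exceptions as lib_exc

    def require_option(value, name):
        # Raising the lib copy keeps plugins decoupled from tempest
        # internals; `require_option` is a hypothetical helper.
        if not value:
            raise lib_exc.InvalidConfiguration('%s must be set' % name)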
diff --git a/tempest/lib/services/image/v2/namespaces_client.py b/tempest/lib/services/image/v2/namespaces_client.py
index c92ff3a..359ff1d 100644
--- a/tempest/lib/services/image/v2/namespaces_client.py
+++ b/tempest/lib/services/image/v2/namespaces_client.py
@@ -34,6 +34,18 @@
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
+ def list_namespaces(self):
+ """List namespaces
+
+ Available params: see http://developer.openstack.org/
+ api-ref/image/v2/metadefs-index.html#list-namespaces
+ """
+ url = 'metadefs/namespaces'
+ resp, body = self.get(url)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
def show_namespace(self, namespace):
"""Show namespace details.
diff --git a/tempest/services/volume/base/base_volumes_client.py b/tempest/lib/services/volume/v1/volumes_client.py
similarity index 98%
rename from tempest/services/volume/base/base_volumes_client.py
rename to tempest/lib/services/volume/v1/volumes_client.py
index 1cb1ef5..3df8da4 100644
--- a/tempest/services/volume/base/base_volumes_client.py
+++ b/tempest/lib/services/volume/v1/volumes_client.py
@@ -21,11 +21,9 @@
from tempest.lib import exceptions as lib_exc
-class BaseVolumesClient(rest_client.RestClient):
+class VolumesClient(rest_client.RestClient):
"""Base client class to send CRUD Volume API requests"""
- create_resp = 200
-
def _prepare_params(self, params):
"""Prepares params for use in get or _ext_get methods.
@@ -69,7 +67,7 @@
post_body = json.dumps({'volume': kwargs})
resp, body = self.post('volumes', post_body)
body = json.loads(body)
- self.expected_success(self.create_resp, resp.status)
+ self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def update_volume(self, volume_id, **kwargs):
diff --git a/tempest/services/volume/base/base_volumes_client.py b/tempest/lib/services/volume/v2/volumes_client.py
similarity index 84%
copy from tempest/services/volume/base/base_volumes_client.py
copy to tempest/lib/services/volume/v2/volumes_client.py
index 1cb1ef5..b1930e1 100644
--- a/tempest/services/volume/base/base_volumes_client.py
+++ b/tempest/lib/services/volume/v2/volumes_client.py
@@ -21,10 +21,9 @@
from tempest.lib import exceptions as lib_exc
-class BaseVolumesClient(rest_client.RestClient):
- """Base client class to send CRUD Volume API requests"""
-
- create_resp = 200
+class VolumesClient(rest_client.RestClient):
+ """Client class to send CRUD Volume V2 API requests"""
+ api_version = "v2"
def _prepare_params(self, params):
"""Prepares params for use in get or _ext_get methods.
@@ -69,7 +68,7 @@
post_body = json.dumps({'volume': kwargs})
resp, body = self.post('volumes', post_body)
body = json.loads(body)
- self.expected_success(self.create_resp, resp.status)
+ self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
def update_volume(self, volume_id, **kwargs):
@@ -293,3 +292,49 @@
post_body = json.dumps({'os-retype': kwargs})
resp, body = self.post('volumes/%s/action' % volume_id, post_body)
self.expected_success(202, resp.status)
+
+ def update_volume_image_metadata(self, volume_id, **kwargs):
+ """Update image metadata for the volume.
+
+ Available params: see http://developer.openstack.org/
+ api-ref-blockstorage-v2.html
+ #setVolumeimagemetadata
+ """
+ post_body = json.dumps({'os-set_image_metadata': {'metadata': kwargs}})
+ url = "volumes/%s/action" % (volume_id)
+ resp, body = self.post(url, post_body)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def delete_volume_image_metadata(self, volume_id, key_name):
+ """Delete image metadata item for the volume."""
+ post_body = json.dumps({'os-unset_image_metadata': {'key': key_name}})
+ url = "volumes/%s/action" % (volume_id)
+ resp, body = self.post(url, post_body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def show_pools(self, detail=False):
+ # List all the volumes pools (hosts)
+ url = 'scheduler-stats/get_pools'
+ if detail:
+ url += '?detail=True'
+
+ resp, body = self.get(url)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def show_backend_capabilities(self, host):
+ """Shows capabilities for a storage back end.
+
+ Output params: see http://developer.openstack.org/
+ api-ref-blockstorage-v2.html
+ #showBackendCapabilities
+ """
+ url = 'capabilities/%s' % host
+ resp, body = self.get(url)
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
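A hedged sketch of the v2-only additions; `volumes_client` is assumed to be a
constructed v2 VolumesClient and the volume UUID is a placeholder:

    vol_id = '<volume-uuid>'

    # Attach image metadata to the volume, then remove one key again.
    volumes_client.update_volume_image_metadata(vol_id, image_name='cirros')
    volumes_client.delete_volume_image_metadata(vol_id, 'image_name')

    # Scheduler pool stats; detail=True appends '?detail=True' to the URL.
    pools = volumes_client.show_pools(detail=True)['pools']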
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 283fba5..3a808ce 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -57,7 +57,7 @@
elif CONF.image_feature_enabled.api_v2:
cls.image_client = cls.manager.image_client_v2
else:
- raise exceptions.InvalidConfiguration(
+ raise lib_exc.InvalidConfiguration(
'Either api_v1 or api_v2 must be True in '
'[image-feature-enabled].')
# Compute image client
@@ -622,13 +622,24 @@
# method is creating the floating IP there.
return self.create_floating_ip(server)['ip']
elif CONF.validation.connect_method == 'fixed':
- addresses = server['addresses'][CONF.validation.network_for_ssh]
+ # Determine the network name to look for based on config or creds
+ # provider network resources.
+ if CONF.validation.network_for_ssh:
+ addresses = server['addresses'][
+ CONF.validation.network_for_ssh]
+ else:
+ creds_provider = self._get_credentials_provider()
+ net_creds = creds_provider.get_primary_creds()
+ network = getattr(net_creds, 'network', None)
+ addresses = (server['addresses'][network['name']]
+ if network else [])
for address in addresses:
- if address['version'] == CONF.validation.ip_version_for_ssh:
+ if (address['version'] == CONF.validation.ip_version_for_ssh
+ and address['OS-EXT-IPS:type'] == 'fixed'):
return address['addr']
raise exceptions.ServerUnreachable(server_id=server['id'])
else:
- raise exceptions.InvalidConfiguration()
+ raise lib_exc.InvalidConfiguration()
class NetworkScenarioTest(ScenarioTest):
@@ -1166,7 +1177,7 @@
# https://blueprints.launchpad.net/tempest/+spec/test-accounts
if not CONF.compute.fixed_network_name:
m = 'fixed_network_name must be specified in config'
- raise exceptions.InvalidConfiguration(m)
+ raise lib_exc.InvalidConfiguration(m)
network = self._get_network_by_name(
CONF.compute.fixed_network_name)
router = None
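The fixed-address lookup above now also filters on Nova's address type, so a
floating IP listed on the same network is no longer returned by mistake. A
self-contained sketch against a hypothetical server address list in Nova's
format:

    # Hypothetical entry from server['addresses'][network_name].
    addresses = [
        {'version': 4, 'addr': '172.24.4.5', 'OS-EXT-IPS:type': 'floating'},
        {'version': 4, 'addr': '10.0.0.7', 'OS-EXT-IPS:type': 'fixed'},
    ]
    ip_version_for_ssh = 4  # stands in for CONF.validation.ip_version_for_ssh

    fixed = next(addr['addr'] for addr in addresses
                 if addr['version'] == ip_version_for_ssh
                 and addr['OS-EXT-IPS:type'] == 'fixed')
    assert fixed == '10.0.0.7'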
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index 2c16be8..32f5d9f 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -17,6 +17,7 @@
from tempest import clients
from tempest.common.utils import data_utils
+from tempest.common.utils import net_info
from tempest import config
from tempest.scenario import manager
from tempest import test
@@ -247,16 +248,10 @@
myport = (tenant.router['id'], tenant.subnet['id'])
router_ports = [(i['device_id'], i['fixed_ips'][0]['subnet_id']) for i
in self._list_ports()
- if self._is_router_port(i)]
+ if net_info.is_router_interface_port(i)]
self.assertIn(myport, router_ports)
- def _is_router_port(self, port):
- """Return True if port is a router interface."""
- # NOTE(armando-migliaccio): match device owner for both centralized
- # and distributed routers; 'device_owner' is "" by default.
- return port['device_owner'].startswith('network:router_interface')
-
def _create_server(self, name, tenant, security_groups, **kwargs):
"""Creates a server and assigns it to security group.
diff --git a/tempest/scenario/test_server_multinode.py b/tempest/scenario/test_server_multinode.py
index b323d2a..333079c 100644
--- a/tempest/scenario/test_server_multinode.py
+++ b/tempest/scenario/test_server_multinode.py
@@ -15,7 +15,7 @@
from tempest import config
-from tempest import exceptions
+from tempest.lib import exceptions
from tempest.scenario import manager
from tempest import test
diff --git a/tempest/services/baremetal/v1/json/baremetal_client.py b/tempest/services/baremetal/v1/json/baremetal_client.py
index ede0d90..7405871 100644
--- a/tempest/services/baremetal/v1/json/baremetal_client.py
+++ b/tempest/services/baremetal/v1/json/baremetal_client.py
@@ -84,7 +84,7 @@
def show_node_by_instance_uuid(self, instance_uuid):
"""Gets a node associated with given instance uuid.
- :param uuid: Unique identifier of the node in UUID format.
+ :param instance_uuid: Unique identifier of the instance in UUID format.
:return: Serialized node as a dictionary.
"""
@@ -138,6 +138,7 @@
def create_node(self, chassis_id=None, **kwargs):
"""Create a baremetal node with the specified parameters.
+ :param chassis_id: The unique identifier of the chassis.
:param cpu_arch: CPU architecture of the node. Default: x86_64.
:param cpus: Number of CPUs. Default: 8.
:param local_gb: Disk size. Default: 1024.
@@ -269,7 +270,7 @@
"""Set power state of the specified node.
:param node_uuid: The unique identifier of the node.
- :state: desired state to set (on/off/reboot).
+ :param state: desired state to set (on/off/reboot).
"""
target = {'target': state}
@@ -280,7 +281,7 @@
def validate_driver_interface(self, node_uuid):
"""Get all driver interfaces of a specific node.
- :param uuid: Unique identifier of the node in UUID format.
+ :param node_uuid: Unique identifier of the node in UUID format.
"""
diff --git a/tempest/services/data_processing/__init__.py b/tempest/services/data_processing/__init__.py
deleted file mode 100644
index c49bc5c..0000000
--- a/tempest/services/data_processing/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2016 Hewlett-Packard Enterprise Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-from tempest.services.data_processing.v1_1.data_processing_client import \
- DataProcessingClient
-
-__all__ = ['DataProcessingClient']
diff --git a/tempest/services/data_processing/v1_1/__init__.py b/tempest/services/data_processing/v1_1/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tempest/services/data_processing/v1_1/__init__.py
+++ /dev/null
diff --git a/tempest/services/data_processing/v1_1/data_processing_client.py b/tempest/services/data_processing/v1_1/data_processing_client.py
deleted file mode 100644
index c74672f..0000000
--- a/tempest/services/data_processing/v1_1/data_processing_client.py
+++ /dev/null
@@ -1,280 +0,0 @@
-# Copyright (c) 2013 Mirantis Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_serialization import jsonutils as json
-
-from tempest.lib.common import rest_client
-
-
-class DataProcessingClient(rest_client.RestClient):
-
- def _request_and_check_resp(self, request_func, uri, resp_status):
- """Make a request and check response status code.
-
- It returns a ResponseBody.
- """
- resp, body = request_func(uri)
- self.expected_success(resp_status, resp.status)
- return rest_client.ResponseBody(resp, body)
-
- def _request_and_check_resp_data(self, request_func, uri, resp_status):
- """Make a request and check response status code.
-
- It returns pair: resp and response data.
- """
- resp, body = request_func(uri)
- self.expected_success(resp_status, resp.status)
- return resp, body
-
- def _request_check_and_parse_resp(self, request_func, uri,
- resp_status, *args, **kwargs):
- """Make a request, check response status code and parse response body.
-
- It returns a ResponseBody.
- """
- headers = {'Content-Type': 'application/json'}
- resp, body = request_func(uri, headers=headers, *args, **kwargs)
- self.expected_success(resp_status, resp.status)
- body = json.loads(body)
- return rest_client.ResponseBody(resp, body)
-
- def list_node_group_templates(self):
- """List all node group templates for a user."""
-
- uri = 'node-group-templates'
- return self._request_check_and_parse_resp(self.get, uri, 200)
-
- def get_node_group_template(self, tmpl_id):
- """Returns the details of a single node group template."""
-
- uri = 'node-group-templates/%s' % tmpl_id
- return self._request_check_and_parse_resp(self.get, uri, 200)
-
- def create_node_group_template(self, name, plugin_name, hadoop_version,
- node_processes, flavor_id,
- node_configs=None, **kwargs):
- """Creates node group template with specified params.
-
- It supports passing additional params using kwargs and returns created
- object.
- """
- uri = 'node-group-templates'
- body = kwargs.copy()
- body.update({
- 'name': name,
- 'plugin_name': plugin_name,
- 'hadoop_version': hadoop_version,
- 'node_processes': node_processes,
- 'flavor_id': flavor_id,
- 'node_configs': node_configs or dict(),
- })
- return self._request_check_and_parse_resp(self.post, uri, 202,
- body=json.dumps(body))
-
- def delete_node_group_template(self, tmpl_id):
- """Deletes the specified node group template by id."""
-
- uri = 'node-group-templates/%s' % tmpl_id
- return self._request_and_check_resp(self.delete, uri, 204)
-
- def list_plugins(self):
- """List all enabled plugins."""
-
- uri = 'plugins'
- return self._request_check_and_parse_resp(self.get, uri, 200)
-
- def get_plugin(self, plugin_name, plugin_version=None):
- """Returns the details of a single plugin."""
-
- uri = 'plugins/%s' % plugin_name
- if plugin_version:
- uri += '/%s' % plugin_version
- return self._request_check_and_parse_resp(self.get, uri, 200)
-
- def list_cluster_templates(self):
- """List all cluster templates for a user."""
-
- uri = 'cluster-templates'
- return self._request_check_and_parse_resp(self.get, uri, 200)
-
- def get_cluster_template(self, tmpl_id):
- """Returns the details of a single cluster template."""
-
- uri = 'cluster-templates/%s' % tmpl_id
- return self._request_check_and_parse_resp(self.get, uri, 200)
-
- def create_cluster_template(self, name, plugin_name, hadoop_version,
- node_groups, cluster_configs=None,
- **kwargs):
- """Creates cluster template with specified params.
-
- It supports passing additional params using kwargs and returns created
- object.
- """
- uri = 'cluster-templates'
- body = kwargs.copy()
- body.update({
- 'name': name,
- 'plugin_name': plugin_name,
- 'hadoop_version': hadoop_version,
- 'node_groups': node_groups,
- 'cluster_configs': cluster_configs or dict(),
- })
- return self._request_check_and_parse_resp(self.post, uri, 202,
- body=json.dumps(body))
-
- def delete_cluster_template(self, tmpl_id):
- """Deletes the specified cluster template by id."""
-
- uri = 'cluster-templates/%s' % tmpl_id
- return self._request_and_check_resp(self.delete, uri, 204)
-
- def list_data_sources(self):
- """List all data sources for a user."""
-
- uri = 'data-sources'
- return self._request_check_and_parse_resp(self.get, uri, 200)
-
- def get_data_source(self, source_id):
- """Returns the details of a single data source."""
-
- uri = 'data-sources/%s' % source_id
- return self._request_check_and_parse_resp(self.get, uri, 200)
-
- def create_data_source(self, name, data_source_type, url, **kwargs):
- """Creates data source with specified params.
-
- It supports passing additional params using kwargs and returns created
- object.
- """
- uri = 'data-sources'
- body = kwargs.copy()
- body.update({
- 'name': name,
- 'type': data_source_type,
- 'url': url
- })
- return self._request_check_and_parse_resp(self.post, uri,
- 202, body=json.dumps(body))
-
- def delete_data_source(self, source_id):
- """Deletes the specified data source by id."""
-
- uri = 'data-sources/%s' % source_id
- return self._request_and_check_resp(self.delete, uri, 204)
-
- def list_job_binary_internals(self):
- """List all job binary internals for a user."""
-
- uri = 'job-binary-internals'
- return self._request_check_and_parse_resp(self.get, uri, 200)
-
- def get_job_binary_internal(self, job_binary_id):
- """Returns the details of a single job binary internal."""
-
- uri = 'job-binary-internals/%s' % job_binary_id
- return self._request_check_and_parse_resp(self.get, uri, 200)
-
- def create_job_binary_internal(self, name, data):
- """Creates job binary internal with specified params."""
-
- uri = 'job-binary-internals/%s' % name
- return self._request_check_and_parse_resp(self.put, uri, 202, data)
-
- def delete_job_binary_internal(self, job_binary_id):
- """Deletes the specified job binary internal by id."""
-
- uri = 'job-binary-internals/%s' % job_binary_id
- return self._request_and_check_resp(self.delete, uri, 204)
-
- def get_job_binary_internal_data(self, job_binary_id):
- """Returns data of a single job binary internal."""
-
- uri = 'job-binary-internals/%s/data' % job_binary_id
- return self._request_and_check_resp_data(self.get, uri, 200)
-
- def list_job_binaries(self):
- """List all job binaries for a user."""
-
- uri = 'job-binaries'
- return self._request_check_and_parse_resp(self.get, uri, 200)
-
- def get_job_binary(self, job_binary_id):
- """Returns the details of a single job binary."""
-
- uri = 'job-binaries/%s' % job_binary_id
- return self._request_check_and_parse_resp(self.get, uri, 200)
-
- def create_job_binary(self, name, url, extra=None, **kwargs):
- """Creates job binary with specified params.
-
- It supports passing additional params using kwargs and returns created
- object.
- """
- uri = 'job-binaries'
- body = kwargs.copy()
- body.update({
- 'name': name,
- 'url': url,
- 'extra': extra or dict(),
- })
- return self._request_check_and_parse_resp(self.post, uri,
- 202, body=json.dumps(body))
-
- def delete_job_binary(self, job_binary_id):
- """Deletes the specified job binary by id."""
-
- uri = 'job-binaries/%s' % job_binary_id
- return self._request_and_check_resp(self.delete, uri, 204)
-
- def get_job_binary_data(self, job_binary_id):
- """Returns data of a single job binary."""
-
- uri = 'job-binaries/%s/data' % job_binary_id
- return self._request_and_check_resp_data(self.get, uri, 200)
-
- def list_jobs(self):
- """List all jobs for a user."""
-
- uri = 'jobs'
- return self._request_check_and_parse_resp(self.get, uri, 200)
-
- def get_job(self, job_id):
- """Returns the details of a single job."""
-
- uri = 'jobs/%s' % job_id
- return self._request_check_and_parse_resp(self.get, uri, 200)
-
- def create_job(self, name, job_type, mains, libs=None, **kwargs):
- """Creates job with specified params.
-
- It supports passing additional params using kwargs and returns created
- object.
- """
- uri = 'jobs'
- body = kwargs.copy()
- body.update({
- 'name': name,
- 'type': job_type,
- 'mains': mains,
- 'libs': libs or list(),
- })
- return self._request_check_and_parse_resp(self.post, uri,
- 202, body=json.dumps(body))
-
- def delete_job(self, job_id):
- """Deletes the specified job by id."""
-
- uri = 'jobs/%s' % job_id
- return self._request_and_check_resp(self.delete, uri, 204)
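With the removal above, Tempest no longer ships a data-processing (Sahara)
client, presumably in favour of the out-of-tree Sahara Tempest plugin. A
project that still needs an equivalent can build one on the stable
``tempest.lib`` interface. A hedged, minimal sketch (the class name is
hypothetical; the request pattern is the one from the deleted module):

    from oslo_serialization import jsonutils as json

    from tempest.lib.common import rest_client


    class MinimalDataProcessingClient(rest_client.RestClient):
        """Hypothetical stand-in; not part of Tempest itself."""

        def list_plugins(self):
            # GET /plugins and verify the expected 200 before parsing.
            resp, body = self.get('plugins')
            self.expected_success(200, resp.status)
            return rest_client.ResponseBody(resp, json.loads(body))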
diff --git a/tempest/services/volume/v1/__init__.py b/tempest/services/volume/v1/__init__.py
index 376ab72..7fb3ed3 100644
--- a/tempest/services/volume/v1/__init__.py
+++ b/tempest/services/volume/v1/__init__.py
@@ -24,7 +24,7 @@
from tempest.lib.services.volume.v1.services_client import ServicesClient
from tempest.lib.services.volume.v1.snapshots_client import SnapshotsClient
from tempest.lib.services.volume.v1.types_client import TypesClient
-from tempest.services.volume.v1.json.volumes_client import VolumesClient
+from tempest.lib.services.volume.v1.volumes_client import VolumesClient
__all__ = ['AvailabilityZoneClient', 'EncryptionTypesClient',
'ExtensionsClient', 'HostsClient', 'QuotasClient',
diff --git a/tempest/services/volume/v2/__init__.py b/tempest/services/volume/v2/__init__.py
index 5774977..8edaf2a 100644
--- a/tempest/services/volume/v2/__init__.py
+++ b/tempest/services/volume/v2/__init__.py
@@ -24,7 +24,7 @@
from tempest.lib.services.volume.v2.services_client import ServicesClient
from tempest.lib.services.volume.v2.snapshots_client import SnapshotsClient
from tempest.lib.services.volume.v2.types_client import TypesClient
-from tempest.services.volume.v2.json.volumes_client import VolumesClient
+from tempest.lib.services.volume.v2.volumes_client import VolumesClient
__all__ = ['AvailabilityZoneClient', 'BackupsClient', 'EncryptionTypesClient',
'ExtensionsClient', 'HostsClient', 'QosSpecsClient', 'QuotasClient',
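After these two one-line changes, both volume ``__all__`` lists resolve
entirely to ``tempest.lib``, so the volume clients are now exposed as stable
library interfaces. External consumers can import them directly; the paths
below are taken verbatim from the hunks above:

    from tempest.lib.services.volume.v1.volumes_client import \
        VolumesClient as VolumesClientV1
    from tempest.lib.services.volume.v2.volumes_client import \
        VolumesClient as VolumesClientV2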
diff --git a/tempest/services/volume/v2/json/volumes_client.py b/tempest/services/volume/v2/json/volumes_client.py
deleted file mode 100644
index f21a1a3..0000000
--- a/tempest/services/volume/v2/json/volumes_client.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_serialization import jsonutils as json
-
-from tempest.lib.common import rest_client
-from tempest.services.volume.base import base_volumes_client
-
-
-class VolumesClient(base_volumes_client.BaseVolumesClient):
- """Client class to send CRUD Volume V2 API requests"""
- api_version = "v2"
- create_resp = 202
-
- def update_volume_image_metadata(self, volume_id, **kwargs):
- """Update image metadata for the volume.
-
- Available params: see http://developer.openstack.org/
- api-ref-blockstorage-v2.html
- #setVolumeimagemetadata
- """
- post_body = json.dumps({'os-set_image_metadata': {'metadata': kwargs}})
- url = "volumes/%s/action" % (volume_id)
- resp, body = self.post(url, post_body)
- body = json.loads(body)
- self.expected_success(200, resp.status)
- return rest_client.ResponseBody(resp, body)
-
- def delete_volume_image_metadata(self, volume_id, key_name):
- """Delete image metadata item for the volume."""
- post_body = json.dumps({'os-unset_image_metadata': {'key': key_name}})
- url = "volumes/%s/action" % (volume_id)
- resp, body = self.post(url, post_body)
- self.expected_success(200, resp.status)
- return rest_client.ResponseBody(resp, body)
-
- def show_pools(self, detail=False):
- # List all the volumes pools (hosts)
- url = 'scheduler-stats/get_pools'
- if detail:
- url += '?detail=True'
-
- resp, body = self.get(url)
- body = json.loads(body)
- self.expected_success(200, resp.status)
- return rest_client.ResponseBody(resp, body)
-
- def show_backend_capabilities(self, host):
- """Shows capabilities for a storage back end.
-
- Output params: see http://developer.openstack.org/
- api-ref-blockstorage-v2.html
- #showBackendCapabilities
- """
- url = 'capabilities/%s' % host
- resp, body = self.get(url)
- body = json.loads(body)
- self.expected_success(200, resp.status)
- return rest_client.ResponseBody(resp, body)
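The module deleted above was moved, not dropped: the same methods are
expected to be available from ``tempest.lib.services.volume.v2``. A hedged
usage sketch (``auth_provider`` and ``volume_id`` are placeholders;
constructor arguments follow ``tempest.lib.common.rest_client.RestClient``):

    from tempest.lib.services.volume.v2 import volumes_client

    # auth_provider: a pre-built tempest.lib auth provider (placeholder).
    client = volumes_client.VolumesClient(auth_provider, 'volumev2',
                                          'RegionOne')
    # List back-end pools with per-pool details, as show_pools() above did.
    pools = client.show_pools(detail=True)
    client.update_volume_image_metadata(volume_id, hw_disk_bus='scsi')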
diff --git a/tempest/test.py b/tempest/test.py
index 609f1f6..6dc065c 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -16,28 +16,21 @@
import atexit
import functools
import os
-import re
import sys
import debtcollector.moves
import fixtures
from oslo_log import log as logging
-from oslo_serialization import jsonutils as json
-from oslo_utils import importutils
import six
-from six.moves import urllib
-import testscenarios
import testtools
from tempest import clients
from tempest.common import cred_client
from tempest.common import credentials_factory as credentials
from tempest.common import fixed_network
-import tempest.common.generator.valid_generator as valid
import tempest.common.validation_resources as vresources
from tempest import config
from tempest import exceptions
-from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
@@ -649,224 +642,6 @@
self.assertTrue(len(list) > 0, msg)
-class NegativeAutoTest(BaseTestCase):
-
- _resources = {}
-
- @classmethod
- def setUpClass(cls):
- super(NegativeAutoTest, cls).setUpClass()
- os = cls.get_client_manager(credential_type='primary')
- cls.client = os.negative_client
-
- @staticmethod
- def load_tests(*args):
- """Wrapper for testscenarios
-
- To set the mandatory scenarios variable only in case a real test
- loader is in place. Will be automatically called in case the variable
- "load_tests" is set.
- """
- if getattr(args[0], 'suiteClass', None) is not None:
- loader, standard_tests, pattern = args
- else:
- standard_tests, module, loader = args
- for test in testtools.iterate_tests(standard_tests):
- schema = getattr(test, '_schema', None)
- if schema is not None:
- setattr(test, 'scenarios',
- NegativeAutoTest.generate_scenario(schema))
- return testscenarios.load_tests_apply_scenarios(*args)
-
- @staticmethod
- def generate_scenario(description):
- """Generates the test scenario list for a given description.
-
- :param description: A file or dictionary with the following entries:
- name (required) name for the api
- http-method (required) one of HEAD,GET,PUT,POST,PATCH,DELETE
- url (required) the url to be appended to the catalog url with '%s'
- for each resource mentioned
- resources: (optional) A list of resource names such as "server",
- "flavor", etc. with an element for each '%s' in the url. This
- method will call self.get_resource for each element when
- constructing the positive test case template so negative
- subclasses are expected to return valid resource ids when
- appropriate.
- json-schema (optional) A valid json schema that will be used to
- create invalid data for the api calls. For "GET" and "HEAD",
- the data is used to generate query strings appended to the url,
- otherwise for the body of the http call.
- """
- LOG.debug(description)
- generator = importutils.import_class(
- CONF.negative.test_generator)()
- generator.validate_schema(description)
- schema = description.get("json-schema", None)
- resources = description.get("resources", [])
- scenario_list = []
- expected_result = None
- for resource in resources:
- if isinstance(resource, dict):
- expected_result = resource['expected_result']
- resource = resource['name']
- LOG.debug("Add resource to test %s" % resource)
- scn_name = "inv_res_%s" % (resource)
- scenario_list.append((scn_name, {
- "resource": (resource, data_utils.rand_uuid()),
- "expected_result": expected_result
- }))
- if schema is not None:
- for scenario in generator.generate_scenarios(schema):
- scenario_list.append((scenario['_negtest_name'],
- scenario))
- LOG.debug(scenario_list)
- return scenario_list
-
- def execute(self, description):
- """Execute a http call
-
- Execute a http call on an api that are expected to
- result in client errors. First it uses invalid resources that are part
- of the url, and then invalid data for queries and http request bodies.
-
- :param description: A json file or dictionary with the following
- entries:
- name (required) name for the api
- http-method (required) one of HEAD,GET,PUT,POST,PATCH,DELETE
- url (required) the url to be appended to the catalog url with '%s'
- for each resource mentioned
- resources: (optional) A list of resource names such as "server",
- "flavor", etc. with an element for each '%s' in the url. This
- method will call self.get_resource for each element when
- constructing the positive test case template so negative
- subclasses are expected to return valid resource ids when
- appropriate.
- json-schema (optional) A valid json schema that will be used to
- create invalid data for the api calls. For "GET" and "HEAD",
- the data is used to generate query strings appended to the url,
- otherwise for the body of the http call.
-
- """
- LOG.info("Executing %s" % description["name"])
- LOG.debug(description)
- generator = importutils.import_class(
- CONF.negative.test_generator)()
- schema = description.get("json-schema", None)
- method = description["http-method"]
- url = description["url"]
- expected_result = None
- if "default_result_code" in description:
- expected_result = description["default_result_code"]
-
- resources = [self.get_resource(r) for
- r in description.get("resources", [])]
-
- if hasattr(self, "resource"):
- # Note(mkoderer): The resources list already contains an invalid
- # entry (see get_resource).
- # We just send a valid json-schema with it
- valid_schema = None
- if schema:
- valid_schema = \
- valid.ValidTestGenerator().generate_valid(schema)
- new_url, body = self._http_arguments(valid_schema, url, method)
- elif hasattr(self, "_negtest_name"):
- schema_under_test = \
- valid.ValidTestGenerator().generate_valid(schema)
- local_expected_result = \
- generator.generate_payload(self, schema_under_test)
- if local_expected_result is not None:
- expected_result = local_expected_result
- new_url, body = \
- self._http_arguments(schema_under_test, url, method)
- else:
- raise Exception("testscenarios are not active. Please make sure "
- "that your test runner supports the load_tests "
- "mechanism")
-
- if "admin_client" in description and description["admin_client"]:
- if not credentials.is_admin_available(
- identity_version=self.get_identity_version()):
- msg = ("Missing Identity Admin API credentials in"
- "configuration.")
- raise self.skipException(msg)
- creds = self.credentials_provider.get_admin_creds()
- os_adm = clients.Manager(credentials=creds)
- client = os_adm.negative_client
- else:
- client = self.client
- resp, resp_body = client.send_request(method, new_url,
- resources, body=body)
- self._check_negative_response(expected_result, resp.status, resp_body)
-
- def _http_arguments(self, json_dict, url, method):
- LOG.debug("dict: %s url: %s method: %s" % (json_dict, url, method))
- if not json_dict:
- return url, None
- elif method in ["GET", "HEAD", "PUT", "DELETE"]:
- return "%s?%s" % (url, urllib.parse.urlencode(json_dict)), None
- else:
- return url, json.dumps(json_dict)
-
- def _check_negative_response(self, expected_result, result, body):
- self.assertTrue(result >= 400 and result < 500 and result != 413,
- "Expected client error, got %s:%s" %
- (result, body))
- self.assertTrue(expected_result is None or expected_result == result,
- "Expected %s, got %s:%s" %
- (expected_result, result, body))
-
- @classmethod
- def set_resource(cls, name, resource):
- """Register a resource for a test
-
- This function can be used in setUpClass context to register a resource
- for a test.
-
- :param name: The name of the kind of resource such as "flavor", "role",
- etc.
- :resource: The id of the resource
- """
- cls._resources[name] = resource
-
- def get_resource(self, name):
- """Return a valid uuid for a type of resource.
-
- If a real resource is needed as part of a url then this method should
- return one. Otherwise it can return None.
-
- :param name: The name of the kind of resource such as "flavor", "role",
- etc.
- """
- if isinstance(name, dict):
- name = name['name']
- if hasattr(self, "resource") and self.resource[0] == name:
- LOG.debug("Return invalid resource (%s) value: %s" %
- (self.resource[0], self.resource[1]))
- return self.resource[1]
- if name in self._resources:
- return self._resources[name]
- return None
-
-
-def SimpleNegativeAutoTest(klass):
- """This decorator registers a test function on basis of the class name."""
- @attr(type=['negative'])
- def generic_test(self):
- if hasattr(self, '_schema'):
- self.execute(self._schema)
-
- cn = klass.__name__
- cn = cn.replace('JSON', '')
- cn = cn.replace('Test', '')
- # NOTE(mkoderer): replaces uppercase chars inside the class name with '_'
- lower_cn = re.sub('(?<!^)(?=[A-Z])', '_', cn).lower()
- func_name = 'test_%s' % lower_cn
- setattr(klass, func_name, generic_test)
- return klass
-
-
call_until_true = debtcollector.moves.moved_function(
test_utils.call_until_true, 'call_until_true', __name__,
version='Newton', removal_version='Ocata')
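The ``debtcollector.moves.moved_function`` context kept at the end of this
hunk is the mechanism that lets ``call_until_true`` survive as a deprecated
alias while the implementation lives in ``tempest.lib``. A self-contained
sketch of the same pattern (``new_home`` and ``old_name`` are invented names):

    import warnings

    import debtcollector.moves


    def new_home(x):
        return x * 2

    old_name = debtcollector.moves.moved_function(
        new_home, 'old_name', __name__,
        version='Newton', removal_version='Ocata')

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        assert old_name(2) == 4  # still delegates to new_home
        assert issubclass(caught[0].category, DeprecationWarning)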
diff --git a/tempest/tests/cmd/test_run.py b/tempest/tests/cmd/test_run.py
index 772391f..7ac347d 100644
--- a/tempest/tests/cmd/test_run.py
+++ b/tempest/tests/cmd/test_run.py
@@ -18,6 +18,7 @@
import subprocess
import tempfile
+import fixtures
import mock
from tempest.cmd import run
@@ -122,3 +123,32 @@
# too.
subprocess.call(['git', 'init'], stderr=DEVNULL)
self.assertRunExit(['tempest', 'run'], 1)
+
+
+class TestTakeAction(base.TestCase):
+ def test_workspace_not_registered(self):
+ class Exception_(Exception):
+ pass
+
+ m_exit = self.useFixture(fixtures.MockPatch('sys.exit')).mock
+ # sys.exit must raise here rather than return (or really exit).
+ m_exit.side_effect = Exception_
+
+ workspace = self.getUniqueString()
+
+ tempest_run = run.TempestRun(app=mock.Mock(), app_args=mock.Mock())
+ parsed_args = mock.Mock()
+ parsed_args.config_file = []
+
+ # Override $HOME so that the empty workspace gets created in a temp dir.
+ self.useFixture(fixtures.TempHomeDir())
+
+ # Force use of the temporary home directory.
+ parsed_args.workspace_path = None
+
+ # Simulate --workspace argument.
+ parsed_args.workspace = workspace
+
+ self.assertRaises(Exception_, tempest_run.take_action, parsed_args)
+ exit_msg = m_exit.call_args[0][0]
+ self.assertIn(workspace, exit_msg)
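The fixture pattern in this new test is worth isolating: ``sys.exit`` is
patched so the code under test raises instead of terminating the test
runner, and the raised marker type proves the exit path was taken. A
standalone sketch under the same assumptions (``fixtures`` plus
``testtools``, as used above):

    import sys

    import fixtures
    import testtools


    class ExitPatternTest(testtools.TestCase):
        def test_exit_is_intercepted(self):
            class Stop(Exception):
                pass

            m_exit = self.useFixture(fixtures.MockPatch('sys.exit')).mock
            m_exit.side_effect = Stop  # raise rather than really exit
            self.assertRaises(Stop, sys.exit, 'some message')
            m_exit.assert_called_once_with('some message')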
diff --git a/tempest/tests/common/test_dynamic_creds.py b/tempest/tests/common/test_dynamic_creds.py
index 0033d4e..a90ca8a 100644
--- a/tempest/tests/common/test_dynamic_creds.py
+++ b/tempest/tests/common/test_dynamic_creds.py
@@ -19,7 +19,6 @@
from tempest.common import credentials_factory as credentials
from tempest.common import dynamic_creds
from tempest import config
-from tempest import exceptions
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
from tempest.lib.services.identity.v2 import identity_client as v2_iden_client
@@ -176,7 +175,6 @@
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_primary_creds(self, MockRestClient):
- cfg.CONF.set_default('neutron', False, 'service_available')
creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
self._mock_assign_user_role()
self._mock_list_role()
@@ -191,7 +189,6 @@
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_admin_creds(self, MockRestClient):
- cfg.CONF.set_default('neutron', False, 'service_available')
creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
self._mock_list_roles('1234', 'admin')
self._mock_user_create('1234', 'fake_admin_user')
@@ -214,7 +211,6 @@
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_role_creds(self, MockRestClient):
- cfg.CONF.set_default('neutron', False, 'service_available')
creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
self._mock_list_2_roles()
self._mock_user_create('1234', 'fake_role_user')
@@ -243,7 +239,6 @@
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_all_cred_cleanup(self, MockRestClient):
- cfg.CONF.set_default('neutron', False, 'service_available')
creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
self._mock_assign_user_role()
self._mock_list_role()
@@ -281,7 +276,6 @@
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_alt_creds(self, MockRestClient):
- cfg.CONF.set_default('neutron', False, 'service_available')
creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
self._mock_assign_user_role()
self._mock_list_role()
@@ -296,8 +290,10 @@
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_no_network_creation_with_config_set(self, MockRestClient):
- cfg.CONF.set_default('create_isolated_networks', False, group='auth')
- creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+ creds = dynamic_creds.DynamicCredentialProvider(
+ neutron_available=True, create_networks=False,
+ project_network_cidr='10.100.0.0/16', project_network_mask_bits=28,
+ **self.fixed_params)
self._mock_assign_user_role()
self._mock_list_role()
self._mock_user_create('1234', 'fake_prim_user')
@@ -325,7 +321,10 @@
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_network_creation(self, MockRestClient):
- creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+ creds = dynamic_creds.DynamicCredentialProvider(
+ neutron_available=True,
+ project_network_cidr='10.100.0.0/16', project_network_mask_bits=28,
+ **self.fixed_params)
self._mock_assign_user_role()
self._mock_list_role()
self._mock_user_create('1234', 'fake_prim_user')
@@ -356,7 +355,10 @@
"description": args['name'],
"security_group_rules": [],
"id": "sg-%s" % args['tenant_id']}]}
- creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+ creds = dynamic_creds.DynamicCredentialProvider(
+ neutron_available=True,
+ project_network_cidr='10.100.0.0/16', project_network_mask_bits=28,
+ **self.fixed_params)
# Create primary tenant and network
self._mock_assign_user_role()
self._mock_list_role()
@@ -460,7 +462,10 @@
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_network_alt_creation(self, MockRestClient):
- creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+ creds = dynamic_creds.DynamicCredentialProvider(
+ neutron_available=True,
+ project_network_cidr='10.100.0.0/16', project_network_mask_bits=28,
+ **self.fixed_params)
self._mock_assign_user_role()
self._mock_list_role()
self._mock_user_create('1234', 'fake_alt_user')
@@ -485,7 +490,10 @@
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_network_admin_creation(self, MockRestClient):
- creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+ creds = dynamic_creds.DynamicCredentialProvider(
+ neutron_available=True,
+ project_network_cidr='10.100.0.0/16', project_network_mask_bits=28,
+ **self.fixed_params)
self._mock_assign_user_role()
self._mock_user_create('1234', 'fake_admin_user')
self._mock_tenant_create('1234', 'fake_admin_tenant')
@@ -517,6 +525,8 @@
'dhcp': False,
}
creds = dynamic_creds.DynamicCredentialProvider(
+ neutron_available=True,
+ project_network_cidr='10.100.0.0/16', project_network_mask_bits=28,
network_resources=net_dict,
**self.fixed_params)
self._mock_assign_user_role()
@@ -553,13 +563,15 @@
'dhcp': False,
}
creds = dynamic_creds.DynamicCredentialProvider(
+ neutron_available=True,
+ project_network_cidr='10.100.0.0/16', project_network_mask_bits=28,
network_resources=net_dict,
**self.fixed_params)
self._mock_assign_user_role()
self._mock_list_role()
self._mock_user_create('1234', 'fake_prim_user')
self._mock_tenant_create('1234', 'fake_prim_tenant')
- self.assertRaises(exceptions.InvalidConfiguration,
+ self.assertRaises(lib_exc.InvalidConfiguration,
creds.get_primary_creds)
@mock.patch('tempest.lib.common.rest_client.RestClient')
@@ -571,13 +583,15 @@
'dhcp': False,
}
creds = dynamic_creds.DynamicCredentialProvider(
+ neutron_available=True,
+ project_network_cidr='10.100.0.0/16', project_network_mask_bits=28,
network_resources=net_dict,
**self.fixed_params)
self._mock_assign_user_role()
self._mock_list_role()
self._mock_user_create('1234', 'fake_prim_user')
self._mock_tenant_create('1234', 'fake_prim_tenant')
- self.assertRaises(exceptions.InvalidConfiguration,
+ self.assertRaises(lib_exc.InvalidConfiguration,
creds.get_primary_creds)
@mock.patch('tempest.lib.common.rest_client.RestClient')
@@ -589,13 +603,15 @@
'dhcp': True,
}
creds = dynamic_creds.DynamicCredentialProvider(
+ neutron_available=True,
+ project_network_cidr='10.100.0.0/16', project_network_mask_bits=28,
network_resources=net_dict,
**self.fixed_params)
self._mock_assign_user_role()
self._mock_list_role()
self._mock_user_create('1234', 'fake_prim_user')
self._mock_tenant_create('1234', 'fake_prim_tenant')
- self.assertRaises(exceptions.InvalidConfiguration,
+ self.assertRaises(lib_exc.InvalidConfiguration,
creds.get_primary_creds)
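The recurring edit in this file replaces ``cfg.CONF.set_default(...)`` calls
with explicit constructor arguments, so each test injects its network
behaviour instead of mutating global config that can leak between tests.
The shape, extracted directly from the hunks above:

    creds = dynamic_creds.DynamicCredentialProvider(
        neutron_available=True,            # exercise the Neutron path
        create_networks=False,             # but skip isolated networks
        project_network_cidr='10.100.0.0/16',
        project_network_mask_bits=28,
        **self.fixed_params)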
diff --git a/tempest/tests/common/test_preprov_creds.py b/tempest/tests/common/test_preprov_creds.py
index 13d4713..f824b6c 100644
--- a/tempest/tests/common/test_preprov_creds.py
+++ b/tempest/tests/common/test_preprov_creds.py
@@ -23,10 +23,10 @@
import shutil
import six
-from tempest.common import cred_provider
from tempest.common import preprov_creds
from tempest import config
from tempest.lib import auth
+from tempest.lib.common import cred_provider
from tempest.lib import exceptions as lib_exc
from tempest.tests import base
from tempest.tests import fake_config
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index a56f837..a826337 100644
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -18,7 +18,7 @@
from tempest.common import waiters
from tempest import exceptions
-from tempest.services.volume.base import base_volumes_client
+from tempest.lib.services.volume.v2 import volumes_client
from tempest.tests import base
import tempest.tests.utils as utils
@@ -57,7 +57,7 @@
def test_wait_for_volume_status_error_restoring(self, mock_sleep):
# Tests that the wait method raises VolumeRestoreErrorException if
# the volume status is 'error_restoring'.
- client = mock.Mock(spec=base_volumes_client.BaseVolumesClient,
+ client = mock.Mock(spec=volumes_client.VolumesClient,
build_interval=1)
volume1 = {'volume': {'status': 'restoring-backup'}}
volume2 = {'volume': {'status': 'error_restoring'}}
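Using ``spec=`` here does more than update an import: a specced ``Mock``
only exposes attributes the real client defines, so this waiter test fails
loudly if the client class is renamed or its API changes again. A small
sketch (``show_volume`` is assumed to exist on the lib client, since the
waiter polls it):

    import mock

    from tempest.lib.services.volume.v2 import volumes_client

    client = mock.Mock(spec=volumes_client.VolumesClient, build_interval=1)
    client.show_volume.return_value = {'volume': {'status': 'available'}}
    # Unlike a bare Mock, a typo such as client.show_volumes_list would
    # raise AttributeError immediately.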
diff --git a/tempest/tests/lib/test_ssh.py b/tempest/tests/lib/test_ssh.py
index b07f6bc..8a0a84c 100644
--- a/tempest/tests/lib/test_ssh.py
+++ b/tempest/tests/lib/test_ssh.py
@@ -69,6 +69,7 @@
mock.sentinel.aa)
expected_connect = [mock.call(
'localhost',
+ port=22,
username='root',
pkey=None,
key_filename=None,
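The new ``port=22`` expectation matches paramiko's actual call signature,
``SSHClient.connect(hostname, port=22, username=None, ...)``, suggesting the
ssh client under test now forwards the port explicitly. A sketch of the
asserted call shape (this will raise unless an SSH daemon is listening
locally):

    import paramiko

    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect('localhost', port=22, username='root',
                pkey=None, key_filename=None)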
diff --git a/tempest/tests/negative/test_negative_auto_test.py b/tempest/tests/negative/test_negative_auto_test.py
deleted file mode 100644
index 44ce567..0000000
--- a/tempest/tests/negative/test_negative_auto_test.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# Copyright 2014 Deutsche Telekom AG
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest import config
-import tempest.test as test
-from tempest.tests import base
-from tempest.tests import fake_config
-
-
-class TestNegativeAutoTest(base.TestCase):
- # Fake entries
- _service = 'compute'
-
- fake_input_desc = {"name": "list-flavors-with-detail",
- "http-method": "GET",
- "url": "flavors/detail",
- "json-schema": {"type": "object",
- "properties":
- {"minRam": {"type": "integer"},
- "minDisk": {"type": "integer"}}
- },
- "resources": ["flavor", "volume", "image"]
- }
-
- def setUp(self):
- super(TestNegativeAutoTest, self).setUp()
- self.useFixture(fake_config.ConfigFixture())
- self.patchobject(config, 'TempestConfigPrivate',
- fake_config.FakePrivate)
-
- def _check_prop_entries(self, result, entry):
- entries = [a for a in result if entry in a[0]]
- self.assertIsNotNone(entries)
- self.assertGreater(len(entries), 1)
- for entry in entries:
- self.assertIsNotNone(entry[1]['_negtest_name'])
-
- def _check_resource_entries(self, result, entry):
- entries = [a for a in result if entry in a[0]]
- self.assertIsNotNone(entries)
- self.assertIs(len(entries), 3)
- for entry in entries:
- self.assertIsNotNone(entry[1]['resource'])
-
- def test_generate_scenario(self):
- scenarios = test.NegativeAutoTest.\
- generate_scenario(self.fake_input_desc)
- self.assertIsInstance(scenarios, list)
- for scenario in scenarios:
- self.assertIsInstance(scenario, tuple)
- self.assertIsInstance(scenario[0], str)
- self.assertIsInstance(scenario[1], dict)
- self._check_prop_entries(scenarios, "minRam")
- self._check_prop_entries(scenarios, "minDisk")
- self._check_resource_entries(scenarios, "inv_res")
diff --git a/tempest/tests/test_decorators.py b/tempest/tests/test_decorators.py
index 8c5d861..17dbea0 100644
--- a/tempest/tests/test_decorators.py
+++ b/tempest/tests/test_decorators.py
@@ -12,7 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
from oslo_config import cfg
from oslotest import mockpatch
import testtools
@@ -232,22 +231,6 @@
service='bad_service')
-class TestSimpleNegativeDecorator(BaseDecoratorsTest):
- @test.SimpleNegativeAutoTest
- class FakeNegativeJSONTest(test.NegativeAutoTest):
- _schema = {}
-
- def test_testfunc_exist(self):
- self.assertIn("test_fake_negative", dir(self.FakeNegativeJSONTest))
-
- @mock.patch('tempest.test.NegativeAutoTest.execute')
- def test_testfunc_calls_execute(self, mock):
- obj = self.FakeNegativeJSONTest("test_fake_negative")
- self.assertIn("test_fake_negative", dir(obj))
- obj.test_fake_negative()
- mock.assert_called_once_with(self.FakeNegativeJSONTest._schema)
-
-
class TestConfigDecorators(BaseDecoratorsTest):
def setUp(self):
super(TestConfigDecorators, self).setUp()