Merge "test update volume type extra specs for non existent volume type"
diff --git a/playbooks/devstack-tempest-ipv6.yaml b/playbooks/devstack-tempest-ipv6.yaml
index 4788362..568077e 100644
--- a/playbooks/devstack-tempest-ipv6.yaml
+++ b/playbooks/devstack-tempest-ipv6.yaml
@@ -15,5 +15,18 @@
# IPv6 only env for example Devstack IPv6 settings and services listen
# address is IPv6 etc. This is invoked before tests are run so that we can
# fail early if anything missing the IPv6 settings or deployments.
- - ipv6-only-deployments-verification
- - run-tempest
+ - devstack-ipv6-only-deployments-verification
+ tasks:
+ - name: Run Tempest version <= 26.0.0
+ include_role:
+ name: run-tempest-26
+ when:
+ - zuul.branch is defined
+ - zuul.branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"]
+
+ - name: Run Tempest
+ include_role:
+ name: run-tempest
+ when:
+ - zuul.branch is defined
+ - zuul.branch not in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"]
diff --git a/playbooks/devstack-tempest.yaml b/playbooks/devstack-tempest.yaml
index 3b969f2..269999c 100644
--- a/playbooks/devstack-tempest.yaml
+++ b/playbooks/devstack-tempest.yaml
@@ -29,9 +29,17 @@
(run_tempest_cleanup is defined and run_tempest_cleanup | bool) or
(run_tempest_fail_if_leaked_resources is defined and run_tempest_fail_if_leaked_resources | bool)
+ - name: Run Tempest version <= 26.0.0
+ include_role:
+ name: run-tempest-26
+ when: (zuul.branch is defined and zuul.branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"]) or
+ (zuul.override_checkout is defined and zuul.override_checkout in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"])
+
- name: Run Tempest
include_role:
name: run-tempest
+ when: (zuul.branch is defined and zuul.branch not in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"] and zuul.override_checkout is not defined) or
+ (zuul.override_checkout is defined and zuul.override_checkout not in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"])
- name: Run tempest cleanup dry-run
include_role:
diff --git a/releasenotes/notes/Add-volume_size_extend-config--opt-for-volume-tests-041f7d25fc2f3e05.yaml b/releasenotes/notes/Add-volume_size_extend-config--opt-for-volume-tests-041f7d25fc2f3e05.yaml
new file mode 100644
index 0000000..8069bd3
--- /dev/null
+++ b/releasenotes/notes/Add-volume_size_extend-config--opt-for-volume-tests-041f7d25fc2f3e05.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+ Adding new config option for volume tests which allows to specify the size
+ a volume will be extended by (if a test does extend a volume or needs
+ a new bigger volume). The option is beneficial in case such tests are
+ executed on systems where the chunk size (the minimum size a volume can be
+ extended by) is other than 1 (originally hardcoded in the tests):
+
+ CONF.volume.volume_size_extend
diff --git a/releasenotes/notes/add-qos-apis-for-bandwidth-limit-rules-cc144660fcaa419a.yaml b/releasenotes/notes/add-qos-apis-for-bandwidth-limit-rules-cc144660fcaa419a.yaml
new file mode 100644
index 0000000..da58ba3
--- /dev/null
+++ b/releasenotes/notes/add-qos-apis-for-bandwidth-limit-rules-cc144660fcaa419a.yaml
@@ -0,0 +1,11 @@
+---
+features:
+ - |
+ Add "QoS bandwidth limit rules" APIs to:
+ "tempest/lib/services/network/qos_limit_bandwidth_rules_client.py" module.
+
+ * List bandwidth limit rules for QoS policy
+ * Create bandwidth limit rule
+ * Show bandwidth limit rule details
+ * Update bandwidth limit rule
+ * Delete bandwidth limit rule
\ No newline at end of file
diff --git a/releasenotes/notes/deprecate-and-enable-identity-application_credentials-1d4eaef4c3c9dcba.yaml b/releasenotes/notes/deprecate-and-enable-identity-application_credentials-1d4eaef4c3c9dcba.yaml
new file mode 100644
index 0000000..4b31ff8
--- /dev/null
+++ b/releasenotes/notes/deprecate-and-enable-identity-application_credentials-1d4eaef4c3c9dcba.yaml
@@ -0,0 +1,17 @@
+---
+upgrade:
+ - |
+ Application credentials are supported by Keystone since Queens.
+ As Tempest currently supports only much newer OpenStack versions
+ (Ussuri or later), the configuration option which enables
+ application credentials testing
+ (``CONF.identity-feature-enabled.application_credentials``)
+ is now enabled by default.
+deprecations:
+ - |
+ Application credentials are supported by Keystone since Queens.
+ As Tempest currently supports only much newer OpenStack versions
+ (Ussuri or later), the configuration option which enables
+ application credentials testing
+ (``CONF.identity-feature-enabled.application_credentials``)
+ is now deprecated.
diff --git a/releasenotes/notes/deprecate-and-enable-identity-project-tags-23b87518888e563a.yaml b/releasenotes/notes/deprecate-and-enable-identity-project-tags-23b87518888e563a.yaml
new file mode 100644
index 0000000..be2df6b
--- /dev/null
+++ b/releasenotes/notes/deprecate-and-enable-identity-project-tags-23b87518888e563a.yaml
@@ -0,0 +1,17 @@
+---
+upgrade:
+ - |
+ Project tags are supported by Keystone since Queens.
+ As Tempest currently supports only much newer OpenStack versions
+ (Ussuri or later), the configuration option which enables
+ project tags testing
+ (``CONF.identity-feature-enabled.project_tags``)
+ is now enabled by default.
+deprecations:
+ - |
+ Project tags are supported by Keystone since Queens.
+ As Tempest currently supports only much newer OpenStack versions
+ (Ussuri or later), the configuration option which enables
+ project tags testing
+ (``CONF.identity-feature-enabled.project_tags``)
+ is now deprecated.
diff --git a/releasenotes/notes/end-of-support-for-train-83369468215d7485.yaml b/releasenotes/notes/end-of-support-for-train-83369468215d7485.yaml
new file mode 100644
index 0000000..36681c7
--- /dev/null
+++ b/releasenotes/notes/end-of-support-for-train-83369468215d7485.yaml
@@ -0,0 +1,12 @@
+---
+prelude: |
+ This is an intermediate release during the Xena development cycle to
+ mark the end of support for EM Train release in Tempest.
+ After this release, Tempest will support below OpenStack Releases:
+
+ * Wallaby
+ * Victoria
+ * Ussuri
+
+ Current development of Tempest is for OpenStack Xena development
+ cycle.
diff --git a/releasenotes/notes/limits-client-d656f16a3d3e84fc.yaml b/releasenotes/notes/limits-client-d656f16a3d3e84fc.yaml
new file mode 100644
index 0000000..311eca3
--- /dev/null
+++ b/releasenotes/notes/limits-client-d656f16a3d3e84fc.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Add a new client for keystone's unified limits API to create and update limits.
diff --git a/releasenotes/notes/log-resource-client-20e58a295f729902.yaml b/releasenotes/notes/log-resource-client-20e58a295f729902.yaml
new file mode 100644
index 0000000..405fc5f
--- /dev/null
+++ b/releasenotes/notes/log-resource-client-20e58a295f729902.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Add a new client to list, create, show information for,
+ and update neutron log resources.
diff --git a/releasenotes/notes/loggable-resource-client-5977d46a7ea52199.yaml b/releasenotes/notes/loggable-resource-client-5977d46a7ea52199.yaml
new file mode 100644
index 0000000..ac83eaf
--- /dev/null
+++ b/releasenotes/notes/loggable-resource-client-5977d46a7ea52199.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Neutron's Loggable resources API service client is available in
+ ``tempest/lib/services/network/loggable_resource_client.py`` module.
\ No newline at end of file
diff --git a/roles/ipv6-only-deployments-verification/README.rst b/roles/ipv6-only-deployments-verification/README.rst
deleted file mode 100644
index 400a8da..0000000
--- a/roles/ipv6-only-deployments-verification/README.rst
+++ /dev/null
@@ -1,16 +0,0 @@
-Verify the IPv6-only deployments
-
-This role needs to be invoked from a playbook that
-run tests. This role verifies the IPv6 setting on
-devstack side and devstack deploy services on IPv6.
-This role is invoked before tests are run so that
-if any missing IPv6 setting or deployments can fail
-the job early.
-
-
-**Role Variables**
-
-.. zuul:rolevar:: devstack_base_dir
- :default: /opt/stack
-
- The devstack base directory.
diff --git a/roles/ipv6-only-deployments-verification/defaults/main.yaml b/roles/ipv6-only-deployments-verification/defaults/main.yaml
deleted file mode 100644
index fea05c8..0000000
--- a/roles/ipv6-only-deployments-verification/defaults/main.yaml
+++ /dev/null
@@ -1 +0,0 @@
-devstack_base_dir: /opt/stack
diff --git a/roles/ipv6-only-deployments-verification/tasks/main.yaml b/roles/ipv6-only-deployments-verification/tasks/main.yaml
deleted file mode 100644
index d73c79c..0000000
--- a/roles/ipv6-only-deployments-verification/tasks/main.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-- name: Verify the ipv6-only deployments
- become: true
- become_user: stack
- shell: "{{ devstack_base_dir }}/tempest/tools/verify-ipv6-only-deployments.sh"
diff --git a/roles/run-tempest-26/README.rst b/roles/run-tempest-26/README.rst
new file mode 100644
index 0000000..3643edb
--- /dev/null
+++ b/roles/run-tempest-26/README.rst
@@ -0,0 +1,83 @@
+Run Tempest
+
+The result of the tempest run is stored in the `tempest_run_result`
+variable (through the `register` statement).
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+ :default: /opt/stack
+
+ The devstack base directory.
+
+.. zuul:rolevar:: tempest_concurrency
+ :default: 0
+
+ The number of parallel test processes.
+
+.. zuul:rolevar:: tempest_test_regex
+ :default: ''
+
+ A regular expression used to select the tests.
+
+ It works only when used with some specific tox environments
+ ('all', 'all-plugin'.)
+
+ In the following example only api scenario and third party tests
+ will be executed.
+
+ ::
+ vars:
+ tempest_test_regex: (tempest\.(api|scenario|thirdparty)).*$
+
+.. zuul:rolevar:: tempest_test_blacklist
+
+ Specifies a blacklist file to skip tests that are not needed.
+
+ Pass a full path to the file.
+
+.. zuul:rolevar:: tox_envlist
+ :default: smoke
+
+ The Tempest tox environment to run.
+
+.. zuul:rolevar:: tempest_black_regex
+ :default: ''
+
+ A regular expression used to skip the tests.
+
+ It works only when used with some specific tox environments
+ ('all', 'all-plugin'.)
+
+ ::
+ vars:
+ tempest_black_regex: (tempest.api.identity).*$
+
+.. zuul:rolevar:: tox_extra_args
+ :default: ''
+
+ String of extra command line options to pass to tox.
+
+ Here is an example of running tox with --sitepackages option:
+
+ ::
+ vars:
+ tox_extra_args: --sitepackages
+
+.. zuul:rolevar:: tempest_test_timeout
+ :default: ''
+
+ The timeout (in seconds) for each test.
+
+.. zuul:rolevar:: stable_constraints_file
+ :default: ''
+
+ Upper constraints file to be used for stable branch till stable/rocky.
+
+.. zuul:rolevar:: tempest_tox_environment
+ :default: ''
+
+ Environment variables to set for the run-tempest task.
+
+ Env variables set in this variable will be combined with some more
+ defaults env variable set at runtime.
diff --git a/roles/run-tempest-26/defaults/main.yaml b/roles/run-tempest-26/defaults/main.yaml
new file mode 100644
index 0000000..cbac76d
--- /dev/null
+++ b/roles/run-tempest-26/defaults/main.yaml
@@ -0,0 +1,12 @@
+devstack_base_dir: /opt/stack
+tempest_test_regex: ''
+tox_envlist: smoke
+tempest_black_regex: ''
+tox_extra_args: ''
+tempest_test_timeout: ''
+stable_constraints_file: "{{ devstack_base_dir }}/requirements/upper-constraints.txt"
+target_branch: "{{ zuul.branch }}"
+tempest_tox_environment: {}
+# NOTE(gmann): external_bridge_mtu shows as undefined for run-tempest role
+# defining default value here to avoid that error.
+external_bridge_mtu: 0
\ No newline at end of file
diff --git a/roles/run-tempest-26/tasks/main.yaml b/roles/run-tempest-26/tasks/main.yaml
new file mode 100644
index 0000000..f846006
--- /dev/null
+++ b/roles/run-tempest-26/tasks/main.yaml
@@ -0,0 +1,73 @@
+# NOTE(andreaf) The number of vcpus is not available on all systems.
+# See https://github.com/ansible/ansible/issues/30688
+# When not available, we fall back to ansible_processor_cores
+- name: Get hw.logicalcpu from sysctl
+ shell: sysctl hw.logicalcpu | cut -d' ' -f2
+ register: sysctl_hw_logicalcpu
+ when: ansible_processor_vcpus is not defined
+
+- name: Number of cores
+ set_fact:
+ num_cores: "{{ansible_processor_vcpus|default(sysctl_hw_logicalcpu.stdout)}}"
+
+- name: Set concurrency for cores == 3 or less
+ set_fact:
+ default_concurrency: "{{ num_cores }}"
+ when: num_cores|int <= 3
+
+- name: Limit max concurrency when more than 3 vcpus are available
+ set_fact:
+ default_concurrency: "{{ num_cores|int // 2 }}"
+ when: num_cores|int > 3
+
+- name: Override target branch
+ set_fact:
+ target_branch: "{{ zuul.override_checkout }}"
+ when: zuul.override_checkout is defined
+
+- name: Use stable branch upper-constraints till stable/stein
+ set_fact:
+ # TOX_CONSTRAINTS_FILE is new name, UPPER_CONSTRAINTS_FILE is old one, best to set both
+ tempest_tox_environment: "{{ tempest_tox_environment | combine({'UPPER_CONSTRAINTS_FILE': stable_constraints_file}) | combine({'TOX_CONSTRAINTS_FILE': stable_constraints_file}) }}"
+ when: target_branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"]
+
+- name: Use Configured upper-constraints for non-master Tempest
+ set_fact:
+ # TOX_CONSTRAINTS_FILE is new name, UPPER_CONSTRAINTS_FILE is old one, best to set both
+ tempest_tox_environment: "{{ tempest_tox_environment | combine({'UPPER_CONSTRAINTS_FILE': devstack_localrc['TEMPEST_VENV_UPPER_CONSTRAINTS']}) | combine({'TOX_CONSTRAINTS_FILE': devstack_localrc['TEMPEST_VENV_UPPER_CONSTRAINTS']}) }}"
+ when:
+ - devstack_localrc is defined
+ - "'TEMPEST_BRANCH' in devstack_localrc"
+ - "'TEMPEST_VENV_UPPER_CONSTRAINTS' in devstack_localrc"
+ - devstack_localrc['TEMPEST_BRANCH'] != 'master'
+ - devstack_localrc['TEMPEST_VENV_UPPER_CONSTRAINTS'] != 'default'
+
+- name: Set OS_TEST_TIMEOUT if requested
+ set_fact:
+ tempest_tox_environment: "{{ tempest_tox_environment | combine({'OS_TEST_TIMEOUT': tempest_test_timeout}) }}"
+ when: tempest_test_timeout != ''
+
+- when:
+ - tempest_test_blacklist is defined
+ block:
+ - name: Check for test blacklist file
+ stat:
+ path: "{{ tempest_test_blacklist }}"
+ register:
+ blacklist_stat
+
+ - name: Build blacklist option
+ set_fact:
+ blacklist_option: "--blacklist-file={{ tempest_test_blacklist|quote }}"
+ when: blacklist_stat.stat.exists
+
+- name: Run Tempest
+ command: tox -e {{tox_envlist}} {{tox_extra_args}} -- {{tempest_test_regex|quote}} {{blacklist_option|default('')}} \
+ --concurrency={{tempest_concurrency|default(default_concurrency)}} \
+ --black-regex={{tempest_black_regex|quote}}
+ args:
+ chdir: "{{devstack_base_dir}}/tempest"
+ register: tempest_run_result
+ become: true
+ become_user: tempest
+ environment: "{{ tempest_tox_environment }}"
diff --git a/tempest/api/compute/admin/test_create_server.py b/tempest/api/compute/admin/test_create_server.py
index c4a8bf5..ccdfbf3 100644
--- a/tempest/api/compute/admin/test_create_server.py
+++ b/tempest/api/compute/admin/test_create_server.py
@@ -42,6 +42,8 @@
@decorators.idempotent_id('b3c7bcfc-bb5b-4e22-b517-c7f686b802ca')
@testtools.skipUnless(CONF.validation.run_validation,
'Instance validation tests are disabled.')
+ @testtools.skipIf("aarch64" in CONF.scenario.img_file,
+ "Aarch64 does not support ephemeral disk test")
def test_verify_created_server_ephemeral_disk(self):
"""Verify that the ephemeral disk is created when creating server"""
flavor_base = self.flavors_client.show_flavor(
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 7900b77..922a14c 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -454,6 +454,12 @@
server = self.servers_client.show_server(server_id)['server']
self.assert_flavor_equal(new_flavor_id, server['flavor'])
+ def reboot_server(self, server_id, type):
+ """Reboot a server and wait for it to be ACTIVE."""
+ self.servers_client.reboot_server(server_id, type=type)
+ waiters.wait_for_server_status(
+ self.servers_client, server_id, 'ACTIVE')
+
@classmethod
def delete_volume(cls, volume_id):
"""Deletes the given volume and waits for it to be gone."""
@@ -515,6 +521,12 @@
kwargs['display_name'] = vol_name
if image_ref is not None:
kwargs['imageRef'] = image_ref
+ if CONF.volume.volume_type and 'volume_type' not in kwargs:
+ # If volume_type is not provided in config then no need to
+ # add a volume type and
+ # if volume_type has already been added by child class then
+ # no need to override.
+ kwargs['volume_type'] = CONF.volume.volume_type
if CONF.compute.compute_volume_common_az:
kwargs.setdefault('availability_zone',
CONF.compute.compute_volume_common_az)
@@ -564,24 +576,33 @@
attachment = self.servers_client.attach_volume(
server['id'], **attach_kwargs)['volumeAttachment']
- # On teardown detach the volume and for multiattach volumes wait for
- # the attachment to be removed. For non-multiattach volumes wait for
- # the state of the volume to change to available. This is so we don't
- # error out when trying to delete the volume during teardown.
- if volume['multiattach']:
- att = waiters.wait_for_volume_attachment_create(
- self.volumes_client, volume['id'], server['id'])
- self.addCleanup(waiters.wait_for_volume_attachment_remove,
- self.volumes_client, volume['id'],
- att['attachment_id'])
- else:
- self.addCleanup(waiters.wait_for_volume_resource_status,
- self.volumes_client, volume['id'], 'available')
- waiters.wait_for_volume_resource_status(self.volumes_client,
- volume['id'], 'in-use')
- # Ignore 404s on detach in case the server is deleted or the volume
- # is already detached.
+
+ # NOTE(lyarwood): During attach we initially wait for the volume
+ # attachment and then check the volume state.
+ waiters.wait_for_volume_attachment_create(
+ self.volumes_client, volume['id'], server['id'])
+ # TODO(lyarwood): Remove the following volume status checks and move to
+ # attachment status checks across all volumes now with the 3.27
+ # microversion somehow.
+ if not volume['multiattach']:
+ waiters.wait_for_volume_resource_status(
+ self.volumes_client, volume['id'], 'in-use')
+
+ # NOTE(lyarwood): On teardown (LIFO) initially wait for the volume
+ # attachment in Nova to be removed. While this technically happens last
+ # we want this to be the first waiter as if it fails we can then dump
+ # the contents of the console log. The final check of the volume state
+ # should be a no-op by this point and is just added for completeness
+ # when detaching non-multiattach volumes.
+ if not volume['multiattach']:
+ self.addCleanup(
+ waiters.wait_for_volume_resource_status, self.volumes_client,
+ volume['id'], 'available')
+ self.addCleanup(
+ waiters.wait_for_volume_attachment_remove_from_server,
+ self.servers_client, server['id'], volume['id'])
self.addCleanup(self._detach_volume, server, volume)
+
return attachment
def create_volume_snapshot(self, volume_id, name=None, description=None,
diff --git a/tempest/api/compute/security_groups/test_security_groups.py b/tempest/api/compute/security_groups/test_security_groups.py
index 671a779..a1f3514 100644
--- a/tempest/api/compute/security_groups/test_security_groups.py
+++ b/tempest/api/compute/security_groups/test_security_groups.py
@@ -109,9 +109,7 @@
sg['id'])
# Reboot and add the other security group
- self.servers_client.reboot_server(server_id, type='HARD')
- waiters.wait_for_server_status(self.servers_client, server_id,
- 'ACTIVE')
+ self.reboot_server(server_id, type='HARD')
self.servers_client.add_security_group(server_id, name=sg2['name'])
# Check that we are not able to delete the other security
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index 48f32a8..c9aec62 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -180,3 +180,56 @@
if not utils.get_service_list()['volume']:
msg = "Volume service not enabled."
raise cls.skipException(msg)
+
+
+class ServersTestFqdnHostnames(base.BaseV2ComputeTest):
+ """Test creating server with FQDN hostname and verifying attributes
+
+ Starting Wallaby release, Nova sanitizes freeform characters in
+ server hostname with dashes. This test verifies the same.
+ """
+
+ @classmethod
+ def setup_credentials(cls):
+ cls.prepare_instance_network()
+ super(ServersTestFqdnHostnames, cls).setup_credentials()
+
+ @classmethod
+ def setup_clients(cls):
+ super(ServersTestFqdnHostnames, cls).setup_clients()
+ cls.client = cls.servers_client
+
+ @decorators.idempotent_id('622066d2-39fc-4c09-9eeb-35903c114a0a')
+ @testtools.skipUnless(
+ CONF.compute_feature_enabled.hostname_fqdn_sanitization,
+ 'FQDN hostname sanitization is not supported.')
+ @testtools.skipUnless(CONF.validation.run_validation,
+ 'Instance validation tests are disabled.')
+ def test_create_server_with_fqdn_name(self):
+ """Test to create an instance with FQDN type name scheme"""
+ validation_resources = self.get_class_validation_resources(
+ self.os_primary)
+ self.server_name = 'guest-instance-1.domain.com'
+ self.password = data_utils.rand_password()
+ self.accessIPv4 = '2.2.2.2'
+ test_server = self.create_test_server(
+ validatable=True,
+ validation_resources=validation_resources,
+ wait_until='ACTIVE',
+ adminPass=self.password,
+ name=self.server_name,
+ accessIPv4=self.accessIPv4)
+
+ """Verify the hostname within the instance is sanitized
+
+ Freeform characters in the hostname are replaced with dashes
+ """
+ linux_client = remote_client.RemoteClient(
+ self.get_server_ip(test_server, validation_resources),
+ self.ssh_user,
+ self.password,
+ validation_resources['keypair']['private_key'],
+ server=test_server,
+ servers_client=self.client)
+ hostname = linux_client.exec_command("hostname").rstrip()
+ self.assertEqual('guest-instance-1-domain-com', hostname)
diff --git a/tempest/api/compute/servers/test_instance_actions.py b/tempest/api/compute/servers/test_instance_actions.py
index 5ab592a..028da68 100644
--- a/tempest/api/compute/servers/test_instance_actions.py
+++ b/tempest/api/compute/servers/test_instance_actions.py
@@ -37,9 +37,7 @@
@decorators.idempotent_id('77ca5cc5-9990-45e0-ab98-1de8fead201a')
def test_list_instance_actions(self):
"""Test listing actions of the provided server"""
- self.client.reboot_server(self.server['id'], type='HARD')
- waiters.wait_for_server_status(self.client,
- self.server['id'], 'ACTIVE')
+ self.reboot_server(self.server['id'], type='HARD')
body = (self.client.list_instance_actions(self.server['id'])
['instanceActions'])
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index deb21c7..152e7e8 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -136,8 +136,7 @@
# in a server
linux_client.exec_command("sync")
- self.client.reboot_server(self.server_id, type=reboot_type)
- waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
+ self.reboot_server(self.server_id, type=reboot_type)
if CONF.validation.run_validation:
# Log in and verify the boot time has changed
@@ -607,8 +606,7 @@
# log file is truncated and we cannot get any console log through
# "console-log" API.
# The detail is https://bugs.launchpad.net/nova/+bug/1251920
- self.client.reboot_server(self.server_id, type='HARD')
- waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
+ self.reboot_server(self.server_id, type='HARD')
self.wait_for(self._get_output)
@decorators.idempotent_id('89104062-69d8-4b19-a71b-f47b7af093d7')
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index a9f8c09..354e3b9 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -168,6 +168,8 @@
raise cls.skipException("IDE bus not available.")
@decorators.idempotent_id('947004c3-e8ef-47d9-9f00-97b74f9eaf96')
+ @testtools.skipIf("aarch64" in CONF.scenario.img_file,
+ "Aarch64 does not support ide bus for cdrom")
def test_stable_device_rescue_cdrom_ide(self):
"""Test rescuing server with cdrom and ide as the rescue disk"""
server_id, rescue_image_id = self._create_server_and_rescue_image(
diff --git a/tempest/api/identity/v3/test_users.py b/tempest/api/identity/v3/test_users.py
index 6425ea9..dc6dd4a 100644
--- a/tempest/api/identity/v3/test_users.py
+++ b/tempest/api/identity/v3/test_users.py
@@ -77,6 +77,8 @@
time.sleep(1)
self.non_admin_users_client.auth_provider.set_auth()
+ @testtools.skipUnless(CONF.identity_feature_enabled.security_compliance,
+ 'Security compliance not available.')
@decorators.idempotent_id('ad71bd23-12ad-426b-bb8b-195d2b635f27')
@testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
'Skipped because environment has an '
diff --git a/tempest/api/network/admin/test_negative_quotas.py b/tempest/api/network/admin/test_negative_quotas.py
index 190d9e3..1ce9f47 100644
--- a/tempest/api/network/admin/test_negative_quotas.py
+++ b/tempest/api/network/admin/test_negative_quotas.py
@@ -45,11 +45,17 @@
super(QuotasNegativeTest, self).setUp()
name = data_utils.rand_name('test_project_')
description = data_utils.rand_name('desc_')
- self.project = identity.identity_utils(self.os_admin).create_project(
+ self.creds_client = identity.identity_utils(self.os_admin)
+ self.project = self.creds_client.create_project(
name=name, description=description)
self.addCleanup(identity.identity_utils(self.os_admin).delete_project,
self.project['id'])
+ def tearDown(self):
+ super(QuotasNegativeTest, self).tearDown()
+ self.credentials_provider.cleanup_default_secgroup(
+ self.os_admin.security_groups_client, self.project['id'])
+
@decorators.attr(type=['negative'])
@decorators.idempotent_id('644f4e1b-1bf9-4af0-9fd8-eb56ac0f51cf')
def test_network_quota_exceeding(self):
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index b6bf369..47a8590 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -84,6 +84,8 @@
cls.network_versions_client = cls.os_primary.network_versions_client
cls.service_providers_client = cls.os_primary.service_providers_client
cls.tags_client = cls.os_primary.tags_client
+ cls.log_resource_client = cls.os_primary.log_resource_client
+ cls.loggable_resource_client = cls.os_primary.loggable_resource_client
@classmethod
def resource_setup(cls):
diff --git a/tempest/api/network/test_allowed_address_pair.py b/tempest/api/network/test_allowed_address_pair.py
index 905bf13..bf9eae6 100644
--- a/tempest/api/network/test_allowed_address_pair.py
+++ b/tempest/api/network/test_allowed_address_pair.py
@@ -97,6 +97,18 @@
body = self.ports_client.update_port(
port_id, allowed_address_pairs=allowed_address_pairs)
allowed_address_pair = body['port']['allowed_address_pairs']
+ # NOTE(slaweq): Attribute "active" is added to the
+ # allowed_address_pairs in the Xena release.
+ # To make our existing allowed_address_pairs API tests pass in
+ # both cases, with and without that "active" attribute, we need to
+ # remove that field from the allowed_address_pairs which are returned
+ # by the Neutron server.
+ # We could make the expected results of those tests dependent on the
+ # available Neutron's API extensions but in that case existing tests
+ # may fail randomly as all tests are always using same IP addresses
+ # thus allowed_address_pair may be active=True or active=False.
+ for pair in allowed_address_pair:
+ pair.pop('active', None)
self.assertCountEqual(allowed_address_pair, allowed_address_pairs)
@decorators.idempotent_id('9599b337-272c-47fd-b3cf-509414414ac4')
diff --git a/tempest/api/object_storage/test_container_acl.py b/tempest/api/object_storage/test_container_acl.py
index c8731fe..0259373 100644
--- a/tempest/api/object_storage/test_container_acl.py
+++ b/tempest/api/object_storage/test_container_acl.py
@@ -31,9 +31,10 @@
super(ObjectTestACLs, self).setUp()
self.container_name = self.create_container()
- def tearDown(self):
- self.delete_containers()
- super(ObjectTestACLs, self).tearDown()
+ @classmethod
+ def resource_cleanup(cls):
+ cls.delete_containers()
+ super(ObjectTestACLs, cls).resource_cleanup()
@decorators.idempotent_id('a3270f3f-7640-4944-8448-c7ea783ea5b6')
def test_read_object_with_rights(self):
diff --git a/tempest/api/object_storage/test_container_acl_negative.py b/tempest/api/object_storage/test_container_acl_negative.py
index 73d7f27..85e6ddb 100644
--- a/tempest/api/object_storage/test_container_acl_negative.py
+++ b/tempest/api/object_storage/test_container_acl_negative.py
@@ -42,9 +42,10 @@
self.container_name = data_utils.rand_name(name='TestContainer')
self.container_client.update_container(self.container_name)
- def tearDown(self):
- self.delete_containers([self.container_name])
- super(ObjectACLsNegativeTest, self).tearDown()
+ @classmethod
+ def resource_cleanup(cls):
+ cls.delete_containers()
+ super(ObjectACLsNegativeTest, cls).resource_cleanup()
@decorators.attr(type=['negative'])
@decorators.idempotent_id('af587587-0c24-4e15-9822-8352ce711013')
diff --git a/tempest/api/object_storage/test_container_quotas.py b/tempest/api/object_storage/test_container_quotas.py
index fcd9a7c..7977a7a 100644
--- a/tempest/api/object_storage/test_container_quotas.py
+++ b/tempest/api/object_storage/test_container_quotas.py
@@ -44,10 +44,10 @@
self.container_client.create_update_or_delete_container_metadata(
self.container_name, create_update_metadata=metadata)
- def tearDown(self):
- """Cleans the container of any object after each test."""
- self.delete_containers()
- super(ContainerQuotasTest, self).tearDown()
+ @classmethod
+ def resource_cleanup(cls):
+ cls.delete_containers()
+ super(ContainerQuotasTest, cls).resource_cleanup()
@decorators.idempotent_id('9a0fb034-86af-4df0-86fa-f8bd7db21ae0')
@utils.requires_ext(extension='container_quotas', service='object')
diff --git a/tempest/api/object_storage/test_container_services.py b/tempest/api/object_storage/test_container_services.py
index 7ad6f6f..085b8ab 100644
--- a/tempest/api/object_storage/test_container_services.py
+++ b/tempest/api/object_storage/test_container_services.py
@@ -21,9 +21,10 @@
class ContainerTest(base.BaseObjectTest):
"""Test containers"""
- def tearDown(self):
- self.delete_containers()
- super(ContainerTest, self).tearDown()
+ @classmethod
+ def resource_cleanup(cls):
+ cls.delete_containers()
+ super(ContainerTest, cls).resource_cleanup()
@decorators.attr(type='smoke')
@decorators.idempotent_id('92139d73-7819-4db1-85f8-3f2f22a8d91f')
diff --git a/tempest/api/volume/admin/test_volume_types_negative.py b/tempest/api/volume/admin/test_volume_types_negative.py
index 174cf9e..42d3bdf 100644
--- a/tempest/api/volume/admin/test_volume_types_negative.py
+++ b/tempest/api/volume/admin/test_volume_types_negative.py
@@ -54,3 +54,18 @@
volume_type = self.create_volume_type(**params)
self.assertRaises(lib_exc.NotFound,
self.create_volume, volume_type=volume_type['id'])
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('a5924b5f-b6c1-49ba-994c-b4af55d26e52')
+ def test_create_volume_type_encryption_nonexistent_type_id(self):
+ """Test create encryption with nonexistent type id will fail"""
+ create_kwargs = {
+ 'type_id': data_utils.rand_uuid(),
+ 'provider': 'LuksEncryptor',
+ 'key_size': 256,
+ 'cipher': 'aes-xts-plain64',
+ 'control_location': 'front-end'
+ }
+ self.assertRaises(
+ lib_exc.NotFound,
+ self.create_encryption_type, **create_kwargs)
diff --git a/tempest/api/volume/test_volumes_clone.py b/tempest/api/volume/test_volumes_clone.py
index eb54426..9ca1c5e 100644
--- a/tempest/api/volume/test_volumes_clone.py
+++ b/tempest/api/volume/test_volumes_clone.py
@@ -49,13 +49,14 @@
# Creates a volume from another volume passing a size different from
# the source volume.
src_size = CONF.volume.volume_size
+ extend_size = CONF.volume.volume_size_extend
src_vol = self.create_volume(size=src_size)
# Destination volume bigger than source
dst_vol = self.create_volume(source_volid=src_vol['id'],
- size=src_size + 1)
+ size=src_size + extend_size)
- self._verify_volume_clone(src_vol, dst_vol, extra_size=1)
+ self._verify_volume_clone(src_vol, dst_vol, extra_size=extend_size)
@decorators.idempotent_id('cbbcd7c6-5a6c-481a-97ac-ca55ab715d16')
@utils.services('image')
diff --git a/tempest/api/volume/test_volumes_clone_negative.py b/tempest/api/volume/test_volumes_clone_negative.py
index 4bfb166..115465c 100644
--- a/tempest/api/volume/test_volumes_clone_negative.py
+++ b/tempest/api/volume/test_volumes_clone_negative.py
@@ -36,11 +36,11 @@
"""Test cloning a volume with decreasing size will fail"""
# Creates a volume from another volume passing a size different from
# the source volume.
- src_size = CONF.volume.volume_size + 1
+ src_size = CONF.volume.volume_size + CONF.volume.volume_size_extend
src_vol = self.create_volume(size=src_size)
# Destination volume smaller than source
self.assertRaises(exceptions.BadRequest,
self.volumes_client.create_volume,
- size=src_size - 1,
+ size=src_size - CONF.volume.volume_size_extend,
source_volid=src_vol['id'])
diff --git a/tempest/api/volume/test_volumes_negative.py b/tempest/api/volume/test_volumes_negative.py
index 35dd0ca..554fc6a 100644
--- a/tempest/api/volume/test_volumes_negative.py
+++ b/tempest/api/volume/test_volumes_negative.py
@@ -45,7 +45,7 @@
container_format=CONF.image.container_formats[0],
disk_format=CONF.image.disk_formats[0],
visibility='private',
- min_disk=CONF.volume.volume_size + 1)
+ min_disk=CONF.volume.volume_size + CONF.volume.volume_size_extend)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.images_client.delete_image, image['id'])
@@ -223,7 +223,7 @@
@decorators.idempotent_id('8f05a943-013c-4063-ac71-7baf561e82eb')
def test_volume_extend_with_nonexistent_volume_id(self):
"""Test extending non existent volume should fail"""
- extend_size = self.volume['size'] + 1
+ extend_size = self.volume['size'] + CONF.volume.volume_size_extend
self.assertRaises(lib_exc.NotFound, self.volumes_client.extend_volume,
data_utils.rand_uuid(), new_size=extend_size)
@@ -231,7 +231,7 @@
@decorators.idempotent_id('aff8ba64-6d6f-4f2e-bc33-41a08ee9f115')
def test_volume_extend_without_passing_volume_id(self):
"""Test extending volume without passing volume id should fail"""
- extend_size = self.volume['size'] + 1
+ extend_size = self.volume['size'] + CONF.volume.volume_size_extend
self.assertRaises(lib_exc.NotFound, self.volumes_client.extend_volume,
None, new_size=extend_size)
diff --git a/tempest/clients.py b/tempest/clients.py
index 6080f01..51bd204 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -73,6 +73,8 @@
self.qos_min_bw_client = self.network.QosMinimumBandwidthRulesClient()
self.segments_client = self.network.SegmentsClient()
self.trunks_client = self.network.TrunksClient()
+ self.log_resource_client = self.network.LogResourceClient()
+ self.loggable_resource_client = self.network.LoggableResourceClient()
def _set_image_clients(self):
if CONF.service_available.glance:
@@ -221,6 +223,8 @@
self.identity_v3.ApplicationCredentialsClient(**params_v3)
self.access_rules_client = \
self.identity_v3.AccessRulesClient(**params_v3)
+ self.identity_limits_client = \
+ self.identity_v3.LimitsClient(**params_v3)
# Token clients do not use the catalog. They only need default_params.
# They read auth_url, so they should only be set if the corresponding
diff --git a/tempest/common/utils/linux/remote_client.py b/tempest/common/utils/linux/remote_client.py
index b68a879..5d6e129 100644
--- a/tempest/common/utils/linux/remote_client.py
+++ b/tempest/common/utils/linux/remote_client.py
@@ -108,7 +108,7 @@
LOG.debug('(get_nic_name_by_ip) Command result: %s', nic)
return nic.strip().strip(":").split('@')[0].lower()
- def get_dns_servers(self):
+ def _get_dns_servers(self):
cmd = 'cat /etc/resolv.conf'
resolve_file = self.exec_command(cmd).strip().split('\n')
entries = (l.split() for l in resolve_file)
@@ -116,6 +116,19 @@
if len(l) and l[0] == 'nameserver']
return dns_servers
+ def get_dns_servers(self, timeout=5):
+ start_time = int(time.time())
+ dns_servers = []
+ while True:
+ dns_servers = self._get_dns_servers()
+ if dns_servers:
+ break
+ LOG.debug("DNS Servers list empty.")
+ if int(time.time()) - start_time >= timeout:
+ LOG.debug("DNS Servers list empty after %s.", timeout)
+ break
+ return dns_servers
+
def _renew_lease_udhcpc(self, fixed_ip=None):
"""Renews DHCP lease via udhcpc client. """
file_path = '/var/run/udhcpc.'
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 3750b11..f6a4555 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -356,23 +356,36 @@
This waiter checks the compute API if the volume attachment is removed.
"""
start = int(time.time())
- volumes = client.list_volume_attachments(server_id)['volumeAttachments']
+
+ try:
+ volumes = client.list_volume_attachments(
+ server_id)['volumeAttachments']
+ except lib_exc.NotFound:
+ # Ignore 404s on detach in case the server is deleted or the volume
+ # is already detached.
+ return
while any(volume for volume in volumes if volume['volumeId'] == volume_id):
time.sleep(client.build_interval)
timed_out = int(time.time()) - start >= client.build_timeout
if timed_out:
+ console_output = client.get_console_output(server_id)['output']
+ LOG.debug('Console output for %s\nbody=\n%s',
+ server_id, console_output)
message = ('Volume %s failed to detach from server %s within '
'the required time (%s s) from the compute API '
'perspective' %
(volume_id, server_id, client.build_timeout))
raise lib_exc.TimeoutException(message)
-
- volumes = client.list_volume_attachments(server_id)[
- 'volumeAttachments']
-
- return volumes
+ try:
+ volumes = client.list_volume_attachments(
+ server_id)['volumeAttachments']
+ except lib_exc.NotFound:
+ # Ignore 404s on detach in case the server is deleted or the volume
+ # is already detached.
+ return
+ return
def wait_for_volume_migration(client, volume_id, new_host):
diff --git a/tempest/config.py b/tempest/config.py
index c409db6..662a249 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -259,14 +259,18 @@
help='Does the environment have the security compliance '
'settings enabled?'),
cfg.BoolOpt('project_tags',
- default=False,
- help='Is the project tags identity v3 API available?'),
- # Application credentials is a default feature in Queens. This config
- # option can removed once Pike is EOL.
+ default=True,
+ help='Is the project tags identity v3 API available?',
+ deprecated_for_removal=True,
+ deprecated_reason='Project tags API is a default feature '
+ 'since Queens'),
cfg.BoolOpt('application_credentials',
- default=False,
+ default=True,
help='Does the environment have application credentials '
- 'enabled?'),
+ 'enabled?',
+ deprecated_for_removal=True,
+ deprecated_reason='Application credentials is a default '
+ 'feature since Queens'),
# Access rules for application credentials is a default feature in Train.
# This config option can removed once Stein is EOL.
cfg.BoolOpt('access_rules',
@@ -437,6 +441,15 @@
cfg.BoolOpt('disk_config',
default=True,
help="If false, skip disk config tests"),
+ # TODO(pkesav): Make it True by default once wallaby
+ # is oldest supported stable for Tempest.
+ cfg.BoolOpt('hostname_fqdn_sanitization',
+ default=False,
+ help="If false, skip fqdn instance sanitization tests. "
+ "Nova started sanitizing the instance name by replacing "
+ "the '.' with '-' to comply with fqdn hostname. Nova "
+ "changed that in Wallaby cycle, if your cloud is older "
+ "than wallaby then you can keep/make it False."),
cfg.ListOpt('api_extensions',
default=['all'],
help='A list of enabled compute extensions with a special '
@@ -1000,6 +1013,11 @@
cfg.IntOpt('volume_size',
default=1,
help='Default size in GB for volumes created by volumes tests'),
+ cfg.IntOpt('volume_size_extend',
+ default=1,
+ help="Size in GB a volume is extended by - if a test "
+ "extends a volume, the size of the new volume will be "
+ "volume_size + volume_size_extend."),
cfg.ListOpt('manage_volume_ref',
default=['source-name', 'volume-%s'],
help="A reference to existing volume for volume manage. "
diff --git a/tempest/lib/api_schema/response/volume/services.py b/tempest/lib/api_schema/response/volume/services.py
index 70de878..216631c 100644
--- a/tempest/lib/api_schema/response/volume/services.py
+++ b/tempest/lib/api_schema/response/volume/services.py
@@ -33,10 +33,6 @@
'frozen': {'type': 'boolean'},
'updated_at': parameter_types.date_time,
'zone': {'type': 'string'},
- # TODO(zhufl): cluster is added in 3.7, we should move
- # it to the 3.7 schema file when microversion is
- # supported in volume interfaces
- 'cluster': {'type': 'string'},
'replication_status': {'type': 'string'},
'active_backend_id': {'type': ['string', 'null']},
'backend_state': {'type': 'string'},
diff --git a/tempest/lib/api_schema/response/volume/v3_7/__init__.py b/tempest/lib/api_schema/response/volume/v3_7/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/v3_7/__init__.py
diff --git a/tempest/lib/api_schema/response/volume/v3_7/services.py b/tempest/lib/api_schema/response/volume/v3_7/services.py
new file mode 100644
index 0000000..8d43188
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/v3_7/services.py
@@ -0,0 +1,34 @@
+# Copyright 2020 ZTE Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import copy
+
+from tempest.lib.api_schema.response.volume import services
+
+# Volume microversion 3.7:
+# 1. New optional attribute in 'services' dict.
+# 'cluster'
+
+list_services = copy.deepcopy(services.list_services)
+list_services['response_body']['properties']['services']['items'][
+ 'properties'].update({'cluster': {'type': ['string', 'null']}})
+
+# NOTE(zhufl): Below are the unchanged schema in this microversion. We need
+# to keep this schema in this file to have the generic way to select the
+# right schema based on self.schema_versions_info mapping in service client.
+# ****** Schemas unchanged since microversion 3.0 ******
+enable_service = copy.deepcopy(services.enable_service)
+disable_service = copy.deepcopy(services.disable_service)
+disable_log_reason = copy.deepcopy(services.disable_log_reason)
+freeze_host = copy.deepcopy(services.freeze_host)
+thaw_host = copy.deepcopy(services.thaw_host)
diff --git a/tempest/lib/common/cred_provider.py b/tempest/lib/common/cred_provider.py
index 069172a..2da206f 100644
--- a/tempest/lib/common/cred_provider.py
+++ b/tempest/lib/common/cred_provider.py
@@ -13,11 +13,13 @@
# limitations under the License.
import abc
-
+from oslo_log import log as logging
from tempest.lib import auth
from tempest.lib import exceptions
+LOG = logging.getLogger(__name__)
+
class CredentialProvider(object, metaclass=abc.ABCMeta):
def __init__(self, identity_version, name=None,
@@ -125,6 +127,18 @@
def is_role_available(self, role):
return
+ def cleanup_default_secgroup(self, security_group_client, tenant):
+ resp_body = security_group_client.list_security_groups(
+ tenant_id=tenant,
+ name="default")
+ secgroups_to_delete = resp_body['security_groups']
+ for secgroup in secgroups_to_delete:
+ try:
+ security_group_client.delete_security_group(secgroup['id'])
+ except exceptions.NotFound:
+ LOG.warning('Security group %s, id %s not found for clean-up',
+ secgroup['name'], secgroup['id'])
+
class TestResources(object):
"""Readonly Credentials, with network resources added."""
diff --git a/tempest/lib/common/dynamic_creds.py b/tempest/lib/common/dynamic_creds.py
index d86522a..be8c0e8 100644
--- a/tempest/lib/common/dynamic_creds.py
+++ b/tempest/lib/common/dynamic_creds.py
@@ -254,8 +254,8 @@
user, role, domain)
elif scope == 'system':
self.creds_client.assign_user_role_on_system(user, role)
- LOG.info("Roles assigned to the user %s are: %s",
- user['id'], roles_to_assign)
+ LOG.info("Dynamic test user %s is created with scope %s and roles: %s",
+ user['id'], scope, roles_to_assign)
creds = self.creds_client.get_credentials(**cred_params)
return cred_provider.TestResources(creds)
@@ -407,13 +407,23 @@
# Maintained until tests are ported
LOG.info("Acquired dynamic creds:\n"
" credentials: %s", credentials)
- if (self.neutron_available and self.create_networks):
- network, subnet, router = self._create_network_resources(
- credentials.tenant_id)
- credentials.set_resources(network=network, subnet=subnet,
- router=router)
- LOG.info("Created isolated network resources for:\n"
- " credentials: %s", credentials)
+ # NOTE(gmann): For 'domain' and 'system' scoped token, there is no
+ # project_id so we are skipping the network creation for both
+ # scope. How these scoped token can create the network, Nova
+ # server or other project mapped resources is one of the open
+ # question and discussed a lot in Xena cycle PTG. Once we sort
+ # out that then if needed we can update the network creation here.
+ if (not scope or scope == 'project'):
+ if (self.neutron_available and self.create_networks):
+ network, subnet, router = self._create_network_resources(
+ credentials.tenant_id)
+ credentials.set_resources(network=network, subnet=subnet,
+ router=router)
+ LOG.info("Created isolated network resources for:\n"
+ " credentials: %s", credentials)
+ else:
+ LOG.info("Network resources are not created for scope: %s",
+ scope)
return credentials
# TODO(gmann): Remove this method in favor of get_project_member_creds()
@@ -508,18 +518,6 @@
LOG.warning('network with name: %s not found for delete',
network_name)
- def _cleanup_default_secgroup(self, tenant):
- nsg_client = self.security_groups_admin_client
- resp_body = nsg_client.list_security_groups(tenant_id=tenant,
- name="default")
- secgroups_to_delete = resp_body['security_groups']
- for secgroup in secgroups_to_delete:
- try:
- nsg_client.delete_security_group(secgroup['id'])
- except lib_exc.NotFound:
- LOG.warning('Security group %s, id %s not found for clean-up',
- secgroup['name'], secgroup['id'])
-
def _clear_isolated_net_resources(self):
client = self.routers_admin_client
for cred in self._creds:
@@ -562,13 +560,14 @@
LOG.warning("user with name: %s not found for delete",
creds.username)
# NOTE(zhufl): Only when neutron's security_group ext is
- # enabled, _cleanup_default_secgroup will not raise error. But
+ # enabled, cleanup_default_secgroup will not raise error. But
# here cannot use test_utils.is_extension_enabled for it will cause
# "circular dependency". So here just use try...except to
# ensure tenant deletion without big changes.
try:
if self.neutron_available:
- self._cleanup_default_secgroup(creds.tenant_id)
+ self.cleanup_default_secgroup(
+ self.security_groups_admin_client, creds.tenant_id)
except lib_exc.NotFound:
LOG.warning("failed to cleanup tenant %s's secgroup",
creds.tenant_name)
diff --git a/tempest/lib/common/rest_client.py b/tempest/lib/common/rest_client.py
index 573d64e..3f735f5 100644
--- a/tempest/lib/common/rest_client.py
+++ b/tempest/lib/common/rest_client.py
@@ -14,7 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import collections
+from collections import abc
import email.utils
import re
import time
@@ -884,7 +884,7 @@
resp=resp)
def is_absolute_limit(self, resp, resp_body):
- if (not isinstance(resp_body, collections.Mapping) or
+ if (not isinstance(resp_body, abc.Mapping) or
'retry-after' not in resp):
return True
return 'exceed' in resp_body.get('message', 'blabla')
diff --git a/tempest/lib/services/identity/v3/__init__.py b/tempest/lib/services/identity/v3/__init__.py
index 86fa991..af09fb1 100644
--- a/tempest/lib/services/identity/v3/__init__.py
+++ b/tempest/lib/services/identity/v3/__init__.py
@@ -32,6 +32,7 @@
from tempest.lib.services.identity.v3.identity_client import IdentityClient
from tempest.lib.services.identity.v3.inherited_roles_client import \
InheritedRolesClient
+from tempest.lib.services.identity.v3.limits_client import LimitsClient
from tempest.lib.services.identity.v3.oauth_consumers_client import \
OAUTHConsumerClient
from tempest.lib.services.identity.v3.oauth_token_client import \
@@ -55,7 +56,8 @@
'DomainConfigurationClient', 'EndPointGroupsClient',
'EndPointsClient', 'EndPointsFilterClient',
'GroupsClient', 'IdentityClient', 'InheritedRolesClient',
- 'OAUTHConsumerClient', 'OAUTHTokenClient', 'PoliciesClient',
- 'ProjectsClient', 'ProjectTagsClient', 'RegionsClient',
- 'RoleAssignmentsClient', 'RolesClient', 'ServicesClient',
- 'V3TokenClient', 'TrustsClient', 'UsersClient', 'VersionsClient']
+ 'LimitsClient', 'OAUTHConsumerClient', 'OAUTHTokenClient',
+ 'PoliciesClient', 'ProjectsClient', 'ProjectTagsClient',
+ 'RegionsClient', 'RoleAssignmentsClient', 'RolesClient',
+ 'ServicesClient', 'V3TokenClient', 'TrustsClient', 'UsersClient',
+ 'VersionsClient']
diff --git a/tempest/lib/services/identity/v3/limits_client.py b/tempest/lib/services/identity/v3/limits_client.py
new file mode 100644
index 0000000..26d04bc
--- /dev/null
+++ b/tempest/lib/services/identity/v3/limits_client.py
@@ -0,0 +1,57 @@
+# Copyright 2021 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.common import rest_client
+
+
+class LimitsClient(rest_client.RestClient):
+ api_version = "v3"
+
+ def get_registered_limits(self):
+ """Lists all registered limits."""
+ resp, body = self.get('registered_limits')
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, json.loads(body))
+
+ def create_limit(self, region_id, service_id, project_id, resource_name,
+ default_limit, description=None, domain_id=None):
+ """Creates a limit in keystone."""
+ limit = {
+ 'service_id': service_id,
+ 'project_id': project_id,
+ 'resource_name': resource_name,
+ 'resource_limit': default_limit,
+ 'region_id': region_id,
+ 'description': description or '%s limit for %s' % (
+ resource_name, project_id),
+ }
+ if domain_id:
+ limit['domain_id'] = domain_id
+ post_body = json.dumps({'limits': [limit]})
+ resp, body = self.post('limits', post_body)
+ self.expected_success(201, resp.status)
+ return rest_client.ResponseBody(resp, json.loads(body))
+
+ def update_limit(self, limit_id, resource_limit, description=None):
+ """Updates a limit in keystone by id."""
+
+ limit = {'resource_limit': resource_limit}
+ if description:
+ limit['description'] = description
+ patch_body = json.dumps({'limit': limit})
+ resp, body = self.patch('limits/%s' % limit_id, patch_body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, json.loads(body))
diff --git a/tempest/lib/services/network/__init__.py b/tempest/lib/services/network/__init__.py
index 7e57499..d553373 100644
--- a/tempest/lib/services/network/__init__.py
+++ b/tempest/lib/services/network/__init__.py
@@ -15,6 +15,9 @@
from tempest.lib.services.network.agents_client import AgentsClient
from tempest.lib.services.network.extensions_client import ExtensionsClient
from tempest.lib.services.network.floating_ips_client import FloatingIPsClient
+from tempest.lib.services.network.log_resource_client import LogResourceClient
+from tempest.lib.services.network.loggable_resource_client import \
+ LoggableResourceClient
from tempest.lib.services.network.metering_label_rules_client import \
MeteringLabelRulesClient
from tempest.lib.services.network.metering_labels_client import \
@@ -45,4 +48,5 @@
'QosClient', 'QosMinimumBandwidthRulesClient', 'QuotasClient',
'RoutersClient', 'SecurityGroupRulesClient', 'SecurityGroupsClient',
'SegmentsClient', 'ServiceProvidersClient', 'SubnetpoolsClient',
- 'SubnetsClient', 'TagsClient', 'TrunksClient']
+ 'SubnetsClient', 'TagsClient', 'TrunksClient', 'LogResourceClient',
+ 'LoggableResourceClient']
diff --git a/tempest/lib/services/network/log_resource_client.py b/tempest/lib/services/network/log_resource_client.py
new file mode 100644
index 0000000..727b138
--- /dev/null
+++ b/tempest/lib/services/network/log_resource_client.py
@@ -0,0 +1,74 @@
+# Copyright 2021 Red Hat, Inc.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.network import base
+
+
+class LogResourceClient(base.BaseNetworkClient):
+
+ def create_log(self, **kwargs):
+ """Creates a log resource.
+
+ Creates a log resource by using the configuration that you define in
+ the request object.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#create-log
+ """
+ uri = '/log/logs/'
+ post_data = {'log': kwargs}
+ return self.create_resource(uri, post_data)
+
+ def update_log(self, log_id, **kwargs):
+ """Updates a log resource.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#update-log
+ """
+ uri = '/log/logs/%s' % log_id
+ post_data = {'log': kwargs}
+ return self.update_resource(uri, post_data)
+
+ def show_log(self, log_id, **fields):
+ """Shows details for a log id.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#show-log
+ """
+ uri = '/log/logs/%s' % log_id
+ return self.show_resource(uri, **fields)
+
+ def delete_log(self, log_id):
+ """Deletes a log resource.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#delete-log
+ """
+ uri = '/log/logs/%s' % log_id
+ return self.delete_resource(uri)
+
+ def list_logs(self, **filters):
+ """Lists Logs.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#list-logs
+ """
+ uri = '/log/logs'
+ return self.list_resources(uri, **filters)
diff --git a/tempest/lib/services/network/loggable_resource_client.py b/tempest/lib/services/network/loggable_resource_client.py
new file mode 100644
index 0000000..774046f
--- /dev/null
+++ b/tempest/lib/services/network/loggable_resource_client.py
@@ -0,0 +1,29 @@
+# Copyright 2021 Red Hat, Inc.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.network import base
+
+
+class LoggableResourceClient(base.BaseNetworkClient):
+
+ def list_loggable_resources(self, **filters):
+ """List Loggable resources.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#list-loggable-resources
+ """
+ uri = '/log/loggable-resources'
+ return self.list_resources(uri, **filters)
diff --git a/tempest/lib/services/network/qos_limit_bandwidth_rules_client.py b/tempest/lib/services/network/qos_limit_bandwidth_rules_client.py
new file mode 100644
index 0000000..09483e3
--- /dev/null
+++ b/tempest/lib/services/network/qos_limit_bandwidth_rules_client.py
@@ -0,0 +1,74 @@
+# Copyright 2021 Red Hat.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.network import base
+
+
+class QosLimitBandwidthRulesClient(base.BaseNetworkClient):
+
+ def create_limit_bandwidth_rule(self, qos_policy_id, **kwargs):
+ """Creates a limit bandwidth rule for a QoS policy.
+
+ For full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#create-bandwidth-limit-rule
+ """
+ uri = '/qos/policies/{}/bandwidth_limit_rules'.format(
+ qos_policy_id)
+ post_data = {'bandwidth_limit_rule': kwargs}
+ return self.create_resource(uri, post_data)
+
+ def update_limit_bandwidth_rule(self, qos_policy_id, rule_id, **kwargs):
+ """Updates a limit bandwidth rule.
+
+ For full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#update-bandwidth-limit-rule
+ """
+ uri = '/qos/policies/{}/bandwidth_limit_rules/{}'.format(
+ qos_policy_id, rule_id)
+ post_data = {'bandwidth_limit_rule': kwargs}
+ return self.update_resource(uri, post_data, expect_response_code=202)
+
+ def show_limit_bandwidth_rule(self, qos_policy_id, rule_id, **fields):
+ """Show details of a limit bandwidth rule.
+
+ For full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#show-bandwidth-limit-rule-details
+ """
+ uri = '/qos/policies/{}/bandwidth_limit_rules/{}'.format(
+ qos_policy_id, rule_id)
+ return self.show_resource(uri, **fields)
+
+ def delete_limit_bandwidth_rule(self, qos_policy_id, rule_id):
+ """Deletes a limit bandwidth rule for a QoS policy.
+
+ For full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#delete-bandwidth-limit-rule
+ """
+ uri = '/qos/policies/{}/bandwidth_limit_rules/{}'.format(
+ qos_policy_id, rule_id)
+ return self.delete_resource(uri)
+
+ def list_limit_bandwidth_rules(self, qos_policy_id, **filters):
+ """Lists all limit bandwidth rules for a QoS policy.
+
+ For full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#list-bandwidth-limit-rules-for-qos-policy
+ """
+ uri = '/qos/policies/{}/bandwidth_limit_rules'.format(qos_policy_id)
+ return self.list_resources(uri, **filters)
diff --git a/tempest/lib/services/network/qos_minimum_bandwidth_rules_client.py b/tempest/lib/services/network/qos_minimum_bandwidth_rules_client.py
index dd9f45f..e512aca 100644
--- a/tempest/lib/services/network/qos_minimum_bandwidth_rules_client.py
+++ b/tempest/lib/services/network/qos_minimum_bandwidth_rules_client.py
@@ -38,7 +38,7 @@
uri = '/qos/policies/%s/minimum_bandwidth_rules/%s' % (
qos_policy_id, rule_id)
post_data = {'minimum_bandwidth_rule': kwargs}
- return self.update_resource(uri, post_data)
+ return self.update_resource(uri, post_data, expect_response_code=202)
def show_minimum_bandwidth_rule(self, qos_policy_id, rule_id, **fields):
"""Show details of a minimum bandwidth rule.
diff --git a/tempest/lib/services/object_storage/object_client.py b/tempest/lib/services/object_storage/object_client.py
index bb82975..65e8227 100644
--- a/tempest/lib/services/object_storage/object_client.py
+++ b/tempest/lib/services/object_storage/object_client.py
@@ -28,6 +28,8 @@
self.get_object(container, object_name)
except exceptions.NotFound:
return True
+ except exceptions.Conflict:
+ return False
return False
def create_object(self, container, object_name, data,
diff --git a/tempest/lib/services/volume/base_client.py b/tempest/lib/services/volume/base_client.py
index c7fb21a..c0ac62d 100644
--- a/tempest/lib/services/volume/base_client.py
+++ b/tempest/lib/services/volume/base_client.py
@@ -13,8 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+from tempest.lib.common import api_version_request
from tempest.lib.common import api_version_utils
from tempest.lib.common import rest_client
+from tempest.lib import exceptions
VOLUME_MICROVERSION = None
@@ -43,3 +45,39 @@
'volume %s' % VOLUME_MICROVERSION,
resp)
return resp, resp_body
+
+ def get_schema(self, schema_versions_info):
+ """Get JSON schema
+
+ This method provides the matching schema for requested
+ microversion.
+
+ :param schema_versions_info: List of dict which provides schema
+ information with range of valid versions.
+
+ Example::
+
+ schema_versions_info = [
+ {'min': None, 'max': '2.1', 'schema': schemav21},
+ {'min': '2.2', 'max': '2.9', 'schema': schemav22},
+ {'min': '2.10', 'max': None, 'schema': schemav210}]
+ """
+ schema = None
+ version = api_version_request.APIVersionRequest(VOLUME_MICROVERSION)
+ for items in schema_versions_info:
+ min_version = api_version_request.APIVersionRequest(items['min'])
+ max_version = api_version_request.APIVersionRequest(items['max'])
+ # This is the case where VOLUME_MICROVERSION is None, which means
+ # a request without a microversion, so select the base v3.0 schema.
+ if version.is_null() and items['min'] is None:
+ schema = items['schema']
+ break
+ # else select the appropriate schema as per VOLUME_MICROVERSION
+ elif version.matches(min_version, max_version):
+ schema = items['schema']
+ break
+ if schema is None:
+ raise exceptions.JSONSchemaNotFound(
+ version=version.get_string(),
+ schema_versions_info=schema_versions_info)
+ return schema
diff --git a/tempest/lib/services/volume/v3/services_client.py b/tempest/lib/services/volume/v3/services_client.py
index 4672da8..1111f81 100644
--- a/tempest/lib/services/volume/v3/services_client.py
+++ b/tempest/lib/services/volume/v3/services_client.py
@@ -18,12 +18,18 @@
from oslo_serialization import jsonutils as json
from tempest.lib.api_schema.response.volume import services as schema
+from tempest.lib.api_schema.response.volume.v3_7 import services as schemav37
from tempest.lib.common import rest_client
+from tempest.lib.services.volume import base_client
-class ServicesClient(rest_client.RestClient):
+class ServicesClient(base_client.BaseClient):
"""Client class to send CRUD Volume Services API requests"""
+ schema_versions_info = [
+ {'min': None, 'max': '3.6', 'schema': schema},
+ {'min': '3.7', 'max': None, 'schema': schemav37}]
+
def list_services(self, **params):
"""List all Cinder services.
@@ -37,6 +43,7 @@
resp, body = self.get(url)
body = json.loads(body)
+ schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.list_services, resp, body)
return rest_client.ResponseBody(resp, body)
diff --git a/tempest/scenario/test_unified_limits.py b/tempest/scenario/test_unified_limits.py
new file mode 100644
index 0000000..22256b4
--- /dev/null
+++ b/tempest/scenario/test_unified_limits.py
@@ -0,0 +1,435 @@
+# Copyright 2021 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import io
+
+from oslo_utils import units
+from tempest.common import utils
+from tempest.common import waiters
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+from tempest.scenario import manager
+
+CONF = config.CONF
+
+
+class ImageQuotaTest(manager.ScenarioTest):
+ credentials = ['primary', 'system_admin']
+
+ @classmethod
+ def resource_setup(cls):
+ super(ImageQuotaTest, cls).resource_setup()
+
+ # Figure out and record the glance service id
+ services = cls.os_system_admin.identity_services_v3_client.\
+ list_services()
+ glance_services = [x for x in services['services']
+ if x['name'] == 'glance']
+ cls.glance_service_id = glance_services[0]['id']
+
+ # Pre-create all the quota limits and record their IDs so we can
+ # update them in-place without needing to know which ones have been
+ # created and in which order.
+ cls.limit_ids = {}
+
+ try:
+ cls.limit_ids['image_size_total'] = cls._create_limit(
+ 'image_size_total', 10)
+ cls.limit_ids['image_stage_total'] = cls._create_limit(
+ 'image_stage_total', 10)
+ cls.limit_ids['image_count_total'] = cls._create_limit(
+ 'image_count_total', 10)
+ cls.limit_ids['image_count_uploading'] = cls._create_limit(
+ 'image_count_uploading', 10)
+ except lib_exc.Forbidden:
+ # If we fail to set limits, it means they are not
+ # registered, and thus we will skip these tests once we
+ # have our os_system_admin client and run
+ # check_quotas_enabled().
+ pass
+
+ def setUp(self):
+ super(ImageQuotaTest, self).setUp()
+ self.created_images = []
+
+ def create_image(self, data=None, **kwargs):
+ """Wrapper that returns a test image."""
+
+ if 'name' not in kwargs:
+ name = data_utils.rand_name(self.__name__ + "-image")
+ kwargs['name'] = name
+
+ params = dict(kwargs)
+ if data:
+ # NOTE: On glance v1 API, the data should be passed on
+ # a header. Then here handles the data separately.
+ params['data'] = data
+
+ image = self.image_client.create_image(**params)
+ # Image objects returned by the v1 client have the image
+ # data inside a dict that is keyed against 'image'.
+ if 'image' in image:
+ image = image['image']
+ self.created_images.append(image['id'])
+ self.addCleanup(
+ self.image_client.wait_for_resource_deletion,
+ image['id'])
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.image_client.delete_image, image['id'])
+ return image
+
+ def check_quotas_enabled(self):
+ # Check to see if we should even be running these tests. Use
+ # the presence of a registered limit that we recognize as an
+ # indication. This will be set up by the operator (or
+ # devstack) if glance is configured to use/honor the unified
+ # limits. If one is set, they must all be set, because glance
+ # has a single all-or-nothing flag for whether or not to use
+ # keystone limits. If anything, checking only one helps to
+ # assert the assumption that, if enabled, they must all be at
+ # least registered for proper operation.
+ registered_limits = self.os_system_admin.identity_limits_client.\
+ get_registered_limits()['registered_limits']
+ if 'image_count_total' not in [x['resource_name']
+ for x in registered_limits]:
+ raise self.skipException('Target system is not configured with '
+ 'glance unified limits')
+
+ @classmethod
+ def _create_limit(cls, name, value):
+ return cls.os_system_admin.identity_limits_client.create_limit(
+ CONF.identity.region, cls.glance_service_id,
+ cls.image_client.tenant_id, name, value)['limits'][0]['id']
+
+ def _update_limit(self, name, value):
+ self.os_system_admin.identity_limits_client.update_limit(
+ self.limit_ids[name], value)
+
+ def _cleanup_images(self):
+ while self.created_images:
+ image_id = self.created_images.pop()
+ try:
+ self.image_client.delete_image(image_id)
+ except lib_exc.NotFound:
+ pass
+
+ @decorators.idempotent_id('9b74fe24-183b-41e6-bf42-84c2958a7be8')
+ @utils.services('image', 'identity')
+ def test_image_count_quota(self):
+ self.check_quotas_enabled()
+
+ # Set a quota on the number of images for our tenant to one.
+ self._update_limit('image_count_total', 1)
+
+ # Create one image
+ image = self.create_image(name='first',
+ container_format='bare',
+ disk_format='raw',
+ visibility='private')
+
+ # Second image would put us over quota, so expect failure.
+ self.assertRaises(lib_exc.OverLimit,
+ self.create_image,
+ name='second',
+ container_format='bare',
+ disk_format='raw',
+ visibility='private')
+
+ # Update our limit to two.
+ self._update_limit('image_count_total', 2)
+
+ # Now the same create should succeed.
+ self.create_image(name='second',
+ container_format='bare',
+ disk_format='raw',
+ visibility='private')
+
+ # Third image would put us over quota, so expect failure.
+ self.assertRaises(lib_exc.OverLimit,
+ self.create_image,
+ name='third',
+ container_format='bare',
+ disk_format='raw',
+ visibility='private')
+
+ # Delete the first image to put us under quota.
+ self.image_client.delete_image(image['id'])
+
+ # Now the same create should succeed.
+ self.create_image(name='third',
+ container_format='bare',
+ disk_format='raw',
+ visibility='private')
+
+ # Delete all the images we created before the next test runs,
+ # so that it starts with full quota.
+ self._cleanup_images()
+
+ @decorators.idempotent_id('b103788b-5329-4aa9-8b0d-97f8733460db')
+ @utils.services('image', 'identity')
+ def test_image_count_uploading_quota(self):
+ if not CONF.image_feature_enabled.import_image:
+ skip_msg = (
+ "%s skipped as image import is not available" % __name__)
+ raise self.skipException(skip_msg)
+
+ self.check_quotas_enabled()
+
+ # Set a quota on the number of images we can have in uploading state.
+ self._update_limit('image_stage_total', 10)
+ self._update_limit('image_size_total', 10)
+ self._update_limit('image_count_total', 10)
+ self._update_limit('image_count_uploading', 1)
+
+ file_content = data_utils.random_bytes(1 * units.Mi)
+
+ # Create and stage an image
+ image1 = self.create_image(name='first',
+ container_format='bare',
+ disk_format='raw',
+ visibility='private')
+ self.image_client.stage_image_file(image1['id'],
+ io.BytesIO(file_content))
+
+ # Check that we can not stage another
+ image2 = self.create_image(name='second',
+ container_format='bare',
+ disk_format='raw',
+ visibility='private')
+ self.assertRaises(lib_exc.OverLimit,
+ self.image_client.stage_image_file,
+ image2['id'], io.BytesIO(file_content))
+
+ # ... nor upload directly
+ image3 = self.create_image(name='third',
+ container_format='bare',
+ disk_format='raw',
+ visibility='private')
+ self.assertRaises(lib_exc.OverLimit,
+ self.image_client.store_image_file,
+ image3['id'],
+ io.BytesIO(file_content))
+
+ # Update our quota to make room
+ self._update_limit('image_count_uploading', 2)
+
+ # Now our upload should work
+ self.image_client.store_image_file(image3['id'],
+ io.BytesIO(file_content))
+
+ # ...and because that is no longer in uploading state, we should be
+ # able to stage our second image from above.
+ self.image_client.stage_image_file(image2['id'],
+ io.BytesIO(file_content))
+
+ # Finish our import of image2
+ self.image_client.image_import(image2['id'], method='glance-direct')
+ waiters.wait_for_image_imported_to_stores(self.image_client,
+ image2['id'])
+
+ # Set our quota back to one
+ self._update_limit('image_count_uploading', 1)
+
+ # Since image1 is still staged, we should not be able to upload
+ # an image.
+ image4 = self.create_image(name='fourth',
+ container_format='bare',
+ disk_format='raw',
+ visibility='private')
+ self.assertRaises(lib_exc.OverLimit,
+ self.image_client.store_image_file,
+ image4['id'],
+ io.BytesIO(file_content))
+
+ # Finish our import of image1 to make space in our uploading quota.
+ self.image_client.image_import(image1['id'], method='glance-direct')
+ waiters.wait_for_image_imported_to_stores(self.image_client,
+ image1['id'])
+
+ # Make sure that freed up the one upload quota to complete our upload
+ self.image_client.store_image_file(image4['id'],
+ io.BytesIO(file_content))
+
+ # Delete all the images we created before the next test runs,
+ # so that it starts with full quota.
+ self._cleanup_images()
+
+ @decorators.idempotent_id('05e8d064-c39a-4801-8c6a-465df375ec5b')
+ @utils.services('image', 'identity')
+ def test_image_size_quota(self):
+ self.check_quotas_enabled()
+
+ # Set a quota on the image size for our tenant to 1MiB, and allow ten
+ # images.
+ self._update_limit('image_size_total', 1)
+ self._update_limit('image_count_total', 10)
+ self._update_limit('image_count_uploading', 10)
+
+ file_content = data_utils.random_bytes(1 * units.Mi)
+
+ # Create and upload a 1MiB image.
+ image1 = self.create_image(name='first',
+ container_format='bare',
+ disk_format='raw',
+ visibility='private')
+ self.image_client.store_image_file(image1['id'],
+ io.BytesIO(file_content))
+
+ # Create and upload a second 1MiB image. This succeeds, but
+ # after completion, we are over quota. Despite us being at
+ # quota above, the initial quota check for the second
+ # operation has no idea what the image size will be, and thus
+ # uses delta=0. This will succeed because we're not
+ # technically over-quota and have not asked for any more (this
+ # is oslo.limit behavior). After the second operation,
+ # however, we will be over-quota regardless of the delta and
+        # subsequent attempts will fail. Because glance does not
+ # require an image size to be declared before upload, this is
+ # really the best it can do without an API change.
+ image2 = self.create_image(name='second',
+ container_format='bare',
+ disk_format='raw',
+ visibility='private')
+ self.image_client.store_image_file(image2['id'],
+ io.BytesIO(file_content))
+
+ # Create and attempt to upload a third 1MiB image. This should fail to
+ # upload (but not create) because we are over quota.
+ image3 = self.create_image(name='third',
+ container_format='bare',
+ disk_format='raw',
+ visibility='private')
+ self.assertRaises(lib_exc.OverLimit,
+ self.image_client.store_image_file,
+ image3['id'], io.BytesIO(file_content))
+
+ # Increase our size quota to 2MiB.
+ self._update_limit('image_size_total', 2)
+
+ # Now the upload of the already-created image is allowed, but
+ # after completion, we are over quota again.
+ self.image_client.store_image_file(image3['id'],
+ io.BytesIO(file_content))
+
+ # Create and attempt to upload a fourth 1MiB image. This should
+ # fail to upload (but not create) because we are over quota.
+ image4 = self.create_image(name='fourth',
+ container_format='bare',
+ disk_format='raw',
+ visibility='private')
+ self.assertRaises(lib_exc.OverLimit,
+ self.image_client.store_image_file,
+ image4['id'], io.BytesIO(file_content))
+
+ # Delete our first image to make space in our existing 2MiB quota.
+ self.image_client.delete_image(image1['id'])
+
+ # Now the upload of the already-created image is allowed.
+ self.image_client.store_image_file(image4['id'],
+ io.BytesIO(file_content))
+
+ # Delete all the images we created before the next test runs,
+ # so that it starts with full quota.
+ self._cleanup_images()
+
+ @decorators.idempotent_id('fc76b8d9-aae5-46fb-9285-099e37f311f7')
+ @utils.services('image', 'identity')
+ def test_image_stage_quota(self):
+ if not CONF.image_feature_enabled.import_image:
+ skip_msg = (
+ "%s skipped as image import is not available" % __name__)
+ raise self.skipException(skip_msg)
+
+ self.check_quotas_enabled()
+
+ # Create a staging quota of 1MiB, allow 10MiB of active
+ # images, and a total of ten images.
+ self._update_limit('image_stage_total', 1)
+ self._update_limit('image_size_total', 10)
+ self._update_limit('image_count_total', 10)
+ self._update_limit('image_count_uploading', 10)
+
+ file_content = data_utils.random_bytes(1 * units.Mi)
+
+ # Create and stage a 1MiB image.
+ image1 = self.create_image(name='first',
+ container_format='bare',
+ disk_format='raw',
+ visibility='private')
+ self.image_client.stage_image_file(image1['id'],
+ io.BytesIO(file_content))
+
+ # Create and stage a second 1MiB image. This succeeds, but
+ # after completion, we are over quota.
+ image2 = self.create_image(name='second',
+ container_format='bare',
+ disk_format='raw',
+ visibility='private')
+ self.image_client.stage_image_file(image2['id'],
+ io.BytesIO(file_content))
+
+ # Create and attempt to stage a third 1MiB image. This should fail to
+ # stage (but not create) because we are over quota.
+ image3 = self.create_image(name='third',
+ container_format='bare',
+ disk_format='raw',
+ visibility='private')
+ self.assertRaises(lib_exc.OverLimit,
+ self.image_client.stage_image_file,
+ image3['id'], io.BytesIO(file_content))
+
+ # Make sure that even though we are over our stage quota, we
+ # can still create and upload an image the regular way.
+ image_upload = self.create_image(name='uploaded',
+ container_format='bare',
+ disk_format='raw',
+ visibility='private')
+ self.image_client.store_image_file(image_upload['id'],
+ io.BytesIO(file_content))
+
+ # Increase our stage quota to two MiB.
+ self._update_limit('image_stage_total', 2)
+
+ # Now the upload of the already-created image is allowed, but
+ # after completion, we are over quota again.
+ self.image_client.stage_image_file(image3['id'],
+ io.BytesIO(file_content))
+
+ # Create and attempt to stage a fourth 1MiB image. This should
+ # fail to stage (but not create) because we are over quota.
+ image4 = self.create_image(name='fourth',
+ container_format='bare',
+ disk_format='raw',
+ visibility='private')
+ self.assertRaises(lib_exc.OverLimit,
+ self.image_client.stage_image_file,
+ image4['id'], io.BytesIO(file_content))
+
+ # Finish our import of image1 to make space in our stage quota.
+ self.image_client.image_import(image1['id'], method='glance-direct')
+ waiters.wait_for_image_imported_to_stores(self.image_client,
+ image1['id'])
+
+ # Now the upload of the already-created image is allowed.
+ self.image_client.stage_image_file(image4['id'],
+ io.BytesIO(file_content))
+
+ # Delete all the images we created before the next test runs,
+ # so that it starts with full quota.
+ self._cleanup_images()
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index f801243..5cdbfbf 100755
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -453,11 +453,14 @@
"volumeAttachments": [{"volumeId": uuids.volume_id}]}
mock_list_volume_attachments = mock.Mock(
side_effect=[volume_attached, volume_attached])
+ mock_get_console_output = mock.Mock(
+ return_value={'output': 'output'})
mock_client = mock.Mock(
spec=servers_client.ServersClient,
build_interval=1,
build_timeout=1,
- list_volume_attachments=mock_list_volume_attachments)
+ list_volume_attachments=mock_list_volume_attachments,
+ get_console_output=mock_get_console_output)
self.patch(
'time.time',
side_effect=[0., 0.5, mock_client.build_timeout + 1.])
@@ -473,3 +476,22 @@
mock_list_volume_attachments.assert_has_calls([
mock.call(uuids.server_id),
mock.call(uuids.server_id)])
+
+ # Assert that we fetch console output
+ mock_get_console_output.assert_called_once_with(uuids.server_id)
+
+ def test_wait_for_volume_attachment_remove_from_server_not_found(self):
+ mock_list_volume_attachments = mock.Mock(
+ side_effect=lib_exc.NotFound)
+ mock_client = mock.Mock(
+ spec=servers_client.ServersClient,
+ list_volume_attachments=mock_list_volume_attachments)
+
+        # Assert that nothing is raised when lib_exc.NotFound is raised
+ # by the client call to list_volume_attachments
+ waiters.wait_for_volume_attachment_remove_from_server(
+ mock_client, mock.sentinel.server_id, mock.sentinel.volume_id)
+
+ # Assert that list_volume_attachments was actually called
+ mock_list_volume_attachments.assert_called_once_with(
+ mock.sentinel.server_id)
diff --git a/tempest/tests/lib/cmd/test_check_uuid.py b/tempest/tests/lib/cmd/test_check_uuid.py
index 5d63dec..403de38 100644
--- a/tempest/tests/lib/cmd/test_check_uuid.py
+++ b/tempest/tests/lib/cmd/test_check_uuid.py
@@ -19,7 +19,6 @@
from unittest import mock
from tempest.lib.cmd import check_uuid
-from tempest.lib import decorators
from tempest.tests import base
@@ -30,12 +29,13 @@
" pass"
def create_tests_file(self, directory):
- with open(directory + "/__init__.py", "w"):
- pass
+ init_file = open(directory + "/__init__.py", "w")
+ init_file.close()
tests_file = directory + "/tests.py"
with open(tests_file, "w") as fake_file:
fake_file.write(TestCLInterface.CODE)
+ fake_file.close()
return tests_file
@@ -50,7 +50,6 @@
with open(tests_file, "r") as f:
self.assertTrue(TestCLInterface.CODE == f.read())
- @decorators.skip_because(bug='1918316')
def test_fix_argument_yes(self):
temp_dir = tempfile.mkdtemp(prefix='check-uuid-yes', dir=".")
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
diff --git a/tempest/tests/lib/services/identity/v3/test_limit_client.py b/tempest/tests/lib/services/identity/v3/test_limit_client.py
new file mode 100644
index 0000000..07ec6cd
--- /dev/null
+++ b/tempest/tests/lib/services/identity/v3/test_limit_client.py
@@ -0,0 +1,82 @@
+# Copyright 2021 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tempest.lib.services.identity.v3 import limits_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestLimitsClient(base.BaseServiceTest):
+ def setUp(self):
+ super(TestLimitsClient, self).setUp()
+ self.client = limits_client.LimitsClient(
+ fake_auth_provider.FakeAuthProvider(),
+ 'identity', 'regionOne')
+
+ def test_get_registered_limits(self):
+ fake_result = {'foo': 'bar'}
+ self.check_service_client_function(
+ self.client.get_registered_limits,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ fake_result,
+ False,
+ status=200)
+
+ def test_create_limit(self):
+ fake_result = {'foo': 'bar'}
+ self.check_service_client_function(
+ self.client.create_limit,
+ 'tempest.lib.common.rest_client.RestClient.post',
+ fake_result,
+ False,
+ region_id='regionOne', service_id='image',
+ project_id='project', resource_name='widgets',
+ default_limit=10,
+ description='Spacely Widgets',
+ status=201)
+
+ def test_create_limit_with_domain(self):
+ fake_result = {'foo': 'bar'}
+ self.check_service_client_function(
+ self.client.create_limit,
+ 'tempest.lib.common.rest_client.RestClient.post',
+ fake_result,
+ False,
+ region_id='regionOne', service_id='image',
+ project_id='project', resource_name='widgets',
+ default_limit=10,
+ domain_id='foo',
+ description='Spacely Widgets',
+ status=201)
+
+ def test_update_limit(self):
+ fake_result = {'foo': 'bar'}
+ self.check_service_client_function(
+ self.client.update_limit,
+ 'tempest.lib.common.rest_client.RestClient.patch',
+ fake_result,
+ False,
+ limit_id='123', resource_limit=20,
+ status=200)
+
+ def test_update_limit_with_description(self):
+ fake_result = {'foo': 'bar'}
+ self.check_service_client_function(
+ self.client.update_limit,
+ 'tempest.lib.common.rest_client.RestClient.patch',
+ fake_result,
+ False,
+ limit_id='123', resource_limit=20,
+ description='new description',
+ status=200)
diff --git a/tempest/tests/lib/services/network/test_log_resource_client.py b/tempest/tests/lib/services/network/test_log_resource_client.py
new file mode 100644
index 0000000..ef502bc
--- /dev/null
+++ b/tempest/tests/lib/services/network/test_log_resource_client.py
@@ -0,0 +1,145 @@
+# Copyright 2021 Red Hat, Inc.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.lib.services.network import log_resource_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestLogResourceClient(base.BaseServiceTest):
+
+ FAKE_LOGS = {
+ "logs": [
+ {
+ "name": "security group log1",
+ "description": "Log for test demo.",
+ "id": "2f245a7b-796b-4f26-9cf9-9e82d248fda7",
+ "project_id": "92a5a4f4245a4abbafacb7ca73b027b0",
+ "tenant_id": "92a5a4f4245a4abbafacb7ca73b027b0",
+ "created_at": "2018-04-03T21:03:04Z",
+ "updated_at": "2018-04-03T21:03:04Z",
+ "enabled": True,
+ "revision_number": 1,
+ "resource_type": "security_group",
+ "resource_id": None,
+ "target_id": None,
+ "event": "ALL"
+ },
+ {
+ "name": "security group log2",
+ "description": "Log for test demo.",
+ "id": "46ebaec1-0570-43ac-82f6-60d2b03168c4",
+ "project_id": "82a5a4f4245a4abbafacb7ca73b027b0",
+ "tenant_id": "82a5a4f4245a4abbafacb7ca73b027b0",
+ "created_at": "2018-04-03T21:04:04Z",
+ "updated_at": "2018-04-03T21:04:04Z",
+ "enabled": True,
+ "revision_number": 2,
+ "resource_type": "security_group",
+ "resource_id": None,
+ "target_id": None,
+ "event": "ALL"
+ }
+ ]
+ }
+
+ FAKE_LOG_ID = "2f245a7b-796b-4f26-9cf9-9e82d248fda7"
+
+ def setUp(self):
+ super(TestLogResourceClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.log_resource_client = log_resource_client.LogResourceClient(
+ fake_auth, "network", "regionOne")
+
+ def _test_list_logs(self, bytes_body=False):
+ self.check_service_client_function(
+ self.log_resource_client.list_logs,
+ "tempest.lib.common.rest_client.RestClient.get",
+ self.FAKE_LOGS,
+ bytes_body,
+ 200)
+
+ def _test_show_log(self, bytes_body=False):
+ self.check_service_client_function(
+ self.log_resource_client.show_log,
+ "tempest.lib.common.rest_client.RestClient.get",
+ {"log": self.FAKE_LOGS["logs"][0]},
+ bytes_body,
+ 200,
+ log_id=self.FAKE_LOG_ID)
+
+ def _test_create_log(self, bytes_body=False):
+ self.check_service_client_function(
+ self.log_resource_client.create_log,
+ "tempest.lib.common.rest_client.RestClient.post",
+ {"logs": self.FAKE_LOGS["logs"][1]},
+ bytes_body,
+ 201,
+ log_id="2f245a7b-796b-4f26-9cf9-9e82d248fda7")
+
+ def _test_update_log(self, bytes_body=False):
+ update_kwargs = {
+ "tenant_id": "83a5a4f4245a4abbafacb7ca73b027b0"
+ }
+
+ resp_body = {
+ "logs": copy.deepcopy(
+ self.FAKE_LOGS["logs"][0]
+ )
+ }
+ resp_body["logs"].update(update_kwargs)
+
+ self.check_service_client_function(
+ self.log_resource_client.update_log,
+ "tempest.lib.common.rest_client.RestClient.put",
+ resp_body,
+ bytes_body,
+ 200,
+ log_id=self.FAKE_LOG_ID,
+ **update_kwargs)
+
+ def test_list_logs_with_str_body(self):
+ self._test_list_logs()
+
+ def test_list_logs_with_bytes_body(self):
+ self._test_list_logs(bytes_body=True)
+
+ def test_create_log_with_str_body(self):
+ self._test_create_log()
+
+ def test_create_log_with_bytes_body(self):
+ self._test_create_log(bytes_body=True)
+
+ def test_show_log_with_str_body(self):
+ self._test_show_log()
+
+ def test_show_log_with_bytes_body(self):
+ self._test_show_log(bytes_body=True)
+
+ def test_update_log_with_str_body(self):
+ self._test_update_log()
+
+ def test_update_log_with_bytes_body(self):
+ self._test_update_log(bytes_body=True)
+
+ def test_delete_log(self):
+ self.check_service_client_function(
+ self.log_resource_client.delete_log,
+ 'tempest.lib.common.rest_client.RestClient.delete',
+ {},
+ status=204,
+ log_id=self.FAKE_LOG_ID)
diff --git a/tempest/tests/lib/services/network/test_loggable_resource_client.py b/tempest/tests/lib/services/network/test_loggable_resource_client.py
new file mode 100644
index 0000000..232775b
--- /dev/null
+++ b/tempest/tests/lib/services/network/test_loggable_resource_client.py
@@ -0,0 +1,53 @@
+# Copyright 2021 Red Hat, Inc.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.network import loggable_resource_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestLoggableResourceClient(base.BaseServiceTest):
+
+ FAKE_LOGS = {
+ "loggable_resources": [
+ {
+ "type": "security_group"
+ },
+ {
+ "type": "none"
+ }
+ ]
+ }
+
+ def setUp(self):
+ super(TestLoggableResourceClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.loggable_resource_client = \
+ loggable_resource_client.LoggableResourceClient(
+ fake_auth, "network", "regionOne")
+
+ def _test_list_loggable_resources(self, bytes_body=False):
+ self.check_service_client_function(
+ self.loggable_resource_client.list_loggable_resources,
+ "tempest.lib.common.rest_client.RestClient.get",
+ self.FAKE_LOGS,
+ bytes_body,
+ 200)
+
+ def test_list_loggable_resources_with_str_body(self):
+ self._test_list_loggable_resources()
+
+ def test_list_loggable_resources_with_bytes_body(self):
+ self._test_list_loggable_resources(bytes_body=True)
diff --git a/tempest/tests/lib/services/network/test_qos_limit_bandwidth_rules_client.py b/tempest/tests/lib/services/network/test_qos_limit_bandwidth_rules_client.py
new file mode 100644
index 0000000..b038968
--- /dev/null
+++ b/tempest/tests/lib/services/network/test_qos_limit_bandwidth_rules_client.py
@@ -0,0 +1,124 @@
+# Copyright 2021 Red Hat.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import copy
+
+from tempest.lib import decorators
+
+from tempest.lib.services.network import qos_limit_bandwidth_rules_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+from oslo_log import log as logging
+LOG = logging.getLogger('tempest')
+
+
+class TestQosLimitBandwidthRulesClient(base.BaseServiceTest):
+
+ FAKE_QOS_POLICY_ID = "f1011b08-1297-11e9-a1e7-c7e6825a2616"
+ FAKE_MAX_BW_RULE_ID = "e758c89e-1297-11e9-a6cf-cf46a71e6699"
+
+ FAKE_MAX_BW_RULE_REQUEST = {
+ 'qos_policy_id': FAKE_QOS_POLICY_ID,
+ 'max_kbps': 1000,
+ 'max_burst_kbps': 0,
+ 'direction': 'ingress'
+ }
+
+ FAKE_MAX_BW_RULE_RESPONSE = {
+ 'bandwidth_limit_rule': {
+ 'id': FAKE_MAX_BW_RULE_ID,
+ 'max_kbps': 10000,
+ 'max_burst_kbps': 0,
+ 'direction': 'ingress'
+ }
+ }
+
+ FAKE_MAX_BW_RULES = {
+ 'bandwidth_limit_rules': [
+ FAKE_MAX_BW_RULE_RESPONSE['bandwidth_limit_rule']
+ ]
+ }
+
+ def setUp(self):
+ super(TestQosLimitBandwidthRulesClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.qos_limit_bw_client = qos_limit_bandwidth_rules_client.\
+ QosLimitBandwidthRulesClient(fake_auth, "network", "regionOne")
+
+ @decorators.idempotent_id('cde981fa-e93b-11eb-aacb-74e5f9e2a801')
+ def test_create_limit_bandwidth_rules(self, bytes_body=False):
+ self.check_service_client_function(
+ self.qos_limit_bw_client.create_limit_bandwidth_rule,
+ "tempest.lib.common.rest_client.RestClient.post",
+ self.FAKE_MAX_BW_RULE_RESPONSE,
+ bytes_body,
+ 201,
+ **self.FAKE_MAX_BW_RULE_REQUEST
+ )
+
+ @decorators.idempotent_id('86e6803a-e974-11eb-aacb-74e5f9e2a801')
+ def test_update_limit_bandwidth_rules(self, bytes_body=False):
+ update_kwargs = {
+ "max_kbps": "20000"
+ }
+
+ resp_body = {
+ "bandwidth_limit_rule": copy.deepcopy(
+ self.FAKE_MAX_BW_RULE_RESPONSE['bandwidth_limit_rule']
+ )
+ }
+ resp_body["bandwidth_limit_rule"].update(update_kwargs)
+
+ self.check_service_client_function(
+ self.qos_limit_bw_client.update_limit_bandwidth_rule,
+ "tempest.lib.common.rest_client.RestClient.put",
+ resp_body,
+ bytes_body,
+ 202,
+ qos_policy_id=self.FAKE_QOS_POLICY_ID,
+ rule_id=self.FAKE_MAX_BW_RULE_ID,
+ **update_kwargs)
+
+ @decorators.idempotent_id('be60ae6e-e979-11eb-aacb-74e5f9e2a801')
+ def test_show_limit_bandwidth_rules(self, bytes_body=False):
+ self.check_service_client_function(
+ self.qos_limit_bw_client.show_limit_bandwidth_rule,
+ "tempest.lib.common.rest_client.RestClient.get",
+ self.FAKE_MAX_BW_RULE_RESPONSE,
+ bytes_body,
+ 200,
+ qos_policy_id=self.FAKE_QOS_POLICY_ID,
+ rule_id=self.FAKE_MAX_BW_RULE_ID
+ )
+
+ @decorators.idempotent_id('0a7c0964-e97b-11eb-aacb-74e5f9e2a801')
+ def test_delete_limit_bandwidth_rule(self):
+ self.check_service_client_function(
+ self.qos_limit_bw_client.delete_limit_bandwidth_rule,
+ "tempest.lib.common.rest_client.RestClient.delete",
+ {},
+ status=204,
+ qos_policy_id=self.FAKE_QOS_POLICY_ID,
+ rule_id=self.FAKE_MAX_BW_RULE_ID)
+
+ @decorators.idempotent_id('08df88ae-e97d-11eb-aacb-74e5f9e2a801')
+ def test_list_minimum_bandwidth_rules(self, bytes_body=False):
+ self.check_service_client_function(
+ self.qos_limit_bw_client.list_limit_bandwidth_rules,
+ "tempest.lib.common.rest_client.RestClient.get",
+ self.FAKE_MAX_BW_RULES,
+ bytes_body,
+ 200,
+ qos_policy_id=self.FAKE_QOS_POLICY_ID
+ )
diff --git a/tempest/tests/lib/services/network/test_qos_minimum_bandwidth_rules_client.py b/tempest/tests/lib/services/network/test_qos_minimum_bandwidth_rules_client.py
index 8234dda..7187ffa 100644
--- a/tempest/tests/lib/services/network/test_qos_minimum_bandwidth_rules_client.py
+++ b/tempest/tests/lib/services/network/test_qos_minimum_bandwidth_rules_client.py
@@ -98,7 +98,7 @@
"tempest.lib.common.rest_client.RestClient.put",
resp_body,
bytes_body,
- 200,
+ 202,
qos_policy_id=self.FAKE_QOS_POLICY_ID,
rule_id=self.FAKE_MIN_BW_RULE_ID,
**update_kwargs)
diff --git a/tools/generate-tempest-plugins-list.py b/tools/generate-tempest-plugins-list.py
index 1b5b369..eef5886 100644
--- a/tools/generate-tempest-plugins-list.py
+++ b/tools/generate-tempest-plugins-list.py
@@ -35,28 +35,36 @@
# TODO(masayukig): Some of these can be removed from NON_ACTIVE_LIST in the
# future when the patches are merged.
NON_ACTIVE_LIST = [
- 'x/gce-api', # It looks gce-api doesn't support python3 yet.
+ 'x/gce-api', # It looks gce-api doesn't support python3 yet
+ # https://bugs.launchpad.net/gce-api/+bug/1931094
'x/glare', # To avoid sanity-job failure
- 'x/group-based-policy', # It looks this doesn't support python3 yet.
- 'x/intel-nfv-ci-tests', # https://review.opendev.org/#/c/634640/
+ 'x/group-based-policy',
+ # https://bugs.launchpad.net/group-based-policy/+bug/1931091
+ 'x/intel-nfv-ci-tests', # To avoid sanity-job failure
'openstack/networking-generic-switch',
+ # This is not a real tempest plugin,
# https://review.opendev.org/#/c/634846/
- 'x/networking-l2gw-tempest-plugin',
- # https://review.opendev.org/#/c/635093/
- 'openstack/networking-midonet', # https://review.opendev.org/#/c/635096/
- 'x/networking-plumgrid', # https://review.opendev.org/#/c/635096/
+ 'x/networking-plumgrid', # No longer contains tempest tests
'x/networking-spp', # https://review.opendev.org/#/c/635098/
+ # networking-spp is missing neutron-tempest-plugin as a dep plus
+ # test-requirements.txt is nested in an openstack dir and the sanity script
+ # doesn't account for such a scenario yet
'openstack/neutron-dynamic-routing',
+ # As tests have been migrated to neutron-tempest-plugin:
# https://review.opendev.org/#/c/637718/
- 'openstack/neutron-vpnaas', # https://review.opendev.org/#/c/637719/
- 'x/tap-as-a-service', # To avoid sanity-job failure
- 'x/valet', # https://review.opendev.org/#/c/638339/
- 'x/kingbird', # https://bugs.launchpad.net/kingbird/+bug/1869722
- # vmware-nsx is excluded since https://review.opendev.org/#/c/736952
- 'x/vmware-nsx-tempest-plugin',
+ 'openstack/neutron-vpnaas',
+ # As tests have been migrated to neutron-tempest-plugin:
+ # https://review.opendev.org/c/openstack/neutron-vpnaas/+/695834
+ 'x/valet', # valet is unmaintained now
+ # https://review.opendev.org/c/x/valet/+/638339
+ 'x/kingbird', # kingbird is unmaintained now
+ # https://bugs.launchpad.net/kingbird/+bug/1869722
+ 'x/mogan',
# mogan is unmaintained now, remove from the list when this is merged:
# https://review.opendev.org/c/x/mogan/+/767718
- 'x/mogan',
+ 'x/vmware-nsx-tempest-plugin'
+ # Failing since 2021-08-27
+ # https://zuul.opendev.org/t/openstack/build/45f6c8d3c62d4387a70b7b471ec687c8
]
url = 'https://review.opendev.org/projects/'
diff --git a/tools/verify-ipv6-only-deployments.sh b/tools/verify-ipv6-only-deployments.sh
index 2596395..bfb1403 100755
--- a/tools/verify-ipv6-only-deployments.sh
+++ b/tools/verify-ipv6-only-deployments.sh
@@ -1,92 +1,8 @@
#!/bin/bash
-#
-#
-# NOTE(gmann): This script is used in 'devstack-tempest-ipv6' zuul job to verify that
-# services are deployed on IPv6 properly or not. This will capture if any devstck or devstack
-# plugins are missing the required setting to listen on IPv6 address. This is run as part of
-# run phase of zuul job and before test run. Child job of 'devstack-tempest-ipv6'
-# can expand the IPv6 verification specific to project by defining the new post-run script which
-# will run along with this base script.
-# If there are more common verification for IPv6 then we can always extent this script.
-# Keep track of the DevStack directory
-TOP_DIR=$(cd $(dirname "$0")/../../devstack && pwd)
-source $TOP_DIR/stackrc
-source $TOP_DIR/openrc admin admin
+# NOTE(yoctozepto): This script lives now in devstack where it belongs.
+# It is kept here for the legacy (dsvm) jobs which look for it in tempest still.
+# TODO: Drop it when no legacy jobs use the master tempest.
-function verify_devstack_ipv6_setting {
- local _service_host=''
- _service_host=$(echo $SERVICE_HOST | tr -d [])
- local _host_ipv6=''
- _host_ipv6=$(echo $HOST_IPV6 | tr -d [])
- local _service_listen_address=''
- _service_listen_address=$(echo $SERVICE_LISTEN_ADDRESS | tr -d [])
- local _service_local_host=''
- _service_local_host=$(echo $SERVICE_LOCAL_HOST | tr -d [])
- if [[ "$SERVICE_IP_VERSION" != 6 ]]; then
- echo $SERVICE_IP_VERSION "SERVICE_IP_VERSION is not set to 6 which is must for devstack to deploy services with IPv6 address."
- exit 1
- fi
- is_service_host_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_host'"))')
- if [[ "$is_service_host_ipv6" != "True" ]]; then
- echo $SERVICE_HOST "SERVICE_HOST is not ipv6 which means devstack cannot deploy services on IPv6 address."
- exit 1
- fi
- is_host_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_host_ipv6'"))')
- if [[ "$is_host_ipv6" != "True" ]]; then
- echo $HOST_IPV6 "HOST_IPV6 is not ipv6 which means devstack cannot deploy services on IPv6 address."
- exit 1
- fi
- is_service_listen_address=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_listen_address'"))')
- if [[ "$is_service_listen_address" != "True" ]]; then
- echo $SERVICE_LISTEN_ADDRESS "SERVICE_LISTEN_ADDRESS is not ipv6 which means devstack cannot deploy services on IPv6 address."
- exit 1
- fi
- is_service_local_host=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_local_host'"))')
- if [[ "$is_service_local_host" != "True" ]]; then
- echo $SERVICE_LOCAL_HOST "SERVICE_LOCAL_HOST is not ipv6 which means devstack cannot deploy services on IPv6 address."
- exit 1
- fi
- echo "Devstack is properly configured with IPv6"
- echo "SERVICE_IP_VERSION: " $SERVICE_IP_VERSION "HOST_IPV6: " $HOST_IPV6 "SERVICE_HOST: " $SERVICE_HOST "SERVICE_LISTEN_ADDRESS: " $SERVICE_LISTEN_ADDRESS "SERVICE_LOCAL_HOST: " $SERVICE_LOCAL_HOST
-}
-
-function sanity_check_system_ipv6_enabled {
- system_ipv6_enabled=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_ipv6_enabled())')
- if [[ $system_ipv6_enabled != "True" ]]; then
- echo "IPv6 is disabled in system"
- exit 1
- fi
- echo "IPv6 is enabled in system"
-}
-
-function verify_service_listen_address_is_ipv6 {
- local endpoints_verified=False
- local all_ipv6=True
- endpoints=$(openstack endpoint list -f value -c URL)
- for endpoint in ${endpoints}; do
- local endpoint_address=''
- endpoint_address=$(echo "$endpoint" | awk -F/ '{print $3}' | awk -F] '{print $1}')
- endpoint_address=$(echo $endpoint_address | tr -d [])
- local is_endpoint_ipv6=''
- is_endpoint_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$endpoint_address'"))')
- if [[ "$is_endpoint_ipv6" != "True" ]]; then
- all_ipv6=False
- echo $endpoint ": This is not ipv6 endpoint which means corresponding service is not listening on IPv6 address."
- continue
- fi
- endpoints_verified=True
- done
- if [[ "$all_ipv6" == "False" ]] || [[ "$endpoints_verified" == "False" ]]; then
- exit 1
- fi
- echo "All services deployed by devstack is on IPv6 endpoints"
- echo $endpoints
-}
-
-#First thing to verify if system has IPv6 enabled or not
-sanity_check_system_ipv6_enabled
-#Verify whether devstack is configured properly with IPv6 setting
-verify_devstack_ipv6_setting
-#Get all registrfed endpoints by devstack in keystone and verify that each endpoints address is IPv6.
-verify_service_listen_address_is_ipv6
+DEVSTACK_DIR=$(cd $(dirname "$0")/../../devstack && pwd)
+$DEVSTACK_DIR/tools/verify-ipv6-only-deployments.sh
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
index d931c55..2da5579 100644
--- a/zuul.d/integrated-gate.yaml
+++ b/zuul.d/integrated-gate.yaml
@@ -62,16 +62,12 @@
- job:
name: tempest-full-py3
parent: devstack-tempest
- # This job version is with swift disabled on py3
- # as swift was not ready on py3 until stable/train.
- branches:
- - stable/pike
- - stable/queens
- - stable/rocky
- - stable/stein
- - stable/train
+ # This job version is with swift enabled on py3
+ # as swift is ready on py3 from stable/ussuri onwards.
+ branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train)).*$
description: |
- Base integration test with Neutron networking, swift disabled, and py3.
+ Base integration test with Neutron networking, horizon, swift enabled,
+ and py3.
Former names for this job where:
* legacy-tempest-dsvm-py35
* gate-tempest-dsvm-py35
@@ -99,41 +95,10 @@
devstack_services:
# Enbale horizon so that we can run horizon test.
horizon: true
- s-account: false
- s-container: false
- s-object: false
- s-proxy: false
- # without Swift, c-bak cannot run (in the Gate at least)
- # NOTE(mriedem): Disable the cinder-backup service from
- # tempest-full-py3 since tempest-full-py3 is in the integrated-gate-py3
- # project template but the backup tests do not really involve other
- # services so they should be run in some more cinder-specific job,
- # especially because the tests fail at a high rate (see bugs 1483434,
- # 1813217, 1745168)
- c-bak: false
neutron-placement: true
neutron-qos: true
- job:
- name: tempest-full-py3
- parent: devstack-tempest
- # This job version is with swift enabled on py3
- # as swift is ready on py3 from stable/ussuri onwards.
- branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train)).*$
- description: |
- Base integration test with Neutron networking, swift enable, and py3.
- Former names for this job where:
- * legacy-tempest-dsvm-py35
- * gate-tempest-dsvm-py35
- vars:
- tox_envlist: full
- devstack_localrc:
- USE_PYTHON3: true
- FORCE_CONFIG_DRIVE: true
- ENABLE_VOLUME_MULTIATTACH: true
- GLANCE_USE_IMPORT_WORKFLOW: True
-
-- job:
name: tempest-integrated-networking
parent: devstack-tempest
branches: ^(?!stable/ocata).*$
@@ -243,45 +208,6 @@
USE_PYTHON3: False
- job:
- name: tempest-multinode-full
- parent: tempest-multinode-full-base
- nodeset: openstack-two-node-bionic
- # This job runs on Bionic and on python2. This is for stable/stein and stable/train.
- # This job is prepared to make sure all stable branches from stable/stein till stable/train
- # will keep running on bionic. This can be removed once stable/train is EOL.
- branches:
- - stable/stein
- - stable/train
- - stable/ussuri
- vars:
- devstack_localrc:
- USE_PYTHON3: False
- group-vars:
- subnode:
- devstack_localrc:
- USE_PYTHON3: False
-
-- job:
- name: tempest-multinode-full
- parent: tempest-multinode-full-base
- nodeset: openstack-two-node-xenial
- # This job runs on Xenial and this is for stable/pike, stable/queens
- # and stable/rocky. This job is prepared to make sure all stable branches
- # before stable/stein will keep running on xenial. This job can be
- # removed once stable/rocky is EOL.
- branches:
- - stable/pike
- - stable/queens
- - stable/rocky
- vars:
- devstack_localrc:
- USE_PYTHON3: False
- group-vars:
- subnode:
- devstack_localrc:
- USE_PYTHON3: False
-
-- job:
name: tempest-multinode-full-py3
parent: tempest-multinode-full
vars:
@@ -328,7 +254,7 @@
timeout: 10800
# This job runs on stable/stein onwards.
branches: ^(?!stable/(ocata|pike|queens|rocky)).*$
- vars:
+ vars: &tempest_slow_vars
tox_envlist: slow-serial
devstack_localrc:
CINDER_ENABLED_BACKENDS: lvm:lvmdriver-1,lvm:lvmdriver-2
@@ -348,62 +274,13 @@
ENABLE_VOLUME_MULTIATTACH: true
- job:
- name: tempest-slow
- parent: tempest-multinode-full
- description: |
- This multinode integration job will run all the tests tagged as slow.
- It enables the lvm multibackend setup to cover few scenario tests.
- This job will run only slow tests (API or Scenario) serially.
-
- Former names for this job were:
- * legacy-tempest-dsvm-neutron-scenario-multinode-lvm-multibackend
- * tempest-scenario-multinode-lvm-multibackend
- timeout: 10800
- branches:
- - stable/pike
- - stable/queens
- - stable/rocky
- vars:
- tox_envlist: slow-serial
- devstack_localrc:
- CINDER_ENABLED_BACKENDS: lvm:lvmdriver-1,lvm:lvmdriver-2
- ENABLE_VOLUME_MULTIATTACH: true
- # to avoid https://bugs.launchpad.net/neutron/+bug/1914037
- # as we couldn't backport the fix to rocky and older releases
- IPV6_PUBLIC_RANGE: 2001:db8:0:10::/64
- IPV6_PUBLIC_NETWORK_GATEWAY: 2001:db8:0:10::2
- IPV6_ROUTER_GW_IP: 2001:db8:0:10::1
- devstack_plugins:
- neutron: https://opendev.org/openstack/neutron
- devstack_services:
- neutron-placement: true
- neutron-qos: true
- tempest_concurrency: 2
- group-vars:
- # NOTE(mriedem): The ENABLE_VOLUME_MULTIATTACH variable is used on both
- # the controller and subnode prior to Rocky so we have to make sure the
- # variable is set in both locations.
- subnode:
- devstack_localrc:
- ENABLE_VOLUME_MULTIATTACH: true
-
-- job:
name: tempest-slow-py3
- parent: tempest-slow
- vars:
- devstack_localrc:
- USE_PYTHON3: true
- devstack_services:
- s-account: false
- s-container: false
- s-object: false
- s-proxy: false
- # without Swift, c-bak cannot run (in the Gate at least)
- c-bak: false
- group-vars:
- subnode:
- devstack_localrc:
- USE_PYTHON3: true
+ parent: tempest-multinode-full-py3
+ # This job version is with swift enabled on py3
+ # as swift is ready on py3 from stable/ussuri onwards.
+ timeout: 10800
+ branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train)).*$
+ vars: *tempest_slow_vars
- job:
name: tempest-cinder-v2-api
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 698df53..7f490e1 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -54,9 +54,44 @@
- ^.gitignore$
- ^.gitreview$
- ^.mailmap$
+ - ^tools/check_logs.py
+ - ^tools/format.sh
+ - ^tools/skip_tracker.py
+ - ^tools/tempest-integrated-gate-compute-exclude-list.txt
+ - ^tools/tempest-integrated-gate-networking-exclude-list.txt
+ - ^tools/tempest-integrated-gate-object-storage-exclude-list.txt
+ - ^tools/tempest-integrated-gate-placement-exclude-list.txt
+ - ^tools/tempest-integrated-gate-storage-blacklist.txt
+ - ^tools/tempest-integrated-gate-storage-exclude-list.txt
+ - ^tools/verify-ipv6-only-deployments.sh
+ - ^tools/with_venv.sh
# tools/ is not here since this relies on a script in tools/.
- tempest-ipv6-only:
- irrelevant-files: *tempest-irrelevant-files-2
+ irrelevant-files: &tempest-irrelevant-files-3
+ - ^.*\.rst$
+ - ^doc/.*$
+ - ^etc/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tempest/hacking/.*$
+ - ^tempest/tests/.*$
+ - ^tools/check_logs.py
+ - ^tools/format.sh
+ - ^tools/generate-tempest-plugins-list.py
+ - ^tools/generate-tempest-plugins-list.sh
+ - ^tools/skip_tracker.py
+ - ^tools/tempest-integrated-gate-compute-exclude-list.txt
+ - ^tools/tempest-integrated-gate-networking-exclude-list.txt
+ - ^tools/tempest-integrated-gate-object-storage-exclude-list.txt
+ - ^tools/tempest-integrated-gate-placement-exclude-list.txt
+ - ^tools/tempest-integrated-gate-storage-blacklist.txt
+ - ^tools/tempest-integrated-gate-storage-exclude-list.txt
+ - ^tools/tempest-plugin-sanity.sh
+ - ^tools/with_venv.sh
+ - ^.coveragerc$
+ - ^.gitignore$
+ - ^.gitreview$
+ - ^.mailmap$
- tempest-slow-py3:
irrelevant-files: *tempest-irrelevant-files
- nova-live-migration:
@@ -64,23 +99,11 @@
irrelevant-files: *tempest-irrelevant-files
- devstack-plugin-ceph-tempest-py3:
irrelevant-files: *tempest-irrelevant-files
- - neutron-grenade-multinode:
+ - neutron-ovs-grenade-multinode:
irrelevant-files: *tempest-irrelevant-files
- grenade:
irrelevant-files: *tempest-irrelevant-files
- - puppet-openstack-integration-4-scenario001-tempest-centos-7:
- voting: false
- irrelevant-files: *tempest-irrelevant-files
- - puppet-openstack-integration-4-scenario002-tempest-centos-7:
- voting: false
- irrelevant-files: *tempest-irrelevant-files
- - puppet-openstack-integration-4-scenario003-tempest-centos-7:
- voting: false
- irrelevant-files: *tempest-irrelevant-files
- - puppet-openstack-integration-4-scenario004-tempest-centos-7:
- voting: false
- irrelevant-files: *tempest-irrelevant-files
- - neutron-tempest-dvr:
+ - neutron-ovs-tempest-dvr:
voting: false
irrelevant-files: *tempest-irrelevant-files
- interop-tempest-consistency:
@@ -97,14 +120,14 @@
jobs:
- tempest-slow-py3:
irrelevant-files: *tempest-irrelevant-files
- - neutron-grenade-multinode:
+ - neutron-ovs-grenade-multinode:
irrelevant-files: *tempest-irrelevant-files
- tempest-full-py3:
irrelevant-files: *tempest-irrelevant-files
- grenade:
irrelevant-files: *tempest-irrelevant-files
- tempest-ipv6-only:
- irrelevant-files: *tempest-irrelevant-files-2
+ irrelevant-files: *tempest-irrelevant-files-3
- devstack-plugin-ceph-tempest-py3:
irrelevant-files: *tempest-irrelevant-files
experimental:
@@ -114,7 +137,7 @@
irrelevant-files: *tempest-irrelevant-files
- tempest-all:
irrelevant-files: *tempest-irrelevant-files
- - neutron-tempest-dvr-ha-multinode-full:
+ - neutron-ovs-tempest-dvr-ha-multinode-full:
irrelevant-files: *tempest-irrelevant-files
- nova-tempest-v2-api:
irrelevant-files: *tempest-irrelevant-files
diff --git a/zuul.d/stable-jobs.yaml b/zuul.d/stable-jobs.yaml
index 2f0df66..852bafb 100644
--- a/zuul.d/stable-jobs.yaml
+++ b/zuul.d/stable-jobs.yaml
@@ -20,3 +20,162 @@
parent: tempest-full-py3
nodeset: openstack-single-node-bionic
override-checkout: stable/train
+
+- job:
+ name: tempest-full-py3
+ parent: devstack-tempest
+ # This job version is with swift disabled on py3
+ # as swift was not ready on py3 until stable/train.
+ branches:
+ - stable/pike
+ - stable/queens
+ - stable/rocky
+ - stable/stein
+ - stable/train
+ description: |
+ Base integration test with Neutron networking, swift disabled, and py3.
+ Former names for this job were:
+ * legacy-tempest-dsvm-py35
+ * gate-tempest-dsvm-py35
+ required-projects:
+ - openstack/horizon
+ vars:
+ tox_envlist: full
+ devstack_localrc:
+ USE_PYTHON3: true
+ FORCE_CONFIG_DRIVE: true
+ ENABLE_VOLUME_MULTIATTACH: true
+ GLANCE_USE_IMPORT_WORKFLOW: True
+ devstack_plugins:
+ neutron: https://opendev.org/openstack/neutron
+ devstack_local_conf:
+ post-config:
+ "/$NEUTRON_CORE_PLUGIN_CONF":
+ ovs:
+ bridge_mappings: public:br-ex
+ resource_provider_bandwidths: br-ex:1000000:1000000
+ test-config:
+ $TEMPEST_CONFIG:
+ network-feature-enabled:
+ qos_placement_physnet: public
+ devstack_services:
+ # Enable horizon so that we can run horizon test.
+ horizon: true
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
+ # without Swift, c-bak cannot run (in the Gate at least)
+ # NOTE(mriedem): Disable the cinder-backup service from
+ # tempest-full-py3 since tempest-full-py3 is in the integrated-gate-py3
+ # project template but the backup tests do not really involve other
+ # services so they should be run in some more cinder-specific job,
+ # especially because the tests fail at a high rate (see bugs 1483434,
+ # 1813217, 1745168)
+ c-bak: false
+ neutron-placement: true
+ neutron-qos: true
+
+- job:
+ name: tempest-multinode-full
+ parent: tempest-multinode-full-base
+ nodeset: openstack-two-node-bionic
+ # This job runs on Bionic and on python2. This is for stable/stein and stable/train.
+ # This job is prepared to make sure all stable branches from stable/stein till stable/train
+ # will keep running on bionic. This can be removed once stable/train is EOL.
+ branches:
+ - stable/stein
+ - stable/train
+ - stable/ussuri
+ vars:
+ devstack_localrc:
+ USE_PYTHON3: False
+ group-vars:
+ subnode:
+ devstack_localrc:
+ USE_PYTHON3: False
+
+- job:
+ name: tempest-multinode-full
+ parent: tempest-multinode-full-base
+ nodeset: openstack-two-node-xenial
+ # This job runs on Xenial and this is for stable/pike, stable/queens
+ # and stable/rocky. This job is prepared to make sure all stable branches
+ # before stable/stein will keep running on xenial. This job can be
+ # removed once stable/rocky is EOL.
+ branches:
+ - stable/pike
+ - stable/queens
+ - stable/rocky
+ vars:
+ devstack_localrc:
+ USE_PYTHON3: False
+ group-vars:
+ subnode:
+ devstack_localrc:
+ USE_PYTHON3: False
+
+- job:
+ name: tempest-slow
+ parent: tempest-multinode-full
+ description: |
+ This multinode integration job will run all the tests tagged as slow.
+ It enables the lvm multibackend setup to cover few scenario tests.
+ This job will run only slow tests (API or Scenario) serially.
+ Former names for this job were:
+ * legacy-tempest-dsvm-neutron-scenario-multinode-lvm-multibackend
+ * tempest-scenario-multinode-lvm-multibackend
+ timeout: 10800
+ branches:
+ - stable/pike
+ - stable/queens
+ - stable/rocky
+ vars:
+ tox_envlist: slow-serial
+ devstack_localrc:
+ CINDER_ENABLED_BACKENDS: lvm:lvmdriver-1,lvm:lvmdriver-2
+ ENABLE_VOLUME_MULTIATTACH: true
+ # to avoid https://bugs.launchpad.net/neutron/+bug/1914037
+ # as we couldn't backport the fix to rocky and older releases
+ IPV6_PUBLIC_RANGE: 2001:db8:0:10::/64
+ IPV6_PUBLIC_NETWORK_GATEWAY: 2001:db8:0:10::2
+ IPV6_ROUTER_GW_IP: 2001:db8:0:10::1
+ devstack_plugins:
+ neutron: https://opendev.org/openstack/neutron
+ devstack_services:
+ neutron-placement: true
+ neutron-qos: true
+ tempest_concurrency: 2
+ group-vars:
+ # NOTE(mriedem): The ENABLE_VOLUME_MULTIATTACH variable is used on both
+ # the controller and subnode prior to Rocky so we have to make sure the
+ # variable is set in both locations.
+ subnode:
+ devstack_localrc:
+ ENABLE_VOLUME_MULTIATTACH: true
+
+- job:
+ name: tempest-slow-py3
+ parent: tempest-slow
+ # This job version is with swift disabled on py3
+ # as swift was not ready on py3 until stable/train.
+ branches:
+ - stable/pike
+ - stable/queens
+ - stable/rocky
+ - stable/stein
+ - stable/train
+ vars:
+ devstack_localrc:
+ USE_PYTHON3: true
+ devstack_services:
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
+ # without Swift, c-bak cannot run (in the Gate at least)
+ c-bak: false
+ group-vars:
+ subnode:
+ devstack_localrc:
+ USE_PYTHON3: true