Merge "Wait couple of seconds for dns servers to be set in the guest"
diff --git a/playbooks/devstack-tempest-ipv6.yaml b/playbooks/devstack-tempest-ipv6.yaml
index d56fb73..568077e 100644
--- a/playbooks/devstack-tempest-ipv6.yaml
+++ b/playbooks/devstack-tempest-ipv6.yaml
@@ -16,4 +16,17 @@
     # address is IPv6 etc. This is invoked before tests are run so that we can
     # fail early if anything missing the IPv6 settings or deployments.
     - devstack-ipv6-only-deployments-verification
-    - run-tempest
+  tasks:
+    - name: Run Tempest version <= 26.0.0
+      include_role:
+        name: run-tempest-26
+      when:
+        - zuul.branch is defined
+        - zuul.branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"]
+
+    - name: Run Tempest
+      include_role:
+        name: run-tempest
+      when:
+        - zuul.branch is defined
+        - zuul.branch not in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"]
diff --git a/playbooks/devstack-tempest.yaml b/playbooks/devstack-tempest.yaml
index 3b969f2..269999c 100644
--- a/playbooks/devstack-tempest.yaml
+++ b/playbooks/devstack-tempest.yaml
@@ -29,9 +29,17 @@
             (run_tempest_cleanup is defined and run_tempest_cleanup | bool) or
             (run_tempest_fail_if_leaked_resources is defined and run_tempest_fail_if_leaked_resources | bool)
 
+    - name: Run Tempest version <= 26.0.0
+      include_role:
+        name: run-tempest-26
+      when: (zuul.branch is defined and zuul.branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"]) or
+            (zuul.override_checkout is defined and zuul.override_checkout in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"])
+
     - name: Run Tempest
       include_role:
         name: run-tempest
+      when: (zuul.branch is defined and zuul.branch not in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"] and zuul.override_checkout is not defined) or
+            (zuul.override_checkout is defined and zuul.override_checkout not in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"])
 
     - name: Run tempest cleanup dry-run
       include_role:
diff --git a/releasenotes/notes/end-of-support-for-train-83369468215d7485.yaml b/releasenotes/notes/end-of-support-for-train-83369468215d7485.yaml
new file mode 100644
index 0000000..36681c7
--- /dev/null
+++ b/releasenotes/notes/end-of-support-for-train-83369468215d7485.yaml
@@ -0,0 +1,12 @@
+---
+prelude: |
+    This is an intermediate release during the Xena development cycle to
+    mark the end of support for the EM Train release in Tempest.
+    After this release, Tempest will support the following releases:
+
+    * Wallaby
+    * Victoria
+    * Ussuri
+
+    Current development of Tempest is for OpenStack Xena development
+    cycle.
diff --git a/releasenotes/notes/limits-client-d656f16a3d3e84fc.yaml b/releasenotes/notes/limits-client-d656f16a3d3e84fc.yaml
new file mode 100644
index 0000000..311eca3
--- /dev/null
+++ b/releasenotes/notes/limits-client-d656f16a3d3e84fc.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - |
+    Add a new client for keystone's unified limits API to create and update limits.
diff --git a/releasenotes/notes/log-resource-client-20e58a295f729902.yaml b/releasenotes/notes/log-resource-client-20e58a295f729902.yaml
new file mode 100644
index 0000000..405fc5f
--- /dev/null
+++ b/releasenotes/notes/log-resource-client-20e58a295f729902.yaml
@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    Add a new client to lists, creates, shows information for,
+    and updates neutron log resource.
diff --git a/roles/run-tempest-26/README.rst b/roles/run-tempest-26/README.rst
new file mode 100644
index 0000000..3643edb
--- /dev/null
+++ b/roles/run-tempest-26/README.rst
@@ -0,0 +1,83 @@
+Run Tempest
+
+The result of the tempest run is stored in the `tempest_run_result`
+variable (through the `register` statement).
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+   :default: /opt/stack
+
+   The devstack base directory.
+
+.. zuul:rolevar:: tempest_concurrency
+   :default: 0
+
+   The number of parallel test processes.
+
+.. zuul:rolevar:: tempest_test_regex
+   :default: ''
+
+   A regular expression used to select the tests.
+
+   It works only when used with some specific tox environments
+   ('all', 'all-plugin'.)
+
+   In the following example only api scenario and third party tests
+   will be executed.
+
+       ::
+           vars:
+             tempest_test_regex: (tempest\.(api|scenario|thirdparty)).*$
+
+.. zuul:rolevar:: tempest_test_blacklist
+
+   Specifies a blacklist file to skip tests that are not needed.
+
+   Pass a full path to the file.
+
+.. zuul:rolevar:: tox_envlist
+   :default: smoke
+
+   The Tempest tox environment to run.
+
+.. zuul:rolevar:: tempest_black_regex
+   :default: ''
+
+   A regular expression used to skip the tests.
+
+   It works only when used with some specific tox environments
+   ('all', 'all-plugin'.)
+
+       ::
+           vars:
+             tempest_black_regex: (tempest.api.identity).*$
+
+.. zuul:rolevar:: tox_extra_args
+   :default: ''
+
+   String of extra command line options to pass to tox.
+
+   Here is an example of running tox with --sitepackages option:
+
+       ::
+           vars:
+             tox_extra_args: --sitepackages
+
+.. zuul:rolevar:: tempest_test_timeout
+   :default: ''
+
+   The timeout (in seconds) for each test.
+
+.. zuul:rolevar:: stable_constraints_file
+   :default: ''
+
+   Upper constraints file to be used for stable branch till stable/rocky.
+
+.. zuul:rolevar:: tempest_tox_environment
+   :default: ''
+
+   Environment variable to set for run-tempest task.
+
+   Env variables set in this variable will be combined with some more
+   defaults env variable set at runtime.
diff --git a/roles/run-tempest-26/defaults/main.yaml b/roles/run-tempest-26/defaults/main.yaml
new file mode 100644
index 0000000..cbac76d
--- /dev/null
+++ b/roles/run-tempest-26/defaults/main.yaml
@@ -0,0 +1,12 @@
+devstack_base_dir: /opt/stack
+tempest_test_regex: ''
+tox_envlist: smoke
+tempest_black_regex: ''
+tox_extra_args: ''
+tempest_test_timeout: ''
+stable_constraints_file: "{{ devstack_base_dir }}/requirements/upper-constraints.txt"
+target_branch: "{{ zuul.branch }}"
+tempest_tox_environment: {}
+# NOTE(gmann): external_bridge_mtu shows as undefined for run-tempest role
+# defining default value here to avoid that error.
+external_bridge_mtu: 0
\ No newline at end of file
diff --git a/roles/run-tempest-26/tasks/main.yaml b/roles/run-tempest-26/tasks/main.yaml
new file mode 100644
index 0000000..f846006
--- /dev/null
+++ b/roles/run-tempest-26/tasks/main.yaml
@@ -0,0 +1,73 @@
+# NOTE(andreaf) The number of vcpus is not available on all systems.
+# See https://github.com/ansible/ansible/issues/30688
+# When not available, we fall back to ansible_processor_cores
+- name: Get hw.logicalcpu from sysctl
+  shell: sysctl hw.logicalcpu | cut -d' ' -f2
+  register: sysctl_hw_logicalcpu
+  when: ansible_processor_vcpus is not defined
+
+- name: Number of cores
+  set_fact:
+    num_cores: "{{ansible_processor_vcpus|default(sysctl_hw_logicalcpu.stdout)}}"
+
+- name: Set concurrency for cores == 3 or less
+  set_fact:
+    default_concurrency: "{{ num_cores }}"
+  when: num_cores|int <= 3
+
+- name: Limit max concurrency when more than 3 vcpus are available
+  set_fact:
+    default_concurrency: "{{ num_cores|int // 2 }}"
+  when: num_cores|int > 3
+
+- name: Override target branch
+  set_fact:
+    target_branch: "{{ zuul.override_checkout }}"
+  when: zuul.override_checkout is defined
+
+- name: Use stable branch upper-constraints till stable/stein
+  set_fact:
+    # TOX_CONSTRAINTS_FILE is new name, UPPER_CONSTRAINTS_FILE is old one, best to set both
+    tempest_tox_environment: "{{ tempest_tox_environment | combine({'UPPER_CONSTRAINTS_FILE': stable_constraints_file}) | combine({'TOX_CONSTRAINTS_FILE': stable_constraints_file}) }}"
+  when: target_branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"]
+
+- name: Use Configured upper-constraints for non-master Tempest
+  set_fact:
+    # TOX_CONSTRAINTS_FILE is new name, UPPER_CONSTRAINTS_FILE is old one, best to set both
+    tempest_tox_environment: "{{ tempest_tox_environment | combine({'UPPER_CONSTRAINTS_FILE': devstack_localrc['TEMPEST_VENV_UPPER_CONSTRAINTS']}) | combine({'TOX_CONSTRAINTS_FILE': devstack_localrc['TEMPEST_VENV_UPPER_CONSTRAINTS']}) }}"
+  when:
+    - devstack_localrc is defined
+    - "'TEMPEST_BRANCH' in devstack_localrc"
+    - "'TEMPEST_VENV_UPPER_CONSTRAINTS' in devstack_localrc"
+    - devstack_localrc['TEMPEST_BRANCH'] != 'master'
+    - devstack_localrc['TEMPEST_VENV_UPPER_CONSTRAINTS'] != 'default'
+
+- name: Set OS_TEST_TIMEOUT if requested
+  set_fact:
+    tempest_tox_environment: "{{ tempest_tox_environment | combine({'OS_TEST_TIMEOUT': tempest_test_timeout}) }}"
+  when: tempest_test_timeout != ''
+
+- when:
+    - tempest_test_blacklist is defined
+  block:
+    - name: Check for test blacklist file
+      stat:
+        path: "{{ tempest_test_blacklist }}"
+      register:
+        blacklist_stat
+
+    - name: Build blacklist option
+      set_fact:
+        blacklist_option: "--blacklist-file={{ tempest_test_blacklist|quote }}"
+      when: blacklist_stat.stat.exists
+
+- name: Run Tempest
+  command: tox -e {{tox_envlist}} {{tox_extra_args}} -- {{tempest_test_regex|quote}} {{blacklist_option|default('')}} \
+            --concurrency={{tempest_concurrency|default(default_concurrency)}} \
+            --black-regex={{tempest_black_regex|quote}}
+  args:
+    chdir: "{{devstack_base_dir}}/tempest"
+  register: tempest_run_result
+  become: true
+  become_user: tempest
+  environment: "{{ tempest_tox_environment }}"
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index 48f32a8..c9aec62 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -180,3 +180,56 @@
         if not utils.get_service_list()['volume']:
             msg = "Volume service not enabled."
             raise cls.skipException(msg)
+
+
+class ServersTestFqdnHostnames(base.BaseV2ComputeTest):
+    """Test creating server with FQDN hostname and verifying atrributes
+
+    Starting Wallaby release, Nova sanitizes freeform characters in
+    server hostname with dashes. This test verifies the same.
+    """
+
+    @classmethod
+    def setup_credentials(cls):
+        cls.prepare_instance_network()
+        super(ServersTestFqdnHostnames, cls).setup_credentials()
+
+    @classmethod
+    def setup_clients(cls):
+        super(ServersTestFqdnHostnames, cls).setup_clients()
+        cls.client = cls.servers_client
+
+    @decorators.idempotent_id('622066d2-39fc-4c09-9eeb-35903c114a0a')
+    @testtools.skipUnless(
+        CONF.compute_feature_enabled.hostname_fqdn_sanitization,
+        'FQDN hostname sanitization is not supported.')
+    @testtools.skipUnless(CONF.validation.run_validation,
+                          'Instance validation tests are disabled.')
+    def test_create_server_with_fqdn_name(self):
+        """Test to create an instance with FQDN type name scheme"""
+        validation_resources = self.get_class_validation_resources(
+            self.os_primary)
+        self.server_name = 'guest-instance-1.domain.com'
+        self.password = data_utils.rand_password()
+        self.accessIPv4 = '2.2.2.2'
+        test_server = self.create_test_server(
+            validatable=True,
+            validation_resources=validation_resources,
+            wait_until='ACTIVE',
+            adminPass=self.password,
+            name=self.server_name,
+            accessIPv4=self.accessIPv4)
+
+        """Verify the hostname within the instance is sanitized
+
+        Freeform characters in the hostname are replaced with dashes
+        """
+        linux_client = remote_client.RemoteClient(
+            self.get_server_ip(test_server, validation_resources),
+            self.ssh_user,
+            self.password,
+            validation_resources['keypair']['private_key'],
+            server=test_server,
+            servers_client=self.client)
+        hostname = linux_client.exec_command("hostname").rstrip()
+        self.assertEqual('guest-instance-1-domain-com', hostname)
diff --git a/tempest/api/network/admin/test_negative_quotas.py b/tempest/api/network/admin/test_negative_quotas.py
index 190d9e3..614dfcf 100644
--- a/tempest/api/network/admin/test_negative_quotas.py
+++ b/tempest/api/network/admin/test_negative_quotas.py
@@ -45,11 +45,16 @@
         super(QuotasNegativeTest, self).setUp()
         name = data_utils.rand_name('test_project_')
         description = data_utils.rand_name('desc_')
-        self.project = identity.identity_utils(self.os_admin).create_project(
+        self.creds_client = identity.identity_utils(self.os_admin)
+        self.project = self.creds_client.create_project(
             name=name, description=description)
         self.addCleanup(identity.identity_utils(self.os_admin).delete_project,
                         self.project['id'])
 
+    def tearDown(self):
+        super(QuotasNegativeTest, self).tearDown()
+        self.credentials_provider.cleanup_default_secgroup(self.project['id'])
+
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('644f4e1b-1bf9-4af0-9fd8-eb56ac0f51cf')
     def test_network_quota_exceeding(self):
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index b6bf369..260ba74 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -84,6 +84,7 @@
         cls.network_versions_client = cls.os_primary.network_versions_client
         cls.service_providers_client = cls.os_primary.service_providers_client
         cls.tags_client = cls.os_primary.tags_client
+        cls.log_resource_client = cls.os_primary.log_resource_client
 
     @classmethod
     def resource_setup(cls):
diff --git a/tempest/clients.py b/tempest/clients.py
index 6080f01..1b05b54 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -73,6 +73,7 @@
         self.qos_min_bw_client = self.network.QosMinimumBandwidthRulesClient()
         self.segments_client = self.network.SegmentsClient()
         self.trunks_client = self.network.TrunksClient()
+        self.log_resource_client = self.network.LogResourceClient()
 
     def _set_image_clients(self):
         if CONF.service_available.glance:
@@ -221,6 +222,8 @@
             self.identity_v3.ApplicationCredentialsClient(**params_v3)
         self.access_rules_client = \
             self.identity_v3.AccessRulesClient(**params_v3)
+        self.identity_limits_client = \
+            self.identity_v3.LimitsClient(**params_v3)
 
         # Token clients do not use the catalog. They only need default_params.
         # They read auth_url, so they should only be set if the corresponding
diff --git a/tempest/config.py b/tempest/config.py
index 6715a00..3a497cb 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -437,6 +437,15 @@
     cfg.BoolOpt('disk_config',
                 default=True,
                 help="If false, skip disk config tests"),
+    # TODO(pkesav): Make it True by default once wallaby
+    # is oldest supported stable for Tempest.
+    cfg.BoolOpt('hostname_fqdn_sanitization',
+                default=False,
+                help="If false, skip fqdn instance sanitization tests. "
+                     "Nova started sanitizing the instance name by replacing "
+                     "the '.' with '-' to comply with fqdn hostname. Nova "
+                     "changed that in Wallaby cycle, if your cloud is older "
+                     "than wallaby then you can keep/make it False."),
     cfg.ListOpt('api_extensions',
                 default=['all'],
                 help='A list of enabled compute extensions with a special '
diff --git a/tempest/lib/common/dynamic_creds.py b/tempest/lib/common/dynamic_creds.py
index 3b17af2..2e93fd5 100644
--- a/tempest/lib/common/dynamic_creds.py
+++ b/tempest/lib/common/dynamic_creds.py
@@ -254,8 +254,8 @@
                     user, role, domain)
             elif scope == 'system':
                 self.creds_client.assign_user_role_on_system(user, role)
-        LOG.info("Roles assigned to the user %s are: %s",
-                 user['id'], roles_to_assign)
+        LOG.info("Dynamic test user %s is created with scope %s and roles: %s",
+                 user['id'], scope, roles_to_assign)
 
         creds = self.creds_client.get_credentials(**cred_params)
         return cred_provider.TestResources(creds)
@@ -518,7 +518,7 @@
             LOG.warning('network with name: %s not found for delete',
                         network_name)
 
-    def _cleanup_default_secgroup(self, tenant):
+    def cleanup_default_secgroup(self, tenant):
         nsg_client = self.security_groups_admin_client
         resp_body = nsg_client.list_security_groups(tenant_id=tenant,
                                                     name="default")
@@ -572,13 +572,13 @@
                 LOG.warning("user with name: %s not found for delete",
                             creds.username)
             # NOTE(zhufl): Only when neutron's security_group ext is
-            # enabled, _cleanup_default_secgroup will not raise error. But
+            # enabled, cleanup_default_secgroup will not raise error. But
             # here cannot use test_utils.is_extension_enabled for it will cause
             # "circular dependency". So here just use try...except to
             # ensure tenant deletion without big changes.
             try:
                 if self.neutron_available:
-                    self._cleanup_default_secgroup(creds.tenant_id)
+                    self.cleanup_default_secgroup(creds.tenant_id)
             except lib_exc.NotFound:
                 LOG.warning("failed to cleanup tenant %s's secgroup",
                             creds.tenant_name)
diff --git a/tempest/lib/common/rest_client.py b/tempest/lib/common/rest_client.py
index 573d64e..3f735f5 100644
--- a/tempest/lib/common/rest_client.py
+++ b/tempest/lib/common/rest_client.py
@@ -14,7 +14,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import collections
+from collections import abc
 import email.utils
 import re
 import time
@@ -884,7 +884,7 @@
                                                     resp=resp)
 
     def is_absolute_limit(self, resp, resp_body):
-        if (not isinstance(resp_body, collections.Mapping) or
+        if (not isinstance(resp_body, abc.Mapping) or
                 'retry-after' not in resp):
             return True
         return 'exceed' in resp_body.get('message', 'blabla')
diff --git a/tempest/lib/services/identity/v3/__init__.py b/tempest/lib/services/identity/v3/__init__.py
index 86fa991..af09fb1 100644
--- a/tempest/lib/services/identity/v3/__init__.py
+++ b/tempest/lib/services/identity/v3/__init__.py
@@ -32,6 +32,7 @@
 from tempest.lib.services.identity.v3.identity_client import IdentityClient
 from tempest.lib.services.identity.v3.inherited_roles_client import \
     InheritedRolesClient
+from tempest.lib.services.identity.v3.limits_client import LimitsClient
 from tempest.lib.services.identity.v3.oauth_consumers_client import \
     OAUTHConsumerClient
 from tempest.lib.services.identity.v3.oauth_token_client import \
@@ -55,7 +56,8 @@
            'DomainConfigurationClient', 'EndPointGroupsClient',
            'EndPointsClient', 'EndPointsFilterClient',
            'GroupsClient', 'IdentityClient', 'InheritedRolesClient',
-           'OAUTHConsumerClient', 'OAUTHTokenClient', 'PoliciesClient',
-           'ProjectsClient', 'ProjectTagsClient', 'RegionsClient',
-           'RoleAssignmentsClient', 'RolesClient', 'ServicesClient',
-           'V3TokenClient', 'TrustsClient', 'UsersClient', 'VersionsClient']
+           'LimitsClient', 'OAUTHConsumerClient', 'OAUTHTokenClient',
+           'PoliciesClient', 'ProjectsClient', 'ProjectTagsClient',
+           'RegionsClient', 'RoleAssignmentsClient', 'RolesClient',
+           'ServicesClient', 'V3TokenClient', 'TrustsClient', 'UsersClient',
+           'VersionsClient']
diff --git a/tempest/lib/services/identity/v3/limits_client.py b/tempest/lib/services/identity/v3/limits_client.py
new file mode 100644
index 0000000..26d04bc
--- /dev/null
+++ b/tempest/lib/services/identity/v3/limits_client.py
@@ -0,0 +1,57 @@
+# Copyright 2021 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.common import rest_client
+
+
+class LimitsClient(rest_client.RestClient):
+    api_version = "v3"
+
+    def get_registered_limits(self):
+        """Lists all registered limits."""
+        resp, body = self.get('registered_limits')
+        self.expected_success(200, resp.status)
+        return rest_client.ResponseBody(resp, json.loads(body))
+
+    def create_limit(self, region_id, service_id, project_id, resource_name,
+                     default_limit, description=None, domain_id=None):
+        """Creates a limit in keystone."""
+        limit = {
+            'service_id': service_id,
+            'project_id': project_id,
+            'resource_name': resource_name,
+            'resource_limit': default_limit,
+            'region_id': region_id,
+            'description': description or '%s limit for %s' % (
+                resource_name, project_id),
+        }
+        if domain_id:
+            limit['domain_id'] = domain_id
+        post_body = json.dumps({'limits': [limit]})
+        resp, body = self.post('limits', post_body)
+        self.expected_success(201, resp.status)
+        return rest_client.ResponseBody(resp, json.loads(body))
+
+    def update_limit(self, limit_id, resource_limit, description=None):
+        """Updates a limit in keystone by id."""
+
+        limit = {'resource_limit': resource_limit}
+        if description:
+            limit['description'] = description
+        patch_body = json.dumps({'limit': limit})
+        resp, body = self.patch('limits/%s' % limit_id, patch_body)
+        self.expected_success(200, resp.status)
+        return rest_client.ResponseBody(resp, json.loads(body))
diff --git a/tempest/lib/services/network/__init__.py b/tempest/lib/services/network/__init__.py
index 7e57499..fc85140 100644
--- a/tempest/lib/services/network/__init__.py
+++ b/tempest/lib/services/network/__init__.py
@@ -15,6 +15,7 @@
 from tempest.lib.services.network.agents_client import AgentsClient
 from tempest.lib.services.network.extensions_client import ExtensionsClient
 from tempest.lib.services.network.floating_ips_client import FloatingIPsClient
+from tempest.lib.services.network.log_resource_client import LogResourceClient
 from tempest.lib.services.network.metering_label_rules_client import \
     MeteringLabelRulesClient
 from tempest.lib.services.network.metering_labels_client import \
@@ -45,4 +46,4 @@
            'QosClient', 'QosMinimumBandwidthRulesClient', 'QuotasClient',
            'RoutersClient', 'SecurityGroupRulesClient', 'SecurityGroupsClient',
            'SegmentsClient', 'ServiceProvidersClient', 'SubnetpoolsClient',
-           'SubnetsClient', 'TagsClient', 'TrunksClient']
+           'SubnetsClient', 'TagsClient', 'TrunksClient', 'LogResourceClient']
diff --git a/tempest/lib/services/network/log_resource_client.py b/tempest/lib/services/network/log_resource_client.py
new file mode 100644
index 0000000..727b138
--- /dev/null
+++ b/tempest/lib/services/network/log_resource_client.py
@@ -0,0 +1,74 @@
+# Copyright 2021 Red Hat, Inc.
+# All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.services.network import base
+
+
+class LogResourceClient(base.BaseNetworkClient):
+
+    def create_log(self, **kwargs):
+        """Creates a log resource.
+
+        Creates a log resource by using the configuration that you define in
+        the request object.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#create-log
+        """
+        uri = '/log/logs/'
+        post_data = {'log': kwargs}
+        return self.create_resource(uri, post_data)
+
+    def update_log(self, log_id, **kwargs):
+        """Updates a log resource.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#update-log
+        """
+        uri = '/log/logs/%s' % log_id
+        post_data = {'log': kwargs}
+        return self.update_resource(uri, post_data)
+
+    def show_log(self, log_id, **fields):
+        """Shows details for a log id.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#show-log
+        """
+        uri = '/log/logs/%s' % log_id
+        return self.show_resource(uri, **fields)
+
+    def delete_log(self, log_id):
+        """Deletes a log resource.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#delete-log
+        """
+        uri = '/log/logs/%s' % log_id
+        return self.delete_resource(uri)
+
+    def list_logs(self, **filters):
+        """Lists Logs.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#list-logs
+        """
+        uri = '/log/logs'
+        return self.list_resources(uri, **filters)
diff --git a/tempest/scenario/test_unified_limits.py b/tempest/scenario/test_unified_limits.py
new file mode 100644
index 0000000..22256b4
--- /dev/null
+++ b/tempest/scenario/test_unified_limits.py
@@ -0,0 +1,435 @@
+# Copyright 2021 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import io
+
+from oslo_utils import units
+from tempest.common import utils
+from tempest.common import waiters
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+from tempest.scenario import manager
+
+CONF = config.CONF
+
+
+class ImageQuotaTest(manager.ScenarioTest):
+    credentials = ['primary', 'system_admin']
+
+    @classmethod
+    def resource_setup(cls):
+        super(ImageQuotaTest, cls).resource_setup()
+
+        # Figure out and record the glance service id
+        services = cls.os_system_admin.identity_services_v3_client.\
+            list_services()
+        glance_services = [x for x in services['services']
+                           if x['name'] == 'glance']
+        cls.glance_service_id = glance_services[0]['id']
+
+        # Pre-create all the quota limits and record their IDs so we can
+        # update them in-place without needing to know which ones have been
+        # created and in which order.
+        cls.limit_ids = {}
+
+        try:
+            cls.limit_ids['image_size_total'] = cls._create_limit(
+                'image_size_total', 10)
+            cls.limit_ids['image_stage_total'] = cls._create_limit(
+                'image_stage_total', 10)
+            cls.limit_ids['image_count_total'] = cls._create_limit(
+                'image_count_total', 10)
+            cls.limit_ids['image_count_uploading'] = cls._create_limit(
+                'image_count_uploading', 10)
+        except lib_exc.Forbidden:
+            # If we fail to set limits, it means they are not
+            # registered, and thus we will skip these tests once we
+            # have our os_system_admin client and run
+            # check_quotas_enabled().
+            pass
+
+    def setUp(self):
+        super(ImageQuotaTest, self).setUp()
+        self.created_images = []
+
+    def create_image(self, data=None, **kwargs):
+        """Wrapper that returns a test image."""
+
+        if 'name' not in kwargs:
+            name = data_utils.rand_name(self.__name__ + "-image")
+            kwargs['name'] = name
+
+        params = dict(kwargs)
+        if data:
+            # NOTE: On glance v1 API, the data should be passed on
+            # a header, so the data is handled separately here.
+            params['data'] = data
+
+        image = self.image_client.create_image(**params)
+        # Image objects returned by the v1 client have the image
+        # data inside a dict that is keyed against 'image'.
+        if 'image' in image:
+            image = image['image']
+        self.created_images.append(image['id'])
+        self.addCleanup(
+            self.image_client.wait_for_resource_deletion,
+            image['id'])
+        self.addCleanup(
+            test_utils.call_and_ignore_notfound_exc,
+            self.image_client.delete_image, image['id'])
+        return image
+
+    def check_quotas_enabled(self):
+        # Check to see if we should even be running these tests. Use
+        # the presence of a registered limit that we recognize as an
+        # indication.  This will be set up by the operator (or
+        # devstack) if glance is configured to use/honor the unified
+        # limits. If one is set, they must all be set, because glance
+        # has a single all-or-nothing flag for whether or not to use
+        # keystone limits. If anything, checking only one helps to
+        # assert the assumption that, if enabled, they must all be at
+        # least registered for proper operation.
+        registered_limits = self.os_system_admin.identity_limits_client.\
+            get_registered_limits()['registered_limits']
+        if 'image_count_total' not in [x['resource_name']
+                                       for x in registered_limits]:
+            raise self.skipException('Target system is not configured with '
+                                     'glance unified limits')
+
+    @classmethod
+    def _create_limit(cls, name, value):
+        return cls.os_system_admin.identity_limits_client.create_limit(
+            CONF.identity.region, cls.glance_service_id,
+            cls.image_client.tenant_id, name, value)['limits'][0]['id']
+
+    def _update_limit(self, name, value):
+        self.os_system_admin.identity_limits_client.update_limit(
+            self.limit_ids[name], value)
+
+    def _cleanup_images(self):
+        while self.created_images:
+            image_id = self.created_images.pop()
+            try:
+                self.image_client.delete_image(image_id)
+            except lib_exc.NotFound:
+                pass
+
+    @decorators.idempotent_id('9b74fe24-183b-41e6-bf42-84c2958a7be8')
+    @utils.services('image', 'identity')
+    def test_image_count_quota(self):
+        self.check_quotas_enabled()
+
+        # Set a quota on the number of images for our tenant to one.
+        self._update_limit('image_count_total', 1)
+
+        # Create one image
+        image = self.create_image(name='first',
+                                  container_format='bare',
+                                  disk_format='raw',
+                                  visibility='private')
+
+        # Second image would put us over quota, so expect failure.
+        self.assertRaises(lib_exc.OverLimit,
+                          self.create_image,
+                          name='second',
+                          container_format='bare',
+                          disk_format='raw',
+                          visibility='private')
+
+        # Update our limit to two.
+        self._update_limit('image_count_total', 2)
+
+        # Now the same create should succeed.
+        self.create_image(name='second',
+                          container_format='bare',
+                          disk_format='raw',
+                          visibility='private')
+
+        # Third image would put us over quota, so expect failure.
+        self.assertRaises(lib_exc.OverLimit,
+                          self.create_image,
+                          name='third',
+                          container_format='bare',
+                          disk_format='raw',
+                          visibility='private')
+
+        # Delete the first image to put us under quota.
+        self.image_client.delete_image(image['id'])
+
+        # Now the same create should succeed.
+        self.create_image(name='third',
+                          container_format='bare',
+                          disk_format='raw',
+                          visibility='private')
+
+        # Delete all the images we created before the next test runs,
+        # so that it starts with full quota.
+        self._cleanup_images()
+
+    @decorators.idempotent_id('b103788b-5329-4aa9-8b0d-97f8733460db')
+    @utils.services('image', 'identity')
+    def test_image_count_uploading_quota(self):
+        if not CONF.image_feature_enabled.import_image:
+            skip_msg = (
+                "%s skipped as image import is not available" % __name__)
+            raise self.skipException(skip_msg)
+
+        self.check_quotas_enabled()
+
+        # Set a quota on the number of images we can have in uploading state.
+        self._update_limit('image_stage_total', 10)
+        self._update_limit('image_size_total', 10)
+        self._update_limit('image_count_total', 10)
+        self._update_limit('image_count_uploading', 1)
+
+        file_content = data_utils.random_bytes(1 * units.Mi)
+
+        # Create and stage an image
+        image1 = self.create_image(name='first',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.image_client.stage_image_file(image1['id'],
+                                           io.BytesIO(file_content))
+
+        # Check that we can not stage another
+        image2 = self.create_image(name='second',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.assertRaises(lib_exc.OverLimit,
+                          self.image_client.stage_image_file,
+                          image2['id'], io.BytesIO(file_content))
+
+        # ... nor upload directly
+        image3 = self.create_image(name='third',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.assertRaises(lib_exc.OverLimit,
+                          self.image_client.store_image_file,
+                          image3['id'],
+                          io.BytesIO(file_content))
+
+        # Update our quota to make room
+        self._update_limit('image_count_uploading', 2)
+
+        # Now our upload should work
+        self.image_client.store_image_file(image3['id'],
+                                           io.BytesIO(file_content))
+
+        # ...and because that is no longer in uploading state, we should be
+        # able to stage our second image from above.
+        self.image_client.stage_image_file(image2['id'],
+                                           io.BytesIO(file_content))
+
+        # Finish our import of image2
+        self.image_client.image_import(image2['id'], method='glance-direct')
+        waiters.wait_for_image_imported_to_stores(self.image_client,
+                                                  image2['id'])
+
+        # Set our quota back to one
+        self._update_limit('image_count_uploading', 1)
+
+        # Since image1 is still staged, we should not be able to upload
+        # an image.
+        image4 = self.create_image(name='fourth',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.assertRaises(lib_exc.OverLimit,
+                          self.image_client.store_image_file,
+                          image4['id'],
+                          io.BytesIO(file_content))
+
+        # Finish our import of image1 to make space in our uploading quota.
+        self.image_client.image_import(image1['id'], method='glance-direct')
+        waiters.wait_for_image_imported_to_stores(self.image_client,
+                                                  image1['id'])
+
+        # Make sure that freed up the one upload quota to complete our upload
+        self.image_client.store_image_file(image4['id'],
+                                           io.BytesIO(file_content))
+
+        # Delete all the images we created before the next test runs,
+        # so that it starts with full quota.
+        self._cleanup_images()
+
+    @decorators.idempotent_id('05e8d064-c39a-4801-8c6a-465df375ec5b')
+    @utils.services('image', 'identity')
+    def test_image_size_quota(self):
+        self.check_quotas_enabled()
+
+        # Set a quota on the image size for our tenant to 1MiB, and allow ten
+        # images.
+        self._update_limit('image_size_total', 1)
+        self._update_limit('image_count_total', 10)
+        self._update_limit('image_count_uploading', 10)
+
+        file_content = data_utils.random_bytes(1 * units.Mi)
+
+        # Create and upload a 1MiB image.
+        image1 = self.create_image(name='first',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.image_client.store_image_file(image1['id'],
+                                           io.BytesIO(file_content))
+
+        # Create and upload a second 1MiB image. This succeeds, but
+        # after completion, we are over quota. Despite us being at
+        # quota above, the initial quota check for the second
+        # operation has no idea what the image size will be, and thus
+        # uses delta=0. This will succeed because we're not
+        # technically over-quota and have not asked for any more (this
+        # is oslo.limit behavior). After the second operation,
+        # however, we will be over-quota regardless of the delta and
+        # subsequent attempts will fail. Because glance does not
+        # require an image size to be declared before upload, this is
+        # really the best it can do without an API change.
+        image2 = self.create_image(name='second',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.image_client.store_image_file(image2['id'],
+                                           io.BytesIO(file_content))
+
+        # Create and attempt to upload a third 1MiB image. This should fail to
+        # upload (but not create) because we are over quota.
+        image3 = self.create_image(name='third',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.assertRaises(lib_exc.OverLimit,
+                          self.image_client.store_image_file,
+                          image3['id'], io.BytesIO(file_content))
+
+        # Increase our size quota to 2MiB.
+        self._update_limit('image_size_total', 2)
+
+        # Now the upload of the already-created image is allowed, but
+        # after completion, we are over quota again.
+        self.image_client.store_image_file(image3['id'],
+                                           io.BytesIO(file_content))
+
+        # Create and attempt to upload a fourth 1MiB image. This should
+        # fail to upload (but not create) because we are over quota.
+        image4 = self.create_image(name='fourth',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.assertRaises(lib_exc.OverLimit,
+                          self.image_client.store_image_file,
+                          image4['id'], io.BytesIO(file_content))
+
+        # Delete our first image to make space in our existing 2MiB quota.
+        self.image_client.delete_image(image1['id'])
+
+        # Now the upload of the already-created image is allowed.
+        self.image_client.store_image_file(image4['id'],
+                                           io.BytesIO(file_content))
+
+        # Delete all the images we created before the next test runs,
+        # so that it starts with full quota.
+        self._cleanup_images()
+
+    @decorators.idempotent_id('fc76b8d9-aae5-46fb-9285-099e37f311f7')
+    @utils.services('image', 'identity')
+    def test_image_stage_quota(self):
+        if not CONF.image_feature_enabled.import_image:
+            skip_msg = (
+                "%s skipped as image import is not available" % __name__)
+            raise self.skipException(skip_msg)
+
+        self.check_quotas_enabled()
+
+        # Create a staging quota of 1MiB, allow 10MiB of active
+        # images, and a total of ten images.
+        self._update_limit('image_stage_total', 1)
+        self._update_limit('image_size_total', 10)
+        self._update_limit('image_count_total', 10)
+        self._update_limit('image_count_uploading', 10)
+
+        file_content = data_utils.random_bytes(1 * units.Mi)
+
+        # Create and stage a 1MiB image.
+        image1 = self.create_image(name='first',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.image_client.stage_image_file(image1['id'],
+                                           io.BytesIO(file_content))
+
+        # Create and stage a second 1MiB image. This succeeds, but
+        # after completion, we are over quota.
+        image2 = self.create_image(name='second',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.image_client.stage_image_file(image2['id'],
+                                           io.BytesIO(file_content))
+
+        # Create and attempt to stage a third 1MiB image. This should fail to
+        # stage (but not create) because we are over quota.
+        image3 = self.create_image(name='third',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.assertRaises(lib_exc.OverLimit,
+                          self.image_client.stage_image_file,
+                          image3['id'], io.BytesIO(file_content))
+
+        # Make sure that even though we are over our stage quota, we
+        # can still create and upload an image the regular way.
+        image_upload = self.create_image(name='uploaded',
+                                         container_format='bare',
+                                         disk_format='raw',
+                                         visibility='private')
+        self.image_client.store_image_file(image_upload['id'],
+                                           io.BytesIO(file_content))
+
+        # Increase our stage quota to 2MiB.
+        self._update_limit('image_stage_total', 2)
+
+        # Now the upload of the already-created image is allowed, but
+        # after completion, we are over quota again.
+        self.image_client.stage_image_file(image3['id'],
+                                           io.BytesIO(file_content))
+
+        # Create and attempt to stage a fourth 1MiB image. This should
+        # fail to stage (but not create) because we are over quota.
+        image4 = self.create_image(name='fourth',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.assertRaises(lib_exc.OverLimit,
+                          self.image_client.stage_image_file,
+                          image4['id'], io.BytesIO(file_content))
+
+        # Finish our import of image1 to make space in our stage quota.
+        self.image_client.image_import(image1['id'], method='glance-direct')
+        waiters.wait_for_image_imported_to_stores(self.image_client,
+                                                  image1['id'])
+
+        # Now the upload of the already-created image is allowed.
+        self.image_client.stage_image_file(image4['id'],
+                                           io.BytesIO(file_content))
+
+        # Delete all the images we created before the next test runs,
+        # so that it starts with full quota.
+        self._cleanup_images()
diff --git a/tempest/tests/lib/services/identity/v3/test_limit_client.py b/tempest/tests/lib/services/identity/v3/test_limit_client.py
new file mode 100644
index 0000000..07ec6cd
--- /dev/null
+++ b/tempest/tests/lib/services/identity/v3/test_limit_client.py
@@ -0,0 +1,82 @@
+# Copyright 2021 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tempest.lib.services.identity.v3 import limits_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestLimitsClient(base.BaseServiceTest):
+    def setUp(self):
+        super(TestLimitsClient, self).setUp()
+        self.client = limits_client.LimitsClient(
+            fake_auth_provider.FakeAuthProvider(),
+            'identity', 'regionOne')
+
+    def test_get_registered_limits(self):
+        fake_result = {'foo': 'bar'}
+        self.check_service_client_function(
+            self.client.get_registered_limits,
+            'tempest.lib.common.rest_client.RestClient.get',
+            fake_result,
+            False,
+            status=200)
+
+    def test_create_limit(self):
+        fake_result = {'foo': 'bar'}
+        self.check_service_client_function(
+            self.client.create_limit,
+            'tempest.lib.common.rest_client.RestClient.post',
+            fake_result,
+            False,
+            region_id='regionOne', service_id='image',
+            project_id='project', resource_name='widgets',
+            default_limit=10,
+            description='Spacely Widgets',
+            status=201)
+
+    def test_create_limit_with_domain(self):
+        fake_result = {'foo': 'bar'}
+        self.check_service_client_function(
+            self.client.create_limit,
+            'tempest.lib.common.rest_client.RestClient.post',
+            fake_result,
+            False,
+            region_id='regionOne', service_id='image',
+            project_id='project', resource_name='widgets',
+            default_limit=10,
+            domain_id='foo',
+            description='Spacely Widgets',
+            status=201)
+
+    def test_update_limit(self):
+        fake_result = {'foo': 'bar'}
+        self.check_service_client_function(
+            self.client.update_limit,
+            'tempest.lib.common.rest_client.RestClient.patch',
+            fake_result,
+            False,
+            limit_id='123', resource_limit=20,
+            status=200)
+
+    def test_update_limit_with_description(self):
+        fake_result = {'foo': 'bar'}
+        self.check_service_client_function(
+            self.client.update_limit,
+            'tempest.lib.common.rest_client.RestClient.patch',
+            fake_result,
+            False,
+            limit_id='123', resource_limit=20,
+            description='new description',
+            status=200)
diff --git a/tempest/tests/lib/services/network/test_log_resource_client.py b/tempest/tests/lib/services/network/test_log_resource_client.py
new file mode 100644
index 0000000..ef502bc
--- /dev/null
+++ b/tempest/tests/lib/services/network/test_log_resource_client.py
@@ -0,0 +1,145 @@
+# Copyright 2021 Red Hat, Inc.
+# All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.services.network import log_resource_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestLogResourceClient(base.BaseServiceTest):
+
+    FAKE_LOGS = {
+        "logs": [
+            {
+                "name": "security group log1",
+                "description": "Log for test demo.",
+                "id": "2f245a7b-796b-4f26-9cf9-9e82d248fda7",
+                "project_id": "92a5a4f4245a4abbafacb7ca73b027b0",
+                "tenant_id": "92a5a4f4245a4abbafacb7ca73b027b0",
+                "created_at": "2018-04-03T21:03:04Z",
+                "updated_at": "2018-04-03T21:03:04Z",
+                "enabled": True,
+                "revision_number": 1,
+                "resource_type": "security_group",
+                "resource_id": None,
+                "target_id": None,
+                "event": "ALL"
+            },
+            {
+                "name": "security group log2",
+                "description": "Log for test demo.",
+                "id": "46ebaec1-0570-43ac-82f6-60d2b03168c4",
+                "project_id": "82a5a4f4245a4abbafacb7ca73b027b0",
+                "tenant_id": "82a5a4f4245a4abbafacb7ca73b027b0",
+                "created_at": "2018-04-03T21:04:04Z",
+                "updated_at": "2018-04-03T21:04:04Z",
+                "enabled": True,
+                "revision_number": 2,
+                "resource_type": "security_group",
+                "resource_id": None,
+                "target_id": None,
+                "event": "ALL"
+            }
+        ]
+    }
+
+    FAKE_LOG_ID = "2f245a7b-796b-4f26-9cf9-9e82d248fda7"
+
+    def setUp(self):
+        super(TestLogResourceClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.log_resource_client = log_resource_client.LogResourceClient(
+            fake_auth, "network", "regionOne")
+
+    def _test_list_logs(self, bytes_body=False):
+        self.check_service_client_function(
+            self.log_resource_client.list_logs,
+            "tempest.lib.common.rest_client.RestClient.get",
+            self.FAKE_LOGS,
+            bytes_body,
+            200)
+
+    def _test_show_log(self, bytes_body=False):
+        self.check_service_client_function(
+            self.log_resource_client.show_log,
+            "tempest.lib.common.rest_client.RestClient.get",
+            {"log": self.FAKE_LOGS["logs"][0]},
+            bytes_body,
+            200,
+            log_id=self.FAKE_LOG_ID)
+
+    def _test_create_log(self, bytes_body=False):
+        self.check_service_client_function(
+            self.log_resource_client.create_log,
+            "tempest.lib.common.rest_client.RestClient.post",
+            {"logs": self.FAKE_LOGS["logs"][1]},
+            bytes_body,
+            201,
+            log_id="2f245a7b-796b-4f26-9cf9-9e82d248fda7")
+
+    def _test_update_log(self, bytes_body=False):
+        update_kwargs = {
+            "tenant_id": "83a5a4f4245a4abbafacb7ca73b027b0"
+        }
+
+        resp_body = {
+            "logs": copy.deepcopy(
+                self.FAKE_LOGS["logs"][0]
+            )
+        }
+        resp_body["logs"].update(update_kwargs)
+
+        self.check_service_client_function(
+            self.log_resource_client.update_log,
+            "tempest.lib.common.rest_client.RestClient.put",
+            resp_body,
+            bytes_body,
+            200,
+            log_id=self.FAKE_LOG_ID,
+            **update_kwargs)
+
+    def test_list_logs_with_str_body(self):
+        self._test_list_logs()
+
+    def test_list_logs_with_bytes_body(self):
+        self._test_list_logs(bytes_body=True)
+
+    def test_create_log_with_str_body(self):
+        self._test_create_log()
+
+    def test_create_log_with_bytes_body(self):
+        self._test_create_log(bytes_body=True)
+
+    def test_show_log_with_str_body(self):
+        self._test_show_log()
+
+    def test_show_log_with_bytes_body(self):
+        self._test_show_log(bytes_body=True)
+
+    def test_update_log_with_str_body(self):
+        self._test_update_log()
+
+    def test_update_log_with_bytes_body(self):
+        self._test_update_log(bytes_body=True)
+
+    def test_delete_log(self):
+        self.check_service_client_function(
+            self.log_resource_client.delete_log,
+            'tempest.lib.common.rest_client.RestClient.delete',
+            {},
+            status=204,
+            log_id=self.FAKE_LOG_ID)
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
index 622bbad..2da5579 100644
--- a/zuul.d/integrated-gate.yaml
+++ b/zuul.d/integrated-gate.yaml
@@ -254,7 +254,7 @@
     timeout: 10800
     # This job runs on stable/stein onwards.
     branches: ^(?!stable/(ocata|pike|queens|rocky)).*$
-    vars:
+    vars: &tempest_slow_vars
       tox_envlist: slow-serial
       devstack_localrc:
         CINDER_ENABLED_BACKENDS: lvm:lvmdriver-1,lvm:lvmdriver-2
@@ -275,10 +275,12 @@
 
 - job:
     name: tempest-slow-py3
-    parent: tempest-slow
+    parent: tempest-multinode-full-py3
     # This job version is with swift enabled on py3
     # as swift is ready on py3 from stable/ussuri onwards.
+    timeout: 10800
     branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train)).*$
+    vars: *tempest_slow_vars
 
 - job:
     name: tempest-cinder-v2-api
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 698df53..7a3afc2 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -64,23 +64,11 @@
             irrelevant-files: *tempest-irrelevant-files
         - devstack-plugin-ceph-tempest-py3:
             irrelevant-files: *tempest-irrelevant-files
-        - neutron-grenade-multinode:
+        - neutron-ovs-grenade-multinode:
             irrelevant-files: *tempest-irrelevant-files
         - grenade:
             irrelevant-files: *tempest-irrelevant-files
-        - puppet-openstack-integration-4-scenario001-tempest-centos-7:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - puppet-openstack-integration-4-scenario002-tempest-centos-7:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - puppet-openstack-integration-4-scenario003-tempest-centos-7:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - puppet-openstack-integration-4-scenario004-tempest-centos-7:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - neutron-tempest-dvr:
+        - neutron-ovs-tempest-dvr:
             voting: false
             irrelevant-files: *tempest-irrelevant-files
         - interop-tempest-consistency:
@@ -97,7 +85,7 @@
       jobs:
         - tempest-slow-py3:
             irrelevant-files: *tempest-irrelevant-files
-        - neutron-grenade-multinode:
+        - neutron-ovs-grenade-multinode:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-full-py3:
             irrelevant-files: *tempest-irrelevant-files
@@ -114,7 +102,7 @@
             irrelevant-files: *tempest-irrelevant-files
         - tempest-all:
             irrelevant-files: *tempest-irrelevant-files
-        - neutron-tempest-dvr-ha-multinode-full:
+        - neutron-ovs-tempest-dvr-ha-multinode-full:
             irrelevant-files: *tempest-irrelevant-files
         - nova-tempest-v2-api:
             irrelevant-files: *tempest-irrelevant-files