Partially merge tag '0.2.0' into HEAD

New tests from the 0.2.0 release are intentionally not included in this merge.

octavia-tempest-plugin 0.2.0 release

Change-Id: I2762ac90e1e998f6439df5159c417dfb1ab832e1
Related-PROD: PROD-26588 (PROD:26588)
diff --git a/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py b/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py
deleted file mode 100644
index 4887995..0000000
--- a/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py
+++ /dev/null
@@ -1,803 +0,0 @@
-# Copyright 2018 GoDaddy
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import time
-from uuid import UUID
-
-from dateutil import parser
-from oslo_log import log as logging
-from tempest import config
-from tempest.lib.common.utils import data_utils
-from tempest.lib import decorators
-from tempest.lib import exceptions
-
-from octavia_tempest_plugin.common import constants as const
-from octavia_tempest_plugin.tests import test_base
-from octavia_tempest_plugin.tests import waiters
-
-CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
-
-class HealthMonitorAPITest(test_base.LoadBalancerBaseTest):
-    """Test the healthmonitor object API."""
-
-    @classmethod
-    def skip_checks(cls):
-        super(HealthMonitorAPITest, cls).skip_checks()
-        if not CONF.loadbalancer_feature_enabled.health_monitor_enabled:
-            cls.skip('Health Monitors not supported')
-
-    @classmethod
-    def resource_setup(cls):
-        """Setup resources needed by the tests."""
-        super(HealthMonitorAPITest, cls).resource_setup()
-
-        lb_name = data_utils.rand_name("lb_member_lb1_hm")
-        lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
-                     const.NAME: lb_name}
-
-        cls._setup_lb_network_kwargs(lb_kwargs)
-
-        lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
-        cls.lb_id = lb[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_lb_client.cleanup_loadbalancer,
-            cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.lb_build_interval,
-                                CONF.load_balancer.lb_build_timeout)
-
-    @decorators.idempotent_id('30288670-5772-40c2-92e6-6d4a6d62d029')
-    def test_healthmonitor_create(self):
-        """Tests healthmonitor create and basic show APIs.
-
-        * Create a clean pool to use for the healthmonitor.
-        * Tests that users without the loadbalancer member role cannot
-          create healthmonitors.
-        * Create a fully populated healthmonitor.
-        * Show healthmonitor details.
-        * Validate the show reflects the requested values.
-        """
-        pool_name = data_utils.rand_name("lb_member_pool1_hm-create")
-        pool_kwargs = {
-            const.NAME: pool_name,
-            const.PROTOCOL: const.HTTP,
-            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
-            const.LOADBALANCER_ID: self.lb_id,
-        }
-
-        pool = self.mem_pool_client.create_pool(**pool_kwargs)
-        self.addCleanup(
-            self.mem_pool_client.cleanup_pool, pool[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                self.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-        hm_name = data_utils.rand_name("lb_member_hm1-create")
-        hm_kwargs = {
-            const.POOL_ID: pool[const.ID],
-            const.NAME: hm_name,
-            const.TYPE: const.HEALTH_MONITOR_HTTP,
-            const.DELAY: 2,
-            const.TIMEOUT: 3,
-            const.MAX_RETRIES: 4,
-            const.MAX_RETRIES_DOWN: 5,
-            const.HTTP_METHOD: const.GET,
-            const.URL_PATH: '/',
-            const.EXPECTED_CODES: '200-204',
-            const.ADMIN_STATE_UP: True,
-        }
-
-        # Test that a user without the loadbalancer role cannot
-        # create a healthmonitor
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.healthmonitor_client.create_healthmonitor,
-                **hm_kwargs)
-
-        hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
-        self.addCleanup(
-            self.mem_healthmonitor_client.cleanup_healthmonitor,
-            hm[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        hm = waiters.wait_for_status(
-            self.mem_healthmonitor_client.show_healthmonitor,
-            hm[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-
-        parser.parse(hm[const.CREATED_AT])
-        parser.parse(hm[const.UPDATED_AT])
-        UUID(hm[const.ID])
-
-        # Healthmonitors are always ONLINE
-        self.assertEqual(const.ONLINE, hm[const.OPERATING_STATUS])
-
-        equal_items = [const.NAME, const.TYPE, const.DELAY, const.TIMEOUT,
-                       const.MAX_RETRIES, const.MAX_RETRIES_DOWN,
-                       const.HTTP_METHOD, const.URL_PATH, const.EXPECTED_CODES,
-                       const.ADMIN_STATE_UP]
-
-        for item in equal_items:
-            self.assertEqual(hm_kwargs[item], hm[item])
-
-    # Helper functions for test healthmonitor list
-    def _filter_hms_by_pool_id(self, hms, pool_ids):
-        return [hm for hm in hms
-                if hm[const.POOLS][0][const.ID] in pool_ids]
-
-    def _filter_hms_by_index(self, hms, indexes):
-        return [hm for i, hm in enumerate(hms) if i not in indexes]
-
-    @decorators.idempotent_id('c9a9f20c-3680-4ae8-b657-33c687258fea')
-    def test_healthmonitor_list(self):
-        """Tests healthmonitor list API and field filtering.
-
-        * Create three clean pools to use for the healthmonitors.
-        * Create three healthmonitors.
-        * Validates that other accounts cannot list the healthmonitors.
-        * List the healthmonitors using the default sort order.
-        * List the healthmonitors using descending sort order.
-        * List the healthmonitors using ascending sort order.
-        * List the healthmonitors returning one field at a time.
-        * List the healthmonitors returning two fields.
-        * List the healthmonitors filtering to one of the three.
-        * List the healthmonitors filtered, one field, and sorted.
-        """
-        # Get a list of pre-existing HMs to filter from test data
-        pretest_hms = self.mem_healthmonitor_client.list_healthmonitors()
-        # Store their IDs for easy access
-        pretest_hm_ids = [hm['id'] for hm in pretest_hms]
-
-        pool1_name = data_utils.rand_name("lb_member_pool1_hm-list")
-        pool1_kwargs = {
-            const.NAME: pool1_name,
-            const.PROTOCOL: const.HTTP,
-            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
-            const.LOADBALANCER_ID: self.lb_id,
-        }
-
-        pool1 = self.mem_pool_client.create_pool(**pool1_kwargs)
-        pool1_id = pool1[const.ID]
-        self.addCleanup(
-            self.mem_pool_client.cleanup_pool, pool1_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                self.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-        pool2_name = data_utils.rand_name("lb_member_pool2_hm-list")
-        pool2_kwargs = {
-            const.NAME: pool2_name,
-            const.PROTOCOL: const.HTTP,
-            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
-            const.LOADBALANCER_ID: self.lb_id,
-        }
-
-        pool2 = self.mem_pool_client.create_pool(**pool2_kwargs)
-        pool2_id = pool2[const.ID]
-        self.addCleanup(
-            self.mem_pool_client.cleanup_pool, pool2_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                self.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-        pool3_name = data_utils.rand_name("lb_member_pool3_hm-list")
-        pool3_kwargs = {
-            const.NAME: pool3_name,
-            const.PROTOCOL: const.HTTP,
-            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
-            const.LOADBALANCER_ID: self.lb_id,
-        }
-
-        pool3 = self.mem_pool_client.create_pool(**pool3_kwargs)
-        pool3_id = pool3[const.ID]
-        self.addCleanup(
-            self.mem_pool_client.cleanup_pool, pool3_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                self.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-        hm1_name = data_utils.rand_name("lb_member_hm2-list")
-        hm1_kwargs = {
-            const.POOL_ID: pool1_id,
-            const.NAME: hm1_name,
-            const.TYPE: const.HEALTH_MONITOR_HTTP,
-            const.DELAY: 2,
-            const.TIMEOUT: 3,
-            const.MAX_RETRIES: 4,
-            const.MAX_RETRIES_DOWN: 5,
-            const.HTTP_METHOD: const.GET,
-            const.URL_PATH: '/B',
-            const.EXPECTED_CODES: '200-204',
-            const.ADMIN_STATE_UP: True,
-        }
-        hm1 = self.mem_healthmonitor_client.create_healthmonitor(
-            **hm1_kwargs)
-        self.addCleanup(
-            self.mem_healthmonitor_client.cleanup_healthmonitor,
-            hm1[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-        hm1 = waiters.wait_for_status(
-            self.mem_healthmonitor_client.show_healthmonitor, hm1[const.ID],
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                self.lb_id,
-                                const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-        # Time resolution for created_at is only to the second, and we need to
-        # ensure that each object has a distinct creation time. Delaying one
-        # second is both a simple and a reliable way to accomplish this.
-        time.sleep(1)
-
-        hm2_name = data_utils.rand_name("lb_member_hm1-list")
-        hm2_kwargs = {
-            const.POOL_ID: pool2_id,
-            const.NAME: hm2_name,
-            const.TYPE: const.HEALTH_MONITOR_HTTP,
-            const.DELAY: 2,
-            const.TIMEOUT: 3,
-            const.MAX_RETRIES: 4,
-            const.MAX_RETRIES_DOWN: 5,
-            const.HTTP_METHOD: const.GET,
-            const.URL_PATH: '/A',
-            const.EXPECTED_CODES: '200-204',
-            const.ADMIN_STATE_UP: True,
-        }
-        hm2 = self.mem_healthmonitor_client.create_healthmonitor(
-            **hm2_kwargs)
-        self.addCleanup(
-            self.mem_healthmonitor_client.cleanup_healthmonitor,
-            hm2[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-        hm2 = waiters.wait_for_status(
-            self.mem_healthmonitor_client.show_healthmonitor, hm2[const.ID],
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                self.lb_id,
-                                const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-        # Time resolution for created_at is only to the second, and we need to
-        # ensure that each object has a distinct creation time. Delaying one
-        # second is both a simple and a reliable way to accomplish this.
-        time.sleep(1)
-
-        hm3_name = data_utils.rand_name("lb_member_hm3-list")
-        hm3_kwargs = {
-            const.POOL_ID: pool3_id,
-            const.NAME: hm3_name,
-            const.TYPE: const.HEALTH_MONITOR_HTTP,
-            const.DELAY: 2,
-            const.TIMEOUT: 3,
-            const.MAX_RETRIES: 4,
-            const.MAX_RETRIES_DOWN: 5,
-            const.HTTP_METHOD: const.GET,
-            const.URL_PATH: '/C',
-            const.EXPECTED_CODES: '200-204',
-            const.ADMIN_STATE_UP: False,
-        }
-        hm3 = self.mem_healthmonitor_client.create_healthmonitor(
-            **hm3_kwargs)
-        self.addCleanup(
-            self.mem_healthmonitor_client.cleanup_healthmonitor,
-            hm3[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-        hm3 = waiters.wait_for_status(
-            self.mem_healthmonitor_client.show_healthmonitor, hm3[const.ID],
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                self.lb_id,
-                                const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-        # Test that a different user cannot list healthmonitors
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            member2_client = self.os_roles_lb_member2.healthmonitor_client
-            primary = member2_client.list_healthmonitors(
-                query_params='pool_id={pool_id}'.format(pool_id=pool1_id))
-            self.assertEqual(0, len(primary))
-
-        # Test that users without the lb member role cannot list healthmonitors
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.healthmonitor_client.list_healthmonitors)
-
-        # Check the default sort order, created_at
-        hms = self.mem_healthmonitor_client.list_healthmonitors()
-        hms = self._filter_hms_by_pool_id(hms, (pool1_id, pool2_id, pool3_id))
-        self.assertEqual(hm1[const.URL_PATH],
-                         hms[0][const.URL_PATH])
-        self.assertEqual(hm2[const.URL_PATH],
-                         hms[1][const.URL_PATH])
-        self.assertEqual(hm3[const.URL_PATH],
-                         hms[2][const.URL_PATH])
-
-        # Test sort descending by description
-        hms = self.mem_healthmonitor_client.list_healthmonitors(
-            query_params='{sort}={url_path}:{desc}'
-                         .format(sort=const.SORT,
-                                 url_path=const.URL_PATH, desc=const.DESC))
-        hms = self._filter_hms_by_pool_id(hms, (pool1_id, pool2_id, pool3_id))
-        self.assertEqual(hm1[const.URL_PATH],
-                         hms[1][const.URL_PATH])
-        self.assertEqual(hm2[const.URL_PATH],
-                         hms[2][const.URL_PATH])
-        self.assertEqual(hm3[const.URL_PATH],
-                         hms[0][const.URL_PATH])
-
-        # Test sort ascending by description
-        hms = self.mem_healthmonitor_client.list_healthmonitors(
-            query_params='{sort}={url_path}:{asc}'
-                         .format(sort=const.SORT,
-                                 url_path=const.URL_PATH, asc=const.ASC))
-        hms = self._filter_hms_by_pool_id(hms, (pool1_id, pool2_id, pool3_id))
-        self.assertEqual(hm1[const.URL_PATH],
-                         hms[1][const.URL_PATH])
-        self.assertEqual(hm2[const.URL_PATH],
-                         hms[0][const.URL_PATH])
-        self.assertEqual(hm3[const.URL_PATH],
-                         hms[2][const.URL_PATH])
-
-        # Determine indexes of pretest HMs in default sort
-        pretest_hm_indexes = []
-        hms = self.mem_healthmonitor_client.list_healthmonitors()
-        for i, hm in enumerate(hms):
-            if hm['id'] in pretest_hm_ids:
-                pretest_hm_indexes.append(i)
-
-        # Test fields
-        for field in const.SHOW_HEALTHMONITOR_RESPONSE_FIELDS:
-            hms = self.mem_healthmonitor_client.list_healthmonitors(
-                query_params='{fields}={field}'
-                             .format(fields=const.FIELDS, field=field))
-            hms = self._filter_hms_by_index(hms, pretest_hm_indexes)
-            self.assertEqual(1, len(hms[0]))
-            self.assertEqual(hm1[field], hms[0][field])
-            self.assertEqual(hm2[field], hms[1][field])
-            self.assertEqual(hm3[field], hms[2][field])
-
-        # Test multiple fields at the same time
-        hms = self.mem_healthmonitor_client.list_healthmonitors(
-            query_params='{fields}={admin}&'
-                         '{fields}={created}&'
-                         '{fields}={pools}'.format(
-                             fields=const.FIELDS,
-                             admin=const.ADMIN_STATE_UP,
-                             created=const.CREATED_AT,
-                             pools=const.POOLS))
-        hms = self._filter_hms_by_pool_id(hms, (pool1_id, pool2_id, pool3_id))
-        self.assertEqual(3, len(hms[0]))
-        self.assertTrue(hms[0][const.ADMIN_STATE_UP])
-        parser.parse(hms[0][const.CREATED_AT])
-        self.assertTrue(hms[1][const.ADMIN_STATE_UP])
-        parser.parse(hms[1][const.CREATED_AT])
-        self.assertFalse(hms[2][const.ADMIN_STATE_UP])
-        parser.parse(hms[2][const.CREATED_AT])
-
-        # Test filtering
-        hms = self.mem_healthmonitor_client.list_healthmonitors(
-            query_params='{name}={hm_name}'.format(
-                name=const.NAME,
-                hm_name=hm2[const.NAME]))
-        self.assertEqual(1, len(hms))
-        self.assertEqual(hm2[const.NAME],
-                         hms[0][const.NAME])
-
-        # Test combined params
-        hms = self.mem_healthmonitor_client.list_healthmonitors(
-            query_params='{admin}={true}&'
-                         '{fields}={name}&{fields}={pools}&'
-                         '{sort}={name}:{desc}'.format(
-                             admin=const.ADMIN_STATE_UP,
-                             true=const.ADMIN_STATE_UP_TRUE,
-                             fields=const.FIELDS, name=const.NAME,
-                             pools=const.POOLS, sort=const.SORT,
-                             desc=const.DESC))
-        hms = self._filter_hms_by_pool_id(hms, (pool1_id, pool2_id, pool3_id))
-        # Should get two healthmonitors
-        self.assertEqual(2, len(hms))
-        # healthmonitors should have two fields
-        self.assertEqual(2, len(hms[0]))
-        # Should be in descending order
-        self.assertEqual(hm2[const.NAME],
-                         hms[1][const.NAME])
-        self.assertEqual(hm1[const.NAME],
-                         hms[0][const.NAME])
-
-    @decorators.idempotent_id('284e8d3b-7b2d-4697-9e41-580b3423c0b4')
-    def test_healthmonitor_show(self):
-        """Tests healthmonitor show API.
-
-        * Create a clean pool to use for the healthmonitor.
-        * Create a fully populated healthmonitor.
-        * Show healthmonitor details.
-        * Validate the show reflects the requested values.
-        * Validates that other accounts cannot see the healthmonitor.
-        """
-        pool_name = data_utils.rand_name("lb_member_pool1_hm-show")
-        pool_kwargs = {
-            const.NAME: pool_name,
-            const.PROTOCOL: const.HTTP,
-            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
-            const.LOADBALANCER_ID: self.lb_id,
-        }
-
-        pool = self.mem_pool_client.create_pool(**pool_kwargs)
-        self.addCleanup(
-            self.mem_pool_client.cleanup_pool, pool[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                self.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-        hm_name = data_utils.rand_name("lb_member_hm1-show")
-        hm_kwargs = {
-            const.POOL_ID: pool[const.ID],
-            const.NAME: hm_name,
-            const.TYPE: const.HEALTH_MONITOR_HTTP,
-            const.DELAY: 2,
-            const.TIMEOUT: 3,
-            const.MAX_RETRIES: 4,
-            const.MAX_RETRIES_DOWN: 5,
-            const.HTTP_METHOD: const.GET,
-            const.URL_PATH: '/',
-            const.EXPECTED_CODES: '200-204',
-            const.ADMIN_STATE_UP: True,
-        }
-
-        hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
-        self.addCleanup(
-            self.mem_healthmonitor_client.cleanup_healthmonitor,
-            hm[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        hm = waiters.wait_for_status(
-            self.mem_healthmonitor_client.show_healthmonitor,
-            hm[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-
-        parser.parse(hm[const.CREATED_AT])
-        parser.parse(hm[const.UPDATED_AT])
-        UUID(hm[const.ID])
-
-        # Healthmonitors are always ONLINE
-        self.assertEqual(const.ONLINE, hm[const.OPERATING_STATUS])
-
-        equal_items = [const.NAME, const.TYPE, const.DELAY, const.TIMEOUT,
-                       const.MAX_RETRIES, const.MAX_RETRIES_DOWN,
-                       const.HTTP_METHOD, const.URL_PATH, const.EXPECTED_CODES,
-                       const.ADMIN_STATE_UP]
-
-        for item in equal_items:
-            self.assertEqual(hm_kwargs[item], hm[item])
-
-        # Test that a user with lb_admin role can see the healthmonitor
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            healthmonitor_client = self.os_roles_lb_admin.healthmonitor_client
-            hm_adm = healthmonitor_client.show_healthmonitor(hm[const.ID])
-            self.assertEqual(hm_name, hm_adm[const.NAME])
-
-        # Test that a user with cloud admin role can see the healthmonitor
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            adm = self.os_admin.healthmonitor_client.show_healthmonitor(
-                hm[const.ID])
-            self.assertEqual(hm_name, adm[const.NAME])
-
-        # Test that a different user, with loadbalancer member role, cannot
-        # see this healthmonitor
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            member2_client = self.os_roles_lb_member2.healthmonitor_client
-            self.assertRaises(exceptions.Forbidden,
-                              member2_client.show_healthmonitor,
-                              hm[const.ID])
-
-        # Test that a user, without the loadbalancer member role, cannot
-        # show healthmonitors
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.healthmonitor_client.show_healthmonitor,
-                hm[const.ID])
-
-    @decorators.idempotent_id('fa584b2c-f179-4c4e-ad2e-ff51fd1c5973')
-    def test_healthmonitor_update(self):
-        """Tests healthmonitor update and show APIs.
-
-        * Create a clean pool to use for the healthmonitor.
-        * Create a fully populated healthmonitor.
-        * Show healthmonitor details.
-        * Validate the show reflects the initial values.
-        * Validates that other accounts cannot update the healthmonitor.
-        * Update the healthmonitor details.
-        * Show healthmonitor details.
-        * Validate the show reflects the updated values.
-        """
-        pool_name = data_utils.rand_name("lb_member_pool1_hm-update")
-        pool_kwargs = {
-            const.NAME: pool_name,
-            const.PROTOCOL: const.HTTP,
-            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
-            const.LOADBALANCER_ID: self.lb_id,
-        }
-
-        pool = self.mem_pool_client.create_pool(**pool_kwargs)
-        self.addCleanup(
-            self.mem_pool_client.cleanup_pool, pool[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                self.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-        hm_name = data_utils.rand_name("lb_member_hm1-update")
-        hm_kwargs = {
-            const.POOL_ID: pool[const.ID],
-            const.NAME: hm_name,
-            const.TYPE: const.HEALTH_MONITOR_HTTP,
-            const.DELAY: 2,
-            const.TIMEOUT: 3,
-            const.MAX_RETRIES: 4,
-            const.MAX_RETRIES_DOWN: 5,
-            const.HTTP_METHOD: const.GET,
-            const.URL_PATH: '/',
-            const.EXPECTED_CODES: '200-204',
-            const.ADMIN_STATE_UP: False,
-        }
-
-        hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
-        self.addCleanup(
-            self.mem_healthmonitor_client.cleanup_healthmonitor,
-            hm[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        hm = waiters.wait_for_status(
-            self.mem_healthmonitor_client.show_healthmonitor,
-            hm[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-
-        parser.parse(hm[const.CREATED_AT])
-        parser.parse(hm[const.UPDATED_AT])
-        UUID(hm[const.ID])
-
-        # Healthmonitors are ONLINE if admin_state_up = True, else OFFLINE
-        if hm_kwargs[const.ADMIN_STATE_UP]:
-            self.assertEqual(const.ONLINE, hm[const.OPERATING_STATUS])
-        else:
-            self.assertEqual(const.OFFLINE, hm[const.OPERATING_STATUS])
-
-        equal_items = [const.NAME, const.TYPE, const.DELAY, const.TIMEOUT,
-                       const.MAX_RETRIES, const.MAX_RETRIES_DOWN,
-                       const.HTTP_METHOD, const.URL_PATH, const.EXPECTED_CODES,
-                       const.ADMIN_STATE_UP]
-
-        for item in equal_items:
-            self.assertEqual(hm_kwargs[item], hm[item])
-
-        # Test that a user, without the loadbalancer member role, cannot
-        # use this command
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.healthmonitor_client.update_healthmonitor,
-                hm[const.ID], admin_state_up=True)
-
-        # Assert we didn't go into PENDING_*
-        hm_check = self.mem_healthmonitor_client.show_healthmonitor(
-            hm[const.ID])
-        self.assertEqual(const.ACTIVE,
-                         hm_check[const.PROVISIONING_STATUS])
-        self.assertFalse(hm_check[const.ADMIN_STATE_UP])
-
-        # Test that a user, without the loadbalancer member role, cannot
-        # update this healthmonitor
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            member2_client = self.os_roles_lb_member2.healthmonitor_client
-            self.assertRaises(exceptions.Forbidden,
-                              member2_client.update_healthmonitor,
-                              hm[const.ID], admin_state_up=True)
-
-        # Assert we didn't go into PENDING_*
-        hm_check = self.mem_healthmonitor_client.show_healthmonitor(
-            hm[const.ID])
-        self.assertEqual(const.ACTIVE,
-                         hm_check[const.PROVISIONING_STATUS])
-        self.assertFalse(hm_check[const.ADMIN_STATE_UP])
-
-        new_name = data_utils.rand_name("lb_member_hm1-UPDATED")
-        hm_update_kwargs = {
-            const.NAME: new_name,
-            const.DELAY: hm_kwargs[const.DELAY] + 1,
-            const.TIMEOUT: hm_kwargs[const.TIMEOUT] + 1,
-            const.MAX_RETRIES: hm_kwargs[const.MAX_RETRIES] + 1,
-            const.MAX_RETRIES_DOWN: hm_kwargs[const.MAX_RETRIES_DOWN] + 1,
-            const.HTTP_METHOD: const.POST,
-            const.URL_PATH: '/test',
-            const.EXPECTED_CODES: '201,202',
-            const.ADMIN_STATE_UP: not hm_kwargs[const.ADMIN_STATE_UP],
-        }
-        hm = self.mem_healthmonitor_client.update_healthmonitor(
-            hm[const.ID], **hm_update_kwargs)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        hm = waiters.wait_for_status(
-            self.mem_healthmonitor_client.show_healthmonitor,
-            hm[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-
-        # Healthmonitors are ONLINE if admin_state_up = True, else OFFLINE
-        if hm_update_kwargs[const.ADMIN_STATE_UP]:
-            self.assertEqual(const.ONLINE, hm[const.OPERATING_STATUS])
-        else:
-            self.assertEqual(const.OFFLINE, hm[const.OPERATING_STATUS])
-
-        # Test changed items
-        equal_items = [const.NAME, const.DELAY, const.TIMEOUT,
-                       const.MAX_RETRIES, const.MAX_RETRIES_DOWN,
-                       const.HTTP_METHOD, const.URL_PATH, const.EXPECTED_CODES,
-                       const.ADMIN_STATE_UP]
-
-        for item in equal_items:
-            self.assertEqual(hm_update_kwargs[item], hm[item])
-
-        # Test unchanged items
-        equal_items = [const.TYPE]
-        for item in equal_items:
-            self.assertEqual(hm_kwargs[item], hm[item])
-
-    @decorators.idempotent_id('a7bab4ac-340c-4776-ab9d-9fcb66869432')
-    def test_healthmonitor_delete(self):
-        """Tests healthmonitor create and delete APIs.
-
-        * Create a clean pool to use for the healthmonitor.
-        * Creates a healthmonitor.
-        * Validates that other accounts cannot delete the healthmonitor
-        * Deletes the healthmonitor.
-        * Validates the healthmonitor is in the DELETED state.
-        """
-        pool_name = data_utils.rand_name("lb_member_pool1_hm-delete")
-        pool_kwargs = {
-            const.NAME: pool_name,
-            const.PROTOCOL: const.HTTP,
-            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
-            const.LOADBALANCER_ID: self.lb_id,
-        }
-
-        pool = self.mem_pool_client.create_pool(**pool_kwargs)
-        self.addCleanup(
-            self.mem_pool_client.cleanup_pool, pool[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                self.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-        hm_name = data_utils.rand_name("lb_member_hm1-delete")
-        hm_kwargs = {
-            const.POOL_ID: pool[const.ID],
-            const.NAME: hm_name,
-            const.TYPE: const.HEALTH_MONITOR_TCP,
-            const.DELAY: 2,
-            const.TIMEOUT: 3,
-            const.MAX_RETRIES: 4,
-        }
-        hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
-        self.addCleanup(
-            self.mem_healthmonitor_client.cleanup_healthmonitor,
-            hm[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer,
-            self.lb_id, const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-
-        # Test that a user without the loadbalancer role cannot
-        # delete this healthmonitor
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.healthmonitor_client.delete_healthmonitor,
-                hm[const.ID])
-
-        # Test that a different user, with the loadbalancer member role
-        # cannot delete this healthmonitor
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            member2_client = self.os_roles_lb_member2.healthmonitor_client
-            self.assertRaises(exceptions.Forbidden,
-                              member2_client.delete_healthmonitor,
-                              hm[const.ID])
-
-        self.mem_healthmonitor_client.delete_healthmonitor(hm[const.ID])
-
-        waiters.wait_for_deleted_status_or_not_found(
-            self.mem_healthmonitor_client.show_healthmonitor, hm[const.ID],
-            const.PROVISIONING_STATUS,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer,
-            self.lb_id, const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout)
diff --git a/octavia_tempest_plugin/tests/api/v2/test_l7policy.py b/octavia_tempest_plugin/tests/api/v2/test_l7policy.py
deleted file mode 100644
index 255bbde..0000000
--- a/octavia_tempest_plugin/tests/api/v2/test_l7policy.py
+++ /dev/null
@@ -1,796 +0,0 @@
-# Copyright 2018 GoDaddy
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import time
-from uuid import UUID
-
-from dateutil import parser
-from tempest import config
-from tempest.lib.common.utils import data_utils
-from tempest.lib import decorators
-from tempest.lib import exceptions
-
-from octavia_tempest_plugin.common import constants as const
-from octavia_tempest_plugin.tests import test_base
-from octavia_tempest_plugin.tests import waiters
-
-CONF = config.CONF
-
-
-class L7PolicyAPITest(test_base.LoadBalancerBaseTest):
-    """Test the l7policy object API."""
-
-    @classmethod
-    def resource_setup(cls):
-        """Setup resources needed by the tests."""
-        super(L7PolicyAPITest, cls).resource_setup()
-
-        lb_name = data_utils.rand_name("lb_member_lb1_l7policy")
-        lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
-                     const.NAME: lb_name}
-
-        cls._setup_lb_network_kwargs(lb_kwargs)
-
-        lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
-        cls.lb_id = lb[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_lb_client.cleanup_loadbalancer,
-            cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.lb_build_interval,
-                                CONF.load_balancer.lb_build_timeout)
-
-        listener_name = data_utils.rand_name("lb_member_listener1_l7policy")
-        listener_kwargs = {
-            const.NAME: listener_name,
-            const.PROTOCOL: const.HTTP,
-            const.PROTOCOL_PORT: '80',
-            const.LOADBALANCER_ID: cls.lb_id,
-        }
-        listener = cls.mem_listener_client.create_listener(**listener_kwargs)
-        cls.listener_id = listener[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_listener_client.cleanup_listener,
-            cls.listener_id,
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-        pool_name = data_utils.rand_name("lb_member_pool1_l7policy")
-        pool_kwargs = {
-            const.NAME: pool_name,
-            const.PROTOCOL: const.HTTP,
-            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
-            const.LOADBALANCER_ID: cls.lb_id,
-        }
-
-        pool = cls.mem_pool_client.create_pool(**pool_kwargs)
-        cls.pool_id = pool[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_pool_client.cleanup_pool,
-            cls.pool_id,
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-    @decorators.idempotent_id('eba4ddc2-758b-4dd5-bd28-c1b41d6575ca')
-    def test_l7policy_create_redirect_pool(self):
-        self._test_l7policy_create(pool_id=self.pool_id)
-
-    @decorators.idempotent_id('2b529135-71bc-46f7-912f-74d238d67190')
-    def test_l7policy_create_redirect_url(self):
-        self._test_l7policy_create(url='http://localhost')
-
-    @decorators.idempotent_id('aa9b0d50-0d16-4365-85eb-846b17eb8398')
-    def test_l7policy_create_reject(self):
-        self._test_l7policy_create()
-
-    def _test_l7policy_create(self, url=None, pool_id=None):
-        """Tests l7policy create and basic show APIs.
-
-        * Tests that users without the loadbalancer member role cannot
-          create l7policies.
-        * Create a fully populated l7policy.
-        * Show l7policy details.
-        * Validate the show reflects the requested values.
-        """
-        l7policy_name = data_utils.rand_name("lb_member_l7policy1-create")
-        l7policy_description = data_utils.arbitrary_string(size=255)
-        l7policy_kwargs = {
-            const.LISTENER_ID: self.listener_id,
-            const.NAME: l7policy_name,
-            const.DESCRIPTION: l7policy_description,
-            const.ADMIN_STATE_UP: True,
-            const.POSITION: 1,
-        }
-        if url:
-            l7policy_kwargs[const.ACTION] = const.REDIRECT_TO_URL
-            l7policy_kwargs[const.REDIRECT_URL] = url
-        elif pool_id:
-            l7policy_kwargs[const.ACTION] = const.REDIRECT_TO_POOL
-            l7policy_kwargs[const.REDIRECT_POOL_ID] = pool_id
-        else:
-            l7policy_kwargs[const.ACTION] = const.REJECT
-
-        # Test that a user without the load balancer role cannot
-        # create a l7policy
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.l7policy_client.create_l7policy,
-                **l7policy_kwargs)
-
-        l7policy = self.mem_l7policy_client.create_l7policy(**l7policy_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_l7policy_client.cleanup_l7policy,
-            l7policy[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        l7policy = waiters.wait_for_status(
-            self.mem_l7policy_client.show_l7policy,
-            l7policy[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        if not CONF.load_balancer.test_with_noop:
-            l7policy = waiters.wait_for_status(
-                self.mem_l7policy_client.show_l7policy,
-                l7policy[const.ID], const.OPERATING_STATUS,
-                const.ONLINE,
-                CONF.load_balancer.build_interval,
-                CONF.load_balancer.build_timeout)
-
-        self.assertEqual(l7policy_name, l7policy[const.NAME])
-        self.assertEqual(l7policy_description, l7policy[const.DESCRIPTION])
-        self.assertTrue(l7policy[const.ADMIN_STATE_UP])
-        parser.parse(l7policy[const.CREATED_AT])
-        parser.parse(l7policy[const.UPDATED_AT])
-        UUID(l7policy[const.ID])
-        # Operating status for a l7policy will be ONLINE if it is enabled:
-        if l7policy[const.ADMIN_STATE_UP]:
-            self.assertEqual(const.ONLINE, l7policy[const.OPERATING_STATUS])
-        else:
-            self.assertEqual(const.OFFLINE, l7policy[const.OPERATING_STATUS])
-        self.assertEqual(self.listener_id, l7policy[const.LISTENER_ID])
-        self.assertEqual(1, l7policy[const.POSITION])
-        if url:
-            self.assertEqual(const.REDIRECT_TO_URL, l7policy[const.ACTION])
-            self.assertEqual(url, l7policy[const.REDIRECT_URL])
-            self.assertIsNone(l7policy.pop(const.REDIRECT_POOL_ID, None))
-        elif pool_id:
-            self.assertEqual(const.REDIRECT_TO_POOL, l7policy[const.ACTION])
-            self.assertEqual(pool_id, l7policy[const.REDIRECT_POOL_ID])
-            self.assertIsNone(l7policy.pop(const.REDIRECT_URL, None))
-        else:
-            self.assertEqual(const.REJECT, l7policy[const.ACTION])
-            self.assertIsNone(l7policy.pop(const.REDIRECT_URL, None))
-            self.assertIsNone(l7policy.pop(const.REDIRECT_POOL_ID, None))
-
-    @decorators.idempotent_id('42fa14ba-caf1-465e-ab36-27e7501f95ef')
-    def test_l7policy_list(self):
-        """Tests l7policy list API and field filtering.
-
-        * Create a clean listener.
-        * Create three l7policies.
-        * Validates that other accounts cannot list the l7policies.
-        * List the l7policies using the default sort order.
-        * List the l7policies using descending sort order.
-        * List the l7policies using ascending sort order.
-        * List the l7policies returning one field at a time.
-        * List the l7policies returning two fields.
-        * List the l7policies filtering to one of the three.
-        * List the l7policies filtered, one field, and sorted.
-        """
-        listener_name = data_utils.rand_name(
-            "lb_member_listener2_l7policy-list")
-        listener_kwargs = {
-            const.NAME: listener_name,
-            const.PROTOCOL: const.HTTP,
-            const.PROTOCOL_PORT: '81',
-            const.LOADBALANCER_ID: self.lb_id,
-        }
-        listener = self.mem_listener_client.create_listener(**listener_kwargs)
-        listener_id = listener[const.ID]
-        self.addCleanup(
-            self.mem_listener_client.cleanup_listener,
-            listener_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                self.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-        l7policy1_name = data_utils.rand_name("lb_member_l7policy2-list")
-        l7policy1_desc = 'B'
-        l7policy1_kwargs = {
-            const.LISTENER_ID: listener_id,
-            const.NAME: l7policy1_name,
-            const.DESCRIPTION: l7policy1_desc,
-            const.ADMIN_STATE_UP: True,
-            const.POSITION: 1,
-            const.ACTION: const.REJECT
-        }
-        l7policy1 = self.mem_l7policy_client.create_l7policy(
-            **l7policy1_kwargs)
-        self.addCleanup(
-            self.mem_l7policy_client.cleanup_l7policy,
-            l7policy1[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-        l7policy1 = waiters.wait_for_status(
-            self.mem_l7policy_client.show_l7policy, l7policy1[const.ID],
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                self.lb_id,
-                                const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-        # Time resolution for created_at is only to the second, and we need to
-        # ensure that each object has a distinct creation time. Delaying one
-        # second is both a simple and a reliable way to accomplish this.
-        time.sleep(1)
-
-        l7policy2_name = data_utils.rand_name("lb_member_l7policy1-list")
-        l7policy2_desc = 'A'
-        l7policy2_kwargs = {
-            const.LISTENER_ID: listener_id,
-            const.NAME: l7policy2_name,
-            const.DESCRIPTION: l7policy2_desc,
-            const.ADMIN_STATE_UP: True,
-            const.POSITION: 1,
-            const.ACTION: const.REDIRECT_TO_POOL,
-            const.REDIRECT_POOL_ID: self.pool_id
-        }
-        l7policy2 = self.mem_l7policy_client.create_l7policy(
-            **l7policy2_kwargs)
-        self.addCleanup(
-            self.mem_l7policy_client.cleanup_l7policy,
-            l7policy2[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-        l7policy2 = waiters.wait_for_status(
-            self.mem_l7policy_client.show_l7policy, l7policy2[const.ID],
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                self.lb_id,
-                                const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-        # Time resolution for created_at is only to the second, and we need to
-        # ensure that each object has a distinct creation time. Delaying one
-        # second is both a simple and a reliable way to accomplish this.
-        time.sleep(1)
-
-        l7policy3_name = data_utils.rand_name("lb_member_l7policy3-list")
-        l7policy3_desc = 'C'
-        l7_redirect_url = 'http://localhost'
-        l7policy3_kwargs = {
-            const.LISTENER_ID: listener_id,
-            const.NAME: l7policy3_name,
-            const.DESCRIPTION: l7policy3_desc,
-            const.ADMIN_STATE_UP: False,
-            const.POSITION: 1,
-            const.ACTION: const.REDIRECT_TO_URL,
-            const.REDIRECT_URL: l7_redirect_url
-        }
-        l7policy3 = self.mem_l7policy_client.create_l7policy(
-            **l7policy3_kwargs)
-        self.addCleanup(
-            self.mem_l7policy_client.cleanup_l7policy,
-            l7policy3[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-        l7policy3 = waiters.wait_for_status(
-            self.mem_l7policy_client.show_l7policy, l7policy3[const.ID],
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                self.lb_id,
-                                const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-        # Test that a different user cannot list l7policies
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            member2_client = self.os_roles_lb_member2.l7policy_client
-            primary = member2_client.list_l7policies(
-                query_params='listener_id={listener_id}'.format(
-                    listener_id=listener_id))
-            self.assertEqual(0, len(primary))
-
-        # Test that a user without the lb member role cannot list load
-        # balancers
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.l7policy_client.list_l7policies)
-
-        # Check the default sort order, created_at
-        l7policies = self.mem_l7policy_client.list_l7policies(
-            query_params='listener_id={listener_id}'.format(
-                listener_id=listener_id))
-        self.assertEqual(l7policy1[const.DESCRIPTION],
-                         l7policies[0][const.DESCRIPTION])
-        self.assertEqual(l7policy2[const.DESCRIPTION],
-                         l7policies[1][const.DESCRIPTION])
-        self.assertEqual(l7policy3[const.DESCRIPTION],
-                         l7policies[2][const.DESCRIPTION])
-
-        # Test sort descending by description
-        l7policies = self.mem_l7policy_client.list_l7policies(
-            query_params='listener_id={listener_id}&{sort}={descr}:{desc}'
-                         .format(listener_id=listener_id, sort=const.SORT,
-                                 descr=const.DESCRIPTION, desc=const.DESC))
-        self.assertEqual(l7policy1[const.DESCRIPTION],
-                         l7policies[1][const.DESCRIPTION])
-        self.assertEqual(l7policy2[const.DESCRIPTION],
-                         l7policies[2][const.DESCRIPTION])
-        self.assertEqual(l7policy3[const.DESCRIPTION],
-                         l7policies[0][const.DESCRIPTION])
-
-        # Test sort ascending by description
-        l7policies = self.mem_l7policy_client.list_l7policies(
-            query_params='listener_id={listener_id}&{sort}={descr}:{asc}'
-                         .format(listener_id=listener_id, sort=const.SORT,
-                                 descr=const.DESCRIPTION, asc=const.ASC))
-        self.assertEqual(l7policy1[const.DESCRIPTION],
-                         l7policies[1][const.DESCRIPTION])
-        self.assertEqual(l7policy2[const.DESCRIPTION],
-                         l7policies[0][const.DESCRIPTION])
-        self.assertEqual(l7policy3[const.DESCRIPTION],
-                         l7policies[2][const.DESCRIPTION])
-
-        # Use this opportunity to verify the position insertion is working
-        l7policies = self.mem_l7policy_client.list_l7policies(
-            query_params='listener_id={listener_id}&{sort}={position}:{asc}'
-                         .format(listener_id=listener_id, sort=const.SORT,
-                                 position=const.POSITION, asc=const.ASC))
-        self.assertEqual(1, l7policies[0][const.POSITION])
-        self.assertEqual(2, l7policies[1][const.POSITION])
-        self.assertEqual(3, l7policies[2][const.POSITION])
-        self.assertEqual(l7policy1[const.NAME],
-                         l7policies[2][const.NAME])
-        self.assertEqual(l7policy2[const.NAME],
-                         l7policies[1][const.NAME])
-        self.assertEqual(l7policy3[const.NAME],
-                         l7policies[0][const.NAME])
-
-        # Test fields
-        for field in const.SHOW_L7POLICY_RESPONSE_FIELDS:
-            # Test position / updated fields separately, because they're odd
-            if field not in (const.POSITION, const.UPDATED_AT):
-                l7policies = self.mem_l7policy_client.list_l7policies(
-                    query_params='listener_id={listener_id}&{fields}={field}'
-                                 .format(listener_id=listener_id,
-                                         fields=const.FIELDS, field=field))
-                self.assertEqual(1, len(l7policies[0]))
-                self.assertEqual(l7policy1[field], l7policies[0][field])
-                self.assertEqual(l7policy2[field], l7policies[1][field])
-                self.assertEqual(l7policy3[field], l7policies[2][field])
-            elif field == const.POSITION:
-                l7policies = self.mem_l7policy_client.list_l7policies(
-                    query_params='listener_id={listener_id}&{fields}={field}'
-                                 .format(listener_id=listener_id,
-                                         fields=const.FIELDS, field=field))
-                self.assertEqual(1, len(l7policies[0]))
-                # Positions won't match the request due to insertion reordering
-                self.assertEqual(3, l7policies[0][field])
-                self.assertEqual(2, l7policies[1][field])
-                self.assertEqual(1, l7policies[2][field])
-            elif field == const.UPDATED_AT:
-                l7policies = self.mem_l7policy_client.list_l7policies(
-                    query_params='listener_id={listener_id}&{fields}={field}'
-                                 .format(listener_id=listener_id,
-                                         fields=const.FIELDS, field=field))
-                # Just test that we get it -- the actual value is unpredictable
-                self.assertEqual(1, len(l7policies[0]))
-
-        # Test multiple fields at the same time
-        l7policies = self.mem_l7policy_client.list_l7policies(
-            query_params='listener_id={listener_id}&{fields}={admin}&'
-                         '{fields}={created}'.format(
-                             listener_id=listener_id, fields=const.FIELDS,
-                             admin=const.ADMIN_STATE_UP,
-                             created=const.CREATED_AT))
-        self.assertEqual(2, len(l7policies[0]))
-        self.assertTrue(l7policies[0][const.ADMIN_STATE_UP])
-        parser.parse(l7policies[0][const.CREATED_AT])
-        self.assertTrue(l7policies[1][const.ADMIN_STATE_UP])
-        parser.parse(l7policies[1][const.CREATED_AT])
-        self.assertFalse(l7policies[2][const.ADMIN_STATE_UP])
-        parser.parse(l7policies[2][const.CREATED_AT])
-
-        # Test filtering
-        l7policies = self.mem_l7policy_client.list_l7policies(
-            query_params='listener_id={listener_id}&'
-                         '{desc}={l7policy_desc}'.format(
-                             listener_id=listener_id, desc=const.DESCRIPTION,
-                             l7policy_desc=l7policy2[const.DESCRIPTION]))
-        self.assertEqual(1, len(l7policies))
-        self.assertEqual(l7policy2[const.DESCRIPTION],
-                         l7policies[0][const.DESCRIPTION])
-
-        # Test combined params
-        l7policies = self.mem_l7policy_client.list_l7policies(
-            query_params='listener_id={listener_id}&{admin}={true}&'
-                         '{fields}={descr}&{fields}={id}&'
-                         '{sort}={descr}:{desc}'.format(
-                             listener_id=listener_id,
-                             admin=const.ADMIN_STATE_UP,
-                             true=const.ADMIN_STATE_UP_TRUE,
-                             fields=const.FIELDS, descr=const.DESCRIPTION,
-                             id=const.ID, sort=const.SORT, desc=const.DESC))
-        # Should get two l7policies
-        self.assertEqual(2, len(l7policies))
-        # l7policies should have two fields
-        self.assertEqual(2, len(l7policies[0]))
-        # Should be in descending order
-        self.assertEqual(l7policy2[const.DESCRIPTION],
-                         l7policies[1][const.DESCRIPTION])
-        self.assertEqual(l7policy1[const.DESCRIPTION],
-                         l7policies[0][const.DESCRIPTION])
-
-    @decorators.idempotent_id('baaa8104-a037-4976-b908-82a0b3e08129')
-    def test_l7policy_show(self):
-        """Tests l7policy show API.
-
-        * Create a fully populated l7policy.
-        * Show l7policy details.
-        * Validate the show reflects the requested values.
-        * Validates that other accounts cannot see the l7policy.
-        """
-        listener_name = data_utils.rand_name(
-            "lb_member_listener4_l7policy-show")
-        listener_kwargs = {
-            const.NAME: listener_name,
-            const.PROTOCOL: const.HTTP,
-            const.PROTOCOL_PORT: '81',
-            const.LOADBALANCER_ID: self.lb_id,
-        }
-        listener = self.mem_listener_client.create_listener(**listener_kwargs)
-        listener_id = listener[const.ID]
-        self.addCleanup(
-            self.mem_listener_client.cleanup_listener,
-            listener_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                self.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-        l7policy_name = data_utils.rand_name("lb_member_l7policy1-show")
-        l7policy_description = data_utils.arbitrary_string(size=255)
-        l7policy_kwargs = {
-            const.LISTENER_ID: listener_id,
-            const.NAME: l7policy_name,
-            const.DESCRIPTION: l7policy_description,
-            const.ADMIN_STATE_UP: True,
-            const.POSITION: 1,
-            const.ACTION: const.REJECT,
-        }
-
-        l7policy = self.mem_l7policy_client.create_l7policy(**l7policy_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_l7policy_client.cleanup_l7policy,
-            l7policy[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        l7policy = waiters.wait_for_status(
-            self.mem_l7policy_client.show_l7policy,
-            l7policy[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        if not CONF.load_balancer.test_with_noop:
-            l7policy = waiters.wait_for_status(
-                self.mem_l7policy_client.show_l7policy,
-                l7policy[const.ID], const.OPERATING_STATUS,
-                const.ONLINE,
-                CONF.load_balancer.build_interval,
-                CONF.load_balancer.build_timeout)
-
-        self.assertEqual(l7policy_name, l7policy[const.NAME])
-        self.assertEqual(l7policy_description, l7policy[const.DESCRIPTION])
-        self.assertTrue(l7policy[const.ADMIN_STATE_UP])
-        parser.parse(l7policy[const.CREATED_AT])
-        parser.parse(l7policy[const.UPDATED_AT])
-        UUID(l7policy[const.ID])
-        # Operating status for a l7policy will be ONLINE if it is enabled:
-        if l7policy[const.ADMIN_STATE_UP]:
-            self.assertEqual(const.ONLINE, l7policy[const.OPERATING_STATUS])
-        else:
-            self.assertEqual(const.OFFLINE, l7policy[const.OPERATING_STATUS])
-        self.assertEqual(listener_id, l7policy[const.LISTENER_ID])
-        self.assertEqual(1, l7policy[const.POSITION])
-        self.assertEqual(const.REJECT, l7policy[const.ACTION])
-        self.assertIsNone(l7policy.pop(const.REDIRECT_URL, None))
-        self.assertIsNone(l7policy.pop(const.REDIRECT_POOL_ID, None))
-
-        # Test that a user with lb_admin role can see the l7policy
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            l7policy_client = self.os_roles_lb_admin.l7policy_client
-            l7policy_adm = l7policy_client.show_l7policy(l7policy[const.ID])
-            self.assertEqual(l7policy_name, l7policy_adm[const.NAME])
-
-        # Test that a user with cloud admin role can see the l7policy
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            adm = self.os_admin.l7policy_client.show_l7policy(
-                l7policy[const.ID])
-            self.assertEqual(l7policy_name, adm[const.NAME])
-
-        # Test that a different user, with load balancer member role, cannot
-        # see this l7policy
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            member2_client = self.os_roles_lb_member2.l7policy_client
-            self.assertRaises(exceptions.Forbidden,
-                              member2_client.show_l7policy,
-                              l7policy[const.ID])
-
-        # Test that a user, without the load balancer member role, cannot
-        # show l7policies
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.l7policy_client.show_l7policy,
-                l7policy[const.ID])
-
-    @decorators.idempotent_id('08f73b22-550b-4e5a-b3d6-2ec03251ca13')
-    def test_l7policy_update(self):
-        """Tests l7policy update and show APIs.
-
-        * Create a clean listener.
-        * Create a fully populated l7policy.
-        * Show l7policy details.
-        * Validate the show reflects the initial values.
-        * Validates that other accounts cannot update the l7policy.
-        * Update the l7policy details.
-        * Show l7policy details.
-        * Validate the show reflects the updated values.
-        """
-        listener_name = data_utils.rand_name(
-            "lb_member_listener3_l7policy-update")
-        listener_kwargs = {
-            const.NAME: listener_name,
-            const.PROTOCOL: const.HTTP,
-            const.PROTOCOL_PORT: '81',
-            const.LOADBALANCER_ID: self.lb_id,
-        }
-        listener = self.mem_listener_client.create_listener(**listener_kwargs)
-        listener_id = listener[const.ID]
-        self.addCleanup(
-            self.mem_listener_client.cleanup_listener,
-            listener_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                self.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-        l7policy_name = data_utils.rand_name("lb_member_l7policy1-update")
-        l7policy_description = data_utils.arbitrary_string(size=255)
-        l7_redirect_url = 'http://localhost'
-        l7policy_kwargs = {
-            const.LISTENER_ID: listener_id,
-            const.NAME: l7policy_name,
-            const.DESCRIPTION: l7policy_description,
-            const.ADMIN_STATE_UP: False,
-            const.POSITION: 1,
-            const.ACTION: const.REDIRECT_TO_URL,
-            const.REDIRECT_URL: l7_redirect_url,
-        }
-
-        l7policy = self.mem_l7policy_client.create_l7policy(**l7policy_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_l7policy_client.cleanup_l7policy,
-            l7policy[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        l7policy = waiters.wait_for_status(
-            self.mem_l7policy_client.show_l7policy,
-            l7policy[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-
-        self.assertEqual(l7policy_name, l7policy[const.NAME])
-        self.assertEqual(l7policy_description, l7policy[const.DESCRIPTION])
-        self.assertFalse(l7policy[const.ADMIN_STATE_UP])
-        parser.parse(l7policy[const.CREATED_AT])
-        parser.parse(l7policy[const.UPDATED_AT])
-        UUID(l7policy[const.ID])
-        # Operating status for a l7policy will be ONLINE if it is enabled:
-        if l7policy[const.ADMIN_STATE_UP]:
-            self.assertEqual(const.ONLINE, l7policy[const.OPERATING_STATUS])
-        else:
-            self.assertEqual(const.OFFLINE, l7policy[const.OPERATING_STATUS])
-        self.assertEqual(listener_id, l7policy[const.LISTENER_ID])
-        self.assertEqual(1, l7policy[const.POSITION])
-        self.assertEqual(const.REDIRECT_TO_URL, l7policy[const.ACTION])
-        self.assertEqual(l7_redirect_url, l7policy[const.REDIRECT_URL])
-        self.assertIsNone(l7policy.pop(const.REDIRECT_POOL_ID, None))
-
-        # Test that a user, without the load balancer member role, cannot
-        # use this command
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.l7policy_client.update_l7policy,
-                l7policy[const.ID], admin_state_up=True)
-
-        # Assert we didn't go into PENDING_*
-        l7policy_check = self.mem_l7policy_client.show_l7policy(
-            l7policy[const.ID])
-        self.assertEqual(const.ACTIVE,
-                         l7policy_check[const.PROVISIONING_STATUS])
-        self.assertFalse(l7policy_check[const.ADMIN_STATE_UP])
-
-        # Test that a user, without the load balancer member role, cannot
-        # update this l7policy
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            member2_client = self.os_roles_lb_member2.l7policy_client
-            self.assertRaises(exceptions.Forbidden,
-                              member2_client.update_l7policy,
-                              l7policy[const.ID], admin_state_up=True)
-
-        # Assert we didn't go into PENDING_*
-        l7policy_check = self.mem_l7policy_client.show_l7policy(
-            l7policy[const.ID])
-        self.assertEqual(const.ACTIVE,
-                         l7policy_check[const.PROVISIONING_STATUS])
-        self.assertFalse(l7policy_check[const.ADMIN_STATE_UP])
-
-        new_name = data_utils.rand_name("lb_member_l7policy1-UPDATED")
-        new_description = data_utils.arbitrary_string(size=255,
-                                                      base_text='new')
-        l7policy_update_kwargs = {
-            const.NAME: new_name,
-            const.DESCRIPTION: new_description,
-            const.ADMIN_STATE_UP: True,
-            const.POSITION: 2,
-            const.ACTION: const.REDIRECT_TO_POOL,
-            const.REDIRECT_POOL_ID: self.pool_id,
-        }
-        l7policy = self.mem_l7policy_client.update_l7policy(
-            l7policy[const.ID], **l7policy_update_kwargs)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        l7policy = waiters.wait_for_status(
-            self.mem_l7policy_client.show_l7policy,
-            l7policy[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        if not CONF.load_balancer.test_with_noop:
-            l7policy = waiters.wait_for_status(
-                self.mem_l7policy_client.show_l7policy,
-                l7policy[const.ID], const.OPERATING_STATUS,
-                const.ONLINE,
-                CONF.load_balancer.build_interval,
-                CONF.load_balancer.build_timeout)
-
-        self.assertEqual(new_name, l7policy[const.NAME])
-        self.assertEqual(new_description, l7policy[const.DESCRIPTION])
-        self.assertTrue(l7policy[const.ADMIN_STATE_UP])
-        parser.parse(l7policy[const.CREATED_AT])
-        parser.parse(l7policy[const.UPDATED_AT])
-        UUID(l7policy[const.ID])
-        # Operating status for a l7policy will be ONLINE if it is enabled:
-        if l7policy[const.ADMIN_STATE_UP]:
-            self.assertEqual(const.ONLINE, l7policy[const.OPERATING_STATUS])
-        else:
-            self.assertEqual(const.OFFLINE, l7policy[const.OPERATING_STATUS])
-        self.assertEqual(listener_id, l7policy[const.LISTENER_ID])
-        self.assertEqual(1, l7policy[const.POSITION])
-        self.assertEqual(const.REDIRECT_TO_POOL, l7policy[const.ACTION])
-        self.assertEqual(self.pool_id, l7policy[const.REDIRECT_POOL_ID])
-        self.assertIsNone(l7policy.pop(const.REDIRECT_URL, None))
-
-    @decorators.idempotent_id('7925eb4b-94b6-4c28-98c2-fd0b4f0976cc')
-    def test_l7policy_delete(self):
-        """Tests l7policy create and delete APIs.
-
-        * Creates a l7policy.
-        * Validates that other accounts cannot delete the l7policy
-        * Deletes the l7policy.
-        * Validates the l7policy is in the DELETED state.
-        """
-        l7policy_name = data_utils.rand_name("lb_member_l7policy1-delete")
-        l7policy_kwargs = {
-            const.LISTENER_ID: self.listener_id,
-            const.NAME: l7policy_name,
-            const.ACTION: const.REJECT,
-        }
-        l7policy = self.mem_l7policy_client.create_l7policy(**l7policy_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_l7policy_client.cleanup_l7policy,
-            l7policy[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer,
-            self.lb_id, const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-
-        # Test that a user without the load balancer role cannot
-        # delete this l7policy
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.l7policy_client.delete_l7policy,
-                l7policy[const.ID])
-
-        # Test that a different user, with the load balancer member role
-        # cannot delete this l7policy
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            member2_client = self.os_roles_lb_member2.l7policy_client
-            self.assertRaises(exceptions.Forbidden,
-                              member2_client.delete_l7policy,
-                              l7policy[const.ID])
-
-        self.mem_l7policy_client.delete_l7policy(l7policy[const.ID])
-
-        waiters.wait_for_deleted_status_or_not_found(
-            self.mem_l7policy_client.show_l7policy, l7policy[const.ID],
-            const.PROVISIONING_STATUS,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer,
-            self.lb_id, const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout)
diff --git a/octavia_tempest_plugin/tests/api/v2/test_l7rule.py b/octavia_tempest_plugin/tests/api/v2/test_l7rule.py
deleted file mode 100644
index 395a3ad..0000000
--- a/octavia_tempest_plugin/tests/api/v2/test_l7rule.py
+++ /dev/null
@@ -1,682 +0,0 @@
-# Copyright 2018 GoDaddy
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import time
-from uuid import UUID
-
-from dateutil import parser
-from tempest import config
-from tempest.lib.common.utils import data_utils
-from tempest.lib import decorators
-from tempest.lib import exceptions
-
-from octavia_tempest_plugin.common import constants as const
-from octavia_tempest_plugin.tests import test_base
-from octavia_tempest_plugin.tests import waiters
-
-CONF = config.CONF
-
-
-class L7RuleAPITest(test_base.LoadBalancerBaseTest):
-    """Test the l7rule object API."""
-
-    @classmethod
-    def resource_setup(cls):
-        """Setup resources needed by the tests."""
-        super(L7RuleAPITest, cls).resource_setup()
-
-        lb_name = data_utils.rand_name("lb_member_lb1_l7rule")
-        lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
-                     const.NAME: lb_name}
-
-        cls._setup_lb_network_kwargs(lb_kwargs)
-
-        lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
-        cls.lb_id = lb[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_lb_client.cleanup_loadbalancer,
-            cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.lb_build_interval,
-                                CONF.load_balancer.lb_build_timeout)
-
-        listener_name = data_utils.rand_name("lb_member_listener1_l7rule")
-        listener_kwargs = {
-            const.NAME: listener_name,
-            const.PROTOCOL: const.HTTP,
-            const.PROTOCOL_PORT: '80',
-            const.LOADBALANCER_ID: cls.lb_id,
-        }
-        listener = cls.mem_listener_client.create_listener(**listener_kwargs)
-        cls.listener_id = listener[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_listener_client.cleanup_listener,
-            cls.listener_id,
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-        pool_name = data_utils.rand_name("lb_member_pool1_l7rule")
-        pool_kwargs = {
-            const.NAME: pool_name,
-            const.PROTOCOL: const.HTTP,
-            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
-            const.LISTENER_ID: cls.listener_id,
-        }
-
-        pool = cls.mem_pool_client.create_pool(**pool_kwargs)
-        cls.pool_id = pool[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_pool_client.cleanup_pool,
-            cls.pool_id,
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-        l7policy_name = data_utils.rand_name("lb_member_l7policy1_l7rule")
-        l7policy_kwargs = {
-            const.NAME: l7policy_name,
-            const.LISTENER_ID: cls.listener_id,
-            const.ACTION: const.REJECT,
-        }
-        l7policy = cls.mem_l7policy_client.create_l7policy(**l7policy_kwargs)
-        cls.l7policy_id = l7policy[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_l7policy_client.cleanup_l7policy,
-            cls.l7policy_id,
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-    # Note: This test also covers basic l7rule show API
-    @decorators.idempotent_id('55ac1337-189d-40a6-b614-47d7a8e991f6')
-    def test_l7rule_create(self):
-        """Tests l7rule create and basic show APIs.
-
-        * Tests that users without the loadbalancer member role cannot
-          create l7rules.
-        * Create a fully populated l7rule.
-        * Show l7rule details.
-        * Validate the show reflects the requested values.
-        """
-        l7rule_kwargs = {
-            const.ADMIN_STATE_UP: True,
-            const.L7POLICY_ID: self.l7policy_id,
-            const.TYPE: const.HEADER,
-            const.VALUE: 'myvalue-create',
-            const.COMPARE_TYPE: const.EQUAL_TO,
-            const.KEY: 'mykey-create',
-            const.INVERT: False,
-        }
-
-        # Test that a user without the load balancer role cannot
-        # create a l7rule
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.l7rule_client.create_l7rule,
-                **l7rule_kwargs)
-
-        l7rule = self.mem_l7rule_client.create_l7rule(**l7rule_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_l7rule_client.cleanup_l7rule,
-            l7rule[const.ID], l7policy_id=self.l7policy_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        l7rule = waiters.wait_for_status(
-            self.mem_l7rule_client.show_l7rule,
-            l7rule[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout,
-            l7policy_id=self.l7policy_id)
-        if not CONF.load_balancer.test_with_noop:
-            l7rule = waiters.wait_for_status(
-                self.mem_l7rule_client.show_l7rule,
-                l7rule[const.ID], const.OPERATING_STATUS,
-                const.ONLINE,
-                CONF.load_balancer.build_interval,
-                CONF.load_balancer.build_timeout,
-                l7policy_id=self.l7policy_id)
-
-        parser.parse(l7rule[const.CREATED_AT])
-        parser.parse(l7rule[const.UPDATED_AT])
-        UUID(l7rule[const.ID])
-        # Operating status for a l7rule will be ONLINE if it is enabled:
-        if l7rule[const.ADMIN_STATE_UP]:
-            self.assertEqual(const.ONLINE, l7rule[const.OPERATING_STATUS])
-        else:
-            self.assertEqual(const.OFFLINE, l7rule[const.OPERATING_STATUS])
-
-        equal_items = [const.ADMIN_STATE_UP, const.TYPE, const.VALUE,
-                       const.COMPARE_TYPE, const.KEY, const.INVERT]
-
-        for item in equal_items:
-            self.assertEqual(l7rule_kwargs[item], l7rule[item])
-
-    @decorators.idempotent_id('69095254-f106-4fb6-9f54-7a78cc14fb51')
-    def test_l7rule_list(self):
-        """Tests l7rule list API and field filtering.
-
-        * Create a clean l7policy.
-        * Create three l7rules.
-        * Validates that other accounts cannot list the l7rules.
-        * List the l7rules using the default sort order.
-        * List the l7rules using descending sort order.
-        * List the l7rules using ascending sort order.
-        * List the l7rules returning one field at a time.
-        * List the l7rules returning two fields.
-        * List the l7rules filtering to one of the three.
-        * List the l7rules filtered, one field, and sorted.
-        """
-        l7policy_name = data_utils.rand_name("lb_member_l7policy2_l7rule-list")
-        l7policy = self.mem_l7policy_client.create_l7policy(
-            name=l7policy_name, listener_id=self.listener_id,
-            action=const.REJECT)
-        l7policy_id = l7policy[const.ID]
-        self.addCleanup(
-            self.mem_l7policy_client.cleanup_l7policy, l7policy_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                self.lb_id,
-                                const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-        l7rule1_kwargs = {
-            const.L7POLICY_ID: l7policy_id,
-            const.ADMIN_STATE_UP: True,
-            const.TYPE: const.HEADER,
-            const.VALUE: '2',
-            const.COMPARE_TYPE: const.EQUAL_TO,
-            const.KEY: 'mykey2-list',
-        }
-        l7rule1 = self.mem_l7rule_client.create_l7rule(
-            **l7rule1_kwargs)
-        self.addCleanup(
-            self.mem_l7rule_client.cleanup_l7rule,
-            l7rule1[const.ID], l7policy_id=l7policy_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-        l7rule1 = waiters.wait_for_status(
-            self.mem_l7rule_client.show_l7rule, l7rule1[const.ID],
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout,
-            l7policy_id=l7policy_id)
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                self.lb_id,
-                                const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.check_interval,
-                                CONF.load_balancer.check_timeout)
-        # Time resolution for created_at is only to the second, and we need to
-        # ensure that each object has a distinct creation time. Delaying one
-        # second is both a simple and a reliable way to accomplish this.
-        time.sleep(1)
-
-        l7rule2_kwargs = {
-            const.L7POLICY_ID: l7policy_id,
-            const.ADMIN_STATE_UP: True,
-            const.TYPE: const.HEADER,
-            const.VALUE: '1',
-            const.COMPARE_TYPE: const.EQUAL_TO,
-            const.KEY: 'mykey1-list',
-        }
-        l7rule2 = self.mem_l7rule_client.create_l7rule(
-            **l7rule2_kwargs)
-        self.addCleanup(
-            self.mem_l7rule_client.cleanup_l7rule,
-            l7rule2[const.ID], l7policy_id=l7policy_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-        l7rule2 = waiters.wait_for_status(
-            self.mem_l7rule_client.show_l7rule, l7rule2[const.ID],
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout,
-            l7policy_id=l7policy_id)
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                self.lb_id,
-                                const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.check_interval,
-                                CONF.load_balancer.check_timeout)
-        # Time resolution for created_at is only to the second, and we need to
-        # ensure that each object has a distinct creation time. Delaying one
-        # second is both a simple and a reliable way to accomplish this.
-        time.sleep(1)
-
-        l7rule3_kwargs = {
-            const.L7POLICY_ID: l7policy_id,
-            const.ADMIN_STATE_UP: False,
-            const.TYPE: const.HEADER,
-            const.VALUE: '3',
-            const.COMPARE_TYPE: const.EQUAL_TO,
-            const.KEY: 'mykey3-list',
-        }
-        l7rule3 = self.mem_l7rule_client.create_l7rule(
-            **l7rule3_kwargs)
-        self.addCleanup(
-            self.mem_l7rule_client.cleanup_l7rule,
-            l7rule3[const.ID], l7policy_id=l7policy_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-        l7rule3 = waiters.wait_for_status(
-            self.mem_l7rule_client.show_l7rule, l7rule3[const.ID],
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout,
-            l7policy_id=l7policy_id)
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                self.lb_id,
-                                const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.check_interval,
-                                CONF.load_balancer.check_timeout)
-
-        # Test that a different user cannot list l7rules
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            member2_client = self.os_roles_lb_member2.l7rule_client
-            self.assertRaises(
-                exceptions.Forbidden,
-                member2_client.list_l7rules,
-                l7policy_id)
-
-        # Test that a user without the lb l7rule role cannot list load
-        # balancers
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.l7rule_client.list_l7rules,
-                l7policy_id)
-
-        # Check the default sort order, created_at
-        l7rules = self.mem_l7rule_client.list_l7rules(l7policy_id)
-        self.assertEqual(l7rule1[const.VALUE],
-                         l7rules[0][const.VALUE])
-        self.assertEqual(l7rule2[const.VALUE],
-                         l7rules[1][const.VALUE])
-        self.assertEqual(l7rule3[const.VALUE],
-                         l7rules[2][const.VALUE])
-
-        # Test sort descending by `value`
-        l7rules = self.mem_l7rule_client.list_l7rules(
-            l7policy_id, query_params='{sort}={value}:{desc}'.format(
-                sort=const.SORT, value=const.VALUE, desc=const.DESC))
-        self.assertEqual(l7rule1[const.VALUE],
-                         l7rules[1][const.VALUE])
-        self.assertEqual(l7rule2[const.VALUE],
-                         l7rules[2][const.VALUE])
-        self.assertEqual(l7rule3[const.VALUE],
-                         l7rules[0][const.VALUE])
-
-        # Test sort ascending by `value`
-        l7rules = self.mem_l7rule_client.list_l7rules(
-            l7policy_id, query_params='{sort}={value}:{asc}'.format(
-                sort=const.SORT, value=const.VALUE, asc=const.ASC))
-        self.assertEqual(l7rule1[const.VALUE],
-                         l7rules[1][const.VALUE])
-        self.assertEqual(l7rule2[const.VALUE],
-                         l7rules[0][const.VALUE])
-        self.assertEqual(l7rule3[const.VALUE],
-                         l7rules[2][const.VALUE])
-
-        # Test fields
-        for field in const.SHOW_L7RULE_RESPONSE_FIELDS:
-            l7rules = self.mem_l7rule_client.list_l7rules(
-                l7policy_id, query_params='{fields}={field}'.format(
-                    fields=const.FIELDS, field=field))
-            self.assertEqual(1, len(l7rules[0]))
-            self.assertEqual(l7rule1[field], l7rules[0][field])
-            self.assertEqual(l7rule2[field], l7rules[1][field])
-            self.assertEqual(l7rule3[field], l7rules[2][field])
-
-        # Test multiple fields at the same time
-        l7rules = self.mem_l7rule_client.list_l7rules(
-            l7policy_id,
-            query_params='{fields}={admin}&{fields}={created}'.format(
-                fields=const.FIELDS, admin=const.ADMIN_STATE_UP,
-                created=const.CREATED_AT))
-        self.assertEqual(2, len(l7rules[0]))
-        self.assertTrue(l7rules[0][const.ADMIN_STATE_UP])
-        parser.parse(l7rules[0][const.CREATED_AT])
-        self.assertTrue(l7rules[1][const.ADMIN_STATE_UP])
-        parser.parse(l7rules[1][const.CREATED_AT])
-        self.assertFalse(l7rules[2][const.ADMIN_STATE_UP])
-        parser.parse(l7rules[2][const.CREATED_AT])
-
-        # Test filtering
-        l7rules = self.mem_l7rule_client.list_l7rules(
-            l7policy_id,
-            query_params='{value}={rule_value}'.format(
-                value=const.VALUE,
-                rule_value=l7rule2[const.VALUE]))
-        self.assertEqual(1, len(l7rules))
-        self.assertEqual(l7rule2[const.VALUE],
-                         l7rules[0][const.VALUE])
-
-        # Test combined params
-        l7rules = self.mem_l7rule_client.list_l7rules(
-            l7policy_id,
-            query_params='{admin}={true}&'
-                         '{fields}={value}&{fields}={id}&'
-                         '{sort}={value}:{desc}'.format(
-                             admin=const.ADMIN_STATE_UP,
-                             true=const.ADMIN_STATE_UP_TRUE,
-                             fields=const.FIELDS, value=const.VALUE,
-                             id=const.ID, sort=const.SORT, desc=const.DESC))
-        # Should get two l7rules
-        self.assertEqual(2, len(l7rules))
-        # l7rules should have two fields
-        self.assertEqual(2, len(l7rules[0]))
-        # Should be in descending order by `value`
-        self.assertEqual(l7rule2[const.VALUE],
-                         l7rules[1][const.VALUE])
-        self.assertEqual(l7rule1[const.VALUE],
-                         l7rules[0][const.VALUE])
-
-    @decorators.idempotent_id('b80b34c3-09fc-467b-8027-7350adb17070')
-    def test_l7rule_show(self):
-        """Tests l7rule show API.
-
-        * Create a fully populated l7rule.
-        * Show l7rule details.
-        * Validate the show reflects the requested values.
-        * Validates that other accounts cannot see the l7rule.
-        """
-        l7rule_kwargs = {
-            const.ADMIN_STATE_UP: True,
-            const.L7POLICY_ID: self.l7policy_id,
-            const.TYPE: const.HEADER,
-            const.VALUE: 'myvalue-show',
-            const.COMPARE_TYPE: const.EQUAL_TO,
-            const.KEY: 'mykey-show',
-            const.INVERT: False,
-        }
-
-        l7rule = self.mem_l7rule_client.create_l7rule(**l7rule_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_l7rule_client.cleanup_l7rule,
-            l7rule[const.ID], l7policy_id=self.l7policy_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        l7rule = waiters.wait_for_status(
-            self.mem_l7rule_client.show_l7rule,
-            l7rule[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout,
-            l7policy_id=self.l7policy_id)
-        if not CONF.load_balancer.test_with_noop:
-            l7rule = waiters.wait_for_status(
-                self.mem_l7rule_client.show_l7rule,
-                l7rule[const.ID], const.OPERATING_STATUS,
-                const.ONLINE,
-                CONF.load_balancer.build_interval,
-                CONF.load_balancer.build_timeout,
-                l7policy_id=self.l7policy_id)
-
-        parser.parse(l7rule[const.CREATED_AT])
-        parser.parse(l7rule[const.UPDATED_AT])
-        UUID(l7rule[const.ID])
-        # Operating status for a l7rule will be ONLINE if it is enabled:
-        if l7rule[const.ADMIN_STATE_UP]:
-            self.assertEqual(const.ONLINE, l7rule[const.OPERATING_STATUS])
-        else:
-            self.assertEqual(const.OFFLINE, l7rule[const.OPERATING_STATUS])
-
-        equal_items = [const.ADMIN_STATE_UP, const.TYPE, const.VALUE,
-                       const.COMPARE_TYPE, const.KEY, const.INVERT]
-
-        for item in equal_items:
-            self.assertEqual(l7rule_kwargs[item], l7rule[item])
-
-        # Test that a user with lb_admin role can see the l7rule
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            l7rule_client = self.os_roles_lb_admin.l7rule_client
-            l7rule_adm = l7rule_client.show_l7rule(
-                l7rule[const.ID], l7policy_id=self.l7policy_id)
-            self.assertEqual(l7rule_kwargs[const.KEY], l7rule_adm[const.KEY])
-
-        # Test that a user with cloud admin role can see the l7rule
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            adm = self.os_admin.l7rule_client.show_l7rule(
-                l7rule[const.ID], l7policy_id=self.l7policy_id)
-            self.assertEqual(l7rule_kwargs[const.KEY], adm[const.KEY])
-
-        # Test that a different user, with load balancer member role, cannot
-        # see this l7rule
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            member2_client = self.os_roles_lb_member2.l7rule_client
-            self.assertRaises(exceptions.Forbidden,
-                              member2_client.show_l7rule,
-                              l7rule[const.ID], l7policy_id=self.l7policy_id)
-
-        # Test that a user, without the load balancer member role, cannot
-        # show l7rules
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.l7rule_client.show_l7rule,
-                l7rule[const.ID], l7policy_id=self.l7policy_id)
-
-    @decorators.idempotent_id('f8cee23b-89b6-4f3a-a842-1463daf42cf7')
-    def test_l7rule_update(self):
-        """Tests l7rule show API and field filtering.
-
-        * Create a fully populated l7rule.
-        * Show l7rule details.
-        * Validate the show reflects the initial values.
-        * Validates that other accounts cannot update the l7rule.
-        * Update the l7rule details.
-        * Show l7rule details.
-        * Validate the show reflects the initial values.
-        """
-        l7rule_kwargs = {
-            const.ADMIN_STATE_UP: False,
-            const.L7POLICY_ID: self.l7policy_id,
-            const.TYPE: const.HEADER,
-            const.VALUE: 'myvalue-update',
-            const.COMPARE_TYPE: const.EQUAL_TO,
-            const.KEY: 'mykey-update',
-            const.INVERT: False,
-        }
-
-        l7rule = self.mem_l7rule_client.create_l7rule(**l7rule_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_l7rule_client.cleanup_l7rule,
-            l7rule[const.ID], l7policy_id=self.l7policy_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        l7rule = waiters.wait_for_status(
-            self.mem_l7rule_client.show_l7rule,
-            l7rule[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout,
-            l7policy_id=self.l7policy_id)
-
-        parser.parse(l7rule[const.CREATED_AT])
-        parser.parse(l7rule[const.UPDATED_AT])
-        UUID(l7rule[const.ID])
-        # Operating status for a l7rule will be ONLINE if it is enabled:
-        if l7rule[const.ADMIN_STATE_UP]:
-            self.assertEqual(const.ONLINE, l7rule[const.OPERATING_STATUS])
-        else:
-            self.assertEqual(const.OFFLINE, l7rule[const.OPERATING_STATUS])
-
-        equal_items = [const.ADMIN_STATE_UP, const.TYPE, const.VALUE,
-                       const.COMPARE_TYPE, const.KEY, const.INVERT]
-
-        for item in equal_items:
-            self.assertEqual(l7rule_kwargs[item], l7rule[item])
-
-        # Test that a user, without the load balancer member role, cannot
-        # use this command
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.l7rule_client.update_l7rule,
-                l7rule[const.ID], l7policy_id=self.l7policy_id,
-                admin_state_up=True)
-
-        # Assert we didn't go into PENDING_*
-        l7rule_check = self.mem_l7rule_client.show_l7rule(
-            l7rule[const.ID], l7policy_id=self.l7policy_id)
-        self.assertEqual(const.ACTIVE, l7rule_check[const.PROVISIONING_STATUS])
-        self.assertEqual(False, l7rule_check[const.ADMIN_STATE_UP])
-
-        # Test that a user, without the load balancer member role, cannot
-        # update this l7rule
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            member2_client = self.os_roles_lb_member2.l7rule_client
-            self.assertRaises(exceptions.Forbidden,
-                              member2_client.update_l7rule,
-                              l7rule[const.ID], l7policy_id=self.l7policy_id,
-                              admin_state_up=True)
-
-        # Assert we didn't go into PENDING_*
-        l7rule_check = self.mem_l7rule_client.show_l7rule(
-            l7rule[const.ID], l7policy_id=self.l7policy_id)
-        self.assertEqual(const.ACTIVE, l7rule_check[const.PROVISIONING_STATUS])
-        self.assertEqual(False, l7rule_check[const.ADMIN_STATE_UP])
-
-        l7rule_update_kwargs = {
-            const.L7POLICY_ID: self.l7policy_id,
-            const.ADMIN_STATE_UP: True,
-            const.TYPE: const.COOKIE,
-            const.VALUE: 'myvalue-UPDATED',
-            const.COMPARE_TYPE: const.CONTAINS,
-            const.KEY: 'mykey-UPDATED',
-            const.INVERT: True,
-        }
-        l7rule = self.mem_l7rule_client.update_l7rule(
-            l7rule[const.ID], **l7rule_update_kwargs)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        l7rule = waiters.wait_for_status(
-            self.mem_l7rule_client.show_l7rule,
-            l7rule[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout,
-            l7policy_id=self.l7policy_id)
-
-        # Operating status for a l7rule will be ONLINE if it is enabled:
-        if l7rule[const.ADMIN_STATE_UP]:
-            self.assertEqual(const.ONLINE, l7rule[const.OPERATING_STATUS])
-        else:
-            self.assertEqual(const.OFFLINE, l7rule[const.OPERATING_STATUS])
-
-        # Test changed items (which is all of them, for l7rules)
-        equal_items = [const.ADMIN_STATE_UP, const.TYPE, const.VALUE,
-                       const.COMPARE_TYPE, const.KEY, const.INVERT]
-        for item in equal_items:
-            self.assertEqual(l7rule_update_kwargs[item], l7rule[item])
-
-    @decorators.idempotent_id('8e15d68d-70e7-4cf3-82bc-9604384654a0')
-    def test_l7rule_delete(self):
-        """Tests l7rule create and delete APIs.
-
-        * Creates a l7rule.
-        * Validates that other accounts cannot delete the l7rule
-        * Deletes the l7rule.
-        * Validates the l7rule is in the DELETED state.
-        """
-        l7rule_kwargs = {
-            const.L7POLICY_ID: self.l7policy_id,
-            const.TYPE: const.HEADER,
-            const.VALUE: 'myvalue-delete',
-            const.COMPARE_TYPE: const.EQUAL_TO,
-            const.KEY: 'mykey-delete',
-        }
-        l7rule = self.mem_l7rule_client.create_l7rule(**l7rule_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_l7rule_client.cleanup_l7rule,
-            l7rule[const.ID], l7policy_id=self.l7policy_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer,
-            self.lb_id, const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-
-        # Test that a user without the load balancer role cannot
-        # delete this l7rule
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.l7rule_client.delete_l7rule,
-                l7rule[const.ID], l7policy_id=self.l7policy_id)
-
-        # Test that a different user, with the load balancer member role
-        # cannot delete this l7rule
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            member2_client = self.os_roles_lb_member2.l7rule_client
-            self.assertRaises(exceptions.Forbidden,
-                              member2_client.delete_l7rule,
-                              l7rule[const.ID], l7policy_id=self.l7policy_id)
-
-        self.mem_l7rule_client.delete_l7rule(l7rule[const.ID],
-                                             l7policy_id=self.l7policy_id)
-
-        waiters.wait_for_deleted_status_or_not_found(
-            self.mem_l7rule_client.show_l7rule, l7rule[const.ID],
-            const.PROVISIONING_STATUS,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout,
-            l7policy_id=self.l7policy_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer,
-            self.lb_id, const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout)
diff --git a/octavia_tempest_plugin/tests/api/v2/test_listener.py b/octavia_tempest_plugin/tests/api/v2/test_listener.py
deleted file mode 100644
index 38da0ae..0000000
--- a/octavia_tempest_plugin/tests/api/v2/test_listener.py
+++ /dev/null
@@ -1,766 +0,0 @@
-# Copyright 2018 GoDaddy
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import time
-from uuid import UUID
-
-from dateutil import parser
-from oslo_utils import strutils
-from tempest import config
-from tempest.lib.common.utils import data_utils
-from tempest.lib import decorators
-from tempest.lib import exceptions
-
-from octavia_tempest_plugin.common import constants as const
-from octavia_tempest_plugin.tests import test_base
-from octavia_tempest_plugin.tests import waiters
-
-CONF = config.CONF
-
-
-class ListenerAPITest(test_base.LoadBalancerBaseTest):
-    """Test the listener object API."""
-
-    @classmethod
-    def resource_setup(cls):
-        """Setup resources needed by the tests."""
-        super(ListenerAPITest, cls).resource_setup()
-
-        lb_name = data_utils.rand_name("lb_member_lb1_listener")
-        lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
-                     const.NAME: lb_name}
-
-        cls._setup_lb_network_kwargs(lb_kwargs)
-
-        lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
-        cls.lb_id = lb[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_lb_client.cleanup_loadbalancer,
-            cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.lb_build_interval,
-                                CONF.load_balancer.lb_build_timeout)
-
-    @decorators.idempotent_id('88d0ec83-7b08-48d9-96e2-0df1d2f8cd98')
-    def test_listener_create(self):
-        """Tests listener create and basic show APIs.
-
-        * Tests that users without the loadbalancer member role cannot
-          create listeners.
-        * Create a fully populated listener.
-        * Show listener details.
-        * Validate the show reflects the requested values.
-        """
-        listener_name = data_utils.rand_name("lb_member_listener1-create")
-        listener_description = data_utils.arbitrary_string(size=255)
-
-        listener_kwargs = {
-            const.NAME: listener_name,
-            const.DESCRIPTION: listener_description,
-            const.ADMIN_STATE_UP: True,
-            const.PROTOCOL: const.HTTP,
-            const.PROTOCOL_PORT: 80,
-            const.LOADBALANCER_ID: self.lb_id,
-            const.CONNECTION_LIMIT: 200,
-            const.INSERT_HEADERS: {
-                const.X_FORWARDED_FOR: "true",
-                const.X_FORWARDED_PORT: "true"
-            },
-            # Don't test with a default pool -- we'll do that in the scenario,
-            # but this will allow us to test that the field isn't mandatory,
-            # as well as not conflate pool failures with listener test failures
-            # const.DEFAULT_POOL_ID: self.pool_id,
-
-            # TODO(rm_work): need to add TLS related stuff
-            # const.DEFAULT_TLS_CONTAINER_REF: '',
-            # const.SNI_CONTAINER_REFS: [],
-        }
-        if self.mem_listener_client.is_version_supported(
-                self.api_version, '2.1'):
-            listener_kwargs.update({
-                const.TIMEOUT_CLIENT_DATA: 1000,
-                const.TIMEOUT_MEMBER_CONNECT: 1000,
-                const.TIMEOUT_MEMBER_DATA: 1000,
-                const.TIMEOUT_TCP_INSPECT: 50,
-            })
-
-        # Test that a user without the load balancer role cannot
-        # create a listener
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.listener_client.create_listener,
-                **listener_kwargs)
-
-        listener = self.mem_listener_client.create_listener(**listener_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_listener_client.cleanup_listener,
-            listener[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        listener = waiters.wait_for_status(
-            self.mem_listener_client.show_listener,
-            listener[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        if not CONF.load_balancer.test_with_noop:
-            listener = waiters.wait_for_status(
-                self.mem_listener_client.show_listener,
-                listener[const.ID], const.OPERATING_STATUS,
-                const.ONLINE,
-                CONF.load_balancer.build_interval,
-                CONF.load_balancer.build_timeout)
-
-        self.assertEqual(listener_name, listener[const.NAME])
-        self.assertEqual(listener_description, listener[const.DESCRIPTION])
-        self.assertTrue(listener[const.ADMIN_STATE_UP])
-        parser.parse(listener[const.CREATED_AT])
-        parser.parse(listener[const.UPDATED_AT])
-        UUID(listener[const.ID])
-        # Operating status is a measured status, so no-op will not go online
-        if CONF.load_balancer.test_with_noop:
-            self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS])
-        else:
-            self.assertEqual(const.ONLINE, listener[const.OPERATING_STATUS])
-        self.assertEqual(const.HTTP, listener[const.PROTOCOL])
-        self.assertEqual(80, listener[const.PROTOCOL_PORT])
-        self.assertEqual(200, listener[const.CONNECTION_LIMIT])
-        insert_headers = listener[const.INSERT_HEADERS]
-        self.assertTrue(
-            strutils.bool_from_string(insert_headers[const.X_FORWARDED_FOR]))
-        self.assertTrue(
-            strutils.bool_from_string(insert_headers[const.X_FORWARDED_PORT]))
-        if self.mem_listener_client.is_version_supported(
-                self.api_version, '2.1'):
-            self.assertEqual(1000, listener[const.TIMEOUT_CLIENT_DATA])
-            self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_CONNECT])
-            self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_DATA])
-            self.assertEqual(50, listener[const.TIMEOUT_TCP_INSPECT])
-
-    @decorators.idempotent_id('78ba6eb0-178c-477e-9156-b6775ca7b271')
-    def test_listener_list(self):
-        """Tests listener list API and field filtering.
-
-        * Create a clean loadbalancer.
-        * Create three listeners.
-        * Validates that other accounts cannot list the listeners.
-        * List the listeners using the default sort order.
-        * List the listeners using descending sort order.
-        * List the listeners using ascending sort order.
-        * List the listeners returning one field at a time.
-        * List the listeners returning two fields.
-        * List the listeners filtering to one of the three.
-        * List the listeners filtered, one field, and sorted.
-        """
-        lb_name = data_utils.rand_name("lb_member_lb2_listener-list")
-        lb = self.mem_lb_client.create_loadbalancer(
-            name=lb_name,
-            vip_network_id=self.lb_member_vip_net[const.ID])
-        lb_id = lb[const.ID]
-        self.addCleanup(
-            self.mem_lb_client.cleanup_loadbalancer,
-            lb_id)
-
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                lb_id,
-                                const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.lb_build_interval,
-                                CONF.load_balancer.lb_build_timeout)
-
-        listener1_name = data_utils.rand_name("lb_member_listener2-list")
-        listener1_desc = 'B'
-        listener1_kwargs = {
-            const.NAME: listener1_name,
-            const.DESCRIPTION: listener1_desc,
-            const.ADMIN_STATE_UP: True,
-            const.PROTOCOL: const.HTTP,
-            const.PROTOCOL_PORT: 80,
-            const.LOADBALANCER_ID: lb_id,
-        }
-        listener1 = self.mem_listener_client.create_listener(
-            **listener1_kwargs)
-        self.addCleanup(
-            self.mem_listener_client.cleanup_listener,
-            listener1[const.ID],
-            lb_client=self.mem_lb_client, lb_id=lb_id)
-        listener1 = waiters.wait_for_status(
-            self.mem_listener_client.show_listener, listener1[const.ID],
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                lb_id,
-                                const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-        # Time resolution for created_at is only to the second, and we need to
-        # ensure that each object has a distinct creation time. Delaying one
-        # second is both a simple and a reliable way to accomplish this.
-        time.sleep(1)
-
-        listener2_name = data_utils.rand_name("lb_member_listener1-list")
-        listener2_desc = 'A'
-        listener2_kwargs = {
-            const.NAME: listener2_name,
-            const.DESCRIPTION: listener2_desc,
-            const.ADMIN_STATE_UP: True,
-            const.PROTOCOL: const.HTTP,
-            const.PROTOCOL_PORT: 81,
-            const.LOADBALANCER_ID: lb_id,
-        }
-        listener2 = self.mem_listener_client.create_listener(
-            **listener2_kwargs)
-        self.addCleanup(
-            self.mem_listener_client.cleanup_listener,
-            listener2[const.ID],
-            lb_client=self.mem_lb_client, lb_id=lb_id)
-        listener2 = waiters.wait_for_status(
-            self.mem_listener_client.show_listener, listener2[const.ID],
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                lb_id,
-                                const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-        # Time resolution for created_at is only to the second, and we need to
-        # ensure that each object has a distinct creation time. Delaying one
-        # second is both a simple and a reliable way to accomplish this.
-        time.sleep(1)
-
-        listener3_name = data_utils.rand_name("lb_member_listener3-list")
-        listener3_desc = 'C'
-        listener3_kwargs = {
-            const.NAME: listener3_name,
-            const.DESCRIPTION: listener3_desc,
-            const.ADMIN_STATE_UP: False,
-            const.PROTOCOL: const.HTTP,
-            const.PROTOCOL_PORT: 82,
-            const.LOADBALANCER_ID: lb_id,
-        }
-        listener3 = self.mem_listener_client.create_listener(
-            **listener3_kwargs)
-        self.addCleanup(
-            self.mem_listener_client.cleanup_listener,
-            listener3[const.ID],
-            lb_client=self.mem_lb_client, lb_id=lb_id)
-        listener3 = waiters.wait_for_status(
-            self.mem_listener_client.show_listener, listener3[const.ID],
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                lb_id,
-                                const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-        if not CONF.load_balancer.test_with_noop:
-            # Wait for the enabled listeners to come ONLINE
-            listener1 = waiters.wait_for_status(
-                self.mem_listener_client.show_listener, listener1[const.ID],
-                const.OPERATING_STATUS, const.ONLINE,
-                CONF.load_balancer.build_interval,
-                CONF.load_balancer.build_timeout)
-            listener2 = waiters.wait_for_status(
-                self.mem_listener_client.show_listener, listener2[const.ID],
-                const.OPERATING_STATUS, const.ONLINE,
-                CONF.load_balancer.build_interval,
-                CONF.load_balancer.build_timeout)
-
-        # Test that a different user cannot list listeners
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            member2_client = self.os_roles_lb_member2.listener_client
-            primary = member2_client.list_listeners(
-                query_params='loadbalancer_id={lb_id}'.format(lb_id=lb_id))
-            self.assertEqual(0, len(primary))
-
-        # Test that a user without the lb member role cannot list load
-        # balancers
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.listener_client.list_listeners)
-
-        # Check the default sort order, created_at
-        listeners = self.mem_listener_client.list_listeners(
-            query_params='loadbalancer_id={lb_id}'.format(lb_id=lb_id))
-        self.assertEqual(listener1[const.DESCRIPTION],
-                         listeners[0][const.DESCRIPTION])
-        self.assertEqual(listener2[const.DESCRIPTION],
-                         listeners[1][const.DESCRIPTION])
-        self.assertEqual(listener3[const.DESCRIPTION],
-                         listeners[2][const.DESCRIPTION])
-
-        # Test sort descending by description
-        listeners = self.mem_listener_client.list_listeners(
-            query_params='loadbalancer_id={lb_id}&{sort}={descr}:{desc}'
-                         .format(lb_id=lb_id, sort=const.SORT,
-                                 descr=const.DESCRIPTION, desc=const.DESC))
-        self.assertEqual(listener1[const.DESCRIPTION],
-                         listeners[1][const.DESCRIPTION])
-        self.assertEqual(listener2[const.DESCRIPTION],
-                         listeners[2][const.DESCRIPTION])
-        self.assertEqual(listener3[const.DESCRIPTION],
-                         listeners[0][const.DESCRIPTION])
-
-        # Test sort ascending by description
-        listeners = self.mem_listener_client.list_listeners(
-            query_params='loadbalancer_id={lb_id}&{sort}={descr}:{asc}'
-                         .format(lb_id=lb_id, sort=const.SORT,
-                                 descr=const.DESCRIPTION, asc=const.ASC))
-        self.assertEqual(listener1[const.DESCRIPTION],
-                         listeners[1][const.DESCRIPTION])
-        self.assertEqual(listener2[const.DESCRIPTION],
-                         listeners[0][const.DESCRIPTION])
-        self.assertEqual(listener3[const.DESCRIPTION],
-                         listeners[2][const.DESCRIPTION])
-
-        # Test fields
-        show_listener_response_fields = const.SHOW_LISTENER_RESPONSE_FIELDS
-        if self.mem_listener_client.is_version_supported(
-                self.api_version, '2.1'):
-            show_listener_response_fields.append('timeout_client_data')
-            show_listener_response_fields.append('timeout_member_connect')
-            show_listener_response_fields.append('timeout_member_data')
-            show_listener_response_fields.append('timeout_tcp_inspect')
-        for field in show_listener_response_fields:
-            if field in (const.DEFAULT_POOL_ID, const.L7_POLICIES):
-                continue
-            listeners = self.mem_listener_client.list_listeners(
-                query_params='loadbalancer_id={lb_id}&{fields}={field}'
-                             .format(lb_id=lb_id,
-                                     fields=const.FIELDS, field=field))
-            self.assertEqual(1, len(listeners[0]))
-            self.assertEqual(listener1[field], listeners[0][field])
-            self.assertEqual(listener2[field], listeners[1][field])
-            self.assertEqual(listener3[field], listeners[2][field])
-
-        # Test multiple fields at the same time
-        listeners = self.mem_listener_client.list_listeners(
-            query_params='loadbalancer_id={lb_id}&{fields}={admin}&'
-                         '{fields}={created}'.format(
-                             lb_id=lb_id, fields=const.FIELDS,
-                             admin=const.ADMIN_STATE_UP,
-                             created=const.CREATED_AT))
-        self.assertEqual(2, len(listeners[0]))
-        self.assertTrue(listeners[0][const.ADMIN_STATE_UP])
-        parser.parse(listeners[0][const.CREATED_AT])
-        self.assertTrue(listeners[1][const.ADMIN_STATE_UP])
-        parser.parse(listeners[1][const.CREATED_AT])
-        self.assertFalse(listeners[2][const.ADMIN_STATE_UP])
-        parser.parse(listeners[2][const.CREATED_AT])
-
-        # Test filtering
-        listeners = self.mem_listener_client.list_listeners(
-            query_params='loadbalancer_id={lb_id}&{desc}={lb_desc}'.format(
-                lb_id=lb_id, desc=const.DESCRIPTION,
-                lb_desc=listener2[const.DESCRIPTION]))
-        self.assertEqual(1, len(listeners))
-        self.assertEqual(listener2[const.DESCRIPTION],
-                         listeners[0][const.DESCRIPTION])
-
-        # Test combined params
-        listeners = self.mem_listener_client.list_listeners(
-            query_params='loadbalancer_id={lb_id}&{admin}={true}&'
-                         '{fields}={descr}&{fields}={id}&'
-                         '{sort}={descr}:{desc}'.format(
-                             lb_id=lb_id, admin=const.ADMIN_STATE_UP,
-                             true=const.ADMIN_STATE_UP_TRUE,
-                             fields=const.FIELDS, descr=const.DESCRIPTION,
-                             id=const.ID, sort=const.SORT, desc=const.DESC))
-        # Should get two listeners
-        self.assertEqual(2, len(listeners))
-        # listeners should have two fields
-        self.assertEqual(2, len(listeners[0]))
-        # Should be in descending order
-        self.assertEqual(listener2[const.DESCRIPTION],
-                         listeners[1][const.DESCRIPTION])
-        self.assertEqual(listener1[const.DESCRIPTION],
-                         listeners[0][const.DESCRIPTION])
-
-    @decorators.idempotent_id('6e299eae-6907-4dfc-89c2-e57709d25d3d')
-    def test_listener_show(self):
-        """Tests listener show API.
-
-        * Create a fully populated listener.
-        * Show listener details.
-        * Validate the show reflects the requested values.
-        * Validates that other accounts cannot see the listener.
-        """
-        listener_name = data_utils.rand_name("lb_member_listener1-show")
-        listener_description = data_utils.arbitrary_string(size=255)
-
-        listener_kwargs = {
-            const.NAME: listener_name,
-            const.DESCRIPTION: listener_description,
-            const.ADMIN_STATE_UP: True,
-            const.PROTOCOL: const.HTTP,
-            const.PROTOCOL_PORT: 81,
-            const.LOADBALANCER_ID: self.lb_id,
-            const.CONNECTION_LIMIT: 200,
-            const.INSERT_HEADERS: {
-                const.X_FORWARDED_FOR: "true",
-                const.X_FORWARDED_PORT: "true"
-            },
-            # TODO(rm_work): need to finish the rest of this stuff
-            # const.DEFAULT_POOL_ID: '',
-            # const.DEFAULT_TLS_CONTAINER_REF: '',
-            # const.SNI_CONTAINER_REFS: [],
-        }
-
-        if self.mem_listener_client.is_version_supported(
-                self.api_version, '2.1'):
-            listener_kwargs.update({
-                const.TIMEOUT_CLIENT_DATA: 1000,
-                const.TIMEOUT_MEMBER_CONNECT: 1000,
-                const.TIMEOUT_MEMBER_DATA: 1000,
-                const.TIMEOUT_TCP_INSPECT: 50,
-            })
-
-        listener = self.mem_listener_client.create_listener(**listener_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_listener_client.cleanup_listener,
-            listener[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        listener = waiters.wait_for_status(
-            self.mem_listener_client.show_listener,
-            listener[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        if not CONF.load_balancer.test_with_noop:
-            listener = waiters.wait_for_status(
-                self.mem_listener_client.show_listener,
-                listener[const.ID], const.OPERATING_STATUS,
-                const.ONLINE,
-                CONF.load_balancer.build_interval,
-                CONF.load_balancer.build_timeout)
-
-        self.assertEqual(listener_name, listener[const.NAME])
-        self.assertEqual(listener_description, listener[const.DESCRIPTION])
-        self.assertTrue(listener[const.ADMIN_STATE_UP])
-        parser.parse(listener[const.CREATED_AT])
-        parser.parse(listener[const.UPDATED_AT])
-        UUID(listener[const.ID])
-        # Operating status is a measured status, so no-op will not go online
-        if CONF.load_balancer.test_with_noop:
-            self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS])
-        else:
-            self.assertEqual(const.ONLINE, listener[const.OPERATING_STATUS])
-        self.assertEqual(const.HTTP, listener[const.PROTOCOL])
-        self.assertEqual(81, listener[const.PROTOCOL_PORT])
-        self.assertEqual(200, listener[const.CONNECTION_LIMIT])
-        insert_headers = listener[const.INSERT_HEADERS]
-        self.assertTrue(
-            strutils.bool_from_string(insert_headers[const.X_FORWARDED_FOR]))
-        self.assertTrue(
-            strutils.bool_from_string(insert_headers[const.X_FORWARDED_PORT]))
-
-        if self.mem_listener_client.is_version_supported(
-                self.api_version, '2.1'):
-            self.assertEqual(1000, listener[const.TIMEOUT_CLIENT_DATA])
-            self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_CONNECT])
-            self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_DATA])
-            self.assertEqual(50, listener[const.TIMEOUT_TCP_INSPECT])
-
-        # Test that a user with lb_admin role can see the listener
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            listener_client = self.os_roles_lb_admin.listener_client
-            listener_adm = listener_client.show_listener(listener[const.ID])
-            self.assertEqual(listener_name, listener_adm[const.NAME])
-
-        # Test that a user with cloud admin role can see the listener
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            adm = self.os_admin.listener_client.show_listener(
-                listener[const.ID])
-            self.assertEqual(listener_name, adm[const.NAME])
-
-        # Test that a different user, with load balancer member role, cannot
-        # see this listener
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            member2_client = self.os_roles_lb_member2.listener_client
-            self.assertRaises(exceptions.Forbidden,
-                              member2_client.show_listener,
-                              listener[const.ID])
-
-        # Test that a user, without the load balancer member role, cannot
-        # show listeners
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.listener_client.show_listener,
-                listener[const.ID])
-
-    @decorators.idempotent_id('aaae0298-5778-4c7e-a27a-01549a71b319')
-    def test_listener_update(self):
-        """Tests listener update and show APIs.
-
-        * Create a fully populated listener.
-        * Show listener details.
-        * Validate the show reflects the initial values.
-        * Validates that other accounts cannot update the listener.
-        * Update the listener details.
-        * Show listener details.
-        * Validate the show reflects the updated values.
-        """
-        listener_name = data_utils.rand_name("lb_member_listener1-update")
-        listener_description = data_utils.arbitrary_string(size=255)
-
-        listener_kwargs = {
-            const.NAME: listener_name,
-            const.DESCRIPTION: listener_description,
-            const.ADMIN_STATE_UP: False,
-            const.PROTOCOL: const.HTTP,
-            const.PROTOCOL_PORT: 82,
-            const.LOADBALANCER_ID: self.lb_id,
-            const.CONNECTION_LIMIT: 200,
-            const.INSERT_HEADERS: {
-                const.X_FORWARDED_FOR: "true",
-                const.X_FORWARDED_PORT: "true"
-            },
-            # TODO(rm_work): need to finish the rest of this stuff
-            # const.DEFAULT_POOL_ID: '',
-            # const.DEFAULT_TLS_CONTAINER_REF: '',
-            # const.SNI_CONTAINER_REFS: [],
-        }
-        if self.mem_listener_client.is_version_supported(
-                self.api_version, '2.1'):
-            listener_kwargs.update({
-                const.TIMEOUT_CLIENT_DATA: 1000,
-                const.TIMEOUT_MEMBER_CONNECT: 1000,
-                const.TIMEOUT_MEMBER_DATA: 1000,
-                const.TIMEOUT_TCP_INSPECT: 50,
-            })
-
-        listener = self.mem_listener_client.create_listener(**listener_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_listener_client.cleanup_listener,
-            listener[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        listener = waiters.wait_for_status(
-            self.mem_listener_client.show_listener,
-            listener[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-
-        self.assertEqual(listener_name, listener[const.NAME])
-        self.assertEqual(listener_description, listener[const.DESCRIPTION])
-        self.assertFalse(listener[const.ADMIN_STATE_UP])
-        parser.parse(listener[const.CREATED_AT])
-        parser.parse(listener[const.UPDATED_AT])
-        UUID(listener[const.ID])
-        # Operating status will be OFFLINE while admin_state_up = False
-        self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS])
-        self.assertEqual(const.HTTP, listener[const.PROTOCOL])
-        self.assertEqual(82, listener[const.PROTOCOL_PORT])
-        self.assertEqual(200, listener[const.CONNECTION_LIMIT])
-        insert_headers = listener[const.INSERT_HEADERS]
-        self.assertTrue(
-            strutils.bool_from_string(insert_headers[const.X_FORWARDED_FOR]))
-        self.assertTrue(
-            strutils.bool_from_string(insert_headers[const.X_FORWARDED_PORT]))
-        if self.mem_listener_client.is_version_supported(
-                self.api_version, '2.1'):
-            self.assertEqual(1000, listener[const.TIMEOUT_CLIENT_DATA])
-            self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_CONNECT])
-            self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_DATA])
-            self.assertEqual(50, listener[const.TIMEOUT_TCP_INSPECT])
-
-        # Test that a user, without the load balancer member role, cannot
-        # use this command
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.listener_client.update_listener,
-                listener[const.ID], admin_state_up=True)
-
-        # Assert we didn't go into PENDING_*
-        listener_check = self.mem_listener_client.show_listener(
-            listener[const.ID])
-        self.assertEqual(const.ACTIVE,
-                         listener_check[const.PROVISIONING_STATUS])
-        self.assertFalse(listener_check[const.ADMIN_STATE_UP])
-
-        # Test that a user, without the load balancer member role, cannot
-        # update this listener
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            member2_client = self.os_roles_lb_member2.listener_client
-            self.assertRaises(exceptions.Forbidden,
-                              member2_client.update_listener,
-                              listener[const.ID], admin_state_up=True)
-
-        # Assert we didn't go into PENDING_*
-        listener_check = self.mem_listener_client.show_listener(
-            listener[const.ID])
-        self.assertEqual(const.ACTIVE,
-                         listener_check[const.PROVISIONING_STATUS])
-        self.assertFalse(listener_check[const.ADMIN_STATE_UP])
-
-        new_name = data_utils.rand_name("lb_member_listener1-UPDATED")
-        new_description = data_utils.arbitrary_string(size=255,
-                                                      base_text='new')
-        listener_update_kwargs = {
-            const.NAME: new_name,
-            const.DESCRIPTION: new_description,
-            const.ADMIN_STATE_UP: True,
-            const.CONNECTION_LIMIT: 400,
-            const.INSERT_HEADERS: {
-                const.X_FORWARDED_FOR: "false",
-                const.X_FORWARDED_PORT: "false"
-            },
-            # TODO(rm_work): need to finish the rest of this stuff
-            # const.DEFAULT_POOL_ID: '',
-            # const.DEFAULT_TLS_CONTAINER_REF: '',
-            # const.SNI_CONTAINER_REFS: [],
-        }
-        if self.mem_listener_client.is_version_supported(
-                self.api_version, '2.1'):
-            listener_update_kwargs.update({
-                const.TIMEOUT_CLIENT_DATA: 2000,
-                const.TIMEOUT_MEMBER_CONNECT: 2000,
-                const.TIMEOUT_MEMBER_DATA: 2000,
-                const.TIMEOUT_TCP_INSPECT: 100,
-            })
-
-        listener = self.mem_listener_client.update_listener(
-            listener[const.ID], **listener_update_kwargs)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        listener = waiters.wait_for_status(
-            self.mem_listener_client.show_listener,
-            listener[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        if not CONF.load_balancer.test_with_noop:
-            listener = waiters.wait_for_status(
-                self.mem_listener_client.show_listener,
-                listener[const.ID], const.OPERATING_STATUS,
-                const.ONLINE,
-                CONF.load_balancer.build_interval,
-                CONF.load_balancer.build_timeout)
-
-        self.assertEqual(new_name, listener[const.NAME])
-        self.assertEqual(new_description, listener[const.DESCRIPTION])
-        self.assertTrue(listener[const.ADMIN_STATE_UP])
-        # Operating status is a measured status, so no-op will not go online
-        if CONF.load_balancer.test_with_noop:
-            self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS])
-        else:
-            self.assertEqual(const.ONLINE, listener[const.OPERATING_STATUS])
-        self.assertEqual(400, listener[const.CONNECTION_LIMIT])
-        insert_headers = listener[const.INSERT_HEADERS]
-        self.assertFalse(
-            strutils.bool_from_string(insert_headers[const.X_FORWARDED_FOR]))
-        self.assertFalse(
-            strutils.bool_from_string(insert_headers[const.X_FORWARDED_PORT]))
-        if self.mem_listener_client.is_version_supported(
-                self.api_version, '2.1'):
-            self.assertEqual(2000, listener[const.TIMEOUT_CLIENT_DATA])
-            self.assertEqual(2000, listener[const.TIMEOUT_MEMBER_CONNECT])
-            self.assertEqual(2000, listener[const.TIMEOUT_MEMBER_DATA])
-            self.assertEqual(100, listener[const.TIMEOUT_TCP_INSPECT])
-
-    @decorators.idempotent_id('16f11c82-f069-4592-8954-81b35a98e3b7')
-    def test_listener_delete(self):
-        """Tests listener create and delete APIs.
-
-        * Creates a listener.
-        * Validates that other accounts cannot delete the listener
-        * Deletes the listener.
-        * Validates the listener is in the DELETED state.
-        """
-        listener_name = data_utils.rand_name("lb_member_listener1-delete")
-
-        listener_kwargs = {
-            const.NAME: listener_name,
-            const.PROTOCOL: const.HTTP,
-            const.PROTOCOL_PORT: 83,
-            const.LOADBALANCER_ID: self.lb_id,
-        }
-        listener = self.mem_listener_client.create_listener(**listener_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_listener_client.cleanup_listener,
-            listener[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer,
-            self.lb_id, const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-
-        # Test that a user without the load balancer role cannot
-        # delete this listener
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.listener_client.delete_listener,
-                listener[const.ID])
-
-        # Test that a different user, with the load balancer member role
-        # cannot delete this listener
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            member2_client = self.os_roles_lb_member2.listener_client
-            self.assertRaises(exceptions.Forbidden,
-                              member2_client.delete_listener,
-                              listener[const.ID])
-
-        self.mem_listener_client.delete_listener(listener[const.ID])
-
-        waiters.wait_for_deleted_status_or_not_found(
-            self.mem_listener_client.show_listener, listener[const.ID],
-            const.PROVISIONING_STATUS,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer,
-            self.lb_id, const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout)
diff --git a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
index 35f2dde..935f7a9 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
@@ -236,6 +236,7 @@
         return [lb for i, lb in enumerate(lbs) if i not in indexes]
 
     @decorators.idempotent_id('6546ef3c-c0e2-46af-b892-f795f4d01119')
+    @decorators.attr(type='smoke')
     def test_load_balancer_list(self):
         """Tests load balancer list API and field filtering.
 
diff --git a/octavia_tempest_plugin/tests/api/v2/test_member.py b/octavia_tempest_plugin/tests/api/v2/test_member.py
deleted file mode 100644
index 18073cc..0000000
--- a/octavia_tempest_plugin/tests/api/v2/test_member.py
+++ /dev/null
@@ -1,924 +0,0 @@
-# Copyright 2018 GoDaddy
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import testtools
-import time
-from uuid import UUID
-
-from dateutil import parser
-from tempest import config
-from tempest.lib.common.utils import data_utils
-from tempest.lib import decorators
-from tempest.lib import exceptions
-
-from octavia_tempest_plugin.common import constants as const
-from octavia_tempest_plugin.tests import test_base
-from octavia_tempest_plugin.tests import waiters
-
-CONF = config.CONF
-
-
-class MemberAPITest(test_base.LoadBalancerBaseTest):
-    """Test the member object API."""
-
-    @classmethod
-    def resource_setup(cls):
-        """Setup resources needed by the tests."""
-        super(MemberAPITest, cls).resource_setup()
-
-        lb_name = data_utils.rand_name("lb_member_lb1_member")
-        lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
-                     const.NAME: lb_name}
-
-        cls._setup_lb_network_kwargs(lb_kwargs)
-
-        lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
-        cls.lb_id = lb[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_lb_client.cleanup_loadbalancer,
-            cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.lb_build_interval,
-                                CONF.load_balancer.lb_build_timeout)
-
-        listener_name = data_utils.rand_name("lb_member_listener1_member")
-        listener_kwargs = {
-            const.NAME: listener_name,
-            const.PROTOCOL: const.HTTP,
-            const.PROTOCOL_PORT: '80',
-            const.LOADBALANCER_ID: cls.lb_id,
-        }
-        listener = cls.mem_listener_client.create_listener(**listener_kwargs)
-        cls.listener_id = listener[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_listener_client.cleanup_listener,
-            cls.listener_id,
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-        pool_name = data_utils.rand_name("lb_member_pool1_member")
-        pool_kwargs = {
-            const.NAME: pool_name,
-            const.PROTOCOL: const.HTTP,
-            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
-            const.LISTENER_ID: cls.listener_id,
-        }
-
-        pool = cls.mem_pool_client.create_pool(**pool_kwargs)
-        cls.pool_id = pool[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_pool_client.cleanup_pool,
-            cls.pool_id,
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-    # Note: This test also covers basic member show API
-    @decorators.idempotent_id('0623aa1f-753d-44e7-afa1-017d274eace7')
-    def test_member_ipv4_create(self):
-        self._test_member_create(4)
-
-    # Note: This test also covers basic member show API
-    @decorators.idempotent_id('141944cc-5e2c-4e83-88f8-f61a6797c9b7')
-    @testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
-                          'IPv6 testing is disabled')
-    def test_member_ipv6_create(self):
-        self._test_member_create(6)
-
-    def _test_member_create(self, ip_version):
-        """Tests member create and basic show APIs.
-
-        * Tests that users without the loadbalancer member role cannot
-          create members.
-        * Create a fully populated member.
-        * Show member details.
-        * Validate the show reflects the requested values.
-        """
-        if ip_version == 4:
-            member_address = '192.0.2.1'
-            member_monitor_address = '192.0.2.2'
-        else:
-            member_address = '2001:db8:0:0:0:0:0:1'
-            member_monitor_address = '2001:db8:0:0:0:0:0:2'
-
-        member_name = data_utils.rand_name("lb_member_member1-create")
-        member_kwargs = {
-            const.NAME: member_name,
-            const.ADMIN_STATE_UP: True,
-            const.POOL_ID: self.pool_id,
-            const.ADDRESS: member_address,
-            const.PROTOCOL_PORT: 80,
-            const.WEIGHT: 50,
-            const.MONITOR_ADDRESS: member_monitor_address,
-            const.MONITOR_PORT: 8080,
-        }
-        if self.mem_member_client.is_version_supported(
-                self.api_version, '2.1'):
-            member_kwargs.update({
-                const.BACKUP: False,
-            })
-
-        if self.lb_member_vip_subnet:
-            member_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
-                const.ID]
-
-        # Test that a user without the load balancer role cannot
-        # create a member
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.member_client.create_member,
-                **member_kwargs)
-
-        member = self.mem_member_client.create_member(**member_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_member_client.cleanup_member,
-            member[const.ID], pool_id=self.pool_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        member = waiters.wait_for_status(
-            self.mem_member_client.show_member,
-            member[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout,
-            pool_id=self.pool_id)
-
-        parser.parse(member[const.CREATED_AT])
-        parser.parse(member[const.UPDATED_AT])
-        UUID(member[const.ID])
-        self.assertEqual(const.NO_MONITOR, member[const.OPERATING_STATUS])
-
-        equal_items = [const.NAME, const.ADMIN_STATE_UP, const.ADDRESS,
-                       const.PROTOCOL_PORT, const.WEIGHT,
-                       const.MONITOR_ADDRESS, const.MONITOR_PORT]
-
-        if self.mem_member_client.is_version_supported(
-                self.api_version, '2.1'):
-            equal_items.append(const.BACKUP)
-
-        if const.SUBNET_ID in member_kwargs:
-            equal_items.append(const.SUBNET_ID)
-        else:
-            self.assertIsNone(member.get(const.SUBNET_ID))
-
-        for item in equal_items:
-            self.assertEqual(member_kwargs[item], member[item])
-
-    @decorators.idempotent_id('9ce7ad78-915b-42ce-b0d8-44d88a929f3d')
-    def test_member_list(self):
-        """Tests member list API and field filtering.
-
-        * Create a clean pool.
-        * Create three members.
-        * Validates that other accounts cannot list the members.
-        * List the members using the default sort order.
-        * List the members using descending sort order.
-        * List the members using ascending sort order.
-        * List the members returning one field at a time.
-        * List the members returning two fields.
-        * List the members filtering to one of the three.
-        * List the members filtered, one field, and sorted.
-        """
-        pool_name = data_utils.rand_name("lb_member_pool2_member-list")
-        pool = self.mem_pool_client.create_pool(
-            name=pool_name, loadbalancer_id=self.lb_id,
-            protocol=const.HTTP, lb_algorithm=const.LB_ALGORITHM_ROUND_ROBIN)
-        pool_id = pool[const.ID]
-        self.addCleanup(
-            self.mem_pool_client.cleanup_pool, pool_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                self.lb_id,
-                                const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-        member1_name = data_utils.rand_name("lb_member_member2-list")
-        member1_kwargs = {
-            const.POOL_ID: pool_id,
-            const.NAME: member1_name,
-            const.ADMIN_STATE_UP: True,
-            const.ADDRESS: '192.0.2.1',
-            const.PROTOCOL_PORT: 101,
-        }
-        member1 = self.mem_member_client.create_member(
-            **member1_kwargs)
-        self.addCleanup(
-            self.mem_member_client.cleanup_member,
-            member1[const.ID], pool_id=pool_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-        member1 = waiters.wait_for_status(
-            self.mem_member_client.show_member, member1[const.ID],
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout,
-            pool_id=pool_id)
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                self.lb_id,
-                                const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.check_interval,
-                                CONF.load_balancer.check_timeout)
-        # Time resolution for created_at is only to the second, and we need to
-        # ensure that each object has a distinct creation time. Delaying one
-        # second is both a simple and a reliable way to accomplish this.
-        time.sleep(1)
-
-        member2_name = data_utils.rand_name("lb_member_member1-list")
-        member2_kwargs = {
-            const.POOL_ID: pool_id,
-            const.NAME: member2_name,
-            const.ADMIN_STATE_UP: True,
-            const.ADDRESS: '192.0.2.1',
-            const.PROTOCOL_PORT: 100,
-        }
-        member2 = self.mem_member_client.create_member(
-            **member2_kwargs)
-        self.addCleanup(
-            self.mem_member_client.cleanup_member,
-            member2[const.ID], pool_id=pool_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-        member2 = waiters.wait_for_status(
-            self.mem_member_client.show_member, member2[const.ID],
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout,
-            pool_id=pool_id)
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                self.lb_id,
-                                const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.check_interval,
-                                CONF.load_balancer.check_timeout)
-        # Time resolution for created_at is only to the second, and we need to
-        # ensure that each object has a distinct creation time. Delaying one
-        # second is both a simple and a reliable way to accomplish this.
-        time.sleep(1)
-
-        member3_name = data_utils.rand_name("lb_member_member3-list")
-        member3_kwargs = {
-            const.POOL_ID: pool_id,
-            const.NAME: member3_name,
-            const.ADMIN_STATE_UP: False,
-            const.ADDRESS: '192.0.2.1',
-            const.PROTOCOL_PORT: 102,
-        }
-        member3 = self.mem_member_client.create_member(
-            **member3_kwargs)
-        self.addCleanup(
-            self.mem_member_client.cleanup_member,
-            member3[const.ID], pool_id=pool_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-        member3 = waiters.wait_for_status(
-            self.mem_member_client.show_member, member3[const.ID],
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout,
-            pool_id=pool_id)
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                self.lb_id,
-                                const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.check_interval,
-                                CONF.load_balancer.check_timeout)
-
-        # Test that a different user cannot list members
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            member2_client = self.os_roles_lb_member2.member_client
-            self.assertRaises(
-                exceptions.Forbidden,
-                member2_client.list_members,
-                pool_id)
-
-        # Test that a user without the lb member role cannot list load
-        # balancers
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.member_client.list_members,
-                pool_id)
-
-        # Check the default sort order, created_at
-        members = self.mem_member_client.list_members(pool_id)
-        self.assertEqual(member1[const.PROTOCOL_PORT],
-                         members[0][const.PROTOCOL_PORT])
-        self.assertEqual(member2[const.PROTOCOL_PORT],
-                         members[1][const.PROTOCOL_PORT])
-        self.assertEqual(member3[const.PROTOCOL_PORT],
-                         members[2][const.PROTOCOL_PORT])
-
-        # Test sort descending by protocol_port
-        members = self.mem_member_client.list_members(
-            pool_id, query_params='{sort}={descr}:{desc}'.format(
-                sort=const.SORT, descr=const.PROTOCOL_PORT, desc=const.DESC))
-        self.assertEqual(member1[const.PROTOCOL_PORT],
-                         members[1][const.PROTOCOL_PORT])
-        self.assertEqual(member2[const.PROTOCOL_PORT],
-                         members[2][const.PROTOCOL_PORT])
-        self.assertEqual(member3[const.PROTOCOL_PORT],
-                         members[0][const.PROTOCOL_PORT])
-
-        # Test sort ascending by protocol_port
-        members = self.mem_member_client.list_members(
-            pool_id, query_params='{sort}={descr}:{asc}'.format(
-                sort=const.SORT, descr=const.PROTOCOL_PORT, asc=const.ASC))
-        self.assertEqual(member1[const.PROTOCOL_PORT],
-                         members[1][const.PROTOCOL_PORT])
-        self.assertEqual(member2[const.PROTOCOL_PORT],
-                         members[0][const.PROTOCOL_PORT])
-        self.assertEqual(member3[const.PROTOCOL_PORT],
-                         members[2][const.PROTOCOL_PORT])
-
-        # Test fields
-        show_member_response_fields = const.SHOW_MEMBER_RESPONSE_FIELDS
-        if self.mem_member_client.is_version_supported(
-                self.api_version, '2.1'):
-            show_member_response_fields.append('backup')
-        for field in show_member_response_fields:
-            members = self.mem_member_client.list_members(
-                pool_id, query_params='{fields}={field}'.format(
-                    fields=const.FIELDS, field=field))
-            self.assertEqual(1, len(members[0]))
-            self.assertEqual(member1[field], members[0][field])
-            self.assertEqual(member2[field], members[1][field])
-            self.assertEqual(member3[field], members[2][field])
-
-        # Test multiple fields at the same time
-        members = self.mem_member_client.list_members(
-            pool_id,
-            query_params='{fields}={admin}&{fields}={created}'.format(
-                fields=const.FIELDS, admin=const.ADMIN_STATE_UP,
-                created=const.CREATED_AT))
-        self.assertEqual(2, len(members[0]))
-        self.assertTrue(members[0][const.ADMIN_STATE_UP])
-        parser.parse(members[0][const.CREATED_AT])
-        self.assertTrue(members[1][const.ADMIN_STATE_UP])
-        parser.parse(members[1][const.CREATED_AT])
-        self.assertFalse(members[2][const.ADMIN_STATE_UP])
-        parser.parse(members[2][const.CREATED_AT])
-
-        # Test filtering
-        members = self.mem_member_client.list_members(
-            pool_id,
-            query_params='{desc}={lb_desc}'.format(
-                desc=const.PROTOCOL_PORT,
-                lb_desc=member2[const.PROTOCOL_PORT]))
-        self.assertEqual(1, len(members))
-        self.assertEqual(member2[const.PROTOCOL_PORT],
-                         members[0][const.PROTOCOL_PORT])
-
-        # Test combined params
-        members = self.mem_member_client.list_members(
-            pool_id,
-            query_params='{admin}={true}&'
-                         '{fields}={descr}&{fields}={id}&'
-                         '{sort}={descr}:{desc}'.format(
-                             admin=const.ADMIN_STATE_UP,
-                             true=const.ADMIN_STATE_UP_TRUE,
-                             fields=const.FIELDS, descr=const.PROTOCOL_PORT,
-                             id=const.ID, sort=const.SORT, desc=const.DESC))
-        # Should get two members
-        self.assertEqual(2, len(members))
-        # members should have two fields
-        self.assertEqual(2, len(members[0]))
-        # Should be in descending order
-        self.assertEqual(member2[const.PROTOCOL_PORT],
-                         members[1][const.PROTOCOL_PORT])
-        self.assertEqual(member1[const.PROTOCOL_PORT],
-                         members[0][const.PROTOCOL_PORT])
-
-    @decorators.idempotent_id('7674ae04-7e92-44ef-9adf-40718d7ec705')
-    def test_member_show(self):
-        """Tests member show API.
-
-        * Create a fully populated member.
-        * Show member details.
-        * Validate the show reflects the requested values.
-        * Validates that other accounts cannot see the member.
-        """
-        member_name = data_utils.rand_name("lb_member_member1-show")
-        member_kwargs = {
-            const.NAME: member_name,
-            const.ADMIN_STATE_UP: True,
-            const.POOL_ID: self.pool_id,
-            const.ADDRESS: '192.0.2.1',
-            const.PROTOCOL_PORT: 81,
-            const.WEIGHT: 50,
-            const.MONITOR_ADDRESS: '192.0.2.2',
-            const.MONITOR_PORT: 8080,
-        }
-        if self.mem_member_client.is_version_supported(
-                self.api_version, '2.1'):
-            member_kwargs.update({
-                const.BACKUP: False,
-            })
-        if self.lb_member_vip_subnet:
-            member_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
-                const.ID]
-
-        member = self.mem_member_client.create_member(**member_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_member_client.cleanup_member,
-            member[const.ID], pool_id=self.pool_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        member = waiters.wait_for_status(
-            self.mem_member_client.show_member,
-            member[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout,
-            pool_id=self.pool_id)
-
-        parser.parse(member[const.CREATED_AT])
-        parser.parse(member[const.UPDATED_AT])
-        UUID(member[const.ID])
-        self.assertEqual(const.NO_MONITOR, member[const.OPERATING_STATUS])
-
-        equal_items = [const.NAME, const.ADMIN_STATE_UP, const.ADDRESS,
-                       const.PROTOCOL_PORT, const.WEIGHT,
-                       const.MONITOR_ADDRESS, const.MONITOR_PORT]
-
-        if self.mem_member_client.is_version_supported(
-                self.api_version, '2.1'):
-            equal_items.append(const.BACKUP)
-
-        if const.SUBNET_ID in member_kwargs:
-            equal_items.append(const.SUBNET_ID)
-        else:
-            self.assertIsNone(member.get(const.SUBNET_ID))
-
-        for item in equal_items:
-            self.assertEqual(member_kwargs[item], member[item])
-
-        # Test that a user with lb_admin role can see the member
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            member_client = self.os_roles_lb_admin.member_client
-            member_adm = member_client.show_member(
-                member[const.ID], pool_id=self.pool_id)
-            self.assertEqual(member_name, member_adm[const.NAME])
-
-        # Test that a user with cloud admin role can see the member
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            adm = self.os_admin.member_client.show_member(
-                member[const.ID], pool_id=self.pool_id)
-            self.assertEqual(member_name, adm[const.NAME])
-
-        # Test that a different user, with load balancer member role, cannot
-        # see this member
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            member2_client = self.os_roles_lb_member2.member_client
-            self.assertRaises(exceptions.Forbidden,
-                              member2_client.show_member,
-                              member[const.ID], pool_id=self.pool_id)
-
-        # Test that a user, without the load balancer member role, cannot
-        # show members
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.member_client.show_member,
-                member[const.ID], pool_id=self.pool_id)
-
-    @decorators.idempotent_id('c07572b8-e853-48f3-a8ea-37fc293a4724')
-    def test_member_update(self):
-        """Tests member show API and field filtering.
-
-        * Create a fully populated member.
-        * Show member details.
-        * Validate the show reflects the initial values.
-        * Validates that other accounts cannot update the member.
-        * Update the member details.
-        * Show member details.
-        * Validate the show reflects the initial values.
-        """
-        member_name = data_utils.rand_name("lb_member_member1-update")
-        member_kwargs = {
-            const.NAME: member_name,
-            const.ADMIN_STATE_UP: False,
-            const.POOL_ID: self.pool_id,
-            const.ADDRESS: '192.0.2.1',
-            const.PROTOCOL_PORT: 82,
-            const.WEIGHT: 50,
-            const.MONITOR_ADDRESS: '192.0.2.2',
-            const.MONITOR_PORT: 8080,
-        }
-        if self.mem_member_client.is_version_supported(
-                self.api_version, '2.1'):
-            member_kwargs.update({
-                const.BACKUP: False,
-            })
-
-        if self.lb_member_vip_subnet:
-            member_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
-                const.ID]
-
-        member = self.mem_member_client.create_member(**member_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_member_client.cleanup_member,
-            member[const.ID], pool_id=self.pool_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        member = waiters.wait_for_status(
-            self.mem_member_client.show_member,
-            member[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout,
-            pool_id=self.pool_id)
-        if not CONF.load_balancer.test_with_noop:
-            member = waiters.wait_for_status(
-                self.mem_member_client.show_member,
-                member[const.ID], const.OPERATING_STATUS,
-                const.OFFLINE,
-                CONF.load_balancer.build_interval,
-                CONF.load_balancer.build_timeout,
-                pool_id=self.pool_id)
-
-        parser.parse(member[const.CREATED_AT])
-        parser.parse(member[const.UPDATED_AT])
-        UUID(member[const.ID])
-
-        equal_items = [const.NAME, const.ADMIN_STATE_UP, const.ADDRESS,
-                       const.PROTOCOL_PORT, const.WEIGHT,
-                       const.MONITOR_ADDRESS, const.MONITOR_PORT]
-
-        if self.mem_member_client.is_version_supported(
-                self.api_version, '2.1'):
-            equal_items.append(const.BACKUP)
-
-        if const.SUBNET_ID in member_kwargs:
-            equal_items.append(const.SUBNET_ID)
-        else:
-            self.assertIsNone(member.get(const.SUBNET_ID))
-
-        for item in equal_items:
-            self.assertEqual(member_kwargs[item], member[item])
-
-        if CONF.load_balancer.test_with_noop:
-            # Operating status with noop will stay in NO_MONITOR
-            self.assertEqual(const.NO_MONITOR, member[const.OPERATING_STATUS])
-        else:
-            # Operating status will be OFFLINE while admin_state_up = False
-            self.assertEqual(const.OFFLINE, member[const.OPERATING_STATUS])
-
-        # Test that a user, without the load balancer member role, cannot
-        # use this command
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.member_client.update_member,
-                member[const.ID], pool_id=self.pool_id, admin_state_up=True)
-
-        # Assert we didn't go into PENDING_*
-        member_check = self.mem_member_client.show_member(
-            member[const.ID], pool_id=self.pool_id)
-        self.assertEqual(const.ACTIVE,
-                         member_check[const.PROVISIONING_STATUS])
-        self.assertEqual(member_kwargs[const.ADMIN_STATE_UP],
-                         member_check[const.ADMIN_STATE_UP])
-
-        # Test that a user, without the load balancer member role, cannot
-        # update this member
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            member2_client = self.os_roles_lb_member2.member_client
-            self.assertRaises(exceptions.Forbidden,
-                              member2_client.update_member,
-                              member[const.ID], pool_id=self.pool_id,
-                              admin_state_up=True)
-
-        # Assert we didn't go into PENDING_*
-        member_check = self.mem_member_client.show_member(
-            member[const.ID], pool_id=self.pool_id)
-        self.assertEqual(const.ACTIVE,
-                         member_check[const.PROVISIONING_STATUS])
-        self.assertEqual(member_kwargs[const.ADMIN_STATE_UP],
-                         member_check[const.ADMIN_STATE_UP])
-
-        new_name = data_utils.rand_name("lb_member_member1-UPDATED")
-        member_update_kwargs = {
-            const.POOL_ID: member_kwargs[const.POOL_ID],
-            const.NAME: new_name,
-            const.ADMIN_STATE_UP: not member[const.ADMIN_STATE_UP],
-            const.WEIGHT: member[const.WEIGHT] + 1,
-            const.MONITOR_ADDRESS: '192.0.2.3',
-            const.MONITOR_PORT: member[const.MONITOR_PORT] + 1,
-        }
-        if self.mem_member_client.is_version_supported(
-                self.api_version, '2.1'):
-            member_update_kwargs.update({
-                const.BACKUP: not member[const.BACKUP]
-            })
-
-        member = self.mem_member_client.update_member(
-            member[const.ID], **member_update_kwargs)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        member = waiters.wait_for_status(
-            self.mem_member_client.show_member,
-            member[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout,
-            pool_id=self.pool_id)
-        if not CONF.load_balancer.test_with_noop:
-            member = waiters.wait_for_status(
-                self.mem_member_client.show_member,
-                member[const.ID], const.OPERATING_STATUS,
-                const.NO_MONITOR,
-                CONF.load_balancer.build_interval,
-                CONF.load_balancer.build_timeout,
-                pool_id=self.pool_id)
-
-        # Operating status will be NO_MONITOR regardless of noop
-        self.assertEqual(const.NO_MONITOR, member[const.OPERATING_STATUS])
-
-        # Test changed items
-        equal_items = [const.NAME, const.ADMIN_STATE_UP, const.WEIGHT,
-                       const.MONITOR_ADDRESS, const.MONITOR_PORT]
-
-        if self.mem_member_client.is_version_supported(
-                self.api_version, '2.1'):
-            equal_items.append(const.BACKUP)
-
-        for item in equal_items:
-            self.assertEqual(member_update_kwargs[item], member[item])
-
-        # Test unchanged items
-        equal_items = [const.ADDRESS, const.PROTOCOL_PORT]
-        if const.SUBNET_ID in member_kwargs:
-            equal_items.append(const.SUBNET_ID)
-        else:
-            self.assertIsNone(member.get(const.SUBNET_ID))
-
-        for item in equal_items:
-            self.assertEqual(member_kwargs[item], member[item])
-
-    @decorators.idempotent_id('83e0a9f2-491f-46a8-b3ce-6969d70a4e9f')
-    def test_member_batch_update(self):
-        """Tests member batch update.
-
-        * Create two members.
-        * Batch update the members so one is deleted, created, and updated
-        * Validate the member list is correct.
-        """
-        pool_name = data_utils.rand_name("lb_member_pool3_member-batch")
-        pool = self.mem_pool_client.create_pool(
-            name=pool_name, loadbalancer_id=self.lb_id,
-            protocol=const.HTTP, lb_algorithm=const.LB_ALGORITHM_ROUND_ROBIN)
-        pool_id = pool[const.ID]
-        self.addClassResourceCleanup(
-            self.mem_pool_client.cleanup_pool, pool_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                self.lb_id,
-                                const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-        member1_name = data_utils.rand_name("lb_member_member1-batch")
-        member1_kwargs = {
-            const.NAME: member1_name,
-            const.ADMIN_STATE_UP: True,
-            const.POOL_ID: pool_id,
-            const.ADDRESS: '192.0.2.1',
-            const.PROTOCOL_PORT: 80,
-            const.WEIGHT: 50,
-            const.MONITOR_ADDRESS: '192.0.2.2',
-            const.MONITOR_PORT: 8080,
-        }
-        if self.mem_member_client.is_version_supported(
-                self.api_version, '2.1'):
-            member1_kwargs.update({
-                const.BACKUP: False,
-            })
-
-        if self.lb_member_vip_subnet:
-            member1_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
-                const.ID]
-
-        member1 = self.mem_member_client.create_member(**member1_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_member_client.cleanup_member,
-            member1[const.ID], pool_id=pool_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                self.lb_id,
-                                const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-        member2_name = data_utils.rand_name("lb_member_member2-batch")
-        member2_kwargs = {
-            const.NAME: member2_name,
-            const.ADMIN_STATE_UP: True,
-            const.POOL_ID: pool_id,
-            const.ADDRESS: '192.0.2.3',
-            const.PROTOCOL_PORT: 81,
-            const.WEIGHT: 51,
-            const.MONITOR_ADDRESS: '192.0.2.4',
-            const.MONITOR_PORT: 8081,
-        }
-        if self.mem_member_client.is_version_supported(
-                self.api_version, '2.1'):
-            member2_kwargs.update({
-                const.BACKUP: True,
-            })
-
-        if self.lb_member_vip_subnet:
-            member2_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
-                const.ID]
-
-        member2 = self.mem_member_client.create_member(**member2_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_member_client.cleanup_member,
-            member2[const.ID], pool_id=pool_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                self.lb_id,
-                                const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-        member3_name = data_utils.rand_name("lb_member_member3-batch")
-        member3_kwargs = {
-            const.NAME: member3_name,
-            const.ADMIN_STATE_UP: True,
-            const.ADDRESS: '192.0.2.5',
-            const.PROTOCOL_PORT: 82,
-            const.WEIGHT: 52,
-            const.MONITOR_ADDRESS: '192.0.2.6',
-            const.MONITOR_PORT: 8082,
-        }
-        if self.mem_member_client.is_version_supported(
-                self.api_version, '2.1'):
-            member3_kwargs.update({
-                const.BACKUP: True,
-            })
-
-        if self.lb_member_vip_subnet:
-            member3_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
-                const.ID]
-
-        member2_name_update = data_utils.rand_name("lb_member_member2-new")
-        member2_kwargs[const.NAME] = member2_name_update
-        member2_kwargs.pop(const.POOL_ID)
-        batch_update_list = [member2_kwargs, member3_kwargs]
-
-        # Test that a user, without the load balancer member role, cannot
-        # use this command
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.member_client.update_members,
-                pool_id=self.pool_id, members_list=batch_update_list)
-
-        # Assert we didn't go into PENDING_*
-        member_check = self.mem_member_client.show_member(
-            member2[const.ID], pool_id=pool_id)
-        self.assertEqual(const.ACTIVE, member_check[const.PROVISIONING_STATUS])
-        self.assertEqual(member2_name, member_check[const.NAME])
-
-        self.mem_member_client.update_members(
-            pool_id=pool_id, members_list=batch_update_list)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer,
-            self.lb_id, const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-
-        members = self.mem_member_client.list_members(
-            pool_id,
-            query_params='{sort}={port}:{asc}'.format(
-                sort=const.SORT, port=const.PROTOCOL_PORT, asc=const.ASC))
-        for m in members:
-            self.addClassResourceCleanup(
-                self.mem_member_client.cleanup_member,
-                m[const.ID], pool_id=pool_id,
-                lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        # We should have two members: member2 and member3, in that order
-        self.assertEqual(2, len(members))
-        # Member2 is the same ID
-        self.assertEqual(member2[const.ID], members[0][const.ID])
-        # Member3 will have a different ID (not member1)
-        self.assertNotEqual(member1[const.ID], members[1][const.ID])
-
-        # Member2's name should be updated, and member3 should exist
-        self.assertEqual(member2_name_update, members[0][const.NAME])
-        self.assertEqual(member3_name, members[1][const.NAME])
-
-    @decorators.idempotent_id('f129ba5e-a16e-4178-924f-6a9c5b8b1589')
-    def test_member_delete(self):
-        """Tests member create and delete APIs.
-
-        * Creates a member.
-        * Validates that other accounts cannot delete the member
-        * Deletes the member.
-        * Validates the member is in the DELETED state.
-        """
-        member_name = data_utils.rand_name("lb_member_member1-delete")
-        member_kwargs = {
-            const.POOL_ID: self.pool_id,
-            const.NAME: member_name,
-            const.ADDRESS: '192.0.2.1',
-            const.PROTOCOL_PORT: 83,
-        }
-        member = self.mem_member_client.create_member(**member_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_member_client.cleanup_member,
-            member[const.ID], pool_id=self.pool_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer,
-            self.lb_id, const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-
-        # Test that a user without the load balancer role cannot
-        # delete this member
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.member_client.delete_member,
-                member[const.ID], pool_id=self.pool_id)
-
-        # Test that a different user, with the load balancer member role
-        # cannot delete this member
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            member2_client = self.os_roles_lb_member2.member_client
-            self.assertRaises(exceptions.Forbidden,
-                              member2_client.delete_member,
-                              member[const.ID], pool_id=self.pool_id)
-
-        self.mem_member_client.delete_member(member[const.ID],
-                                             pool_id=self.pool_id)
-
-        waiters.wait_for_deleted_status_or_not_found(
-            self.mem_member_client.show_member, member[const.ID],
-            const.PROVISIONING_STATUS,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout,
-            pool_id=self.pool_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer,
-            self.lb_id, const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout)
diff --git a/octavia_tempest_plugin/tests/api/v2/test_pool.py b/octavia_tempest_plugin/tests/api/v2/test_pool.py
deleted file mode 100644
index 28b95b6..0000000
--- a/octavia_tempest_plugin/tests/api/v2/test_pool.py
+++ /dev/null
@@ -1,720 +0,0 @@
-# Copyright 2018 GoDaddy
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import time
-from uuid import UUID
-
-from dateutil import parser
-from tempest import config
-from tempest.lib.common.utils import data_utils
-from tempest.lib import decorators
-from tempest.lib import exceptions
-
-from octavia_tempest_plugin.common import constants as const
-from octavia_tempest_plugin.tests import test_base
-from octavia_tempest_plugin.tests import waiters
-
-CONF = config.CONF
-
-
-class PoolAPITest(test_base.LoadBalancerBaseTest):
-    """Test the pool object API."""
-
-    @classmethod
-    def resource_setup(cls):
-        """Setup resources needed by the tests."""
-        super(PoolAPITest, cls).resource_setup()
-
-        lb_name = data_utils.rand_name("lb_member_lb1_pool")
-        lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
-                     const.NAME: lb_name}
-
-        cls._setup_lb_network_kwargs(lb_kwargs)
-
-        lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
-        cls.lb_id = lb[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_lb_client.cleanup_loadbalancer,
-            cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.lb_build_interval,
-                                CONF.load_balancer.lb_build_timeout)
-
-        listener_name = data_utils.rand_name("lb_member_listener1_pool")
-        listener_kwargs = {
-            const.NAME: listener_name,
-            const.PROTOCOL: const.HTTP,
-            const.PROTOCOL_PORT: '80',
-            const.LOADBALANCER_ID: cls.lb_id,
-        }
-        listener = cls.mem_listener_client.create_listener(**listener_kwargs)
-        cls.listener_id = listener[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_listener_client.cleanup_listener,
-            cls.listener_id,
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-    @decorators.idempotent_id('7587fe48-87ba-4538-9f03-190911f100ff')
-    def test_pool_create_standalone(self):
-        self._test_pool_create(has_listener=False)
-
-    @decorators.idempotent_id('c9c0df79-f07e-428c-ae57-b9d4078eec79')
-    def test_pool_create_with_listener(self):
-        self._test_pool_create(has_listener=True)
-
-    def _test_pool_create(self, has_listener):
-        """Tests pool create and basic show APIs.
-
-        * Tests that users without the loadbalancer member role cannot
-          create pools.
-        * Create a fully populated pool.
-        * Show pool details.
-        * Validate the show reflects the requested values.
-        """
-        pool_name = data_utils.rand_name("lb_member_pool1-create")
-        pool_description = data_utils.arbitrary_string(size=255)
-        pool_sp_cookie_name = 'my_cookie'
-        pool_kwargs = {
-            const.NAME: pool_name,
-            const.DESCRIPTION: pool_description,
-            const.ADMIN_STATE_UP: True,
-            const.PROTOCOL: const.HTTP,
-            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
-            const.SESSION_PERSISTENCE: {
-                const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
-                const.COOKIE_NAME: pool_sp_cookie_name,
-            },
-        }
-        if has_listener:
-            pool_kwargs[const.LISTENER_ID] = self.listener_id
-        else:
-            pool_kwargs[const.LOADBALANCER_ID] = self.lb_id
-
-        # Test that a user without the load balancer role cannot
-        # create a pool
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.pool_client.create_pool,
-                **pool_kwargs)
-
-        pool = self.mem_pool_client.create_pool(**pool_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_pool_client.cleanup_pool,
-            pool[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        pool = waiters.wait_for_status(
-            self.mem_pool_client.show_pool,
-            pool[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        if has_listener and not CONF.load_balancer.test_with_noop:
-            pool = waiters.wait_for_status(
-                self.mem_pool_client.show_pool,
-                pool[const.ID], const.OPERATING_STATUS,
-                const.ONLINE,
-                CONF.load_balancer.build_interval,
-                CONF.load_balancer.build_timeout)
-
-        self.assertEqual(pool_name, pool[const.NAME])
-        self.assertEqual(pool_description, pool[const.DESCRIPTION])
-        self.assertTrue(pool[const.ADMIN_STATE_UP])
-        parser.parse(pool[const.CREATED_AT])
-        parser.parse(pool[const.UPDATED_AT])
-        UUID(pool[const.ID])
-        # Operating status for a pool without members will be:
-        if has_listener and not CONF.load_balancer.test_with_noop:
-            # ONLINE if it is attached to a listener and is a live test
-            self.assertEqual(const.ONLINE, pool[const.OPERATING_STATUS])
-        else:
-            # OFFLINE if it is just on the LB directly or is in noop mode
-            self.assertEqual(const.OFFLINE, pool[const.OPERATING_STATUS])
-        self.assertEqual(const.HTTP, pool[const.PROTOCOL])
-        self.assertEqual(1, len(pool[const.LOADBALANCERS]))
-        self.assertEqual(self.lb_id, pool[const.LOADBALANCERS][0][const.ID])
-        if has_listener:
-            self.assertEqual(1, len(pool[const.LISTENERS]))
-            self.assertEqual(self.listener_id,
-                             pool[const.LISTENERS][0][const.ID])
-        else:
-            self.assertEmpty(pool[const.LISTENERS])
-        self.assertEqual(const.LB_ALGORITHM_ROUND_ROBIN,
-                         pool[const.LB_ALGORITHM])
-        self.assertIsNotNone(pool.get(const.SESSION_PERSISTENCE))
-        self.assertEqual(const.SESSION_PERSISTENCE_APP_COOKIE,
-                         pool[const.SESSION_PERSISTENCE][const.TYPE])
-        self.assertEqual(pool_sp_cookie_name,
-                         pool[const.SESSION_PERSISTENCE][const.COOKIE_NAME])
-
-    @decorators.idempotent_id('6959a32e-fb34-4f3e-be68-8880c6450016')
-    def test_pool_list(self):
-        """Tests pool list API and field filtering.
-
-        * Create a clean loadbalancer.
-        * Create three pools.
-        * Validates that other accounts cannot list the pools.
-        * List the pools using the default sort order.
-        * List the pools using descending sort order.
-        * List the pools using ascending sort order.
-        * List the pools returning one field at a time.
-        * List the pools returning two fields.
-        * List the pools filtering to one of the three.
-        * List the pools filtered, one field, and sorted.
-        """
-        lb_name = data_utils.rand_name("lb_member_lb2_pool-list")
-        lb = self.mem_lb_client.create_loadbalancer(
-            name=lb_name,
-            vip_network_id=self.lb_member_vip_net[const.ID])
-        lb_id = lb[const.ID]
-        self.addCleanup(
-            self.mem_lb_client.cleanup_loadbalancer,
-            lb_id)
-
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                lb_id,
-                                const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.lb_build_interval,
-                                CONF.load_balancer.lb_build_timeout)
-
-        pool1_name = data_utils.rand_name("lb_member_pool2-list")
-        pool1_desc = 'B'
-        pool1_sp_cookie_name = 'my_cookie1'
-        pool1_kwargs = {
-            const.NAME: pool1_name,
-            const.DESCRIPTION: pool1_desc,
-            const.ADMIN_STATE_UP: True,
-            const.PROTOCOL: const.HTTP,
-            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
-            const.SESSION_PERSISTENCE: {
-                const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
-                const.COOKIE_NAME: pool1_sp_cookie_name,
-            },
-            const.LOADBALANCER_ID: lb_id,
-        }
-        pool1 = self.mem_pool_client.create_pool(
-            **pool1_kwargs)
-        self.addCleanup(
-            self.mem_pool_client.cleanup_pool,
-            pool1[const.ID],
-            lb_client=self.mem_lb_client, lb_id=lb_id)
-        pool1 = waiters.wait_for_status(
-            self.mem_pool_client.show_pool, pool1[const.ID],
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                lb_id,
-                                const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-        # Time resolution for created_at is only to the second, and we need to
-        # ensure that each object has a distinct creation time. Delaying one
-        # second is both a simple and a reliable way to accomplish this.
-        time.sleep(1)
-
-        pool2_name = data_utils.rand_name("lb_member_pool1-list")
-        pool2_desc = 'A'
-        pool2_sp_cookie_name = 'my_cookie2'
-        pool2_kwargs = {
-            const.NAME: pool2_name,
-            const.DESCRIPTION: pool2_desc,
-            const.ADMIN_STATE_UP: True,
-            const.PROTOCOL: const.HTTP,
-            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
-            const.SESSION_PERSISTENCE: {
-                const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
-                const.COOKIE_NAME: pool2_sp_cookie_name,
-            },
-            const.LOADBALANCER_ID: lb_id,
-        }
-        pool2 = self.mem_pool_client.create_pool(
-            **pool2_kwargs)
-        self.addCleanup(
-            self.mem_pool_client.cleanup_pool,
-            pool2[const.ID],
-            lb_client=self.mem_lb_client, lb_id=lb_id)
-        pool2 = waiters.wait_for_status(
-            self.mem_pool_client.show_pool, pool2[const.ID],
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                lb_id,
-                                const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-        # Time resolution for created_at is only to the second, and we need to
-        # ensure that each object has a distinct creation time. Delaying one
-        # second is both a simple and a reliable way to accomplish this.
-        time.sleep(1)
-
-        pool3_name = data_utils.rand_name("lb_member_pool3-list")
-        pool3_desc = 'C'
-        pool3_kwargs = {
-            const.NAME: pool3_name,
-            const.DESCRIPTION: pool3_desc,
-            const.ADMIN_STATE_UP: False,
-            const.PROTOCOL: const.HTTP,
-            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
-            # No session persistence, just so there's one test for that
-            const.LOADBALANCER_ID: lb_id,
-        }
-        pool3 = self.mem_pool_client.create_pool(
-            **pool3_kwargs)
-        self.addCleanup(
-            self.mem_pool_client.cleanup_pool,
-            pool3[const.ID],
-            lb_client=self.mem_lb_client, lb_id=lb_id)
-        pool3 = waiters.wait_for_status(
-            self.mem_pool_client.show_pool, pool3[const.ID],
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                lb_id,
-                                const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-        # Test that a different user cannot list pools
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            member2_client = self.os_roles_lb_member2.pool_client
-            primary = member2_client.list_pools(
-                query_params='loadbalancer_id={lb_id}'.format(lb_id=lb_id))
-            self.assertEqual(0, len(primary))
-
-        # Test that a user without the lb member role cannot list load
-        # balancers
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.pool_client.list_pools)
-
-        # Check the default sort order, created_at
-        pools = self.mem_pool_client.list_pools(
-            query_params='loadbalancer_id={lb_id}'.format(lb_id=lb_id))
-        self.assertEqual(pool1[const.DESCRIPTION],
-                         pools[0][const.DESCRIPTION])
-        self.assertEqual(pool2[const.DESCRIPTION],
-                         pools[1][const.DESCRIPTION])
-        self.assertEqual(pool3[const.DESCRIPTION],
-                         pools[2][const.DESCRIPTION])
-
-        # Test sort descending by description
-        pools = self.mem_pool_client.list_pools(
-            query_params='loadbalancer_id={lb_id}&{sort}={descr}:{desc}'
-                         .format(lb_id=lb_id, sort=const.SORT,
-                                 descr=const.DESCRIPTION, desc=const.DESC))
-        self.assertEqual(pool1[const.DESCRIPTION],
-                         pools[1][const.DESCRIPTION])
-        self.assertEqual(pool2[const.DESCRIPTION],
-                         pools[2][const.DESCRIPTION])
-        self.assertEqual(pool3[const.DESCRIPTION],
-                         pools[0][const.DESCRIPTION])
-
-        # Test sort ascending by description
-        pools = self.mem_pool_client.list_pools(
-            query_params='loadbalancer_id={lb_id}&{sort}={descr}:{asc}'
-                         .format(lb_id=lb_id, sort=const.SORT,
-                                 descr=const.DESCRIPTION, asc=const.ASC))
-        self.assertEqual(pool1[const.DESCRIPTION],
-                         pools[1][const.DESCRIPTION])
-        self.assertEqual(pool2[const.DESCRIPTION],
-                         pools[0][const.DESCRIPTION])
-        self.assertEqual(pool3[const.DESCRIPTION],
-                         pools[2][const.DESCRIPTION])
-
-        # Test fields
-        for field in const.SHOW_POOL_RESPONSE_FIELDS:
-            pools = self.mem_pool_client.list_pools(
-                query_params='loadbalancer_id={lb_id}&{fields}={field}'
-                             .format(lb_id=lb_id,
-                                     fields=const.FIELDS, field=field))
-            self.assertEqual(1, len(pools[0]))
-            self.assertEqual(pool1[field], pools[0][field])
-            self.assertEqual(pool2[field], pools[1][field])
-            self.assertEqual(pool3[field], pools[2][field])
-
-        # Test multiple fields at the same time
-        pools = self.mem_pool_client.list_pools(
-            query_params='loadbalancer_id={lb_id}&{fields}={admin}&'
-                         '{fields}={created}'.format(
-                             lb_id=lb_id, fields=const.FIELDS,
-                             admin=const.ADMIN_STATE_UP,
-                             created=const.CREATED_AT))
-        self.assertEqual(2, len(pools[0]))
-        self.assertTrue(pools[0][const.ADMIN_STATE_UP])
-        parser.parse(pools[0][const.CREATED_AT])
-        self.assertTrue(pools[1][const.ADMIN_STATE_UP])
-        parser.parse(pools[1][const.CREATED_AT])
-        self.assertFalse(pools[2][const.ADMIN_STATE_UP])
-        parser.parse(pools[2][const.CREATED_AT])
-
-        # Test filtering
-        pools = self.mem_pool_client.list_pools(
-            query_params='loadbalancer_id={lb_id}&{desc}={lb_desc}'.format(
-                lb_id=lb_id, desc=const.DESCRIPTION,
-                lb_desc=pool2[const.DESCRIPTION]))
-        self.assertEqual(1, len(pools))
-        self.assertEqual(pool2[const.DESCRIPTION],
-                         pools[0][const.DESCRIPTION])
-
-        # Test combined params
-        pools = self.mem_pool_client.list_pools(
-            query_params='loadbalancer_id={lb_id}&{admin}={true}&'
-                         '{fields}={descr}&{fields}={id}&'
-                         '{sort}={descr}:{desc}'.format(
-                             lb_id=lb_id, admin=const.ADMIN_STATE_UP,
-                             true=const.ADMIN_STATE_UP_TRUE,
-                             fields=const.FIELDS, descr=const.DESCRIPTION,
-                             id=const.ID, sort=const.SORT, desc=const.DESC))
-        # Should get two pools
-        self.assertEqual(2, len(pools))
-        # pools should have two fields
-        self.assertEqual(2, len(pools[0]))
-        # Should be in descending order
-        self.assertEqual(pool2[const.DESCRIPTION],
-                         pools[1][const.DESCRIPTION])
-        self.assertEqual(pool1[const.DESCRIPTION],
-                         pools[0][const.DESCRIPTION])
-
-    @decorators.idempotent_id('b7932438-1aea-4175-a50c-984fee1c0cad')
-    def test_pool_show(self):
-        """Tests pool show API.
-
-        * Create a fully populated pool.
-        * Show pool details.
-        * Validate the show reflects the requested values.
-        * Validates that other accounts cannot see the pool.
-        """
-        pool_name = data_utils.rand_name("lb_member_pool1-show")
-        pool_description = data_utils.arbitrary_string(size=255)
-        pool_sp_cookie_name = 'my_cookie'
-        pool_kwargs = {
-            const.NAME: pool_name,
-            const.DESCRIPTION: pool_description,
-            const.ADMIN_STATE_UP: True,
-            const.PROTOCOL: const.HTTP,
-            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
-            const.SESSION_PERSISTENCE: {
-                const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
-                const.COOKIE_NAME: pool_sp_cookie_name,
-            },
-            const.LOADBALANCER_ID: self.lb_id,
-        }
-
-        pool = self.mem_pool_client.create_pool(**pool_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_pool_client.cleanup_pool,
-            pool[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        pool = waiters.wait_for_status(
-            self.mem_pool_client.show_pool,
-            pool[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-
-        self.assertEqual(pool_name, pool[const.NAME])
-        self.assertEqual(pool_description, pool[const.DESCRIPTION])
-        self.assertTrue(pool[const.ADMIN_STATE_UP])
-        parser.parse(pool[const.CREATED_AT])
-        parser.parse(pool[const.UPDATED_AT])
-        UUID(pool[const.ID])
-        # Operating status for pools will always be offline without members
-        self.assertEqual(const.OFFLINE, pool[const.OPERATING_STATUS])
-        self.assertEqual(const.HTTP, pool[const.PROTOCOL])
-        self.assertEqual(1, len(pool[const.LOADBALANCERS]))
-        self.assertEqual(self.lb_id, pool[const.LOADBALANCERS][0][const.ID])
-        self.assertEmpty(pool[const.LISTENERS])
-        self.assertEqual(const.LB_ALGORITHM_ROUND_ROBIN,
-                         pool[const.LB_ALGORITHM])
-        self.assertIsNotNone(pool.get(const.SESSION_PERSISTENCE))
-        self.assertEqual(const.SESSION_PERSISTENCE_APP_COOKIE,
-                         pool[const.SESSION_PERSISTENCE][const.TYPE])
-        self.assertEqual(pool_sp_cookie_name,
-                         pool[const.SESSION_PERSISTENCE][const.COOKIE_NAME])
-
-        # Test that a user with lb_admin role can see the pool
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            pool_client = self.os_roles_lb_admin.pool_client
-            pool_adm = pool_client.show_pool(pool[const.ID])
-            self.assertEqual(pool_name, pool_adm[const.NAME])
-
-        # Test that a user with cloud admin role can see the pool
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            adm = self.os_admin.pool_client.show_pool(
-                pool[const.ID])
-            self.assertEqual(pool_name, adm[const.NAME])
-
-        # Test that a different user, with load balancer member role, cannot
-        # see this pool
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            member2_client = self.os_roles_lb_member2.pool_client
-            self.assertRaises(exceptions.Forbidden,
-                              member2_client.show_pool,
-                              pool[const.ID])
-
-        # Test that a user, without the load balancer member role, cannot
-        # show pools
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.pool_client.show_pool,
-                pool[const.ID])
-
-    @decorators.idempotent_id('7bd0a6bf-57b4-46a6-83ef-f9991896658a')
-    def test_pool_update(self):
-        """Tests pool update and show APIs.
-
-        * Create a fully populated pool.
-        * Show pool details.
-        * Validate the show reflects the initial values.
-        * Validates that other accounts cannot update the pool.
-        * Update the pool details.
-        * Show pool details.
-        * Validate the show reflects the updated values.
-        """
-        pool_name = data_utils.rand_name("lb_member_pool1-update")
-        pool_description = data_utils.arbitrary_string(size=255)
-        pool_sp_cookie_name = 'my_cookie'
-        pool_kwargs = {
-            const.NAME: pool_name,
-            const.DESCRIPTION: pool_description,
-            const.ADMIN_STATE_UP: False,
-            const.PROTOCOL: const.HTTP,
-            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
-            const.SESSION_PERSISTENCE: {
-                const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
-                const.COOKIE_NAME: pool_sp_cookie_name,
-            },
-            const.LOADBALANCER_ID: self.lb_id,
-        }
-
-        pool = self.mem_pool_client.create_pool(**pool_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_pool_client.cleanup_pool,
-            pool[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        pool = waiters.wait_for_status(
-            self.mem_pool_client.show_pool,
-            pool[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-
-        self.assertEqual(pool_name, pool[const.NAME])
-        self.assertEqual(pool_description, pool[const.DESCRIPTION])
-        self.assertFalse(pool[const.ADMIN_STATE_UP])
-        parser.parse(pool[const.CREATED_AT])
-        parser.parse(pool[const.UPDATED_AT])
-        UUID(pool[const.ID])
-        # Operating status for pools will always be offline without members
-        self.assertEqual(const.OFFLINE, pool[const.OPERATING_STATUS])
-        self.assertEqual(const.HTTP, pool[const.PROTOCOL])
-        self.assertEqual(1, len(pool[const.LOADBALANCERS]))
-        self.assertEqual(self.lb_id, pool[const.LOADBALANCERS][0][const.ID])
-        self.assertEmpty(pool[const.LISTENERS])
-        self.assertEqual(const.LB_ALGORITHM_ROUND_ROBIN,
-                         pool[const.LB_ALGORITHM])
-        self.assertIsNotNone(pool.get(const.SESSION_PERSISTENCE))
-        self.assertEqual(const.SESSION_PERSISTENCE_APP_COOKIE,
-                         pool[const.SESSION_PERSISTENCE][const.TYPE])
-        self.assertEqual(pool_sp_cookie_name,
-                         pool[const.SESSION_PERSISTENCE][const.COOKIE_NAME])
-
-        # Test that a user, without the load balancer member role, cannot
-        # use this command
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.pool_client.update_pool,
-                pool[const.ID], admin_state_up=True)
-
-        # Assert we didn't go into PENDING_*
-        pool_check = self.mem_pool_client.show_pool(
-            pool[const.ID])
-        self.assertEqual(const.ACTIVE,
-                         pool_check[const.PROVISIONING_STATUS])
-        self.assertFalse(pool_check[const.ADMIN_STATE_UP])
-
-        # Test that a user, without the load balancer member role, cannot
-        # update this pool
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            member2_client = self.os_roles_lb_member2.pool_client
-            self.assertRaises(exceptions.Forbidden,
-                              member2_client.update_pool,
-                              pool[const.ID], admin_state_up=True)
-
-        # Assert we didn't go into PENDING_*
-        pool_check = self.mem_pool_client.show_pool(
-            pool[const.ID])
-        self.assertEqual(const.ACTIVE,
-                         pool_check[const.PROVISIONING_STATUS])
-        self.assertFalse(pool_check[const.ADMIN_STATE_UP])
-
-        new_name = data_utils.rand_name("lb_member_pool1-UPDATED")
-        new_description = data_utils.arbitrary_string(size=255,
-                                                      base_text='new')
-        pool_update_kwargs = {
-            const.NAME: new_name,
-            const.DESCRIPTION: new_description,
-            const.ADMIN_STATE_UP: True,
-            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
-            const.SESSION_PERSISTENCE: {
-                const.TYPE: const.SESSION_PERSISTENCE_HTTP_COOKIE,
-            },
-        }
-        pool = self.mem_pool_client.update_pool(
-            pool[const.ID], **pool_update_kwargs)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        pool = waiters.wait_for_status(
-            self.mem_pool_client.show_pool,
-            pool[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-
-        self.assertEqual(new_name, pool[const.NAME])
-        self.assertEqual(new_description, pool[const.DESCRIPTION])
-        self.assertTrue(pool[const.ADMIN_STATE_UP])
-        self.assertEqual(const.LB_ALGORITHM_ROUND_ROBIN,
-                         pool[const.LB_ALGORITHM])
-        self.assertIsNotNone(pool.get(const.SESSION_PERSISTENCE))
-        self.assertEqual(const.SESSION_PERSISTENCE_HTTP_COOKIE,
-                         pool[const.SESSION_PERSISTENCE][const.TYPE])
-        self.assertIsNone(
-            pool[const.SESSION_PERSISTENCE].get(const.COOKIE_NAME))
-
-        # Also test removing a Session Persistence
-        pool_update_kwargs = {
-            const.SESSION_PERSISTENCE: None,
-        }
-        pool = self.mem_pool_client.update_pool(
-            pool[const.ID], **pool_update_kwargs)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        pool = waiters.wait_for_status(
-            self.mem_pool_client.show_pool,
-            pool[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        self.assertIsNone(pool.get(const.SESSION_PERSISTENCE))
-
-    @decorators.idempotent_id('35ed3800-7a4a-47a6-9b94-c1033fff1112')
-    def test_pool_delete(self):
-        """Tests pool create and delete APIs.
-
-        * Creates a pool.
-        * Validates that other accounts cannot delete the pool
-        * Deletes the pool.
-        * Validates the pool is in the DELETED state.
-        """
-        pool_name = data_utils.rand_name("lb_member_pool1-delete")
-        pool_sp_cookie_name = 'my_cookie'
-        pool_kwargs = {
-            const.NAME: pool_name,
-            const.PROTOCOL: const.HTTP,
-            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
-            const.SESSION_PERSISTENCE: {
-                const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
-                const.COOKIE_NAME: pool_sp_cookie_name,
-            },
-            const.LOADBALANCER_ID: self.lb_id,
-        }
-        pool = self.mem_pool_client.create_pool(**pool_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_pool_client.cleanup_pool,
-            pool[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer,
-            self.lb_id, const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-
-        # Test that a user without the load balancer role cannot
-        # delete this pool
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.pool_client.delete_pool,
-                pool[const.ID])
-
-        # Test that a different user, with the load balancer member role
-        # cannot delete this pool
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            member2_client = self.os_roles_lb_member2.pool_client
-            self.assertRaises(exceptions.Forbidden,
-                              member2_client.delete_pool,
-                              pool[const.ID])
-
-        self.mem_pool_client.delete_pool(pool[const.ID])
-
-        waiters.wait_for_deleted_status_or_not_found(
-            self.mem_pool_client.show_pool, pool[const.ID],
-            const.PROVISIONING_STATUS,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer,
-            self.lb_id, const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout)
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_amphora.py b/octavia_tempest_plugin/tests/scenario/v2/test_amphora.py
deleted file mode 100644
index 4f1480c..0000000
--- a/octavia_tempest_plugin/tests/scenario/v2/test_amphora.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# Copyright 2018 GoDaddy
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from uuid import UUID
-
-from dateutil import parser
-from tempest import config
-from tempest.lib.common.utils import data_utils
-from tempest.lib import decorators
-from tempest.lib import exceptions
-
-from octavia_tempest_plugin.common import constants as const
-from octavia_tempest_plugin.tests import test_base
-from octavia_tempest_plugin.tests import waiters
-
-CONF = config.CONF
-
-
-class AmphoraScenarioTest(test_base.LoadBalancerBaseTest):
-    """Test the amphora object API."""
-
-    @classmethod
-    def resource_setup(cls):
-        """Setup resources needed by the tests."""
-        super(AmphoraScenarioTest, cls).resource_setup()
-
-        lb_name = data_utils.rand_name("lb_member_lb1_amphora")
-        lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
-                     const.NAME: lb_name}
-
-        cls._setup_lb_network_kwargs(lb_kwargs)
-
-        lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
-        cls.lb_id = lb[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_lb_client.cleanup_loadbalancer,
-            cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.lb_build_interval,
-                                CONF.load_balancer.lb_build_timeout)
-
-    def _expected_amp_count(self, amp_list):
-        self.assertNotEmpty(amp_list)
-        if amp_list[0][const.ROLE] in (const.ROLE_MASTER, const.ROLE_BACKUP):
-            return 2
-        return 1
-
-    @decorators.idempotent_id('a0e9ff99-2c4f-45d5-81c9-78d3107c236f')
-    def test_amphora_list_and_show(self):
-        """Tests amphora show API.
-
-        * Show amphora details.
-        * Validate the show reflects the requested values.
-        * Validates that other accounts cannot see the amphora.
-        """
-        lb_name = data_utils.rand_name("lb_member_lb2_amphora-list")
-        lb = self.mem_lb_client.create_loadbalancer(
-            name=lb_name,
-            vip_network_id=self.lb_member_vip_net[const.ID])
-        lb_id = lb[const.ID]
-        self.addCleanup(
-            self.mem_lb_client.cleanup_loadbalancer,
-            lb_id)
-
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                lb_id,
-                                const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.lb_build_interval,
-                                CONF.load_balancer.lb_build_timeout)
-
-        # Test that a user with lb_admin role can list the amphora
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            amphora_client = self.os_roles_lb_admin.amphora_client
-            amphora_adm = amphora_client.list_amphorae()
-            self.assertTrue(
-                len(amphora_adm) >= 2 * self._expected_amp_count(amphora_adm))
-
-        # Test that a different user, with load balancer member role, cannot
-        # see this amphora
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            member2_client = self.os_roles_lb_member2.amphora_client
-            self.assertRaises(exceptions.Forbidden,
-                              member2_client.list_amphorae)
-
-        # Test that a user, without the load balancer member role, cannot
-        # list amphorae
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            self.assertRaises(
-                exceptions.Forbidden,
-                self.os_primary.amphora_client.list_amphorae)
-
-        # Test that a user with cloud admin role can list the amphorae
-        if not CONF.load_balancer.RBAC_test_type == const.NONE:
-            adm = self.os_admin.amphora_client.list_amphorae()
-            self.assertTrue(len(adm) >= 2 * self._expected_amp_count(adm))
-
-        # Get an actual list of the amphorae
-        amphorae = self.os_admin.amphora_client.list_amphorae()
-
-        # There should be AT LEAST 2, there may be more depending on the
-        # configured topology, or if there are other LBs created besides ours
-        self.assertTrue(
-            len(amphorae) >= 2 * self._expected_amp_count(amphorae))
-
-        show_amphora_response_fields = const.SHOW_AMPHORA_RESPONSE_FIELDS
-        if self.mem_amphora_client.is_version_supported(
-                self.api_version, '2.1'):
-            show_amphora_response_fields.append('created_at')
-            show_amphora_response_fields.append('updated_at')
-            show_amphora_response_fields.append('image_id')
-
-        for amp in amphorae:
-
-            # Make sure all of the fields exist on the amp list records
-            for field in show_amphora_response_fields:
-                self.assertIn(field, amp)
-
-            amp_id = amp[const.ID]
-            amp_obj = self.os_admin.amphora_client.show_amphora(
-                amphora_id=amp_id)
-
-            # Make sure all of the fields exist on the amp show record
-            for field in show_amphora_response_fields:
-                self.assertIn(field, amp_obj)
-
-            # Verify a few of the fields are the right type
-            if self.mem_amphora_client.is_version_supported(
-                    self.api_version, '2.1'):
-                parser.parse(amp_obj[const.CREATED_AT])
-                parser.parse(amp_obj[const.UPDATED_AT])
-            UUID(amp_obj[const.ID])
-            self.assertIn(amp_obj[const.STATUS], const.AMPHORA_STATUSES)
-
-            # We might have gotten unassigned/spare amps?
-            if amp_obj[const.STATUS] == const.STATUS_ALLOCATED:
-                # Only check the state of fields for the LB we created,
-                # otherwise some fields (HA_PORT_ID) may not yet be
-                # populated in amps for parallel tests.
-                if lb_id == amp_obj[const.LOADBALANCER_ID]:
-                    UUID(amp_obj[const.HA_PORT_ID])
-                    UUID(amp_obj[const.LOADBALANCER_ID])
-                    UUID(amp_obj[const.COMPUTE_ID])
-                    UUID(amp_obj[const.VRRP_PORT_ID])
-                    self.assertIn(amp_obj[const.ROLE], const.AMPHORA_ROLES)
-            else:
-                self.assertIsNone(amp_obj[const.ROLE])
-
-            # Test that all of the fields from the amp list match those
-            # from a show
-            for field in show_amphora_response_fields:
-                self.assertEqual(amp[field], amp_obj[field])
-
-        # Test filtering by loadbalancer_id
-        amphorae = self.os_admin.amphora_client.list_amphorae(
-            query_params='{loadbalancer_id}={lb_id}'.format(
-                loadbalancer_id=const.LOADBALANCER_ID, lb_id=self.lb_id))
-        self.assertEqual(self._expected_amp_count(amphorae), len(amphorae))
-        self.assertEqual(self.lb_id, amphorae[0][const.LOADBALANCER_ID])
-
-        amphorae = self.os_admin.amphora_client.list_amphorae(
-            query_params='{loadbalancer_id}={lb_id}'.format(
-                loadbalancer_id=const.LOADBALANCER_ID, lb_id=lb_id))
-        self.assertEqual(self._expected_amp_count(amphorae), len(amphorae))
-        self.assertEqual(lb_id, amphorae[0][const.LOADBALANCER_ID])
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py b/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py
deleted file mode 100644
index 7b769e1..0000000
--- a/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py
+++ /dev/null
@@ -1,184 +0,0 @@
-# Copyright 2018 GoDaddy
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from uuid import UUID
-
-from dateutil import parser
-from tempest import config
-from tempest.lib.common.utils import data_utils
-from tempest.lib import decorators
-
-from octavia_tempest_plugin.common import constants as const
-from octavia_tempest_plugin.tests import test_base
-from octavia_tempest_plugin.tests import waiters
-
-CONF = config.CONF
-
-
-class HealthMonitorScenarioTest(test_base.LoadBalancerBaseTest):
-
-    @classmethod
-    def skip_checks(cls):
-        super(HealthMonitorScenarioTest, cls).skip_checks()
-        if not CONF.loadbalancer_feature_enabled.health_monitor_enabled:
-            cls.skip('Health Monitors not supported')
-
-    @classmethod
-    def resource_setup(cls):
-        """Setup resources needed by the tests."""
-        super(HealthMonitorScenarioTest, cls).resource_setup()
-
-        lb_name = data_utils.rand_name("lb_member_lb1_hm")
-        lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
-                     const.NAME: lb_name}
-
-        cls._setup_lb_network_kwargs(lb_kwargs)
-
-        lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
-        cls.lb_id = lb[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_lb_client.cleanup_loadbalancer,
-            cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.lb_build_interval,
-                                CONF.load_balancer.lb_build_timeout)
-
-        pool_name = data_utils.rand_name("lb_member_pool1_hm")
-        pool_kwargs = {
-            const.NAME: pool_name,
-            const.PROTOCOL: const.HTTP,
-            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
-            const.LOADBALANCER_ID: cls.lb_id,
-        }
-        pool = cls.mem_pool_client.create_pool(**pool_kwargs)
-        cls.pool_id = pool[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_pool_client.cleanup_pool,
-            cls.pool_id,
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-    @decorators.idempotent_id('a51e09aa-6e44-4c67-a9e4-df70d0e08f96')
-    def test_healthmonitor_CRUD(self):
-        """Tests healthmonitor create, read, update, delete, and member status
-
-        * Create a fully populated healthmonitor.
-        * Show healthmonitor details.
-        * Update the healthmonitor.
-        * Delete the healthmonitor.
-        """
-        # Healthmonitor create
-        hm_name = data_utils.rand_name("lb_member_hm1-CRUD")
-        hm_kwargs = {
-            const.POOL_ID: self.pool_id,
-            const.NAME: hm_name,
-            const.TYPE: const.HEALTH_MONITOR_HTTP,
-            const.DELAY: 2,
-            const.TIMEOUT: 2,
-            const.MAX_RETRIES: 2,
-            const.MAX_RETRIES_DOWN: 2,
-            const.HTTP_METHOD: const.GET,
-            const.URL_PATH: '/',
-            const.EXPECTED_CODES: '200',
-            const.ADMIN_STATE_UP: True,
-        }
-
-        hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_healthmonitor_client.cleanup_healthmonitor,
-            hm[const.ID], lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        hm = waiters.wait_for_status(
-            self.mem_healthmonitor_client.show_healthmonitor,
-            hm[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-
-        parser.parse(hm[const.CREATED_AT])
-        parser.parse(hm[const.UPDATED_AT])
-        UUID(hm[const.ID])
-        self.assertEqual(const.ONLINE, hm[const.OPERATING_STATUS])
-
-        equal_items = [const.NAME, const.TYPE, const.DELAY, const.TIMEOUT,
-                       const.MAX_RETRIES, const.MAX_RETRIES_DOWN,
-                       const.HTTP_METHOD, const.URL_PATH, const.EXPECTED_CODES,
-                       const.ADMIN_STATE_UP]
-
-        for item in equal_items:
-            self.assertEqual(hm_kwargs[item], hm[item])
-
-        # Healthmonitor update
-        new_name = data_utils.rand_name("lb_member_hm1-update")
-        hm_update_kwargs = {
-            const.NAME: new_name,
-            const.DELAY: hm_kwargs[const.DELAY] + 1,
-            const.TIMEOUT: hm_kwargs[const.TIMEOUT] + 1,
-            const.MAX_RETRIES: hm_kwargs[const.MAX_RETRIES] + 1,
-            const.MAX_RETRIES_DOWN: hm_kwargs[const.MAX_RETRIES_DOWN] + 1,
-            const.HTTP_METHOD: const.POST,
-            const.URL_PATH: '/test',
-            const.EXPECTED_CODES: '201,202',
-            const.ADMIN_STATE_UP: not hm_kwargs[const.ADMIN_STATE_UP],
-        }
-        hm = self.mem_healthmonitor_client.update_healthmonitor(
-            hm[const.ID], **hm_update_kwargs)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        hm = waiters.wait_for_status(
-            self.mem_healthmonitor_client.show_healthmonitor,
-            hm[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-
-        # Test changed items
-        equal_items = [const.NAME, const.DELAY, const.TIMEOUT,
-                       const.MAX_RETRIES, const.MAX_RETRIES_DOWN,
-                       const.HTTP_METHOD, const.URL_PATH, const.EXPECTED_CODES,
-                       const.ADMIN_STATE_UP]
-
-        for item in equal_items:
-            self.assertEqual(hm_update_kwargs[item], hm[item])
-
-        # Test unchanged items
-        equal_items = [const.TYPE]
-        for item in equal_items:
-            self.assertEqual(hm_kwargs[item], hm[item])
-
-        # Healthmonitor delete
-        self.mem_healthmonitor_client.delete_healthmonitor(hm[const.ID])
-
-        waiters.wait_for_deleted_status_or_not_found(
-            self.mem_healthmonitor_client.show_healthmonitor, hm[const.ID],
-            const.PROVISIONING_STATUS,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout)
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_l7policy.py b/octavia_tempest_plugin/tests/scenario/v2/test_l7policy.py
deleted file mode 100644
index 98d3bc6..0000000
--- a/octavia_tempest_plugin/tests/scenario/v2/test_l7policy.py
+++ /dev/null
@@ -1,202 +0,0 @@
-# Copyright 2018 GoDaddy
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from uuid import UUID
-
-from dateutil import parser
-from tempest import config
-from tempest.lib.common.utils import data_utils
-from tempest.lib import decorators
-
-from octavia_tempest_plugin.common import constants as const
-from octavia_tempest_plugin.tests import test_base
-from octavia_tempest_plugin.tests import waiters
-
-CONF = config.CONF
-
-
-class L7PolicyScenarioTest(test_base.LoadBalancerBaseTest):
-
-    @classmethod
-    def resource_setup(cls):
-        """Setup resources needed by the tests."""
-        super(L7PolicyScenarioTest, cls).resource_setup()
-
-        lb_name = data_utils.rand_name("lb_member_lb1_l7policy")
-        lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
-                     const.NAME: lb_name}
-
-        cls._setup_lb_network_kwargs(lb_kwargs)
-
-        lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
-        cls.lb_id = lb[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_lb_client.cleanup_loadbalancer,
-            cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.lb_build_interval,
-                                CONF.load_balancer.lb_build_timeout)
-
-        listener_name = data_utils.rand_name("lb_member_listener1_l7policy")
-        listener_kwargs = {
-            const.NAME: listener_name,
-            const.PROTOCOL: const.HTTP,
-            const.PROTOCOL_PORT: '80',
-            const.LOADBALANCER_ID: cls.lb_id,
-        }
-        listener = cls.mem_listener_client.create_listener(**listener_kwargs)
-        cls.listener_id = listener[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_listener_client.cleanup_listener,
-            cls.listener_id,
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-        pool_name = data_utils.rand_name("lb_member_pool1_l7policy")
-        pool_kwargs = {
-            const.NAME: pool_name,
-            const.PROTOCOL: const.HTTP,
-            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
-            const.LOADBALANCER_ID: cls.lb_id,
-        }
-        pool = cls.mem_pool_client.create_pool(**pool_kwargs)
-        cls.pool_id = pool[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_pool_client.cleanup_pool,
-            cls.pool_id,
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-    @decorators.idempotent_id('ffd598d9-d8cd-4586-a749-cde4897e64dd')
-    def test_l7policy_CRUD(self):
-        """Tests l7policy create, read, update, delete
-
-        * Create a fully populated l7policy.
-        * Show l7policy details.
-        * Update the l7policy.
-        * Delete the l7policy.
-        """
-
-        # L7Policy create
-        l7policy_name = data_utils.rand_name("lb_member_l7policy1-CRUD")
-        l7policy_description = data_utils.arbitrary_string(size=255)
-        l7policy_kwargs = {
-            const.LISTENER_ID: self.listener_id,
-            const.NAME: l7policy_name,
-            const.DESCRIPTION: l7policy_description,
-            const.ADMIN_STATE_UP: False,
-            const.POSITION: 1,
-            const.ACTION: const.REDIRECT_TO_POOL,
-            const.REDIRECT_POOL_ID: self.pool_id,
-        }
-
-        l7policy = self.mem_l7policy_client.create_l7policy(**l7policy_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_l7policy_client.cleanup_l7policy,
-            l7policy[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        l7policy = waiters.wait_for_status(
-            self.mem_l7policy_client.show_l7policy,
-            l7policy[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-
-        self.assertEqual(l7policy_name, l7policy[const.NAME])
-        self.assertEqual(l7policy_description, l7policy[const.DESCRIPTION])
-        self.assertFalse(l7policy[const.ADMIN_STATE_UP])
-        parser.parse(l7policy[const.CREATED_AT])
-        parser.parse(l7policy[const.UPDATED_AT])
-        UUID(l7policy[const.ID])
-        # Operating status will be OFFLINE while admin_state_up = False
-        self.assertEqual(const.OFFLINE, l7policy[const.OPERATING_STATUS])
-        self.assertEqual(self.listener_id, l7policy[const.LISTENER_ID])
-        self.assertEqual(1, l7policy[const.POSITION])
-        self.assertEqual(const.REDIRECT_TO_POOL, l7policy[const.ACTION])
-        self.assertEqual(self.pool_id, l7policy[const.REDIRECT_POOL_ID])
-        self.assertIsNone(l7policy.pop(const.REDIRECT_URL, None))
-
-        # L7Policy update
-        new_name = data_utils.rand_name("lb_member_l7policy1-update")
-        new_description = data_utils.arbitrary_string(size=255,
-                                                      base_text='new')
-        redirect_url = 'http://localhost'
-        l7policy_update_kwargs = {
-            const.NAME: new_name,
-            const.DESCRIPTION: new_description,
-            const.ADMIN_STATE_UP: True,
-            const.POSITION: 2,
-            const.ACTION: const.REDIRECT_TO_URL,
-            const.REDIRECT_URL: redirect_url,
-        }
-        l7policy = self.mem_l7policy_client.update_l7policy(
-            l7policy[const.ID], **l7policy_update_kwargs)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        l7policy = waiters.wait_for_status(
-            self.mem_l7policy_client.show_l7policy,
-            l7policy[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-
-        self.assertEqual(new_name, l7policy[const.NAME])
-        self.assertEqual(new_description, l7policy[const.DESCRIPTION])
-        self.assertTrue(l7policy[const.ADMIN_STATE_UP])
-        # Operating status for a l7policy will be ONLINE if it is enabled:
-        self.assertEqual(const.ONLINE, l7policy[const.OPERATING_STATUS])
-        self.assertEqual(self.listener_id, l7policy[const.LISTENER_ID])
-        # Position will have recalculated to 1
-        self.assertEqual(1, l7policy[const.POSITION])
-        self.assertEqual(const.REDIRECT_TO_URL, l7policy[const.ACTION])
-        self.assertEqual(redirect_url, l7policy[const.REDIRECT_URL])
-        self.assertIsNone(l7policy.pop(const.REDIRECT_POOL_ID, None))
-
-        # L7Policy delete
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer,
-            self.lb_id, const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout)
-        self.mem_l7policy_client.delete_l7policy(l7policy[const.ID])
-
-        waiters.wait_for_deleted_status_or_not_found(
-            self.mem_l7policy_client.show_l7policy, l7policy[const.ID],
-            const.PROVISIONING_STATUS,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout)
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_l7rule.py b/octavia_tempest_plugin/tests/scenario/v2/test_l7rule.py
deleted file mode 100644
index 3e14a74..0000000
--- a/octavia_tempest_plugin/tests/scenario/v2/test_l7rule.py
+++ /dev/null
@@ -1,195 +0,0 @@
-# Copyright 2018 GoDaddy
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from uuid import UUID
-
-from dateutil import parser
-from tempest import config
-from tempest.lib.common.utils import data_utils
-from tempest.lib import decorators
-
-from octavia_tempest_plugin.common import constants as const
-from octavia_tempest_plugin.tests import test_base
-from octavia_tempest_plugin.tests import waiters
-
-CONF = config.CONF
-
-
-class L7RuleScenarioTest(test_base.LoadBalancerBaseTest):
-
-    @classmethod
-    def resource_setup(cls):
-        """Setup resources needed by the tests."""
-        super(L7RuleScenarioTest, cls).resource_setup()
-
-        lb_name = data_utils.rand_name("lb_member_lb1_l7rule")
-        lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
-                     const.NAME: lb_name}
-
-        cls._setup_lb_network_kwargs(lb_kwargs)
-
-        lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
-        cls.lb_id = lb[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_lb_client.cleanup_loadbalancer,
-            cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.lb_build_interval,
-                                CONF.load_balancer.lb_build_timeout)
-
-        listener_name = data_utils.rand_name("lb_member_listener1_l7rule")
-        listener_kwargs = {
-            const.NAME: listener_name,
-            const.PROTOCOL: const.HTTP,
-            const.PROTOCOL_PORT: '80',
-            const.LOADBALANCER_ID: cls.lb_id,
-        }
-        listener = cls.mem_listener_client.create_listener(**listener_kwargs)
-        cls.listener_id = listener[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_listener_client.cleanup_listener,
-            cls.listener_id,
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-        l7policy_name = data_utils.rand_name("lb_member_l7policy1_l7rule")
-        l7policy_kwargs = {
-            const.NAME: l7policy_name,
-            const.LISTENER_ID: cls.listener_id,
-            const.ACTION: const.REJECT,
-        }
-        l7policy = cls.mem_l7policy_client.create_l7policy(**l7policy_kwargs)
-        cls.l7policy_id = l7policy[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_l7policy_client.cleanup_l7policy,
-            cls.l7policy_id,
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-    @decorators.idempotent_id('a1c268b9-5304-48c7-9a34-0ef0e8e9307e')
-    def test_l7rule_CRUD(self):
-        """Tests l7rule create, read, update, delete
-
-        * Create a fully populated l7rule.
-        * Show l7rule details.
-        * Update the l7rule.
-        * Delete the l7rule.
-        """
-
-        # L7Rule create
-        l7rule_kwargs = {
-            const.ADMIN_STATE_UP: False,
-            const.L7POLICY_ID: self.l7policy_id,
-            const.TYPE: const.HEADER,
-            const.VALUE: 'myvalue-create',
-            const.COMPARE_TYPE: const.EQUAL_TO,
-            const.KEY: 'mykey-create',
-            const.INVERT: False,
-        }
-
-        l7rule = self.mem_l7rule_client.create_l7rule(**l7rule_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_l7rule_client.cleanup_l7rule,
-            l7rule[const.ID], l7policy_id=self.l7policy_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        l7rule = waiters.wait_for_status(
-            self.mem_l7rule_client.show_l7rule,
-            l7rule[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout,
-            l7policy_id=self.l7policy_id)
-
-        parser.parse(l7rule[const.CREATED_AT])
-        parser.parse(l7rule[const.UPDATED_AT])
-        UUID(l7rule[const.ID])
-        # Operating status will be OFFLINE while admin_state_up = False
-        self.assertEqual(const.OFFLINE, l7rule[const.OPERATING_STATUS])
-
-        equal_items = [const.ADMIN_STATE_UP, const.TYPE, const.VALUE,
-                       const.COMPARE_TYPE, const.KEY, const.INVERT]
-
-        for item in equal_items:
-            self.assertEqual(l7rule_kwargs[item], l7rule[item])
-
-        # L7Rule update
-        l7rule_update_kwargs = {
-            const.L7POLICY_ID: self.l7policy_id,
-            const.ADMIN_STATE_UP: True,
-            const.TYPE: const.COOKIE,
-            const.VALUE: 'myvalue-UPDATED',
-            const.COMPARE_TYPE: const.CONTAINS,
-            const.KEY: 'mykey-UPDATED',
-            const.INVERT: True,
-        }
-        l7rule = self.mem_l7rule_client.update_l7rule(
-            l7rule[const.ID], **l7rule_update_kwargs)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        l7rule = waiters.wait_for_status(
-            self.mem_l7rule_client.show_l7rule,
-            l7rule[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout,
-            l7policy_id=self.l7policy_id)
-
-        # Operating status for a l7rule will be ONLINE if it is enabled:
-        self.assertEqual(const.ONLINE, l7rule[const.OPERATING_STATUS])
-
-        # Test changed items (which is all of them, for l7rules)
-        equal_items = [const.ADMIN_STATE_UP, const.TYPE, const.VALUE,
-                       const.COMPARE_TYPE, const.KEY, const.INVERT]
-        for item in equal_items:
-            self.assertEqual(l7rule_update_kwargs[item], l7rule[item])
-
-        # L7Rule delete
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer,
-            self.lb_id, const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout)
-        self.mem_l7rule_client.delete_l7rule(l7rule[const.ID],
-                                             l7policy_id=self.l7policy_id)
-
-        waiters.wait_for_deleted_status_or_not_found(
-            self.mem_l7rule_client.show_l7rule, l7rule[const.ID],
-            const.PROVISIONING_STATUS,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout,
-            l7policy_id=self.l7policy_id)
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_listener.py b/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
deleted file mode 100644
index 685c200..0000000
--- a/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
+++ /dev/null
@@ -1,262 +0,0 @@
-# Copyright 2018 GoDaddy
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from uuid import UUID
-
-from dateutil import parser
-from oslo_utils import strutils
-from tempest import config
-from tempest.lib.common.utils import data_utils
-from tempest.lib import decorators
-
-from octavia_tempest_plugin.common import constants as const
-from octavia_tempest_plugin.tests import test_base
-from octavia_tempest_plugin.tests import waiters
-
-CONF = config.CONF
-
-
-class ListenerScenarioTest(test_base.LoadBalancerBaseTest):
-
-    @classmethod
-    def resource_setup(cls):
-        """Setup resources needed by the tests."""
-        super(ListenerScenarioTest, cls).resource_setup()
-
-        lb_name = data_utils.rand_name("lb_member_lb1_listener")
-        lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
-                     const.NAME: lb_name}
-
-        cls._setup_lb_network_kwargs(lb_kwargs)
-
-        lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
-        cls.lb_id = lb[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_lb_client.cleanup_loadbalancer,
-            cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.lb_build_interval,
-                                CONF.load_balancer.lb_build_timeout)
-
-        pool1_name = data_utils.rand_name("lb_member_pool1_listener")
-        pool1_kwargs = {
-            const.NAME: pool1_name,
-            const.PROTOCOL: const.HTTP,
-            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
-            const.LOADBALANCER_ID: cls.lb_id,
-        }
-        pool1 = cls.mem_pool_client.create_pool(**pool1_kwargs)
-        cls.pool1_id = pool1[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_pool_client.cleanup_pool,
-            cls.pool1_id,
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-        pool2_name = data_utils.rand_name("lb_member_pool2_listener")
-        pool2_kwargs = {
-            const.NAME: pool2_name,
-            const.PROTOCOL: const.HTTP,
-            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
-            const.LOADBALANCER_ID: cls.lb_id,
-        }
-        pool2 = cls.mem_pool_client.create_pool(**pool2_kwargs)
-        cls.pool2_id = pool2[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_pool_client.cleanup_pool,
-            cls.pool2_id,
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-    @decorators.idempotent_id('4a874014-b7d1-49a4-ac9a-2400b3434700')
-    def test_listener_CRUD(self):
-        """Tests listener create, read, update, delete
-
-        * Create a fully populated listener.
-        * Show listener details.
-        * Update the listener.
-        * Delete the listener.
-        """
-
-        # Listener create
-        listener_name = data_utils.rand_name("lb_member_listener1-CRUD")
-        listener_description = data_utils.arbitrary_string(size=255)
-        listener_kwargs = {
-            const.NAME: listener_name,
-            const.DESCRIPTION: listener_description,
-            const.ADMIN_STATE_UP: False,
-            const.PROTOCOL: const.HTTP,
-            const.PROTOCOL_PORT: 80,
-            const.LOADBALANCER_ID: self.lb_id,
-            const.CONNECTION_LIMIT: 200,
-            const.INSERT_HEADERS: {
-                const.X_FORWARDED_FOR: "true",
-                const.X_FORWARDED_PORT: "true"
-            },
-            const.DEFAULT_POOL_ID: self.pool1_id,
-            # TODO(rm_work): need to finish the rest of this stuff
-            # const.DEFAULT_TLS_CONTAINER_REF: '',
-            # const.SNI_CONTAINER_REFS: [],
-        }
-        if self.mem_listener_client.is_version_supported(
-                self.api_version, '2.1'):
-            listener_kwargs.update({
-                const.TIMEOUT_CLIENT_DATA: 1000,
-                const.TIMEOUT_MEMBER_CONNECT: 1000,
-                const.TIMEOUT_MEMBER_DATA: 1000,
-                const.TIMEOUT_TCP_INSPECT: 50,
-            })
-
-        listener = self.mem_listener_client.create_listener(**listener_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_listener_client.cleanup_listener,
-            listener[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        listener = waiters.wait_for_status(
-            self.mem_listener_client.show_listener,
-            listener[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-
-        self.assertEqual(listener_name, listener[const.NAME])
-        self.assertEqual(listener_description, listener[const.DESCRIPTION])
-        self.assertFalse(listener[const.ADMIN_STATE_UP])
-        parser.parse(listener[const.CREATED_AT])
-        parser.parse(listener[const.UPDATED_AT])
-        UUID(listener[const.ID])
-        # Operating status will be OFFLINE while admin_state_up = False
-        self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS])
-        self.assertEqual(const.HTTP, listener[const.PROTOCOL])
-        self.assertEqual(80, listener[const.PROTOCOL_PORT])
-        self.assertEqual(200, listener[const.CONNECTION_LIMIT])
-        insert_headers = listener[const.INSERT_HEADERS]
-        self.assertTrue(
-            strutils.bool_from_string(insert_headers[const.X_FORWARDED_FOR]))
-        self.assertTrue(
-            strutils.bool_from_string(insert_headers[const.X_FORWARDED_PORT]))
-        self.assertEqual(self.pool1_id, listener[const.DEFAULT_POOL_ID])
-        if self.mem_listener_client.is_version_supported(
-                self.api_version, '2.1'):
-            self.assertEqual(1000, listener[const.TIMEOUT_CLIENT_DATA])
-            self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_CONNECT])
-            self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_DATA])
-            self.assertEqual(50, listener[const.TIMEOUT_TCP_INSPECT])
-
-        # Listener update
-        new_name = data_utils.rand_name("lb_member_listener1-update")
-        new_description = data_utils.arbitrary_string(size=255,
-                                                      base_text='new')
-        listener_update_kwargs = {
-            const.NAME: new_name,
-            const.DESCRIPTION: new_description,
-            const.ADMIN_STATE_UP: True,
-            const.CONNECTION_LIMIT: 400,
-            const.INSERT_HEADERS: {
-                const.X_FORWARDED_FOR: "false",
-                const.X_FORWARDED_PORT: "false"
-            },
-            const.DEFAULT_POOL_ID: self.pool2_id,
-            # TODO(rm_work): need to finish the rest of this stuff
-            # const.DEFAULT_TLS_CONTAINER_REF: '',
-            # const.SNI_CONTAINER_REFS: [],
-        }
-        if self.mem_listener_client.is_version_supported(
-                self.api_version, '2.1'):
-            listener_update_kwargs.update({
-                const.TIMEOUT_CLIENT_DATA: 2000,
-                const.TIMEOUT_MEMBER_CONNECT: 2000,
-                const.TIMEOUT_MEMBER_DATA: 2000,
-                const.TIMEOUT_TCP_INSPECT: 100,
-            })
-
-        listener = self.mem_listener_client.update_listener(
-            listener[const.ID], **listener_update_kwargs)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        listener = waiters.wait_for_status(
-            self.mem_listener_client.show_listener,
-            listener[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        if not CONF.load_balancer.test_with_noop:
-            listener = waiters.wait_for_status(
-                self.mem_listener_client.show_listener,
-                listener[const.ID], const.OPERATING_STATUS,
-                const.ONLINE,
-                CONF.load_balancer.build_interval,
-                CONF.load_balancer.build_timeout)
-
-        self.assertEqual(new_name, listener[const.NAME])
-        self.assertEqual(new_description, listener[const.DESCRIPTION])
-        self.assertTrue(listener[const.ADMIN_STATE_UP])
-        # Operating status is a measured status, so no-op will not go online
-        if CONF.load_balancer.test_with_noop:
-            self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS])
-        else:
-            self.assertEqual(const.ONLINE, listener[const.OPERATING_STATUS])
-        self.assertEqual(const.HTTP, listener[const.PROTOCOL])
-        self.assertEqual(80, listener[const.PROTOCOL_PORT])
-        self.assertEqual(400, listener[const.CONNECTION_LIMIT])
-        insert_headers = listener[const.INSERT_HEADERS]
-        self.assertFalse(
-            strutils.bool_from_string(insert_headers[const.X_FORWARDED_FOR]))
-        self.assertFalse(
-            strutils.bool_from_string(insert_headers[const.X_FORWARDED_PORT]))
-        self.assertEqual(self.pool2_id, listener[const.DEFAULT_POOL_ID])
-        if self.mem_listener_client.is_version_supported(
-                self.api_version, '2.1'):
-            self.assertEqual(2000, listener[const.TIMEOUT_CLIENT_DATA])
-            self.assertEqual(2000, listener[const.TIMEOUT_MEMBER_CONNECT])
-            self.assertEqual(2000, listener[const.TIMEOUT_MEMBER_DATA])
-            self.assertEqual(100, listener[const.TIMEOUT_TCP_INSPECT])
-
-        # Listener delete
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer,
-            self.lb_id, const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout)
-        self.mem_listener_client.delete_listener(listener[const.ID])
-
-        waiters.wait_for_deleted_status_or_not_found(
-            self.mem_listener_client.show_listener, listener[const.ID],
-            const.PROVISIONING_STATUS,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout)
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_member.py b/octavia_tempest_plugin/tests/scenario/v2/test_member.py
deleted file mode 100644
index b30d651..0000000
--- a/octavia_tempest_plugin/tests/scenario/v2/test_member.py
+++ /dev/null
@@ -1,241 +0,0 @@
-# Copyright 2018 GoDaddy
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from uuid import UUID
-
-from dateutil import parser
-from tempest import config
-from tempest.lib.common.utils import data_utils
-from tempest.lib import decorators
-
-from octavia_tempest_plugin.common import constants as const
-from octavia_tempest_plugin.tests import test_base
-from octavia_tempest_plugin.tests import waiters
-
-CONF = config.CONF
-
-
-class MemberScenarioTest(test_base.LoadBalancerBaseTest):
-
-    @classmethod
-    def resource_setup(cls):
-        """Setup resources needed by the tests."""
-        super(MemberScenarioTest, cls).resource_setup()
-
-        lb_name = data_utils.rand_name("lb_member_lb1_member")
-        lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
-                     const.NAME: lb_name}
-
-        cls._setup_lb_network_kwargs(lb_kwargs)
-
-        lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
-        cls.lb_id = lb[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_lb_client.cleanup_loadbalancer,
-            cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.lb_build_interval,
-                                CONF.load_balancer.lb_build_timeout)
-
-        listener_name = data_utils.rand_name("lb_member_listener1_member")
-        listener_kwargs = {
-            const.NAME: listener_name,
-            const.PROTOCOL: const.HTTP,
-            const.PROTOCOL_PORT: '80',
-            const.LOADBALANCER_ID: cls.lb_id,
-        }
-        listener = cls.mem_listener_client.create_listener(**listener_kwargs)
-        cls.listener_id = listener[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_listener_client.cleanup_listener,
-            cls.listener_id,
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-        pool_name = data_utils.rand_name("lb_member_pool1_member")
-        pool_kwargs = {
-            const.NAME: pool_name,
-            const.PROTOCOL: const.HTTP,
-            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
-            const.LISTENER_ID: cls.listener_id,
-        }
-        pool = cls.mem_pool_client.create_pool(**pool_kwargs)
-        cls.pool_id = pool[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_pool_client.cleanup_pool,
-            cls.pool_id,
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-    @decorators.idempotent_id('15c8c3e3-569c-4029-95df-a9f72049e267')
-    def test_member_CRUD(self):
-        """Tests member create, read, update, delete
-
-        * Create a fully populated member.
-        * Show member details.
-        * Update the member.
-        * Delete the member.
-        """
-        # Member create
-        member_name = data_utils.rand_name("lb_member_member1-CRUD")
-        member_kwargs = {
-            const.NAME: member_name,
-            const.ADMIN_STATE_UP: True,
-            const.POOL_ID: self.pool_id,
-            const.ADDRESS: '192.0.2.1',
-            const.PROTOCOL_PORT: 80,
-            const.WEIGHT: 50,
-            const.MONITOR_ADDRESS: '192.0.2.2',
-            const.MONITOR_PORT: 8080,
-        }
-        if self.mem_member_client.is_version_supported(
-                self.api_version, '2.1'):
-            member_kwargs.update({
-                const.BACKUP: False,
-            })
-
-        if self.lb_member_vip_subnet:
-            member_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
-                const.ID]
-
-        member = self.mem_member_client.create_member(**member_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_member_client.cleanup_member,
-            member[const.ID], pool_id=self.pool_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        member = waiters.wait_for_status(
-            self.mem_member_client.show_member,
-            member[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout,
-            pool_id=self.pool_id)
-
-        parser.parse(member[const.CREATED_AT])
-        parser.parse(member[const.UPDATED_AT])
-        UUID(member[const.ID])
-
-        # Members may be in a transitional state initially
-        # like DOWN or MAINT, give it some time to stablize on
-        # NO_MONITOR. This is LIVE status.
-        member = waiters.wait_for_status(
-            self.mem_member_client.show_member,
-            member[const.ID], const.OPERATING_STATUS,
-            const.NO_MONITOR,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout,
-            pool_id=self.pool_id)
-
-        equal_items = [const.NAME, const.ADMIN_STATE_UP, const.ADDRESS,
-                       const.PROTOCOL_PORT, const.WEIGHT,
-                       const.MONITOR_ADDRESS, const.MONITOR_PORT]
-        if self.mem_member_client.is_version_supported(
-                self.api_version, '2.1'):
-            equal_items.append(const.BACKUP)
-
-        if const.SUBNET_ID in member_kwargs:
-            equal_items.append(const.SUBNET_ID)
-        else:
-            self.assertIsNone(member.get(const.SUBNET_ID))
-
-        for item in equal_items:
-            self.assertEqual(member_kwargs[item], member[item])
-
-        # Member update
-        new_name = data_utils.rand_name("lb_member_member1-update")
-        member_update_kwargs = {
-            const.POOL_ID: member_kwargs[const.POOL_ID],
-            const.NAME: new_name,
-            const.ADMIN_STATE_UP: not member[const.ADMIN_STATE_UP],
-            const.WEIGHT: member[const.WEIGHT] + 1,
-            const.MONITOR_ADDRESS: '192.0.2.3',
-            const.MONITOR_PORT: member[const.MONITOR_PORT] + 1,
-        }
-        if self.mem_member_client.is_version_supported(
-                self.api_version, '2.1'):
-            member_update_kwargs.update({
-                const.BACKUP: not member[const.BACKUP],
-            })
-
-        member = self.mem_member_client.update_member(
-            member[const.ID], **member_update_kwargs)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        member = waiters.wait_for_status(
-            self.mem_member_client.show_member,
-            member[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout,
-            pool_id=self.pool_id)
-
-        # Test changed items
-        equal_items = [const.NAME, const.ADMIN_STATE_UP, const.WEIGHT,
-                       const.MONITOR_ADDRESS, const.MONITOR_PORT]
-        if self.mem_member_client.is_version_supported(
-                self.api_version, '2.1'):
-            equal_items.append(const.BACKUP)
-
-        for item in equal_items:
-            self.assertEqual(member_update_kwargs[item], member[item])
-
-        # Test unchanged items
-        equal_items = [const.ADDRESS, const.PROTOCOL_PORT]
-        if const.SUBNET_ID in member_kwargs:
-            equal_items.append(const.SUBNET_ID)
-        else:
-            self.assertIsNone(member.get(const.SUBNET_ID))
-
-        for item in equal_items:
-            self.assertEqual(member_kwargs[item], member[item])
-
-        # Member delete
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer,
-            self.lb_id, const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout)
-        self.mem_member_client.delete_member(member[const.ID],
-                                             pool_id=self.pool_id)
-
-        waiters.wait_for_deleted_status_or_not_found(
-            self.mem_member_client.show_member, member[const.ID],
-            const.PROVISIONING_STATUS,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout,
-            pool_id=self.pool_id)
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_pool.py b/octavia_tempest_plugin/tests/scenario/v2/test_pool.py
deleted file mode 100644
index 1ddc8b2..0000000
--- a/octavia_tempest_plugin/tests/scenario/v2/test_pool.py
+++ /dev/null
@@ -1,204 +0,0 @@
-# Copyright 2018 GoDaddy
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from uuid import UUID
-
-from dateutil import parser
-from tempest import config
-from tempest.lib.common.utils import data_utils
-from tempest.lib import decorators
-
-from octavia_tempest_plugin.common import constants as const
-from octavia_tempest_plugin.tests import test_base
-from octavia_tempest_plugin.tests import waiters
-
-CONF = config.CONF
-
-
-class PoolScenarioTest(test_base.LoadBalancerBaseTest):
-
-    @classmethod
-    def resource_setup(cls):
-        """Setup resources needed by the tests."""
-        super(PoolScenarioTest, cls).resource_setup()
-
-        lb_name = data_utils.rand_name("lb_member_lb1_pool")
-        lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
-                     const.NAME: lb_name}
-
-        cls._setup_lb_network_kwargs(lb_kwargs)
-
-        lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
-        cls.lb_id = lb[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_lb_client.cleanup_loadbalancer,
-            cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.lb_build_interval,
-                                CONF.load_balancer.lb_build_timeout)
-
-        listener_name = data_utils.rand_name("lb_member_listener1_pool")
-        listener_kwargs = {
-            const.NAME: listener_name,
-            const.PROTOCOL: const.HTTP,
-            const.PROTOCOL_PORT: '80',
-            const.LOADBALANCER_ID: cls.lb_id,
-        }
-        listener = cls.mem_listener_client.create_listener(**listener_kwargs)
-        cls.listener_id = listener[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_listener_client.cleanup_listener,
-            cls.listener_id,
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-    @decorators.idempotent_id('dfa120bf-81b9-4f22-bb5e-7df660c18173')
-    def test_pool_standalone_CRUD(self):
-        self._test_pool_CRUD(has_listener=False)
-
-    @decorators.idempotent_id('087da8ab-79c7-48ba-871c-5769185cea3e')
-    def test_pool_with_listener_CRUD(self):
-        self._test_pool_CRUD(has_listener=True)
-
-    def _test_pool_CRUD(self, has_listener):
-        """Tests pool create, read, update, delete
-
-        * Create a fully populated pool.
-        * Show pool details.
-        * Update the pool.
-        * Delete the pool.
-        """
-        # Pool create
-        pool_name = data_utils.rand_name("lb_member_pool1-CRUD")
-        pool_description = data_utils.arbitrary_string(size=255)
-        pool_sp_cookie_name = 'my_cookie'
-        pool_kwargs = {
-            const.NAME: pool_name,
-            const.DESCRIPTION: pool_description,
-            const.ADMIN_STATE_UP: False,
-            const.PROTOCOL: const.HTTP,
-            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
-            const.SESSION_PERSISTENCE: {
-                const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
-                const.COOKIE_NAME: pool_sp_cookie_name,
-            },
-        }
-        if has_listener:
-            pool_kwargs[const.LISTENER_ID] = self.listener_id
-        else:
-            pool_kwargs[const.LOADBALANCER_ID] = self.lb_id
-
-        pool = self.mem_pool_client.create_pool(**pool_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_pool_client.cleanup_pool,
-            pool[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        pool = waiters.wait_for_status(
-            self.mem_pool_client.show_pool,
-            pool[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-
-        self.assertEqual(pool_name, pool[const.NAME])
-        self.assertEqual(pool_description, pool[const.DESCRIPTION])
-        self.assertFalse(pool[const.ADMIN_STATE_UP])
-        parser.parse(pool[const.CREATED_AT])
-        parser.parse(pool[const.UPDATED_AT])
-        UUID(pool[const.ID])
-        self.assertEqual(const.OFFLINE, pool[const.OPERATING_STATUS])
-        self.assertEqual(const.HTTP, pool[const.PROTOCOL])
-        self.assertEqual(1, len(pool[const.LOADBALANCERS]))
-        self.assertEqual(self.lb_id, pool[const.LOADBALANCERS][0][const.ID])
-        if has_listener:
-            self.assertEqual(1, len(pool[const.LISTENERS]))
-            self.assertEqual(self.listener_id,
-                             pool[const.LISTENERS][0][const.ID])
-        else:
-            self.assertEmpty(pool[const.LISTENERS])
-        self.assertEqual(const.LB_ALGORITHM_ROUND_ROBIN,
-                         pool[const.LB_ALGORITHM])
-        self.assertIsNotNone(pool.get(const.SESSION_PERSISTENCE))
-        self.assertEqual(const.SESSION_PERSISTENCE_APP_COOKIE,
-                         pool[const.SESSION_PERSISTENCE][const.TYPE])
-        self.assertEqual(pool_sp_cookie_name,
-                         pool[const.SESSION_PERSISTENCE][const.COOKIE_NAME])
-
-        # Pool update
-        new_name = data_utils.rand_name("lb_member_pool1-update")
-        new_description = data_utils.arbitrary_string(size=255,
-                                                      base_text='new')
-        pool_update_kwargs = {
-            const.NAME: new_name,
-            const.DESCRIPTION: new_description,
-            const.ADMIN_STATE_UP: True,
-            const.LB_ALGORITHM: const.LB_ALGORITHM_LEAST_CONNECTIONS,
-            const.SESSION_PERSISTENCE: {
-                const.TYPE: const.SESSION_PERSISTENCE_HTTP_COOKIE,
-            },
-        }
-        pool = self.mem_pool_client.update_pool(
-            pool[const.ID], **pool_update_kwargs)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        pool = waiters.wait_for_status(
-            self.mem_pool_client.show_pool,
-            pool[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-
-        self.assertEqual(new_name, pool[const.NAME])
-        self.assertEqual(new_description, pool[const.DESCRIPTION])
-        self.assertTrue(pool[const.ADMIN_STATE_UP])
-        self.assertEqual(const.LB_ALGORITHM_LEAST_CONNECTIONS,
-                         pool[const.LB_ALGORITHM])
-        self.assertIsNotNone(pool.get(const.SESSION_PERSISTENCE))
-        self.assertEqual(const.SESSION_PERSISTENCE_HTTP_COOKIE,
-                         pool[const.SESSION_PERSISTENCE][const.TYPE])
-        self.assertIsNone(
-            pool[const.SESSION_PERSISTENCE].get(const.COOKIE_NAME))
-
-        # Pool delete
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer,
-            self.lb_id, const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout)
-        self.mem_pool_client.delete_pool(pool[const.ID])
-
-        waiters.wait_for_deleted_status_or_not_found(
-            self.mem_pool_client.show_pool, pool[const.ID],
-            const.PROVISIONING_STATUS,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout)
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py b/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
deleted file mode 100644
index 627c261..0000000
--- a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
+++ /dev/null
@@ -1,622 +0,0 @@
-# Copyright 2018 GoDaddy
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import testtools
-
-from oslo_log import log as logging
-from tempest import config
-from tempest.lib.common.utils import data_utils
-from tempest.lib import decorators
-
-from octavia_tempest_plugin.common import constants as const
-from octavia_tempest_plugin.tests import test_base
-from octavia_tempest_plugin.tests import waiters
-
-CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
-
-@testtools.skipUnless(
-    CONF.validation.run_validation,
-    'Traffic tests will not work without run_validation enabled.')
-class TrafficOperationsScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
-
-    @classmethod
-    def resource_setup(cls):
-        """Setup resources needed by the tests."""
-        super(TrafficOperationsScenarioTest, cls).resource_setup()
-
-        lb_name = data_utils.rand_name("lb_member_lb1_operations")
-        lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
-                     const.NAME: lb_name}
-
-        # TODO(rm_work): Make this work with ipv6 and split this test for both
-        ip_version = 4
-        cls._setup_lb_network_kwargs(lb_kwargs, ip_version)
-
-        lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
-        cls.lb_id = lb[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_lb_client.cleanup_loadbalancer,
-            cls.lb_id)
-
-        if CONF.validation.connect_method == 'floating':
-            port_id = lb[const.VIP_PORT_ID]
-            result = cls.lb_mem_float_ip_client.create_floatingip(
-                floating_network_id=CONF.network.public_network_id,
-                port_id=port_id)
-            floating_ip = result['floatingip']
-            LOG.info('lb1_floating_ip: {}'.format(floating_ip))
-            cls.addClassResourceCleanup(
-                waiters.wait_for_not_found,
-                cls.lb_mem_float_ip_client.delete_floatingip,
-                cls.lb_mem_float_ip_client.show_floatingip,
-                floatingip_id=floating_ip['id'])
-            cls.lb_vip_address = floating_ip['floating_ip_address']
-        else:
-            cls.lb_vip_address = lb[const.VIP_ADDRESS]
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.lb_build_interval,
-                                CONF.load_balancer.lb_build_timeout)
-
-        listener_name = data_utils.rand_name("lb_member_listener1_operations")
-        listener_kwargs = {
-            const.NAME: listener_name,
-            const.PROTOCOL: const.HTTP,
-            const.PROTOCOL_PORT: '80',
-            const.LOADBALANCER_ID: cls.lb_id,
-        }
-        listener = cls.mem_listener_client.create_listener(**listener_kwargs)
-        cls.listener_id = listener[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_listener_client.cleanup_listener,
-            cls.listener_id,
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-        pool_name = data_utils.rand_name("lb_member_pool1_operations")
-        pool_kwargs = {
-            const.NAME: pool_name,
-            const.PROTOCOL: const.HTTP,
-            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
-            const.LISTENER_ID: cls.listener_id,
-        }
-        pool = cls.mem_pool_client.create_pool(**pool_kwargs)
-        cls.pool_id = pool[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_pool_client.cleanup_pool,
-            cls.pool_id,
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
-
-        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
-                                cls.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-    @testtools.skipIf(CONF.load_balancer.test_with_noop,
-                      'Traffic tests will not work in noop mode.')
-    @decorators.idempotent_id('6751135d-e15a-4e22-89f4-bfcc3408d424')
-    def test_basic_traffic(self):
-        """Tests sending traffic through a loadbalancer
-
-        * Set up members on a loadbalancer.
-        * Test traffic to ensure it is balanced properly.
-        """
-        # Set up Member 1 for Webserver 1
-        member1_name = data_utils.rand_name("lb_member_member1-traffic")
-        member1_kwargs = {
-            const.POOL_ID: self.pool_id,
-            const.NAME: member1_name,
-            const.ADMIN_STATE_UP: True,
-            const.ADDRESS: self.webserver1_ip,
-            const.PROTOCOL_PORT: 80,
-        }
-        if self.lb_member_1_subnet:
-            member1_kwargs[const.SUBNET_ID] = self.lb_member_1_subnet[const.ID]
-
-        member1 = self.mem_member_client.create_member(
-            **member1_kwargs)
-        self.addCleanup(
-            self.mem_member_client.cleanup_member,
-            member1[const.ID], pool_id=self.pool_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout)
-
-        # Set up Member 2 for Webserver 2
-        member2_name = data_utils.rand_name("lb_member_member2-traffic")
-        member2_kwargs = {
-            const.POOL_ID: self.pool_id,
-            const.NAME: member2_name,
-            const.ADMIN_STATE_UP: True,
-            const.ADDRESS: self.webserver2_ip,
-            const.PROTOCOL_PORT: 80,
-        }
-        if self.lb_member_2_subnet:
-            member2_kwargs[const.SUBNET_ID] = self.lb_member_2_subnet[const.ID]
-
-        member2 = self.mem_member_client.create_member(
-            **member2_kwargs)
-        self.addCleanup(
-            self.mem_member_client.cleanup_member,
-            member2[const.ID], pool_id=self.pool_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout)
-
-        # Send some traffic
-        self.check_members_balanced(self.lb_vip_address)
-
-    @decorators.idempotent_id('a16f8eb4-a77c-4b0e-8b1b-91c237039713')
-    def test_healthmonitor_traffic(self):
-        """Tests traffic is correctly routed based on healthmonitor status
-
-        * Create three members:
-          * One should be working, and ONLINE with a healthmonitor (passing)
-          * One should be working, and ERROR with a healthmonitor (failing)
-          * One should be disabled, and OFFLINE with a healthmonitor
-        * Verify members are in their correct respective operating statuses.
-        * Verify that traffic is balanced evenly between the working members.
-        * Create a fully populated healthmonitor.
-        * Verify members are in their correct respective operating statuses.
-        * Verify that traffic is balanced *unevenly*.
-        * Delete the healthmonitor.
-        * Verify members are in their correct respective operating statuses.
-        * Verify that traffic is balanced evenly between the working members.
-        """
-        member1_name = data_utils.rand_name("lb_member_member1-hm-traffic")
-        member1_kwargs = {
-            const.POOL_ID: self.pool_id,
-            const.NAME: member1_name,
-            const.ADMIN_STATE_UP: True,
-            const.ADDRESS: self.webserver1_ip,
-            const.PROTOCOL_PORT: 80,
-        }
-        if self.lb_member_1_subnet:
-            member1_kwargs[const.SUBNET_ID] = self.lb_member_1_subnet[const.ID]
-
-        member1 = self.mem_member_client.create_member(
-            **member1_kwargs)
-        member1_id = member1[const.ID]
-        self.addCleanup(
-            self.mem_member_client.cleanup_member,
-            member1_id, pool_id=self.pool_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout)
-
-        # Set up Member 2 for Webserver 2
-        member2_name = data_utils.rand_name("lb_member_member2-hm-traffic")
-        member2_kwargs = {
-            const.POOL_ID: self.pool_id,
-            const.NAME: member2_name,
-            const.ADMIN_STATE_UP: True,
-            const.ADDRESS: self.webserver2_ip,
-            const.PROTOCOL_PORT: 80,
-            const.MONITOR_PORT: 9999,  # We want this to go offline with a HM
-        }
-        if self.lb_member_2_subnet:
-            member2_kwargs[const.SUBNET_ID] = self.lb_member_2_subnet[const.ID]
-
-        member2 = self.mem_member_client.create_member(
-            **member2_kwargs)
-        member2_id = member2[const.ID]
-        self.addCleanup(
-            self.mem_member_client.cleanup_member,
-            member2_id, pool_id=self.pool_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout)
-
-        # Set up Member 3 as a non-existent disabled node
-        member3_name = data_utils.rand_name("lb_member_member3-hm-traffic")
-        member3_kwargs = {
-            const.POOL_ID: self.pool_id,
-            const.NAME: member3_name,
-            const.ADMIN_STATE_UP: False,
-            const.ADDRESS: '192.0.2.1',
-            const.PROTOCOL_PORT: 80,
-        }
-
-        member3 = self.mem_member_client.create_member(
-            **member3_kwargs)
-        member3_id = member3[const.ID]
-        self.addCleanup(
-            self.mem_member_client.cleanup_member,
-            member3_id, pool_id=self.pool_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout)
-
-        # Wait for members to adjust to the correct OPERATING_STATUS
-        waiters.wait_for_status(
-            self.mem_member_client.show_member,
-            member1_id, const.OPERATING_STATUS,
-            const.NO_MONITOR,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout,
-            pool_id=self.pool_id)
-        waiters.wait_for_status(
-            self.mem_member_client.show_member,
-            member2_id, const.OPERATING_STATUS,
-            const.NO_MONITOR,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout,
-            pool_id=self.pool_id)
-        waiters.wait_for_status(
-            self.mem_member_client.show_member,
-            member3_id, const.OPERATING_STATUS,
-            const.OFFLINE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout,
-            pool_id=self.pool_id)
-
-        # Send some traffic and verify it is balanced
-        self.check_members_balanced(self.lb_vip_address,
-                                    traffic_member_count=2)
-
-        # Create the healthmonitor
-        hm_name = data_utils.rand_name("lb_member_hm1-hm-traffic")
-        hm_kwargs = {
-            const.POOL_ID: self.pool_id,
-            const.NAME: hm_name,
-            const.TYPE: const.HEALTH_MONITOR_HTTP,
-            const.DELAY: 2,
-            const.TIMEOUT: 2,
-            const.MAX_RETRIES: 2,
-            const.MAX_RETRIES_DOWN: 2,
-            const.HTTP_METHOD: const.GET,
-            const.URL_PATH: '/',
-            const.EXPECTED_CODES: '200',
-            const.ADMIN_STATE_UP: True,
-        }
-
-        hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
-        self.addCleanup(
-            self.mem_healthmonitor_client.cleanup_healthmonitor,
-            hm[const.ID], lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        hm = waiters.wait_for_status(
-            self.mem_healthmonitor_client.show_healthmonitor,
-            hm[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-
-        # Wait for members to adjust to the correct OPERATING_STATUS
-        waiters.wait_for_status(
-            self.mem_member_client.show_member,
-            member1_id, const.OPERATING_STATUS,
-            const.ONLINE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout,
-            pool_id=self.pool_id)
-        waiters.wait_for_status(
-            self.mem_member_client.show_member,
-            member2_id, const.OPERATING_STATUS,
-            const.ERROR,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout,
-            pool_id=self.pool_id)
-        waiters.wait_for_status(
-            self.mem_member_client.show_member,
-            member3_id, const.OPERATING_STATUS,
-            const.OFFLINE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout,
-            pool_id=self.pool_id)
-
-        # Send some traffic and verify it is *unbalanced*, as expected
-        self.check_members_balanced(self.lb_vip_address,
-                                    traffic_member_count=1)
-
-        # Delete the healthmonitor
-        self.mem_healthmonitor_client.delete_healthmonitor(hm[const.ID])
-
-        waiters.wait_for_deleted_status_or_not_found(
-            self.mem_healthmonitor_client.show_healthmonitor, hm[const.ID],
-            const.PROVISIONING_STATUS,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout)
-
-        # Wait for members to adjust to the correct OPERATING_STATUS
-        waiters.wait_for_status(
-            self.mem_member_client.show_member,
-            member1_id, const.OPERATING_STATUS,
-            const.NO_MONITOR,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout,
-            pool_id=self.pool_id)
-        waiters.wait_for_status(
-            self.mem_member_client.show_member,
-            member2_id, const.OPERATING_STATUS,
-            const.NO_MONITOR,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout,
-            pool_id=self.pool_id)
-        waiters.wait_for_status(
-            self.mem_member_client.show_member,
-            member3_id, const.OPERATING_STATUS,
-            const.OFFLINE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout,
-            pool_id=self.pool_id)
-
-        # Send some traffic and verify it is balanced again
-        self.check_members_balanced(self.lb_vip_address)
-
-    @decorators.idempotent_id('3558186d-6dcd-4d9d-b7f7-adc190b66149')
-    def test_l7policies_and_l7rules(self):
-        """Tests sending traffic through a loadbalancer with l7rules
-
-        * Create an extra pool.
-        * Put one member on the default pool, and one on the second pool.
-        * Create a policy/rule to redirect to the second pool.
-        * Create a policy/rule to redirect to the identity URI.
-        * Create a policy/rule to reject connections.
-        * Test traffic to ensure it goes to the correct place.
-        """
-        # Create a second pool
-        pool_name = data_utils.rand_name("lb_member_pool2_l7redirect")
-        pool_kwargs = {
-            const.NAME: pool_name,
-            const.PROTOCOL: const.HTTP,
-            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
-            const.LOADBALANCER_ID: self.lb_id,
-        }
-        pool = self.mem_pool_client.create_pool(**pool_kwargs)
-        pool_id = pool[const.ID]
-        self.addCleanup(
-            self.mem_pool_client.cleanup_pool,
-            pool_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
-                                self.lb_id, const.PROVISIONING_STATUS,
-                                const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
-
-        # Set up Member 1 for Webserver 1 on the default pool
-        member1_name = data_utils.rand_name("lb_member_member1-l7redirect")
-        member1_kwargs = {
-            const.POOL_ID: self.pool_id,
-            const.NAME: member1_name,
-            const.ADMIN_STATE_UP: True,
-            const.ADDRESS: self.webserver1_ip,
-            const.PROTOCOL_PORT: 80,
-        }
-        if self.lb_member_1_subnet:
-            member1_kwargs[const.SUBNET_ID] = self.lb_member_1_subnet[const.ID]
-
-        member1 = self.mem_member_client.create_member(
-            **member1_kwargs)
-        self.addCleanup(
-            self.mem_member_client.cleanup_member,
-            member1[const.ID], pool_id=self.pool_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout)
-
-        # Set up Member 2 for Webserver 2 on the alternate pool
-        member2_name = data_utils.rand_name("lb_member_member2-l7redirect")
-        member2_kwargs = {
-            const.POOL_ID: pool_id,
-            const.NAME: member2_name,
-            const.ADMIN_STATE_UP: True,
-            const.ADDRESS: self.webserver2_ip,
-            const.PROTOCOL_PORT: 80,
-        }
-        if self.lb_member_2_subnet:
-            member2_kwargs[const.SUBNET_ID] = self.lb_member_2_subnet[const.ID]
-
-        member2 = self.mem_member_client.create_member(
-            **member2_kwargs)
-        self.addCleanup(
-            self.mem_member_client.cleanup_member,
-            member2[const.ID], pool_id=self.pool_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout)
-
-        # Create the l7policy to redirect to the alternate pool
-        l7policy1_name = data_utils.rand_name("lb_member_l7policy1-l7redirect")
-        l7policy1_description = data_utils.arbitrary_string(size=255)
-        l7policy1_kwargs = {
-            const.LISTENER_ID: self.listener_id,
-            const.NAME: l7policy1_name,
-            const.DESCRIPTION: l7policy1_description,
-            const.ADMIN_STATE_UP: True,
-            const.POSITION: 1,
-            const.ACTION: const.REDIRECT_TO_POOL,
-            const.REDIRECT_POOL_ID: pool_id,
-        }
-        l7policy1 = self.mem_l7policy_client.create_l7policy(
-            **l7policy1_kwargs)
-        self.addCleanup(
-            self.mem_l7policy_client.cleanup_l7policy,
-            l7policy1[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-
-        # Redirect slow queries to the alternate pool
-        l7rule1_kwargs = {
-            const.L7POLICY_ID: l7policy1[const.ID],
-            const.ADMIN_STATE_UP: True,
-            const.TYPE: const.PATH,
-            const.VALUE: '/slow',
-            const.COMPARE_TYPE: const.STARTS_WITH,
-            const.INVERT: False,
-        }
-
-        l7rule1 = self.mem_l7rule_client.create_l7rule(**l7rule1_kwargs)
-        self.addCleanup(
-            self.mem_l7rule_client.cleanup_l7rule,
-            l7rule1[const.ID], l7policy_id=l7rule1_kwargs[const.L7POLICY_ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-
-        # Create the l7policy to redirect to the identity URI
-        l7policy2_name = data_utils.rand_name("lb_member_l7policy2-l7redirect")
-        l7policy2_description = data_utils.arbitrary_string(size=255)
-        l7policy2_kwargs = {
-            const.LISTENER_ID: self.listener_id,
-            const.NAME: l7policy2_name,
-            const.DESCRIPTION: l7policy2_description,
-            const.ADMIN_STATE_UP: True,
-            const.POSITION: 1,
-            const.ACTION: const.REDIRECT_TO_URL,
-            const.REDIRECT_URL: CONF.identity.uri_v3,
-        }
-        l7policy2 = self.mem_l7policy_client.create_l7policy(
-            **l7policy2_kwargs)
-        self.addCleanup(
-            self.mem_l7policy_client.cleanup_l7policy,
-            l7policy2[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-
-        # Redirect queries for 'turtles' to identity
-        l7rule2_kwargs = {
-            const.L7POLICY_ID: l7policy2[const.ID],
-            const.ADMIN_STATE_UP: True,
-            const.TYPE: const.PATH,
-            const.VALUE: '/turtles',
-            const.COMPARE_TYPE: const.EQUAL_TO,
-            const.INVERT: False,
-        }
-
-        l7rule2 = self.mem_l7rule_client.create_l7rule(**l7rule2_kwargs)
-        self.addCleanup(
-            self.mem_l7rule_client.cleanup_l7rule,
-            l7rule2[const.ID], l7policy_id=l7rule2_kwargs[const.L7POLICY_ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-
-        # Create the l7policy to reject requests
-        l7policy3_name = data_utils.rand_name("lb_member_l7policy3-l7redirect")
-        l7policy3_description = data_utils.arbitrary_string(size=255)
-        l7policy3_kwargs = {
-            const.LISTENER_ID: self.listener_id,
-            const.NAME: l7policy3_name,
-            const.DESCRIPTION: l7policy3_description,
-            const.ADMIN_STATE_UP: True,
-            const.POSITION: 1,
-            const.ACTION: const.REJECT,
-        }
-        l7policy3 = self.mem_l7policy_client.create_l7policy(
-            **l7policy3_kwargs)
-        self.addCleanup(
-            self.mem_l7policy_client.cleanup_l7policy,
-            l7policy3[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-
-        # Reject requests that include the header data 'reject=true'
-        l7rule3_kwargs = {
-            const.L7POLICY_ID: l7policy3[const.ID],
-            const.ADMIN_STATE_UP: True,
-            const.TYPE: const.HEADER,
-            const.KEY: 'reject',
-            const.VALUE: 'true',
-            const.COMPARE_TYPE: const.EQUAL_TO,
-            const.INVERT: False,
-        }
-
-        l7rule3 = self.mem_l7rule_client.create_l7rule(**l7rule3_kwargs)
-        self.addCleanup(
-            self.mem_l7rule_client.cleanup_l7rule,
-            l7rule3[const.ID], l7policy_id=l7rule3_kwargs[const.L7POLICY_ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-
-        # Assert that normal traffic goes to pool1->member1
-        url_for_member1 = 'http://{}/'.format(self.lb_vip_address)
-        self.assertConsistentResponse((200, self.webserver1_response),
-                                      url_for_member1)
-
-        # Assert that slow traffic goes to pool2->member2
-        url_for_member2 = 'http://{}/slow?delay=1s'.format(self.lb_vip_address)
-        self.assertConsistentResponse((200, self.webserver2_response),
-                                      url_for_member2)
-
-        # Assert that /turtles is redirected to identity
-        url_for_identity = 'http://{}/turtles'.format(self.lb_vip_address)
-        self.assertConsistentResponse((302, CONF.identity.uri_v3),
-                                      url_for_identity,
-                                      redirect=True)
-
-        # Assert that traffic with header 'reject=true' is rejected
-        self.assertConsistentResponse((403, None),
-                                      url_for_member1,
-                                      headers={'reject': 'true'})