[TF] Add support for the TungstenFabric driver for Octavia

Skip tests that TungstenFabric does not support and avoid unsupported
parameters for load balancers, listeners, pools, and members, in order
to satisfy Contrail restrictions.
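
Unsupported API tests are skipped via a provider check in
skip_checks(); a minimal sketch of the pattern applied throughout
(the class name here is a placeholder):

    @classmethod
    def skip_checks(cls):
        # ExampleAPITest is a placeholder for the affected test classes
        super(ExampleAPITest, cls).skip_checks()
        if CONF.load_balancer.provider == 'tungstenfabric':
            raise cls.skipException('Not supported by TungstenFabric.')

Contrail cannot create a pool that is not attached to a listener, so
the affected tests create a listener first and pass listener_id
instead of loadbalancer_id when building the pool kwargs, e.g.:

    if CONF.load_balancer.provider == 'tungstenfabric':
        listener = self._create_tf_listener(
            listener_name, const.HTTP, 80, self.lb_id)
        pool_kwargs.update({const.LISTENER_ID: listener[const.ID]})
    else:
        pool_kwargs.update({const.LOADBALANCER_ID: self.lb_id})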

Related-PROD: PRODX-7072
Change-Id: I736f55fa4186464424e1ebc05c650f9a43375c62
diff --git a/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby.py b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby.py
index 5655b3f..b4df6c9 100644
--- a/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby.py
+++ b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby.py
@@ -171,6 +171,8 @@
             CONF.load_balancer.check_interval,
             CONF.load_balancer.check_timeout)
 
+    @testtools.skipIf(CONF.load_balancer.provider == 'tungstenfabric',
+                      "Not supported by TungstenFabric")
     @testtools.skipIf(CONF.load_balancer.test_with_noop,
                       'Active/Standby tests will not work in noop mode.')
     @decorators.idempotent_id('e591fa7a-0eee-485a-8ca0-5cf1a556bdf0')
diff --git a/octavia_tempest_plugin/tests/api/v2/test_availability_zone.py b/octavia_tempest_plugin/tests/api/v2/test_availability_zone.py
index 023d5f5..c40c0bd 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_availability_zone.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_availability_zone.py
@@ -32,6 +32,12 @@
     """Test the availability zone object API."""
 
     @classmethod
+    def skip_checks(cls):
+        super(AvailabilityZoneAPITest, cls).skip_checks()
+        if CONF.load_balancer.provider == 'tungstenfabric':
+            raise cls.skipException('Not supported by TungstenFabric.')
+
+    @classmethod
     def resource_setup(cls):
         """Setup resources needed by the tests."""
         super(AvailabilityZoneAPITest, cls).resource_setup()
diff --git a/octavia_tempest_plugin/tests/api/v2/test_availability_zone_capabilities.py b/octavia_tempest_plugin/tests/api/v2/test_availability_zone_capabilities.py
index 4a12057..582d6b8 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_availability_zone_capabilities.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_availability_zone_capabilities.py
@@ -26,6 +26,12 @@
 class AvailabilityZoneCapabilitiesAPITest(test_base.LoadBalancerBaseTest):
     """Test the provider availability zone capabilities API."""
 
+    @classmethod
+    def skip_checks(cls):
+        super(AvailabilityZoneCapabilitiesAPITest, cls).skip_checks()
+        if CONF.load_balancer.provider == 'tungstenfabric':
+            raise cls.skipException('Not supported by TungstenFabric.')
+
     @decorators.idempotent_id('cb3e4c59-4114-420b-9837-2666d4d5fef4')
     def test_availability_zone_capabilities_list(self):
         """Tests provider availability zone capabilities list API/filtering.
diff --git a/octavia_tempest_plugin/tests/api/v2/test_availability_zone_profile.py b/octavia_tempest_plugin/tests/api/v2/test_availability_zone_profile.py
index 86ae066..414f5d2 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_availability_zone_profile.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_availability_zone_profile.py
@@ -32,6 +32,12 @@
 class AvailabilityZoneProfileAPITest(test_base.LoadBalancerBaseTest):
     """Test the availability zone profile object API."""
 
+    @classmethod
+    def skip_checks(cls):
+        super(AvailabilityZoneProfileAPITest, cls).skip_checks()
+        if CONF.load_balancer.provider == 'tungstenfabric':
+            raise cls.skipException('Not supported by TungstenFabric.')
+
     @decorators.idempotent_id('e512b580-ef32-44c3-bbd2-efdc27ba2ea6')
     def test_availability_zone_profile_create(self):
         """Tests availability zone profile create and basic show APIs.
diff --git a/octavia_tempest_plugin/tests/api/v2/test_flavor.py b/octavia_tempest_plugin/tests/api/v2/test_flavor.py
index 4dd896c..6706d21 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_flavor.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_flavor.py
@@ -32,6 +32,12 @@
     """Test the flavor object API."""
 
     @classmethod
+    def skip_checks(cls):
+        super(FlavorAPITest, cls).skip_checks()
+        if CONF.load_balancer.provider == 'tungstenfabric':
+            raise cls.skipException('Not supported by TungstenFabric.')
+
+    @classmethod
     def resource_setup(cls):
         """Setup resources needed by the tests."""
         super(FlavorAPITest, cls).resource_setup()
diff --git a/octavia_tempest_plugin/tests/api/v2/test_flavor_capabilities.py b/octavia_tempest_plugin/tests/api/v2/test_flavor_capabilities.py
index 7f9da51..3b6b67e 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_flavor_capabilities.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_flavor_capabilities.py
@@ -25,6 +25,12 @@
 class FlavorCapabilitiesAPITest(test_base.LoadBalancerBaseTest):
     """Test the provider flavor capabilities API."""
 
+    @classmethod
+    def skip_checks(cls):
+        super(FlavorCapabilitiesAPITest, cls).skip_checks()
+        if CONF.load_balancer.provider == 'tungstenfabric':
+            raise cls.skipException('Not supported by TungstenFabric.')
+
     @decorators.idempotent_id('df837ee3-ca4b-4a4d-a7a3-27fa57cf3a33')
     def test_flavor_capabilities_list(self):
         """Tests provider flavor capabilities list API and field filtering.
diff --git a/octavia_tempest_plugin/tests/api/v2/test_flavor_profile.py b/octavia_tempest_plugin/tests/api/v2/test_flavor_profile.py
index c936922..38b2a29 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_flavor_profile.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_flavor_profile.py
@@ -31,6 +31,12 @@
 class FlavorProfileAPITest(test_base.LoadBalancerBaseTest):
     """Test the flavor profile object API."""
 
+    @classmethod
+    def skip_checks(cls):
+        super(FlavorProfileAPITest, cls).skip_checks()
+        if CONF.load_balancer.provider == 'tungstenfabric':
+            raise cls.skipException('Not supported by TungstenFabric.')
+
     @decorators.idempotent_id('d0e3a08e-d58a-4460-83ed-34307ca04cde')
     @decorators.attr(type='smoke')
     def test_flavor_profile_create(self):
diff --git a/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py b/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py
index 1feb9d9..740b7d8 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py
@@ -62,6 +62,11 @@
                                 CONF.load_balancer.lb_build_interval,
                                 CONF.load_balancer.lb_build_timeout)
 
+    def _create_tf_listener(self, name, port):
+        listener = super(HealthMonitorAPITest, self)._create_tf_listener(
+            name, const.HTTP, port, self.lb_id)
+        return listener
+
     @decorators.idempotent_id('30288670-5772-40c2-92e6-6d4a6d62d029')
     def test_healthmonitor_create(self):
         """Tests healthmonitor create and basic show APIs.
@@ -78,8 +83,18 @@
             const.NAME: pool_name,
             const.PROTOCOL: const.HTTP,
             const.LB_ALGORITHM: self.lb_algorithm,
-            const.LOADBALANCER_ID: self.lb_id,
         }
+        if CONF.load_balancer.provider == 'tungstenfabric':
+            listener_name = data_utils.rand_name("lb_hm_listener1-create")
+            listener = self._create_tf_listener(listener_name, 80)
+            listener_id = listener[const.ID]
+            self.addCleanup(
+                self.mem_listener_client.cleanup_listener, listener_id,
+                lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+            pool_kwargs.update({const.LISTENER_ID: listener_id})
+        else:
+            pool_kwargs.update({const.LOADBALANCER_ID: self.lb_id})
 
         pool = self.mem_pool_client.create_pool(**pool_kwargs)
         self.addCleanup(
@@ -181,8 +196,18 @@
             const.NAME: pool1_name,
             const.PROTOCOL: const.HTTP,
             const.LB_ALGORITHM: self.lb_algorithm,
-            const.LOADBALANCER_ID: self.lb_id,
         }
+        if CONF.load_balancer.provider == 'tungstenfabric':
+            listener_name = data_utils.rand_name("lb_hm_listener1-list")
+            listener1 = self._create_tf_listener(listener_name, 81)
+            listener1_id = listener1[const.ID]
+            self.addCleanup(
+                self.mem_listener_client.cleanup_listener, listener1_id,
+                lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+            pool1_kwargs.update({const.LISTENER_ID: listener1_id})
+        else:
+            pool1_kwargs.update({const.LOADBALANCER_ID: self.lb_id})
 
         pool1 = self.mem_pool_client.create_pool(**pool1_kwargs)
         pool1_id = pool1[const.ID]
@@ -203,6 +228,17 @@
             const.LB_ALGORITHM: self.lb_algorithm,
             const.LOADBALANCER_ID: self.lb_id,
         }
+        if CONF.load_balancer.provider == 'tungstenfabric':
+            listener_name = data_utils.rand_name("lb_hm_listener2-list")
+            listener2 = self._create_tf_listener(listener_name, 82)
+            listener2_id = listener2[const.ID]
+            self.addCleanup(
+                self.mem_listener_client.cleanup_listener, listener2_id,
+                lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+            pool2_kwargs.update({const.LISTENER_ID: listener2_id})
+        else:
+            pool2_kwargs.update({const.LOADBALANCER_ID: self.lb_id})
 
         pool2 = self.mem_pool_client.create_pool(**pool2_kwargs)
         pool2_id = pool2[const.ID]
@@ -221,8 +257,18 @@
             const.NAME: pool3_name,
             const.PROTOCOL: const.HTTP,
             const.LB_ALGORITHM: self.lb_algorithm,
-            const.LOADBALANCER_ID: self.lb_id,
         }
+        if CONF.load_balancer.provider == 'tungstenfabric':
+            listener_name = data_utils.rand_name("lb_hm_listener1-list")
+            listener3 = self._create_tf_listener(listener_name, 83)
+            listener3_id = listener3[const.ID]
+            self.addCleanup(
+                self.mem_listener_client.cleanup_listener, listener3_id,
+                lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+            pool3_kwargs.update({const.LISTENER_ID: listener3_id})
+        else:
+            pool3_kwargs.update({const.LOADBALANCER_ID: self.lb_id})
 
         pool3 = self.mem_pool_client.create_pool(**pool3_kwargs)
         pool3_id = pool3[const.ID]
@@ -470,8 +516,18 @@
             const.NAME: pool_name,
             const.PROTOCOL: const.HTTP,
             const.LB_ALGORITHM: self.lb_algorithm,
-            const.LOADBALANCER_ID: self.lb_id,
         }
+        if CONF.load_balancer.provider == 'tungstenfabric':
+            listener_name = data_utils.rand_name("lb_hm_listener1-create")
+            listener = self._create_tf_listener(listener_name, 84)
+            listener_id = listener[const.ID]
+            self.addCleanup(
+                self.mem_listener_client.cleanup_listener, listener_id,
+                lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+            pool_kwargs.update({const.LISTENER_ID: listener_id})
+        else:
+            pool_kwargs.update({const.LOADBALANCER_ID: self.lb_id})
 
         pool = self.mem_pool_client.create_pool(**pool_kwargs)
         self.addCleanup(
@@ -578,8 +634,18 @@
             const.NAME: pool_name,
             const.PROTOCOL: const.HTTP,
             const.LB_ALGORITHM: self.lb_algorithm,
-            const.LOADBALANCER_ID: self.lb_id,
         }
+        if CONF.load_balancer.provider == 'tungstenfabric':
+            listener_name = data_utils.rand_name("lb_hm_listener1-create")
+            listener = self._create_tf_listener(listener_name, 85)
+            listener_id = listener[const.ID]
+            self.addCleanup(
+                self.mem_listener_client.cleanup_listener, listener_id,
+                lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+            pool_kwargs.update({const.LISTENER_ID: listener_id})
+        else:
+            pool_kwargs.update({const.LOADBALANCER_ID: self.lb_id})
 
         pool = self.mem_pool_client.create_pool(**pool_kwargs)
         self.addCleanup(
@@ -735,8 +801,18 @@
             const.NAME: pool_name,
             const.PROTOCOL: const.HTTP,
             const.LB_ALGORITHM: self.lb_algorithm,
-            const.LOADBALANCER_ID: self.lb_id,
         }
+        if CONF.load_balancer.provider == 'tungstenfabric':
+            listener_name = data_utils.rand_name("lb_hm_listener1-create")
+            listener = self._create_tf_listener(listener_name, 86)
+            listener_id = listener[const.ID]
+            self.addCleanup(
+                self.mem_listener_client.cleanup_listener, listener_id,
+                lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+            pool_kwargs.update({const.LISTENER_ID: listener_id})
+        else:
+            pool_kwargs.update({const.LOADBALANCER_ID: self.lb_id})
 
         pool = self.mem_pool_client.create_pool(**pool_kwargs)
         self.addCleanup(
diff --git a/octavia_tempest_plugin/tests/api/v2/test_listener.py b/octavia_tempest_plugin/tests/api/v2/test_listener.py
index 3a45656..6bd1a7e 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_listener.py
@@ -80,10 +80,6 @@
             const.PROTOCOL_PORT: 80,
             const.LOADBALANCER_ID: self.lb_id,
             const.CONNECTION_LIMIT: 200,
-            const.INSERT_HEADERS: {
-                const.X_FORWARDED_FOR: "true",
-                const.X_FORWARDED_PORT: "true"
-            },
             # Don't test with a default pool -- we'll do that in the scenario,
             # but this will allow us to test that the field isn't mandatory,
             # as well as not conflate pool failures with listener test failures
@@ -93,6 +89,13 @@
             # const.DEFAULT_TLS_CONTAINER_REF: '',
             # const.SNI_CONTAINER_REFS: [],
         }
+        if CONF.load_balancer.provider != 'tungstenfabric':
+            listener_kwargs.update({
+                const.INSERT_HEADERS: {
+                    const.X_FORWARDED_FOR: "true",
+                    const.X_FORWARDED_PORT: "true"
+                }
+            })
         if self.mem_listener_client.is_version_supported(
                 self.api_version, '2.1'):
             listener_kwargs.update({
@@ -149,11 +152,14 @@
         self.assertEqual(self.protocol, listener[const.PROTOCOL])
         self.assertEqual(80, listener[const.PROTOCOL_PORT])
         self.assertEqual(200, listener[const.CONNECTION_LIMIT])
-        insert_headers = listener[const.INSERT_HEADERS]
-        self.assertTrue(
-            strutils.bool_from_string(insert_headers[const.X_FORWARDED_FOR]))
-        self.assertTrue(
-            strutils.bool_from_string(insert_headers[const.X_FORWARDED_PORT]))
+        if CONF.load_balancer.provider != 'tungstenfabric':
+            insert_headers = listener[const.INSERT_HEADERS]
+            self.assertTrue(
+                strutils.bool_from_string(
+                    insert_headers[const.X_FORWARDED_FOR]))
+            self.assertTrue(
+                strutils.bool_from_string(
+                    insert_headers[const.X_FORWARDED_PORT]))
         if self.mem_listener_client.is_version_supported(
                 self.api_version, '2.1'):
             self.assertEqual(1000, listener[const.TIMEOUT_CLIENT_DATA])
@@ -311,9 +317,16 @@
         * List the listeners filtered, one field, and sorted.
         """
         lb_name = data_utils.rand_name("lb_member_lb2_listener-list")
-        lb = self.mem_lb_client.create_loadbalancer(
-            name=lb_name, provider=CONF.load_balancer.provider,
-            vip_network_id=self.lb_member_vip_net[const.ID])
+        lb_kwargs = {const.NAME: lb_name,
+                     const.PROVIDER: CONF.load_balancer.provider}
+        if CONF.load_balancer.provider == 'tungstenfabric':
+            self._setup_lb_network_kwargs(lb_kwargs, 4)
+        else:
+            lb_kwargs.update({
+                const.VIP_NETWORK_ID: self.lb_member_vip_net[const.ID]
+            })
+
+        lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)
         lb_id = lb[const.ID]
         self.addCleanup(
             self.mem_lb_client.cleanup_loadbalancer,
@@ -562,15 +575,18 @@
             const.PROTOCOL_PORT: 81,
             const.LOADBALANCER_ID: self.lb_id,
             const.CONNECTION_LIMIT: 200,
-            const.INSERT_HEADERS: {
-                const.X_FORWARDED_FOR: "true",
-                const.X_FORWARDED_PORT: "true"
-            },
             # TODO(rm_work): need to finish the rest of this stuff
             # const.DEFAULT_POOL_ID: '',
             # const.DEFAULT_TLS_CONTAINER_REF: '',
             # const.SNI_CONTAINER_REFS: [],
         }
+        if CONF.load_balancer.provider != 'tungstenfabric':
+            listener_kwargs.update({
+                const.INSERT_HEADERS: {
+                    const.X_FORWARDED_FOR: "true",
+                    const.X_FORWARDED_PORT: "true"
+                }
+            })
 
         if self.mem_listener_client.is_version_supported(
                 self.api_version, '2.1'):
@@ -620,11 +636,14 @@
         self.assertEqual(self.protocol, listener[const.PROTOCOL])
         self.assertEqual(81, listener[const.PROTOCOL_PORT])
         self.assertEqual(200, listener[const.CONNECTION_LIMIT])
-        insert_headers = listener[const.INSERT_HEADERS]
-        self.assertTrue(
-            strutils.bool_from_string(insert_headers[const.X_FORWARDED_FOR]))
-        self.assertTrue(
-            strutils.bool_from_string(insert_headers[const.X_FORWARDED_PORT]))
+        if CONF.load_balancer.provider != 'tungstenfabric':
+            insert_headers = listener[const.INSERT_HEADERS]
+            self.assertTrue(
+                strutils.bool_from_string(
+                    insert_headers[const.X_FORWARDED_FOR]))
+            self.assertTrue(
+                strutils.bool_from_string(
+                    insert_headers[const.X_FORWARDED_PORT]))
 
         if self.mem_listener_client.is_version_supported(
                 self.api_version, '2.1'):
@@ -684,15 +703,18 @@
             const.PROTOCOL_PORT: 82,
             const.LOADBALANCER_ID: self.lb_id,
             const.CONNECTION_LIMIT: 200,
-            const.INSERT_HEADERS: {
-                const.X_FORWARDED_FOR: "true",
-                const.X_FORWARDED_PORT: "true"
-            },
             # TODO(rm_work): need to finish the rest of this stuff
             # const.DEFAULT_POOL_ID: '',
             # const.DEFAULT_TLS_CONTAINER_REF: '',
             # const.SNI_CONTAINER_REFS: [],
         }
+        if CONF.load_balancer.provider != 'tungstenfabric':
+            listener_kwargs.update({
+                const.INSERT_HEADERS: {
+                    const.X_FORWARDED_FOR: "true",
+                    const.X_FORWARDED_PORT: "true"
+                }
+            })
         if self.mem_listener_client.is_version_supported(
                 self.api_version, '2.1'):
             listener_kwargs.update({
@@ -731,11 +753,14 @@
         self.assertEqual(self.protocol, listener[const.PROTOCOL])
         self.assertEqual(82, listener[const.PROTOCOL_PORT])
         self.assertEqual(200, listener[const.CONNECTION_LIMIT])
-        insert_headers = listener[const.INSERT_HEADERS]
-        self.assertTrue(
-            strutils.bool_from_string(insert_headers[const.X_FORWARDED_FOR]))
-        self.assertTrue(
-            strutils.bool_from_string(insert_headers[const.X_FORWARDED_PORT]))
+        if CONF.load_balancer.provider != 'tungstenfabric':
+            insert_headers = listener[const.INSERT_HEADERS]
+            self.assertTrue(
+                strutils.bool_from_string(
+                    insert_headers[const.X_FORWARDED_FOR]))
+            self.assertTrue(
+                strutils.bool_from_string(
+                    insert_headers[const.X_FORWARDED_PORT]))
         if self.mem_listener_client.is_version_supported(
                 self.api_version, '2.1'):
             self.assertEqual(1000, listener[const.TIMEOUT_CLIENT_DATA])
@@ -781,15 +806,18 @@
             const.DESCRIPTION: new_description,
             const.ADMIN_STATE_UP: True,
             const.CONNECTION_LIMIT: 400,
-            const.INSERT_HEADERS: {
-                const.X_FORWARDED_FOR: "false",
-                const.X_FORWARDED_PORT: "false"
-            },
             # TODO(rm_work): need to finish the rest of this stuff
             # const.DEFAULT_POOL_ID: '',
             # const.DEFAULT_TLS_CONTAINER_REF: '',
             # const.SNI_CONTAINER_REFS: [],
         }
+        if CONF.load_balancer.provider != 'tungstenfabric':
+            listener_update_kwargs.update({
+                const.INSERT_HEADERS: {
+                    const.X_FORWARDED_FOR: "false",
+                    const.X_FORWARDED_PORT: "false"
+                }
+            })
         if self.mem_listener_client.is_version_supported(
                 self.api_version, '2.1'):
             listener_update_kwargs.update({
@@ -830,11 +858,14 @@
         else:
             self.assertEqual(const.ONLINE, listener[const.OPERATING_STATUS])
         self.assertEqual(400, listener[const.CONNECTION_LIMIT])
-        insert_headers = listener[const.INSERT_HEADERS]
-        self.assertFalse(
-            strutils.bool_from_string(insert_headers[const.X_FORWARDED_FOR]))
-        self.assertFalse(
-            strutils.bool_from_string(insert_headers[const.X_FORWARDED_PORT]))
+        if CONF.load_balancer.provider != 'tungstenfabric':
+            insert_headers = listener[const.INSERT_HEADERS]
+            self.assertFalse(
+                strutils.bool_from_string(
+                    insert_headers[const.X_FORWARDED_FOR]))
+            self.assertFalse(
+                strutils.bool_from_string(
+                    insert_headers[const.X_FORWARDED_PORT]))
         if self.mem_listener_client.is_version_supported(
                 self.api_version, '2.1'):
             self.assertEqual(2000, listener[const.TIMEOUT_CLIENT_DATA])
diff --git a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
index 66b24dd..a5a99b0 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
@@ -150,9 +150,15 @@
         * Validates the load balancer is in the DELETED state.
         """
         lb_name = data_utils.rand_name("lb_member_lb1-delete")
-        lb = self.mem_lb_client.create_loadbalancer(
-            name=lb_name, provider=CONF.load_balancer.provider,
-            vip_network_id=self.lb_member_vip_net[const.ID])
+        lb_kwargs = {const.NAME: lb_name,
+                     const.PROVIDER: CONF.load_balancer.provider}
+        if CONF.load_balancer.provider == 'tungstenfabric':
+            self._setup_lb_network_kwargs(lb_kwargs, 4)
+        else:
+            lb_kwargs.update({
+                const.VIP_NETWORK_ID: self.lb_member_vip_net[const.ID]
+            })
+        lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)
         self.addClassResourceCleanup(
             self.mem_lb_client.cleanup_loadbalancer,
             lb[const.ID])
@@ -187,6 +193,8 @@
             CONF.load_balancer.lb_build_interval,
             CONF.load_balancer.lb_build_timeout)
 
+    @testtools.skipIf(CONF.load_balancer.provider == 'tungstenfabric',
+                      "Tungstenfabric driver doesn't support cascade deletion")
     @decorators.idempotent_id('643ef031-c800-45f2-b229-3c8f8b37c829')
     def test_load_balancer_delete_cascade(self):
         """Tests load balancer create and cascade delete APIs.
@@ -243,6 +251,11 @@
     def _filter_lbs_by_index(self, lbs, indexes):
         return [lb for i, lb in enumerate(lbs) if i not in indexes]
 
+    def _setup_lb_kwargs(self, lb_kwargs):
+        if CONF.load_balancer.provider == 'tungstenfabric':
+            lb_kwargs.pop(const.VIP_NETWORK_ID)
+            self._setup_lb_network_kwargs(lb_kwargs, 4)
+
     @decorators.idempotent_id('6546ef3c-c0e2-46af-b892-f795f4d01119')
     def test_load_balancer_list(self):
         """Tests load balancer list API and field filtering.
@@ -264,17 +277,16 @@
 
         lb_name = data_utils.rand_name("lb_member_lb2-list")
         lb_description = data_utils.rand_name('B')
+        lb_kwargs2 = {
+            const.ADMIN_STATE_UP: True,
+            const.DESCRIPTION: lb_description,
+            const.NAME: lb_name,
+            const.PROVIDER: CONF.load_balancer.provider,
+            const.VIP_NETWORK_ID: self.lb_member_vip_net[const.ID]
+        }
+        self._setup_lb_kwargs(lb_kwargs2)
 
-        lb = self.mem_lb_client.create_loadbalancer(
-            admin_state_up=True,
-            description=lb_description,
-            # TODO(johnsom) Fix test to use a real flavor
-            # flavor=lb_flavor,
-            provider=CONF.load_balancer.provider,
-            name=lb_name,
-            # TODO(johnsom) Add QoS
-            # vip_qos_policy_id=lb_qos_policy_id)
-            vip_network_id=self.lb_member_vip_net[const.ID])
+        lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs2)
         self.addCleanup(
             self.mem_lb_client.cleanup_loadbalancer,
             lb[const.ID])
@@ -299,13 +311,16 @@
 
         lb_name = data_utils.rand_name("lb_member_lb1-list")
         lb_description = data_utils.rand_name('A')
+        lb_kwargs1 = {
+            const.ADMIN_STATE_UP: True,
+            const.DESCRIPTION: lb_description,
+            const.NAME: lb_name,
+            const.PROVIDER: CONF.load_balancer.provider,
+            const.VIP_NETWORK_ID: self.lb_member_vip_net[const.ID]
+        }
+        self._setup_lb_kwargs(lb_kwargs1)
 
-        lb = self.mem_lb_client.create_loadbalancer(
-            admin_state_up=True,
-            description=lb_description,
-            provider=CONF.load_balancer.provider,
-            name=lb_name,
-            vip_network_id=self.lb_member_vip_net[const.ID])
+        lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs1)
         self.addCleanup(
             self.mem_lb_client.cleanup_loadbalancer,
             lb[const.ID])
@@ -330,13 +345,16 @@
 
         lb_name = data_utils.rand_name("lb_member_lb3-list")
         lb_description = data_utils.rand_name('C')
+        lb_kwargs3 = {
+            const.ADMIN_STATE_UP: False,
+            const.DESCRIPTION: lb_description,
+            const.NAME: lb_name,
+            const.PROVIDER: CONF.load_balancer.provider,
+            const.VIP_NETWORK_ID: self.lb_member_vip_net[const.ID]
+        }
 
-        lb = self.mem_lb_client.create_loadbalancer(
-            admin_state_up=False,
-            description=lb_description,
-            provider=CONF.load_balancer.provider,
-            name=lb_name,
-            vip_network_id=self.lb_member_vip_net[const.ID])
+        self._setup_lb_kwargs(lb_kwargs3)
+        lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs3)
         self.addCleanup(
             self.mem_lb_client.cleanup_loadbalancer,
             lb[const.ID])
@@ -668,9 +686,14 @@
         * Validate the show reflects the expected values.
         """
         lb_name = data_utils.rand_name("lb_member_lb1-show_stats")
-        lb = self.mem_lb_client.create_loadbalancer(
-            name=lb_name, provider=CONF.load_balancer.provider,
-            vip_network_id=self.lb_member_vip_net[const.ID])
+        lb_kwargs = {
+            const.NAME: lb_name,
+            const.PROVIDER: CONF.load_balancer.provider,
+            const.VIP_NETWORK_ID: self.lb_member_vip_net[const.ID]
+        }
+        self._setup_lb_kwargs(lb_kwargs)
+
+        lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)
         self.addClassResourceCleanup(
             self.mem_lb_client.cleanup_loadbalancer,
             lb[const.ID])
@@ -730,9 +753,14 @@
         * Validate the show reflects the expected values.
         """
         lb_name = data_utils.rand_name("lb_member_lb1-status")
-        lb = self.mem_lb_client.create_loadbalancer(
-            name=lb_name, provider=CONF.load_balancer.provider,
-            vip_network_id=self.lb_member_vip_net[const.ID])
+        lb_kwargs = {
+            const.NAME: lb_name,
+            const.PROVIDER: CONF.load_balancer.provider,
+            const.VIP_NETWORK_ID: self.lb_member_vip_net[const.ID]
+        }
+        self._setup_lb_kwargs(lb_kwargs)
+
+        lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)
         self.addClassResourceCleanup(
             self.mem_lb_client.cleanup_loadbalancer,
             lb[const.ID])
@@ -793,6 +821,9 @@
         except Exception:
             pass
 
+    @testtools.skipIf(CONF.load_balancer.provider == 'tungstenfabric',
+                      "Tungstenfabric provider does not support failing over "
+                      "load balancers.")
     @decorators.idempotent_id('fc2e07a6-9776-4559-90c9-141170d4c397')
     def test_load_balancer_failover(self):
         """Tests load balancer failover API.
diff --git a/octavia_tempest_plugin/tests/api/v2/test_member.py b/octavia_tempest_plugin/tests/api/v2/test_member.py
index a279495..ec3fc10 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_member.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_member.py
@@ -144,11 +144,15 @@
             const.ADDRESS: member_address,
             const.PROTOCOL_PORT: 80,
             const.WEIGHT: 50,
-            const.MONITOR_ADDRESS: member_monitor_address,
-            const.MONITOR_PORT: 8080,
         }
+        provider = CONF.load_balancer.provider
+        if provider != 'tungstenfabric':
+            member_kwargs.update({
+                const.MONITOR_ADDRESS: member_monitor_address,
+                const.MONITOR_PORT: 8080,
+            })
         if self.mem_member_client.is_version_supported(
-                self.api_version, '2.1'):
+                self.api_version, '2.1') and provider != 'tungstenfabric':
             member_kwargs.update({
                 const.BACKUP: False,
             })
@@ -194,10 +198,10 @@
         equal_items = [const.NAME, const.ADMIN_STATE_UP, const.ADDRESS,
                        const.PROTOCOL_PORT, const.WEIGHT]
         if self.mem_member_client.is_version_supported(
-                self.api_version, '2.1'):
+                self.api_version, '2.1') and provider != 'tungstenfabric':
             equal_items.append(const.BACKUP)
 
-        if monitor:
+        if monitor and provider != 'tungstenfabric':
             equal_items += [const.MONITOR_ADDRESS, const.MONITOR_PORT]
         if const.SUBNET_ID in member_kwargs:
             equal_items.append(const.SUBNET_ID)
@@ -223,10 +227,25 @@
         * List the members filtered, one field, and sorted.
         """
         pool_name = data_utils.rand_name("lb_member_pool2_member-list")
-        pool = self.mem_pool_client.create_pool(
-            name=pool_name, loadbalancer_id=self.lb_id,
-            protocol=self.protocol,
-            lb_algorithm=self.lb_algorithm)
+        if CONF.load_balancer.provider == 'tungstenfabric':
+            listener_name = data_utils.rand_name("lb_member_listener-list")
+            listener = self._create_tf_listener(
+                listener_name, self.protocol, 81, self.lb_id
+            )
+            listener_id = listener[const.ID]
+            self.addCleanup(
+                self.mem_listener_client.cleanup_listener, listener_id,
+                lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+            pool = self.mem_pool_client.create_pool(
+                name=pool_name, listener_id=listener_id,
+                protocol=self.protocol,
+                lb_algorithm=self.lb_algorithm)
+        else:
+            pool = self.mem_pool_client.create_pool(
+                name=pool_name, loadbalancer_id=self.lb_id,
+                protocol=self.protocol,
+                lb_algorithm=self.lb_algorithm)
         pool_id = pool[const.ID]
         self.addCleanup(
             self.mem_pool_client.cleanup_pool, pool_id,
@@ -450,11 +469,15 @@
             const.ADDRESS: '192.0.2.1',
             const.PROTOCOL_PORT: 81,
             const.WEIGHT: 50,
-            const.MONITOR_ADDRESS: '192.0.2.2',
-            const.MONITOR_PORT: 8080,
         }
+        provider = CONF.load_balancer.provider
+        if provider != 'tungstenfabric':
+            member_kwargs.update({
+                const.MONITOR_ADDRESS: '192.0.2.2',
+                const.MONITOR_PORT: 8080,
+            })
         if self.mem_member_client.is_version_supported(
-                self.api_version, '2.1'):
+                self.api_version, '2.1') and provider != 'tungstenfabric':
             member_kwargs.update({
                 const.BACKUP: False,
             })
@@ -492,10 +515,10 @@
                        const.PROTOCOL_PORT, const.WEIGHT]
 
         if self.mem_member_client.is_version_supported(
-                self.api_version, '2.1'):
+                self.api_version, '2.1') and provider != 'tungstenfabric':
             equal_items.append(const.BACKUP)
 
-        if monitor:
+        if monitor and provider != 'tungstenfabric':
             equal_items += [const.MONITOR_ADDRESS, const.MONITOR_PORT]
         if const.SUBNET_ID in member_kwargs:
             equal_items.append(const.SUBNET_ID)
@@ -554,11 +577,15 @@
             const.ADDRESS: '192.0.2.1',
             const.PROTOCOL_PORT: 82,
             const.WEIGHT: 50,
-            const.MONITOR_ADDRESS: '192.0.2.2',
-            const.MONITOR_PORT: 8080,
         }
+        provider = CONF.load_balancer.provider
+        if provider != 'tungstenfabric':
+            member_kwargs.update({
+                const.MONITOR_ADDRESS: '192.0.2.2',
+                const.MONITOR_PORT: 8080,
+            })
         if self.mem_member_client.is_version_supported(
-                self.api_version, '2.1'):
+                self.api_version, '2.1') and provider != 'tungstenfabric':
             member_kwargs.update({
                 const.BACKUP: False,
             })
@@ -606,10 +633,10 @@
                        const.PROTOCOL_PORT, const.WEIGHT]
 
         if self.mem_member_client.is_version_supported(
-                self.api_version, '2.1'):
+                self.api_version, '2.1') and provider != 'tungstenfabric':
             equal_items.append(const.BACKUP)
 
-        if monitor:
+        if monitor and provider != 'tungstenfabric':
             equal_items += [const.MONITOR_ADDRESS, const.MONITOR_PORT]
         if const.SUBNET_ID in member_kwargs:
             equal_items.append(const.SUBNET_ID)
@@ -668,11 +695,11 @@
             const.WEIGHT: member[const.WEIGHT] + 1,
         }
         if self.mem_member_client.is_version_supported(
-                self.api_version, '2.1'):
+                self.api_version, '2.1') and provider != 'tungstenfabric':
             member_update_kwargs.update({
                 const.BACKUP: not member[const.BACKUP]
             })
-        if monitor:
+        if monitor and provider != 'tungstenfabric':
             member_update_kwargs[const.MONITOR_ADDRESS] = '192.0.2.3'
             member_update_kwargs[const.MONITOR_PORT] = member[
                 const.MONITOR_PORT] + 1
@@ -706,10 +733,10 @@
         equal_items = [const.NAME, const.ADMIN_STATE_UP, const.WEIGHT]
 
         if self.mem_member_client.is_version_supported(
-                self.api_version, '2.1'):
+                self.api_version, '2.1') and provider != 'tungstenfabric':
             equal_items.append(const.BACKUP)
 
-        if monitor:
+        if monitor and provider != 'tungstenfabric':
             equal_items += [const.MONITOR_ADDRESS, const.MONITOR_PORT]
         for item in equal_items:
             self.assertEqual(member_update_kwargs[item], member[item])
@@ -724,6 +751,9 @@
         for item in equal_items:
             self.assertEqual(member_kwargs[item], member[item])
 
+    @testtools.skipIf(CONF.load_balancer.provider == 'tungstenfabric',
+                      "Tungstenfabric provider does not support batch "
+                      "updating members.")
     @decorators.idempotent_id('83e0a9f2-491f-46a8-b3ce-6969d70a4e9f')
     def test_member_batch_update(self):
         """Tests member batch update.
diff --git a/octavia_tempest_plugin/tests/api/v2/test_pool.py b/octavia_tempest_plugin/tests/api/v2/test_pool.py
index fd3053d..e37e862 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_pool.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_pool.py
@@ -12,6 +12,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import testtools
 import time
 from uuid import UUID
 
@@ -77,6 +78,9 @@
                                 CONF.load_balancer.build_interval,
                                 CONF.load_balancer.build_timeout)
 
+    @testtools.skipIf(CONF.load_balancer.provider == 'tungstenfabric',
+                      "Tungstenfabric doesn't support pool creation without "
+                      "listener_id.")
     @decorators.idempotent_id('7587fe48-87ba-4538-9f03-190911f100ff')
     def test_pool_create_standalone(self):
         self._test_pool_create(has_listener=False)
@@ -196,9 +200,16 @@
         * List the pools filtered, one field, and sorted.
         """
         lb_name = data_utils.rand_name("lb_member_lb2_pool-list")
-        lb = self.mem_lb_client.create_loadbalancer(
-            name=lb_name, provider=CONF.load_balancer.provider,
-            vip_network_id=self.lb_member_vip_net[const.ID])
+        lb_kwargs = {const.NAME: lb_name,
+                     const.PROVIDER: CONF.load_balancer.provider}
+        if CONF.load_balancer.provider == 'tungstenfabric':
+            self._setup_lb_network_kwargs(lb_kwargs, 4)
+        else:
+            lb_kwargs.update({
+                const.VIP_NETWORK_ID: self.lb_member_vip_net[const.ID]
+            })
+
+        lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)
         lb_id = lb[const.ID]
         self.addCleanup(
             self.mem_lb_client.cleanup_loadbalancer,
@@ -220,8 +231,25 @@
             const.ADMIN_STATE_UP: True,
             const.PROTOCOL: self.protocol,
             const.LB_ALGORITHM: self.lb_algorithm,
-            const.LOADBALANCER_ID: lb_id,
         }
+        if CONF.load_balancer.provider == 'tungstenfabric':
+            listener1_name = data_utils.rand_name("lb_member_listener1-list")
+            listener_kwargs = {
+                const.NAME: listener1_name,
+                const.PROTOCOL: self.protocol,
+                const.PROTOCOL_PORT: 81,
+                const.LOADBALANCER_ID: lb_id,
+            }
+            listener1 = self.mem_listener_client.create_listener(
+                **listener_kwargs)
+            listener1_id = listener1[const.ID]
+            self.addCleanup(
+                self.mem_listener_client.cleanup_listener, listener1_id,
+                lb_client=self.mem_lb_client, lb_id=lb_id)
+
+            pool1_kwargs.update({const.LISTENER_ID: listener1_id})
+        else:
+            pool1_kwargs.update({const.LOADBALANCER_ID: lb_id})
         if self.lb_feature_enabled.session_persistence_enabled:
             pool1_kwargs[const.SESSION_PERSISTENCE] = {
                 const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
@@ -260,6 +288,24 @@
             const.LB_ALGORITHM: self.lb_algorithm,
             const.LOADBALANCER_ID: lb_id,
         }
+        if CONF.load_balancer.provider == 'tungstenfabric':
+            listener2_name = data_utils.rand_name("lb_member_listener2-list")
+            listener_kwargs = {
+                const.NAME: listener2_name,
+                const.PROTOCOL: self.protocol,
+                const.PROTOCOL_PORT: 82,
+                const.LOADBALANCER_ID: lb_id,
+            }
+            listener2 = self.mem_listener_client.create_listener(
+                **listener_kwargs)
+            listener2_id = listener2[const.ID]
+            self.addCleanup(
+                self.mem_listener_client.cleanup_listener, listener2_id,
+                lb_client=self.mem_lb_client, lb_id=lb_id)
+
+            pool2_kwargs.update({const.LISTENER_ID: listener2_id})
+        else:
+            pool2_kwargs.update({const.LOADBALANCER_ID: lb_id})
         if self.lb_feature_enabled.session_persistence_enabled:
             pool2_kwargs[const.SESSION_PERSISTENCE] = {
                 const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
@@ -296,8 +342,25 @@
             const.PROTOCOL: self.protocol,
             const.LB_ALGORITHM: self.lb_algorithm,
             # No session persistence, just so there's one test for that
-            const.LOADBALANCER_ID: lb_id,
         }
+        if CONF.load_balancer.provider == 'tungstenfabric':
+            listener3_name = data_utils.rand_name("lb_member_listener3-list")
+            listener_kwargs = {
+                const.NAME: listener3_name,
+                const.PROTOCOL: self.protocol,
+                const.PROTOCOL_PORT: 83,
+                const.LOADBALANCER_ID: lb_id,
+            }
+            listener3 = self.mem_listener_client.create_listener(
+                **listener_kwargs)
+            listener3_id = listener3[const.ID]
+            self.addCleanup(
+                self.mem_listener_client.cleanup_listener, listener3_id,
+                lb_client=self.mem_lb_client, lb_id=lb_id)
+
+            pool3_kwargs.update({const.LISTENER_ID: listener3_id})
+        else:
+            pool3_kwargs.update({const.LOADBALANCER_ID: lb_id})
         pool3 = self.mem_pool_client.create_pool(
             **pool3_kwargs)
         self.addCleanup(
@@ -436,8 +499,26 @@
             const.ADMIN_STATE_UP: True,
             const.PROTOCOL: self.protocol,
             const.LB_ALGORITHM: self.lb_algorithm,
-            const.LOADBALANCER_ID: self.lb_id,
         }
+        provider = CONF.load_balancer.provider
+        if provider == 'tungstenfabric':
+            listener_name = data_utils.rand_name("lb_member_listener-show")
+            listener_kwargs = {
+                const.NAME: listener_name,
+                const.PROTOCOL: self.protocol,
+                const.PROTOCOL_PORT: 84,
+                const.LOADBALANCER_ID: self.lb_id,
+            }
+            listener = self.mem_listener_client.create_listener(
+                **listener_kwargs)
+            listener_id = listener[const.ID]
+            self.addClassResourceCleanup(
+                self.mem_listener_client.cleanup_listener, listener_id,
+                lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+            pool_kwargs.update({const.LISTENER_ID: listener_id})
+        else:
+            pool_kwargs.update({const.LOADBALANCER_ID: self.lb_id})
         if self.lb_feature_enabled.session_persistence_enabled:
             pool_kwargs[const.SESSION_PERSISTENCE] = {
                 const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
@@ -473,7 +554,8 @@
         self.assertEqual(self.protocol, pool[const.PROTOCOL])
         self.assertEqual(1, len(pool[const.LOADBALANCERS]))
         self.assertEqual(self.lb_id, pool[const.LOADBALANCERS][0][const.ID])
-        self.assertEmpty(pool[const.LISTENERS])
+        if provider != 'tungstenfabric':
+            self.assertEmpty(pool[const.LISTENERS])
         self.assertEqual(self.lb_algorithm,
                          pool[const.LB_ALGORITHM])
         if self.lb_feature_enabled.session_persistence_enabled:
@@ -533,8 +615,25 @@
             const.ADMIN_STATE_UP: False,
             const.PROTOCOL: self.protocol,
             const.LB_ALGORITHM: self.lb_algorithm,
-            const.LOADBALANCER_ID: self.lb_id,
         }
+        if CONF.load_balancer.provider == 'tungstenfabric':
+            listener_name = data_utils.rand_name("lb_member_listener-update")
+            listener_kwargs = {
+                const.NAME: listener_name,
+                const.PROTOCOL: self.protocol,
+                const.PROTOCOL_PORT: 85,
+                const.LOADBALANCER_ID: self.lb_id,
+            }
+            listener = self.mem_listener_client.create_listener(
+                **listener_kwargs)
+            listener_id = listener[const.ID]
+            self.addCleanup(
+                self.mem_listener_client.cleanup_listener, listener_id,
+                lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+            pool_kwargs.update({const.LISTENER_ID: listener_id})
+        else:
+            pool_kwargs.update({const.LOADBALANCER_ID: self.lb_id})
         if self.lb_feature_enabled.session_persistence_enabled:
             pool_kwargs[const.SESSION_PERSISTENCE] = {
                 const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
@@ -569,7 +668,8 @@
         self.assertEqual(self.protocol, pool[const.PROTOCOL])
         self.assertEqual(1, len(pool[const.LOADBALANCERS]))
         self.assertEqual(self.lb_id, pool[const.LOADBALANCERS][0][const.ID])
-        self.assertEmpty(pool[const.LISTENERS])
+        if CONF.load_balancer.provider != 'tungstenfabric':
+            self.assertEmpty(pool[const.LISTENERS])
         self.assertEqual(self.lb_algorithm,
                          pool[const.LB_ALGORITHM])
         if self.lb_feature_enabled.session_persistence_enabled:
@@ -687,8 +787,25 @@
             const.NAME: pool_name,
             const.PROTOCOL: self.protocol,
             const.LB_ALGORITHM: self.lb_algorithm,
-            const.LOADBALANCER_ID: self.lb_id,
         }
+        if CONF.load_balancer.provider == 'tungstenfabric':
+            listener_name = data_utils.rand_name("lb_member_listener-delete")
+            listener_kwargs = {
+                const.NAME: listener_name,
+                const.PROTOCOL: self.protocol,
+                const.PROTOCOL_PORT: 86,
+                const.LOADBALANCER_ID: self.lb_id,
+            }
+            listener = self.mem_listener_client.create_listener(
+                **listener_kwargs)
+            listener_id = listener[const.ID]
+            self.addCleanup(
+                self.mem_listener_client.cleanup_listener, listener_id,
+                lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+            pool_kwargs.update({const.LISTENER_ID: listener_id})
+        else:
+            pool_kwargs.update({const.LOADBALANCER_ID: self.lb_id})
         if self.lb_feature_enabled.session_persistence_enabled:
             pool_kwargs[const.SESSION_PERSISTENCE] = {
                 const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py b/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py
index fa39c01..e138cef 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py
@@ -62,8 +62,26 @@
             const.NAME: pool_name,
             const.PROTOCOL: const.HTTP,
             const.LB_ALGORITHM: cls.lb_algorithm,
-            const.LOADBALANCER_ID: cls.lb_id,
         }
+        if CONF.load_balancer.provider == 'tungstenfabric':
+            listener_name = data_utils.rand_name("lb_hm_listener1-create")
+            listener_kwargs = {
+                const.NAME: listener_name,
+                const.PROTOCOL: const.HTTP,
+                const.PROTOCOL_PORT: 80,
+                const.LOADBALANCER_ID: cls.lb_id,
+            }
+            listener = cls.mem_listener_client.create_listener(
+                **listener_kwargs)
+            cls.listener_id = listener[const.ID]
+            cls.addClassResourceCleanup(
+                cls.mem_listener_client.cleanup_listener, cls.listener_id,
+                lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
+            pool_kwargs.update({const.LISTENER_ID: cls.listener_id})
+        else:
+            pool_kwargs.update({const.LOADBALANCER_ID: cls.lb_id})
+
         pool = cls.mem_pool_client.create_pool(**pool_kwargs)
         cls.pool_id = pool[const.ID]
         cls.addClassResourceCleanup(
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_listener.py b/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
index bb3df64..6157d91 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
@@ -61,8 +61,19 @@
             const.NAME: pool1_name,
             const.PROTOCOL: cls.protocol,
             const.LB_ALGORITHM: cls.lb_algorithm,
-            const.LOADBALANCER_ID: cls.lb_id,
         }
+        if CONF.load_balancer.provider == 'tungstenfabric':
+            listener1_name = data_utils.rand_name("lb_member_listener1")
+            listener1 = cls._create_tf_listener(
+                listener1_name, cls.protocol, 81, cls.lb_id)
+            listener1_id = listener1[const.ID]
+            cls.addClassResourceCleanup(
+                cls.mem_listener_client.cleanup_listener, listener1_id,
+                lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
+            pool1_kwargs.update({const.LISTENER_ID: listener1_id})
+        else:
+            pool1_kwargs.update({const.LOADBALANCER_ID: cls.lb_id})
         pool1 = cls.mem_pool_client.create_pool(**pool1_kwargs)
         cls.pool1_id = pool1[const.ID]
         cls.addClassResourceCleanup(
@@ -81,8 +92,19 @@
             const.NAME: pool2_name,
             const.PROTOCOL: cls.protocol,
             const.LB_ALGORITHM: cls.lb_algorithm,
-            const.LOADBALANCER_ID: cls.lb_id,
         }
+        if CONF.load_balancer.provider == 'tungstenfabric':
+            listener2_name = data_utils.rand_name("lb_member_listener2")
+            listener2 = cls._create_tf_listener(
+                listener2_name, cls.protocol, 82, cls.lb_id)
+            listener2_id = listener2[const.ID]
+            cls.addClassResourceCleanup(
+                cls.mem_listener_client.cleanup_listener, listener2_id,
+                lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
+            pool2_kwargs.update({const.LISTENER_ID: listener2_id})
+        else:
+            pool2_kwargs.update({const.LOADBALANCER_ID: cls.lb_id})
         pool2 = cls.mem_pool_client.create_pool(**pool2_kwargs)
         cls.pool2_id = pool2[const.ID]
         cls.addClassResourceCleanup(
@@ -117,15 +139,18 @@
             const.PROTOCOL_PORT: 80,
             const.LOADBALANCER_ID: self.lb_id,
             const.CONNECTION_LIMIT: 200,
-            const.INSERT_HEADERS: {
-                const.X_FORWARDED_FOR: "true",
-                const.X_FORWARDED_PORT: "true"
-            },
-            const.DEFAULT_POOL_ID: self.pool1_id,
             # TODO(rm_work): need to finish the rest of this stuff
             # const.DEFAULT_TLS_CONTAINER_REF: '',
             # const.SNI_CONTAINER_REFS: [],
         }
+        if CONF.load_balancer.provider != 'tungstenfabric':
+            listener_kwargs.update({
+                const.INSERT_HEADERS: {
+                    const.X_FORWARDED_FOR: "true",
+                    const.X_FORWARDED_PORT: "true"
+                },
+                const.DEFAULT_POOL_ID: self.pool1_id
+            })
         if self.mem_listener_client.is_version_supported(
                 self.api_version, '2.1'):
             listener_kwargs.update({
@@ -165,11 +190,12 @@
         self.assertEqual(80, listener[const.PROTOCOL_PORT])
         self.assertEqual(200, listener[const.CONNECTION_LIMIT])
-        insert_headers = listener[const.INSERT_HEADERS]
-        self.assertTrue(
-            strutils.bool_from_string(insert_headers[const.X_FORWARDED_FOR]))
-        self.assertTrue(
-            strutils.bool_from_string(insert_headers[const.X_FORWARDED_PORT]))
-        self.assertEqual(self.pool1_id, listener[const.DEFAULT_POOL_ID])
+        if CONF.load_balancer.provider != 'tungstenfabric':
+            insert_headers = listener[const.INSERT_HEADERS]
+            self.assertTrue(strutils.bool_from_string(
+                insert_headers[const.X_FORWARDED_FOR]))
+            self.assertTrue(strutils.bool_from_string(
+                insert_headers[const.X_FORWARDED_PORT]))
+            self.assertEqual(self.pool1_id, listener[const.DEFAULT_POOL_ID])
         if self.mem_listener_client.is_version_supported(
                 self.api_version, '2.1'):
             self.assertEqual(1000, listener[const.TIMEOUT_CLIENT_DATA])
@@ -186,15 +212,18 @@
             const.DESCRIPTION: new_description,
             const.ADMIN_STATE_UP: True,
             const.CONNECTION_LIMIT: 400,
-            const.INSERT_HEADERS: {
-                const.X_FORWARDED_FOR: "false",
-                const.X_FORWARDED_PORT: "false"
-            },
             const.DEFAULT_POOL_ID: self.pool2_id,
             # TODO(rm_work): need to finish the rest of this stuff
             # const.DEFAULT_TLS_CONTAINER_REF: '',
             # const.SNI_CONTAINER_REFS: [],
         }
+        if CONF.load_balancer.provider != 'tungstenfabric':
+            listener_update_kwargs.update({
+                const.INSERT_HEADERS: {
+                    const.X_FORWARDED_FOR: "false",
+                    const.X_FORWARDED_PORT: "false"
+                }
+            })
         if self.mem_listener_client.is_version_supported(
                 self.api_version, '2.1'):
             listener_update_kwargs.update({
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_member.py b/octavia_tempest_plugin/tests/scenario/v2/test_member.py
index 003b8b4..194b924 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_member.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_member.py
@@ -144,8 +144,9 @@
             const.MONITOR_ADDRESS: '192.0.2.2',
             const.MONITOR_PORT: 8080,
         }
+        provider = CONF.load_balancer.provider
         if self.mem_member_client.is_version_supported(
-                self.api_version, '2.1'):
+                self.api_version, '2.1') and provider != 'tungstenfabric':
             member_kwargs.update({
                 const.BACKUP: False,
             })
@@ -154,7 +155,7 @@
             member_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
                 const.ID]
         hm_enabled = CONF.loadbalancer_feature_enabled.health_monitor_enabled
-        if not hm_enabled:
+        if not hm_enabled or provider == 'tungstenfabric':
             del member_kwargs[const.MONITOR_ADDRESS]
             del member_kwargs[const.MONITOR_PORT]
         member = self.mem_member_client.create_member(**member_kwargs)
@@ -193,10 +194,10 @@
 
         equal_items = [const.NAME, const.ADMIN_STATE_UP, const.ADDRESS,
                        const.PROTOCOL_PORT, const.WEIGHT]
-        if hm_enabled:
+        if hm_enabled and provider != 'tungstenfabric':
             equal_items += [const.MONITOR_ADDRESS, const.MONITOR_PORT]
         if self.mem_member_client.is_version_supported(
-                self.api_version, '2.1'):
+                self.api_version, '2.1') and provider != 'tungstenfabric':
             equal_items.append(const.BACKUP)
 
         if const.SUBNET_ID in member_kwargs:
@@ -216,12 +217,12 @@
             const.WEIGHT: member[const.WEIGHT] + 1,
         }
         if self.mem_member_client.is_version_supported(
-                self.api_version, '2.1'):
+                self.api_version, '2.1') and provider != 'tungstenfabric':
             member_update_kwargs.update({
                 const.BACKUP: not member[const.BACKUP],
             })
 
-        if hm_enabled:
+        if hm_enabled and provider != 'tungstenfabric':
             member_update_kwargs[const.MONITOR_ADDRESS] = '192.0.2.3'
             member_update_kwargs[const.MONITOR_PORT] = member[
                 const.MONITOR_PORT] + 1
@@ -243,10 +244,10 @@
 
         # Test changed items
         equal_items = [const.NAME, const.ADMIN_STATE_UP, const.WEIGHT]
-        if hm_enabled:
+        if hm_enabled and provider != 'tungstenfabric':
             equal_items += [const.MONITOR_ADDRESS, const.MONITOR_PORT]
         if self.mem_member_client.is_version_supported(
-                self.api_version, '2.1'):
+                self.api_version, '2.1') and provider != 'tungstenfabric':
             equal_items.append(const.BACKUP)
 
         for item in equal_items:
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_pool.py b/octavia_tempest_plugin/tests/scenario/v2/test_pool.py
index 5e0622c..91c2a07 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_pool.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_pool.py
@@ -12,6 +12,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import testtools
 from uuid import UUID
 
 from dateutil import parser
@@ -75,6 +76,9 @@
                                 CONF.load_balancer.build_interval,
                                 CONF.load_balancer.build_timeout)
 
+    @testtools.skipIf(CONF.load_balancer.provider == 'tungstenfabric',
+                      "Tungstenfabric doesn't support pool creation without "
+                      "listener_id.")
     @decorators.idempotent_id('dfa120bf-81b9-4f22-bb5e-7df660c18173')
     def test_pool_standalone_CRUD(self):
         self._test_pool_CRUD(has_listener=False)
diff --git a/octavia_tempest_plugin/tests/test_base.py b/octavia_tempest_plugin/tests/test_base.py
index 741bb1c..0ffe498 100644
--- a/octavia_tempest_plugin/tests/test_base.py
+++ b/octavia_tempest_plugin/tests/test_base.py
@@ -507,6 +507,17 @@
             lb_kwargs[const.VIP_NETWORK_ID] = cls.lb_member_vip_net[const.ID]
             lb_kwargs[const.VIP_SUBNET_ID] = None
 
+    @classmethod
+    def _create_tf_listener(cls, name, proto, port, lb_id):
+        listener_kwargs = {
+            const.NAME: name,
+            const.PROTOCOL: proto,
+            const.PROTOCOL_PORT: port,
+            const.LOADBALANCER_ID: lb_id,
+        }
+        listener = cls.mem_listener_client.create_listener(**listener_kwargs)
+        return listener
+
 
 class LoadBalancerBaseTestWithCompute(LoadBalancerBaseTest):
     @classmethod