Merge "Add UDP test scenario"
diff --git a/octavia_tempest_plugin/common/constants.py b/octavia_tempest_plugin/common/constants.py
index e31c1a5..0424cc5 100644
--- a/octavia_tempest_plugin/common/constants.py
+++ b/octavia_tempest_plugin/common/constants.py
@@ -138,6 +138,7 @@
 HEALTH_MONITOR_HTTP = 'HTTP'
 HEALTH_MONITOR_HTTPS = 'HTTPS'
 HEALTH_MONITOR_TLS_HELLO = 'TLS-HELLO'
+HEALTH_MONITOR_UDP_CONNECT = 'UDP-CONNECT'
 
 # Session Persistence
 TYPE = 'type'
diff --git a/octavia_tempest_plugin/contrib/httpd/README.md b/octavia_tempest_plugin/contrib/httpd/README.md
deleted file mode 100644
index b34d48a..0000000
--- a/octavia_tempest_plugin/contrib/httpd/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-To build a statically linked binary for httpd on Ubuntu (can run anywhere):
-
-```sh
-sudo apt-get install -y golang
-go build -ldflags "-s -w -linkmode external -extldflags -static" -o httpd.bin httpd.go
-```
diff --git a/octavia_tempest_plugin/contrib/httpd/httpd.bin b/octavia_tempest_plugin/contrib/httpd/httpd.bin
deleted file mode 100755
index b55cc4f..0000000
--- a/octavia_tempest_plugin/contrib/httpd/httpd.bin
+++ /dev/null
Binary files differ
diff --git a/octavia_tempest_plugin/contrib/test_server/README.rst b/octavia_tempest_plugin/contrib/test_server/README.rst
new file mode 100644
index 0000000..da719b7
--- /dev/null
+++ b/octavia_tempest_plugin/contrib/test_server/README.rst
@@ -0,0 +1,40 @@
+====================
+Amphorae test server
+====================
+
+test_server is a static binary that simulates both an HTTP and a UDP server.
+
+
+Building
+--------
+
+To build a statically linked binary for test_server (it can run anywhere):
+
+Install dependencies for Ubuntu/Debian::
+
+    sudo apt-get install -y golang
+
+Install dependencies for CentOS (use golang 1.10 from go-toolset-7) and start
+a shell in the new environment::
+
+    sudo yum install -y centos-release-scl
+    sudo yum install -y go-toolset-7-golang-bin glibc-static openssl-static zlib-static
+    scl enable go-toolset-7 bash
+
+Build the binary::
+
+    CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-s -w -extldflags -static' -o test_server.bin test_server.go
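+
+
+Usage
+-----
+
+The server replies with its configured ID on both protocols: HTTP GET requests
+and UDP datagrams on the same port number. A quick local smoke test might look
+like this (assuming curl and nc are available; they are not part of this
+plugin)::
+
+    ./test_server.bin -port 8080 -id 1 &
+
+    curl http://127.0.0.1:8080/
+    echo "test" | nc -u -w1 127.0.0.1 8080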
diff --git a/octavia_tempest_plugin/contrib/httpd/__init__.py b/octavia_tempest_plugin/contrib/test_server/__init__.py
similarity index 100%
rename from octavia_tempest_plugin/contrib/httpd/__init__.py
rename to octavia_tempest_plugin/contrib/test_server/__init__.py
diff --git a/octavia_tempest_plugin/contrib/test_server/test_server.bin b/octavia_tempest_plugin/contrib/test_server/test_server.bin
new file mode 100755
index 0000000..e3cc7ba
--- /dev/null
+++ b/octavia_tempest_plugin/contrib/test_server/test_server.bin
Binary files differ
diff --git a/octavia_tempest_plugin/contrib/httpd/httpd.go b/octavia_tempest_plugin/contrib/test_server/test_server.go
similarity index 74%
rename from octavia_tempest_plugin/contrib/httpd/httpd.go
rename to octavia_tempest_plugin/contrib/test_server/test_server.go
index 58d66f2..8139580 100644
--- a/octavia_tempest_plugin/contrib/httpd/httpd.go
+++ b/octavia_tempest_plugin/contrib/test_server/test_server.go
@@ -4,6 +4,7 @@
 	"flag"
 	"fmt"
 	"io"
+	"net"
 	"net/http"
 	"sync"
 	"time"
@@ -88,20 +89,55 @@
 	fmt.Fprintf(w, "reset\n")
 }
 
-func main() {
-	portPtr := flag.Int("port", 8080, "TCP port to listen on")
-	idPtr := flag.String("id", "1", "Server ID")
-
-	flag.Parse()
-
-	resp = fmt.Sprintf("%s", *idPtr)
+func http_serve(port int, id string) {
 	sess_cookie.Name = "JSESSIONID"
-	sess_cookie.Value = *idPtr
+	sess_cookie.Value = id
 
 	http.HandleFunc("/", root_handler)
 	http.HandleFunc("/slow", slow_handler)
 	http.HandleFunc("/stats", stats_handler)
 	http.HandleFunc("/reset", reset_handler)
-	portStr := fmt.Sprintf(":%d", *portPtr)
+	portStr := fmt.Sprintf(":%d", port)
 	http.ListenAndServe(portStr, nil)
 }
+
+func udp_serve(port int, id string) {
+	portStr := fmt.Sprintf("0.0.0.0:%d", port)
+
+	pc, err := net.ListenPacket("udp", portStr)
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+
+	buffer := make([]byte, 1500)
+
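+	// Reply to every datagram with the server ID so the traffic tests
+	// can tell which member handled a given request.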
+	for {
+		_, addr, err := pc.ReadFrom(buffer)
+		if err != nil {
+			fmt.Println(err)
+			return
+		}
+		_, err = pc.WriteTo([]byte(resp), addr)
+		if err != nil {
+			fmt.Println(err)
+			return
+		}
+	}
+}
+
+func main() {
+	portPtr := flag.Int("port", 8080, "Port to listen on")
+	idPtr := flag.String("id", "1", "Server ID")
+
+	flag.Parse()
+
+	resp = *idPtr
+
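+	// TCP and UDP port spaces are distinct, so both servers can share the
+	// same port number. udp_serve blocks, so serve HTTP from a goroutine.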
+	go http_serve(*portPtr, *idPtr)
+	udp_serve(*portPtr, *idPtr)
+}
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py b/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py
index 1bf1322..a1dc6bb 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py
@@ -63,6 +63,30 @@
 
         cls.lb_vip_address = lb[const.VIP_ADDRESS]
 
+        # Per-protocol listener and pool IDs
+        cls.listener_ids = {}
+        cls.pool_ids = {}
+
+        cls.protocol = const.HTTP
+        lb_feature_enabled = CONF.loadbalancer_feature_enabled
+        if not lb_feature_enabled.l7_protocol_enabled:
+            cls.protocol = lb_feature_enabled.l4_protocol
+
+        # Don't use the same ports for HTTP/l4_protocol and UDP, as some
+        # releases (<=train) don't support sharing them
+        cls._listener_pool_create(cls.protocol, 80)
+
+        cls._listener_pool_create(const.UDP, 8080)
+
+    @classmethod
+    def _listener_pool_create(cls, protocol, protocol_port):
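+        # UDP listener support was added in Octavia API 2.1; silently skip
+        # creating UDP resources on older APIs (the UDP tests skip as well)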
+        if (protocol == const.UDP and
+                not cls.mem_listener_client.is_version_supported(
+                    cls.api_version, '2.1')):
+            return
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -72,15 +94,19 @@
         listener_name = data_utils.rand_name("lb_member_listener1_ipv6_ops")
         listener_kwargs = {
             const.NAME: listener_name,
-            const.PROTOCOL: const.HTTP,
-            const.PROTOCOL_PORT: '80',
+            const.PROTOCOL: protocol,
+            const.PROTOCOL_PORT: protocol_port,
             const.LOADBALANCER_ID: cls.lb_id,
+            # For branches that don't support multiple listeners in a single
+            # haproxy process and use haproxy>=1.8:
+            const.CONNECTION_LIMIT: 200,
         }
-        listener = cls.mem_listener_client.create_listener(**listener_kwargs)
-        cls.listener_id = listener[const.ID]
+        listener = cls.mem_listener_client.create_listener(
+            **listener_kwargs)
+        cls.listener_ids[protocol] = listener[const.ID]
         cls.addClassResourceCleanup(
             cls.mem_listener_client.cleanup_listener,
-            cls.listener_id,
+            cls.listener_ids[protocol],
             lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
@@ -92,15 +118,15 @@
         pool_name = data_utils.rand_name("lb_member_pool1_ipv6_ops")
         pool_kwargs = {
             const.NAME: pool_name,
-            const.PROTOCOL: const.HTTP,
+            const.PROTOCOL: protocol,
             const.LB_ALGORITHM: cls.lb_algorithm,
-            const.LISTENER_ID: cls.listener_id,
+            const.LISTENER_ID: cls.listener_ids[protocol],
         }
         pool = cls.mem_pool_client.create_pool(**pool_kwargs)
-        cls.pool_id = pool[const.ID]
+        cls.pool_ids[protocol] = pool[const.ID]
         cls.addClassResourceCleanup(
             cls.mem_pool_client.cleanup_pool,
-            cls.pool_id,
+            cls.pool_ids[protocol],
             lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
@@ -109,8 +135,8 @@
                                 CONF.load_balancer.build_interval,
                                 CONF.load_balancer.build_timeout)
 
-    @decorators.idempotent_id('219ac17d-c5c1-4e7e-a9d5-0764d7ce7746')
-    def test_ipv6_vip_mixed_ipv4_ipv6_members_traffic(self):
+    def _test_ipv6_vip_mixed_ipv4_ipv6_members_traffic(self, protocol,
+                                                       protocol_port):
         """Tests traffic through a loadbalancer with IPv4 and IPv6 members.
 
         * Set up members on a loadbalancer.
@@ -119,7 +145,7 @@
         # Set up Member 1 for Webserver 1
         member1_name = data_utils.rand_name("lb_member_member1-traffic")
         member1_kwargs = {
-            const.POOL_ID: self.pool_id,
+            const.POOL_ID: self.pool_ids[protocol],
             const.NAME: member1_name,
             const.ADMIN_STATE_UP: True,
             const.ADDRESS: self.webserver1_ip,
@@ -132,7 +158,7 @@
             **member1_kwargs)
         self.addCleanup(
             self.mem_member_client.cleanup_member,
-            member1[const.ID], pool_id=self.pool_id,
+            member1[const.ID], pool_id=self.pool_ids[protocol],
             lb_client=self.mem_lb_client, lb_id=self.lb_id)
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
@@ -143,7 +169,7 @@
         # Set up Member 2 for Webserver 2
         member2_name = data_utils.rand_name("lb_member_member2-traffic")
         member2_kwargs = {
-            const.POOL_ID: self.pool_id,
+            const.POOL_ID: self.pool_ids[protocol],
             const.NAME: member2_name,
             const.ADMIN_STATE_UP: True,
             const.ADDRESS: self.webserver2_ipv6,
@@ -157,7 +183,7 @@
             **member2_kwargs)
         self.addCleanup(
             self.mem_member_client.cleanup_member,
-            member2[const.ID], pool_id=self.pool_id,
+            member2[const.ID], pool_id=self.pool_ids[protocol],
             lb_client=self.mem_lb_client, lb_id=self.lb_id)
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
@@ -166,4 +192,101 @@
             CONF.load_balancer.check_timeout)
 
         # Send some traffic
-        self.check_members_balanced(self.lb_vip_address)
+        self.check_members_balanced(self.lb_vip_address,
+                                    protocol_port=protocol_port,
+                                    protocol=protocol)
+
+    @decorators.idempotent_id('219ac17d-c5c1-4e7e-a9d5-0764d7ce7746')
+    def test_ipv6_vip_mixed_ipv4_ipv6_members_traffic(self):
+        self._test_ipv6_vip_mixed_ipv4_ipv6_members_traffic(self.protocol, 80)
+
+    @decorators.idempotent_id('c468434d-bc84-4bfa-825f-d4761daa0d76')
+    # Skipping test for amphora driver until "UDP load balancers cannot mix
+    # protocol versions" (https://storyboard.openstack.org/#!/story/2003329) is
+    # fixed
+    @decorators.skip_because(
+        bug='2003329',
+        bug_type='storyboard',
+        condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
+    def test_ipv6_vip_mixed_ipv4_ipv6_members_udp_traffic(self):
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.1'):
+            raise self.skipException('UDP listener support is only available '
+                                     'in Octavia API version 2.1 or newer')
+
+        self._test_ipv6_vip_mixed_ipv4_ipv6_members_traffic(const.UDP, 8080)
+
+    def _test_ipv6_vip_ipv6_members_traffic(self, protocol, protocol_port):
+        """Tests traffic through a loadbalancer with IPv6 members.
+
+        * Set up members on a loadbalancer.
+        * Test traffic to ensure it is balanced properly.
+        """
+
+        # Set up Member 1 for Webserver 1
+        member1_name = data_utils.rand_name("lb_member_member1-traffic")
+        member1_kwargs = {
+            const.POOL_ID: self.pool_ids[protocol],
+            const.NAME: member1_name,
+            const.ADMIN_STATE_UP: True,
+            const.ADDRESS: self.webserver1_ipv6,
+            const.PROTOCOL_PORT: 80,
+        }
+        if self.lb_member_1_ipv6_subnet:
+            member1_kwargs[const.SUBNET_ID] = (
+                self.lb_member_1_ipv6_subnet[const.ID])
+
+        member1 = self.mem_member_client.create_member(
+            **member1_kwargs)
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member1[const.ID], pool_id=self.pool_ids[protocol],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        # Set up Member 2 for Webserver 2
+        member2_name = data_utils.rand_name("lb_member_member2-traffic")
+        member2_kwargs = {
+            const.POOL_ID: self.pool_ids[protocol],
+            const.NAME: member2_name,
+            const.ADMIN_STATE_UP: True,
+            const.ADDRESS: self.webserver2_ipv6,
+            const.PROTOCOL_PORT: 80,
+        }
+        if self.lb_member_2_ipv6_subnet:
+            member2_kwargs[const.SUBNET_ID] = (
+                self.lb_member_2_ipv6_subnet[const.ID])
+
+        member2 = self.mem_member_client.create_member(
+            **member2_kwargs)
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member2[const.ID], pool_id=self.pool_ids[protocol],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        # Send some traffic
+        self.check_members_balanced(self.lb_vip_address,
+                                    protocol_port=protocol_port,
+                                    protocol=protocol)
+
+    @decorators.idempotent_id('dd75f41a-5b29-47ad-963d-3434f1056ca3')
+    def test_ipv6_vip_ipv6_members_traffic(self):
+        self._test_ipv6_vip_ipv6_members_traffic(self.protocol, 80)
+
+    @decorators.idempotent_id('26317013-a9b5-4a00-a993-d4c55b764e40')
+    def test_ipv6_vip_ipv6_members_udp_traffic(self):
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.1'):
+            raise self.skipException('UDP listener support is only available '
+                                     'in Octavia API version 2.1 or newer')
+
+        self._test_ipv6_vip_ipv6_members_traffic(const.UDP, 8080)
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_member.py b/octavia_tempest_plugin/tests/scenario/v2/test_member.py
index 57cdf74..003b8b4 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_member.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_member.py
@@ -322,6 +322,7 @@
     # fixed
     @decorators.skip_because(
         bug='2003329',
+        bug_type='storyboard',
         condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
     def test_mixed_udp_member_create(self):
         """Test the member creation with mixed IP protocol members/VIP."""
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py b/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
index dc43a95..0d49d67 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
@@ -73,23 +73,46 @@
         else:
             cls.lb_vip_address = lb[const.VIP_ADDRESS]
 
-        protocol = const.HTTP
+        # Per-protocol listener and pool IDs
+        cls.listener_ids = {}
+        cls.pool_ids = {}
+
+        cls.protocol = const.HTTP
         lb_feature_enabled = CONF.loadbalancer_feature_enabled
         if not lb_feature_enabled.l7_protocol_enabled:
-            protocol = lb_feature_enabled.l4_protocol
+            cls.protocol = lb_feature_enabled.l4_protocol
+
+        # Don't use the same ports for HTTP/l4_protocol and UDP, as some
+        # releases (<=train) don't support sharing them
+        cls._listener_pool_create(cls.protocol, 80)
+
+        cls._listener_pool_create(const.UDP, 8080)
+
+    @classmethod
+    def _listener_pool_create(cls, protocol, protocol_port):
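+        # UDP listener support was added in Octavia API 2.1; silently skip
+        # creating UDP resources on older APIs (the UDP tests skip as well)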
+        if (protocol == const.UDP and
+                not cls.mem_listener_client.is_version_supported(
+                    cls.api_version, '2.1')):
+            return
 
         listener_name = data_utils.rand_name("lb_member_listener1_operations")
         listener_kwargs = {
             const.NAME: listener_name,
             const.PROTOCOL: protocol,
-            const.PROTOCOL_PORT: '80',
+            const.PROTOCOL_PORT: protocol_port,
             const.LOADBALANCER_ID: cls.lb_id,
+            # For branches that don't support multiple listeners in a single
+            # haproxy process and use haproxy>=1.8:
+            const.CONNECTION_LIMIT: 200,
         }
-        listener = cls.mem_listener_client.create_listener(**listener_kwargs)
-        cls.listener_id = listener[const.ID]
+        listener = cls.mem_listener_client.create_listener(
+            **listener_kwargs)
+        cls.listener_ids[protocol] = listener[const.ID]
         cls.addClassResourceCleanup(
             cls.mem_listener_client.cleanup_listener,
-            cls.listener_id,
+            cls.listener_ids[protocol],
             lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
@@ -103,13 +124,13 @@
             const.NAME: pool_name,
             const.PROTOCOL: protocol,
             const.LB_ALGORITHM: cls.lb_algorithm,
-            const.LISTENER_ID: cls.listener_id,
+            const.LISTENER_ID: cls.listener_ids[protocol],
         }
         pool = cls.mem_pool_client.create_pool(**pool_kwargs)
-        cls.pool_id = pool[const.ID]
+        cls.pool_ids[protocol] = pool[const.ID]
         cls.addClassResourceCleanup(
             cls.mem_pool_client.cleanup_pool,
-            cls.pool_id,
+            cls.pool_ids[protocol],
             lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
@@ -118,10 +139,7 @@
                                 CONF.load_balancer.build_interval,
                                 CONF.load_balancer.build_timeout)
 
-    @testtools.skipIf(CONF.load_balancer.test_with_noop,
-                      'Traffic tests will not work in noop mode.')
-    @decorators.idempotent_id('6751135d-e15a-4e22-89f4-bfcc3408d424')
-    def test_basic_traffic(self):
+    def _test_basic_traffic(self, protocol, protocol_port):
         """Tests sending traffic through a loadbalancer
 
         * Set up members on a loadbalancer.
@@ -130,7 +148,7 @@
         # Set up Member 1 for Webserver 1
         member1_name = data_utils.rand_name("lb_member_member1-traffic")
         member1_kwargs = {
-            const.POOL_ID: self.pool_id,
+            const.POOL_ID: self.pool_ids[protocol],
             const.NAME: member1_name,
             const.ADMIN_STATE_UP: True,
             const.ADDRESS: self.webserver1_ip,
@@ -143,7 +161,7 @@
             **member1_kwargs)
         self.addCleanup(
             self.mem_member_client.cleanup_member,
-            member1[const.ID], pool_id=self.pool_id,
+            member1[const.ID], pool_id=self.pool_ids[protocol],
             lb_client=self.mem_lb_client, lb_id=self.lb_id)
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
@@ -154,7 +172,7 @@
         # Set up Member 2 for Webserver 2
         member2_name = data_utils.rand_name("lb_member_member2-traffic")
         member2_kwargs = {
-            const.POOL_ID: self.pool_id,
+            const.POOL_ID: self.pool_ids[protocol],
             const.NAME: member2_name,
             const.ADMIN_STATE_UP: True,
             const.ADDRESS: self.webserver2_ip,
@@ -167,7 +185,7 @@
             **member2_kwargs)
         self.addCleanup(
             self.mem_member_client.cleanup_member,
-            member2[const.ID], pool_id=self.pool_id,
+            member2[const.ID], pool_id=self.pool_ids[protocol],
             lb_client=self.mem_lb_client, lb_id=self.lb_id)
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
@@ -176,13 +194,28 @@
             CONF.load_balancer.check_timeout)
 
         # Send some traffic
-        self.check_members_balanced(self.lb_vip_address)
+        self.check_members_balanced(self.lb_vip_address,
+                                    protocol_port=protocol_port,
+                                    protocol=protocol)
 
-    @testtools.skipUnless(
-        CONF.loadbalancer_feature_enabled.health_monitor_enabled,
-        'Health monitor testing is disabled')
-    @decorators.idempotent_id('a16f8eb4-a77c-4b0e-8b1b-91c237039713')
-    def test_healthmonitor_traffic(self):
+    @testtools.skipIf(CONF.load_balancer.test_with_noop,
+                      'Traffic tests will not work in noop mode.')
+    @decorators.idempotent_id('6751135d-e15a-4e22-89f4-bfcc3408d424')
+    def test_basic_traffic(self):
+        self._test_basic_traffic(self.protocol, 80)
+
+    @testtools.skipIf(CONF.load_balancer.test_with_noop,
+                      'Traffic tests will not work in noop mode.')
+    @decorators.idempotent_id('1e459663-2315-4067-bb47-c8a72f4928f0')
+    def test_basic_udp_traffic(self):
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.1'):
+            raise self.skipException('UDP listener support is only available '
+                                     'in Octavia API version 2.1 or newer')
+
+        self._test_basic_traffic(const.UDP, 8080)
+
+    def _test_healthmonitor_traffic(self, protocol, protocol_port):
         """Tests traffic is correctly routed based on healthmonitor status
 
         * Create three members:
@@ -198,9 +231,10 @@
         * Verify members are in their correct respective operating statuses.
         * Verify that traffic is balanced evenly between the working members.
         """
+
         member1_name = data_utils.rand_name("lb_member_member1-hm-traffic")
         member1_kwargs = {
-            const.POOL_ID: self.pool_id,
+            const.POOL_ID: self.pool_ids[protocol],
             const.NAME: member1_name,
             const.ADMIN_STATE_UP: True,
             const.ADDRESS: self.webserver1_ip,
@@ -214,7 +248,7 @@
         member1_id = member1[const.ID]
         self.addCleanup(
             self.mem_member_client.cleanup_member,
-            member1_id, pool_id=self.pool_id,
+            member1_id, pool_id=self.pool_ids[protocol],
             lb_client=self.mem_lb_client, lb_id=self.lb_id)
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
@@ -225,7 +259,7 @@
         # Set up Member 2 for Webserver 2
         member2_name = data_utils.rand_name("lb_member_member2-hm-traffic")
         member2_kwargs = {
-            const.POOL_ID: self.pool_id,
+            const.POOL_ID: self.pool_ids[protocol],
             const.NAME: member2_name,
             const.ADMIN_STATE_UP: True,
             const.ADDRESS: self.webserver2_ip,
@@ -240,7 +274,7 @@
         member2_id = member2[const.ID]
         self.addCleanup(
             self.mem_member_client.cleanup_member,
-            member2_id, pool_id=self.pool_id,
+            member2_id, pool_id=self.pool_ids[protocol],
             lb_client=self.mem_lb_client, lb_id=self.lb_id)
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
@@ -251,7 +285,7 @@
         # Set up Member 3 as a non-existent disabled node
         member3_name = data_utils.rand_name("lb_member_member3-hm-traffic")
         member3_kwargs = {
-            const.POOL_ID: self.pool_id,
+            const.POOL_ID: self.pool_ids[protocol],
             const.NAME: member3_name,
             const.ADMIN_STATE_UP: False,
             const.ADDRESS: '192.0.2.1',
@@ -263,7 +297,7 @@
         member3_id = member3[const.ID]
         self.addCleanup(
             self.mem_member_client.cleanup_member,
-            member3_id, pool_id=self.pool_id,
+            member3_id, pool_id=self.pool_ids[protocol],
             lb_client=self.mem_lb_client, lb_id=self.lb_id)
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
@@ -278,41 +312,63 @@
             const.NO_MONITOR,
             CONF.load_balancer.build_interval,
             CONF.load_balancer.build_timeout,
-            pool_id=self.pool_id)
+            pool_id=self.pool_ids[protocol])
         waiters.wait_for_status(
             self.mem_member_client.show_member,
             member2_id, const.OPERATING_STATUS,
             const.NO_MONITOR,
             CONF.load_balancer.build_interval,
             CONF.load_balancer.build_timeout,
-            pool_id=self.pool_id)
+            pool_id=self.pool_ids[protocol])
         waiters.wait_for_status(
             self.mem_member_client.show_member,
             member3_id, const.OPERATING_STATUS,
             const.OFFLINE,
             CONF.load_balancer.build_interval,
             CONF.load_balancer.build_timeout,
-            pool_id=self.pool_id)
+            pool_id=self.pool_ids[protocol])
 
         # Send some traffic and verify it is balanced
         self.check_members_balanced(self.lb_vip_address,
+                                    protocol_port=protocol_port,
+                                    protocol=protocol,
                                     traffic_member_count=2)
 
         # Create the healthmonitor
         hm_name = data_utils.rand_name("lb_member_hm1-hm-traffic")
-        hm_kwargs = {
-            const.POOL_ID: self.pool_id,
-            const.NAME: hm_name,
-            const.TYPE: const.HEALTH_MONITOR_HTTP,
-            const.DELAY: 2,
-            const.TIMEOUT: 2,
-            const.MAX_RETRIES: 2,
-            const.MAX_RETRIES_DOWN: 2,
-            const.HTTP_METHOD: const.GET,
-            const.URL_PATH: '/',
-            const.EXPECTED_CODES: '200',
-            const.ADMIN_STATE_UP: True,
-        }
+        if protocol != const.HTTP:
+            if protocol == const.UDP:
+                hm_type = const.HEALTH_MONITOR_UDP_CONNECT
+            elif protocol == const.TCP:
+                hm_type = const.HEALTH_MONITOR_TCP
+
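+            # Layer 4 monitors (TCP / UDP-CONNECT) probe the port directly,
+            # so the HTTP-only options (method, URL path, expected codes)
+            # are omitted.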
+            hm_kwargs = {
+                const.POOL_ID: self.pool_ids[protocol],
+                const.NAME: hm_name,
+                const.TYPE: hm_type,
+                const.DELAY: 3,
+                const.TIMEOUT: 2,
+                const.MAX_RETRIES: 2,
+                const.MAX_RETRIES_DOWN: 2,
+                const.ADMIN_STATE_UP: True,
+            }
+        else:
+            hm_kwargs = {
+                const.POOL_ID: self.pool_ids[protocol],
+                const.NAME: hm_name,
+                const.TYPE: const.HEALTH_MONITOR_HTTP,
+                const.DELAY: 2,
+                const.TIMEOUT: 2,
+                const.MAX_RETRIES: 2,
+                const.MAX_RETRIES_DOWN: 2,
+                const.HTTP_METHOD: const.GET,
+                const.URL_PATH: '/',
+                const.EXPECTED_CODES: '200',
+                const.ADMIN_STATE_UP: True,
+            }
 
         hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
         self.addCleanup(
@@ -339,24 +392,26 @@
             CONF.load_balancer.build_interval,
             CONF.load_balancer.build_timeout,
             error_ok=True,
-            pool_id=self.pool_id)
+            pool_id=self.pool_ids[protocol])
         waiters.wait_for_status(
             self.mem_member_client.show_member,
             member2_id, const.OPERATING_STATUS,
             const.ERROR,
             CONF.load_balancer.build_interval,
             CONF.load_balancer.build_timeout,
-            pool_id=self.pool_id)
+            pool_id=self.pool_ids[protocol])
         waiters.wait_for_status(
             self.mem_member_client.show_member,
             member3_id, const.OPERATING_STATUS,
             const.OFFLINE,
             CONF.load_balancer.build_interval,
             CONF.load_balancer.build_timeout,
-            pool_id=self.pool_id)
+            pool_id=self.pool_ids[protocol])
 
         # Send some traffic and verify it is *unbalanced*, as expected
         self.check_members_balanced(self.lb_vip_address,
+                                    protocol_port=protocol_port,
+                                    protocol=protocol,
                                     traffic_member_count=1)
 
         # Delete the healthmonitor
@@ -375,24 +430,45 @@
             const.NO_MONITOR,
             CONF.load_balancer.build_interval,
             CONF.load_balancer.build_timeout,
-            pool_id=self.pool_id)
+            pool_id=self.pool_ids[protocol])
         waiters.wait_for_status(
             self.mem_member_client.show_member,
             member2_id, const.OPERATING_STATUS,
             const.NO_MONITOR,
             CONF.load_balancer.build_interval,
             CONF.load_balancer.build_timeout,
-            pool_id=self.pool_id)
+            pool_id=self.pool_ids[protocol])
         waiters.wait_for_status(
             self.mem_member_client.show_member,
             member3_id, const.OPERATING_STATUS,
             const.OFFLINE,
             CONF.load_balancer.build_interval,
             CONF.load_balancer.build_timeout,
-            pool_id=self.pool_id)
+            pool_id=self.pool_ids[protocol])
 
         # Send some traffic and verify it is balanced again
-        self.check_members_balanced(self.lb_vip_address)
+        self.check_members_balanced(self.lb_vip_address,
+                                    protocol_port=protocol_port,
+                                    protocol=protocol)
+
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.health_monitor_enabled,
+        'Health monitor testing is disabled')
+    @decorators.idempotent_id('a16f8eb4-a77c-4b0e-8b1b-91c237039713')
+    def test_healthmonitor_traffic(self):
+        self._test_healthmonitor_traffic(self.protocol, 80)
+
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.health_monitor_enabled,
+        'Health monitor testing is disabled')
+    @decorators.idempotent_id('80b86513-1a76-4e42-91c9-cb23c879e536')
+    def test_healthmonitor_udp_traffic(self):
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.1'):
+            raise self.skipException('UDP listener support is only available '
+                                     'in Octavia API version 2.1 or newer')
+
+        self._test_healthmonitor_traffic(const.UDP, 8080)
 
     @testtools.skipUnless(
         CONF.loadbalancer_feature_enabled.l7_protocol_enabled,
@@ -408,11 +484,13 @@
         * Create a policy/rule to reject connections.
         * Test traffic to ensure it goes to the correct place.
         """
+        protocol = const.HTTP
+
         # Create a second pool
         pool_name = data_utils.rand_name("lb_member_pool2_l7redirect")
         pool_kwargs = {
             const.NAME: pool_name,
-            const.PROTOCOL: const.HTTP,
+            const.PROTOCOL: protocol,
             const.LB_ALGORITHM: self.lb_algorithm,
             const.LOADBALANCER_ID: self.lb_id,
         }
@@ -432,7 +510,7 @@
         # Set up Member 1 for Webserver 1 on the default pool
         member1_name = data_utils.rand_name("lb_member_member1-l7redirect")
         member1_kwargs = {
-            const.POOL_ID: self.pool_id,
+            const.POOL_ID: self.pool_ids[protocol],
             const.NAME: member1_name,
             const.ADMIN_STATE_UP: True,
             const.ADDRESS: self.webserver1_ip,
@@ -445,7 +523,7 @@
             **member1_kwargs)
         self.addCleanup(
             self.mem_member_client.cleanup_member,
-            member1[const.ID], pool_id=self.pool_id,
+            member1[const.ID], pool_id=self.pool_ids[protocol],
             lb_client=self.mem_lb_client, lb_id=self.lb_id)
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
@@ -469,7 +547,7 @@
             **member2_kwargs)
         self.addCleanup(
             self.mem_member_client.cleanup_member,
-            member2[const.ID], pool_id=self.pool_id,
+            member2[const.ID], pool_id=self.pool_ids[protocol],
             lb_client=self.mem_lb_client, lb_id=self.lb_id)
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
@@ -481,7 +559,7 @@
         l7policy1_name = data_utils.rand_name("lb_member_l7policy1-l7redirect")
         l7policy1_description = data_utils.arbitrary_string(size=255)
         l7policy1_kwargs = {
-            const.LISTENER_ID: self.listener_id,
+            const.LISTENER_ID: self.listener_ids[protocol],
             const.NAME: l7policy1_name,
             const.DESCRIPTION: l7policy1_description,
             const.ADMIN_STATE_UP: True,
@@ -526,7 +604,7 @@
         l7policy2_name = data_utils.rand_name("lb_member_l7policy2-l7redirect")
         l7policy2_description = data_utils.arbitrary_string(size=255)
         l7policy2_kwargs = {
-            const.LISTENER_ID: self.listener_id,
+            const.LISTENER_ID: self.listener_ids[protocol],
             const.NAME: l7policy2_name,
             const.DESCRIPTION: l7policy2_description,
             const.ADMIN_STATE_UP: True,
@@ -571,7 +649,7 @@
         l7policy3_name = data_utils.rand_name("lb_member_l7policy3-l7redirect")
         l7policy3_description = data_utils.arbitrary_string(size=255)
         l7policy3_kwargs = {
-            const.LISTENER_ID: self.listener_id,
+            const.LISTENER_ID: self.listener_ids[protocol],
             const.NAME: l7policy3_name,
             const.DESCRIPTION: l7policy3_description,
             const.ADMIN_STATE_UP: True,
@@ -633,21 +711,17 @@
                                       url_for_member1,
                                       headers={'reject': 'true'})
 
-    @testtools.skipIf(CONF.load_balancer.test_with_noop,
-                      'Traffic tests will not work in noop mode.')
-    @testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
-                          'Mixed IPv4/IPv6 member test requires IPv6.')
-    @decorators.idempotent_id('20b6b671-0101-4bed-a249-9af6ee3aa6d9')
-    def test_mixed_ipv4_ipv6_members_traffic(self):
+    def _test_mixed_ipv4_ipv6_members_traffic(self, protocol, protocol_port):
         """Tests traffic through a loadbalancer with IPv4 and IPv6 members.
 
         * Set up members on a loadbalancer.
         * Test traffic to ensure it is balanced properly.
         """
+
         # Set up Member 1 for Webserver 1
         member1_name = data_utils.rand_name("lb_member_member1-traffic")
         member1_kwargs = {
-            const.POOL_ID: self.pool_id,
+            const.POOL_ID: self.pool_ids[protocol],
             const.NAME: member1_name,
             const.ADMIN_STATE_UP: True,
             const.ADDRESS: self.webserver1_ip,
@@ -660,7 +734,7 @@
             **member1_kwargs)
         self.addCleanup(
             self.mem_member_client.cleanup_member,
-            member1[const.ID], pool_id=self.pool_id,
+            member1[const.ID], pool_id=self.pool_ids[protocol],
             lb_client=self.mem_lb_client, lb_id=self.lb_id)
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
@@ -671,7 +745,7 @@
         # Set up Member 2 for Webserver 2
         member2_name = data_utils.rand_name("lb_member_member2-traffic")
         member2_kwargs = {
-            const.POOL_ID: self.pool_id,
+            const.POOL_ID: self.pool_ids[protocol],
             const.NAME: member2_name,
             const.ADMIN_STATE_UP: True,
             const.ADDRESS: self.webserver2_ipv6,
@@ -685,7 +759,7 @@
             **member2_kwargs)
         self.addCleanup(
             self.mem_member_client.cleanup_member,
-            member2[const.ID], pool_id=self.pool_id,
+            member2[const.ID], pool_id=self.pool_ids[protocol],
             lb_client=self.mem_lb_client, lb_id=self.lb_id)
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
@@ -694,4 +768,34 @@
             CONF.load_balancer.check_timeout)
 
         # Send some traffic
-        self.check_members_balanced(self.lb_vip_address)
+        self.check_members_balanced(self.lb_vip_address,
+                                    protocol_port=protocol_port,
+                                    protocol=protocol)
+
+    @testtools.skipIf(CONF.load_balancer.test_with_noop,
+                      'Traffic tests will not work in noop mode.')
+    @testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
+                          'Mixed IPv4/IPv6 member test requires IPv6.')
+    @decorators.idempotent_id('20b6b671-0101-4bed-a249-9af6ee3aa6d9')
+    def test_mixed_ipv4_ipv6_members_traffic(self):
+        self._test_mixed_ipv4_ipv6_members_traffic(self.protocol, 80)
+
+    @testtools.skipIf(CONF.load_balancer.test_with_noop,
+                      'Traffic tests will not work in noop mode.')
+    @testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
+                          'Mixed IPv4/IPv6 member test requires IPv6.')
+    @decorators.idempotent_id('56823616-34e1-4e17-beb9-15dd6b1593af')
+    # Skipping test for amphora driver until "UDP load balancers cannot mix
+    # protocol versions" (https://storyboard.openstack.org/#!/story/2003329) is
+    # fixed
+    @decorators.skip_because(
+        bug='2003329',
+        bug_type='storyboard',
+        condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
+    def test_mixed_ipv4_ipv6_members_udp_traffic(self):
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.1'):
+            raise self.skipException('UDP listener support is only available '
+                                     'in Octavia API version 2.1 or newer')
+
+        self._test_mixed_ipv4_ipv6_members_traffic(const.UDP, 8080)
diff --git a/octavia_tempest_plugin/tests/test_base.py b/octavia_tempest_plugin/tests/test_base.py
index 2f94bcc..aab78c1 100644
--- a/octavia_tempest_plugin/tests/test_base.py
+++ b/octavia_tempest_plugin/tests/test_base.py
@@ -12,12 +12,14 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import errno
 import ipaddress
 import pkg_resources
 import random
 import requests
 import shlex
 import six
+import socket
 import string
 import subprocess
 import tempfile
@@ -45,6 +47,11 @@
 RETRY_BACKOFF = 1
 RETRY_MAX = 5
 
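+# Source port range to cycle through for UDP requests; this roughly matches
+# the default Linux ephemeral port range (net.ipv4.ip_local_port_range).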
+SRC_PORT_NUMBER_MIN = 32768
+SRC_PORT_NUMBER_MAX = 61000
+
 
 class LoadBalancerBaseTest(test.BaseTestCase):
     """Base class for load balancer tests."""
@@ -61,6 +66,8 @@
     webserver2_response = 5
     used_ips = []
 
+    src_port_number = SRC_PORT_NUMBER_MIN
+
     @classmethod
     def skip_checks(cls):
         """Check if we should skip all of the children tests."""
@@ -548,6 +555,34 @@
                 cls.lb_mem_SGr_client.delete_security_group_rule,
                 cls.lb_mem_SGr_client.show_security_group_rule,
                 SGr['id'])
+            # Create a security group rule to allow UDP 80-81 (test webservers)
+            SGr = cls.lb_mem_SGr_client.create_security_group_rule(
+                direction='ingress',
+                security_group_id=cls.lb_member_sec_group['id'],
+                protocol='udp',
+                ethertype='IPv4',
+                port_range_min=80,
+                port_range_max=81)['security_group_rule']
+            cls.addClassResourceCleanup(
+                waiters.wait_for_not_found,
+                cls.lb_mem_SGr_client.delete_security_group_rule,
+                cls.lb_mem_SGr_client.show_security_group_rule,
+                SGr['id'])
+            # Create a security group rule to allow UDP 9999 (closed port)
+            # Port 9999 is used to illustrate health monitor ERRORs on closed
+            # ports.
+            SGr = cls.lb_mem_SGr_client.create_security_group_rule(
+                direction='ingress',
+                security_group_id=cls.lb_member_sec_group['id'],
+                protocol='udp',
+                ethertype='IPv4',
+                port_range_min=9999,
+                port_range_max=9999)['security_group_rule']
+            cls.addClassResourceCleanup(
+                waiters.wait_for_not_found,
+                cls.lb_mem_SGr_client.delete_security_group_rule,
+                cls.lb_mem_SGr_client.show_security_group_rule,
+                SGr['id'])
             # Create a security group rule to allow 22 (ssh)
             SGr = cls.lb_mem_SGr_client.create_security_group_rule(
                 direction='ingress',
@@ -575,6 +610,20 @@
                     cls.lb_mem_SGr_client.delete_security_group_rule,
                     cls.lb_mem_SGr_client.show_security_group_rule,
                     SGr['id'])
+                # Create a security group rule to allow UDP 80-81 (test
+                # webservers)
+                SGr = cls.lb_mem_SGr_client.create_security_group_rule(
+                    direction='ingress',
+                    security_group_id=cls.lb_member_sec_group['id'],
+                    protocol='udp',
+                    ethertype='IPv6',
+                    port_range_min=80,
+                    port_range_max=81)['security_group_rule']
+                cls.addClassResourceCleanup(
+                    waiters.wait_for_not_found,
+                    cls.lb_mem_SGr_client.delete_security_group_rule,
+                    cls.lb_mem_SGr_client.show_security_group_rule,
+                    SGr['id'])
                 # Create a security group rule to allow 22 (ssh)
                 SGr = cls.lb_mem_SGr_client.create_security_group_rule(
                     direction='ingress',
@@ -647,6 +696,10 @@
         cls._validate_webserver(cls.webserver1_public_ip,
                                 cls.webserver1_response)
 
+        # Validate udp server 1
+        cls._validate_udp_server(cls.webserver1_public_ip,
+                                 cls.webserver1_response)
+
         # Set up serving on webserver 2
         cls._install_start_webserver(cls.webserver2_public_ip,
                                      cls.lb_member_keypair['private_key'],
@@ -656,6 +709,10 @@
         cls._validate_webserver(cls.webserver2_public_ip,
                                 cls.webserver2_response)
 
+        # Validate udp server 2
+        cls._validate_udp_server(cls.webserver2_public_ip,
+                                 cls.webserver2_response)
+
     @classmethod
     def _create_networks(cls):
         super(LoadBalancerBaseTestWithCompute, cls)._create_networks()
@@ -796,8 +853,8 @@
     @classmethod
     def _install_start_webserver(cls, ip_address, ssh_key, start_id):
         local_file = pkg_resources.resource_filename(
-            'octavia_tempest_plugin.contrib.httpd', 'httpd.bin')
-        dest_file = '/dev/shm/httpd.bin'
+            'octavia_tempest_plugin.contrib.test_server', 'test_server.bin')
+        dest_file = '/dev/shm/test_server.bin'
 
         linux_client = remote_client.RemoteClient(
             ip_address, CONF.validation.image_ssh_user, pkey=ssh_key)
@@ -859,20 +916,103 @@
         URL = 'http://{0}:81'.format(ip_address)
         validators.validate_URL_response(URL, expected_body=str(start_id + 1))
 
-    def _wait_for_lb_functional(self, vip_address,
-                                protocol='http', verify=True):
-        session = requests.Session()
+    @classmethod
+    def _validate_udp_server(cls, ip_address, start_id):
+        res = cls._udp_request(ip_address, 80)
+        if res != str(start_id):
+            raise Exception("Response from test server doesn't match the "
+                            "expected value ({0} != {1}).".format(
+                                res, str(start_id)))
+
+        res = cls._udp_request(ip_address, 81)
+        if res != str(start_id + 1):
+            raise Exception("Response from test server doesn't match the "
+                            "expected value ({0} != {1}).".format(
+                                res, str(start_id + 1)))
+
+    @classmethod
+    def _udp_request(cls, vip_address, port=80, timeout=None):
+        if ipaddress.ip_address(vip_address).version == 6:
+            family = socket.AF_INET6
+        else:
+            family = socket.AF_INET
+
+        sock = socket.socket(family, socket.SOCK_DGRAM)
+
+        # Force the use of an incrementing source port number, to avoid
+        # re-using a previous source port, which would affect the
+        # round-robin dispatch
+        while True:
+            port_number = cls.src_port_number
+            cls.src_port_number += 1
+            if cls.src_port_number >= SRC_PORT_NUMBER_MAX:
+                cls.src_port_number = SRC_PORT_NUMBER_MIN
+
+            # catch and skip already used ports on the host
+            try:
+                sock.bind(('', port_number))
+            except OSError as e:
+                # if error is 'Address already in use', try next port number
+                if e.errno != errno.EADDRINUSE:
+                    raise e
+            else:
+                # the socket bound successfully
+                break
+
+        server_address = (vip_address, port)
+        data = b"data\n"
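+        # The payload is arbitrary; the test server replies with its ID to
+        # any datagram it receives.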
+
+        if timeout is not None:
+            sock.settimeout(timeout)
+
+        sock.sendto(data, server_address)
+        data, addr = sock.recvfrom(4096)
+
+        sock.close()
+
+        return data.decode('utf-8')
+
+    def _wait_for_lb_functional(self, vip_address, traffic_member_count,
+                                protocol_port, protocol, verify):
+        if protocol != const.UDP:
+            session = requests.Session()
         start = time.time()
 
+        response_counts = {}
+
+        # Send requests to the load balancer until at least
+        # "traffic_member_count" members have replied (ensures network
+        # connectivity between the load balancer and the members)
         while time.time() - start < CONF.load_balancer.build_timeout:
             try:
-                session.get("{0}://{1}".format(protocol, vip_address),
-                            timeout=2, verify=verify)
-                time.sleep(1)
-                return
+                if protocol != const.UDP:
+                    url = "{0}://{1}{2}{3}".format(
+                        protocol.lower(),
+                        vip_address,
+                        ':' if protocol_port else '',
+                        protocol_port or '')
+                    r = session.get(url, timeout=2, verify=verify)
+                    data = r.content
+                else:
+                    data = self._udp_request(vip_address, port=protocol_port,
+                                             timeout=2)
+                if data in response_counts:
+                    response_counts[data] += 1
+                else:
+                    response_counts[data] = 1
+
+                if traffic_member_count == len(response_counts):
+                    LOG.debug('Loadbalancer response totals: %s',
+                              response_counts)
+                    time.sleep(1)
+                    return
             except Exception:
                 LOG.warning('Server is not passing initial traffic. Waiting.')
                 time.sleep(1)
+
+        LOG.debug('Loadbalancer response totals: %s', response_counts)
         LOG.error('Server did not begin passing traffic within the timeout '
                   'period. Failing test.')
         raise Exception()
@@ -880,16 +1018,27 @@
     def _send_lb_request(self, handler, protocol, vip_address,
                          verify, protocol_port, num=20):
         response_counts = {}
+
         # Send a number requests to lb vip
         for i in range(num):
             try:
-                r = handler.get('{0}://{1}:{2}'.format(protocol, vip_address,
-                                                       protocol_port),
-                                timeout=2, verify=verify)
-                if r.content in response_counts:
-                    response_counts[r.content] += 1
+                if protocol != const.UDP:
+                    url = "{0}://{1}{2}{3}".format(
+                        protocol.lower(),
+                        vip_address,
+                        ':' if protocol_port else '',
+                        protocol_port or '')
+                    r = handler.get(url, timeout=2, verify=verify)
+                    data = r.content
                 else:
-                    response_counts[r.content] = 1
+                    data = self._udp_request(vip_address, port=protocol_port,
+                                             timeout=2)
+
+                if data in response_counts:
+                    response_counts[data] += 1
+                else:
+                    response_counts[data] = 1
+
             except Exception:
                 LOG.exception('Failed to send request to loadbalancer vip')
                 raise Exception('Failed to connect to lb')
@@ -897,7 +1046,7 @@
         return response_counts
 
     def _check_members_balanced_round_robin(
-            self, vip_address, traffic_member_count=2, protocol='http',
+            self, vip_address, traffic_member_count=2, protocol=const.HTTP,
             verify=True, protocol_port=80):
 
         handler = requests.Session()
@@ -912,7 +1061,7 @@
         self.assertEqual(1, len(set(response_counts.values())))
 
     def _check_members_balanced_source_ip_port(
-            self, vip_address, traffic_member_count=2, protocol='http',
+            self, vip_address, traffic_member_count=2, protocol=const.HTTP,
             verify=True, protocol_port=80):
 
         handler = requests
@@ -931,11 +1080,16 @@
             self.assertEqual(1, len(response_counts))
 
     def check_members_balanced(self, vip_address, traffic_member_count=2,
-                               protocol='http', verify=True, protocol_port=80):
+                               protocol=const.HTTP, verify=True,
+                               protocol_port=80):
 
-        if ipaddress.ip_address(vip_address).version == 6:
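+        # HTTP URLs require IPv6 literals to be wrapped in brackets, while
+        # the UDP socket API takes the bare address.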
+        if (ipaddress.ip_address(vip_address).version == 6 and
+                protocol != const.UDP):
             vip_address = '[{}]'.format(vip_address)
-        self._wait_for_lb_functional(vip_address, protocol, verify)
+        self._wait_for_lb_functional(vip_address, traffic_member_count,
+                                     protocol_port, protocol, verify)
 
         validate_func = '_check_members_balanced_%s' % self.lb_algorithm
         validate_func = getattr(self, validate_func.lower())
diff --git a/releasenotes/notes/add-udp-test-scenario-cdd131d1ef7bf8e9.yaml b/releasenotes/notes/add-udp-test-scenario-cdd131d1ef7bf8e9.yaml
new file mode 100644
index 0000000..2d1952d
--- /dev/null
+++ b/releasenotes/notes/add-udp-test-scenario-cdd131d1ef7bf8e9.yaml
@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    Added test scenarios for UDP traffic, covering UDP listeners, pools,
+    UDP-CONNECT health monitors and traffic distribution.