Create API and scenario tests for healthmonitors

This patch implements healthmonitor tests for the Octavia
Tempest Plugin.
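
The new tests can be run with the standard tempest tooling, for
example:

  tempest run --regex octavia_tempest_plugin.tests.api.v2.test_healthmonitor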

Depends-On: https://review.openstack.org/#/c/571107/
Change-Id: Ib7cd9eef7c9aeb705c56df3f88612d1bf2039163
Story: 2001387
Task: 5975
diff --git a/octavia_tempest_plugin/clients.py b/octavia_tempest_plugin/clients.py
index 7b766be..1bf1ebf 100644
--- a/octavia_tempest_plugin/clients.py
+++ b/octavia_tempest_plugin/clients.py
@@ -16,6 +16,8 @@
 from tempest import config
 
 from octavia_tempest_plugin.services.load_balancer.v2 import (
+    healthmonitor_client)
+from octavia_tempest_plugin.services.load_balancer.v2 import (
     listener_client)
 from octavia_tempest_plugin.services.load_balancer.v2 import (
     loadbalancer_client)
@@ -41,3 +43,5 @@
             self.auth_provider, SERVICE_TYPE, CONF.identity.region)
         self.member_client = member_client.MemberClient(
             self.auth_provider, SERVICE_TYPE, CONF.identity.region)
+        self.healthmonitor_client = healthmonitor_client.HealthMonitorClient(
+            self.auth_provider, SERVICE_TYPE, CONF.identity.region)
diff --git a/octavia_tempest_plugin/common/constants.py b/octavia_tempest_plugin/common/constants.py
index cf38c5a..0b51eb1 100644
--- a/octavia_tempest_plugin/common/constants.py
+++ b/octavia_tempest_plugin/common/constants.py
@@ -69,6 +69,57 @@
 MONITOR_ADDRESS = 'monitor_address'
 MONITOR_PORT = 'monitor_port'
 
+DELAY = 'delay'
+TIMEOUT = 'timeout'
+MAX_RETRIES = 'max_retries'
+MAX_RETRIES_DOWN = 'max_retries_down'
+HTTP_METHOD = 'http_method'
+URL_PATH = 'url_path'
+EXPECTED_CODES = 'expected_codes'
+
+# Other constants
+ACTIVE = 'ACTIVE'
+ADMIN_STATE_UP_TRUE = 'true'
+ASC = 'asc'
+DELETED = 'DELETED'
+DESC = 'desc'
+FIELDS = 'fields'
+OFFLINE = 'OFFLINE'
+ONLINE = 'ONLINE'
+NO_MONITOR = 'NO_MONITOR'
+ERROR = 'ERROR'
+SORT = 'sort'
+
+# Protocols
+HTTP = 'HTTP'
+HTTPS = 'HTTPS'
+TCP = 'TCP'
+
+# HTTP Methods
+GET = 'GET'
+POST = 'POST'
+PUT = 'PUT'
+DELETE = 'DELETE'
+
+# HM Types
+HEALTH_MONITOR_PING = 'PING'
+HEALTH_MONITOR_TCP = 'TCP'
+HEALTH_MONITOR_HTTP = 'HTTP'
+HEALTH_MONITOR_HTTPS = 'HTTPS'
+HEALTH_MONITOR_TLS_HELLO = 'TLS-HELLO'
+
+# Session Persistence
+TYPE = 'type'
+COOKIE_NAME = 'cookie_name'
+SESSION_PERSISTENCE_SOURCE_IP = 'SOURCE_IP'
+SESSION_PERSISTENCE_HTTP_COOKIE = 'HTTP_COOKIE'
+SESSION_PERSISTENCE_APP_COOKIE = 'APP_COOKIE'
+
+# RBAC options
+ADVANCED = 'advanced'
+OWNERADMIN = 'owner_or_admin'
+NONE = 'none'
+
 # API valid fields
 SHOW_LOAD_BALANCER_RESPONSE_FIELDS = (
     ADMIN_STATE_UP, CREATED_AT, DESCRIPTION, FLAVOR_ID, ID, LISTENERS, NAME,
@@ -96,31 +147,8 @@
     ADDRESS, PROTOCOL_PORT, WEIGHT, BACKUP, MONITOR_PORT, MONITOR_ADDRESS
 )
 
-# Other constants
-ACTIVE = 'ACTIVE'
-ADMIN_STATE_UP_TRUE = 'true'
-ASC = 'asc'
-DELETED = 'DELETED'
-DESC = 'desc'
-FIELDS = 'fields'
-OFFLINE = 'OFFLINE'
-ONLINE = 'ONLINE'
-NO_MONITOR = 'NO_MONITOR'
-SORT = 'sort'
-
-# Protocols
-HTTP = 'HTTP'
-HTTPS = 'HTTPS'
-TCP = 'TCP'
-
-# Session Persistence
-TYPE = 'type'
-COOKIE_NAME = 'cookie_name'
-SESSION_PERSISTENCE_SOURCE_IP = 'SOURCE_IP'
-SESSION_PERSISTENCE_HTTP_COOKIE = 'HTTP_COOKIE'
-SESSION_PERSISTENCE_APP_COOKIE = 'APP_COOKIE'
-
-# RBAC options
-ADVANCED = 'advanced'
-OWNERADMIN = 'owner_or_admin'
-NONE = 'none'
+SHOW_HEALTHMONITOR_RESPONSE_FIELDS = (
+    ID, NAME, PROVISIONING_STATUS, OPERATING_STATUS, ADMIN_STATE_UP,
+    TYPE, DELAY, TIMEOUT, MAX_RETRIES, MAX_RETRIES_DOWN, HTTP_METHOD,
+    URL_PATH, EXPECTED_CODES, CREATED_AT, UPDATED_AT
+)
diff --git a/octavia_tempest_plugin/services/load_balancer/v2/healthmonitor_client.py b/octavia_tempest_plugin/services/load_balancer/v2/healthmonitor_client.py
new file mode 100644
index 0000000..70dce4c
--- /dev/null
+++ b/octavia_tempest_plugin/services/load_balancer/v2/healthmonitor_client.py
@@ -0,0 +1,263 @@
+#   Copyright 2018 GoDaddy
+#
+#   Licensed under the Apache License, Version 2.0 (the "License"); you may
+#   not use this file except in compliance with the License. You may obtain
+#   a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#   License for the specific language governing permissions and limitations
+#   under the License.
+
+from tempest import config
+
+from octavia_tempest_plugin.services.load_balancer.v2 import base_client
+
+CONF = config.CONF
+Unset = base_client.Unset
+
+
+class HealthMonitorClient(base_client.BaseLBaaSClient):
+
+    root_tag = 'healthmonitor'
+    list_root_tag = 'healthmonitors'
+    resource_name = 'healthmonitor'
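+    # The API wraps a single object in the 'healthmonitor' tag and list
+    # responses in 'healthmonitors'; the base client uses these tags to
+    # build request bodies and unwrap responses.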
+
+    def create_healthmonitor(self, pool_id, type, delay, timeout, max_retries,
+                             max_retries_down=Unset, name=Unset,
+                             http_method=Unset, url_path=Unset,
+                             expected_codes=Unset, admin_state_up=Unset,
+                             return_object_only=True):
+        """Create a healthmonitor.
+
+        :param pool_id: The ID of the pool.
+        :param type: The type of health monitor.
+        :param delay: The time, in seconds, between sending probes to members.
+        :param timeout: The maximum time, in seconds, that a monitor waits to
+                        connect before it times out.
+        :param max_retries: The number of successful checks before changing the
+                            operating status of the member to ONLINE.
+        :param max_retries_down: The number of allowed check failures before
+                                 changing the operating status of the member to
+                                 ERROR.
+        :param name: Human-readable name of the resource.
+        :param http_method: The HTTP method that the health monitor uses for
+                            requests.
+        :param url_path: The HTTP URL path of the request sent by the monitor
+                         to test the health of a backend member.
+        :param expected_codes: The list of HTTP status codes expected in
+                               response from the member to declare it healthy.
+        :param admin_state_up: The administrative state of the resource, which
+                               is up (true) or down (false).
+        :param return_object_only: If True, the response returns the object
+                                   inside the root tag. False returns the full
+                                   response from the API.
+        :raises AssertionError: if the expected_code isn't a valid http success
+                                response code
+        :raises BadRequest: If a 400 response code is received
+        :raises Conflict: If a 409 response code is received
+        :raises Forbidden: If a 403 response code is received
+        :raises Gone: If a 410 response code is received
+        :raises InvalidContentType: If a 415 response code is received
+        :raises InvalidHTTPResponseBody: The response body wasn't valid JSON
+        :raises InvalidHttpSuccessCode: if the response code isn't an expected
+                                        http success code
+        :raises NotFound: If a 404 response code is received
+        :raises NotImplemented: If a 501 response code is received
+        :raises OverLimit: If a 413 response code is received and over_limit is
+                           not in the response body
+        :raises RateLimitExceeded: If a 413 response code is received and
+                                   over_limit is in the response body
+        :raises ServerFault: If a 500 response code is received
+        :raises Unauthorized: If a 401 response code is received
+        :raises UnexpectedContentType: If the content-type of the response
+                                       isn't an expected type
+        :raises UnexpectedResponseCode: If a response code above 400 is
+                                        received and it doesn't fall into any
+                                        of the handled checks
+        :raises UnprocessableEntity: If a 422 response code is received and
+                                     couldn't be parsed
+        :returns: A healthmonitor object.
+        """
+        kwargs = {arg: value for arg, value in locals().items()
+                  if arg != 'self' and value is not Unset}
+        return self._create_object(**kwargs)
+
+    def show_healthmonitor(self, healthmonitor_id, query_params=None,
+                           return_object_only=True):
+        """Get healthmonitor details.
+
+        :param healthmonitor_id: The healthmonitor ID to query.
+        :param query_params: The optional query parameters to append to the
+                             request. Ex. fields=id&fields=name
+        :param return_object_only: If True, the response returns the object
+                                   inside the root tag. False returns the full
+                                   response from the API.
+        :raises AssertionError: if the expected_code isn't a valid http success
+                                response code
+        :raises BadRequest: If a 400 response code is received
+        :raises Conflict: If a 409 response code is received
+        :raises Forbidden: If a 403 response code is received
+        :raises Gone: If a 410 response code is received
+        :raises InvalidContentType: If a 415 response code is received
+        :raises InvalidHTTPResponseBody: The response body wasn't valid JSON
+        :raises InvalidHttpSuccessCode: if the response code isn't an expected
+                                        http success code
+        :raises NotFound: If a 404 response code is received
+        :raises NotImplemented: If a 501 response code is received
+        :raises OverLimit: If a 413 response code is received and over_limit is
+                           not in the response body
+        :raises RateLimitExceeded: If a 413 response code is received and
+                                   over_limit is in the response body
+        :raises ServerFault: If a 500 response code is received
+        :raises Unauthorized: If a 401 response code is received
+        :raises UnexpectedContentType: If the content-type of the response
+                                       isn't an expected type
+        :raises UnexpectedResponseCode: If a response code above 400 is
+                                        received and it doesn't fall into any
+                                        of the handled checks
+        :raises UnprocessableEntity: If a 422 response code is received and
+                                     couldn't be parsed
+        :returns: A healthmonitor object.
+        """
+        return self._show_object(obj_id=healthmonitor_id,
+                                 query_params=query_params,
+                                 return_object_only=return_object_only)
+
+    def list_healthmonitors(self, query_params=None, return_object_only=True):
+        """Get a list of healthmonitor objects.
+
+        :param query_params: The optional query parameters to append to the
+                             request. Ex. fields=id&fields=name
+        :param return_object_only: If True, the response returns the object
+                                   inside the root tag. False returns the full
+                                   response from the API.
+        :raises AssertionError: if the expected_code isn't a valid http success
+                                response code
+        :raises BadRequest: If a 400 response code is received
+        :raises Conflict: If a 409 response code is received
+        :raises Forbidden: If a 403 response code is received
+        :raises Gone: If a 410 response code is received
+        :raises InvalidContentType: If a 415 response code is received
+        :raises InvalidHTTPResponseBody: The response body wasn't valid JSON
+        :raises InvalidHttpSuccessCode: if the response code isn't an expected
+                                        http success code
+        :raises NotFound: If a 404 response code is received
+        :raises NotImplemented: If a 501 response code is received
+        :raises OverLimit: If a 413 response code is received and over_limit is
+                           not in the response body
+        :raises RateLimitExceeded: If a 413 response code is received and
+                                   over_limit is in the response body
+        :raises ServerFault: If a 500 response code is received
+        :raises Unauthorized: If a 401 response code is received
+        :raises UnexpectedContentType: If the content-type of the response
+                                       isn't an expected type
+        :raises UnexpectedResponseCode: If a response code above 400 is
+                                        received and it doesn't fall into any
+                                        of the handled checks
+        :raises UnprocessableEntity: If a 422 response code is received and
+                                     couldn't be parsed
+        :returns: A list of healthmonitor objects.
+        """
+        return self._list_objects(query_params=query_params,
+                                  return_object_only=return_object_only)
+
+    def update_healthmonitor(self, healthmonitor_id, delay=Unset,
+                             timeout=Unset, max_retries=Unset,
+                             max_retries_down=Unset, name=Unset,
+                             http_method=Unset, url_path=Unset,
+                             expected_codes=Unset, admin_state_up=Unset,
+                             return_object_only=True):
+        """Update a healthmonitor.
+
+        :param healthmonitor_id: The healthmonitor ID to update.
+        :param delay: The time, in seconds, between sending probes to members.
+        :param timeout: The maximum time, in seconds, that a monitor waits to
+                        connect before it times out.
+        :param max_retries: The number of successful checks before changing the
+                            operating status of the member to ONLINE.
+        :param max_retries_down: The number of allowed check failures before
+                                 changing the operating status of the member to
+                                 ERROR.
+        :param name: Human-readable name of the resource.
+        :param http_method: The HTTP method that the health monitor uses for
+                            requests.
+        :param url_path: The HTTP URL path of the request sent by the monitor
+                         to test the health of a backend member.
+        :param expected_codes: The list of HTTP status codes expected in
+                               response from the member to declare it healthy.
+        :param admin_state_up: The administrative state of the resource, which
+                               is up (true) or down (false).
+        :param return_object_only: If True, the response returns the object
+                                   inside the root tag. False returns the full
+                                   response from the API.
+        :raises AssertionError: if the expected_code isn't a valid http success
+                                response code
+        :raises BadRequest: If a 400 response code is received
+        :raises Conflict: If a 409 response code is received
+        :raises Forbidden: If a 403 response code is received
+        :raises Gone: If a 410 response code is received
+        :raises InvalidContentType: If a 415 response code is received
+        :raises InvalidHTTPResponseBody: The response body wasn't valid JSON
+        :raises InvalidHttpSuccessCode: if the response code isn't an expected
+                                        http success code
+        :raises NotFound: If a 404 response code is received
+        :raises NotImplemented: If a 501 response code is received
+        :raises OverLimit: If a 413 response code is received and over_limit is
+                           not in the response body
+        :raises RateLimitExceeded: If a 413 response code is received and
+                                   over_limit is in the response body
+        :raises ServerFault: If a 500 response code is received
+        :raises Unauthorized: If a 401 response code is received
+        :raises UnexpectedContentType: If the content-type of the response
+                                       isn't an expected type
+        :raises UnexpectedResponseCode: If a response code above 400 is
+                                        received and it doesn't fall into any
+                                        of the handled checks
+        :raises UnprocessableEntity: If a 422 response code is received and
+                                     couldn't be parsed
+        :returns: A healthmonitor object.
+        """
+        kwargs = {arg: value for arg, value in locals().items()
+                  if arg != 'self' and value is not Unset}
+        kwargs['obj_id'] = kwargs.pop('healthmonitor_id')
+        return self._update_object(**kwargs)
+
+    def delete_healthmonitor(self, healthmonitor_id, ignore_errors=False):
+        """Delete a healthmonitor.
+
+        :param healthmonitor_id: The healthmonitor ID to delete.
+        :param ignore_errors: True if errors should be ignored.
+        :raises AssertionError: if the expected_code isn't a valid http success
+                                response code
+        :raises BadRequest: If a 400 response code is received
+        :raises Conflict: If a 409 response code is received
+        :raises Forbidden: If a 403 response code is received
+        :raises Gone: If a 410 response code is received
+        :raises InvalidContentType: If a 415 response code is received
+        :raises InvalidHTTPResponseBody: The response body wasn't valid JSON
+        :raises InvalidHttpSuccessCode: if the response code isn't an expected
+                                        http success code
+        :raises NotFound: If a 404 response code is received
+        :raises NotImplemented: If a 501 response code is received
+        :raises OverLimit: If a 413 response code is received and over_limit is
+                           not in the response body
+        :raises RateLimitExceeded: If a 413 response code is received and
+                                   over_limit is in the response body
+        :raises ServerFault: If a 500 response code is received
+        :raises Unauthorized: If a 401 response code is received
+        :raises UnexpectedContentType: If the content-type of the response
+                                       isn't an expected type
+        :raises UnexpectedResponseCode: If a response code above 400 is
+                                        received and it doesn't fall into any
+                                        of the handled checks
+        :raises UnprocessableEntity: If a 422 response code is received and
+                                     couldn't be parsed
+        :returns: None if ignore_errors is True, the response status code
+                  if not.
+        """
+        return self._delete_obj(obj_id=healthmonitor_id,
+                                ignore_errors=ignore_errors)
diff --git a/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py b/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py
new file mode 100644
index 0000000..a1bb301
--- /dev/null
+++ b/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py
@@ -0,0 +1,790 @@
+# Copyright 2018 GoDaddy
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import time
+from uuid import UUID
+
+from dateutil import parser
+from oslo_log import log as logging
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions
+
+from octavia_tempest_plugin.common import constants as const
+from octavia_tempest_plugin.tests import test_base
+from octavia_tempest_plugin.tests import waiters
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+class HealthMonitorAPITest(test_base.LoadBalancerBaseTest):
+    """Test the healthmonitor object API."""
+
+    @classmethod
+    def resource_setup(cls):
+        """Setup resources needed by the tests."""
+        super(HealthMonitorAPITest, cls).resource_setup()
+
+        lb_name = data_utils.rand_name("lb_member_lb1_hm")
+        lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
+                     const.NAME: lb_name}
+
+        cls._setup_lb_network_kwargs(lb_kwargs)
+
+        lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
+        cls.lb_id = lb[const.ID]
+        cls.addClassResourceCleanup(
+            cls.mem_lb_client.cleanup_loadbalancer,
+            cls.lb_id)
+
+        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+                                cls.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.lb_build_interval,
+                                CONF.load_balancer.lb_build_timeout)
+
+    @decorators.idempotent_id('30288670-5772-40c2-92e6-6d4a6d62d029')
+    def test_healthmonitor_create(self):
+        """Tests healthmonitor create and basic show APIs.
+
+        * Create a clean pool to use for the healthmonitor.
+        * Validate that users without the loadbalancer member role cannot
+          create healthmonitors.
+        * Create a fully populated healthmonitor.
+        * Show healthmonitor details.
+        * Validate the show reflects the requested values.
+        """
+        pool_name = data_utils.rand_name("lb_member_pool1_hm-create")
+        pool_kwargs = {
+            const.NAME: pool_name,
+            const.PROTOCOL: const.HTTP,
+            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
+            const.LOADBALANCER_ID: self.lb_id,
+        }
+
+        pool = self.mem_pool_client.create_pool(**pool_kwargs)
+        self.addCleanup(
+            self.mem_pool_client.cleanup_pool, pool[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        hm_name = data_utils.rand_name("lb_member_hm1-create")
+        hm_kwargs = {
+            const.POOL_ID: pool[const.ID],
+            const.NAME: hm_name,
+            const.TYPE: const.HEALTH_MONITOR_HTTP,
+            const.DELAY: 2,
+            const.TIMEOUT: 3,
+            const.MAX_RETRIES: 4,
+            const.MAX_RETRIES_DOWN: 5,
+            const.HTTP_METHOD: const.GET,
+            const.URL_PATH: '/',
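+            # Octavia accepts a single code, a comma-separated list, or a
+            # range such as '200-204' for expected_codes.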
+            const.EXPECTED_CODES: '200-204',
+            const.ADMIN_STATE_UP: True,
+        }
+
+        # Test that a user without the loadbalancer member role cannot
+        # create a healthmonitor
+        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
+            self.assertRaises(
+                exceptions.Forbidden,
+                self.os_primary.healthmonitor_client.create_healthmonitor,
+                **hm_kwargs)
+
+        hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
+        self.addCleanup(
+            self.mem_healthmonitor_client.cleanup_healthmonitor,
+            hm[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+        hm = waiters.wait_for_status(
+            self.mem_healthmonitor_client.show_healthmonitor,
+            hm[const.ID], const.PROVISIONING_STATUS,
+            const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+
+        parser.parse(hm[const.CREATED_AT])
+        parser.parse(hm[const.UPDATED_AT])
+        UUID(hm[const.ID])
+
+        # Healthmonitors are ONLINE when admin_state_up is True (as here)
+        self.assertEqual(const.ONLINE, hm[const.OPERATING_STATUS])
+
+        equal_items = [const.NAME, const.TYPE, const.DELAY, const.TIMEOUT,
+                       const.MAX_RETRIES, const.MAX_RETRIES_DOWN,
+                       const.HTTP_METHOD, const.URL_PATH, const.EXPECTED_CODES,
+                       const.ADMIN_STATE_UP]
+
+        for item in equal_items:
+            self.assertEqual(hm_kwargs[item], hm[item])
+
+    # Helper functions for the healthmonitor list test
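+    # Keep only the HMs whose parent pool is one of the given pools,
+    # i.e. the HMs created by this test.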
+    def _filter_hms_by_pool_id(self, hms, pool_ids):
+        return [hm for hm in hms
+                if hm[const.POOLS][0][const.ID] in pool_ids]
+
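+    # Drop the entries at the given positions; used to strip pre-existing
+    # HMs from 'fields'-filtered listings, where pool information is not
+    # returned.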
+    def _filter_hms_by_index(self, hms, indexes):
+        return [hm for i, hm in enumerate(hms) if i not in indexes]
+
+    @decorators.idempotent_id('c9a9f20c-3680-4ae8-b657-33c687258fea')
+    def test_healthmonitor_list(self):
+        """Tests healthmonitor list API and field filtering.
+
+        * Create three clean pools to use for the healthmonitors.
+        * Create three healthmonitors.
+        * Validate that other accounts cannot list the healthmonitors.
+        * List the healthmonitors using the default sort order.
+        * List the healthmonitors using descending sort order.
+        * List the healthmonitors using ascending sort order.
+        * List the healthmonitors returning one field at a time.
+        * List the healthmonitors returning three fields.
+        * List the healthmonitors filtering to one of the three.
+        * List the healthmonitors filtered, with two fields, and sorted.
+        """
+        # Get a list of pre-existing HMs to filter out of the test results
+        pretest_hms = self.mem_healthmonitor_client.list_healthmonitors()
+        # Store their IDs for easy access
+        pretest_hm_ids = [hm[const.ID] for hm in pretest_hms]
+
+        pool1_name = data_utils.rand_name("lb_member_pool1_hm-list")
+        pool1_kwargs = {
+            const.NAME: pool1_name,
+            const.PROTOCOL: const.HTTP,
+            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
+            const.LOADBALANCER_ID: self.lb_id,
+        }
+
+        pool1 = self.mem_pool_client.create_pool(**pool1_kwargs)
+        pool1_id = pool1[const.ID]
+        self.addCleanup(
+            self.mem_pool_client.cleanup_pool, pool1_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        pool2_name = data_utils.rand_name("lb_member_pool2_hm-list")
+        pool2_kwargs = {
+            const.NAME: pool2_name,
+            const.PROTOCOL: const.HTTP,
+            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
+            const.LOADBALANCER_ID: self.lb_id,
+        }
+
+        pool2 = self.mem_pool_client.create_pool(**pool2_kwargs)
+        pool2_id = pool2[const.ID]
+        self.addCleanup(
+            self.mem_pool_client.cleanup_pool, pool2_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        pool3_name = data_utils.rand_name("lb_member_pool3_hm-list")
+        pool3_kwargs = {
+            const.NAME: pool3_name,
+            const.PROTOCOL: const.HTTP,
+            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
+            const.LOADBALANCER_ID: self.lb_id,
+        }
+
+        pool3 = self.mem_pool_client.create_pool(**pool3_kwargs)
+        pool3_id = pool3[const.ID]
+        self.addCleanup(
+            self.mem_pool_client.cleanup_pool, pool3_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
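+        # Note: the names and url_paths are deliberately assigned out of
+        # creation order so that sorted listings differ from the default
+        # created_at ordering.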
+        hm1_name = data_utils.rand_name("lb_member_hm2-list")
+        hm1_kwargs = {
+            const.POOL_ID: pool1_id,
+            const.NAME: hm1_name,
+            const.TYPE: const.HEALTH_MONITOR_HTTP,
+            const.DELAY: 2,
+            const.TIMEOUT: 3,
+            const.MAX_RETRIES: 4,
+            const.MAX_RETRIES_DOWN: 5,
+            const.HTTP_METHOD: const.GET,
+            const.URL_PATH: '/B',
+            const.EXPECTED_CODES: '200-204',
+            const.ADMIN_STATE_UP: True,
+        }
+        hm1 = self.mem_healthmonitor_client.create_healthmonitor(
+            **hm1_kwargs)
+        self.addCleanup(
+            self.mem_healthmonitor_client.cleanup_healthmonitor,
+            hm1[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+        hm1 = waiters.wait_for_status(
+            self.mem_healthmonitor_client.show_healthmonitor, hm1[const.ID],
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id,
+                                const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+        # Time resolution for created_at is only to the second, and we need to
+        # ensure that each object has a distinct creation time. Delaying one
+        # second is both a simple and a reliable way to accomplish this.
+        time.sleep(1)
+
+        hm2_name = data_utils.rand_name("lb_member_hm1-list")
+        hm2_kwargs = {
+            const.POOL_ID: pool2_id,
+            const.NAME: hm2_name,
+            const.TYPE: const.HEALTH_MONITOR_HTTP,
+            const.DELAY: 2,
+            const.TIMEOUT: 3,
+            const.MAX_RETRIES: 4,
+            const.MAX_RETRIES_DOWN: 5,
+            const.HTTP_METHOD: const.GET,
+            const.URL_PATH: '/A',
+            const.EXPECTED_CODES: '200-204',
+            const.ADMIN_STATE_UP: True,
+        }
+        hm2 = self.mem_healthmonitor_client.create_healthmonitor(
+            **hm2_kwargs)
+        self.addCleanup(
+            self.mem_healthmonitor_client.cleanup_healthmonitor,
+            hm2[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+        hm2 = waiters.wait_for_status(
+            self.mem_healthmonitor_client.show_healthmonitor, hm2[const.ID],
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id,
+                                const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+        # Time resolution for created_at is only to the second, and we need to
+        # ensure that each object has a distinct creation time. Delaying one
+        # second is both a simple and a reliable way to accomplish this.
+        time.sleep(1)
+
+        hm3_name = data_utils.rand_name("lb_member_hm3-list")
+        hm3_kwargs = {
+            const.POOL_ID: pool3_id,
+            const.NAME: hm3_name,
+            const.TYPE: const.HEALTH_MONITOR_HTTP,
+            const.DELAY: 2,
+            const.TIMEOUT: 3,
+            const.MAX_RETRIES: 4,
+            const.MAX_RETRIES_DOWN: 5,
+            const.HTTP_METHOD: const.GET,
+            const.URL_PATH: '/C',
+            const.EXPECTED_CODES: '200-204',
+            const.ADMIN_STATE_UP: False,
+        }
+        hm3 = self.mem_healthmonitor_client.create_healthmonitor(
+            **hm3_kwargs)
+        self.addCleanup(
+            self.mem_healthmonitor_client.cleanup_healthmonitor,
+            hm3[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+        hm3 = waiters.wait_for_status(
+            self.mem_healthmonitor_client.show_healthmonitor, hm3[const.ID],
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id,
+                                const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        # Test that a different user, with the loadbalancer member role,
+        # cannot see this project's healthmonitors
+        if CONF.load_balancer.RBAC_test_type != const.NONE:
+            member2_client = self.os_roles_lb_member2.healthmonitor_client
+            hms = member2_client.list_healthmonitors(
+                query_params='pool_id={pool_id}'.format(pool_id=pool1_id))
+            self.assertEqual(0, len(hms))
+
+        # Test that users without the lb member role cannot list healthmonitors
+        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
+            self.assertRaises(
+                exceptions.Forbidden,
+                self.os_primary.healthmonitor_client.list_healthmonitors)
+
+        # Check the default sort order, created_at
+        hms = self.mem_healthmonitor_client.list_healthmonitors()
+        hms = self._filter_hms_by_pool_id(hms, (pool1_id, pool2_id, pool3_id))
+        self.assertEqual(hm1[const.URL_PATH],
+                         hms[0][const.URL_PATH])
+        self.assertEqual(hm2[const.URL_PATH],
+                         hms[1][const.URL_PATH])
+        self.assertEqual(hm3[const.URL_PATH],
+                         hms[2][const.URL_PATH])
+
+        # Test sort descending by url_path
+        hms = self.mem_healthmonitor_client.list_healthmonitors(
+            query_params='{sort}={url_path}:{desc}'
+                         .format(sort=const.SORT,
+                                 url_path=const.URL_PATH, desc=const.DESC))
+        hms = self._filter_hms_by_pool_id(hms, (pool1_id, pool2_id, pool3_id))
+        self.assertEqual(hm1[const.URL_PATH],
+                         hms[1][const.URL_PATH])
+        self.assertEqual(hm2[const.URL_PATH],
+                         hms[2][const.URL_PATH])
+        self.assertEqual(hm3[const.URL_PATH],
+                         hms[0][const.URL_PATH])
+
+        # Test sort ascending by url_path
+        hms = self.mem_healthmonitor_client.list_healthmonitors(
+            query_params='{sort}={url_path}:{asc}'
+                         .format(sort=const.SORT,
+                                 url_path=const.URL_PATH, asc=const.ASC))
+        hms = self._filter_hms_by_pool_id(hms, (pool1_id, pool2_id, pool3_id))
+        self.assertEqual(hm1[const.URL_PATH],
+                         hms[1][const.URL_PATH])
+        self.assertEqual(hm2[const.URL_PATH],
+                         hms[0][const.URL_PATH])
+        self.assertEqual(hm3[const.URL_PATH],
+                         hms[2][const.URL_PATH])
+
+        # Determine indexes of pretest HMs in default sort
+        pretest_hm_indexes = []
+        hms = self.mem_healthmonitor_client.list_healthmonitors()
+        for i, hm in enumerate(hms):
+            if hm[const.ID] in pretest_hm_ids:
+                pretest_hm_indexes.append(i)
+
+        # Test fields
+        for field in const.SHOW_HEALTHMONITOR_RESPONSE_FIELDS:
+            hms = self.mem_healthmonitor_client.list_healthmonitors(
+                query_params='{fields}={field}'
+                             .format(fields=const.FIELDS, field=field))
+            hms = self._filter_hms_by_index(hms, pretest_hm_indexes)
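+            # Each record should contain exactly the one requested field.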
+            self.assertEqual(1, len(hms[0]))
+            self.assertEqual(hm1[field], hms[0][field])
+            self.assertEqual(hm2[field], hms[1][field])
+            self.assertEqual(hm3[field], hms[2][field])
+
+        # Test multiple fields at the same time
+        hms = self.mem_healthmonitor_client.list_healthmonitors(
+            query_params='{fields}={admin}&'
+                         '{fields}={created}&'
+                         '{fields}={pools}'.format(
+                             fields=const.FIELDS,
+                             admin=const.ADMIN_STATE_UP,
+                             created=const.CREATED_AT,
+                             pools=const.POOLS))
+        hms = self._filter_hms_by_pool_id(hms, (pool1_id, pool2_id, pool3_id))
+        self.assertEqual(3, len(hms[0]))
+        self.assertTrue(hms[0][const.ADMIN_STATE_UP])
+        parser.parse(hms[0][const.CREATED_AT])
+        self.assertTrue(hms[1][const.ADMIN_STATE_UP])
+        parser.parse(hms[1][const.CREATED_AT])
+        self.assertFalse(hms[2][const.ADMIN_STATE_UP])
+        parser.parse(hms[2][const.CREATED_AT])
+
+        # Test filtering
+        hms = self.mem_healthmonitor_client.list_healthmonitors(
+            query_params='{name}={hm_name}'.format(
+                name=const.NAME,
+                hm_name=hm2[const.NAME]))
+        self.assertEqual(1, len(hms))
+        self.assertEqual(hm2[const.NAME],
+                         hms[0][const.NAME])
+
+        # Test combined params
+        hms = self.mem_healthmonitor_client.list_healthmonitors(
+            query_params='{admin}={true}&'
+                         '{fields}={name}&{fields}={pools}&'
+                         '{sort}={name}:{desc}'.format(
+                             admin=const.ADMIN_STATE_UP,
+                             true=const.ADMIN_STATE_UP_TRUE,
+                             fields=const.FIELDS, name=const.NAME,
+                             pools=const.POOLS, sort=const.SORT,
+                             desc=const.DESC))
+        hms = self._filter_hms_by_pool_id(hms, (pool1_id, pool2_id, pool3_id))
+        # Should get two healthmonitors
+        self.assertEqual(2, len(hms))
+        # Each healthmonitor should have two fields
+        self.assertEqual(2, len(hms[0]))
+        # Should be in descending order
+        self.assertEqual(hm2[const.NAME],
+                         hms[1][const.NAME])
+        self.assertEqual(hm1[const.NAME],
+                         hms[0][const.NAME])
+
+    @decorators.idempotent_id('284e8d3b-7b2d-4697-9e41-580b3423c0b4')
+    def test_healthmonitor_show(self):
+        """Tests healthmonitor show API.
+
+        * Create a clean pool to use for the healthmonitor.
+        * Create a fully populated healthmonitor.
+        * Show healthmonitor details.
+        * Validate the show reflects the requested values.
+        * Validate that other accounts cannot see the healthmonitor.
+        """
+        pool_name = data_utils.rand_name("lb_member_pool1_hm-show")
+        pool_kwargs = {
+            const.NAME: pool_name,
+            const.PROTOCOL: const.HTTP,
+            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
+            const.LOADBALANCER_ID: self.lb_id,
+        }
+
+        pool = self.mem_pool_client.create_pool(**pool_kwargs)
+        self.addCleanup(
+            self.mem_pool_client.cleanup_pool, pool[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        hm_name = data_utils.rand_name("lb_member_hm1-show")
+        hm_kwargs = {
+            const.POOL_ID: pool[const.ID],
+            const.NAME: hm_name,
+            const.TYPE: const.HEALTH_MONITOR_HTTP,
+            const.DELAY: 2,
+            const.TIMEOUT: 3,
+            const.MAX_RETRIES: 4,
+            const.MAX_RETRIES_DOWN: 5,
+            const.HTTP_METHOD: const.GET,
+            const.URL_PATH: '/',
+            const.EXPECTED_CODES: '200-204',
+            const.ADMIN_STATE_UP: True,
+        }
+
+        hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
+        self.addCleanup(
+            self.mem_healthmonitor_client.cleanup_healthmonitor,
+            hm[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+        hm = waiters.wait_for_status(
+            self.mem_healthmonitor_client.show_healthmonitor,
+            hm[const.ID], const.PROVISIONING_STATUS,
+            const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+
+        parser.parse(hm[const.CREATED_AT])
+        parser.parse(hm[const.UPDATED_AT])
+        UUID(hm[const.ID])
+
+        # Healthmonitors are ONLINE when admin_state_up is True (as here)
+        self.assertEqual(const.ONLINE, hm[const.OPERATING_STATUS])
+
+        equal_items = [const.NAME, const.TYPE, const.DELAY, const.TIMEOUT,
+                       const.MAX_RETRIES, const.MAX_RETRIES_DOWN,
+                       const.HTTP_METHOD, const.URL_PATH, const.EXPECTED_CODES,
+                       const.ADMIN_STATE_UP]
+
+        for item in equal_items:
+            self.assertEqual(hm_kwargs[item], hm[item])
+
+        # Test that a user with lb_admin role can see the healthmonitor
+        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
+            healthmonitor_client = self.os_roles_lb_admin.healthmonitor_client
+            hm_adm = healthmonitor_client.show_healthmonitor(hm[const.ID])
+            self.assertEqual(hm_name, hm_adm[const.NAME])
+
+        # Test that a user with cloud admin role can see the healthmonitor
+        if CONF.load_balancer.RBAC_test_type != const.NONE:
+            adm = self.os_admin.healthmonitor_client.show_healthmonitor(
+                hm[const.ID])
+            self.assertEqual(hm_name, adm[const.NAME])
+
+        # Test that a different user, with loadbalancer member role, cannot
+        # see this healthmonitor
+        if CONF.load_balancer.RBAC_test_type != const.NONE:
+            member2_client = self.os_roles_lb_member2.healthmonitor_client
+            self.assertRaises(exceptions.Forbidden,
+                              member2_client.show_healthmonitor,
+                              hm[const.ID])
+
+        # Test that a user, without the loadbalancer member role, cannot
+        # show healthmonitors
+        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
+            self.assertRaises(
+                exceptions.Forbidden,
+                self.os_primary.healthmonitor_client.show_healthmonitor,
+                hm[const.ID])
+
+    @decorators.idempotent_id('fa584b2c-f179-4c4e-ad2e-ff51fd1c5973')
+    def test_healthmonitor_update(self):
+        """Tests healthmonitor update and show APIs.
+
+        * Create a clean pool to use for the healthmonitor.
+        * Create a fully populated healthmonitor.
+        * Show healthmonitor details.
+        * Validate the show reflects the initial values.
+        * Validate that other accounts cannot update the healthmonitor.
+        * Update the healthmonitor details.
+        * Show healthmonitor details.
+        * Validate the show reflects the updated values.
+        """
+        pool_name = data_utils.rand_name("lb_member_pool1_hm-update")
+        pool_kwargs = {
+            const.NAME: pool_name,
+            const.PROTOCOL: const.HTTP,
+            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
+            const.LOADBALANCER_ID: self.lb_id,
+        }
+
+        pool = self.mem_pool_client.create_pool(**pool_kwargs)
+        self.addCleanup(
+            self.mem_pool_client.cleanup_pool, pool[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        hm_name = data_utils.rand_name("lb_member_hm1-update")
+        hm_kwargs = {
+            const.POOL_ID: pool[const.ID],
+            const.NAME: hm_name,
+            const.TYPE: const.HEALTH_MONITOR_HTTP,
+            const.DELAY: 2,
+            const.TIMEOUT: 3,
+            const.MAX_RETRIES: 4,
+            const.MAX_RETRIES_DOWN: 5,
+            const.HTTP_METHOD: const.GET,
+            const.URL_PATH: '/',
+            const.EXPECTED_CODES: '200-204',
+            const.ADMIN_STATE_UP: False,
+        }
+
+        hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
+        self.addCleanup(
+            self.mem_healthmonitor_client.cleanup_healthmonitor,
+            hm[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+        hm = waiters.wait_for_status(
+            self.mem_healthmonitor_client.show_healthmonitor,
+            hm[const.ID], const.PROVISIONING_STATUS,
+            const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+
+        parser.parse(hm[const.CREATED_AT])
+        parser.parse(hm[const.UPDATED_AT])
+        UUID(hm[const.ID])
+
+        # Healthmonitors are ONLINE if admin_state_up = True, else OFFLINE
+        if hm_kwargs[const.ADMIN_STATE_UP]:
+            self.assertEqual(const.ONLINE, hm[const.OPERATING_STATUS])
+        else:
+            self.assertEqual(const.OFFLINE, hm[const.OPERATING_STATUS])
+
+        equal_items = [const.NAME, const.TYPE, const.DELAY, const.TIMEOUT,
+                       const.MAX_RETRIES, const.MAX_RETRIES_DOWN,
+                       const.HTTP_METHOD, const.URL_PATH, const.EXPECTED_CODES,
+                       const.ADMIN_STATE_UP]
+
+        for item in equal_items:
+            self.assertEqual(hm_kwargs[item], hm[item])
+
+        # Test that a user, without the loadbalancer member role, cannot
+        # update this healthmonitor
+        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
+            self.assertRaises(
+                exceptions.Forbidden,
+                self.os_primary.healthmonitor_client.update_healthmonitor,
+                hm[const.ID], admin_state_up=True)
+
+        # Assert we didn't go into PENDING_*
+        hm_check = self.mem_healthmonitor_client.show_healthmonitor(
+            hm[const.ID])
+        self.assertEqual(const.ACTIVE,
+                         hm_check[const.PROVISIONING_STATUS])
+        self.assertFalse(hm_check[const.ADMIN_STATE_UP])
+
+        # Test that a different user, with the loadbalancer member role,
+        # cannot update this healthmonitor
+        if CONF.load_balancer.RBAC_test_type != const.NONE:
+            member2_client = self.os_roles_lb_member2.healthmonitor_client
+            self.assertRaises(exceptions.Forbidden,
+                              member2_client.update_healthmonitor,
+                              hm[const.ID], admin_state_up=True)
+
+        # Assert we didn't go into PENDING_*
+        hm_check = self.mem_healthmonitor_client.show_healthmonitor(
+            hm[const.ID])
+        self.assertEqual(const.ACTIVE,
+                         hm_check[const.PROVISIONING_STATUS])
+        self.assertFalse(hm_check[const.ADMIN_STATE_UP])
+
+        new_name = data_utils.rand_name("lb_member_hm1-UPDATED")
+        hm_update_kwargs = {
+            const.NAME: new_name,
+            const.DELAY: hm_kwargs[const.DELAY] + 1,
+            const.TIMEOUT: hm_kwargs[const.TIMEOUT] + 1,
+            const.MAX_RETRIES: hm_kwargs[const.MAX_RETRIES] + 1,
+            const.MAX_RETRIES_DOWN: hm_kwargs[const.MAX_RETRIES_DOWN] + 1,
+            const.HTTP_METHOD: const.POST,
+            const.URL_PATH: '/test',
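+            # '201,202' exercises the comma-separated list form of
+            # expected_codes (the create above used the range form).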
+            const.EXPECTED_CODES: '201,202',
+            const.ADMIN_STATE_UP: not hm_kwargs[const.ADMIN_STATE_UP],
+        }
+        hm = self.mem_healthmonitor_client.update_healthmonitor(
+            hm[const.ID], **hm_update_kwargs)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+        hm = waiters.wait_for_status(
+            self.mem_healthmonitor_client.show_healthmonitor,
+            hm[const.ID], const.PROVISIONING_STATUS,
+            const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+
+        # Healthmonitors are ONLINE if admin_state_up = True, else OFFLINE
+        if hm_update_kwargs[const.ADMIN_STATE_UP]:
+            self.assertEqual(const.ONLINE, hm[const.OPERATING_STATUS])
+        else:
+            self.assertEqual(const.OFFLINE, hm[const.OPERATING_STATUS])
+
+        # Test changed items
+        equal_items = [const.NAME, const.DELAY, const.TIMEOUT,
+                       const.MAX_RETRIES, const.MAX_RETRIES_DOWN,
+                       const.HTTP_METHOD, const.URL_PATH, const.EXPECTED_CODES,
+                       const.ADMIN_STATE_UP]
+
+        for item in equal_items:
+            self.assertEqual(hm_update_kwargs[item], hm[item])
+
+        # Test unchanged items
+        equal_items = [const.TYPE]
+        for item in equal_items:
+            self.assertEqual(hm_kwargs[item], hm[item])
+
+    @decorators.idempotent_id('a7bab4ac-340c-4776-ab9d-9fcb66869432')
+    def test_healthmonitor_delete(self):
+        """Tests healthmonitor create and delete APIs.
+
+        * Create a clean pool to use for the healthmonitor.
+        * Create a healthmonitor.
+        * Validate that other accounts cannot delete the healthmonitor.
+        * Delete the healthmonitor.
+        * Validate the healthmonitor is in the DELETED state.
+        """
+        pool_name = data_utils.rand_name("lb_member_pool1_hm-delete")
+        pool_kwargs = {
+            const.NAME: pool_name,
+            const.PROTOCOL: const.HTTP,
+            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
+            const.LOADBALANCER_ID: self.lb_id,
+        }
+
+        pool = self.mem_pool_client.create_pool(**pool_kwargs)
+        self.addCleanup(
+            self.mem_pool_client.cleanup_pool, pool[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        hm_name = data_utils.rand_name("lb_member_hm1-delete")
+        hm_kwargs = {
+            const.POOL_ID: pool[const.ID],
+            const.NAME: hm_name,
+            const.TYPE: const.HEALTH_MONITOR_TCP,
+            const.DELAY: 2,
+            const.TIMEOUT: 3,
+            const.MAX_RETRIES: 4,
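+            # TCP monitors have no HTTP-specific fields (http_method,
+            # url_path, expected_codes), so none are set here.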
+        }
+        hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
+        self.addCleanup(
+            self.mem_healthmonitor_client.cleanup_healthmonitor,
+            hm[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer,
+            self.lb_id, const.PROVISIONING_STATUS,
+            const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+
+        # Test that a user without the loadbalancer member role cannot
+        # delete this healthmonitor
+        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
+            self.assertRaises(
+                exceptions.Forbidden,
+                self.os_primary.healthmonitor_client.delete_healthmonitor,
+                hm[const.ID])
+
+        # Test that a different user, with the loadbalancer member role,
+        # cannot delete this healthmonitor
+        if CONF.load_balancer.RBAC_test_type != const.NONE:
+            member2_client = self.os_roles_lb_member2.healthmonitor_client
+            self.assertRaises(exceptions.Forbidden,
+                              member2_client.delete_healthmonitor,
+                              hm[const.ID])
+
+        self.mem_healthmonitor_client.delete_healthmonitor(hm[const.ID])
+
+        waiters.wait_for_deleted_status_or_not_found(
+            self.mem_healthmonitor_client.show_healthmonitor, hm[const.ID],
+            const.PROVISIONING_STATUS,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
diff --git a/octavia_tempest_plugin/tests/api/v2/test_listener.py b/octavia_tempest_plugin/tests/api/v2/test_listener.py
index 12a045f..baae6f3 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_listener.py
@@ -41,8 +41,7 @@
         lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
                      const.NAME: lb_name}
 
-        ip_version = 6 if CONF.load_balancer.test_with_ipv6 else 4
-        cls._setup_lb_network_kwargs(lb_kwargs, ip_version)
+        cls._setup_lb_network_kwargs(lb_kwargs)
 
         lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
         cls.lb_id = lb[const.ID]
diff --git a/octavia_tempest_plugin/tests/api/v2/test_member.py b/octavia_tempest_plugin/tests/api/v2/test_member.py
index 4b556c7..7c7a5c4 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_member.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_member.py
@@ -41,8 +41,7 @@
         lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
                      const.NAME: lb_name}
 
-        ip_version = 6 if CONF.load_balancer.test_with_ipv6 else 4
-        cls._setup_lb_network_kwargs(lb_kwargs, ip_version)
+        cls._setup_lb_network_kwargs(lb_kwargs)
 
         lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
         cls.lb_id = lb[const.ID]
diff --git a/octavia_tempest_plugin/tests/api/v2/test_pool.py b/octavia_tempest_plugin/tests/api/v2/test_pool.py
index 77c267b..f73a67c 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_pool.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_pool.py
@@ -40,8 +40,7 @@
         lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
                      const.NAME: lb_name}
 
-        ip_version = 6 if CONF.load_balancer.test_with_ipv6 else 4
-        cls._setup_lb_network_kwargs(lb_kwargs, ip_version)
+        cls._setup_lb_network_kwargs(lb_kwargs)
 
         lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
         cls.lb_id = lb[const.ID]
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py b/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py
new file mode 100644
index 0000000..02cec6e
--- /dev/null
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py
@@ -0,0 +1,178 @@
+# Copyright 2018 GoDaddy
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from uuid import UUID
+
+from dateutil import parser
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+
+from octavia_tempest_plugin.common import constants as const
+from octavia_tempest_plugin.tests import test_base
+from octavia_tempest_plugin.tests import waiters
+
+CONF = config.CONF
+
+
+class HealthMonitorScenarioTest(test_base.LoadBalancerBaseTest):
+
+    @classmethod
+    def resource_setup(cls):
+        """Setup resources needed by the tests."""
+        super(HealthMonitorScenarioTest, cls).resource_setup()
+
+        lb_name = data_utils.rand_name("lb_member_lb1_hm")
+        lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
+                     const.NAME: lb_name}
+
+        cls._setup_lb_network_kwargs(lb_kwargs)
+
+        lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
+        cls.lb_id = lb[const.ID]
+        cls.addClassResourceCleanup(
+            cls.mem_lb_client.cleanup_loadbalancer,
+            cls.lb_id)
+
+        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+                                cls.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.lb_build_interval,
+                                CONF.load_balancer.lb_build_timeout)
+
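+        # Create a pool under the load balancer for the healthmonitors
+        # in these tests to attach to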
+        pool_name = data_utils.rand_name("lb_member_pool1_hm")
+        pool_kwargs = {
+            const.NAME: pool_name,
+            const.PROTOCOL: const.HTTP,
+            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
+            const.LOADBALANCER_ID: cls.lb_id,
+        }
+        pool = cls.mem_pool_client.create_pool(**pool_kwargs)
+        cls.pool_id = pool[const.ID]
+        cls.addClassResourceCleanup(
+            cls.mem_pool_client.cleanup_pool,
+            cls.pool_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
+        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+                                cls.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+    @decorators.idempotent_id('a51e09aa-6e44-4c67-a9e4-df70d0e08f96')
+    def test_healthmonitor_CRUD(self):
+        """Tests healthmonitor create, read, update, delete, and member status
+
+        * Create a fully populated healthmonitor.
+        * Show healthmonitor details.
+        * Update the healthmonitor.
+        * Delete the healthmonitor.
+        """
+        # Healthmonitor create
+        hm_name = data_utils.rand_name("lb_member_hm1-CRUD")
+        hm_kwargs = {
+            const.POOL_ID: self.pool_id,
+            const.NAME: hm_name,
+            const.TYPE: const.HEALTH_MONITOR_HTTP,
+            const.DELAY: 2,
+            const.TIMEOUT: 2,
+            const.MAX_RETRIES: 2,
+            const.MAX_RETRIES_DOWN: 2,
+            const.HTTP_METHOD: const.GET,
+            const.URL_PATH: '/',
+            const.EXPECTED_CODES: '200',
+            const.ADMIN_STATE_UP: True,
+        }
+
+        hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
+        self.addCleanup(
+            self.mem_healthmonitor_client.cleanup_healthmonitor,
+            hm[const.ID], lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+        hm = waiters.wait_for_status(
+            self.mem_healthmonitor_client.show_healthmonitor,
+            hm[const.ID], const.PROVISIONING_STATUS,
+            const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+
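+        # Sanity-check the created healthmonitor: the timestamps must
+        # parse, the ID must be a valid UUID, and it should report ONLINE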
+        parser.parse(hm[const.CREATED_AT])
+        parser.parse(hm[const.UPDATED_AT])
+        UUID(hm[const.ID])
+        self.assertEqual(const.ONLINE, hm[const.OPERATING_STATUS])
+
+        equal_items = [const.NAME, const.TYPE, const.DELAY, const.TIMEOUT,
+                       const.MAX_RETRIES, const.MAX_RETRIES_DOWN,
+                       const.HTTP_METHOD, const.URL_PATH, const.EXPECTED_CODES,
+                       const.ADMIN_STATE_UP]
+
+        for item in equal_items:
+            self.assertEqual(hm_kwargs[item], hm[item])
+
+        # Healthmonitor update
+        new_name = data_utils.rand_name("lb_member_hm1-update")
+        hm_update_kwargs = {
+            const.NAME: new_name,
+            const.DELAY: hm_kwargs[const.DELAY] + 1,
+            const.TIMEOUT: hm_kwargs[const.TIMEOUT] + 1,
+            const.MAX_RETRIES: hm_kwargs[const.MAX_RETRIES] + 1,
+            const.MAX_RETRIES_DOWN: hm_kwargs[const.MAX_RETRIES_DOWN] + 1,
+            const.HTTP_METHOD: const.POST,
+            const.URL_PATH: '/test',
+            const.EXPECTED_CODES: '201,202',
+            const.ADMIN_STATE_UP: not hm_kwargs[const.ADMIN_STATE_UP],
+        }
+        hm = self.mem_healthmonitor_client.update_healthmonitor(
+            hm[const.ID], **hm_update_kwargs)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+        hm = waiters.wait_for_status(
+            self.mem_healthmonitor_client.show_healthmonitor,
+            hm[const.ID], const.PROVISIONING_STATUS,
+            const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+
+        # Test changed items
+        equal_items = [const.NAME, const.DELAY, const.TIMEOUT,
+                       const.MAX_RETRIES, const.MAX_RETRIES_DOWN,
+                       const.HTTP_METHOD, const.URL_PATH, const.EXPECTED_CODES,
+                       const.ADMIN_STATE_UP]
+
+        for item in equal_items:
+            self.assertEqual(hm_update_kwargs[item], hm[item])
+
+        # Test unchanged items
+        equal_items = [const.TYPE]
+        for item in equal_items:
+            self.assertEqual(hm_kwargs[item], hm[item])
+
+        # Healthmonitor delete
+        self.mem_healthmonitor_client.delete_healthmonitor(hm[const.ID])
+
+        waiters.wait_for_deleted_status_or_not_found(
+            self.mem_healthmonitor_client.show_healthmonitor, hm[const.ID],
+            const.PROVISIONING_STATUS,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_listener.py b/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
index 9560937..1b6578a 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
@@ -38,8 +38,7 @@
         lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
                      const.NAME: lb_name}
 
-        ip_version = 6 if CONF.load_balancer.test_with_ipv6 else 4
-        cls._setup_lb_network_kwargs(lb_kwargs, ip_version)
+        cls._setup_lb_network_kwargs(lb_kwargs)
 
         lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
         cls.lb_id = lb[const.ID]
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_member.py b/octavia_tempest_plugin/tests/scenario/v2/test_member.py
index 16610a8..6fb53f2 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_member.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_member.py
@@ -37,8 +37,7 @@
         lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
                      const.NAME: lb_name}
 
-        ip_version = 6 if CONF.load_balancer.test_with_ipv6 else 4
-        cls._setup_lb_network_kwargs(lb_kwargs, ip_version)
+        cls._setup_lb_network_kwargs(lb_kwargs)
 
         lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
         cls.lb_id = lb[const.ID]
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_pool.py b/octavia_tempest_plugin/tests/scenario/v2/test_pool.py
index 6015223..eef80d5 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_pool.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_pool.py
@@ -37,8 +37,7 @@
         lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
                      const.NAME: lb_name}
 
-        ip_version = 6 if CONF.load_balancer.test_with_ipv6 else 4
-        cls._setup_lb_network_kwargs(lb_kwargs, ip_version)
+        cls._setup_lb_network_kwargs(lb_kwargs)
 
         lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
         cls.lb_id = lb[const.ID]
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py b/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
index 8014f88..0a0f20e 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
@@ -115,7 +115,7 @@
     @testtools.skipIf(CONF.load_balancer.test_with_noop,
                       'Traffic tests will not work in noop mode.')
     @decorators.idempotent_id('6751135d-e15a-4e22-89f4-bfcc3408d424')
-    def test_traffic(self):
+    def test_basic_traffic(self):
         """Tests sending traffic through a loadbalancer
 
         * Create a fully populated loadbalancer.
@@ -135,7 +135,7 @@
 
         member1 = self.mem_member_client.create_member(
             **member1_kwargs)
-        self.addClassResourceCleanup(
+        self.addCleanup(
             self.mem_member_client.cleanup_member,
             member1[const.ID], pool_id=self.pool_id,
             lb_client=self.mem_lb_client, lb_id=self.lb_id)
@@ -159,7 +159,7 @@
 
         member2 = self.mem_member_client.create_member(
             **member2_kwargs)
-        self.addClassResourceCleanup(
+        self.addCleanup(
             self.mem_member_client.cleanup_member,
             member2[const.ID], pool_id=self.pool_id,
             lb_client=self.mem_lb_client, lb_id=self.lb_id)
@@ -171,3 +171,215 @@
 
         # Send some traffic
         self._check_members_balanced(self.lb_vip_address)
+
+    @decorators.idempotent_id('a16f8eb4-a77c-4b0e-8b1b-91c237039713')
+    def test_healthmonitor_traffic(self):
+        """Tests traffic is correctly routed based on healthmonitor status
+
+        * Create three members:
+          * One should be working, and ONLINE with a healthmonitor (passing)
+          * One should be working, and ERROR with a healthmonitor (failing)
+          * One should be disabled, and OFFLINE with a healthmonitor
+        * Verify members are in their correct respective operating statuses.
+        * Verify that traffic is balanced evenly between the working members.
+        * Create a fully populated healthmonitor.
+        * Verify members are in their correct respective operating statuses.
+        * Verify that traffic is sent only to the member passing its checks.
+        * Delete the healthmonitor.
+        * Verify members are in their correct respective operating statuses.
+        * Verify that traffic is balanced evenly between the working members.
+        """
+        member1_name = data_utils.rand_name("lb_member_member1-hm-traffic")
+        member1_kwargs = {
+            const.POOL_ID: self.pool_id,
+            const.NAME: member1_name,
+            const.ADMIN_STATE_UP: True,
+            const.ADDRESS: self.webserver1_ip,
+            const.PROTOCOL_PORT: 80,
+        }
+        if self.lb_member_1_subnet:
+            member1_kwargs[const.SUBNET_ID] = self.lb_member_1_subnet[const.ID]
+
+        member1 = self.mem_member_client.create_member(
+            **member1_kwargs)
+        member1_id = member1[const.ID]
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member1_id, pool_id=self.pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        # Set up Member 2 for Webserver 2
+        member2_name = data_utils.rand_name("lb_member_member2-hm-traffic")
+        member2_kwargs = {
+            const.POOL_ID: self.pool_id,
+            const.NAME: member2_name,
+            const.ADMIN_STATE_UP: True,
+            const.ADDRESS: self.webserver2_ip,
+            const.PROTOCOL_PORT: 80,
+            const.MONITOR_PORT: 9999,  # We want this to go to ERROR with an HM
+        }
+        if self.lb_member_2_subnet:
+            member2_kwargs[const.SUBNET_ID] = self.lb_member_2_subnet[const.ID]
+
+        member2 = self.mem_member_client.create_member(
+            **member2_kwargs)
+        member2_id = member2[const.ID]
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member2_id, pool_id=self.pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        # Set up Member 3 as a non-existent disabled node
+        member3_name = data_utils.rand_name("lb_member_member3-hm-traffic")
+        member3_kwargs = {
+            const.POOL_ID: self.pool_id,
+            const.NAME: member3_name,
+            const.ADMIN_STATE_UP: False,
+            const.ADDRESS: '192.0.2.1',  # RFC 5737 TEST-NET-1, unreachable
+            const.PROTOCOL_PORT: 80,
+        }
+
+        member3 = self.mem_member_client.create_member(
+            **member3_kwargs)
+        member3_id = member3[const.ID]
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member3_id, pool_id=self.pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        # Wait for members to adjust to the correct OPERATING_STATUS:
+        # enabled members report NO_MONITOR while no healthmonitor exists,
+        # and the administratively-down member reports OFFLINE
+        waiters.wait_for_status(
+            self.mem_member_client.show_member,
+            member1_id, const.OPERATING_STATUS,
+            const.NO_MONITOR,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout,
+            pool_id=self.pool_id)
+        waiters.wait_for_status(
+            self.mem_member_client.show_member,
+            member2_id, const.OPERATING_STATUS,
+            const.NO_MONITOR,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout,
+            pool_id=self.pool_id)
+        waiters.wait_for_status(
+            self.mem_member_client.show_member,
+            member3_id, const.OPERATING_STATUS,
+            const.OFFLINE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout,
+            pool_id=self.pool_id)
+
+        # Send some traffic and verify it is balanced
+        self._check_members_balanced(self.lb_vip_address,
+                                     traffic_member_count=2)
+
+        # Create the healthmonitor
+        hm_name = data_utils.rand_name("lb_member_hm1-hm-traffic")
+        hm_kwargs = {
+            const.POOL_ID: self.pool_id,
+            const.NAME: hm_name,
+            const.TYPE: const.HEALTH_MONITOR_HTTP,
+            const.DELAY: 2,
+            const.TIMEOUT: 2,
+            const.MAX_RETRIES: 2,
+            const.MAX_RETRIES_DOWN: 2,
+            const.HTTP_METHOD: const.GET,
+            const.URL_PATH: '/',
+            const.EXPECTED_CODES: '200',
+            const.ADMIN_STATE_UP: True,
+        }
+
+        hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
+        self.addCleanup(
+            self.mem_healthmonitor_client.cleanup_healthmonitor,
+            hm[const.ID], lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+        hm = waiters.wait_for_status(
+            self.mem_healthmonitor_client.show_healthmonitor,
+            hm[const.ID], const.PROVISIONING_STATUS,
+            const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+
+        # Wait for members to adjust to the correct OPERATING_STATUS:
+        # the passing member goes ONLINE, the failing member goes ERROR,
+        # and the disabled member stays OFFLINE
+        waiters.wait_for_status(
+            self.mem_member_client.show_member,
+            member1_id, const.OPERATING_STATUS,
+            const.ONLINE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout,
+            pool_id=self.pool_id)
+        waiters.wait_for_status(
+            self.mem_member_client.show_member,
+            member2_id, const.OPERATING_STATUS,
+            const.ERROR,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout,
+            pool_id=self.pool_id)
+        waiters.wait_for_status(
+            self.mem_member_client.show_member,
+            member3_id, const.OPERATING_STATUS,
+            const.OFFLINE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout,
+            pool_id=self.pool_id)
+
+        # Send some traffic and verify only the passing member receives it
+        self._check_members_balanced(self.lb_vip_address,
+                                     traffic_member_count=1)
+
+        # Delete the healthmonitor
+        self.mem_healthmonitor_client.delete_healthmonitor(hm[const.ID])
+
+        waiters.wait_for_deleted_status_or_not_found(
+            self.mem_healthmonitor_client.show_healthmonitor, hm[const.ID],
+            const.PROVISIONING_STATUS,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        # Wait for members to adjust to the correct OPERATING_STATUS
+        waiters.wait_for_status(
+            self.mem_member_client.show_member,
+            member1_id, const.OPERATING_STATUS,
+            const.NO_MONITOR,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout,
+            pool_id=self.pool_id)
+        waiters.wait_for_status(
+            self.mem_member_client.show_member,
+            member2_id, const.OPERATING_STATUS,
+            const.NO_MONITOR,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout,
+            pool_id=self.pool_id)
+        waiters.wait_for_status(
+            self.mem_member_client.show_member,
+            member3_id, const.OPERATING_STATUS,
+            const.OFFLINE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout,
+            pool_id=self.pool_id)
+
+        # Send some traffic and verify it is balanced again
+        self._check_members_balanced(self.lb_vip_address)
diff --git a/octavia_tempest_plugin/tests/test_base.py b/octavia_tempest_plugin/tests/test_base.py
index ff4dce7..596e7fb 100644
--- a/octavia_tempest_plugin/tests/test_base.py
+++ b/octavia_tempest_plugin/tests/test_base.py
@@ -108,6 +108,8 @@
         cls.mem_listener_client = cls.os_roles_lb_member.listener_client
         cls.mem_pool_client = cls.os_roles_lb_member.pool_client
         cls.mem_member_client = cls.os_roles_lb_member.member_client
+        cls.mem_healthmonitor_client = (
+            cls.os_roles_lb_member.healthmonitor_client)
 
     @classmethod
     def resource_setup(cls):
@@ -353,7 +355,9 @@
                 cls.lb_member_2_ipv6_subnet['id'])
 
     @classmethod
-    def _setup_lb_network_kwargs(cls, lb_kwargs, ip_version):
+    def _setup_lb_network_kwargs(cls, lb_kwargs, ip_version=None):
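+        # Default to the IP version implied by the tempest config when the
+        # caller does not request one explicitly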
+        if not ip_version:
+            ip_version = 6 if CONF.load_balancer.test_with_ipv6 else 4
         if cls.lb_member_vip_subnet:
             ip_index = data_utils.rand_int_id(start=10, end=100)
             if ip_version == 4:
@@ -711,7 +715,7 @@
                   'period. Failing test.')
         raise Exception()
 
-    def _check_members_balanced(self, vip_address):
+    def _check_members_balanced(self, vip_address, traffic_member_count=2):
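+        # traffic_member_count is the number of distinct members expected
+        # to receive traffic; healthmonitor tests pass 1 when only a single
+        # member is passing its health checks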
         session = requests.Session()
         response_counts = {}
 
@@ -734,7 +738,7 @@
 
         LOG.debug('Loadbalancer response totals: %s', response_counts)
-        # Ensure the correct number of members
+        # Ensure the expected number of members received traffic
-        self.assertEqual(2, len(response_counts))
+        self.assertEqual(traffic_member_count, len(response_counts))
 
-        # Ensure both members got the same number of responses
+        # Ensure each member that received traffic got an equal share
         self.assertEqual(1, len(set(response_counts.values())))