Create API and scenario tests for members

This patch implements member tests for the Octavia
Tempest Plugin.

Depends-On: https://review.openstack.org/#/c/567955/
Change-Id: I8813f90538b9ec9047a1777b6cab76485a150498
Story: 2001387
Task: 5970
diff --git a/octavia_tempest_plugin/clients.py b/octavia_tempest_plugin/clients.py
index 27ebb89..7b766be 100644
--- a/octavia_tempest_plugin/clients.py
+++ b/octavia_tempest_plugin/clients.py
@@ -20,6 +20,8 @@
 from octavia_tempest_plugin.services.load_balancer.v2 import (
     loadbalancer_client)
 from octavia_tempest_plugin.services.load_balancer.v2 import (
+    member_client)
+from octavia_tempest_plugin.services.load_balancer.v2 import (
     pool_client)
 
 CONF = config.CONF
@@ -37,3 +39,5 @@
             self.auth_provider, SERVICE_TYPE, CONF.identity.region)
         self.pool_client = pool_client.PoolClient(
             self.auth_provider, SERVICE_TYPE, CONF.identity.region)
+        self.member_client = member_client.MemberClient(
+            self.auth_provider, SERVICE_TYPE, CONF.identity.region)
diff --git a/octavia_tempest_plugin/common/constants.py b/octavia_tempest_plugin/common/constants.py
index 8ca0138..cf38c5a 100644
--- a/octavia_tempest_plugin/common/constants.py
+++ b/octavia_tempest_plugin/common/constants.py
@@ -61,6 +61,14 @@
 LISTENER_ID = 'listener_id'
 LOADBALANCERS = 'loadbalancers'
 
+POOL_ID = 'pool_id'
+ADDRESS = 'address'
+WEIGHT = 'weight'
+BACKUP = 'backup'
+SUBNET_ID = 'subnet_id'
+MONITOR_ADDRESS = 'monitor_address'
+MONITOR_PORT = 'monitor_port'
+
 # API valid fields
 SHOW_LOAD_BALANCER_RESPONSE_FIELDS = (
     ADMIN_STATE_UP, CREATED_AT, DESCRIPTION, FLAVOR_ID, ID, LISTENERS, NAME,
@@ -83,6 +91,11 @@
     CREATED_AT, UPDATED_AT
 )
 
+SHOW_MEMBER_RESPONSE_FIELDS = (
+    ID, NAME, PROVISIONING_STATUS, OPERATING_STATUS, ADMIN_STATE_UP,
+    ADDRESS, PROTOCOL_PORT, WEIGHT, BACKUP, MONITOR_PORT, MONITOR_ADDRESS
+)
+
 # Other constants
 ACTIVE = 'ACTIVE'
 ADMIN_STATE_UP_TRUE = 'true'
@@ -92,6 +105,7 @@
 FIELDS = 'fields'
 OFFLINE = 'OFFLINE'
 ONLINE = 'ONLINE'
+NO_MONITOR = 'NO_MONITOR'
 SORT = 'sort'
 
 # Protocols
diff --git a/octavia_tempest_plugin/services/load_balancer/v2/base_client.py b/octavia_tempest_plugin/services/load_balancer/v2/base_client.py
index 46ae997..f2aaf84 100644
--- a/octavia_tempest_plugin/services/load_balancer/v2/base_client.py
+++ b/octavia_tempest_plugin/services/load_balancer/v2/base_client.py
@@ -52,9 +52,12 @@
         self.uri = self.base_uri.format(object=self.list_root_tag)
         # Create a method for each object's cleanup
         # This method should be used (rather than delete) for tempest cleanups.
-        setattr(self, 'cleanup_{}'.format(self.root_tag), self._cleanup_obj)
+        cleanup_func_name = 'cleanup_{}'.format(self.root_tag)
+        if not hasattr(self, cleanup_func_name):
+            setattr(self, cleanup_func_name, self._cleanup_obj)
 
-    def _create_object(self, return_object_only=True, **kwargs):
+    def _create_object(self, parent_id=None, return_object_only=True,
+                       **kwargs):
         """Create an object.
 
         :param return_object_only: If True, the response returns the object
@@ -90,14 +93,21 @@
         :returns: An appropriate object.
         """
         obj_dict = {self.root_tag: kwargs}
-        response, body = self.post(self.uri, json.dumps(obj_dict))
+
+        if parent_id:
+            request_uri = self.uri.format(parent=parent_id)
+        else:
+            request_uri = self.uri
+
+        response, body = self.post(request_uri, json.dumps(obj_dict))
         self.expected_success(201, response.status)
         if return_object_only:
             return json.loads(body.decode('utf-8'))[self.root_tag]
         else:
             return json.loads(body.decode('utf-8'))
 
-    def _show_object(self, obj_id, query_params=None, return_object_only=True):
+    def _show_object(self, obj_id, parent_id=None, query_params=None,
+                     return_object_only=True):
         """Get object details.
 
         :param obj_id: The object ID to query.
@@ -133,10 +143,15 @@
                                      couldn't be parsed
         :returns: An appropriate object.
         """
-        if query_params:
-            request_uri = '{0}/{1}?{2}'.format(self.uri, obj_id, query_params)
+        if parent_id:
+            uri = self.uri.format(parent=parent_id)
         else:
-            request_uri = '{0}/{1}'.format(self.uri, obj_id)
+            uri = self.uri
+
+        if query_params:
+            request_uri = '{0}/{1}?{2}'.format(uri, obj_id, query_params)
+        else:
+            request_uri = '{0}/{1}'.format(uri, obj_id)
 
         response, body = self.get(request_uri)
         self.expected_success(200, response.status)
@@ -145,7 +160,8 @@
         else:
             return json.loads(body.decode('utf-8'))
 
-    def _list_objects(self, query_params=None, return_object_only=True):
+    def _list_objects(self, parent_id=None, query_params=None,
+                      return_object_only=True):
         """Get a list of the appropriate objects.
 
         :param query_params: The optional query parameters to append to the
@@ -180,10 +196,15 @@
                                      couldn't be parsed
         :returns: A list of appropriate objects.
         """
-        if query_params:
-            request_uri = '{0}?{1}'.format(self.uri, query_params)
+        if parent_id:
+            uri = self.uri.format(parent=parent_id)
         else:
-            request_uri = self.uri
+            uri = self.uri
+
+        if query_params:
+            request_uri = '{0}?{1}'.format(uri, query_params)
+        else:
+            request_uri = uri
         response, body = self.get(request_uri)
         self.expected_success(200, response.status)
         if return_object_only:
@@ -191,10 +212,12 @@
         else:
             return json.loads(body.decode('utf-8'))
 
-    def _update_object(self, obj_id, return_object_only=True, **kwargs):
+    def _update_object(self, obj_id, parent_id=None, return_object_only=True,
+                       **kwargs):
         """Update an object.
 
         :param obj_id: The object ID to update.
+        :param parent_id: The parent object ID, if applicable.
         :param return_object_only: If True, the response returns the object
                                    inside the root tag. False returns the full
                                    response from the API.
@@ -228,15 +251,22 @@
         :returns: An appropriate object.
         """
         obj_dict = {self.root_tag: kwargs}
-        uri = '{0}/{1}'.format(self.uri, obj_id)
-        response, body = self.put(uri, json.dumps(obj_dict))
+
+        if parent_id:
+            uri = self.uri.format(parent=parent_id)
+        else:
+            uri = self.uri
+
+        request_uri = '{0}/{1}'.format(uri, obj_id)
+        response, body = self.put(request_uri, json.dumps(obj_dict))
         self.expected_success(200, response.status)
         if return_object_only:
             return json.loads(body.decode('utf-8'))[self.root_tag]
         else:
             return json.loads(body.decode('utf-8'))
 
-    def _delete_obj(self, obj_id, ignore_errors=False, cascade=False):
+    def _delete_obj(self, obj_id, parent_id=None, ignore_errors=False,
+                    cascade=False):
         """Delete an object.
 
         :param obj_id: The object ID to delete.
@@ -271,22 +301,27 @@
         :returns: None if ignore_errors is True, the response status code
                   if not.
         """
-        if cascade:
-            uri = '{0}/{1}?cascade=true'.format(self.uri, obj_id)
+        if parent_id:
+            uri = self.uri.format(parent=parent_id)
         else:
-            uri = '{0}/{1}'.format(self.uri, obj_id)
+            uri = self.uri
+
+        if cascade:
+            request_uri = '{0}/{1}?cascade=true'.format(uri, obj_id)
+        else:
+            request_uri = '{0}/{1}'.format(uri, obj_id)
         if ignore_errors:
             try:
-                response, body = self.delete(uri)
+                response, body = self.delete(request_uri)
             except Exception:
                 return
         else:
-            response, body = self.delete(uri)
+            response, body = self.delete(request_uri)
 
         self.expected_success(204, response.status)
         return response.status
 
-    def _cleanup_obj(self, obj_id, lb_client=None, lb_id=None):
+    def _cleanup_obj(self, obj_id, lb_client=None, lb_id=None, parent_id=None):
         """Clean up an object (for use in tempest addClassResourceCleanup).
 
         We always need to wait for the parent LB to be in a mutable state
@@ -296,9 +331,13 @@
         tempest will delete the first one and then immediately try to delete
         the second one, which will fail because the LB will be immutable.
 
+        We also need to wait to return until the parent LB is back in a mutable
+        state so future tests don't break right at the start.
+
         This function:
         * Waits until the parent LB is ACTIVE
         * Deletes the object
+        * Waits until the parent LB is ACTIVE
 
         :param obj_id: The object ID to clean up.
         :param lb_client: (Optional) The loadbalancer client, if this isn't the
@@ -307,6 +346,11 @@
                       obj_id is for a sub-object and not a loadbalancer.
         :return:
         """
+        if parent_id:
+            uri = self.uri.format(parent=parent_id)
+        else:
+            uri = self.uri
+
         if lb_client and lb_id:
             wait_id = lb_id
             wait_client = lb_client
@@ -343,10 +387,20 @@
             LOG.error("Cleanup encountered an unknown exception while waiting "
                       "for %s %s: %s", wait_client.root_tag, wait_id, e)
 
-        uri = '{0}/{1}'.format(self.uri, obj_id)
+        uri = '{0}/{1}'.format(uri, obj_id)
         LOG.info("Cleaning up %s %s...", self.root_tag, obj_id)
         return_status = test_utils.call_and_ignore_notfound_exc(
             self.delete, uri)
+
+        if lb_id and lb_client:
+            LOG.info("Waiting for %s %s to be ACTIVE...",
+                     wait_client.root_tag, wait_id)
+            waiters.wait_for_status(wait_func, wait_id,
+                                    const.PROVISIONING_STATUS,
+                                    const.ACTIVE,
+                                    self.build_interval,
+                                    self.timeout)
+
         LOG.info("Cleanup complete for %s %s...", self.root_tag, obj_id)
         return return_status
 
diff --git a/octavia_tempest_plugin/services/load_balancer/v2/listener_client.py b/octavia_tempest_plugin/services/load_balancer/v2/listener_client.py
index 73d2cfb..082f4b7 100644
--- a/octavia_tempest_plugin/services/load_balancer/v2/listener_client.py
+++ b/octavia_tempest_plugin/services/load_balancer/v2/listener_client.py
@@ -257,7 +257,7 @@
         return self._update_object(**kwargs)
 
     def delete_listener(self, listener_id, ignore_errors=False):
-        """Delete an object.
+        """Delete a listener.
 
         :param listener_id: The listener ID to delete.
         :param ignore_errors: True if errors should be ignored.
diff --git a/octavia_tempest_plugin/services/load_balancer/v2/loadbalancer_client.py b/octavia_tempest_plugin/services/load_balancer/v2/loadbalancer_client.py
index 6fb7e69..6ea83f0 100644
--- a/octavia_tempest_plugin/services/load_balancer/v2/loadbalancer_client.py
+++ b/octavia_tempest_plugin/services/load_balancer/v2/loadbalancer_client.py
@@ -46,10 +46,10 @@
         :param description: A human-readable description for the resource.
         :param admin_state_up: The administrative state of the resource, which
                                is up (true) or down (false).
-        :param flavor: The load balancer flavor ID.
+        :param flavor: The loadbalancer flavor ID.
         :param listeners: A list of listner dictionaries.
         :param project_id: The ID of the project owning this resource.
-        :param provider: Provider name for the load balancer.
+        :param provider: Provider name for the loadbalancer.
         :param vip_address: The IP address of the Virtual IP (VIP).
         :param vip_network_id: The ID of the network for the Virtual IP (VIP).
         :param vip_port_id: The ID of the Virtual IP (VIP) port.
@@ -217,7 +217,7 @@
         return self._update_object(**kwargs)
 
     def delete_loadbalancer(self, lb_id, cascade=False, ignore_errors=False):
-        """Delete an object.
+        """Delete a loadbalancer.
 
         :param lb_id: The loadbalancer ID to delete.
         :param ignore_errors: True if errors should be ignored.
@@ -256,9 +256,9 @@
                                 cascade=cascade)
 
     def failover_loadbalancer(self, lb_id):
-        """Failover a load balancer.
+        """Failover a loadbalancer.
 
-        :param lb_id: The load balancer ID to query.
+        :param lb_id: The loadbalancer ID to query.
         :raises AssertionError: if the expected_code isn't a valid http success
                                 response code
         :raises BadRequest: If a 400 response code is received
@@ -293,9 +293,9 @@
 
     def get_loadbalancer_stats(self, lb_id, query_params=None,
                                return_object_only=True):
-        """Get load balancer statistics.
+        """Get loadbalancer statistics.
 
-        :param lb_id: The load balancer ID to query.
+        :param lb_id: The loadbalancer ID to query.
         :param query_params: The optional query parameters to append to the
                              request. Ex. fields=id&fields=name
         :param return_object_only: If True, the response returns the object
@@ -326,7 +326,7 @@
                                         of the handled checks
         :raises UnprocessableEntity: If a 422 response code is received and
                                      couldn't be parsed
-        :returns: A load balancer statistics object.
+        :returns: A loadbalancer statistics object.
         """
         if query_params:
             request_uri = '{0}/{1}/stats?{2}'.format(self.uri, lb_id,
@@ -343,9 +343,9 @@
 
     def get_loadbalancer_status(self, lb_id, query_params=None,
                                 return_object_only=True):
-        """Get a load balancer status tree.
+        """Get a loadbalancer status tree.
 
-        :param lb_id: The load balancer ID to query.
+        :param lb_id: The loadbalancer ID to query.
         :param query_params: The optional query parameters to append to the
                              request. Ex. fields=id&fields=name
         :param return_object_only: If True, the response returns the object
@@ -376,7 +376,7 @@
                                         of the handled checks
         :raises UnprocessableEntity: If a 422 response code is received and
                                      couldn't be parsed
-        :returns: A load balancer statuses object.
+        :returns: A loadbalancer statuses object.
         """
         if query_params:
             request_uri = '{0}/{1}/status?{2}'.format(self.uri, lb_id,
diff --git a/octavia_tempest_plugin/services/load_balancer/v2/member_client.py b/octavia_tempest_plugin/services/load_balancer/v2/member_client.py
new file mode 100644
index 0000000..3909ac2
--- /dev/null
+++ b/octavia_tempest_plugin/services/load_balancer/v2/member_client.py
@@ -0,0 +1,325 @@
+#   Copyright 2018 GoDaddy
+#
+#   Licensed under the Apache License, Version 2.0 (the "License"); you may
+#   not use this file except in compliance with the License. You may obtain
+#   a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#   License for the specific language governing permissions and limitations
+#   under the License.
+
+import json
+
+from oslo_log import log as logging
+from tempest import config
+
+from octavia_tempest_plugin.services.load_balancer.v2 import base_client
+from octavia_tempest_plugin.services.load_balancer.v2 import pool_client
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+Unset = base_client.Unset
+
+
+class MemberClient(base_client.BaseLBaaSClient):
+
+    root_tag = 'member'
+    list_root_tag = 'members'
+
+    def __init__(self, *args, **kwargs):
+        super(MemberClient, self).__init__(*args, **kwargs)
+        pool_list_root_tag = pool_client.PoolClient.list_root_tag
+        # /v2.0/lbaas/pools/<POOL_UUID>/members
+        self.uri = "{pool_base_uri}/{parent}/{object}".format(
+            pool_base_uri=self.base_uri.format(object=pool_list_root_tag),
+            parent="{parent}",
+            object=self.list_root_tag
+        )
+
+    def create_member(self, pool_id, address, protocol_port,
+                      name=Unset, admin_state_up=Unset, weight=Unset,
+                      backup=Unset, subnet_id=Unset, monitor_address=Unset,
+                      monitor_port=Unset, return_object_only=True):
+        """Create a member.
+
+        :param pool_id: The ID of the pool where the member will live.
+        :param address: The IP address of the resource.
+        :param protocol_port: The protocol port number for the resource.
+        :param name: Human-readable name of the resource.
+        :param admin_state_up: The administrative state of the resource, which
+                               is up (true) or down (false).
+        :param weight: The weight of a member determines the portion of
+                       requests or connections it services compared to the
+                       other members of the pool.
+        :param backup: Is the member a backup?
+        :param monitor_address: An alternate IP address used for health
+                                monitoring a backend member.
+        :param monitor_port: An alternate protocol port used for health
+                             monitoring a backend member.
+        :param return_object_only: If True, the response returns the object
+                                   inside the root tag. False returns the full
+                                   response from the API.
+        :raises AssertionError: if the expected_code isn't a valid http success
+                                response code
+        :raises BadRequest: If a 400 response code is received
+        :raises Conflict: If a 409 response code is received
+        :raises Forbidden: If a 403 response code is received
+        :raises Gone: If a 410 response code is received
+        :raises InvalidContentType: If a 415 response code is received
+        :raises InvalidHTTPResponseBody: The response body wasn't valid JSON
+        :raises InvalidHttpSuccessCode: if the read code isn't an expected
+                                        http success code
+        :raises NotFound: If a 404 response code is received
+        :raises NotImplemented: If a 501 response code is received
+        :raises OverLimit: If a 413 response code is received and over_limit is
+                           not in the response body
+        :raises RateLimitExceeded: If a 413 response code is received and
+                                   over_limit is in the response body
+        :raises ServerFault: If a 500 response code is received
+        :raises Unauthorized: If a 401 response code is received
+        :raises UnexpectedContentType: If the content-type of the response
+                                       isn't an expected type
+        :raises UnexpectedResponseCode: If a response code above 400 is
+                                        received and it doesn't fall into any
+                                        of the handled checks
+        :raises UnprocessableEntity: If a 422 response code is received and
+                                     couldn't be parsed
+        :returns: A member object.
+        """
+        kwargs = {arg: value for arg, value in locals().items()
+                  if arg != 'self' and value is not Unset}
+        kwargs['parent_id'] = kwargs.pop('pool_id')
+        return self._create_object(**kwargs)
+
+    def show_member(self, member_id, pool_id, query_params=None,
+                    return_object_only=True):
+        """Get member details.
+
+        :param member_id: The member ID to query.
+        :param pool_id: The ID of the pool where the member lives.
+        :param query_params: The optional query parameters to append to the
+                             request. Ex. fields=id&fields=name
+        :param return_object_only: If True, the response returns the object
+                                   inside the root tag. False returns the full
+                                   response from the API.
+        :raises AssertionError: if the expected_code isn't a valid http success
+                                response code
+        :raises BadRequest: If a 400 response code is received
+        :raises Conflict: If a 409 response code is received
+        :raises Forbidden: If a 403 response code is received
+        :raises Gone: If a 410 response code is received
+        :raises InvalidContentType: If a 415 response code is received
+        :raises InvalidHTTPResponseBody: The response body wasn't valid JSON
+        :raises InvalidHttpSuccessCode: if the read code isn't an expected
+                                        http success code
+        :raises NotFound: If a 404 response code is received
+        :raises NotImplemented: If a 501 response code is received
+        :raises OverLimit: If a 413 response code is received and over_limit is
+                           not in the response body
+        :raises RateLimitExceeded: If a 413 response code is received and
+                                   over_limit is in the response body
+        :raises ServerFault: If a 500 response code is received
+        :raises Unauthorized: If a 401 response code is received
+        :raises UnexpectedContentType: If the content-type of the response
+                                       isn't an expected type
+        :raises UnexpectedResponseCode: If a response code above 400 is
+                                        received and it doesn't fall into any
+                                        of the handled checks
+        :raises UnprocessableEntity: If a 422 response code is received and
+                                     couldn't be parsed
+        :returns: A member object.
+        """
+        return self._show_object(obj_id=member_id,
+                                 parent_id=pool_id,
+                                 query_params=query_params,
+                                 return_object_only=return_object_only)
+
+    def list_members(self, pool_id, query_params=None,
+                     return_object_only=True):
+        """Get a list of member objects.
+
+        :param pool_id: The ID of the pool where the members live.
+        :param query_params: The optional query parameters to append to the
+                             request. Ex. fields=id&fields=name
+        :param return_object_only: If True, the response returns the object
+                                   inside the root tag. False returns the full
+                                   response from the API.
+        :raises AssertionError: if the expected_code isn't a valid http success
+                                response code
+        :raises BadRequest: If a 400 response code is received
+        :raises Conflict: If a 409 response code is received
+        :raises Forbidden: If a 403 response code is received
+        :raises Gone: If a 410 response code is received
+        :raises InvalidContentType: If a 415 response code is received
+        :raises InvalidHTTPResponseBody: The response body wasn't valid JSON
+        :raises InvalidHttpSuccessCode: if the read code isn't an expected
+                                        http success code
+        :raises NotFound: If a 404 response code is received
+        :raises NotImplemented: If a 501 response code is received
+        :raises OverLimit: If a 413 response code is received and over_limit is
+                           not in the response body
+        :raises RateLimitExceeded: If a 413 response code is received and
+                                   over_limit is in the response body
+        :raises ServerFault: If a 500 response code is received
+        :raises Unauthorized: If a 401 response code is received
+        :raises UnexpectedContentType: If the content-type of the response
+                                       isn't an expected type
+        :raises UnexpectedResponseCode: If a response code above 400 is
+                                        received and it doesn't fall into any
+                                        of the handled checks
+        :raises UnprocessableEntity: If a 422 response code is received and
+                                     couldn't be parsed
+        :returns: A list of member objects.
+        """
+        return self._list_objects(parent_id=pool_id,
+                                  query_params=query_params,
+                                  return_object_only=return_object_only)
+
+    def update_member(self, member_id, pool_id, name=Unset,
+                      admin_state_up=Unset, weight=Unset, backup=Unset,
+                      monitor_address=Unset, monitor_port=Unset,
+                      return_object_only=True):
+        """Update a member.
+
+        :param member_id: The member ID to update.
+        :param pool_id: The ID of the pool where the member lives.
+        :param name: Human-readable name of the resource.
+        :param admin_state_up: The administrative state of the resource, which
+                               is up (true) or down (false).
+        :param weight: The weight of a member determines the portion of
+                       requests or connections it services compared to the
+                       other members of the pool.
+        :param backup: Is the member a backup?
+        :param monitor_address: An alternate IP address used for health
+                                monitoring a backend member.
+        :param monitor_port: An alternate protocol port used for health
+                             monitoring a backend member.
+        :param return_object_only: If True, the response returns the object
+                                   inside the root tag. False returns the full
+                                   response from the API.
+        :raises AssertionError: if the expected_code isn't a valid http success
+                                response code
+        :raises BadRequest: If a 400 response code is received
+        :raises Conflict: If a 409 response code is received
+        :raises Forbidden: If a 403 response code is received
+        :raises Gone: If a 410 response code is received
+        :raises InvalidContentType: If a 415 response code is received
+        :raises InvalidHTTPResponseBody: The response body wasn't valid JSON
+        :raises InvalidHttpSuccessCode: if the read code isn't an expected
+                                        http success code
+        :raises NotFound: If a 404 response code is received
+        :raises NotImplemented: If a 501 response code is received
+        :raises OverLimit: If a 413 response code is received and over_limit is
+                           not in the response body
+        :raises RateLimitExceeded: If a 413 response code is received and
+                                   over_limit is in the response body
+        :raises ServerFault: If a 500 response code is received
+        :raises Unauthorized: If a 401 response code is received
+        :raises UnexpectedContentType: If the content-type of the response
+                                       isn't an expected type
+        :raises UnexpectedResponseCode: If a response code above 400 is
+                                        received and it doesn't fall into any
+                                        of the handled checks
+        :raises UnprocessableEntity: If a 422 response code is received and
+                                     couldn't be parsed
+        :returns: A member object.
+        """
+        kwargs = {arg: value for arg, value in locals().items()
+                  if arg != 'self' and value is not Unset}
+        kwargs['obj_id'] = kwargs.pop('member_id')
+        kwargs['parent_id'] = kwargs.pop('pool_id')
+        return self._update_object(**kwargs)
+
+    def update_members(self, pool_id, members_list):
+        """Batch update all members on a pool.
+
+        :param pool_id: The ID of the pool where the members live.
+        :param members_list: The list of members to enforce on the pool.
+        :raises AssertionError: if the expected_code isn't a valid http success
+                                response code
+        :raises BadRequest: If a 400 response code is received
+        :raises Conflict: If a 409 response code is received
+        :raises Forbidden: If a 403 response code is received
+        :raises Gone: If a 410 response code is received
+        :raises InvalidContentType: If a 415 response code is received
+        :raises InvalidHTTPResponseBody: The response body wasn't valid JSON
+        :raises InvalidHttpSuccessCode: if the read code isn't an expected
+                                        http success code
+        :raises NotFound: If a 404 response code is received
+        :raises NotImplemented: If a 501 response code is received
+        :raises OverLimit: If a 413 response code is received and over_limit is
+                           not in the response body
+        :raises RateLimitExceeded: If a 413 response code is received and
+                                   over_limit is in the response body
+        :raises ServerFault: If a 500 response code is received
+        :raises Unauthorized: If a 401 response code is received
+        :raises UnexpectedContentType: If the content-type of the response
+                                       isn't an expected type
+        :raises UnexpectedResponseCode: If a response code above 400 is
+                                        received and it doesn't fall into any
+                                        of the handled checks
+        :raises UnprocessableEntity: If a 422 response code is received and
+                                     couldn't be parsed
+        :returns: None.
+        """
+        obj_dict = {self.list_root_tag: members_list}
+        request_uri = self.uri.format(parent=pool_id)
+
+        response, body = self.put(request_uri, json.dumps(obj_dict))
+        self.expected_success(202, response.status)
+        return
+
+    def delete_member(self, member_id, pool_id, ignore_errors=False):
+        """Delete a member.
+
+        :param member_id: The member ID to delete.
+        :param pool_id: The ID of the pool where the member lives.
+        :param ignore_errors: True if errors should be ignored.
+        :raises AssertionError: if the expected_code isn't a valid http success
+                                response code
+        :raises BadRequest: If a 400 response code is received
+        :raises Conflict: If a 409 response code is received
+        :raises Forbidden: If a 403 response code is received
+        :raises Gone: If a 410 response code is received
+        :raises InvalidContentType: If a 415 response code is received
+        :raises InvalidHTTPResponseBody: The response body wasn't valid JSON
+        :raises InvalidHttpSuccessCode: if the read code isn't an expected
+                                        http success code
+        :raises NotFound: If a 404 response code is received
+        :raises NotImplemented: If a 501 response code is received
+        :raises OverLimit: If a 413 response code is received and over_limit is
+                           not in the response body
+        :raises RateLimitExceeded: If a 413 response code is received and
+                                   over_limit is in the response body
+        :raises ServerFault: If a 500 response code is received
+        :raises Unauthorized: If a 401 response code is received
+        :raises UnexpectedContentType: If the content-type of the response
+                                       isn't an expected type
+        :raises UnexpectedResponseCode: If a response code above 400 is
+                                        received and it doesn't fall into any
+                                        of the handled checks
+        :raises UnprocessableEntity: If a 422 response code is received and
+                                     couldn't be parsed
+        :returns: None if ignore_errors is True, the response status code
+                  if not.
+        """
+        return self._delete_obj(obj_id=member_id,
+                                parent_id=pool_id,
+                                ignore_errors=ignore_errors)
+
+    def cleanup_member(self, member_id, pool_id, lb_client=None, lb_id=None):
+        kwargs = {arg: value for arg, value in locals().items()
+                  if arg != 'self' and value is not Unset}
+        kwargs['obj_id'] = kwargs.pop('member_id')
+        kwargs['parent_id'] = kwargs.pop('pool_id')
+        return self._cleanup_obj(**kwargs)
+
+    def is_resource_deleted(self, id):
+        # Trying to implement this for members would be impossible, because
+        # they are sub-objects that can't be referenced directly, and this is
+        # used internally in tempest where we have no control over passed args
+        raise NotImplementedError()
diff --git a/octavia_tempest_plugin/services/load_balancer/v2/pool_client.py b/octavia_tempest_plugin/services/load_balancer/v2/pool_client.py
index 73ca3c9..46ec38d 100644
--- a/octavia_tempest_plugin/services/load_balancer/v2/pool_client.py
+++ b/octavia_tempest_plugin/services/load_balancer/v2/pool_client.py
@@ -205,7 +205,7 @@
         return self._update_object(**kwargs)
 
     def delete_pool(self, pool_id, ignore_errors=False):
-        """Delete an object.
+        """Delete a pool.
 
         :param pool_id: The pool ID to delete.
         :param ignore_errors: True if errors should be ignored.
diff --git a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
index 26da704..4f83192 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
@@ -51,7 +51,7 @@
         """Tests load balancer create and basic show APIs.
 
         * Tests that users without the load balancer member role cannot
-        *   create load balancers.
+          create load balancers.
         * Create a fully populated load balancer.
         * Show load balancer details.
         * Validate the show reflects the requested values.
diff --git a/octavia_tempest_plugin/tests/api/v2/test_member.py b/octavia_tempest_plugin/tests/api/v2/test_member.py
new file mode 100644
index 0000000..4b556c7
--- /dev/null
+++ b/octavia_tempest_plugin/tests/api/v2/test_member.py
@@ -0,0 +1,860 @@
+# Copyright 2018 GoDaddy
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import testtools
+import time
+from uuid import UUID
+
+from dateutil import parser
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions
+
+from octavia_tempest_plugin.common import constants as const
+from octavia_tempest_plugin.tests import test_base
+from octavia_tempest_plugin.tests import waiters
+
+CONF = config.CONF
+
+
+class MemberAPITest(test_base.LoadBalancerBaseTest):
+    """Test the member object API."""
+
+    @classmethod
+    def resource_setup(cls):
+        """Setup resources needed by the tests."""
+        super(MemberAPITest, cls).resource_setup()
+
+        lb_name = data_utils.rand_name("lb_member_lb1_member")
+        lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
+                     const.NAME: lb_name}
+
+        ip_version = 6 if CONF.load_balancer.test_with_ipv6 else 4
+        cls._setup_lb_network_kwargs(lb_kwargs, ip_version)
+
+        lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
+        cls.lb_id = lb[const.ID]
+        cls.addClassResourceCleanup(
+            cls.mem_lb_client.cleanup_loadbalancer,
+            cls.lb_id)
+
+        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+                                cls.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.lb_build_interval,
+                                CONF.load_balancer.lb_build_timeout)
+
+        listener_name = data_utils.rand_name("lb_member_listener1_member")
+        listener_kwargs = {
+            const.NAME: listener_name,
+            const.PROTOCOL: const.HTTP,
+            const.PROTOCOL_PORT: '80',
+            const.LOADBALANCER_ID: cls.lb_id,
+        }
+        listener = cls.mem_listener_client.create_listener(**listener_kwargs)
+        cls.listener_id = listener[const.ID]
+        cls.addClassResourceCleanup(
+            cls.mem_listener_client.cleanup_listener,
+            cls.listener_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
+        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+                                cls.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        pool_name = data_utils.rand_name("lb_member_pool1_member")
+        pool_kwargs = {
+            const.NAME: pool_name,
+            const.PROTOCOL: const.HTTP,
+            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
+            const.LISTENER_ID: cls.listener_id,
+        }
+
+        pool = cls.mem_pool_client.create_pool(**pool_kwargs)
+        cls.pool_id = pool[const.ID]
+        cls.addClassResourceCleanup(
+            cls.mem_pool_client.cleanup_pool,
+            cls.pool_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
+        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+                                cls.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+    # Note: This test also covers basic member show API
+    @decorators.idempotent_id('0623aa1f-753d-44e7-afa1-017d274eace7')
+    def test_member_ipv4_create(self):
+        self._test_member_create(4)
+
+    # Note: This test also covers basic member show API
+    @decorators.idempotent_id('141944cc-5e2c-4e83-88f8-f61a6797c9b7')
+    @testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
+                          'IPv6 testing is disabled')
+    def test_member_ipv6_create(self):
+        self._test_member_create(6)
+
+    def _test_member_create(self, ip_version):
+        """Tests member create and basic show APIs.
+
+        * Tests that users without the loadbalancer member role cannot
+          create members.
+        * Create a fully populated member.
+        * Show member details.
+        * Validate the show reflects the requested values.
+        """
+        if ip_version == 4:
+            member_address = '192.0.2.1'
+            member_monitor_address = '192.0.2.2'
+        else:
+            member_address = '2001:db8:0:0:0:0:0:1'
+            member_monitor_address = '2001:db8:0:0:0:0:0:2'
+
+        member_name = data_utils.rand_name("lb_member_member1-create")
+        member_kwargs = {
+            const.NAME: member_name,
+            const.ADMIN_STATE_UP: True,
+            const.POOL_ID: self.pool_id,
+            const.ADDRESS: member_address,
+            const.PROTOCOL_PORT: 80,
+            const.WEIGHT: 50,
+            const.BACKUP: False,
+            const.MONITOR_ADDRESS: member_monitor_address,
+            const.MONITOR_PORT: 8080,
+        }
+        if self.lb_member_vip_subnet:
+            member_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
+                const.ID]
+
+        # Test that a user without the load balancer role cannot
+        # create a member
+        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
+            self.assertRaises(
+                exceptions.Forbidden,
+                self.os_primary.member_client.create_member,
+                **member_kwargs)
+
+        member = self.mem_member_client.create_member(**member_kwargs)
+        self.addClassResourceCleanup(
+            self.mem_member_client.cleanup_member,
+            member[const.ID], pool_id=self.pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+        member = waiters.wait_for_status(
+            self.mem_member_client.show_member,
+            member[const.ID], const.PROVISIONING_STATUS,
+            const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout,
+            pool_id=self.pool_id)
+
+        parser.parse(member[const.CREATED_AT])
+        parser.parse(member[const.UPDATED_AT])
+        UUID(member[const.ID])
+        self.assertEqual(const.NO_MONITOR, member[const.OPERATING_STATUS])
+
+        equal_items = [const.NAME, const.ADMIN_STATE_UP, const.ADDRESS,
+                       const.PROTOCOL_PORT, const.WEIGHT, const.BACKUP,
+                       const.MONITOR_ADDRESS, const.MONITOR_PORT]
+        if const.SUBNET_ID in member_kwargs:
+            equal_items.append(const.SUBNET_ID)
+        else:
+            self.assertIsNone(member.get(const.SUBNET_ID))
+
+        for item in equal_items:
+            self.assertEqual(member_kwargs[item], member[item])
+
+    @decorators.idempotent_id('9ce7ad78-915b-42ce-b0d8-44d88a929f3d')
+    def test_member_list(self):
+        """Tests member list API and field filtering.
+
+        * Create a clean pool.
+        * Create three members.
+        * Validates that other accounts cannot list the members.
+        * List the members using the default sort order.
+        * List the members using descending sort order.
+        * List the members using ascending sort order.
+        * List the members returning one field at a time.
+        * List the members returning two fields.
+        * List the members filtering to one of the three.
+        * List the members filtered, one field, and sorted.
+        """
+        pool_name = data_utils.rand_name("lb_member_pool2_member-list")
+        pool = self.mem_pool_client.create_pool(
+            name=pool_name, loadbalancer_id=self.lb_id,
+            protocol=const.HTTP, lb_algorithm=const.LB_ALGORITHM_ROUND_ROBIN)
+        pool_id = pool[const.ID]
+        self.addCleanup(
+            self.mem_pool_client.cleanup_pool, pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id,
+                                const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        member1_name = data_utils.rand_name("lb_member_member2-list")
+        member1_kwargs = {
+            const.POOL_ID: pool_id,
+            const.NAME: member1_name,
+            const.ADMIN_STATE_UP: True,
+            const.ADDRESS: '192.0.2.1',
+            const.PROTOCOL_PORT: 101,
+        }
+        member1 = self.mem_member_client.create_member(
+            **member1_kwargs)
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member1[const.ID], pool_id=pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+        member1 = waiters.wait_for_status(
+            self.mem_member_client.show_member, member1[const.ID],
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout,
+            pool_id=pool_id)
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id,
+                                const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.check_interval,
+                                CONF.load_balancer.check_timeout)
+        # Time resolution for created_at is only to the second, and we need to
+        # ensure that each object has a distinct creation time. Delaying one
+        # second is both a simple and a reliable way to accomplish this.
+        time.sleep(1)
+
+        member2_name = data_utils.rand_name("lb_member_member1-list")
+        member2_kwargs = {
+            const.POOL_ID: pool_id,
+            const.NAME: member2_name,
+            const.ADMIN_STATE_UP: True,
+            const.ADDRESS: '192.0.2.1',
+            const.PROTOCOL_PORT: 100,
+        }
+        member2 = self.mem_member_client.create_member(
+            **member2_kwargs)
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member2[const.ID], pool_id=pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+        member2 = waiters.wait_for_status(
+            self.mem_member_client.show_member, member2[const.ID],
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout,
+            pool_id=pool_id)
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id,
+                                const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.check_interval,
+                                CONF.load_balancer.check_timeout)
+        # Time resolution for created_at is only to the second, and we need to
+        # ensure that each object has a distinct creation time. Delaying one
+        # second is both a simple and a reliable way to accomplish this.
+        time.sleep(1)
+
+        member3_name = data_utils.rand_name("lb_member_member3-list")
+        member3_kwargs = {
+            const.POOL_ID: pool_id,
+            const.NAME: member3_name,
+            const.ADMIN_STATE_UP: False,
+            const.ADDRESS: '192.0.2.1',
+            const.PROTOCOL_PORT: 102,
+        }
+        member3 = self.mem_member_client.create_member(
+            **member3_kwargs)
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member3[const.ID], pool_id=pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+        member3 = waiters.wait_for_status(
+            self.mem_member_client.show_member, member3[const.ID],
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout,
+            pool_id=pool_id)
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id,
+                                const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.check_interval,
+                                CONF.load_balancer.check_timeout)
+
+        # Test that a different user cannot list members
+        if not CONF.load_balancer.RBAC_test_type == const.NONE:
+            member2_client = self.os_roles_lb_member2.member_client
+            self.assertRaises(
+                exceptions.Forbidden,
+                member2_client.list_members,
+                pool_id)
+
+        # Test that a user without the lb member role cannot list
+        # members
+        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
+            self.assertRaises(
+                exceptions.Forbidden,
+                self.os_primary.member_client.list_members,
+                pool_id)
+
+        # Check the default sort order, created_at
+        members = self.mem_member_client.list_members(pool_id)
+        self.assertEqual(member1[const.PROTOCOL_PORT],
+                         members[0][const.PROTOCOL_PORT])
+        self.assertEqual(member2[const.PROTOCOL_PORT],
+                         members[1][const.PROTOCOL_PORT])
+        self.assertEqual(member3[const.PROTOCOL_PORT],
+                         members[2][const.PROTOCOL_PORT])
+
+        # Test sort descending by protocol_port
+        members = self.mem_member_client.list_members(
+            pool_id, query_params='{sort}={descr}:{desc}'.format(
+                sort=const.SORT, descr=const.PROTOCOL_PORT, desc=const.DESC))
+        self.assertEqual(member1[const.PROTOCOL_PORT],
+                         members[1][const.PROTOCOL_PORT])
+        self.assertEqual(member2[const.PROTOCOL_PORT],
+                         members[2][const.PROTOCOL_PORT])
+        self.assertEqual(member3[const.PROTOCOL_PORT],
+                         members[0][const.PROTOCOL_PORT])
+
+        # Test sort ascending by protocol_port
+        members = self.mem_member_client.list_members(
+            pool_id, query_params='{sort}={descr}:{asc}'.format(
+                sort=const.SORT, descr=const.PROTOCOL_PORT, asc=const.ASC))
+        self.assertEqual(member1[const.PROTOCOL_PORT],
+                         members[1][const.PROTOCOL_PORT])
+        self.assertEqual(member2[const.PROTOCOL_PORT],
+                         members[0][const.PROTOCOL_PORT])
+        self.assertEqual(member3[const.PROTOCOL_PORT],
+                         members[2][const.PROTOCOL_PORT])
+
+        # Test fields
+        for field in const.SHOW_MEMBER_RESPONSE_FIELDS:
+            members = self.mem_member_client.list_members(
+                pool_id, query_params='{fields}={field}'.format(
+                    fields=const.FIELDS, field=field))
+            self.assertEqual(1, len(members[0]))
+            self.assertEqual(member1[field], members[0][field])
+            self.assertEqual(member2[field], members[1][field])
+            self.assertEqual(member3[field], members[2][field])
+
+        # Test multiple fields at the same time
+        members = self.mem_member_client.list_members(
+            pool_id,
+            query_params='{fields}={admin}&{fields}={created}'.format(
+                fields=const.FIELDS, admin=const.ADMIN_STATE_UP,
+                created=const.CREATED_AT))
+        self.assertEqual(2, len(members[0]))
+        self.assertTrue(members[0][const.ADMIN_STATE_UP])
+        parser.parse(members[0][const.CREATED_AT])
+        self.assertTrue(members[1][const.ADMIN_STATE_UP])
+        parser.parse(members[1][const.CREATED_AT])
+        self.assertFalse(members[2][const.ADMIN_STATE_UP])
+        parser.parse(members[2][const.CREATED_AT])
+
+        # Test filtering
+        members = self.mem_member_client.list_members(
+            pool_id,
+            query_params='{desc}={lb_desc}'.format(
+                desc=const.PROTOCOL_PORT,
+                lb_desc=member2[const.PROTOCOL_PORT]))
+        self.assertEqual(1, len(members))
+        self.assertEqual(member2[const.PROTOCOL_PORT],
+                         members[0][const.PROTOCOL_PORT])
+
+        # Test combined params
+        members = self.mem_member_client.list_members(
+            pool_id,
+            query_params='{admin}={true}&'
+                         '{fields}={descr}&{fields}={id}&'
+                         '{sort}={descr}:{desc}'.format(
+                             admin=const.ADMIN_STATE_UP,
+                             true=const.ADMIN_STATE_UP_TRUE,
+                             fields=const.FIELDS, descr=const.PROTOCOL_PORT,
+                             id=const.ID, sort=const.SORT, desc=const.DESC))
+        # Should get two members
+        self.assertEqual(2, len(members))
+        # members should have two fields
+        self.assertEqual(2, len(members[0]))
+        # Should be in descending order
+        self.assertEqual(member2[const.PROTOCOL_PORT],
+                         members[1][const.PROTOCOL_PORT])
+        self.assertEqual(member1[const.PROTOCOL_PORT],
+                         members[0][const.PROTOCOL_PORT])
+
+    @decorators.idempotent_id('7674ae04-7e92-44ef-9adf-40718d7ec705')
+    def test_member_show(self):
+        """Tests member show API.
+
+        * Create a fully populated member.
+        * Show member details.
+        * Validate the show reflects the requested values.
+        * Validates that other accounts cannot see the member.
+        """
+        member_name = data_utils.rand_name("lb_member_member1-show")
+        member_kwargs = {
+            const.NAME: member_name,
+            const.ADMIN_STATE_UP: True,
+            const.POOL_ID: self.pool_id,
+            const.ADDRESS: '192.0.2.1',
+            const.PROTOCOL_PORT: 81,
+            const.WEIGHT: 50,
+            const.BACKUP: False,
+            const.MONITOR_ADDRESS: '192.0.2.2',
+            const.MONITOR_PORT: 8080,
+        }
+        if self.lb_member_vip_subnet:
+            member_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
+                const.ID]
+
+        member = self.mem_member_client.create_member(**member_kwargs)
+        self.addClassResourceCleanup(
+            self.mem_member_client.cleanup_member,
+            member[const.ID], pool_id=self.pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+        member = waiters.wait_for_status(
+            self.mem_member_client.show_member,
+            member[const.ID], const.PROVISIONING_STATUS,
+            const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout,
+            pool_id=self.pool_id)
+
+        parser.parse(member[const.CREATED_AT])
+        parser.parse(member[const.UPDATED_AT])
+        UUID(member[const.ID])
+        self.assertEqual(const.NO_MONITOR, member[const.OPERATING_STATUS])
+
+        equal_items = [const.NAME, const.ADMIN_STATE_UP, const.ADDRESS,
+                       const.PROTOCOL_PORT, const.WEIGHT, const.BACKUP,
+                       const.MONITOR_ADDRESS, const.MONITOR_PORT]
+        if const.SUBNET_ID in member_kwargs:
+            equal_items.append(const.SUBNET_ID)
+        else:
+            self.assertIsNone(member.get(const.SUBNET_ID))
+
+        for item in equal_items:
+            self.assertEqual(member_kwargs[item], member[item])
+
+        # Test that a user with lb_admin role can see the member
+        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
+            member_client = self.os_roles_lb_admin.member_client
+            member_adm = member_client.show_member(
+                member[const.ID], pool_id=self.pool_id)
+            self.assertEqual(member_name, member_adm[const.NAME])
+
+        # Test that a user with cloud admin role can see the member
+        if not CONF.load_balancer.RBAC_test_type == const.NONE:
+            adm = self.os_admin.member_client.show_member(
+                member[const.ID], pool_id=self.pool_id)
+            self.assertEqual(member_name, adm[const.NAME])
+
+        # Test that a different user, with load balancer member role, cannot
+        # see this member
+        if not CONF.load_balancer.RBAC_test_type == const.NONE:
+            member2_client = self.os_roles_lb_member2.member_client
+            self.assertRaises(exceptions.Forbidden,
+                              member2_client.show_member,
+                              member[const.ID], pool_id=self.pool_id)
+
+        # Test that a user, without the load balancer member role, cannot
+        # show members
+        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
+            self.assertRaises(
+                exceptions.Forbidden,
+                self.os_primary.member_client.show_member,
+                member[const.ID], pool_id=self.pool_id)
+
+    @decorators.idempotent_id('c07572b8-e853-48f3-a8ea-37fc293a4724')
+    def test_member_update(self):
+        """Tests member update API.
+
+        * Create a fully populated member.
+        * Show member details.
+        * Validate the show reflects the initial values.
+        * Validates that other accounts cannot update the member.
+        * Update the member details.
+        * Show member details.
+        * Validate the show reflects the updated values.
+        """
+        member_name = data_utils.rand_name("lb_member_member1-update")
+        member_kwargs = {
+            const.NAME: member_name,
+            const.ADMIN_STATE_UP: False,
+            const.POOL_ID: self.pool_id,
+            const.ADDRESS: '192.0.2.1',
+            const.PROTOCOL_PORT: 82,
+            const.WEIGHT: 50,
+            const.BACKUP: False,
+            const.MONITOR_ADDRESS: '192.0.2.2',
+            const.MONITOR_PORT: 8080,
+        }
+        if self.lb_member_vip_subnet:
+            member_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
+                const.ID]
+
+        member = self.mem_member_client.create_member(**member_kwargs)
+        self.addClassResourceCleanup(
+            self.mem_member_client.cleanup_member,
+            member[const.ID], pool_id=self.pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+        member = waiters.wait_for_status(
+            self.mem_member_client.show_member,
+            member[const.ID], const.PROVISIONING_STATUS,
+            const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout,
+            pool_id=self.pool_id)
+        if not CONF.load_balancer.test_with_noop:
+            member = waiters.wait_for_status(
+                self.mem_member_client.show_member,
+                member[const.ID], const.OPERATING_STATUS,
+                const.OFFLINE,
+                CONF.load_balancer.build_interval,
+                CONF.load_balancer.build_timeout,
+                pool_id=self.pool_id)
+
+        parser.parse(member[const.CREATED_AT])
+        parser.parse(member[const.UPDATED_AT])
+        UUID(member[const.ID])
+
+        equal_items = [const.NAME, const.ADMIN_STATE_UP, const.ADDRESS,
+                       const.PROTOCOL_PORT, const.WEIGHT, const.BACKUP,
+                       const.MONITOR_ADDRESS, const.MONITOR_PORT]
+        if const.SUBNET_ID in member_kwargs:
+            equal_items.append(const.SUBNET_ID)
+        else:
+            self.assertIsNone(member.get(const.SUBNET_ID))
+
+        for item in equal_items:
+            self.assertEqual(member_kwargs[item], member[item])
+
+        if CONF.load_balancer.test_with_noop:
+            # Operating status with noop will stay in NO_MONITOR
+            self.assertEqual(const.NO_MONITOR, member[const.OPERATING_STATUS])
+        else:
+            # Operating status will be OFFLINE while admin_state_up = False
+            self.assertEqual(const.OFFLINE, member[const.OPERATING_STATUS])
+
+        # Test that a user, without the load balancer member role, cannot
+        # use this command
+        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
+            self.assertRaises(
+                exceptions.Forbidden,
+                self.os_primary.member_client.update_member,
+                member[const.ID], pool_id=self.pool_id, admin_state_up=True)
+
+        # Assert we didn't go into PENDING_*
+        member_check = self.mem_member_client.show_member(
+            member[const.ID], pool_id=self.pool_id)
+        self.assertEqual(const.ACTIVE,
+                         member_check[const.PROVISIONING_STATUS])
+        self.assertEqual(member_kwargs[const.ADMIN_STATE_UP],
+                         member_check[const.ADMIN_STATE_UP])
+
+        # Test that a different user, with the load balancer member role,
+        # cannot update this member
+        if not CONF.load_balancer.RBAC_test_type == const.NONE:
+            member2_client = self.os_roles_lb_member2.member_client
+            self.assertRaises(exceptions.Forbidden,
+                              member2_client.update_member,
+                              member[const.ID], pool_id=self.pool_id,
+                              admin_state_up=True)
+
+        # Assert we didn't go into PENDING_*
+        member_check = self.mem_member_client.show_member(
+            member[const.ID], pool_id=self.pool_id)
+        self.assertEqual(const.ACTIVE,
+                         member_check[const.PROVISIONING_STATUS])
+        self.assertEqual(member_kwargs[const.ADMIN_STATE_UP],
+                         member_check[const.ADMIN_STATE_UP])
+
+        new_name = data_utils.rand_name("lb_member_member1-UPDATED")
+        member_update_kwargs = {
+            const.POOL_ID: member_kwargs[const.POOL_ID],
+            const.NAME: new_name,
+            const.ADMIN_STATE_UP: not member[const.ADMIN_STATE_UP],
+            const.WEIGHT: member[const.WEIGHT] + 1,
+            const.BACKUP: not member[const.BACKUP],
+            const.MONITOR_ADDRESS: '192.0.2.3',
+            const.MONITOR_PORT: member[const.MONITOR_PORT] + 1,
+        }
+        member = self.mem_member_client.update_member(
+            member[const.ID], **member_update_kwargs)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+        member = waiters.wait_for_status(
+            self.mem_member_client.show_member,
+            member[const.ID], const.PROVISIONING_STATUS,
+            const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout,
+            pool_id=self.pool_id)
+        if not CONF.load_balancer.test_with_noop:
+            member = waiters.wait_for_status(
+                self.mem_member_client.show_member,
+                member[const.ID], const.OPERATING_STATUS,
+                const.NO_MONITOR,
+                CONF.load_balancer.build_interval,
+                CONF.load_balancer.build_timeout,
+                pool_id=self.pool_id)
+
+        # Operating status will be NO_MONITOR regardless of noop
+        self.assertEqual(const.NO_MONITOR, member[const.OPERATING_STATUS])
+
+        # Test changed items
+        equal_items = [const.NAME, const.ADMIN_STATE_UP, const.WEIGHT,
+                       const.BACKUP, const.MONITOR_ADDRESS, const.MONITOR_PORT]
+        for item in equal_items:
+            self.assertEqual(member_update_kwargs[item], member[item])
+
+        # Test unchanged items
+        equal_items = [const.ADDRESS, const.PROTOCOL_PORT]
+        if const.SUBNET_ID in member_kwargs:
+            equal_items.append(const.SUBNET_ID)
+        else:
+            self.assertIsNone(member.get(const.SUBNET_ID))
+
+        for item in equal_items:
+            self.assertEqual(member_kwargs[item], member[item])
+
+    @decorators.idempotent_id('83e0a9f2-491f-46a8-b3ce-6969d70a4e9f')
+    def test_member_batch_update(self):
+        """Tests member batch update.
+
+        * Create two members.
+        * Batch update the members so one is deleted, created, and updated
+        * Validate the member list is correct.
+        """
+        pool_name = data_utils.rand_name("lb_member_pool3_member-batch")
+        pool = self.mem_pool_client.create_pool(
+            name=pool_name, loadbalancer_id=self.lb_id,
+            protocol=const.HTTP, lb_algorithm=const.LB_ALGORITHM_ROUND_ROBIN)
+        pool_id = pool[const.ID]
+        self.addClassResourceCleanup(
+            self.mem_pool_client.cleanup_pool, pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id,
+                                const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        member1_name = data_utils.rand_name("lb_member_member1-batch")
+        member1_kwargs = {
+            const.NAME: member1_name,
+            const.ADMIN_STATE_UP: True,
+            const.POOL_ID: pool_id,
+            const.ADDRESS: '192.0.2.1',
+            const.PROTOCOL_PORT: 80,
+            const.WEIGHT: 50,
+            const.BACKUP: False,
+            const.MONITOR_ADDRESS: '192.0.2.2',
+            const.MONITOR_PORT: 8080,
+        }
+        if self.lb_member_vip_subnet:
+            member1_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
+                const.ID]
+
+        member1 = self.mem_member_client.create_member(**member1_kwargs)
+        self.addClassResourceCleanup(
+            self.mem_member_client.cleanup_member,
+            member1[const.ID], pool_id=pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id,
+                                const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        member2_name = data_utils.rand_name("lb_member_member2-batch")
+        member2_kwargs = {
+            const.NAME: member2_name,
+            const.ADMIN_STATE_UP: True,
+            const.POOL_ID: pool_id,
+            const.ADDRESS: '192.0.2.3',
+            const.PROTOCOL_PORT: 81,
+            const.WEIGHT: 51,
+            const.BACKUP: True,
+            const.MONITOR_ADDRESS: '192.0.2.4',
+            const.MONITOR_PORT: 8081,
+        }
+        if self.lb_member_vip_subnet:
+            member2_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
+                const.ID]
+
+        member2 = self.mem_member_client.create_member(**member2_kwargs)
+        self.addClassResourceCleanup(
+            self.mem_member_client.cleanup_member,
+            member2[const.ID], pool_id=pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id,
+                                const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        member3_name = data_utils.rand_name("lb_member_member3-batch")
+        member3_kwargs = {
+            const.NAME: member3_name,
+            const.ADMIN_STATE_UP: True,
+            const.ADDRESS: '192.0.2.5',
+            const.PROTOCOL_PORT: 82,
+            const.WEIGHT: 52,
+            const.BACKUP: True,
+            const.MONITOR_ADDRESS: '192.0.2.6',
+            const.MONITOR_PORT: 8082,
+        }
+        if self.lb_member_vip_subnet:
+            member3_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
+                const.ID]
+
+        member2_name_update = data_utils.rand_name("lb_member_member2-new")
+        member2_kwargs[const.NAME] = member2_name_update
+        member2_kwargs.pop(const.POOL_ID)
+        batch_update_list = [member2_kwargs, member3_kwargs]
+
+        # Test that a user, without the load balancer member role, cannot
+        # use this command
+        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
+            self.assertRaises(
+                exceptions.Forbidden,
+                self.os_primary.member_client.update_members,
+                pool_id=pool_id, members_list=batch_update_list)
+
+        # Assert we didn't go into PENDING_*
+        member_check = self.mem_member_client.show_member(
+            member2[const.ID], pool_id=pool_id)
+        self.assertEqual(const.ACTIVE, member_check[const.PROVISIONING_STATUS])
+        self.assertEqual(member2_name, member_check[const.NAME])
+
+        self.mem_member_client.update_members(
+            pool_id=pool_id, members_list=batch_update_list)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer,
+            self.lb_id, const.PROVISIONING_STATUS,
+            const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+
+        members = self.mem_member_client.list_members(
+            pool_id,
+            query_params='{sort}={port}:{asc}'.format(
+                sort=const.SORT, port=const.PROTOCOL_PORT, asc=const.ASC))
+        for m in members:
+            self.addClassResourceCleanup(
+                self.mem_member_client.cleanup_member,
+                m[const.ID], pool_id=pool_id,
+                lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        # We should have two members: member2 and member3, in that order
+        self.assertEqual(2, len(members))
+        # Member2 is the same ID
+        self.assertEqual(member2[const.ID], members[0][const.ID])
+        # Member3 will have a different ID (not member1)
+        self.assertNotEqual(member1[const.ID], members[1][const.ID])
+
+        # Member2's name should be updated, and member3 should exist
+        self.assertEqual(member2_name_update, members[0][const.NAME])
+        self.assertEqual(member3_name, members[1][const.NAME])
+
+    @decorators.idempotent_id('f129ba5e-a16e-4178-924f-6a9c5b8b1589')
+    def test_member_delete(self):
+        """Tests member create and delete APIs.
+
+        * Creates a member.
+        * Validates that other accounts cannot delete the member
+        * Deletes the member.
+        * Validates the member is in the DELETED state.
+        """
+        member_name = data_utils.rand_name("lb_member_member1-delete")
+        member_kwargs = {
+            const.POOL_ID: self.pool_id,
+            const.NAME: member_name,
+            const.ADDRESS: '192.0.2.1',
+            const.PROTOCOL_PORT: 83,
+        }
+        member = self.mem_member_client.create_member(**member_kwargs)
+        self.addClassResourceCleanup(
+            self.mem_member_client.cleanup_member,
+            member[const.ID], pool_id=self.pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer,
+            self.lb_id, const.PROVISIONING_STATUS,
+            const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+
+        # Test that a user without the load balancer role cannot
+        # delete this member
+        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
+            self.assertRaises(
+                exceptions.Forbidden,
+                self.os_primary.member_client.delete_member,
+                member[const.ID], pool_id=self.pool_id)
+
+        # Test that a different user, with the load balancer member role
+        # cannot delete this member
+        if not CONF.load_balancer.RBAC_test_type == const.NONE:
+            member2_client = self.os_roles_lb_member2.member_client
+            self.assertRaises(exceptions.Forbidden,
+                              member2_client.delete_member,
+                              member[const.ID], pool_id=self.pool_id)
+
+        self.mem_member_client.delete_member(member[const.ID],
+                                             pool_id=self.pool_id)
+
+        waiters.wait_for_deleted_status_or_not_found(
+            self.mem_member_client.show_member, member[const.ID],
+            const.PROVISIONING_STATUS,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout,
+            pool_id=self.pool_id)
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_member.py b/octavia_tempest_plugin/tests/scenario/v2/test_member.py
new file mode 100644
index 0000000..16610a8
--- /dev/null
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_member.py
@@ -0,0 +1,204 @@
+# Copyright 2018 GoDaddy
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from uuid import UUID
+
+from dateutil import parser
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+
+from octavia_tempest_plugin.common import constants as const
+from octavia_tempest_plugin.tests import test_base
+from octavia_tempest_plugin.tests import waiters
+
+CONF = config.CONF
+
+
+class MemberScenarioTest(test_base.LoadBalancerBaseTest):
+
+    @classmethod
+    def resource_setup(cls):
+        """Setup resources needed by the tests."""
+        super(MemberScenarioTest, cls).resource_setup()
+
+        lb_name = data_utils.rand_name("lb_member_lb1_member")
+        lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
+                     const.NAME: lb_name}
+
+        ip_version = 6 if CONF.load_balancer.test_with_ipv6 else 4
+        cls._setup_lb_network_kwargs(lb_kwargs, ip_version)
+
+        lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
+        cls.lb_id = lb[const.ID]
+        cls.addClassResourceCleanup(
+            cls.mem_lb_client.cleanup_loadbalancer,
+            cls.lb_id)
+
+        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+                                cls.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.lb_build_interval,
+                                CONF.load_balancer.lb_build_timeout)
+
+        listener_name = data_utils.rand_name("lb_member_listener1_member")
+        listener_kwargs = {
+            const.NAME: listener_name,
+            const.PROTOCOL: const.HTTP,
+            const.PROTOCOL_PORT: '80',
+            const.LOADBALANCER_ID: cls.lb_id,
+        }
+        listener = cls.mem_listener_client.create_listener(**listener_kwargs)
+        cls.listener_id = listener[const.ID]
+        cls.addClassResourceCleanup(
+            cls.mem_listener_client.cleanup_listener,
+            cls.listener_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
+        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+                                cls.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        pool_name = data_utils.rand_name("lb_member_pool1_member")
+        pool_kwargs = {
+            const.NAME: pool_name,
+            const.PROTOCOL: const.HTTP,
+            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
+            const.LISTENER_ID: cls.listener_id,
+        }
+        pool = cls.mem_pool_client.create_pool(**pool_kwargs)
+        cls.pool_id = pool[const.ID]
+        cls.addClassResourceCleanup(
+            cls.mem_pool_client.cleanup_pool,
+            cls.pool_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
+        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+                                cls.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+    @decorators.idempotent_id('15c8c3e3-569c-4029-95df-a9f72049e267')
+    def test_member_CRUD(self):
+        """Tests member create, read, update, delete
+
+        * Create a fully populated member.
+        * Show member details.
+        * Update the member.
+        * Delete the member.
+        """
+        # Member create
+        member_name = data_utils.rand_name("lb_member_member1-CRUD")
+        member_kwargs = {
+            const.NAME: member_name,
+            const.ADMIN_STATE_UP: True,
+            const.POOL_ID: self.pool_id,
+            const.ADDRESS: '192.0.2.1',
+            const.PROTOCOL_PORT: 80,
+            const.WEIGHT: 50,
+            const.BACKUP: False,
+            const.MONITOR_ADDRESS: '192.0.2.2',
+            const.MONITOR_PORT: 8080,
+        }
+        if self.lb_member_vip_subnet:
+            member_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
+                const.ID]
+
+        member = self.mem_member_client.create_member(**member_kwargs)
+        self.addClassResourceCleanup(
+            self.mem_member_client.cleanup_member,
+            member[const.ID], pool_id=self.pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        member = waiters.wait_for_status(
+            self.mem_member_client.show_member,
+            member[const.ID], const.PROVISIONING_STATUS,
+            const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout,
+            pool_id=self.pool_id)
+
+        parser.parse(member[const.CREATED_AT])
+        parser.parse(member[const.UPDATED_AT])
+        UUID(member[const.ID])
+        self.assertEqual(const.NO_MONITOR, member[const.OPERATING_STATUS])
+
+        equal_items = [const.NAME, const.ADMIN_STATE_UP, const.ADDRESS,
+                       const.PROTOCOL_PORT, const.WEIGHT, const.BACKUP,
+                       const.MONITOR_ADDRESS, const.MONITOR_PORT]
+        if const.SUBNET_ID in member_kwargs:
+            equal_items.append(const.SUBNET_ID)
+        else:
+            self.assertIsNone(member.get(const.SUBNET_ID))
+
+        for item in equal_items:
+            self.assertEqual(member_kwargs[item], member[item])
+
+        # Member update
+        new_name = data_utils.rand_name("lb_member_member1-update")
+        member_update_kwargs = {
+            const.POOL_ID: member_kwargs[const.POOL_ID],
+            const.NAME: new_name,
+            const.ADMIN_STATE_UP: not member[const.ADMIN_STATE_UP],
+            const.WEIGHT: member[const.WEIGHT] + 1,
+            const.BACKUP: not member[const.BACKUP],
+            const.MONITOR_ADDRESS: '192.0.2.3',
+            const.MONITOR_PORT: member[const.MONITOR_PORT] + 1,
+        }
+        member = self.mem_member_client.update_member(
+            member[const.ID], **member_update_kwargs)
+
+        member = waiters.wait_for_status(
+            self.mem_member_client.show_member,
+            member[const.ID], const.PROVISIONING_STATUS,
+            const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout,
+            pool_id=self.pool_id)
+
+        # Test changed items
+        equal_items = [const.NAME, const.ADMIN_STATE_UP, const.WEIGHT,
+                       const.BACKUP, const.MONITOR_ADDRESS, const.MONITOR_PORT]
+        for item in equal_items:
+            self.assertEqual(member_update_kwargs[item], member[item])
+
+        # Test unchanged items
+        equal_items = [const.ADDRESS, const.PROTOCOL_PORT]
+        if const.SUBNET_ID in member_kwargs:
+            equal_items.append(const.SUBNET_ID)
+        else:
+            self.assertIsNone(member.get(const.SUBNET_ID))
+
+        for item in equal_items:
+            self.assertEqual(member_kwargs[item], member[item])
+
+        # Member delete
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer,
+            self.lb_id, const.PROVISIONING_STATUS,
+            const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+        self.mem_member_client.delete_member(member[const.ID],
+                                             pool_id=self.pool_id)
+
+        waiters.wait_for_deleted_status_or_not_found(
+            self.mem_member_client.show_member, member[const.ID],
+            const.PROVISIONING_STATUS,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout,
+            pool_id=self.pool_id)
diff --git a/octavia_tempest_plugin/tests/test_base.py b/octavia_tempest_plugin/tests/test_base.py
index 9862e12..bacc9fd 100644
--- a/octavia_tempest_plugin/tests/test_base.py
+++ b/octavia_tempest_plugin/tests/test_base.py
@@ -105,6 +105,7 @@
         cls.mem_lb_client = cls.os_roles_lb_member.loadbalancer_client
         cls.mem_listener_client = cls.os_roles_lb_member.listener_client
         cls.mem_pool_client = cls.os_roles_lb_member.pool_client
+        cls.mem_member_client = cls.os_roles_lb_member.member_client
 
     @classmethod
     def resource_setup(cls):
diff --git a/octavia_tempest_plugin/tests/waiters.py b/octavia_tempest_plugin/tests/waiters.py
index 6598fcc..5abb26e 100644
--- a/octavia_tempest_plugin/tests/waiters.py
+++ b/octavia_tempest_plugin/tests/waiters.py
@@ -27,7 +27,8 @@
 
 
 def wait_for_status(show_client, id, status_key, status,
-                    check_interval, check_timeout, root_tag=None):
+                    check_interval, check_timeout, root_tag=None,
+                    **kwargs):
     """Waits for an object to reach a specific status.
 
     :param show_client: The tempest service client show method.
@@ -51,11 +52,11 @@
     while True:
         if status == const.DELETED:
             try:
-                response = show_client(id)
+                response = show_client(id, **kwargs)
             except exceptions.NotFound:
                 return
         else:
-            response = show_client(id)
+            response = show_client(id, **kwargs)
 
         if root_tag:
             object_details = response[root_tag]
@@ -127,7 +128,7 @@
 
 def wait_for_deleted_status_or_not_found(
         show_client, id, status_key, check_interval, check_timeout,
-        root_tag=None):
+        root_tag=None, **kwargs):
     """Waits for an object to reach a DELETED status or be not found (404).
 
     :param show_client: The tempest service client show method.
@@ -149,7 +150,7 @@
              'found(404)'.format(name=show_client.__name__))
     while True:
         try:
-            response = show_client(id)
+            response = show_client(id, **kwargs)
         except exceptions.NotFound:
             return