merge lbaasv2, portsbinding, volumes v2; remove 'rackspace' refs; update docs
diff --git a/acceptance/openstack/blockstorage/v2/extensions/pkg.go b/acceptance/openstack/blockstorage/v2/extensions/pkg.go
new file mode 100644
index 0000000..89d906d
--- /dev/null
+++ b/acceptance/openstack/blockstorage/v2/extensions/pkg.go
@@ -0,0 +1,3 @@
+// The extensions package contains acceptance tests for the OpenStack Cinder V2 extensions service.
+
+package extensions
diff --git a/acceptance/openstack/blockstorage/v2/extensions/volumeactions_test.go b/acceptance/openstack/blockstorage/v2/extensions/volumeactions_test.go
new file mode 100644
index 0000000..20a4597
--- /dev/null
+++ b/acceptance/openstack/blockstorage/v2/extensions/volumeactions_test.go
@@ -0,0 +1,149 @@
+// +build acceptance blockstorage
+
+package extensions
+
+import (
+	"os"
+	"testing"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/openstack"
+	"github.com/gophercloud/gophercloud/openstack/blockstorage/v2/extensions/volumeactions"
+	"github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes"
+	th "github.com/gophercloud/gophercloud/testhelper"
+)
+
+func newClient(t *testing.T) (*gophercloud.ServiceClient, error) {
+	ao, err := openstack.AuthOptionsFromEnv()
+	th.AssertNoErr(t, err)
+
+	client, err := openstack.AuthenticatedClient(ao)
+	th.AssertNoErr(t, err)
+
+	return openstack.NewBlockStorageV2(client, gophercloud.EndpointOpts{
+		Region: os.Getenv("OS_REGION_NAME"),
+	})
+}
+
+func TestVolumeAttach(t *testing.T) {
+	client, err := newClient(t)
+	th.AssertNoErr(t, err)
+
+	t.Logf("Creating volume")
+	cv, err := volumes.Create(client, &volumes.CreateOpts{
+		Size: 1,
+		Name: "blockv2-volume",
+	}).Extract()
+	th.AssertNoErr(t, err)
+
+	defer func() {
+		err = volumes.WaitForStatus(client, cv.ID, "available", 60)
+		th.AssertNoErr(t, err)
+
+		t.Logf("Deleting volume")
+		err = volumes.Delete(client, cv.ID).ExtractErr()
+		th.AssertNoErr(t, err)
+	}()
+
+	err = volumes.WaitForStatus(client, cv.ID, "available", 60)
+	th.AssertNoErr(t, err)
+
+	instanceID := os.Getenv("OS_INSTANCE_ID")
+	if instanceID == "" {
+		t.Fatal("Environment variable OS_INSTANCE_ID is required")
+	}
+
+	t.Logf("Attaching volume")
+	err = volumeactions.Attach(client, cv.ID, &volumeactions.AttachOpts{
+		MountPoint:   "/mnt",
+		Mode:         "rw",
+		InstanceUUID: instanceID,
+	}).ExtractErr()
+	th.AssertNoErr(t, err)
+
+	err = volumes.WaitForStatus(client, cv.ID, "in-use", 60)
+	th.AssertNoErr(t, err)
+
+	t.Logf("Detaching volume")
+	err = volumeactions.Detach(client, cv.ID).ExtractErr()
+	th.AssertNoErr(t, err)
+}
+
+func TestVolumeReserve(t *testing.T) {
+	client, err := newClient(t)
+	th.AssertNoErr(t, err)
+
+	t.Logf("Creating volume")
+	cv, err := volumes.Create(client, &volumes.CreateOpts{
+		Size: 1,
+		Name: "blockv2-volume",
+	}).Extract()
+	th.AssertNoErr(t, err)
+
+	defer func() {
+		err = volumes.WaitForStatus(client, cv.ID, "available", 60)
+		th.AssertNoErr(t, err)
+
+		t.Logf("Deleting volume")
+		err = volumes.Delete(client, cv.ID).ExtractErr()
+		th.AssertNoErr(t, err)
+	}()
+
+	err = volumes.WaitForStatus(client, cv.ID, "available", 60)
+	th.AssertNoErr(t, err)
+
+	t.Logf("Reserving volume")
+	err = volumeactions.Reserve(client, cv.ID).ExtractErr()
+	th.AssertNoErr(t, err)
+
+	err = volumes.WaitForStatus(client, cv.ID, "attaching", 60)
+	th.AssertNoErr(t, err)
+
+	t.Logf("Unreserving volume")
+	err = volumeactions.Unreserve(client, cv.ID).ExtractErr()
+	th.AssertNoErr(t, err)
+
+	err = volumes.WaitForStatus(client, cv.ID, "available", 60)
+	th.AssertNoErr(t, err)
+}
+
+func TestVolumeConns(t *testing.T) {
+	client, err := newClient(t)
+	th.AssertNoErr(t, err)
+
+	t.Logf("Creating volume")
+	cv, err := volumes.Create(client, &volumes.CreateOpts{
+		Size: 1,
+		Name: "blockv2-volume",
+	}).Extract()
+	th.AssertNoErr(t, err)
+
+	defer func() {
+		err = volumes.WaitForStatus(client, cv.ID, "available", 60)
+		th.AssertNoErr(t, err)
+
+		t.Logf("Deleting volume")
+		err = volumes.Delete(client, cv.ID).ExtractErr()
+		th.AssertNoErr(t, err)
+	}()
+
+	err = volumes.WaitForStatus(client, cv.ID, "available", 60)
+	th.AssertNoErr(t, err)
+
+	connOpts := &volumeactions.ConnectorOpts{
+		IP:        "127.0.0.1",
+		Host:      "stack",
+		Initiator: "iqn.1994-05.com.redhat:17cf566367d2",
+		Multipath: false,
+		Platform:  "x86_64",
+		OSType:    "linux2",
+	}
+
+	t.Logf("Initializing connection")
+	_, err = volumeactions.InitializeConnection(client, cv.ID, connOpts).Extract()
+	th.AssertNoErr(t, err)
+
+	t.Logf("Terminating connection")
+	err = volumeactions.TerminateConnection(client, cv.ID, connOpts).ExtractErr()
+	th.AssertNoErr(t, err)
+}
diff --git a/acceptance/openstack/blockstorage/v2/pkg.go b/acceptance/openstack/blockstorage/v2/pkg.go
new file mode 100644
index 0000000..31dd0ff
--- /dev/null
+++ b/acceptance/openstack/blockstorage/v2/pkg.go
@@ -0,0 +1,3 @@
+// The v2 package contains acceptance tests for the OpenStack Cinder V2 service.
+
+package v2
diff --git a/acceptance/openstack/blockstorage/v2/volumes_test.go b/acceptance/openstack/blockstorage/v2/volumes_test.go
new file mode 100644
index 0000000..9edf31a
--- /dev/null
+++ b/acceptance/openstack/blockstorage/v2/volumes_test.go
@@ -0,0 +1,63 @@
+// +build acceptance blockstorage
+
+package v2
+
+import (
+	"os"
+	"testing"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/openstack"
+	"github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes"
+	"github.com/gophercloud/gophercloud/pagination"
+	th "github.com/gophercloud/gophercloud/testhelper"
+)
+
+func newClient(t *testing.T) (*gophercloud.ServiceClient, error) {
+	ao, err := openstack.AuthOptionsFromEnv()
+	th.AssertNoErr(t, err)
+
+	client, err := openstack.AuthenticatedClient(ao)
+	th.AssertNoErr(t, err)
+
+	return openstack.NewBlockStorageV2(client, gophercloud.EndpointOpts{
+		Region: os.Getenv("OS_REGION_NAME"),
+	})
+}
+
+func TestVolumes(t *testing.T) {
+	client, err := newClient(t)
+	th.AssertNoErr(t, err)
+
+	cv, err := volumes.Create(client, &volumes.CreateOpts{
+		Size: 1,
+		Name: "blockv2-volume",
+	}).Extract()
+	th.AssertNoErr(t, err)
+	defer func() {
+		err = volumes.WaitForStatus(client, cv.ID, "available", 60)
+		th.AssertNoErr(t, err)
+		err = volumes.Delete(client, cv.ID).ExtractErr()
+		th.AssertNoErr(t, err)
+	}()
+
+	_, err = volumes.Update(client, cv.ID, &volumes.UpdateOpts{
+		Name: "blockv2-updated-volume",
+	}).Extract()
+	th.AssertNoErr(t, err)
+
+	v, err := volumes.Get(client, cv.ID).Extract()
+	th.AssertNoErr(t, err)
+	t.Logf("Got volume: %+v\n", v)
+
+	if v.Name != "blockv2-updated-volume" {
+		t.Errorf("Unable to update volume: Expected name: blockv2-updated-volume\nActual name: %s", v.Name)
+	}
+
+	err = volumes.List(client, &volumes.ListOpts{Name: "blockv2-updated-volume"}).EachPage(func(page pagination.Page) (bool, error) {
+		vols, err := volumes.ExtractVolumes(page)
+		th.CheckEquals(t, 1, len(vols))
+		return true, err
+	})
+	th.AssertNoErr(t, err)
+}
diff --git a/acceptance/openstack/networking/v2/extensions/lbaas/common.go b/acceptance/openstack/networking/v2/extensions/lbaas/common.go
index ed948bd..760ee5b 100644
--- a/acceptance/openstack/networking/v2/extensions/lbaas/common.go
+++ b/acceptance/openstack/networking/v2/extensions/lbaas/common.go
@@ -45,6 +45,7 @@
 		Protocol: "HTTP",
 		Name:     "tmp_pool",
 		SubnetID: subnetID,
+		Provider: "haproxy",
 	}).Extract()
 
 	th.AssertNoErr(t, err)
diff --git a/acceptance/openstack/networking/v2/extensions/lbaas/pool_test.go b/acceptance/openstack/networking/v2/extensions/lbaas/pool_test.go
index 70ee844..6151217 100644
--- a/acceptance/openstack/networking/v2/extensions/lbaas/pool_test.go
+++ b/acceptance/openstack/networking/v2/extensions/lbaas/pool_test.go
@@ -55,7 +55,7 @@
 		}
 
 		for _, p := range poolList {
-			t.Logf("Listing pool: ID [%s] Name [%s] Status [%s] LB algorithm [%s]", p.ID, p.Name, p.Status, p.LBMethod)
+			t.Logf("Listing pool: ID [%s] Name [%s] Status [%s] LB algorithm [%s] Provider [%s]", p.ID, p.Name, p.Status, p.LBMethod, p.Provider)
 		}
 
 		return true, nil
diff --git a/acceptance/openstack/networking/v2/extensions/lbaas_v2/loadbalancer_test.go b/acceptance/openstack/networking/v2/extensions/lbaas_v2/loadbalancer_test.go
new file mode 100644
index 0000000..051b7eb
--- /dev/null
+++ b/acceptance/openstack/networking/v2/extensions/lbaas_v2/loadbalancer_test.go
@@ -0,0 +1,493 @@
+// +build acceptance networking lbaas_v2 lbaasloadbalancer
+
+package lbaas_v2
+
+import (
+	"testing"
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+	base "github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/networks"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/subnets"
+	"github.com/gophercloud/gophercloud/pagination"
+	th "github.com/gophercloud/gophercloud/testhelper"
+)
+
+// Note: when creating a new Loadbalancer (VM), it can take some time before it is ready for use,
+// this timeout is used for waiting until the Loadbalancer provisioning status goes to ACTIVE state.
+const loadbalancerActiveTimeoutSeconds = 120
+const loadbalancerDeleteTimeoutSeconds = 10
+
+func setupTopology(t *testing.T) (string, string) {
+	// create network
+	n, err := networks.Create(base.Client, networks.CreateOpts{Name: "tmp_network"}).Extract()
+	th.AssertNoErr(t, err)
+
+	t.Logf("Created network, ID %s", n.ID)
+
+	// create subnet
+	s, err := subnets.Create(base.Client, subnets.CreateOpts{
+		NetworkID: n.ID,
+		CIDR:      "192.168.199.0/24",
+		IPVersion: subnets.IPv4,
+		Name:      "tmp_subnet",
+	}).Extract()
+	th.AssertNoErr(t, err)
+
+	t.Logf("Created subnet, ID %s", s.ID)
+
+	return n.ID, s.ID
+}
+
+func deleteTopology(t *testing.T, networkID string) {
+	res := networks.Delete(base.Client, networkID)
+	th.AssertNoErr(t, res.Err)
+	t.Logf("deleted network, ID %s", networkID)
+}
+
+func TestLoadbalancers(t *testing.T) {
+	base.Setup(t)
+	defer base.Teardown()
+
+	// setup network topology
+	networkID, subnetID := setupTopology(t)
+
+	// create Loadbalancer
+	loadbalancerID := createLoadbalancer(t, subnetID)
+
+	// list Loadbalancers
+	listLoadbalancers(t)
+
+	// get Loadbalancer and wait until ACTIVE
+	getLoadbalancerWaitActive(t, loadbalancerID)
+
+	// update Loadbalancer
+	updateLoadbalancer(t, loadbalancerID)
+
+	// get Loadbalancer and wait until ACTIVE
+	getLoadbalancerWaitActive(t, loadbalancerID)
+
+	// create listener
+	listenerID := createListener(t, listeners.ProtocolHTTP, 80, loadbalancerID)
+
+	// list listeners
+	listListeners(t)
+
+	// get Loadbalancer and wait until ACTIVE
+	getLoadbalancerWaitActive(t, loadbalancerID)
+
+	// update listener
+	updateListener(t, listenerID)
+
+	// get listener
+	getListener(t, listenerID)
+
+	// get Loadbalancer and wait until ACTIVE
+	getLoadbalancerWaitActive(t, loadbalancerID)
+
+	// create pool
+	poolID := createPool(t, pools.ProtocolHTTP, listenerID, pools.LBMethodRoundRobin)
+
+	// list pools
+	listPools(t)
+
+	// get Loadbalancer and wait until ACTIVE
+	getLoadbalancerWaitActive(t, loadbalancerID)
+
+	// update pool
+	updatePool(t, poolID)
+
+	// get pool
+	getPool(t, poolID)
+
+	// get Loadbalancer and wait until ACTIVE
+	getLoadbalancerWaitActive(t, loadbalancerID)
+
+	// create member
+	memberID := createMember(t, subnetID, poolID, "1.2.3.4", 80, 5)
+
+	// list members
+	listMembers(t, poolID)
+
+	// get Loadbalancer and wait until ACTIVE
+	getLoadbalancerWaitActive(t, loadbalancerID)
+
+	// update member
+	updateMember(t, poolID, memberID)
+
+	// get member
+	getMember(t, poolID, memberID)
+
+	// get Loadbalancer and wait until ACTIVE
+	getLoadbalancerWaitActive(t, loadbalancerID)
+
+	// create monitor
+	monitorID := createMonitor(t, poolID, monitors.TypePING, 10, 10, 3)
+
+	// list monitors
+	listMonitors(t)
+
+	// get Loadbalancer and wait until ACTIVE
+	getLoadbalancerWaitActive(t, loadbalancerID)
+
+	// update monitor
+	updateMonitor(t, monitorID)
+
+	// get monitor
+	getMonitor(t, monitorID)
+
+	// get loadbalancer statuses tree
+	rawStatusTree, err := loadbalancers.GetStatuses(base.Client, loadbalancerID).ExtractStatuses()
+	if err == nil {
+		// verify statuses tree ID's of relevant objects
+		if rawStatusTree.Loadbalancer.ID != loadbalancerID {
+			t.Errorf("Loadbalancer ID did not match")
+		}
+		if rawStatusTree.Loadbalancer.Listeners[0].ID != listenerID {
+			t.Errorf("Listener ID did not match")
+		}
+		if rawStatusTree.Loadbalancer.Listeners[0].Pools[0].ID != poolID {
+			t.Errorf("Pool ID did not match")
+		}
+		if rawStatusTree.Loadbalancer.Listeners[0].Pools[0].Members[0].ID != memberID {
+			t.Errorf("Member ID did not match")
+		}
+		if rawStatusTree.Loadbalancer.Listeners[0].Pools[0].Monitor.ID != monitorID {
+			t.Errorf("Monitor ID did not match")
+		}
+	} else {
+		t.Errorf("Failed to extract Loadbalancer statuses tree: %v", err)
+	}
+
+	getLoadbalancerWaitActive(t, loadbalancerID)
+	deleteMonitor(t, monitorID)
+	getLoadbalancerWaitActive(t, loadbalancerID)
+	deleteMember(t, poolID, memberID)
+	getLoadbalancerWaitActive(t, loadbalancerID)
+	deletePool(t, poolID)
+	getLoadbalancerWaitActive(t, loadbalancerID)
+	deleteListener(t, listenerID)
+	getLoadbalancerWaitActive(t, loadbalancerID)
+	deleteLoadbalancer(t, loadbalancerID)
+	getLoadbalancerWaitDeleted(t, loadbalancerID)
+	deleteTopology(t, networkID)
+}
+
+func createLoadbalancer(t *testing.T, subnetID string) string {
+	lb, err := loadbalancers.Create(base.Client, loadbalancers.CreateOpts{
+		VipSubnetID:  subnetID,
+		Name:         "tmp_loadbalancer",
+		AdminStateUp: loadbalancers.Up,
+	}).Extract()
+
+	th.AssertNoErr(t, err)
+	t.Logf("Created Loadbalancer, ID %s", lb.ID)
+
+	return lb.ID
+}
+
+func deleteLoadbalancer(t *testing.T, loadbalancerID string) {
+	res := loadbalancers.Delete(base.Client, loadbalancerID)
+	th.AssertNoErr(t, res.Err)
+	t.Logf("deleted Loadbalancer, ID %s", loadbalancerID)
+}
+
+func listLoadbalancers(t *testing.T) {
+	err := loadbalancers.List(base.Client, loadbalancers.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
+		loadbalancerList, err := loadbalancers.ExtractLoadbalancers(page)
+		if err != nil {
+			t.Errorf("Failed to extract Loadbalancers: %v", err)
+			return false, err
+		}
+
+		for _, loadbalancer := range loadbalancerList {
+			t.Logf("Listing Loadbalancer: ID [%s] Name [%s] Address [%s]",
+				loadbalancer.ID, loadbalancer.Name, loadbalancer.VipAddress)
+		}
+
+		return true, nil
+	})
+
+	th.AssertNoErr(t, err)
+}
+
+func getLoadbalancerWaitDeleted(t *testing.T, loadbalancerID string) {
+	start := time.Now().Second()
+	for {
+		time.Sleep(1 * time.Second)
+
+		if time.Now().Second()-start >= loadbalancerDeleteTimeoutSeconds {
+			t.Errorf("Loadbalancer failed to delete")
+			return
+		}
+
+		_, err := loadbalancers.Get(base.Client, loadbalancerID).Extract()
+		if err != nil {
+			if errData, ok := err.(*(gophercloud.UnexpectedResponseCodeError)); ok {
+				if errData.Actual == 404 {
+					return
+				}
+			} else {
+				th.AssertNoErr(t, err)
+			}
+		}
+	}
+}
+
+func getLoadbalancerWaitActive(t *testing.T, loadbalancerID string) {
+	start := time.Now().Second()
+	for {
+		time.Sleep(1 * time.Second)
+
+		if time.Now().Second()-start >= loadbalancerActiveTimeoutSeconds {
+			t.Errorf("Loadbalancer failed to go into ACTIVE provisioning status")
+			return
+		}
+
+		loadbalancer, err := loadbalancers.Get(base.Client, loadbalancerID).Extract()
+		th.AssertNoErr(t, err)
+		if loadbalancer.ProvisioningStatus == "ACTIVE" {
+			t.Logf("Retrieved Loadbalancer, ID [%s]: ProvisioningStatus [%s]", loadbalancer.ID, loadbalancer.ProvisioningStatus)
+			return
+		}
+	}
+}
+
+func updateLoadbalancer(t *testing.T, loadbalancerID string) {
+	_, err := loadbalancers.Update(base.Client, loadbalancerID, loadbalancers.UpdateOpts{Name: "tmp_newName"}).Extract()
+
+	th.AssertNoErr(t, err)
+
+	t.Logf("Updated Loadbalancer ID [%s]", loadbalancerID)
+}
+
+func listListeners(t *testing.T) {
+	err := listeners.List(base.Client, listeners.ListOpts{Name: "tmp_listener"}).EachPage(func(page pagination.Page) (bool, error) {
+		listenerList, err := listeners.ExtractListeners(page)
+		if err != nil {
+			t.Errorf("Failed to extract Listeners: %v", err)
+			return false, err
+		}
+
+		for _, listener := range listenerList {
+			t.Logf("Listing Listener: ID [%s] Name [%s] Loadbalancers [%v]",
+				listener.ID, listener.Name, listener.Loadbalancers)
+		}
+
+		return true, nil
+	})
+
+	th.AssertNoErr(t, err)
+}
+
+func createListener(t *testing.T, protocol listeners.Protocol, protocolPort int, loadbalancerID string) string {
+	l, err := listeners.Create(base.Client, listeners.CreateOpts{
+		Protocol:       protocol,
+		ProtocolPort:   protocolPort,
+		LoadbalancerID: loadbalancerID,
+		Name:           "tmp_listener",
+	}).Extract()
+
+	th.AssertNoErr(t, err)
+	t.Logf("Created Listener, ID %s", l.ID)
+
+	return l.ID
+}
+
+func deleteListener(t *testing.T, listenerID string) {
+	res := listeners.Delete(base.Client, listenerID)
+	th.AssertNoErr(t, res.Err)
+	t.Logf("Deleted Listener, ID %s", listenerID)
+}
+
+func getListener(t *testing.T, listenerID string) {
+	listener, err := listeners.Get(base.Client, listenerID).Extract()
+
+	th.AssertNoErr(t, err)
+
+	t.Logf("Getting Listener, ID [%s]: ", listener.ID)
+}
+
+func updateListener(t *testing.T, listenerID string) {
+	_, err := listeners.Update(base.Client, listenerID, listeners.UpdateOpts{Name: "tmp_newName"}).Extract()
+
+	th.AssertNoErr(t, err)
+
+	t.Logf("Updated Listener, ID [%s]", listenerID)
+}
+
+func listPools(t *testing.T) {
+	err := pools.List(base.Client, pools.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
+		poolsList, err := pools.ExtractPools(page)
+		if err != nil {
+			t.Errorf("Failed to extract Pools: %v", err)
+			return false, err
+		}
+
+		for _, pool := range poolsList {
+			t.Logf("Listing Pool: ID [%s] Name [%s] Listeners [%v] LBMethod [%s]",
+				pool.ID, pool.Name, pool.Listeners, pool.LBMethod)
+		}
+
+		return true, nil
+	})
+
+	th.AssertNoErr(t, err)
+}
+
+func createPool(t *testing.T, protocol pools.Protocol, listenerID string, lbMethod pools.LBMethod) string {
+	p, err := pools.Create(base.Client, pools.CreateOpts{
+		LBMethod:   lbMethod,
+		Protocol:   protocol,
+		Name:       "tmp_pool",
+		ListenerID: listenerID,
+	}).Extract()
+
+	th.AssertNoErr(t, err)
+
+	t.Logf("Created Pool, ID %s", p.ID)
+
+	return p.ID
+}
+
+func deletePool(t *testing.T, poolID string) {
+	res := pools.Delete(base.Client, poolID)
+	th.AssertNoErr(t, res.Err)
+	t.Logf("Deleted Pool, ID %s", poolID)
+}
+
+func getPool(t *testing.T, poolID string) {
+	pool, err := pools.Get(base.Client, poolID).Extract()
+
+	th.AssertNoErr(t, err)
+
+	t.Logf("Getting Pool, ID [%s]: ", pool.ID)
+}
+
+func updatePool(t *testing.T, poolID string) {
+	_, err := pools.Update(base.Client, poolID, pools.UpdateOpts{Name: "tmp_newName"}).Extract()
+
+	th.AssertNoErr(t, err)
+
+	t.Logf("Updated Pool, ID [%s]", poolID)
+}
+
+func createMember(t *testing.T, subnetID string, poolID string, address string, protocolPort int, weight int) string {
+	m, err := pools.CreateAssociateMember(base.Client, poolID, pools.MemberCreateOpts{
+		SubnetID:     subnetID,
+		Address:      address,
+		ProtocolPort: protocolPort,
+		Weight:       weight,
+		Name:         "tmp_member",
+	}).ExtractMember()
+
+	th.AssertNoErr(t, err)
+
+	t.Logf("Created Member, ID %s", m.ID)
+
+	return m.ID
+}
+
+func deleteMember(t *testing.T, poolID string, memberID string) {
+	res := pools.DeleteMember(base.Client, poolID, memberID)
+	th.AssertNoErr(t, res.Err)
+	t.Logf("Deleted Member, ID %s", memberID)
+}
+
+func listMembers(t *testing.T, poolID string) {
+	err := pools.ListAssociateMembers(base.Client, poolID, pools.MemberListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
+		membersList, err := pools.ExtractMembers(page)
+		if err != nil {
+			t.Errorf("Failed to extract Members: %v", err)
+			return false, err
+		}
+
+		for _, member := range membersList {
+			t.Logf("Listing Member: ID [%s] Name [%s] Pool ID [%s] Weight [%d]",
+				member.ID, member.Name, member.PoolID, member.Weight)
+		}
+
+		return true, nil
+	})
+
+	th.AssertNoErr(t, err)
+}
+
+func getMember(t *testing.T, poolID string, memberID string) {
+	member, err := pools.GetAssociateMember(base.Client, poolID, memberID).ExtractMember()
+
+	th.AssertNoErr(t, err)
+
+	t.Logf("Getting Member, ID [%s]: ", member.ID)
+}
+
+func updateMember(t *testing.T, poolID string, memberID string) {
+	_, err := pools.UpdateAssociateMember(base.Client, poolID, memberID, pools.MemberUpdateOpts{Name: "tmp_newName"}).Extract()
+
+	th.AssertNoErr(t, err)
+
+	t.Logf("Updated Member, ID [%s], in Pool, ID [%s]", memberID, poolID)
+}
+
+func createMonitor(t *testing.T, poolID string, checkType string, delay int, timeout int, maxRetries int) string {
+	m, err := monitors.Create(base.Client, monitors.CreateOpts{
+		PoolID:     poolID,
+		Name:       "tmp_monitor",
+		Delay:      delay,
+		Timeout:    timeout,
+		MaxRetries: maxRetries,
+		Type:       checkType,
+	}).Extract()
+
+	th.AssertNoErr(t, err)
+
+	t.Logf("Created Monitor, ID [%s]", m.ID)
+
+	return m.ID
+}
+
+func deleteMonitor(t *testing.T, monitorID string) {
+	res := monitors.Delete(base.Client, monitorID)
+	th.AssertNoErr(t, res.Err)
+	t.Logf("Deleted Monitor, ID %s", monitorID)
+}
+
+func listMonitors(t *testing.T) {
+	err := monitors.List(base.Client, monitors.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
+		monitorsList, err := monitors.ExtractMonitors(page)
+		if err != nil {
+			t.Errorf("Failed to extract Monitors: %v", err)
+			return false, err
+		}
+
+		for _, monitor := range monitorsList {
+			t.Logf("Listing Monitors: ID [%s] Type [%s] HTTPMethod [%s] URLPath [%s]",
+				monitor.ID, monitor.Type, monitor.HTTPMethod, monitor.URLPath)
+		}
+
+		return true, nil
+	})
+
+	th.AssertNoErr(t, err)
+}
+
+func getMonitor(t *testing.T, monitorID string) {
+	monitor, err := monitors.Get(base.Client, monitorID).Extract()
+
+	th.AssertNoErr(t, err)
+
+	t.Logf("Getting Monitor, ID [%s]: ", monitor.ID)
+}
+
+func updateMonitor(t *testing.T, monitorID string) {
+	_, err := monitors.Update(base.Client, monitorID, monitors.UpdateOpts{MaxRetries: 10}).Extract()
+
+	th.AssertNoErr(t, err)
+
+	t.Logf("Updated Monitor, ID [%s]", monitorID)
+}
diff --git a/acceptance/openstack/networking/v2/extensions/lbaas_v2/pkg.go b/acceptance/openstack/networking/v2/extensions/lbaas_v2/pkg.go
new file mode 100644
index 0000000..24b7482
--- /dev/null
+++ b/acceptance/openstack/networking/v2/extensions/lbaas_v2/pkg.go
@@ -0,0 +1 @@
+package lbaas_v2
diff --git a/acceptance/openstack/networking/v2/extensions/portsbinding/pkg.go b/acceptance/openstack/networking/v2/extensions/portsbinding/pkg.go
new file mode 100644
index 0000000..5dae1b1
--- /dev/null
+++ b/acceptance/openstack/networking/v2/extensions/portsbinding/pkg.go
@@ -0,0 +1 @@
+package portsbinding
diff --git a/acceptance/openstack/networking/v2/extensions/portsbinding/portsbinding_test.go b/acceptance/openstack/networking/v2/extensions/portsbinding/portsbinding_test.go
new file mode 100644
index 0000000..b703e3b
--- /dev/null
+++ b/acceptance/openstack/networking/v2/extensions/portsbinding/portsbinding_test.go
@@ -0,0 +1,129 @@
+// +build acceptance networking portsbinding
+
+package portsbinding
+
+import (
+	"testing"
+
+	base "github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/networks"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/ports"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/subnets"
+	"github.com/gophercloud/gophercloud/pagination"
+	th "github.com/gophercloud/gophercloud/testhelper"
+)
+
+func TestPortBinding(t *testing.T) {
+	base.Setup(t)
+	defer base.Teardown()
+
+	// Setup network
+	t.Log("Setting up network")
+	networkID, err := createNetwork()
+	th.AssertNoErr(t, err)
+	defer networks.Delete(base.Client, networkID)
+
+	// Setup subnet
+	t.Logf("Setting up subnet on network %s", networkID)
+	subnetID, err := createSubnet(networkID)
+	th.AssertNoErr(t, err)
+	defer subnets.Delete(base.Client, subnetID)
+
+	// Create port
+	t.Logf("Create port based on subnet %s", subnetID)
+	hostID := "localhost"
+	portID := createPort(t, networkID, subnetID, hostID)
+
+	// Get port
+	if portID == "" {
+		t.Fatalf("In order to retrieve a port, the portID must be set")
+	}
+	p, err := portsbinding.Get(base.Client, portID).Extract()
+	th.AssertNoErr(t, err)
+	th.AssertEquals(t, p.ID, portID)
+	th.AssertEquals(t, p.HostID, hostID)
+
+	// Update port
+	newHostID := "openstack"
+	updateOpts := portsbinding.UpdateOpts{
+		HostID: newHostID,
+	}
+	p, err = portsbinding.Update(base.Client, portID, updateOpts).Extract()
+
+	th.AssertNoErr(t, err)
+	th.AssertEquals(t, p.HostID, newHostID)
+
+	// List ports
+	t.Logf("Listing all ports")
+	listPorts(t)
+
+	// Delete port
+	res := ports.Delete(base.Client, portID)
+	th.AssertNoErr(t, res.Err)
+}
+
+func listPorts(t *testing.T) {
+	count := 0
+	pager := ports.List(base.Client, ports.ListOpts{})
+	err := pager.EachPage(func(page pagination.Page) (bool, error) {
+		count++
+		t.Logf("--- Page ---")
+
+		portList, err := portsbinding.ExtractPorts(page)
+		th.AssertNoErr(t, err)
+
+		for _, p := range portList {
+			t.Logf("Port: ID [%s] Name [%s] HostID [%s] VNICType [%s] VIFType [%s]",
+				p.ID, p.Name, p.HostID, p.VNICType, p.VIFType)
+		}
+
+		return true, nil
+	})
+
+	th.CheckNoErr(t, err)
+
+	if count == 0 {
+		t.Logf("No pages were iterated over when listing ports")
+	}
+}
+
+func createPort(t *testing.T, networkID, subnetID, hostID string) string {
+	enable := false
+	opts := portsbinding.CreateOpts{
+		CreateOptsBuilder: ports.CreateOpts{
+			NetworkID:    networkID,
+			Name:         "my_port",
+			AdminStateUp: &enable,
+			FixedIPs:     []ports.IP{{SubnetID: subnetID}},
+		},
+		HostID: hostID,
+	}
+
+	p, err := portsbinding.Create(base.Client, opts).Extract()
+	th.AssertNoErr(t, err)
+	th.AssertEquals(t, p.NetworkID, networkID)
+	th.AssertEquals(t, p.Name, "my_port")
+	th.AssertEquals(t, p.AdminStateUp, false)
+
+	return p.ID
+}
+
+func createNetwork() (string, error) {
+	res, err := networks.Create(base.Client, networks.CreateOpts{Name: "tmp_network", AdminStateUp: networks.Up}).Extract()
+	return res.ID, err
+}
+
+func createSubnet(networkID string) (string, error) {
+	s, err := subnets.Create(base.Client, subnets.CreateOpts{
+		NetworkID:  networkID,
+		CIDR:       "192.168.199.0/24",
+		IPVersion:  subnets.IPv4,
+		Name:       "my_subnet",
+		EnableDHCP: subnets.Down,
+		AllocationPools: []subnets.AllocationPool{
+			{Start: "192.168.199.2", End: "192.168.199.200"},
+		},
+	}).Extract()
+	return s.ID, err
+}