Merge "Few updates for baremtal dependent tests"
diff --git a/tempest/api/compute/admin/test_agents.py b/tempest/api/compute/admin/test_agents.py
index 40cb523..1c37cad 100644
--- a/tempest/api/compute/admin/test_agents.py
+++ b/tempest/api/compute/admin/test_agents.py
@@ -12,14 +12,10 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_log import log
-
 from tempest.api.compute import base
 from tempest.common.utils import data_utils
 from tempest import test
 
-LOG = log.getLogger(__name__)
-
 
 class AgentsAdminTestJSON(base.BaseV2ComputeAdminTest):
     """Tests Agents API"""
diff --git a/tempest/api/compute/admin/test_migrations.py b/tempest/api/compute/admin/test_migrations.py
index c9ba730..2dd2e89 100644
--- a/tempest/api/compute/admin/test_migrations.py
+++ b/tempest/api/compute/admin/test_migrations.py
@@ -31,7 +31,6 @@
         super(MigrationsAdminTest, cls).setup_clients()
         cls.client = cls.os_adm.migrations_client
         cls.flavors_admin_client = cls.os_adm.flavors_client
-        cls.admin_hosts_client = cls.os_adm.hosts_client
         cls.admin_servers_client = cls.os_adm.servers_client
 
     @test.idempotent_id('75c0b83d-72a0-4cf8-a153-631e83e7d53f')
diff --git a/tempest/api/compute/admin/test_servers.py b/tempest/api/compute/admin/test_servers.py
index efa55d5..a8a8b83 100644
--- a/tempest/api/compute/admin/test_servers.py
+++ b/tempest/api/compute/admin/test_servers.py
@@ -29,7 +29,6 @@
         super(ServersAdminTestJSON, cls).setup_clients()
         cls.client = cls.os_adm.servers_client
         cls.non_admin_client = cls.servers_client
-        cls.flavors_client = cls.os_adm.flavors_client
 
     @classmethod
     def resource_setup(cls):
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index f6a6040..d7e01f0 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -318,12 +318,7 @@
     def rebuild_server(cls, server_id, validatable=False, **kwargs):
         # Destroy an existing server and create a new one
         if server_id:
-            try:
-                cls.servers_client.delete_server(server_id)
-                waiters.wait_for_server_termination(cls.servers_client,
-                                                    server_id)
-            except Exception:
-                LOG.exception('Failed to delete server %s' % server_id)
+            cls.delete_server(server_id)
 
         cls.password = data_utils.rand_password()
         server = cls.create_test_server(
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index 19e2880..0b4a2a8 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -13,8 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_log import log as logging
-
 from tempest.api.compute import base
 from tempest.common.utils import data_utils
 from tempest.common import waiters
@@ -23,7 +21,6 @@
 from tempest import test
 
 CONF = config.CONF
-LOG = logging.getLogger(__name__)
 
 
 class ImagesOneServerTestJSON(base.BaseV2ComputeTest):
diff --git a/tempest/api/compute/security_groups/test_security_group_rules_negative.py b/tempest/api/compute/security_groups/test_security_group_rules_negative.py
index 4f53663..32b3ea3 100644
--- a/tempest/api/compute/security_groups/test_security_group_rules_negative.py
+++ b/tempest/api/compute/security_groups/test_security_group_rules_negative.py
@@ -35,7 +35,6 @@
     @classmethod
     def setup_clients(cls):
         super(SecurityGroupRulesNegativeTestJSON, cls).setup_clients()
-        cls.client = cls.security_groups_client
         cls.rules_client = cls.security_group_rules_client
 
     @test.attr(type=['negative'])
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index 2f43157..d2e31ad 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -19,7 +19,6 @@
 from tempest.api.compute import base
 from tempest.common.utils import data_utils
 from tempest.common.utils.linux import remote_client
-from tempest.common import waiters
 from tempest import config
 from tempest import test
 
@@ -178,12 +177,7 @@
         # when trying to delete the subnet. The tear down in the base class
         # will try to delete the server and get a 404 but it's ignored so
         # we're OK.
-        def cleanup_server():
-            self.client.delete_server(server_multi_nics['id'])
-            waiters.wait_for_server_termination(self.client,
-                                                server_multi_nics['id'])
-
-        self.addCleanup(cleanup_server)
+        self.addCleanup(self.delete_server, server_multi_nics['id'])
 
         addresses = (self.client.list_addresses(server_multi_nics['id'])
                      ['addresses'])
@@ -215,13 +209,7 @@
 
         server_multi_nics = self.create_test_server(
             networks=networks, wait_until='ACTIVE')
-
-        def cleanup_server():
-            self.client.delete_server(server_multi_nics['id'])
-            waiters.wait_for_server_termination(self.client,
-                                                server_multi_nics['id'])
-
-        self.addCleanup(cleanup_server)
+        self.addCleanup(self.delete_server, server_multi_nics['id'])
 
         addresses = (self.client.list_addresses(server_multi_nics['id'])
                      ['addresses'])
@@ -312,7 +300,7 @@
             self.validation_resources['keypair']['private_key'],
             server=server_no_eph_disk,
             servers_client=self.client)
-        partition_num = len(linux_client.get_partitions().split('\n'))
+        disks_num = len(linux_client.get_disks().split('\n'))
 
         # Explicit server deletion necessary for Juno compatibility
         self.client.delete_server(server_no_eph_disk['id'])
@@ -332,8 +320,8 @@
             self.validation_resources['keypair']['private_key'],
             server=server_with_eph_disk,
             servers_client=self.client)
-        partition_num_emph = len(linux_client.get_partitions().split('\n'))
-        self.assertEqual(partition_num + 1, partition_num_emph)
+        disks_num_eph = len(linux_client.get_disks().split('\n'))
+        self.assertEqual(disks_num + 1, disks_num_eph)
 
 
 class ServersTestManualDisk(ServersTestJSON):
diff --git a/tempest/api/compute/servers/test_device_tagging.py b/tempest/api/compute/servers/test_device_tagging.py
index 7252e1b..b2d5ae7 100644
--- a/tempest/api/compute/servers/test_device_tagging.py
+++ b/tempest/api/compute/servers/test_device_tagging.py
@@ -52,9 +52,7 @@
         super(DeviceTaggingTest, cls).setup_clients()
         cls.networks_client = cls.os.networks_client
         cls.ports_client = cls.os.ports_client
-        cls.volumes_client = cls.os.volumes_client
         cls.subnets_client = cls.os.subnets_client
-        cls.routers_client = cls.os.routers_client
         cls.interfaces_client = cls.os.interfaces_client
 
     @classmethod
diff --git a/tempest/api/compute/servers/test_list_servers_negative.py b/tempest/api/compute/servers/test_list_servers_negative.py
index 5a35b4e..3e408d2 100644
--- a/tempest/api/compute/servers/test_list_servers_negative.py
+++ b/tempest/api/compute/servers/test_list_servers_negative.py
@@ -40,14 +40,9 @@
             srv = cls.create_test_server(wait_until='ACTIVE')
             cls.existing_fixtures.append(srv)
 
-        srv = cls.create_test_server()
+        srv = cls.create_test_server(wait_until='ACTIVE')
         cls.client.delete_server(srv['id'])
-        # We ignore errors on termination because the server may
-        # be put into ERROR status on a quick spawn, then delete,
-        # as the compute node expects the instance local status
-        # to be spawning, not deleted. See LP Bug#1061167
-        waiters.wait_for_server_termination(cls.client, srv['id'],
-                                            ignore_error=True)
+        waiters.wait_for_server_termination(cls.client, srv['id'])
         cls.deleted_fixtures.append(srv)
 
     @test.attr(type=['negative'])
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index 56ec8c6..1edadef 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -111,9 +111,9 @@
                 server=server,
                 servers_client=self.servers_client)
 
-            partitions = linux_client.get_partitions()
-            device_name_to_match = ' ' + self.device + '\n'
-            self.assertIn(device_name_to_match, partitions)
+            disks = linux_client.get_disks()
+            device_name_to_match = '\n' + self.device + ' '
+            self.assertIn(device_name_to_match, disks)
 
         self._detach_volume(server['id'], attachment['volumeId'])
         self.servers_client.stop_server(server['id'])
@@ -133,8 +133,8 @@
                 server=server,
                 servers_client=self.servers_client)
 
-            partitions = linux_client.get_partitions()
-            self.assertNotIn(device_name_to_match, partitions)
+            disks = linux_client.get_disks()
+            self.assertNotIn(device_name_to_match, disks)
 
     @test.idempotent_id('7fa563fe-f0f7-43eb-9e22-a1ece036b513')
     def test_list_get_volume_attachments(self):
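
The match pattern above follows the new output format: /proc/partitions listed the device name as the last column (hence the old ' <device>\n' match), while the lsblk-based get_disks() puts the device name at the start of each line, so the attached device is now matched as '\n' + device + ' '. A minimal sketch of that match against lsblk-shaped output (the sample lines below are illustrative only):

    disks = ("NAME MAJ:MIN RM        SIZE RO TYPE MOUNTPOINT\n"
             "vda    253:0  0 42949672960  0 disk\n"
             "vdb    253:16 0  1073741824  0 disk")
    device = 'vdb'
    assert '\n' + device + ' ' in disks      # the attached volume shows up
    assert '\n' + 'vdc' + ' ' not in disks   # an unknown device does not
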
diff --git a/tempest/api/compute/volumes/test_attach_volume_negative.py b/tempest/api/compute/volumes/test_attach_volume_negative.py
index b7fa0fe..1f18bfe 100644
--- a/tempest/api/compute/volumes/test_attach_volume_negative.py
+++ b/tempest/api/compute/volumes/test_attach_volume_negative.py
@@ -29,6 +29,7 @@
             skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
             raise cls.skipException(skip_msg)
 
+    @test.related_bug('1630783', status_code=500)
     @test.idempotent_id('a313b5cd-fbd0-49cc-94de-870e99f763c7')
     def test_delete_attached_volume(self):
         server = self.create_test_server(wait_until='ACTIVE')
diff --git a/tempest/api/compute/volumes/test_volumes_list.py b/tempest/api/compute/volumes/test_volumes_list.py
index e4e625b..82cc653 100644
--- a/tempest/api/compute/volumes/test_volumes_list.py
+++ b/tempest/api/compute/volumes/test_volumes_list.py
@@ -13,14 +13,11 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_log import log as logging
-
 from tempest.api.compute import base
 from tempest import config
 from tempest import test
 
 CONF = config.CONF
-LOG = logging.getLogger(__name__)
 
 
 class VolumesTestJSON(base.BaseV2ComputeTest):
diff --git a/tempest/api/network/test_service_providers.py b/tempest/api/network/test_service_providers.py
index 09fb5fe..be17b3e 100644
--- a/tempest/api/network/test_service_providers.py
+++ b/tempest/api/network/test_service_providers.py
@@ -10,6 +10,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import testtools
+
 from tempest.api.network import base
 from tempest import test
 
@@ -17,6 +19,9 @@
 class ServiceProvidersTest(base.BaseNetworkTest):
 
     @test.idempotent_id('2cbbeea9-f010-40f6-8df5-4eaa0c918ea6')
+    @testtools.skipUnless(
+        test.is_extension_enabled('service-type', 'network'),
+        'service-type extension not enabled.')
     def test_service_providers_list(self):
         body = self.service_providers_client.list_service_providers()
         self.assertIn('service_providers', body)
diff --git a/tempest/api/volume/admin/test_volumes_actions.py b/tempest/api/volume/admin/test_volumes_actions.py
index 261e652..a63cbf0 100644
--- a/tempest/api/volume/admin/test_volumes_actions.py
+++ b/tempest/api/volume/admin/test_volumes_actions.py
@@ -14,27 +14,11 @@
 #    under the License.
 
 from tempest.api.volume import base
-from tempest import config
 from tempest import test
 
-CONF = config.CONF
-
 
 class VolumesActionsV2Test(base.BaseVolumeAdminTest):
 
-    @classmethod
-    def resource_setup(cls):
-        super(VolumesActionsV2Test, cls).resource_setup()
-
-        # Create a test shared volume for tests
-        cls.volume = cls.create_volume()
-
-    def tearDown(self):
-        # Set volume's status to available after test
-        self.admin_volume_client.reset_volume_status(
-            self.volume['id'], status='available')
-        super(VolumesActionsV2Test, self).tearDown()
-
     def _create_reset_and_force_delete_temp_volume(self, status=None):
         # Create volume, reset volume status, and force delete temp volume
         temp_volume = self.create_volume()
@@ -47,11 +31,13 @@
     @test.idempotent_id('d063f96e-a2e0-4f34-8b8a-395c42de1845')
     def test_volume_reset_status(self):
         # test volume reset status : available->error->available
-        self.admin_volume_client.reset_volume_status(
-            self.volume['id'], status='error')
-        volume_get = self.admin_volume_client.show_volume(
-            self.volume['id'])['volume']
-        self.assertEqual('error', volume_get['status'])
+        volume = self.create_volume()
+        for status in ['error', 'available']:
+            self.admin_volume_client.reset_volume_status(
+                volume['id'], status=status)
+            volume_get = self.admin_volume_client.show_volume(
+                volume['id'])['volume']
+            self.assertEqual(status, volume_get['status'])
 
     @test.idempotent_id('21737d5a-92f2-46d7-b009-a0cc0ee7a570')
     def test_volume_force_delete_when_volume_is_creating(self):
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 7cd72e1..9f522bd 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -181,7 +181,7 @@
         self.addCleanup(waiters.wait_for_volume_status, self.volumes_client,
                         volume_id, 'available')
         self.addCleanup(self.servers_client.detach_volume, server_id,
-                        self.volume_origin['id'])
+                        volume_id)
 
     @classmethod
     def clear_volumes(cls):
diff --git a/tempest/api/volume/test_volumes_backup.py b/tempest/api/volume/test_volumes_backup.py
index 972dd58..0091027 100644
--- a/tempest/api/volume/test_volumes_backup.py
+++ b/tempest/api/volume/test_volumes_backup.py
@@ -38,8 +38,10 @@
                         volume['id'])
         backup_name = data_utils.rand_name(
             self.__class__.__name__ + '-Backup')
+        description = data_utils.rand_name("volume-backup-description")
         backup = self.create_backup(volume_id=volume['id'],
-                                    name=backup_name)
+                                    name=backup_name,
+                                    description=description)
         self.assertEqual(backup_name, backup['name'])
         waiters.wait_for_volume_status(self.volumes_client,
                                        volume['id'], 'available')
@@ -47,6 +49,7 @@
         # Get a given backup
         backup = self.backups_client.show_backup(backup['id'])['backup']
         self.assertEqual(backup_name, backup['name'])
+        self.assertEqual(description, backup['description'])
 
         # Get all backups with detail
         backups = self.backups_client.list_backups(
diff --git a/tempest/common/utils/linux/remote_client.py b/tempest/common/utils/linux/remote_client.py
index 9ec217f..d8993bb 100644
--- a/tempest/common/utils/linux/remote_client.py
+++ b/tempest/common/utils/linux/remote_client.py
@@ -112,11 +112,22 @@
         output = self.exec_command('grep -c ^processor /proc/cpuinfo')
         return int(output)
 
-    def get_partitions(self):
-        # Return the contents of /proc/partitions
-        command = 'cat /proc/partitions'
+    def get_disks(self):
+        # Select root disk devices as shown by lsblk
+        command = 'lsblk -lb --nodeps'
         output = self.exec_command(command)
-        return output
+        selected = []
+        pos = None
+        for l in output.splitlines():
+            if pos is None and l.find("TYPE") > 0:
+                pos = l.find("TYPE")
+                # Show header line too
+                selected.append(l)
+            # lsblk lists disk type in a column right-aligned with TYPE
+            elif pos > 0 and l[pos:pos + 4] == "disk":
+                selected.append(l)
+
+        return "\n".join(selected)
 
     def get_boot_time(self):
         cmd = 'cut -f1 -d. /proc/uptime'
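
get_disks() keeps only the lsblk lines whose TYPE column reads "disk", using the header line to locate that column. Below is a minimal standalone sketch mirroring that filtering (the helper name is illustrative); run against the lsblk sample in the unit test further down in this change, it keeps the header plus the 'sda'/'sdb' lines and drops the 'sr0' rom device:

    def _select_disks(lsblk_output):
        selected, pos = [], None
        for line in lsblk_output.splitlines():
            if pos is None and "TYPE" in line:
                pos = line.find("TYPE")
                selected.append(line)          # keep the header line
            elif pos is not None and line[pos:pos + 4] == "disk":
                selected.append(line)          # keep top-level disk devices
        return "\n".join(selected)
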
diff --git a/tempest/config.py b/tempest/config.py
index ee13abc..9e03b7f 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -422,10 +422,16 @@
                 default=['all'],
                 help="A list of enabled filters that nova will accept as hints"
                      " to the scheduler when creating a server. A special "
-                     "entry 'all' indicates all filters are enabled. Empty "
-                     "list indicates all filters are disabled. The full "
-                     "available list of filters is in nova.conf: "
-                     "DEFAULT.scheduler_available_filters"),
+                     "entry 'all' indicates that all filters included with "
+                     "nova are enabled. An empty list indicates that all "
+                     "filters are disabled. The full list of available "
+                     "filters is in nova.conf: "
+                     "DEFAULT.scheduler_available_filters. If the default "
+                     "value is overridden in nova.conf by the test "
+                     "environment (meaning a different set of filters is "
+                     "enabled than what nova includes by default), then "
+                     "this option must be configured to contain the same "
+                     "filters that nova uses in the test environment."),
     cfg.BoolOpt('swap_volume',
                 default=False,
                 help='Does the test environment support in-place swapping of '
diff --git a/tempest/lib/services/compute/flavors_client.py b/tempest/lib/services/compute/flavors_client.py
index 4d1044b..a83c68b 100644
--- a/tempest/lib/services/compute/flavors_client.py
+++ b/tempest/lib/services/compute/flavors_client.py
@@ -67,9 +67,9 @@
         API reference:
         http://developer.openstack.org/api-ref-compute-v2.1.html#createFlavor
         """
-        if kwargs.get('ephemeral'):
+        if 'ephemeral' in kwargs:
             kwargs['OS-FLV-EXT-DATA:ephemeral'] = kwargs.pop('ephemeral')
-        if kwargs.get('is_public'):
+        if 'is_public' in kwargs:
             kwargs['os-flavor-access:is_public'] = kwargs.pop('is_public')
 
         post_body = json.dumps({'flavor': kwargs})
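
The switch from kwargs.get() to membership tests above matters for falsy values: ephemeral=0 and is_public=False are valid flavor settings, and a truthiness check silently skips renaming them to the extension-prefixed keys nova expects. A minimal standalone sketch of the corrected renaming (the helper name is illustrative, not part of the client):

    def _rename_flavor_keys(kwargs):
        # Presence, not truthiness: ephemeral=0 and is_public=False must
        # still be translated to the extension-prefixed keys.
        if 'ephemeral' in kwargs:
            kwargs['OS-FLV-EXT-DATA:ephemeral'] = kwargs.pop('ephemeral')
        if 'is_public' in kwargs:
            kwargs['os-flavor-access:is_public'] = kwargs.pop('is_public')
        return kwargs

    assert _rename_flavor_keys({'ephemeral': 0}) == {
        'OS-FLV-EXT-DATA:ephemeral': 0}
    assert _rename_flavor_keys({'is_public': False}) == {
        'os-flavor-access:is_public': False}
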
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index f2f17d2..c454ae2 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -66,10 +66,10 @@
         waiters.wait_for_server_status(self.servers_client,
                                        server['id'], 'ACTIVE')
 
-    def check_partitions(self):
+    def check_disks(self):
         # NOTE(andreaf) The device name may be different on different guest OS
-        partitions = self.linux_client.get_partitions()
-        self.assertEqual(1, partitions.count(CONF.compute.volume_device_name))
+        disks = self.linux_client.get_disks()
+        self.assertEqual(1, disks.count(CONF.compute.volume_device_name))
 
     def create_and_add_security_group_to_server(self, server):
         secgroup = self._create_security_group()
@@ -145,7 +145,7 @@
         self.linux_client = self.get_remote_client(
             floating_ip['ip'], private_key=keypair['private_key'])
 
-        self.check_partitions()
+        self.check_disks()
 
         # delete the floating IP, this should refresh the server addresses
         self.compute_floating_ips_client.delete_floating_ip(floating_ip['id'])
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index 0f2c78c..dff00e7 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -85,9 +85,9 @@
         ssh = self.get_remote_client(ip_address, private_key=private_key)
 
         def _func():
-            part = ssh.get_partitions()
-            LOG.debug("Partitions:%s" % part)
-            return CONF.compute.volume_device_name in part
+            disks = ssh.get_disks()
+            LOG.debug("Disks: %s" % disks)
+            return CONF.compute.volume_device_name in disks
 
         if not test_utils.call_until_true(_func,
                                           CONF.compute.build_timeout,
diff --git a/tempest/test.py b/tempest/test.py
index 35709ae..4cc2a2b 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -140,8 +140,38 @@
     return False
 
 
+def related_bug(bug, status_code=None):
+    """A decorator useful to know solutions from launchpad bug reports
+
+    @param bug: The launchpad bug number causing the test
+    @param status_code: The status code related to the bug report
+    """
+    def decorator(f):
+        @functools.wraps(f)
+        def wrapper(self, *func_args, **func_kwargs):
+            try:
+                return f(self, *func_args, **func_kwargs)
+            except Exception as exc:
+                exc_status_code = getattr(exc, 'status_code', None)
+                if status_code is None or status_code == exc_status_code:
+                    LOG.error('Hint: This test was made for bug %s. '
+                              'The failure could be related to '
+                              'https://launchpad.net/bugs/%s' % (bug, bug))
+                raise exc
+        return wrapper
+    return decorator
+
+
 def is_scheduler_filter_enabled(filter_name):
-    """Check the list of enabled compute scheduler filters from config. """
+    """Check the list of enabled compute scheduler filters from config.
+
+    This function checks whether the given compute scheduler filter is
+    enabled in the config file. If the scheduler_available_filters option
+    is set to 'all' in tempest.conf (the default value, meaning nova runs
+    with its default filters), this function returns True on the assumption
+    that the requested 'filter_name' is one of the filters shipped with
+    nova ("nova.scheduler.filters.all_filters").
+    """
 
     filters = CONF.compute_feature_enabled.scheduler_available_filters
     if len(filters) == 0:
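
For reference, the scheduler_available_filters values described above behave as follows; this is a minimal sketch of the assumed semantics (the helper is illustrative, not the tempest function itself, whose remaining body is outside this hunk):

    def _filter_enabled(filter_name, configured_filters):
        if len(configured_filters) == 0:    # empty list: all filters disabled
            return False
        if 'all' in configured_filters:     # nova's bundled filters in use
            return True
        return filter_name in configured_filters

    assert _filter_enabled('RetryFilter', ['all'])
    assert not _filter_enabled('RetryFilter', [])
    assert _filter_enabled('RetryFilter', ['RetryFilter', 'ComputeFilter'])
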
diff --git a/tempest/tests/common/utils/linux/test_remote_client.py b/tempest/tests/common/utils/linux/test_remote_client.py
index e59e08f..e4f4c04 100644
--- a/tempest/tests/common/utils/linux/test_remote_client.py
+++ b/tempest/tests/common/utils/linux/test_remote_client.py
@@ -107,13 +107,20 @@
         self.assertEqual(self.conn.get_number_of_vcpus(), 16)
         self._assert_exec_called_with('grep -c ^processor /proc/cpuinfo')
 
-    def test_get_partitions(self):
-        proc_partitions = """major minor  #blocks  name
+    def test_get_disks(self):
+        output_lsblk = """\
+NAME       MAJ:MIN    RM          SIZE RO TYPE MOUNTPOINT
+sda          8:0       0  128035676160  0 disk
+sdb          8:16      0 1000204886016  0 disk
+sr0         11:0       1    1073741312  0 rom"""
+        result = """\
+NAME       MAJ:MIN    RM          SIZE RO TYPE MOUNTPOINT
+sda          8:0       0  128035676160  0 disk
+sdb          8:16      0 1000204886016  0 disk"""
 
-8        0  1048576 vda"""
-        self.ssh_mock.mock.exec_command.return_value = proc_partitions
-        self.assertEqual(self.conn.get_partitions(), proc_partitions)
-        self._assert_exec_called_with('cat /proc/partitions')
+        self.ssh_mock.mock.exec_command.return_value = output_lsblk
+        self.assertEqual(self.conn.get_disks(), result)
+        self._assert_exec_called_with('lsblk -lb --nodeps')
 
     def test_get_boot_time(self):
         booted_at = 10000
diff --git a/tempest/tests/lib/services/image/v2/test_namespace_properties_client.py b/tempest/tests/lib/services/image/v2/test_namespace_properties_client.py
new file mode 100644
index 0000000..1d56db6
--- /dev/null
+++ b/tempest/tests/lib/services/image/v2/test_namespace_properties_client.py
@@ -0,0 +1,191 @@
+# Copyright 2016 EasyStack.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.services.image.v2 import namespace_properties_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestNamespacePropertiesClient(base.BaseServiceTest):
+    FAKE_CREATE_SHOW_NAMESPACE_PROPERTY = {
+        "description": "property",
+        "enum": ["xen", "qemu", "kvm", "lxc", "uml", "vmware", "hyperv"],
+        "name": "OS::Glance::Image",
+        "title": "Hypervisor Type",
+        "type": "string"
+    }
+
+    FAKE_LIST_NAMESPACE_PROPERTY = {
+        "properties": {
+            "hw_disk_bus": {
+                "description": "property.",
+                "enum": ["scsi", "virtio", "uml", "xen", "ide", "usb"],
+                "title": "Disk Bus",
+                "type": "string"
+            },
+            "hw_machine_type": {
+                "description": "desc.",
+                "title": "Machine Type",
+                "type": "string"
+            },
+            "hw_qemu_guest_agent": {
+                "description": "desc.",
+                "enum": [
+                    "yes",
+                    "no"
+                ],
+                "title": "QEMU Guest Agent",
+                "type": "string"
+            },
+            "hw_rng_model": {
+                "default": "virtio",
+                "description": "desc",
+                "title": "Random Number Generator Device",
+                "type": "string"
+            },
+            "hw_scsi_model": {
+                "default": "virtio-scsi",
+                "description": "desc.",
+                "title": "SCSI Model",
+                "type": "string"
+            },
+            "hw_video_model": {
+                "description": "The video image driver used.",
+                "enum": [
+                    "vga",
+                    "cirrus",
+                    "vmvga",
+                    "xen",
+                    "qxl"
+                ],
+                "title": "Video Model",
+                "type": "string"
+            },
+            "hw_video_ram": {
+                "description": "desc.",
+                "title": "Max Video Ram",
+                "type": "integer"
+            },
+            "hw_vif_model": {
+                "description": "desc.",
+                "enum": ["e1000",
+                         "ne2k_pci",
+                         "pcnet",
+                         "rtl8139",
+                         "virtio",
+                         "e1000",
+                         "e1000e",
+                         "VirtualE1000",
+                         "VirtualE1000e",
+                         "VirtualPCNet32",
+                         "VirtualSriovEthernetCard",
+                         "VirtualVmxnet",
+                         "netfront",
+                         "ne2k_pci"
+                         ],
+                "title": "Virtual Network Interface",
+                "type": "string"
+            },
+            "os_command_line": {
+                "description": "desc.",
+                "title": "Kernel Command Line",
+                "type": "string"
+            }
+        }
+    }
+
+    FAKE_UPDATE_NAMESPACE_PROPERTY = {
+        "description": "property",
+        "enum": ["xen", "qemu", "kvm", "lxc", "uml", "vmware", "hyperv"],
+        "name": "OS::Glance::Image",
+        "title": "update Hypervisor Type",
+        "type": "string"
+    }
+
+    def setUp(self):
+        super(TestNamespacePropertiesClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = namespace_properties_client.NamespacePropertiesClient(
+            fake_auth, 'image', 'regionOne')
+
+    def _test_create_namespace_property(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.create_namespace_property,
+            'tempest.lib.common.rest_client.RestClient.post',
+            self.FAKE_CREATE_SHOW_NAMESPACE_PROPERTY,
+            bytes_body, status=201,
+            namespace="OS::Compute::Hypervisor",
+            title="Hypervisor Type", name="OS::Glance::Image",
+            type="string",
+            enum=["xen", "qemu", "kvm", "lxc", "uml", "vmware", "hyperv"])
+
+    def _test_list_namespace_property(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_namespace_properties,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_LIST_NAMESPACE_PROPERTY,
+            bytes_body,
+            namespace="OS::Compute::Hypervisor")
+
+    def _test_show_namespace_property(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.show_namespace_properties,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_CREATE_SHOW_NAMESPACE_PROPERTY,
+            bytes_body,
+            namespace="OS::Compute::Hypervisor",
+            property_name="OS::Glance::Image")
+
+    def _test_update_namespace_property(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.update_namespace_properties,
+            'tempest.lib.common.rest_client.RestClient.put',
+            self.FAKE_UPDATE_NAMESPACE_PROPERTY,
+            bytes_body,
+            namespace="OS::Compute::Hypervisor",
+            property_name="OS::Glance::Image",
+            title="update Hypervisor Type", type="string",
+            enum=["xen", "qemu", "kvm", "lxc", "uml", "vmware", "hyperv"],
+            name="OS::Glance::Image")
+
+    def test_create_namespace_property_with_str_body(self):
+        self._test_create_namespace_property()
+
+    def test_create_namespace_property_with_bytes_body(self):
+        self._test_create_namespace_property(bytes_body=True)
+
+    def test_list_namespace_property_with_str_body(self):
+        self._test_list_namespace_property()
+
+    def test_list_namespace_property_with_bytes_body(self):
+        self._test_list_namespace_property(bytes_body=True)
+
+    def test_show_namespace_property_with_str_body(self):
+        self._test_show_namespace_property()
+
+    def test_show_namespace_property_with_bytes_body(self):
+        self._test_show_namespace_property(bytes_body=True)
+
+    def test_update_namespace_property_with_str_body(self):
+        self._test_update_namespace_property()
+
+    def test_update_namespace_property_with_bytes_body(self):
+        self._test_update_namespace_property(bytes_body=True)
+
+    def test_delete_namespace(self):
+        self.check_service_client_function(
+            self.client.delete_namespace_property,
+            'tempest.lib.common.rest_client.RestClient.delete',
+            {}, namespace="OS::Compute::Hypervisor",
+            property_name="OS::Glance::Image", status=204)