Merge "Updating the paramiko version to 2.7.0 or greater"
diff --git a/releasenotes/notes/tempest-victoria-release-27000c02edc5a112.yaml b/releasenotes/notes/tempest-victoria-release-27000c02edc5a112.yaml
new file mode 100644
index 0000000..bb91213
--- /dev/null
+++ b/releasenotes/notes/tempest-victoria-release-27000c02edc5a112.yaml
@@ -0,0 +1,16 @@
+prelude: |
+    This release tags Tempest for the OpenStack Victoria release.
+    This release marks the start of Victoria release support in Tempest.
+    After this release, Tempest will support the following OpenStack releases:
+
+    * Victoria
+    * Ussuri
+    * Train
+    * Stein
+
+    Current development of Tempest is for the OpenStack Wallaby development
+    cycle. Every Tempest commit is also tested against master during
+    the Wallaby cycle. However, this does not necessarily mean that using
+    Tempest as of this tag will work against a Victoria (or future release)
+    cloud.
+    To be on the safe side, use this tag to test the OpenStack Victoria release.
diff --git a/roles/process-stackviz/README.rst b/roles/process-stackviz/README.rst
deleted file mode 100644
index a8447d2..0000000
--- a/roles/process-stackviz/README.rst
+++ /dev/null
@@ -1,22 +0,0 @@
-Generate stackviz report.
-
-Generate stackviz report using subunit and dstat data, using
-the stackviz archive embedded in test images.
-
-**Role Variables**
-
-.. zuul:rolevar:: devstack_base_dir
-   :default: /opt/stack
-
-   The devstack base directory.
-
-.. zuul:rolevar:: stage_dir
-   :default: "{{ ansible_user_dir }}"
-
-   The stage directory where the input data can be found and
-   the output will be produced.
-
-.. zuul:rolevar:: zuul_work_dir
-   :default: {{ devstack_base_dir }}/tempest
-
-   Directory to work in. It has to be a fully qualified path.
diff --git a/roles/process-stackviz/defaults/main.yaml b/roles/process-stackviz/defaults/main.yaml
deleted file mode 100644
index f3bc32b..0000000
--- a/roles/process-stackviz/defaults/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-devstack_base_dir: /opt/stack
-stage_dir: "{{ ansible_user_dir }}"
-zuul_work_dir: "{{ devstack_base_dir }}/tempest"
diff --git a/roles/process-stackviz/tasks/main.yaml b/roles/process-stackviz/tasks/main.yaml
deleted file mode 100644
index e3a0a0e..0000000
--- a/roles/process-stackviz/tasks/main.yaml
+++ /dev/null
@@ -1,70 +0,0 @@
-- name: Check if stackviz archive exists
-  stat:
-    path: "/opt/cache/files/stackviz-latest.tar.gz"
-  register: stackviz_archive
-
-- debug:
-    msg: "Stackviz archive could not be found in /opt/cache/files/stackviz-latest.tar.gz"
-  when: not stackviz_archive.stat.exists
-
-- name: Check if subunit data exists
-  stat:
-    path: "{{ zuul_work_dir }}/testrepository.subunit"
-  register: subunit_input
-
-- debug:
-    msg: "Subunit file could not be found at {{ zuul_work_dir }}/testrepository.subunit"
-  when: not subunit_input.stat.exists
-
-- name: Install stackviz
-  when:
-    - stackviz_archive.stat.exists
-    - subunit_input.stat.exists
-  block:
-    - include_role:
-        name: ensure-pip
-
-    - pip:
-        name: "file://{{ stackviz_archive.stat.path }}"
-        virtualenv: /tmp/stackviz
-        virtualenv_command: '{{ ensure_pip_virtualenv_command }}'
-        extra_args: -U
-
-- name: Deploy stackviz static html+js
-  command: cp -pR /tmp/stackviz/share/stackviz-html {{ stage_dir }}/stackviz
-  when:
-    - stackviz_archive.stat.exists
-    - subunit_input.stat.exists
-
-- name: Check if dstat data exists
-  stat:
-    path: "{{ devstack_base_dir }}/logs/dstat-csv.log"
-  register: dstat_input
-  when:
-    - stackviz_archive.stat.exists
-    - subunit_input.stat.exists
-
-- name: Run stackviz with dstat
-  shell: |
-    cat {{ subunit_input.stat.path }} | \
-      /tmp/stackviz/bin/stackviz-export \
-        --dstat "{{ devstack_base_dir }}/logs/dstat-csv.log" \
-        --env --stdin \
-        {{ stage_dir }}/stackviz/data
-  when:
-    - stackviz_archive.stat.exists
-    - subunit_input.stat.exists
-    - dstat_input.stat.exists
-  failed_when: False
-
-- name: Run stackviz without dstat
-  shell: |
-    cat {{ subunit_input.stat.path }} | \
-      /tmp/stackviz/bin/stackviz-export \
-        --env --stdin \
-        {{ stage_dir }}/stackviz/data
-  when:
-    - stackviz_archive.stat.exists
-    - subunit_input.stat.exists
-    - not dstat_input.stat.exists
-  failed_when: False
diff --git a/tempest/api/compute/admin/test_instance_usage_audit_log.py b/tempest/api/compute/admin/test_instance_usage_audit_log.py
index 1b62249..4dcbb3b 100644
--- a/tempest/api/compute/admin/test_instance_usage_audit_log.py
+++ b/tempest/api/compute/admin/test_instance_usage_audit_log.py
@@ -22,6 +22,7 @@
 
 
 class InstanceUsageAuditLogTestJSON(base.BaseV2ComputeAdminTest):
+    """Test instance usage audit logs API"""
 
     @classmethod
     def setup_clients(cls):
@@ -30,12 +31,12 @@
 
     @decorators.idempotent_id('25319919-33d9-424f-9f99-2c203ee48b9d')
     def test_list_instance_usage_audit_logs(self):
-        # list instance usage audit logs
+        """Test listing instance usage audit logs"""
         self.adm_client.list_instance_usage_audit_logs()
 
     @decorators.idempotent_id('6e40459d-7c5f-400b-9e83-449fbc8e7feb')
     def test_get_instance_usage_audit_log(self):
-        # Get instance usage audit log before specified time
+        """Test getting instance usage audit log before specified time"""
         now = datetime.datetime.now()
         self.adm_client.show_instance_usage_audit_log(
             urllib.quote(now.strftime("%Y-%m-%d %H:%M:%S")))
diff --git a/tempest/api/compute/admin/test_instance_usage_audit_log_negative.py b/tempest/api/compute/admin/test_instance_usage_audit_log_negative.py
index de8e221..84d18c4 100644
--- a/tempest/api/compute/admin/test_instance_usage_audit_log_negative.py
+++ b/tempest/api/compute/admin/test_instance_usage_audit_log_negative.py
@@ -23,6 +23,7 @@
 
 
 class InstanceUsageAuditLogNegativeTestJSON(base.BaseV2ComputeAdminTest):
+    """Negative tests of instance usage audit logs"""
 
     @classmethod
     def setup_clients(cls):
@@ -32,7 +33,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('a9d33178-d2c9-4131-ad3b-f4ca8d0308a2')
     def test_instance_usage_audit_logs_with_nonadmin_user(self):
-        # the instance_usage_audit_logs API just can be accessed by admin user
+        """Test list/show instance usage audit logs by non-admin should fail
+
+        Only admin users can access the instance_usage_audit_logs API.
+        """
         self.assertRaises(lib_exc.Forbidden,
                           self.instance_usages_audit_log_client.
                           list_instance_usage_audit_logs)
@@ -45,6 +49,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('9b952047-3641-41c7-ba91-a809fc5974c8')
     def test_get_instance_usage_audit_logs_with_invalid_time(self):
+        """Test showing instance usage audit logs with invalid time
+
+        Showing instance usage audit logs with invalid time should fail.
+        """
         self.assertRaises(lib_exc.BadRequest,
                           self.adm_client.show_instance_usage_audit_log,
                           "invalid_time")
diff --git a/tempest/api/compute/admin/test_security_groups.py b/tempest/api/compute/admin/test_security_groups.py
index dfa801b..f0a6a84 100644
--- a/tempest/api/compute/admin/test_security_groups.py
+++ b/tempest/api/compute/admin/test_security_groups.py
@@ -20,6 +20,12 @@
 
 
 class SecurityGroupsTestAdminJSON(base.BaseV2ComputeAdminTest):
+    """Test security groups API that requires admin privilege
+
+    Test security groups API that requires admin privilege with compute
+    microversion less than 2.36
+    """
+
     max_microversion = '2.35'
 
     @classmethod
@@ -37,7 +43,17 @@
     @decorators.idempotent_id('49667619-5af9-4c63-ab5d-2cfdd1c8f7f1')
     @utils.services('network')
     def test_list_security_groups_list_all_tenants_filter(self):
-        # Admin can list security groups of all tenants
+        """Test listing security groups with all_tenants filter
+
+        1. Create two security groups for non-admin user
+        2. Create two security groups for admin user
+        3. Fetch all security groups based on 'all_tenants' search filter by
+           admin, check that all four created security groups are present in
+           the fetched list
+        4. Fetch all security groups based on 'all_tenants' search filter by
+           non-admin, check that only the two security groups created by the
+           non-admin user are present in the fetched list
+        """
         # List of all security groups created
         security_group_list = []
         # Create two security groups for a non-admin tenant
diff --git a/tempest/api/compute/admin/test_server_diagnostics.py b/tempest/api/compute/admin/test_server_diagnostics.py
index 005efdd..d855a62 100644
--- a/tempest/api/compute/admin/test_server_diagnostics.py
+++ b/tempest/api/compute/admin/test_server_diagnostics.py
@@ -19,6 +19,8 @@
 
 
 class ServerDiagnosticsTest(base.BaseV2ComputeAdminTest):
+    """Test server diagnostics with compute microversion less than 2.48"""
+
     min_microversion = None
     max_microversion = '2.47'
 
@@ -29,6 +31,7 @@
 
     @decorators.idempotent_id('31ff3486-b8a0-4f56-a6c0-aab460531db3')
     def test_get_server_diagnostics(self):
+        """Test getting server diagnostics"""
         server_id = self.create_test_server(wait_until='ACTIVE')['id']
         diagnostics = self.client.show_server_diagnostics(server_id)
 
@@ -41,6 +44,8 @@
 
 
 class ServerDiagnosticsV248Test(base.BaseV2ComputeAdminTest):
+    """Test server diagnostics with compute microversion greater than 2.47"""
+
     min_microversion = '2.48'
     max_microversion = 'latest'
 
@@ -51,6 +56,7 @@
 
     @decorators.idempotent_id('64d0d48c-dff1-11e6-bf01-fe55135034f3')
     def test_get_server_diagnostics(self):
+        """Test getting server diagnostics"""
         server_id = self.create_test_server(wait_until='ACTIVE')['id']
         # Response status and filed types will be checked by json schema
         self.client.show_server_diagnostics(server_id)
diff --git a/tempest/api/compute/admin/test_volume_swap.py b/tempest/api/compute/admin/test_volume_swap.py
index edcb1a7..c1236a7 100644
--- a/tempest/api/compute/admin/test_volume_swap.py
+++ b/tempest/api/compute/admin/test_volume_swap.py
@@ -68,21 +68,7 @@
 
 
 class TestVolumeSwap(TestVolumeSwapBase):
-    """The test suite for swapping of volume with admin user.
-
-    The following is the scenario outline:
-
-    1. Create a volume "volume1" with non-admin.
-    2. Create a volume "volume2" with non-admin.
-    3. Boot an instance "instance1" with non-admin.
-    4. Attach "volume1" to "instance1" with non-admin.
-    5. Swap volume from "volume1" to "volume2" as admin.
-    6. Check the swap volume is successful and "volume2"
-       is attached to "instance1" and "volume1" is in available state.
-    7. Swap volume from "volume2" to "volume1" as admin.
-    8. Check the swap volume is successful and "volume1"
-       is attached to "instance1" and "volume2" is in available state.
-    """
+    """The test suite for swapping of volume with admin user"""
 
     # NOTE(mriedem): This is an uncommon scenario to call the compute API
     # to swap volumes directly; swap volume is primarily only for volume
@@ -92,6 +78,21 @@
     @decorators.idempotent_id('1769f00d-a693-4d67-a631-6a3496773813')
     @utils.services('volume')
     def test_volume_swap(self):
+        """Test swapping of volume attached to server with admin user
+
+        The following is the scenario outline:
+
+        1. Create a volume "volume1" with non-admin.
+        2. Create a volume "volume2" with non-admin.
+        3. Boot an instance "instance1" with non-admin.
+        4. Attach "volume1" to "instance1" with non-admin.
+        5. Swap volume from "volume1" to "volume2" as admin.
+        6. Check the volume swap is successful and "volume2"
+           is attached to "instance1" and "volume1" is in available state.
+        7. Swap volume from "volume2" to "volume1" as admin.
+        8. Check the volume swap is successful and "volume1"
+           is attached to "instance1" and "volume2" is in available state.
+        """
         # Create two volumes.
         # NOTE(gmann): Volumes are created before server creation so that
         # volumes cleanup can happen successfully irrespective of which volume
@@ -134,6 +135,12 @@
 
 
 class TestMultiAttachVolumeSwap(TestVolumeSwapBase):
+    """Test swapping volume attached to multiple servers
+
+    Test swapping volume attached to multiple servers with microversion
+    greater than 2.59
+    """
+
     min_microversion = '2.60'
     max_microversion = 'latest'
 
@@ -164,6 +171,20 @@
                              condition=CONF.compute.min_compute_nodes > 1)
     @utils.services('volume')
     def test_volume_swap_with_multiattach(self):
+        """Test swapping volume attached to multiple servers
+
+        The following is the scenario outline:
+
+        1. Create a volume "volume1" with non-admin.
+        2. Create a volume "volume2" with non-admin.
+        3. Boot 2 instances "server1" and "server2" with non-admin.
+        4. Attach "volume1" to "server1" with non-admin.
+        5. Attach "volume1" to "server2" with non-admin.
+        6. Swap "volume1" to "volume2" on "server1"
+        7. Check "volume1" is attached to "server2" and not attached to
+           "server1"
+        8. Check "volume2" is attached to "server1".
+        """
         # Create two volumes.
         # NOTE(gmann): Volumes are created before server creation so that
         # volumes cleanup can happen successfully irrespective of which volume
diff --git a/tempest/api/compute/admin/test_volumes_negative.py b/tempest/api/compute/admin/test_volumes_negative.py
index 7b0f48b..10d522b 100644
--- a/tempest/api/compute/admin/test_volumes_negative.py
+++ b/tempest/api/compute/admin/test_volumes_negative.py
@@ -23,6 +23,8 @@
 
 
 class VolumesAdminNegativeTest(base.BaseV2ComputeAdminTest):
+    """Negative tests of volume swapping"""
+
     create_default_network = True
 
     @classmethod
@@ -40,6 +42,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('309b5ecd-0585-4a7e-a36f-d2b2bf55259d')
     def test_update_attached_volume_with_nonexistent_volume_in_uri(self):
+        """Test swapping non existent volume should fail"""
         volume = self.create_volume()
         nonexistent_volume = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
@@ -51,6 +54,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('7dcac15a-b107-46d3-a5f6-cb863f4e454a')
     def test_update_attached_volume_with_nonexistent_volume_in_body(self):
+        """Test swapping volume to a non existence volume should fail"""
         volume = self.create_volume()
         self.attach_volume(self.server, volume)
 
@@ -62,6 +66,12 @@
 
 
 class UpdateMultiattachVolumeNegativeTest(base.BaseV2ComputeAdminTest):
+    """Negative tests of swapping volume attached to multiple servers
+
+    Negative tests of swapping volume attached to multiple servers with
+    compute microversion greater than 2.59 and volume microversion greater
+    than 3.26
+    """
 
     min_microversion = '2.60'
     volume_min_microversion = '3.27'
@@ -76,7 +86,16 @@
     @decorators.idempotent_id('7576d497-b7c6-44bd-9cc5-c5b4e50fec71')
     @utils.services('volume')
     def test_multiattach_rw_volume_update_failure(self):
+        """Test swapping volume attached to multi-servers with read-write mode
 
+        1. Create two volumes "vol1" and "vol2"
+        2. Create two instances "server1" and "server2"
+        3. Attach "vol1" to both of these instances
+        4. By default both of these attachments should have an attach_mode of
+           read-write, so trying to swap "vol1" to "vol2" should fail
+        5. Check "vol1" is still attached to both servers
+        6. Check "vol2" is not attached to any server
+        """
         # Create two multiattach capable volumes.
         vol1 = self.create_volume(multiattach=True)
         vol2 = self.create_volume(multiattach=True)
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 74570ce..8b847fc 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -566,17 +566,19 @@
         # the state of the volume to change to available. This is so we don't
         # error out when trying to delete the volume during teardown.
         if volume['multiattach']:
+            att = waiters.wait_for_volume_attachment_create(
+                self.volumes_client, volume['id'], server['id'])
             self.addCleanup(waiters.wait_for_volume_attachment_remove,
                             self.volumes_client, volume['id'],
-                            attachment['id'])
+                            att['attachment_id'])
         else:
             self.addCleanup(waiters.wait_for_volume_resource_status,
                             self.volumes_client, volume['id'], 'available')
+            waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                    volume['id'], 'in-use')
         # Ignore 404s on detach in case the server is deleted or the volume
         # is already detached.
         self.addCleanup(self._detach_volume, server, volume)
-        waiters.wait_for_volume_resource_status(self.volumes_client,
-                                                volume['id'], 'in-use')
         return attachment
 
     def create_volume_snapshot(self, volume_id, name=None, description=None,
diff --git a/tempest/api/compute/flavors/test_flavors.py b/tempest/api/compute/flavors/test_flavors.py
index 58861a1..9ab75c5 100644
--- a/tempest/api/compute/flavors/test_flavors.py
+++ b/tempest/api/compute/flavors/test_flavors.py
@@ -28,6 +28,9 @@
         flavor = self.flavors_client.show_flavor(self.flavor_ref)['flavor']
         flavor_min_detail = {'id': flavor['id'], 'links': flavor['links'],
                              'name': flavor['name']}
+        # description field is added to the response of list_flavors in 2.55
+        if not self.is_requested_microversion_compatible('2.54'):
+            flavor_min_detail.update({'description': flavor['description']})
         self.assertIn(flavor_min_detail, flavors)
 
     @decorators.idempotent_id('6e85fde4-b3cd-4137-ab72-ed5f418e8c24')
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index b811421..23f8326 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -22,6 +22,7 @@
 
 
 class ImagesOneServerTestJSON(base.BaseV2ComputeTest):
+    """Test server images API"""
 
     @classmethod
     def resource_setup(cls):
@@ -54,6 +55,7 @@
 
     @decorators.idempotent_id('3731d080-d4c5-4872-b41a-64d0d0021314')
     def test_create_delete_image(self):
+        """Test create/delete server image"""
         if self.is_requested_microversion_compatible('2.35'):
             MIN_DISK = 'minDisk'
             MIN_RAM = 'minRam'
@@ -93,6 +95,7 @@
 
     @decorators.idempotent_id('3b7c6fe4-dfe7-477c-9243-b06359db51e6')
     def test_create_image_specify_multibyte_character_image_name(self):
+        """Test creating server image with multibyte character image name"""
         # prefix character is:
         # http://unicode.org/cldr/utility/character.jsp?a=20A1
 
diff --git a/tempest/api/compute/images/test_images_oneserver_negative.py b/tempest/api/compute/images/test_images_oneserver_negative.py
index 37f9be3..0296220 100644
--- a/tempest/api/compute/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/images/test_images_oneserver_negative.py
@@ -30,6 +30,8 @@
 
 
 class ImagesOneServerNegativeTestJSON(base.BaseV2ComputeTest):
+    """Negative tests of server images"""
+
     create_default_network = True
 
     def tearDown(self):
@@ -87,7 +89,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('55d1d38c-dd66-4933-9c8e-7d92aeb60ddc')
     def test_create_image_specify_invalid_metadata(self):
-        # Return an error when creating image with invalid metadata
+        """Test creating server image with invalid metadata should fail"""
         meta = {'': ''}
         self.assertRaises(lib_exc.BadRequest, self.create_image_from_server,
                           self.server_id, metadata=meta)
@@ -95,7 +97,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('3d24d11f-5366-4536-bd28-cff32b748eca')
     def test_create_image_specify_metadata_over_limits(self):
-        # Return an error when creating image with meta data over 255 chars
+        """Test creating server image with metadata over 255 should fail"""
         meta = {'a' * 256: 'b' * 256}
         self.assertRaises(lib_exc.BadRequest, self.create_image_from_server,
                           self.server_id, metadata=meta)
@@ -103,8 +105,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('0460efcf-ee88-4f94-acef-1bf658695456')
     def test_create_second_image_when_first_image_is_being_saved(self):
-        # Disallow creating another image when first image is being saved
+        """Test creating another server image when first image is being saved
 
+        Creating another server image while the first image is still being
+        saved is not allowed.
+        """
         # Create first snapshot
         image = self.create_image_from_server(self.server_id)
         self.addCleanup(self._reset_server)
@@ -123,8 +128,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('084f0cbc-500a-4963-8a4e-312905862581')
     def test_create_image_specify_name_over_character_limit(self):
-        # Return an error if snapshot name over 255 characters is passed
-
+        """Test creating server image with image name over 255 should fail"""
         snapshot_name = ('a' * 256)
         self.assertRaises(lib_exc.BadRequest,
                           self.compute_images_client.create_image,
@@ -133,8 +137,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('0894954d-2db2-4195-a45b-ffec0bc0187e')
     def test_delete_image_that_is_not_yet_active(self):
-        # Return an error while trying to delete an image what is creating
-
+        """Test deleting a non-active server image should fail"""
         image = self.create_image_from_server(self.server_id)
         if api_version_utils.compare_version_header_to_response(
             "OpenStack-API-Version", "compute 2.45", image.response, "lt"):
diff --git a/tempest/api/compute/images/test_list_image_filters.py b/tempest/api/compute/images/test_list_image_filters.py
index 2ac7de3..7930c67 100644
--- a/tempest/api/compute/images/test_list_image_filters.py
+++ b/tempest/api/compute/images/test_list_image_filters.py
@@ -31,6 +31,8 @@
 
 
 class ListImageFiltersTestJSON(base.BaseV2ComputeTest):
+    """Test listing server images with compute microversion less than 2.36"""
+
     max_microversion = '2.35'
 
     @classmethod
@@ -129,8 +131,11 @@
 
     @decorators.idempotent_id('a3f5b513-aeb3-42a9-b18e-f091ef73254d')
     def test_list_images_filter_by_status(self):
-        # The list of images should contain only images with the
-        # provided status
+        """Test listing server images filtered by image status
+
+        The list of images should contain only images with the
+        provided image status.
+        """
         params = {'status': 'ACTIVE'}
         images = self.client.list_images(**params)['images']
 
@@ -140,8 +145,11 @@
 
     @decorators.idempotent_id('33163b73-79f5-4d07-a7ea-9213bcc468ff')
     def test_list_images_filter_by_name(self):
-        # List of all images should contain the expected images filtered
-        # by name
+        """Test listing server images filtered by image name
+
+        The list of images should contain only images with the
+        provided image name.
+        """
         params = {'name': self.image1['name']}
         images = self.client.list_images(**params)['images']
 
@@ -153,7 +161,11 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
                           'Snapshotting is not available.')
     def test_list_images_filter_by_server_id(self):
-        # The images should contain images filtered by server id
+        """Test listing images filtered by server id
+
+        The list of images should contain only images with the
+        provided server id.
+        """
         params = {'server': self.server1['id']}
         images = self.client.list_images(**params)['images']
 
@@ -169,7 +181,11 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
                           'Snapshotting is not available.')
     def test_list_images_filter_by_server_ref(self):
-        # The list of servers should be filtered by server ref
+        """Test listing images filtered by server link href
+
+        The list of images should contain only images with the
+        provided server link href.
+        """
         server_links = self.server2['links']
 
         # Try all server link types
@@ -188,7 +204,11 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
                           'Snapshotting is not available.')
     def test_list_images_filter_by_type(self):
-        # The list of servers should be filtered by image type
+        """Test listing images filtered by image type
+
+        The list of images should contain only images with the
+        provided image type.
+        """
         params = {'type': 'snapshot'}
         images = self.client.list_images(**params)['images']
 
@@ -202,13 +222,22 @@
 
     @decorators.idempotent_id('3a484ca9-67ba-451e-b494-7fcf28d32d62')
     def test_list_images_limit_results(self):
-        # Verify only the expected number of results are returned
+        """Test listing images with limited count
+
+        If we use limit=1 when listing images, then only 1 image should be
+        returned.
+        """
         params = {'limit': '1'}
         images = self.client.list_images(**params)['images']
         self.assertEqual(1, len([x for x in images if 'id' in x]))
 
     @decorators.idempotent_id('18bac3ae-da27-436c-92a9-b22474d13aab')
     def test_list_images_filter_by_changes_since(self):
+        """Test listing images filtered by changes-since
+
+        The list of images should contain only images updated since the
+        provided changes-since value.
+        """
         # Verify only updated images are returned in the detailed list
 
         # Becoming ACTIVE will modify the updated time
@@ -220,8 +249,11 @@
 
     @decorators.idempotent_id('9b0ea018-6185-4f71-948a-a123a107988e')
     def test_list_images_with_detail_filter_by_status(self):
-        # Detailed list of all images should only contain images
-        # with the provided status
+        """Test listing server images details filtered by image status
+
+        The list of images should contain only images with the
+        provided image status.
+        """
         params = {'status': 'ACTIVE'}
         images = self.client.list_images(detail=True, **params)['images']
 
@@ -231,8 +263,11 @@
 
     @decorators.idempotent_id('644ea267-9bd9-4f3b-af9f-dffa02396a17')
     def test_list_images_with_detail_filter_by_name(self):
-        # Detailed list of all images should contain the expected
-        # images filtered by name
+        """Test listing server images details filtered by image name
+
+        The list of images should contain only images with the
+        provided image name.
+        """
         params = {'name': self.image1['name']}
         images = self.client.list_images(detail=True, **params)['images']
 
@@ -242,8 +277,11 @@
 
     @decorators.idempotent_id('ba2fa9a9-b672-47cc-b354-3b4c0600e2cb')
     def test_list_images_with_detail_limit_results(self):
-        # Verify only the expected number of results (with full details)
-        # are returned
+        """Test listing images details with limited count
+
+        If we use limit=1 when listing images with full details, then only 1
+        image should be returned.
+        """
         params = {'limit': '1'}
         images = self.client.list_images(detail=True, **params)['images']
         self.assertEqual(1, len(images))
@@ -252,7 +290,11 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
                           'Snapshotting is not available.')
     def test_list_images_with_detail_filter_by_server_ref(self):
-        # Detailed list of servers should be filtered by server ref
+        """Test listing images details filtered by server link href
+
+        The list of images should contain only images with the
+        provided server link href.
+        """
         server_links = self.server2['links']
 
         # Try all server link types
@@ -271,7 +313,11 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
                           'Snapshotting is not available.')
     def test_list_images_with_detail_filter_by_type(self):
-        # The detailed list of servers should be filtered by image type
+        """Test listing images details filtered by image type
+
+        The list of images should contain only images with the
+        provided image type.
+        """
         params = {'type': 'snapshot'}
         images = self.client.list_images(detail=True, **params)['images']
         self.client.show_image(self.image_ref)
@@ -286,8 +332,11 @@
 
     @decorators.idempotent_id('7d439e18-ac2e-4827-b049-7e18004712c4')
     def test_list_images_with_detail_filter_by_changes_since(self):
-        # Verify an update image is returned
+        """Test listing images details filtered by changes-since
 
+        The list of images should contain only images updated since the
+        provided changes-since value.
+        """
         # Becoming ACTIVE will modify the updated time
         # Filter by the image's created time
         params = {'changes-since': self.image1['created']}
diff --git a/tempest/api/compute/images/test_list_image_filters_negative.py b/tempest/api/compute/images/test_list_image_filters_negative.py
index 81c59f9..f77da4b 100644
--- a/tempest/api/compute/images/test_list_image_filters_negative.py
+++ b/tempest/api/compute/images/test_list_image_filters_negative.py
@@ -22,6 +22,12 @@
 
 
 class ListImageFiltersNegativeTestJSON(base.BaseV2ComputeTest):
+    """Negative tests of listing images using compute images API
+
+    Negative tests of listing images using compute images API with
+    microversion less than 2.36.
+    """
+
     max_microversion = '2.35'
 
     @classmethod
@@ -39,7 +45,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('391b0440-432c-4d4b-b5da-c5096aa247eb')
     def test_get_nonexistent_image(self):
-        # Check raises a NotFound
+        """Test getting a non existent image should fail"""
         nonexistent_image = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound, self.client.show_image,
                           nonexistent_image)
diff --git a/tempest/api/compute/servers/test_create_server_multi_nic.py b/tempest/api/compute/servers/test_create_server_multi_nic.py
index d0f53fe..bd3f58d 100644
--- a/tempest/api/compute/servers/test_create_server_multi_nic.py
+++ b/tempest/api/compute/servers/test_create_server_multi_nic.py
@@ -24,6 +24,7 @@
 
 
 class ServersTestMultiNic(base.BaseV2ComputeTest):
+    """Test multiple networks in servers"""
 
     @classmethod
     def skip_checks(cls):
@@ -59,8 +60,11 @@
 
     @decorators.idempotent_id('0578d144-ed74-43f8-8e57-ab10dbf9b3c2')
     def test_verify_multiple_nics_order(self):
-        # Verify that the networks order given at the server creation is
-        # preserved within the server.
+        """Test verifying multiple networks order in server
+
+        The networks order given at the server creation is preserved within
+        the server.
+        """
         net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
         net2 = self._create_net_subnet_ret_net_from_cidr('19.86.0.0/24')
 
@@ -95,6 +99,12 @@
 
     @decorators.idempotent_id('1678d144-ed74-43f8-8e57-ab10dbf9b3c2')
     def test_verify_duplicate_network_nics(self):
+        """Test multiple duplicate networks can be used to create server
+
+        Creating server with networks [net1, net2, net1], the server can
+        be created successfully and all three networks are in the server
+        addresses.
+        """
         # Verify that server creation does not fail when more than one nic
         # is created on the same network.
         net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index d477be0..4db6987 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -34,6 +34,8 @@
 
 
 class ServerActionsTestJSON(base.BaseV2ComputeTest):
+    """Test server actions"""
+
     def setUp(self):
         # NOTE(afazekas): Normally we use the same server with all test cases,
         # but if it has an issue, we build a new one
@@ -84,6 +86,11 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.change_password,
                           'Change password not available.')
     def test_change_server_password(self):
+        """Test changing server's password
+
+        The server's password should be set to the provided password and
+        the user can authenticate with the new password.
+        """
         # Since this test messes with the password and makes the
         # server unreachable, it should create its own server
         validation_resources = self.get_test_validation_resources(
@@ -147,17 +154,24 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('2cb1baf6-ac8d-4429-bf0d-ba8a0ba53e32')
     def test_reboot_server_hard(self):
-        # The server should be power cycled
+        """Test hard rebooting server
+
+        The server should be power cycled.
+        """
         self._test_reboot_server('HARD')
 
     @decorators.skip_because(bug="1014647")
     @decorators.idempotent_id('4640e3ef-a5df-482e-95a1-ceeeb0faa84d')
     def test_reboot_server_soft(self):
-        # The server should be signaled to reboot gracefully
+        """Test soft rebooting server
+
+        The server should be signaled to reboot gracefully.
+        """
         self._test_reboot_server('SOFT')
 
     @decorators.idempotent_id('1d1c9104-1b0a-11e7-a3d4-fa163e65f5ce')
     def test_remove_server_all_security_groups(self):
+        """Test removing all security groups from server"""
         server = self.create_test_server(wait_until='ACTIVE')
 
         # Remove all Security group
@@ -232,12 +246,19 @@
 
     @decorators.idempotent_id('aaa6cdf3-55a7-461a-add9-1c8596b9a07c')
     def test_rebuild_server(self):
+        """Test rebuilding server
+
+        The server should be rebuilt using the provided image and data.
+        """
         self._test_rebuild_server()
 
     @decorators.idempotent_id('30449a88-5aff-4f9b-9866-6ee9b17f906d')
     def test_rebuild_server_in_stop_state(self):
-        # The server in stop state  should be rebuilt using the provided
-        # image and remain in SHUTOFF state
+        """Test rebuilding server in stop state
+
+        The server in stop state should be rebuilt using the provided
+        image and remain in SHUTOFF state.
+        """
         server = self.client.show_server(self.server_id)['server']
         old_image = server['image']['id']
         new_image = (self.image_ref_alt
@@ -274,6 +295,10 @@
     @decorators.idempotent_id('b68bd8d6-855d-4212-b59b-2e704044dace')
     @utils.services('volume')
     def test_rebuild_server_with_volume_attached(self):
+        """Test rebuilding server with volume attached
+
+        The volume should be attached to the instance after rebuild.
+        """
         # create a new volume and attach it to the server
         volume = self.create_volume()
 
@@ -333,6 +358,7 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                           'Resize not available.')
     def test_resize_server_confirm(self):
+        """Test resizing server and then confirming"""
         self._test_resize_server_confirm(self.server_id, stop=False)
 
     @decorators.idempotent_id('e6c28180-7454-4b59-b188-0257af08a63b')
@@ -341,6 +367,7 @@
                           'Resize not available.')
     @utils.services('volume')
     def test_resize_volume_backed_server_confirm(self):
+        """Test resizing a volume backed server and then confirming"""
         # We have to create a new server that is volume-backed since the one
         # from setUp is not volume-backed.
         kwargs = {'volume_backed': True,
@@ -377,14 +404,18 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                           'Resize not available.')
     def test_resize_server_confirm_from_stopped(self):
+        """Test resizing a stopped server and then confirming"""
         self._test_resize_server_confirm(self.server_id, stop=True)
 
     @decorators.idempotent_id('c03aab19-adb1-44f5-917d-c419577e9e68')
     @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                           'Resize not available.')
     def test_resize_server_revert(self):
-        # The server's RAM and disk space should return to its original
-        # values after a resize is reverted
+        """Test resizing server and then reverting
+
+        The server's RAM and disk space should return to their original
+        values after a resize is reverted.
+        """
 
         self.client.resize_server(self.server_id, self.flavor_ref_alt)
         # NOTE(zhufl): Explicitly delete the server to get a new one for later
@@ -405,10 +436,13 @@
                           'Resize not available.')
     @utils.services('volume')
     def test_resize_server_revert_with_volume_attached(self):
-        # Tests attaching a volume to a server instance and then resizing
-        # the instance. Once the instance is resized, revert the resize which
-        # should move the instance and volume attachment back to the original
-        # compute host.
+        """Test resizing a volume attached server and then reverting
+
+        Tests attaching a volume to a server instance and then resizing
+        the instance. Once the instance is resized, revert the resize which
+        should move the instance and volume attachment back to the original
+        compute host.
+        """
 
         # Create a blank volume and attach it to the server created in setUp.
         volume = self.create_volume()
@@ -437,7 +471,14 @@
                           'Snapshotting not available, backup not possible.')
     @utils.services('image')
     def test_create_backup(self):
-        # Positive test:create backup successfully and rotate backups correctly
+        """Test creating server backup
+
+        1. Create server backup1 with rotation=2; there is 1 backup.
+        2. Create server backup2 with rotation=2; there are 2 backups.
+        3. Create server backup3; because the rotation is 2, the first one
+           (backup1) will be deleted, so there are still 2 backups.
+        """
+
         # create the first and the second backup
 
         # Check if glance v1 is available to determine which client to use. We
@@ -563,8 +604,11 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.console_output,
                           'Console output not supported.')
     def test_get_console_output(self):
-        # Positive test:Should be able to GET the console output
-        # for a given server_id and number of lines
+        """Test getting console output for a server
+
+        Should be able to GET the console output for a given server_id and
+        number of lines.
+        """
 
         # This reboot is necessary for outputting some console log after
         # creating an instance backup. If an instance backup, the console
@@ -579,6 +623,11 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.console_output,
                           'Console output not supported.')
     def test_get_console_output_with_unlimited_size(self):
+        """Test getting server's console output with unlimited size
+
+        The console output should be longer than the output retrieved in
+        test_get_console_output.
+        """
         server = self.create_test_server(wait_until='ACTIVE')
 
         def _check_full_length_console_log():
@@ -597,8 +646,11 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.console_output,
                           'Console output not supported.')
     def test_get_console_output_server_id_in_shutoff_status(self):
-        # Positive test:Should be able to GET the console output
-        # for a given server_id in SHUTOFF status
+        """Test getting console output for a server in SHUTOFF status
+
+        Should be able to GET the console output for a given server_id
+        in SHUTOFF status.
+        """
 
         # NOTE: SHUTOFF is irregular status. To avoid test instability,
         #       one server is created only for this test without using
@@ -614,6 +666,7 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.pause,
                           'Pause is not available.')
     def test_pause_unpause_server(self):
+        """Test pausing and unpausing server"""
         self.client.pause_server(self.server_id)
         waiters.wait_for_server_status(self.client, self.server_id, 'PAUSED')
         self.client.unpause_server(self.server_id)
@@ -623,6 +676,7 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
                           'Suspend is not available.')
     def test_suspend_resume_server(self):
+        """Test suspending and resuming server"""
         self.client.suspend_server(self.server_id)
         waiters.wait_for_server_status(self.client, self.server_id,
                                        'SUSPENDED')
@@ -634,6 +688,7 @@
                           'Shelve is not available.')
     @utils.services('image')
     def test_shelve_unshelve_server(self):
+        """Test shelving and unshelving server"""
         if CONF.image_feature_enabled.api_v2:
             glance_client = self.os_primary.image_client_v2
         elif CONF.image_feature_enabled.api_v1:
@@ -673,6 +728,7 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.pause,
                           'Pause is not available.')
     def test_shelve_paused_server(self):
+        """Test shelving a paused server"""
         server = self.create_test_server(wait_until='ACTIVE')
         self.client.pause_server(server['id'])
         waiters.wait_for_server_status(self.client, server['id'], 'PAUSED')
@@ -682,6 +738,7 @@
 
     @decorators.idempotent_id('af8eafd4-38a7-4a4b-bdbc-75145a580560')
     def test_stop_start_server(self):
+        """Test stopping and starting server"""
         self.client.stop_server(self.server_id)
         waiters.wait_for_server_status(self.client, self.server_id, 'SHUTOFF')
         self.client.start_server(self.server_id)
@@ -689,6 +746,12 @@
 
     @decorators.idempotent_id('80a8094c-211e-440a-ab88-9e59d556c7ee')
     def test_lock_unlock_server(self):
+        """Test locking and unlocking server
+
+        Lock the server, then trying to stop it should fail because a locked
+        server is not allowed to be stopped by a non-admin user.
+        Then unlock the server; now the server can be stopped and started.
+        """
         # Lock the server,try server stop(exceptions throw),unlock it and retry
         self.client.lock_server(self.server_id)
         self.addCleanup(self.client.unlock_server, self.server_id)
@@ -714,6 +777,10 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.vnc_console,
                           'VNC Console feature is disabled.')
     def test_get_vnc_console(self):
+        """Test getting vnc console from a server
+
+        The returned vnc console url should be in a valid format.
+        """
         if self.is_requested_microversion_compatible('2.5'):
             body = self.client.get_vnc_console(
                 self.server_id, type='novnc')['console']
diff --git a/tempest/api/compute/servers/test_server_addresses.py b/tempest/api/compute/servers/test_server_addresses.py
index c936ce5..5a3f5d0 100644
--- a/tempest/api/compute/servers/test_server_addresses.py
+++ b/tempest/api/compute/servers/test_server_addresses.py
@@ -19,6 +19,7 @@
 
 
 class ServerAddressesTestJSON(base.BaseV2ComputeTest):
+    """Test server addresses"""
     create_default_network = True
 
     @classmethod
@@ -36,8 +37,10 @@
     @decorators.idempotent_id('6eb718c0-02d9-4d5e-acd1-4e0c269cef39')
     @utils.services('network')
     def test_list_server_addresses(self):
-        # All public and private addresses for
-        # a server should be returned
+        """Test listing server address
+
+        All public and private addresses for a server should be returned.
+        """
 
         addresses = self.client.list_addresses(self.server['id'])['addresses']
 
@@ -51,8 +54,11 @@
     @decorators.idempotent_id('87bbc374-5538-4f64-b673-2b0e4443cc30')
     @utils.services('network')
     def test_list_server_addresses_by_network(self):
-        # Providing a network type should filter
-        # the addresses return by that type
+        """Test listing server addresses filtered by network addresses
+
+        Providing a network address should filter the addresses same with
+        the specified one.
+        """
 
         addresses = self.client.list_addresses(self.server['id'])['addresses']
 
diff --git a/tempest/api/compute/servers/test_servers_microversions.py b/tempest/api/compute/servers/test_servers_microversions.py
index 2434884..566d04a 100644
--- a/tempest/api/compute/servers/test_servers_microversions.py
+++ b/tempest/api/compute/servers/test_servers_microversions.py
@@ -32,11 +32,13 @@
 
 
 class ServerShowV254Test(base.BaseV2ComputeTest):
+    """Test servers API schema for compute microversion greater than 2.53"""
     min_microversion = '2.54'
     max_microversion = 'latest'
 
     @decorators.idempotent_id('09170a98-4940-4637-add7-1a35121f1a5a')
     def test_rebuild_server(self):
+        """Test rebuilding server with microversion greater than 2.53"""
         server = self.create_test_server(wait_until='ACTIVE')
         keypair_name = data_utils.rand_name(
             self.__class__.__name__ + '-keypair')
@@ -52,11 +54,13 @@
 
 
 class ServerShowV257Test(base.BaseV2ComputeTest):
+    """Test servers API schema for compute microversion greater than 2.56"""
     min_microversion = '2.57'
     max_microversion = 'latest'
 
     @decorators.idempotent_id('803df848-080a-4261-8f11-b020cd9b6f60')
     def test_rebuild_server(self):
+        """Test rebuilding server with microversion greater than 2.56"""
         server = self.create_test_server(wait_until='ACTIVE')
         user_data = "ZWNobyAiaGVsbG8gd29ybGQi"
         # Checking rebuild API response schema
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index 6676358..4f85048 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -60,7 +60,8 @@
         server = cls.create_test_server(wait_until='ACTIVE')
         cls.server_id = server['id']
 
-        server = cls.create_test_server()
+        # Wait until active to avoid the delete racing with the server build
+        server = cls.create_test_server(wait_until='ACTIVE')
         cls.client.delete_server(server['id'])
         waiters.wait_for_server_termination(cls.client, server['id'])
         cls.deleted_server_id = server['id']
diff --git a/tempest/api/compute/volumes/test_volume_snapshots.py b/tempest/api/compute/volumes/test_volume_snapshots.py
index f3ccf8d..30bea60 100644
--- a/tempest/api/compute/volumes/test_volume_snapshots.py
+++ b/tempest/api/compute/volumes/test_volume_snapshots.py
@@ -24,6 +24,7 @@
 
 
 class VolumesSnapshotsTestJSON(base.BaseV2ComputeTest):
+    """Test volume snapshots with compute microversion less than 2.36"""
 
     # These tests will fail with a 404 starting from microversion 2.36. For
     # more information, see:
@@ -48,6 +49,7 @@
 
     @decorators.idempotent_id('cd4ec87d-7825-450d-8040-6e2068f2da8f')
     def test_volume_snapshot_create_get_list_delete(self):
+        """Test create/get/list/delete volume snapshot"""
         volume = self.create_volume()
         self.addCleanup(self.delete_volume, volume['id'])
 
diff --git a/tempest/api/compute/volumes/test_volumes_get.py b/tempest/api/compute/volumes/test_volumes_get.py
index 0d23c1f..554f418 100644
--- a/tempest/api/compute/volumes/test_volumes_get.py
+++ b/tempest/api/compute/volumes/test_volumes_get.py
@@ -25,6 +25,7 @@
 
 
 class VolumesGetTestJSON(base.BaseV2ComputeTest):
+    """Test compute volumes API with microversion less than 2.36"""
 
     # These tests will fail with a 404 starting from microversion 2.36. For
     # more information, see:
@@ -45,7 +46,7 @@
 
     @decorators.idempotent_id('f10f25eb-9775-4d9d-9cbe-1cf54dae9d5f')
     def test_volume_create_get_delete(self):
-        # CREATE, GET, DELETE Volume
+        """Test create/get/delete volume"""
         v_name = data_utils.rand_name(self.__class__.__name__ + '-Volume')
         metadata = {'Type': 'work'}
         # Create volume
diff --git a/tempest/api/compute/volumes/test_volumes_list.py b/tempest/api/compute/volumes/test_volumes_list.py
index 28bc174..0b37264 100644
--- a/tempest/api/compute/volumes/test_volumes_list.py
+++ b/tempest/api/compute/volumes/test_volumes_list.py
@@ -21,6 +21,8 @@
 
 
 class VolumesTestJSON(base.BaseV2ComputeTest):
+    """Test listing volumes with compute microversion less than 2.36"""
+
     # NOTE: This test creates a number of 1G volumes. To run successfully,
     # ensure that the backing file for the volume group that Nova uses
     # has space for at least 3 1G volumes!
@@ -57,7 +59,7 @@
 
     @decorators.idempotent_id('bc2dd1a0-15af-48e5-9990-f2e75a48325d')
     def test_volume_list(self):
-        # Should return the list of Volumes
+        """Test listing volumes should return all volumes"""
         # Fetch all Volumes
         fetched_list = self.client.list_volumes()['volumes']
         # Now check if all the Volumes created in setup are in fetched list
@@ -72,7 +74,7 @@
 
     @decorators.idempotent_id('bad0567a-5a4f-420b-851e-780b55bb867c')
     def test_volume_list_with_details(self):
-        # Should return the list of Volumes with details
+        """Test listing volumes with detail should return all volumes"""
         # Fetch all Volumes
         fetched_list = self.client.list_volumes(detail=True)['volumes']
         # Now check if all the Volumes created in setup are in fetched list
@@ -87,7 +89,11 @@
 
     @decorators.idempotent_id('1048ed81-2baf-487a-b284-c0622b86e7b8')
     def test_volume_list_param_limit(self):
-        # Return the list of volumes based on limit set
+        """Test listing volumes based on limit set
+
+        If we list volumes with limit=2, then only 2 volumes should be
+        returned.
+        """
         params = {'limit': 2}
         fetched_vol_list = self.client.list_volumes(**params)['volumes']
 
@@ -96,7 +102,11 @@
 
     @decorators.idempotent_id('33985568-4965-49d5-9bcc-0aa007ca5b7a')
     def test_volume_list_with_detail_param_limit(self):
-        # Return the list of volumes with details based on limit set.
+        """Test listing volumes with detail based on limit set
+
+        If we list volumes with detail with limit=2, then only 2 volumes with
+        detail should be returned.
+        """
         params = {'limit': 2}
         fetched_vol_list = self.client.list_volumes(detail=True,
                                                     **params)['volumes']
@@ -106,7 +116,12 @@
 
     @decorators.idempotent_id('51c22651-a074-4ea7-af0b-094f9331303e')
     def test_volume_list_param_offset_and_limit(self):
-        # Return the list of volumes based on offset and limit set.
+        """Test listing volumes based on offset and limit set
+
+        If we list volumes with offset=1 and limit=1, then the single volume
+        at position 1 in the full volume list should be returned.
+        (Positions in the full volume list start from 0.)
+        """
         # get all volumes list
         all_vol_list = self.client.list_volumes()['volumes']
         params = {'offset': 1, 'limit': 1}
@@ -123,7 +138,13 @@
 
     @decorators.idempotent_id('06b6abc4-3f10-48e9-a7a1-3facc98f03e5')
     def test_volume_list_with_detail_param_offset_and_limit(self):
-        # Return the list of volumes details based on offset and limit set.
+        """Test listing volumes with detail based on offset and limit set
+
+        If we list volumes with detail with offset=1 and limit=1, then the
+        single detailed volume at position 1 in the full volume list should
+        be returned.
+        (Positions in the full volume list start from 0.)
+        """
         # get all volumes list
         all_vol_list = self.client.list_volumes(detail=True)['volumes']
         params = {'offset': 1, 'limit': 1}
diff --git a/tempest/api/compute/volumes/test_volumes_negative.py b/tempest/api/compute/volumes/test_volumes_negative.py
index 444ce93..f553e32 100644
--- a/tempest/api/compute/volumes/test_volumes_negative.py
+++ b/tempest/api/compute/volumes/test_volumes_negative.py
@@ -23,6 +23,7 @@
 
 
 class VolumesNegativeTest(base.BaseV2ComputeTest):
+    """Negative tests of volumes with compute microversion less than 2.36"""
 
     # These tests will fail with a 404 starting from microversion 2.36. For
     # more information, see:
@@ -44,7 +45,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('c03ea686-905b-41a2-8748-9635154b7c57')
     def test_volume_get_nonexistent_volume_id(self):
-        # Negative: Should not be able to get details of nonexistent volume
+        """Test getting details of a non existent volume should fail"""
         # Creating a nonexistent volume id
         # Trying to GET a non existent volume
         self.assertRaises(lib_exc.NotFound, self.client.show_volume,
@@ -53,7 +54,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('54a34226-d910-4b00-9ef8-8683e6c55846')
     def test_volume_delete_nonexistent_volume_id(self):
-        # Negative: Should not be able to delete nonexistent Volume
+        """Test deleting a nonexistent volume should fail"""
         # Creating nonexistent volume id
         # Trying to DELETE a non existent volume
         self.assertRaises(lib_exc.NotFound, self.client.delete_volume,
@@ -62,8 +63,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('5125ae14-152b-40a7-b3c5-eae15e9022ef')
     def test_create_volume_with_invalid_size(self):
-        # Negative: Should not be able to create volume with invalid size
-        # in request
+        """Test creating volume with invalid size should fail"""
         v_name = data_utils.rand_name(self.__class__.__name__ + '-Volume')
         metadata = {'Type': 'work'}
         self.assertRaises(lib_exc.BadRequest, self.client.create_volume,
@@ -72,8 +72,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('131cb3a1-75cc-4d40-b4c3-1317f64719b0')
     def test_create_volume_without_passing_size(self):
-        # Negative: Should not be able to create volume without passing size
-        # in request
+        """Test creating volume without specifying size should fail"""
         v_name = data_utils.rand_name(self.__class__.__name__ + '-Volume')
         metadata = {'Type': 'work'}
         self.assertRaises(lib_exc.BadRequest, self.client.create_volume,
@@ -82,7 +81,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('8cce995e-0a83-479a-b94d-e1e40b8a09d1')
     def test_create_volume_with_size_zero(self):
-        # Negative: Should not be able to create volume with size zero
+        """Test creating volume with size=0 should fail"""
         v_name = data_utils.rand_name(self.__class__.__name__ + '-Volume')
         metadata = {'Type': 'work'}
         self.assertRaises(lib_exc.BadRequest, self.client.create_volume,
@@ -91,14 +90,13 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('62bab09a-4c03-4617-8cca-8572bc94af9b')
     def test_get_volume_without_passing_volume_id(self):
-        # Negative: Should not be able to get volume when empty ID is passed
+        """Test getting volume details without volume id should fail"""
         self.assertRaises(lib_exc.NotFound, self.client.show_volume, '')
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('62972737-124b-4513-b6cf-2f019f178494')
     def test_delete_invalid_volume_id(self):
-        # Negative: Should not be able to delete volume when invalid ID is
-        # passed
+        """Test deleting volume with an invalid volume id should fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.client.delete_volume,
                           data_utils.rand_name('invalid'))
@@ -106,5 +104,5 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('0d1417c5-4ae8-4c2c-adc5-5f0b864253e5')
     def test_delete_volume_without_passing_volume_id(self):
-        # Negative: Should not be able to delete volume when empty ID is passed
+        """Test deleting volume without volume id should fail"""
         self.assertRaises(lib_exc.NotFound, self.client.delete_volume, '')
diff --git a/tempest/api/network/test_allowed_address_pair.py b/tempest/api/network/test_allowed_address_pair.py
index ff5026b..0b9d381 100644
--- a/tempest/api/network/test_allowed_address_pair.py
+++ b/tempest/api/network/test_allowed_address_pair.py
@@ -65,6 +65,8 @@
             name=data_utils.rand_name(self.__class__.__name__),
             allowed_address_pairs=allowed_address_pairs)
         port_id = body['port']['id']
+        self.addCleanup(self.ports_client.wait_for_resource_deletion,
+                        port_id)
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.ports_client.delete_port, port_id)
 
@@ -82,6 +84,8 @@
             network_id=self.network['id'],
             name=data_utils.rand_name(self.__class__.__name__))
         port_id = body['port']['id']
+        self.addCleanup(self.ports_client.wait_for_resource_deletion,
+                        port_id)
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.ports_client.delete_port, port_id)
         if mac_address is None:
@@ -115,6 +119,8 @@
             network_id=self.network['id'],
             name=data_utils.rand_name(self.__class__.__name__))
         newportid = resp['port']['id']
+        self.addCleanup(self.ports_client.wait_for_resource_deletion,
+                        newportid)
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.ports_client.delete_port, newportid)
         ipaddress = resp['port']['fixed_ips'][0]['ip_address']
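
The new wait_for_resource_deletion cleanups above are registered before the
delete_port cleanups on purpose: unittest runs addCleanup callbacks in LIFO
order, so the port is deleted first and the wait runs afterwards. A
self-contained sketch of that ordering, with stand-in callables rather than
the real clients:

    import unittest


    class CleanupOrderExample(unittest.TestCase):
        """Demonstrates the LIFO ordering of addCleanup callbacks."""

        def test_order(self):
            calls = []
            # Registered first, so it runs last.
            self.addCleanup(calls.append, 'wait_for_resource_deletion')
            # Registered second, so it runs first.
            self.addCleanup(calls.append, 'delete_port')
            self.doCleanups()
            self.assertEqual(
                calls, ['delete_port', 'wait_for_resource_deletion'])
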
diff --git a/tempest/cmd/init.py b/tempest/cmd/init.py
index d84f3a3..6e93d69 100644
--- a/tempest/cmd/init.py
+++ b/tempest/cmd/init.py
@@ -44,11 +44,14 @@
 
     :return: default config dir
     """
+    # NOTE: '/etc/tempest' is the default config directory on Linux.
     global_conf_dir = '/etc/tempest'
     xdg_config = os.environ.get('XDG_CONFIG_HOME',
-                                os.path.expanduser('~/.config'))
+                                os.path.expanduser(os.path.join('~',
+                                                                '.config')))
     user_xdg_global_path = os.path.join(xdg_config, 'tempest')
-    user_global_path = os.path.join(os.path.expanduser('~'), '.tempest/etc')
+    user_global_path = os.path.join(os.path.expanduser('~'),
+                                    '.tempest', 'etc')
     if os.path.isdir(global_conf_dir):
         return global_conf_dir
     elif os.path.isdir(user_xdg_global_path):
@@ -121,7 +124,7 @@
     def generate_sample_config(self, local_dir):
         conf_generator = os.path.join(os.path.dirname(__file__),
                                       'config-generator.tempest.conf')
-        output_file = os.path.join(local_dir, 'etc/tempest.conf.sample')
+        output_file = os.path.join(local_dir, 'etc', 'tempest.conf.sample')
         if os.path.isfile(conf_generator):
             generator.main(['--config-file', conf_generator, '--output-file',
                             output_file])
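
The init.py changes replace hard-coded '/'-separated paths with os.path.join,
which I read as a portability cleanup: the separator is then chosen per
platform. A tiny illustration (output shown for POSIX):

    import os

    # '~/.tempest/etc' on POSIX, '~\.tempest\etc' on Windows.
    print(os.path.join('~', '.tempest', 'etc'))

    # Same idea for the XDG fallback used above.
    print(os.path.expanduser(os.path.join('~', '.config')))
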
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 14790d6..fc25914 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -223,6 +223,25 @@
              resource_name, resource_id, status, time.time() - start)
 
 
+def wait_for_volume_attachment_create(client, volume_id, server_id):
+    """Waits for a volume attachment to be created at a given volume."""
+    start = int(time.time())
+    while True:
+        attachments = client.show_volume(volume_id)['volume']['attachments']
+        found = [a for a in attachments if a['server_id'] == server_id]
+        if found:
+            LOG.info('Attachment %s created for volume %s to server %s after '
+                     'waiting for %f seconds', found[0]['attachment_id'],
+                     volume_id, server_id, time.time() - start)
+            return found[0]
+        time.sleep(client.build_interval)
+        if int(time.time()) - start >= client.build_timeout:
+            message = ('Failed to attach volume %s to server %s '
+                       'within the required time (%s s).' %
+                       (volume_id, server_id, client.build_timeout))
+            raise lib_exc.TimeoutException(message)
+
+
 def wait_for_volume_attachment_remove(client, volume_id, attachment_id):
     """Waits for a volume attachment to be removed from a given volume."""
     start = int(time.time())
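
A hypothetical helper showing how a test might use the new
wait_for_volume_attachment_create() waiter after attaching a volume; the
client names follow the usual Tempest conventions, but this fragment is only
an illustration and is not part of the patch:

    from tempest.common import waiters


    def attach_and_wait(servers_client, volumes_client, server_id, volume_id):
        # Ask Nova to attach the volume, then block until Cinder reports the
        # attachment for this server.
        servers_client.attach_volume(server_id, volumeId=volume_id)
        attachment = waiters.wait_for_volume_attachment_create(
            volumes_client, volume_id, server_id)
        return attachment['attachment_id']
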
diff --git a/tempest/lib/cmd/skip_tracker.py b/tempest/lib/cmd/skip_tracker.py
index 87806b7..95376e3 100755
--- a/tempest/lib/cmd/skip_tracker.py
+++ b/tempest/lib/cmd/skip_tracker.py
@@ -31,10 +31,11 @@
 except ImportError:
     launchpad = None
 
-LPCACHEDIR = os.path.expanduser('~/.launchpadlib/cache')
+LPCACHEDIR = os.path.expanduser(os.path.join('~', '.launchpadlib', 'cache'))
 LOG = logging.getLogger(__name__)
 
-BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../..'))
+BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                       '..', '..', '..'))
 TESTDIR = os.path.join(BASEDIR, 'tempest')
 
 
diff --git a/tempest/lib/common/preprov_creds.py b/tempest/lib/common/preprov_creds.py
index 1011504..641d727 100644
--- a/tempest/lib/common/preprov_creds.py
+++ b/tempest/lib/common/preprov_creds.py
@@ -172,7 +172,7 @@
         return self.is_multi_user()
 
     def _create_hash_file(self, hash_string):
-        path = os.path.join(os.path.join(self.accounts_dir, hash_string))
+        path = os.path.join(self.accounts_dir, hash_string)
         if not os.path.isfile(path):
             with open(path, 'w') as fd:
                 fd.write(self.name)
@@ -194,8 +194,7 @@
             if res:
                 return _hash
             else:
-                path = os.path.join(os.path.join(self.accounts_dir,
-                                                 _hash))
+                path = os.path.join(self.accounts_dir, _hash)
                 with open(path, 'r') as fd:
                     names.append(fd.read())
         msg = ('Insufficient number of users provided. %s have allocated all '
diff --git a/tempest/test_discover/test_discover.py b/tempest/test_discover/test_discover.py
index 143c6e1..5816ab1 100644
--- a/tempest/test_discover/test_discover.py
+++ b/tempest/test_discover/test_discover.py
@@ -30,8 +30,8 @@
     base_path = os.path.split(os.path.dirname(os.path.abspath(__file__)))[0]
     base_path = os.path.split(base_path)[0]
     # Load local tempest tests
-    for test_dir in ['tempest/api', 'tempest/scenario']:
-        full_test_dir = os.path.join(base_path, test_dir)
+    for test_dir in ['api', 'scenario']:
+        full_test_dir = os.path.join(base_path, 'tempest', test_dir)
         if not pattern:
             suite.addTests(loader.discover(full_test_dir,
                                            top_level_dir=base_path))
diff --git a/tempest/tests/cmd/test_tempest_init.py b/tempest/tests/cmd/test_tempest_init.py
index 9042b12..fce0882 100644
--- a/tempest/tests/cmd/test_tempest_init.py
+++ b/tempest/tests/cmd/test_tempest_init.py
@@ -40,7 +40,7 @@
 
     def test_generate_sample_config(self):
         local_dir = self.useFixture(fixtures.TempDir())
-        etc_dir_path = os.path.join(local_dir.path, 'etc/')
+        etc_dir_path = os.path.join(local_dir.path, 'etc')
         os.mkdir(etc_dir_path)
         init_cmd = init.TempestInit(None, None)
         local_sample_conf_file = os.path.join(etc_dir_path,
@@ -56,7 +56,7 @@
 
     def test_update_local_conf(self):
         local_dir = self.useFixture(fixtures.TempDir())
-        etc_dir_path = os.path.join(local_dir.path, 'etc/')
+        etc_dir_path = os.path.join(local_dir.path, 'etc')
         os.mkdir(etc_dir_path)
         lock_dir = os.path.join(local_dir.path, 'tempest_lock')
         config_path = os.path.join(etc_dir_path, 'tempest.conf')
diff --git a/tempest/tests/cmd/test_verify_tempest_config.py b/tempest/tests/cmd/test_verify_tempest_config.py
index 721fd76..277e049 100644
--- a/tempest/tests/cmd/test_verify_tempest_config.py
+++ b/tempest/tests/cmd/test_verify_tempest_config.py
@@ -579,7 +579,7 @@
                           os, 'fakeservice')
 
     def test_get_config_file(self):
-        conf_dir = os.path.join(os.getcwd(), 'etc/')
+        conf_dir = os.path.join(os.getcwd(), 'etc')
         conf_file = "tempest.conf.sample"
         local_sample_conf_file = os.path.join(conf_dir, conf_file)
 
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index 32d6498..73924bd 100755
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -234,6 +234,29 @@
                                     mock.call(volume_id)])
         mock_sleep.assert_called_once_with(1)
 
+    def test_wait_for_volume_attachment_create(self):
+        vol_detached = {'volume': {'attachments': []}}
+        vol_attached = {'volume': {'attachments': [
+                       {'id': uuids.volume_id,
+                        'attachment_id': uuids.attachment_id,
+                        'server_id': uuids.server_id,
+                        'volume_id': uuids.volume_id}]}}
+        show_volume = mock.MagicMock(side_effect=[
+            vol_detached, vol_detached, vol_attached])
+        client = mock.Mock(spec=volumes_client.VolumesClient,
+                           build_interval=1,
+                           build_timeout=5,
+                           show_volume=show_volume)
+        self.patch('time.time')
+        self.patch('time.sleep')
+        att = waiters.wait_for_volume_attachment_create(
+            client, uuids.volume_id, uuids.server_id)
+        assert att == vol_attached['volume']['attachments'][0]
+        # Assert that show_volume is called until the attachment appears.
+        show_volume.assert_has_calls([mock.call(uuids.volume_id),
+                                      mock.call(uuids.volume_id),
+                                      mock.call(uuids.volume_id)])
+
     def test_wait_for_volume_attachment(self):
         vol_detached = {'volume': {'attachments': []}}
         vol_attached = {'volume': {'attachments': [