Add a stress action for volume attach verification

The new stress action is able to verify the volume attachment
on the guest over ssh.

The ``new_volume`` option can help in finding out whether an issue is on
the nova side (or on things controlled by nova) or on the cinder side.
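
As a rough usage sketch (assuming the stress runner shipped under
tempest/stress and its usual ``-t``/``-d`` flags), the action can be
driven with the sample configuration added by this change:

    ./run_stress.py -t tempest/stress/etc/volume-attach-verify.json -d 60

The kwargs in the JSON file map directly onto the options documented in
the action's setUp() docstring.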

Change-Id: I2d4c8548a2617dd33e2f116fce8b0b3d379b808e
diff --git a/tempest/stress/actions/volume_attach_verify.py b/tempest/stress/actions/volume_attach_verify.py
new file mode 100644
index 0000000..1bc3b06
--- /dev/null
+++ b/tempest/stress/actions/volume_attach_verify.py
@@ -0,0 +1,232 @@
+#    Licensed under the Apache License, Version 2.0 (the "License");
+#    you may not use this file except in compliance with the License.
+#    You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+
+import re
+
+from tempest.common.utils import data_utils
+from tempest.common.utils.linux import remote_client
+from tempest import config
+import tempest.stress.stressaction as stressaction
+import tempest.test
+
+CONF = config.CONF
+
+
+class VolumeVerifyStress(stressaction.StressAction):
+
+    def _create_keypair(self):
+        keyname = data_utils.rand_name("key")
+        resp, self.key = self.manager.keypairs_client.create_keypair(keyname)
+        assert(resp.status == 200)
+
+    def _delete_keypair(self):
+        resp, _ = self.manager.keypairs_client.delete_keypair(self.key['name'])
+        assert(resp.status == 202)
+
+    def _create_vm(self):
+        self.name = name = data_utils.rand_name("instance")
+        servers_client = self.manager.servers_client
+        self.logger.info("creating %s" % name)
+        vm_args = self.vm_extra_args.copy()
+        vm_args['security_groups'] = [self.sec_grp]
+        vm_args['key_name'] = self.key['name']
+        resp, server = servers_client.create_server(name, self.image,
+                                                    self.flavor,
+                                                    **vm_args)
+        self.server_id = server['id']
+        assert(resp.status == 202)
+        self.manager.servers_client.wait_for_server_status(self.server_id,
+                                                           'ACTIVE')
+
+    def _destroy_vm(self):
+        self.logger.info("deleting server: %s" % self.server_id)
+        resp, _ = self.manager.servers_client.delete_server(self.server_id)
+        # nova returns 204 although deletion completes asynchronously below
+        assert(resp.status == 204)
+        self.manager.servers_client.wait_for_server_termination(self.server_id)
+        self.logger.info("deleted server: %s" % self.server_id)
+
+    def _create_sec_group(self):
+        sec_grp_cli = self.manager.security_groups_client
+        s_name = data_utils.rand_name('sec_grp-')
+        s_description = data_utils.rand_name('desc-')
+        _, self.sec_grp = sec_grp_cli.create_security_group(s_name,
+                                                            s_description)
+        create_rule = sec_grp_cli.create_security_group_rule
+        create_rule(self.sec_grp['id'], 'tcp', 22, 22)
+        create_rule(self.sec_grp['id'], 'icmp', -1, -1)
+
+    def _destroy_sec_grp(self):
+        sec_grp_cli = self.manager.security_groups_client
+        sec_grp_cli.delete_security_group(self.sec_grp['id'])
+
+    def _create_floating_ip(self):
+        floating_cli = self.manager.floating_ips_client
+        _, self.floating = floating_cli.create_floating_ip(self.floating_pool)
+
+    def _destroy_floating_ip(self):
+        cli = self.manager.floating_ips_client
+        cli.delete_floating_ip(self.floating['id'])
+        cli.wait_for_resource_deletion(self.floating['id'])
+        self.logger.info("Deleted Floating IP %s", str(self.floating['ip']))
+
+    def _create_volume(self):
+        name = data_utils.rand_name("volume")
+        self.logger.info("creating volume: %s" % name)
+        volumes_client = self.manager.volumes_client
+        resp, self.volume = volumes_client.create_volume(size=1,
+                                                         display_name=name)
+        assert(resp.status == 200)
+        volumes_client.wait_for_volume_status(self.volume['id'],
+                                              'available')
+        self.logger.info("created volume: %s" % self.volume['id'])
+
+    def _delete_volume(self):
+        self.logger.info("deleting volume: %s" % self.volume['id'])
+        volumes_client = self.manager.volumes_client
+        resp, _ = volumes_client.delete_volume(self.volume['id'])
+        assert(resp.status == 202)
+        volumes_client.wait_for_resource_deletion(self.volume['id'])
+        self.logger.info("deleted volume: %s" % self.volume['id'])
+
+    def _wait_disassociate(self):
+        cli = self.manager.floating_ips_client
+
+        def func():
+            _, floating = cli.get_floating_ip_details(self.floating['id'])
+            return floating['instance_id'] is None
+
+        if not tempest.test.call_until_true(func, CONF.compute.build_timeout,
+                                            CONF.compute.build_interval):
+            raise RuntimeError("IP disassociate timeout!")
+
+    def new_server_ops(self):
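+        # Boot a server, associate the floating IP with it and optionally
+        # verify the initial block device count over ssh before any attach.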
+        self._create_vm()
+        cli = self.manager.floating_ips_client
+        cli.associate_floating_ip_to_server(self.floating['ip'],
+                                            self.server_id)
+        if self.ssh_test_before_attach and self.enable_ssh_verify:
+            self.logger.info("Scanning for block devices via ssh on %s"
+                             % self.server_id)
+            self.part_wait(self.detach_match_count)
+
+    def setUp(self, **kwargs):
+        """Note able configuration combinations:
+            Closest options to the test_stamp_pattern:
+                new_server = True
+                new_volume = True
+                enable_ssh_verify = True
+                ssh_test_before_attach = False
+            Just attaching:
+                new_server = False
+                new_volume = False
+                enable_ssh_verify = True
+                ssh_test_before_attach = True
+            Mostly API load by repeated attachment:
+                new_server = False
+                new_volume = False
+                enable_ssh_verify = False
+                ssh_test_before_attach = False
+            Minimal Nova load, but cinder load not decreased:
+                new_server = False
+                new_volume = True
+                enable_ssh_verify = True
+                ssh_test_before_attach = True
+        """
+        self.image = CONF.compute.image_ref
+        self.flavor = CONF.compute.flavor_ref
+        self.vm_extra_args = kwargs.get('vm_extra_args', {})
+        self.floating_pool = kwargs.get('floating_pool', None)
+        self.new_volume = kwargs.get('new_volume', True)
+        self.new_server = kwargs.get('new_server', False)
+        self.enable_ssh_verify = kwargs.get('enable_ssh_verify', True)
+        self.ssh_test_before_attach = kwargs.get('ssh_test_before_attach',
+                                                 False)
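+        # part_line_re is matched against each line of the guest's partition
+        # listing; detach_match_count and attach_match_count are the expected
+        # numbers of matching lines while the volume is detached and attached.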
+        self.part_line_re = re.compile(kwargs.get('part_line_re', '.*vd.*'))
+        self.detach_match_count = kwargs.get('detach_match_count', 1)
+        self.attach_match_count = kwargs.get('attach_match_count', 2)
+        self.part_name = kwargs.get('part_name', '/dev/vdc')
+
+        self._create_floating_ip()
+        self._create_sec_group()
+        self._create_keypair()
+        private_key = self.key['private_key']
+        username = CONF.compute.image_ssh_user
+        self.remote_client = remote_client.RemoteClient(self.floating['ip'],
+                                                        username,
+                                                        pkey=private_key)
+        if not self.new_volume:
+            self._create_volume()
+        if not self.new_server:
+            self.new_server_ops()
+
+    # Here we just check whether the number of matching partition lines
+    # has increased or decreased to the expected count.
+    def part_wait(self, num_match):
+        def _part_state():
+            self.partitions = self.remote_client.get_partitions().split('\n')
+            matching = 0
+            for part_line in self.partitions[1:]:
+                if self.part_line_re.match(part_line):
+                    matching += 1
+            return matching == num_match
+        if not tempest.test.call_until_true(_part_state,
+                                            CONF.compute.build_timeout,
+                                            CONF.compute.build_interval):
+            raise RuntimeError("Unexpected partitions: %s"
+                               % str(self.partitions))
+
+    def run(self):
+        if self.new_server:
+            self.new_server_ops()
+        if self.new_volume:
+            self._create_volume()
+        servers_client = self.manager.servers_client
+        self.logger.info("attach volume (%s) to vm %s" %
+                        (self.volume['id'], self.server_id))
+        resp, body = servers_client.attach_volume(self.server_id,
+                                                  self.volume['id'],
+                                                  self.part_name)
+        assert(resp.status == 200)
+        self.manager.volumes_client.wait_for_volume_status(self.volume['id'],
+                                                           'in-use')
+        if self.enable_ssh_verify:
+            self.logger.info("Scanning for new block device on %s"
+                             % self.server_id)
+            self.part_wait(self.attach_match_count)
+
+        resp, body = servers_client.detach_volume(self.server_id,
+                                                  self.volume['id'])
+        assert(resp.status == 202)
+        self.manager.volumes_client.wait_for_volume_status(self.volume['id'],
+                                                           'available')
+        if self.enable_ssh_verify:
+            self.logger.info("Scanning for block device disapperance on %s"
+                             % self.server_id)
+            self.part_wait(self.detach_match_count)
+        if self.new_volume:
+            self._delete_volume()
+        if self.new_server:
+            self._destroy_vm()
+
+    def tearDown(self):
+        cli = self.manager.floating_ips_client
+        cli.disassociate_floating_ip_from_server(self.floating['ip'],
+                                                 self.server_id)
+        self._wait_disassociate()
+        if not self.new_server:
+            self._destroy_vm()
+        self._delete_keypair()
+        self._destroy_floating_ip()
+        self._destroy_sec_grp()
+        if not self.new_volume:
+            self._delete_volume()
diff --git a/tempest/stress/etc/volume-attach-verify.json b/tempest/stress/etc/volume-attach-verify.json
new file mode 100644
index 0000000..731f5ed
--- /dev/null
+++ b/tempest/stress/etc/volume-attach-verify.json
@@ -0,0 +1,11 @@
+[{"action": "tempest.stress.actions.volume_attach_verify.VolumeVerifyStress",
+  "threads": 1,
+  "use_admin": false,
+  "use_isolated_tenants": false,
+  "kwargs": {"vm_extra_args": {},
+             "new_volume": true,
+             "new_server": false,
+             "ssh_test_before_attach": false,
+             "enable_ssh_verify": true}
+}
+]