Merge "Avoid duplicate server creation in rebuild test"
diff --git a/roles/run-tempest-26/tasks/main.yaml b/roles/run-tempest-26/tasks/main.yaml
index 7423bfb..7ad5c99 100644
--- a/roles/run-tempest-26/tasks/main.yaml
+++ b/roles/run-tempest-26/tasks/main.yaml
@@ -17,7 +17,7 @@
 
 - name: Limit max concurrency when more than 3 vcpus are available
   set_fact:
-    default_concurrency: "{{ num_cores|int // 2 }}"
+    default_concurrency: "{{ num_cores|int - 2 }}"
   when: num_cores|int > 3
 
 - name: Override target branch
diff --git a/roles/run-tempest/tasks/main.yaml b/roles/run-tempest/tasks/main.yaml
index 3fb494f..3d78557 100644
--- a/roles/run-tempest/tasks/main.yaml
+++ b/roles/run-tempest/tasks/main.yaml
@@ -17,7 +17,7 @@
 
 - name: Limit max concurrency when more than 3 vcpus are available
   set_fact:
-    default_concurrency: "{{ num_cores|int // 2 }}"
+    default_concurrency: "{{ num_cores|int - 2 }}"
   when: num_cores|int > 3
 
 - name: Override target branch
diff --git a/tempest/api/compute/admin/test_networks.py b/tempest/api/compute/admin/test_networks.py
index fb6376e..d7fb62d 100644
--- a/tempest/api/compute/admin/test_networks.py
+++ b/tempest/api/compute/admin/test_networks.py
@@ -64,5 +64,5 @@
             configured_network = CONF.compute.fixed_network_name
             self.assertIn(configured_network, [x['label'] for x in networks])
         else:
-            network_labels = [x['label'] for x in networks]
-            self.assertNotEmpty(network_labels)
+            raise self.skipException(
+                "Environment has no known-for-sure existing network.")
diff --git a/tempest/api/compute/admin/test_servers.py b/tempest/api/compute/admin/test_servers.py
index bc00f8c..321078c 100644
--- a/tempest/api/compute/admin/test_servers.py
+++ b/tempest/api/compute/admin/test_servers.py
@@ -25,6 +25,8 @@
 class ServersAdminTestJSON(base.BaseV2ComputeAdminTest):
     """Tests Servers API using admin privileges"""
 
+    create_default_network = True
+
     @classmethod
     def setup_clients(cls):
         super(ServersAdminTestJSON, cls).setup_clients()
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 5d172c7..ce6cd60 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -326,18 +326,18 @@
             body['id'])
         return body
 
-    def wait_for(self, condition):
+    def wait_for(self, condition, *args):
         """Repeatedly calls condition() until a timeout."""
         start_time = int(time.time())
         while True:
             try:
-                condition()
+                condition(*args)
             except Exception:
                 pass
             else:
                 return
             if int(time.time()) - start_time >= self.build_timeout:
-                condition()
+                condition(*args)
                 return
             time.sleep(self.build_interval)
 
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index 23f8326..2b859da 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -24,6 +24,8 @@
 class ImagesOneServerTestJSON(base.BaseV2ComputeTest):
     """Test server images API"""
 
+    create_default_network = True
+
     @classmethod
     def resource_setup(cls):
         super(ImagesOneServerTestJSON, cls).resource_setup()
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 87dfd88..3e54bf6 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -207,9 +207,9 @@
             # NOTE(mriedem): tearDown requires the server to be started.
             self.client.start_server(server_id)
 
-    def _get_output(self):
+    def _get_output(self, server_id):
         output = self.client.get_console_output(
-            self.server_id, length=3)['output']
+            server_id, length=3)['output']
         self.assertTrue(output, "Console output was empty.")
         lines = len(output.split('\n'))
         self.assertEqual(lines, 3)
@@ -335,7 +335,7 @@
         # "console-log" API.
         # The detail is https://bugs.launchpad.net/nova/+bug/1251920
         self.reboot_server(self.server_id, type='HARD')
-        self.wait_for(self._get_output)
+        self.wait_for(self._get_output, self.server_id)
 
     @decorators.idempotent_id('bd61a9fd-062f-4670-972b-2d6c3e3b9e73')
     @testtools.skipUnless(CONF.compute_feature_enabled.pause,
@@ -707,6 +707,7 @@
 
         self.wait_for(_check_full_length_console_log)
 
+    @decorators.skip_because(bug='2028851')
     @decorators.idempotent_id('5b65d4e7-4ecd-437c-83c0-d6b79d927568')
     @testtools.skipUnless(CONF.compute_feature_enabled.console_output,
                           'Console output not supported.')
@@ -725,7 +726,7 @@
 
         self.client.stop_server(temp_server_id)
         waiters.wait_for_server_status(self.client, temp_server_id, 'SHUTOFF')
-        self.wait_for(self._get_output)
+        self.wait_for(self._get_output, temp_server_id)
 
     @decorators.idempotent_id('77eba8e0-036e-4635-944b-f7a8f3b78dc9')
     @testtools.skipUnless(CONF.compute_feature_enabled.shelve,
diff --git a/tempest/api/volume/admin/test_snapshot_manage.py b/tempest/api/volume/admin/test_snapshot_manage.py
index ab0aa38..478bd16 100644
--- a/tempest/api/volume/admin/test_snapshot_manage.py
+++ b/tempest/api/volume/admin/test_snapshot_manage.py
@@ -14,6 +14,7 @@
 #    under the License.
 
 from tempest.api.volume import base
+from tempest.common import utils
 from tempest.common import waiters
 from tempest import config
 from tempest.lib.common.utils import data_utils
@@ -31,6 +32,8 @@
      managed by Cinder from a storage back end to Cinder
     """
 
+    create_default_network = True
+
     @classmethod
     def skip_checks(cls):
         super(SnapshotManageAdminTest, cls).skip_checks()
@@ -46,8 +49,7 @@
                    "it should be a list of two elements")
             raise exceptions.InvalidConfiguration(msg)
 
-    @decorators.idempotent_id('0132f42d-0147-4b45-8501-cc504bbf7810')
-    def test_unmanage_manage_snapshot(self):
+    def _test_unmanage_manage_snapshot(self, attached_volume=False):
         """Test unmanaging and managing volume snapshot"""
         # Create a volume
         volume = self.create_volume()
@@ -55,6 +57,13 @@
         # Create a snapshot
         snapshot = self.create_snapshot(volume_id=volume['id'])
 
+        if attached_volume:
+            # Create a server
+            server = self.create_server(wait_until='SSHABLE')
+            # Attach volume to instance
+            self.attach_volume(server['id'], volume['id'],
+                               wait_for_detach=False)
+
         # Unmanage the snapshot
         # Unmanage snapshot function works almost the same as delete snapshot,
         # but it does not delete the snapshot data
@@ -100,3 +109,17 @@
         self.assertEqual(snapshot['size'], new_snapshot_info['size'])
         for key in ['volume_id', 'name', 'description', 'metadata']:
             self.assertEqual(snapshot_ref[key], new_snapshot_info[key])
+
+    @decorators.idempotent_id('0132f42d-0147-4b45-8501-cc504bbf7810')
+    def test_unmanage_manage_snapshot(self):
+        self._test_unmanage_manage_snapshot()
+
+    @decorators.idempotent_id('7c735385-e953-4198-8534-68137f72dbdc')
+    @utils.services('compute')
+    def test_snapshot_manage_with_attached_volume(self):
+        """Test manage a snapshot with an attached volume.
+
+           The case validates manage snapshot operation while
+           the parent volume is attached to an instance.
+        """
+        self._test_unmanage_manage_snapshot(attached_volume=True)
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 20495ee..4d35bbb 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -1140,14 +1140,19 @@
                                             server=server,
                                             username=username)
 
+        # Default the directory in which to write the timestamp file to /tmp
+        # and only use the mount_path as the target directory if we mounted
+        # dev_name to mount_path.
+        target_dir = '/tmp'
         if dev_name is not None:
             ssh_client.make_fs(dev_name, fs=fs)
             ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
                                                                mount_path))
-        cmd_timestamp = 'sudo sh -c "date > %s/timestamp; sync"' % mount_path
+            target_dir = mount_path
+        cmd_timestamp = 'sudo sh -c "date > %s/timestamp; sync"' % target_dir
         ssh_client.exec_command(cmd_timestamp)
         timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
-                                            % mount_path)
+                                            % target_dir)
         if dev_name is not None:
             ssh_client.exec_command('sudo umount %s' % mount_path)
         return timestamp
@@ -1172,10 +1177,15 @@
                                             server=server,
                                             username=username)
 
+        # Default the directory from which to read the timestamp file to /tmp
+        # and only use the mount_path as the target directory if we mounted
+        # dev_name to mount_path.
+        target_dir = '/tmp'
         if dev_name is not None:
             ssh_client.mount(dev_name, mount_path)
+            target_dir = mount_path
         timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
-                                            % mount_path)
+                                            % target_dir)
         if dev_name is not None:
             ssh_client.exec_command('sudo umount %s' % mount_path)
         return timestamp
@@ -1659,7 +1669,8 @@
 
     def create_encrypted_volume(self, encryption_provider, volume_type,
                                 key_size=256, cipher='aes-xts-plain64',
-                                control_location='front-end'):
+                                control_location='front-end',
+                                wait_until='available'):
         """Creates an encrypted volume"""
         volume_type = self.create_volume_type(name=volume_type)
         self.create_encryption_type(type_id=volume_type['id'],
@@ -1667,7 +1678,8 @@
                                     key_size=key_size,
                                     cipher=cipher,
                                     control_location=control_location)
-        return self.create_volume(volume_type=volume_type['name'])
+        return self.create_volume(volume_type=volume_type['name'],
+                                  wait_until=wait_until)
 
 
 class ObjectStorageScenarioTest(ScenarioTest):
diff --git a/tempest/scenario/test_encrypted_cinder_volumes.py b/tempest/scenario/test_encrypted_cinder_volumes.py
index 60abc02..753e64f 100644
--- a/tempest/scenario/test_encrypted_cinder_volumes.py
+++ b/tempest/scenario/test_encrypted_cinder_volumes.py
@@ -16,6 +16,7 @@
 import testtools
 
 from tempest.common import utils
+from tempest.common import waiters
 from tempest import config
 from tempest.lib import decorators
 from tempest.scenario import manager
@@ -56,9 +57,16 @@
     @utils.services('compute', 'volume', 'image')
     def test_encrypted_cinder_volumes_luks(self):
         """LUKs v1 decrypts volume through libvirt."""
-        server = self.launch_instance()
         volume = self.create_encrypted_volume('luks',
-                                              volume_type='luks')
+                                              volume_type='luks',
+                                              wait_until=None)
+        server = self.launch_instance()
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
+        # The volume retrieved on creation has a non-up-to-date status.
+        # Retrieval after it becomes active ensures correct details.
+        volume = self.volumes_client.show_volume(volume['id'])['volume']
+
         self.attach_detach_volume(server, volume)
 
     @decorators.idempotent_id('7abec0a3-61a0-42a5-9e36-ad3138fb38b4')
@@ -68,16 +76,30 @@
     @utils.services('compute', 'volume', 'image')
     def test_encrypted_cinder_volumes_luksv2(self):
         """LUKs v2 decrypts volume through os-brick."""
-        server = self.launch_instance()
         volume = self.create_encrypted_volume('luks2',
-                                              volume_type='luksv2')
+                                              volume_type='luksv2',
+                                              wait_until=None)
+        server = self.launch_instance()
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
+        # The volume retrieved on creation has a non-up-to-date status.
+        # Retrieval after it becomes active ensures correct details.
+        volume = self.volumes_client.show_volume(volume['id'])['volume']
+
         self.attach_detach_volume(server, volume)
 
     @decorators.idempotent_id('cbc752ed-b716-4717-910f-956cce965722')
     @decorators.attr(type='slow')
     @utils.services('compute', 'volume', 'image')
     def test_encrypted_cinder_volumes_cryptsetup(self):
-        server = self.launch_instance()
         volume = self.create_encrypted_volume('plain',
-                                              volume_type='cryptsetup')
+                                              volume_type='cryptsetup',
+                                              wait_until=None)
+        server = self.launch_instance()
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
+        # The volume retrieved on creation has a non-up-to-date status.
+        # Retrieval after it becomes active ensures correct details.
+        volume = self.volumes_client.show_volume(volume['id'])['volume']
+
         self.attach_detach_volume(server, volume)
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index cbe4122..7b819e0 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -898,10 +898,13 @@
                                        nic=spoof_nic, should_succeed=True)
         # Set a mac address by making nic down temporary
         spoof_ip_addresses = ssh_client.get_nic_ip_addresses(spoof_nic)
-        cmd = ("sudo ip link set {nic} down;"
+        dhcp_cmd = ("sudo start-stop-daemon -K -x /sbin/dhcpcd -p "
+                    "/var/run/dhcpcd/pid -o || true")
+        cmd = ("{dhcp_cmd}; sudo ip link set {nic} down;"
                "sudo ip link set dev {nic} address {mac};"
                "sudo ip link set {nic} up;"
                "sudo ip address flush dev {nic};").format(nic=spoof_nic,
+                                                          dhcp_cmd=dhcp_cmd,
                                                           mac=spoof_mac)
         for ip_address in spoof_ip_addresses:
             cmd += (
diff --git a/tempest/scenario/test_server_multinode.py b/tempest/scenario/test_server_multinode.py
index 023ad70..fe85234 100644
--- a/tempest/scenario/test_server_multinode.py
+++ b/tempest/scenario/test_server_multinode.py
@@ -14,6 +14,7 @@
 #    under the License.
 
 from tempest.common import utils
+from tempest.common import waiters
 from tempest import config
 from tempest.lib import decorators
 from tempest.lib import exceptions
@@ -61,6 +62,7 @@
         # threshold (so that things don't get crazy if you have 1000
         # compute nodes but set min to 3).
         servers = []
+        host_server_ids = {}
 
         for host in hosts[:CONF.compute.min_compute_nodes]:
             # by getting to active state here, this means this has
@@ -68,12 +70,18 @@
             # in order to use the availability_zone:host scheduler hint,
             # admin client is need here.
             inst = self.create_server(
+                wait_until=None,
                 clients=self.os_admin,
                 availability_zone='%(zone)s:%(host_name)s' % host)
+            host_server_ids[host['host_name']] = inst['id']
+
+        for host_name, server_id in host_server_ids.items():
+            waiters.wait_for_server_status(self.os_admin.servers_client,
+                                           server_id, 'ACTIVE')
             server = self.os_admin.servers_client.show_server(
-                inst['id'])['server']
+                server_id)['server']
             # ensure server is located on the requested host
-            self.assertEqual(host['host_name'], server['OS-EXT-SRV-ATTR:host'])
+            self.assertEqual(host_name, server['OS-EXT-SRV-ATTR:host'])
             servers.append(server)
 
         # make sure we really have the number of servers we think we should
diff --git a/tempest/scenario/test_server_volume_attachment.py b/tempest/scenario/test_server_volume_attachment.py
index cc8cf00..1d0d0d0 100644
--- a/tempest/scenario/test_server_volume_attachment.py
+++ b/tempest/scenario/test_server_volume_attachment.py
@@ -69,11 +69,18 @@
     @utils.services('compute', 'volume', 'image', 'network')
     def test_server_detach_rules(self):
         """Test that various methods of detaching a volume honors the rules"""
+        volume = self.create_volume(wait_until=None)
+        volume2 = self.create_volume(wait_until=None)
+
         server = self.create_server(wait_until='SSHABLE')
         servers = self.servers_client.list_servers()['servers']
         self.assertIn(server['id'], [x['id'] for x in servers])
 
-        volume = self.create_volume()
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
+        # The volume retrieved on creation has a non-up-to-date status.
+        # Retrieval after it becomes active ensures correct details.
+        volume = self.volumes_client.show_volume(volume['id'])['volume']
 
         volume = self.nova_volume_attach(server, volume)
         self.addCleanup(self.nova_volume_detach, server, volume)
@@ -143,7 +150,12 @@
                 volume['id'], connector=None, attachment_id=att_id)
 
         # Test user call to detach with mismatch is rejected
-        volume2 = self.create_volume()
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume2['id'], 'available')
+        # The volume retrieved on creation has a non-up-to-date status.
+        # Retrieval after it becomes active ensures correct details.
+        volume2 = self.volumes_client.show_volume(volume2['id'])['volume']
+
         volume2 = self.nova_volume_attach(server, volume2)
         att_id2 = volume2['attachments'][0]['attachment_id']
         self.assertRaises(
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index 4b81b9e..82f0341 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -16,6 +16,7 @@
 import testtools
 
 from tempest.common import utils
+from tempest.common import waiters
 from tempest import config
 from tempest.lib.common.utils import test_utils
 from tempest.lib import decorators
@@ -84,7 +85,7 @@
         security_group = self.create_security_group()
 
         # boot an instance and create a timestamp file in it
-        volume = self.create_volume()
+        volume = self.create_volume(wait_until=None)
         server = self.create_server(
             key_name=keypair['name'],
             security_groups=[{'name': security_group['name']}])
@@ -97,6 +98,12 @@
             ip_for_server, private_key=keypair['private_key'],
             server=server)
         disks_list_before_attach = linux_client.list_disks()
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
+        # The volume retrieved on creation has a non-up-to-date status.
+        # Retrieval after it becomes active ensures correct details.
+        volume = self.volumes_client.show_volume(volume['id'])['volume']
+
         self.nova_volume_attach(server, volume)
         volume_device_name = self._attached_volume_name(
             disks_list_before_attach, ip_for_server, keypair['private_key'])
@@ -115,7 +122,7 @@
 
         # create second volume from the snapshot(volume2)
         volume_from_snapshot = self.create_volume(
-            snapshot_id=volume_snapshot['id'])
+            snapshot_id=volume_snapshot['id'], wait_until=None)
 
         # boot second instance from the snapshot(instance2)
         server_from_snapshot = self.create_server(
@@ -135,6 +142,14 @@
         disks_list_before_attach = linux_client.list_disks()
 
         # attach volume2 to instance2
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume_from_snapshot['id'],
+                                                'available')
+        # The volume retrieved on creation has a non-up-to-date status.
+        # Retrieval after it becomes active ensures correct details.
+        volume_from_snapshot = self.volumes_client.show_volume(
+            volume_from_snapshot['id'])['volume']
+
         self.nova_volume_attach(server_from_snapshot, volume_from_snapshot)
         volume_device_name = self._attached_volume_name(
             disks_list_before_attach, ip_for_snapshot, keypair['private_key'])
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
index 0901788..8ac0b42 100644
--- a/zuul.d/integrated-gate.yaml
+++ b/zuul.d/integrated-gate.yaml
@@ -393,12 +393,25 @@
         # Keystone policies are changed to work for both system as well as
         # for project scoped, we need to keep scope check disable for
         # keystone.
-        NOVA_ENFORCE_SCOPE: true
+        # Nova and Glance have enabled the new defaults and scope by default
+        # in devstack.
         CINDER_ENFORCE_SCOPE: true
-        GLANCE_ENFORCE_SCOPE: true
         NEUTRON_ENFORCE_SCOPE: true
         PLACEMENT_ENFORCE_SCOPE: true
 
+- job:
+    name: tempest-all-rbac-old-defaults
+    parent: tempest-all
+    description: |
+      Integration test that runs all tests on RBAC old defaults.
+    vars:
+      devstack_localrc:
+        # NOTE(gmann): Nova and Glance have enabled the new defaults and scope
+        # by default in devstack so we need some jobs keep testing the old
+        # defaults until they are removed from service side.
+        NOVA_ENFORCE_SCOPE: false
+        GLANCE_ENFORCE_SCOPE: false
+
 - project-template:
     name: integrated-gate-networking
     description: |
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 3223a1e..9787526 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -118,6 +118,8 @@
         - tempest-full-test-account-py3:
             voting: false
             irrelevant-files: *tempest-irrelevant-files
+        - ironic-tempest-bios-ipmi-direct-tinyipa:
+            irrelevant-files: *tempest-irrelevant-files
         - openstack-tox-bashate:
             irrelevant-files: *tempest-irrelevant-files-2
     gate:
@@ -146,6 +148,8 @@
         #    irrelevant-files: *tempest-irrelevant-files
         - nova-live-migration:
             irrelevant-files: *tempest-irrelevant-files
+        - ironic-tempest-bios-ipmi-direct-tinyipa:
+            irrelevant-files: *tempest-irrelevant-files
     experimental:
       jobs:
         - nova-multi-cell
@@ -157,6 +161,7 @@
             irrelevant-files: *tempest-irrelevant-files
         - tempest-all:
             irrelevant-files: *tempest-irrelevant-files
+        - tempest-all-rbac-old-defaults
         - tempest-full-parallel
         - tempest-full-zed-extra-tests
         - tempest-full-yoga-extra-tests
@@ -191,6 +196,7 @@
     periodic:
       jobs:
         - tempest-all
+        - tempest-all-rbac-old-defaults
         - tempest-full-parallel
         - tempest-full-oslo-master
         - tempest-stestr-master