Merge "Get console output for any exception in ssh session" into mcp/caracal
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index 8984d1d..688b31b 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -109,21 +109,24 @@
         """
         port = self.ports_client.show_port(port_id)['port']
         device_id = port['device_id']
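+        # The port's dns_name (if present) is tracked as well, since it is
+        # expected to be cleared together with device_id on detach.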
+        dns_name = port.get('dns_name')
         start = int(time.time())
 
         # NOTE(mriedem): Nova updates the port's device_id to '' rather than
         # None, but it's not contractual so handle Falsey either way.
-        while device_id:
+        while any([device_id, dns_name]):
             time.sleep(self.build_interval)
             port = self.ports_client.show_port(port_id)['port']
             device_id = port['device_id']
+            dns_name = port.get('dns_name')
 
             timed_out = int(time.time()) - start >= self.build_timeout
 
-            if device_id and timed_out:
-                message = ('Port %s failed to detach (device_id %s) within '
-                           'the required time (%s s).' %
-                           (port_id, device_id, self.build_timeout))
+            if any([device_id, dns_name]) and timed_out:
+                message = ('Port %s failed to detach (device_id %s, '
+                           'dns_name %s) within the required time (%s s).' %
+                           (port_id, device_id or 'cleared',
+                            dns_name or 'cleared', self.build_timeout))
                 raise lib_exc.TimeoutException(message)
 
         return port
diff --git a/tempest/api/compute/servers/test_novnc.py b/tempest/api/compute/servers/test_novnc.py
index 1308b19..c90aea8 100644
--- a/tempest/api/compute/servers/test_novnc.py
+++ b/tempest/api/compute/servers/test_novnc.py
@@ -64,7 +64,15 @@
 
     def _validate_novnc_html(self, vnc_url):
         """Verify we can connect to novnc and get back the javascript."""
-        resp = urllib3.PoolManager().request('GET', vnc_url)
+        cert_params = {}
+
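+        # Honour the tempest TLS options: skip certificate verification when
+        # disable_ssl_certificate_validation is set, otherwise validate the
+        # server certificate against the configured CA bundle.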
+        if CONF.identity.disable_ssl_certificate_validation:
+            cert_params['cert_reqs'] = "CERT_NONE"
+        else:
+            cert_params["cert_reqs"] = "CERT_REQUIRED"
+            cert_params["ca_certs"] = CONF.identity.ca_certificates_file
+
+        resp = urllib3.PoolManager(**cert_params).request('GET', vnc_url)
         # Make sure that the GET request was accepted by the novncproxy
         self.assertEqual(resp.status, 200, 'Got a Bad HTTP Response on the '
                          'initial call: ' + str(resp.status))
diff --git a/tempest/api/compute/test_quotas.py b/tempest/api/compute/test_quotas.py
index 38ca53b..286e0a5 100644
--- a/tempest/api/compute/test_quotas.py
+++ b/tempest/api/compute/test_quotas.py
@@ -13,10 +13,15 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import testtools
+
 from tempest.api.compute import base
 from tempest.common import tempest_fixtures as fixtures
+from tempest import config
 from tempest.lib import decorators
 
+CONF = config.CONF
+
 
 class QuotasTestJSON(base.BaseV2ComputeTest):
     """Test compute quotas"""
@@ -76,6 +81,8 @@
         for quota in expected_quota_set:
             self.assertIn(quota, quota_set.keys())
 
+    @testtools.skipIf(not CONF.auth.use_dynamic_credentials,
+                      'does not support static credentials')
     @decorators.idempotent_id('cd65d997-f7e4-4966-a7e9-d5001b674fdc')
     def test_compare_tenant_quotas_with_default_quotas(self):
         """Test tenants are created with the default compute quota values"""
diff --git a/tempest/api/network/test_subnetpools_extensions.py b/tempest/api/network/test_subnetpools_extensions.py
index 689844b..f398062 100644
--- a/tempest/api/network/test_subnetpools_extensions.py
+++ b/tempest/api/network/test_subnetpools_extensions.py
@@ -45,6 +45,9 @@
         if not utils.is_extension_enabled('subnet_allocation', 'network'):
             msg = "subnet_allocation extension not enabled."
             raise cls.skipException(msg)
+        if not utils.is_extension_enabled('default-subnetpools', 'network'):
+            msg = "default-subnetpools extension not enabled."
+            raise cls.skipException(msg)
 
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('62595970-ab1c-4b7f-8fcc-fddfe55e9811')
diff --git a/tempest/api/object_storage/test_container_quotas.py b/tempest/api/object_storage/test_container_quotas.py
index f055d19..d4bf0d7 100644
--- a/tempest/api/object_storage/test_container_quotas.py
+++ b/tempest/api/object_storage/test_container_quotas.py
@@ -88,9 +88,12 @@
         nafter = self._get_bytes_used()
         self.assertEqual(nbefore, nafter)
 
+    # NOTE(vsaienko): the quota implementation on an active/active rgw
+    # deployment has no coordination, so quota overflow might happen. This
+    # is an upstream limitation we can't fix downstream; drop the smoke tag.
+    # Related-Prod: PRODX-11581
     @decorators.idempotent_id('3a387039-697a-44fc-a9c0-935de31f426b')
     @utils.requires_ext(extension='container_quotas', service='object')
-    @decorators.attr(type="smoke")
     def test_upload_too_many_objects(self):
         """Attempts to upload many objects that exceeds the count limit."""
         for _ in range(QUOTA_COUNT):
diff --git a/tempest/api/volume/admin/test_volume_retype.py b/tempest/api/volume/admin/test_volume_retype.py
index 7c25f3d..4cb2262 100644
--- a/tempest/api/volume/admin/test_volume_retype.py
+++ b/tempest/api/volume/admin/test_volume_retype.py
@@ -179,10 +179,11 @@
         keys_with_change = ('volume_type',)
 
         # NOTE(vsaienko): with active-active cluster deployment volume
-        # services registered with different hostname.
-        if CONF.volume_feature_enabled.cluster_active_active:
-            keys_with_change += ('os-vol-host-attr:host',)
-        else:
+        # services are registered with different hostnames and we don't know
+        # which service processes the request, so the host might or might not
+        # change. TODO(vsaienko): Revisit this logic once this bug is fixed:
+        # https://bugs.launchpad.net/cinder/+bug/1874414
+        if not CONF.volume_feature_enabled.cluster_active_active:
             keys_with_no_change += ('os-vol-host-attr:host',)
 
         # Check the volume information after the retype
diff --git a/tempest/lib/common/rest_client.py b/tempest/lib/common/rest_client.py
index a2f2931..4e1dc59 100644
--- a/tempest/lib/common/rest_client.py
+++ b/tempest/lib/common/rest_client.py
@@ -94,6 +94,7 @@
         self.build_interval = build_interval
         self.build_timeout = build_timeout
         self.trace_requests = trace_requests
+        self.ca_certs = ca_certs
 
         self._skip_path = False
         self.general_header_lc = set(('cache-control', 'connection',
diff --git a/tempest/lib/services/object_storage/object_client.py b/tempest/lib/services/object_storage/object_client.py
index 65e8227..c7ac80f 100644
--- a/tempest/lib/services/object_storage/object_client.py
+++ b/tempest/lib/services/object_storage/object_client.py
@@ -167,11 +167,14 @@
         :param parsed_url: parsed url of the remote location
         """
         context = None
-        # If CONF.identity.disable_ssl_certificate_validation is true,
-        # do not check ssl certification.
-        if self.dscv:
-            context = ssl._create_unverified_context()
         if parsed_url.scheme == 'https':
+            # If CONF.identity.disable_ssl_certificate_validation is true,
+            # do not check ssl certification.
+            if self.dscv:
+                context = ssl._create_unverified_context()
+            else:
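+                # Verify the server certificate against the CA bundle
+                # configured for tempest.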
+                context = ssl.create_default_context(
+                    cafile=self.ca_certs)
             conn = httplib.HTTPSConnection(parsed_url.netloc,
                                            context=context)
         else:
diff --git a/tempest/serial_tests/api/admin/test_aggregates.py b/tempest/serial_tests/api/admin/test_aggregates.py
index cedeec0..ce54957 100644
--- a/tempest/serial_tests/api/admin/test_aggregates.py
+++ b/tempest/serial_tests/api/admin/test_aggregates.py
@@ -222,6 +222,9 @@
     @decorators.idempotent_id('96be03c7-570d-409c-90f8-e4db3c646996')
     def test_aggregate_add_host_create_server_with_az(self):
         """Test adding a host to the given aggregate and creating a server"""
+        if CONF.production:
+            raise self.skipException("Not allowed to run this test "
+                                     "on production environment")
         self.useFixture(fixtures.LockFixture('availability_zone'))
         az_name = data_utils.rand_name(
             prefix=CONF.resource_name_prefix, name=self.az_name_prefix)
@@ -235,12 +238,20 @@
             if agg['availability_zone']:
                 hosts_in_zone.extend(agg['hosts'])
         hosts = [v for v in self.hosts_available if v not in hosts_in_zone]
-        if not hosts:
+        hosts_available = []
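+        # Exclude hosts whose hypervisor already reports running servers.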
+        for host in hosts:
+            hypervisor_servers = (
+                self.os_admin.hypervisor_client.list_servers_on_hypervisor(
+                    host)["hypervisors"][0].get("servers", None))
+            if not hypervisor_servers:
+                hosts_available.append(host)
+        if not hosts_available:
             raise self.skipException("All hosts are already in other "
-                                     "availability zones, so can't add "
+                                     "availability zones or have running "
+                                     "instances, so can't add "
                                      "host to aggregate. \nAggregates list: "
                                      "%s" % aggregates)
-        host = hosts[0]
+        host = hosts_available[0]
 
         self.client.add_host(aggregate['id'], host=host)
         self.addCleanup(self.client.remove_host, aggregate['id'], host=host)
diff --git a/tempest/serial_tests/scenario/test_aggregates_basic_ops.py b/tempest/serial_tests/scenario/test_aggregates_basic_ops.py
index a831fe5..cc45297 100644
--- a/tempest/serial_tests/scenario/test_aggregates_basic_ops.py
+++ b/tempest/serial_tests/scenario/test_aggregates_basic_ops.py
@@ -63,7 +63,11 @@
         hosts_available = []
         for host in svc_list:
             if (host['state'] == 'up' and host['status'] == 'enabled'):
-                hosts_available.append(host['host'])
+                hypervisor_servers = (
+                    self.os_admin.hypervisor_client.list_servers_on_hypervisor(
+                        host["host"])["hypervisors"][0].get("servers", None))
+                if not hypervisor_servers:
+                    hosts_available.append(host["host"])
         aggregates = self.aggregates_client.list_aggregates()['aggregates']
         hosts_in_zone = []
         for agg in aggregates:
@@ -72,7 +76,8 @@
         hosts = [v for v in hosts_available if v not in hosts_in_zone]
         if not hosts:
             raise self.skipException("All hosts are already in other "
-                                     "availability zones, so can't add "
+                                     "availability zones or have running "
+                                     "instances, so can't add "
                                      "host to aggregate. \nAggregates list: "
                                      "%s" % aggregates)
         return hosts[0]
@@ -120,6 +125,9 @@
     @decorators.attr(type='slow')
     @utils.services('compute')
     def test_aggregate_basic_ops(self):
+        if CONF.production:
+            raise self.skipException("Not allowed to run this test "
+                                     "on production environment")
         self.useFixture(fixtures.LockFixture('availability_zone'))
         az = 'foo_zone'
         aggregate_name = data_utils.rand_name(