Merge "docs: Fix plugin registry generation failures"
diff --git a/HACKING.rst b/HACKING.rst
index e767b25..f2b800a 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -431,7 +431,7 @@
 by modifying Tempest's `lib installation script`_ for previous branches
 (because DevStack is branched).
 
-.. _lib installation script: http://git.openstack.org/cgit/openstack-dev/devstack/tree/lib/tempest
+.. _lib installation script: https://git.openstack.org/cgit/openstack-dev/devstack/tree/lib/tempest
 
 2. Bug fix on core project needing Tempest changes
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/README.rst b/README.rst
index 307ceb3..73930f1 100644
--- a/README.rst
+++ b/README.rst
@@ -165,7 +165,7 @@
 interface and when Z is incremented it's a bug fix release for the library.
 Also note that both Y and Z are reset to 0 at each increment of X.
 
-.. _semver: http://semver.org/
+.. _semver: https://semver.org/
 
 Configuration
 -------------
@@ -218,7 +218,7 @@
 argument is no longer required, however it may perform faster if included.
 
 For more information on these options and details about stestr, please see the
-`stestr documentation <http://stestr.readthedocs.io/en/latest/MANUAL.html>`_.
+`stestr documentation <https://stestr.readthedocs.io/en/latest/MANUAL.html>`_.
 
 Python 3.x
 ----------
diff --git a/doc/source/plugin.rst b/doc/source/plugin.rst
index 9958792..dc0e94c 100644
--- a/doc/source/plugin.rst
+++ b/doc/source/plugin.rst
@@ -96,7 +96,7 @@
 that users don't have to worry about inadvertently installing a Tempest plugin
 when they install another package.
 
-.. _Branchless Tempest Spec: http://specs.openstack.org/openstack/qa-specs/specs/tempest/implemented/branchless-tempest.html
+.. _Branchless Tempest Spec: https://specs.openstack.org/openstack/qa-specs/specs/tempest/implemented/branchless-tempest.html
 
 The sole advantage to integrating a plugin into an existing python project is
 that it enables you to land code changes at the same time you land test changes
diff --git a/releasenotes/notes/deprecate-scheduler-available-filters-cbca2017ba3cf2aa.yaml b/releasenotes/notes/deprecate-scheduler-available-filters-cbca2017ba3cf2aa.yaml
new file mode 100644
index 0000000..d0c3a7d
--- /dev/null
+++ b/releasenotes/notes/deprecate-scheduler-available-filters-cbca2017ba3cf2aa.yaml
@@ -0,0 +1,13 @@
+---
+deprecations:
+  - |
+    The ``scheduler_available_filters`` option is deprecated in favor of
+    ``scheduler_enabled_filters``. The new name better reflects what the
+    option means. The default value of ``scheduler_enabled_filters`` matches
+    the default value of Nova's ``enabled_filters``. The default of
+    ``scheduler_available_filters`` was ``all``, which caused confusion:
+    sometimes it was understood to mean that the default Nova filters are
+    enabled, other times that all filters are enabled. While ``all`` is still
+    accepted by ``scheduler_enabled_filters`` for backwards compatibility, it
+    is strongly recommended to provide an explicit list of filters that
+    matches what is configured in nova.conf.
diff --git a/roles/run-tempest/README.rst b/roles/run-tempest/README.rst
index 71b8e4f..e1787b6 100644
--- a/roles/run-tempest/README.rst
+++ b/roles/run-tempest/README.rst
@@ -56,3 +56,13 @@
                (?x)    # Ignore comments and whitespaces
                # Line with only a comment.
                (tempest.api.identity).*$
+
+.. zuul:rolevar:: tox_extra_args
+   :default: ''
+
+   String of extra command line options to pass to tox.
+
+   Here is an example of running tox with the ``--sitepackages`` option::
+
+     vars:
+       tox_extra_args: --sitepackages
diff --git a/roles/run-tempest/defaults/main.yaml b/roles/run-tempest/defaults/main.yaml
index c89eb93..06918b5 100644
--- a/roles/run-tempest/defaults/main.yaml
+++ b/roles/run-tempest/defaults/main.yaml
@@ -2,3 +2,4 @@
 tempest_test_regex: ''
 tox_envlist: smoke
 tempest_black_regex: ''
+tox_extra_args: ''
diff --git a/roles/run-tempest/tasks/main.yaml b/roles/run-tempest/tasks/main.yaml
index 54ddc71..16086aa 100644
--- a/roles/run-tempest/tasks/main.yaml
+++ b/roles/run-tempest/tasks/main.yaml
@@ -35,7 +35,7 @@
       when: blacklist_stat.stat.exists
 
 - name: Run Tempest
-  command: tox -e {{tox_envlist}} -- {{tempest_test_regex|quote}} {{blacklist_option|default('')}} \
+  command: tox -e {{tox_envlist}} {{tox_extra_args}} -- {{tempest_test_regex|quote}} {{blacklist_option|default('')}} \
             --concurrency={{tempest_concurrency|default(default_concurrency)}} \
             --black-regex={{tempest_black_regex|quote}}
   args:
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index 2a5d607..6434161 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -24,6 +24,7 @@
 from tempest.common import waiters
 from tempest import config
 from tempest.lib.common.utils.linux import remote_client
+from tempest.lib.common.utils import test_utils
 from tempest.lib import decorators
 from tempest.lib import exceptions as lib_exc
 
@@ -340,20 +341,61 @@
     def test_add_remove_fixed_ip(self):
         # Add and Remove the fixed IP to server.
         server, ifs = self._create_server_get_interfaces()
-        interface_count = len(ifs)
-        self.assertGreater(interface_count, 0)
+        original_interface_count = len(ifs)  # This is the number of ports.
+        self.assertGreater(original_interface_count, 0)
+        # Get the starting list of IPs on the server.
+        addresses = self.os_primary.servers_client.list_addresses(
+            server['id'])['addresses']
+        # There should be one entry for the single network mapped to a list of
+        # addresses, which at this point should have at least one entry.
+        # Note that we could start with two addresses depending on how tempest
+        # is configured for using floating IPs.
+        self.assertEqual(1, len(addresses), addresses)  # number of networks
+        # Keep track of the original addresses so we can know which IP is new.
+        original_ips = [addr['addr'] for addr in list(addresses.values())[0]]
+        original_ip_count = len(original_ips)
+        self.assertGreater(original_ip_count, 0, addresses)  # at least 1
         network_id = ifs[0]['net_id']
+        # Add another fixed IP to the server. This should result in another
+        # fixed IP on the same network (and same port since we only have one
+        # port).
         self.servers_client.add_fixed_ip(server['id'], networkId=network_id)
-        # Remove the fixed IP from server.
+        # Wait for the IP count to increase by one.
+
+        def _wait_for_ip_increase():
+            _addresses = self.os_primary.servers_client.list_addresses(
+                server['id'])['addresses']
+            return len(list(_addresses.values())[0]) == original_ip_count + 1
+
+        if not test_utils.call_until_true(
+                _wait_for_ip_increase, CONF.compute.build_timeout,
+                CONF.compute.build_interval):
+            raise lib_exc.TimeoutException(
+                'Timed out while waiting for IP count to increase.')
+
+        # Remove the fixed IP that we just added.
         server_detail = self.os_primary.servers_client.show_server(
             server['id'])['server']
         # Get the Fixed IP from server.
         fixed_ip = None
         for ip_set in server_detail['addresses']:
             for ip in server_detail['addresses'][ip_set]:
-                if ip['OS-EXT-IPS:type'] == 'fixed':
+                if (ip['OS-EXT-IPS:type'] == 'fixed' and
+                        ip['addr'] not in original_ips):
                     fixed_ip = ip['addr']
                     break
             if fixed_ip is not None:
                 break
         self.servers_client.remove_fixed_ip(server['id'], address=fixed_ip)
+        # Wait for the IP count to decrease by one.
+
+        def _wait_for_ip_decrease():
+            _addresses = self.os_primary.servers_client.list_addresses(
+                server['id'])['addresses']
+            return len(list(_addresses.values())[0]) == original_ip_count
+
+        if not test_utils.call_until_true(
+                _wait_for_ip_decrease, CONF.compute.build_timeout,
+                CONF.compute.build_interval):
+            raise lib_exc.TimeoutException(
+                'Timed out while waiting for IP count to decrease.')
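The two wait helpers added above rely on tempest.lib.common.utils.test_utils.call_until_true, which polls a predicate until it returns True or the timeout expires. A minimal standalone sketch of that pattern (the counter and predicate below are illustrative, not part of the change)::

    from tempest.lib.common.utils import test_utils

    # Poll the predicate every second for up to 10 seconds. call_until_true()
    # returns True as soon as the predicate does, or False once the timeout
    # expires, which is why the test above raises TimeoutException on False.
    state = {'calls': 0}

    def _predicate():
        state['calls'] += 1
        return state['calls'] >= 3

    if not test_utils.call_until_true(_predicate, 10, 1):
        raise RuntimeError('predicate never became true')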
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index b0ef3bc..6629794 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -24,11 +24,11 @@
 CONF = config.CONF
 
 
-class ServerRescueTestJSON(base.BaseV2ComputeTest):
+class ServerRescueTestBase(base.BaseV2ComputeTest):
 
     @classmethod
     def skip_checks(cls):
-        super(ServerRescueTestJSON, cls).skip_checks()
+        super(ServerRescueTestBase, cls).skip_checks()
         if not CONF.compute_feature_enabled.rescue:
             msg = "Server rescue not available."
             raise cls.skipException(msg)
@@ -36,11 +36,11 @@
     @classmethod
     def setup_credentials(cls):
         cls.set_network_resources(network=True, subnet=True, router=True)
-        super(ServerRescueTestJSON, cls).setup_credentials()
+        super(ServerRescueTestBase, cls).setup_credentials()
 
     @classmethod
     def resource_setup(cls):
-        super(ServerRescueTestJSON, cls).resource_setup()
+        super(ServerRescueTestBase, cls).resource_setup()
 
         password = data_utils.rand_password()
         server = cls.create_test_server(adminPass=password,
@@ -50,6 +50,9 @@
                                        'RESCUE')
         cls.rescued_server_id = server['id']
 
+
+class ServerRescueTestJSON(ServerRescueTestBase):
+
     @decorators.idempotent_id('fd032140-714c-42e4-a8fd-adcd8df06be6')
     def test_rescue_unrescue_instance(self):
         password = data_utils.rand_password()
@@ -62,6 +65,15 @@
         waiters.wait_for_server_status(self.servers_client, server['id'],
                                        'ACTIVE')
 
+
+class ServerRescueTestJSONUnderV235(ServerRescueTestBase):
+
+    max_microversion = '2.35'
+
+    # TODO(zhufl): After 2.35 we should switch to the neutron client to
+    # create the floating IP, but that needs admin credentials, so the test
+    # cases will have to be added somewhere in the admin directory.
+
     @decorators.idempotent_id('4842e0cf-e87d-4d9d-b61f-f4791da3cacc')
     @testtools.skipUnless(CONF.network.public_network_id,
                           'The public_network_id option must be specified.')
diff --git a/tempest/api/identity/admin/v2/test_services.py b/tempest/api/identity/admin/v2/test_services.py
index e2ed5ef..03543ac 100644
--- a/tempest/api/identity/admin/v2/test_services.py
+++ b/tempest/api/identity/admin/v2/test_services.py
@@ -89,14 +89,10 @@
             service = self.services_client.create_service(
                 name=name, type=s_type,
                 description=description)['OS-KSADM:service']
+            self.addCleanup(self.services_client.delete_service, service['id'])
             services.append(service)
         service_ids = [svc['id'] for svc in services]
 
-        def delete_services():
-            for service_id in service_ids:
-                self.services_client.delete_service(service_id)
-
-        self.addCleanup(delete_services)
         # List and Verify Services
         body = self.services_client.list_services()['OS-KSADM:services']
         found = [serv for serv in body if serv['id'] in service_ids]
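The cleanup change above (and the similar ones in the trust and EC2 credential tests below) follows one pattern: register a cleanup immediately after each resource is created, instead of one bulk cleanup registered later. A minimal sketch of why that is safer, using plain unittest rather than the Tempest base classes::

    import unittest

    class CleanupOrderExample(unittest.TestCase):
        # Cleanups run in reverse (LIFO) order after the test, and a resource
        # that was never created never has a cleanup registered for it, so a
        # failure partway through the creation loop leaks nothing.
        def test_per_resource_cleanup(self):
            created = []
            for name in ('svc-1', 'svc-2'):
                created.append(name)
                self.addCleanup(created.remove, name)
            self.assertEqual(['svc-1', 'svc-2'], created)

    if __name__ == '__main__':
        unittest.main()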
diff --git a/tempest/api/identity/admin/v3/test_trusts.py b/tempest/api/identity/admin/v3/test_trusts.py
index 2530072..83b3c30 100644
--- a/tempest/api/identity/admin/v3/test_trusts.py
+++ b/tempest/api/identity/admin/v3/test_trusts.py
@@ -39,7 +39,6 @@
         # Use alt_username as the trustee
         self.trust_id = None
         self.create_trustor_and_roles()
-        self.addCleanup(self.cleanup_user_and_roles)
 
     def tearDown(self):
         if self.trust_id:
@@ -55,6 +54,7 @@
             trustor_project_name,
             domain_id=CONF.identity.default_domain_id)['project']
         self.trustor_project_id = project['id']
+        self.addCleanup(self.projects_client.delete_project, project['id'])
         self.assertIsNotNone(self.trustor_project_id)
 
         # Create a trustor User
@@ -69,6 +69,7 @@
             email=u_email,
             project_id=self.trustor_project_id,
             domain_id=CONF.identity.default_domain_id)['user']
+        self.addCleanup(self.users_client.delete_user, user['id'])
         self.trustor_user_id = user['id']
 
         # And two roles, one we'll delegate and one we won't
@@ -76,10 +77,12 @@
         self.not_delegated_role = data_utils.rand_name('NotDelegatedRole')
 
         role = self.roles_client.create_role(name=self.delegated_role)['role']
+        self.addCleanup(self.roles_client.delete_role, role['id'])
         self.delegated_role_id = role['id']
 
         role = self.roles_client.create_role(
             name=self.not_delegated_role)['role']
+        self.addCleanup(self.roles_client.delete_role, role['id'])
         self.not_delegated_role_id = role['id']
 
         # Assign roles to trustor
@@ -109,16 +112,6 @@
         os = clients.Manager(credentials=creds)
         self.trustor_client = os.trusts_client
 
-    def cleanup_user_and_roles(self):
-        if self.trustor_user_id:
-            self.users_client.delete_user(self.trustor_user_id)
-        if self.trustor_project_id:
-            self.projects_client.delete_project(self.trustor_project_id)
-        if self.delegated_role_id:
-            self.roles_client.delete_role(self.delegated_role_id)
-        if self.not_delegated_role_id:
-            self.roles_client.delete_role(self.not_delegated_role_id)
-
     def create_trust(self, impersonate=True, expires=None):
 
         trust_create = self.trustor_client.create_trust(
diff --git a/tempest/api/identity/v2/test_ec2_credentials.py b/tempest/api/identity/v2/test_ec2_credentials.py
index 237e728..9981ef8 100644
--- a/tempest/api/identity/v2/test_ec2_credentials.py
+++ b/tempest/api/identity/v2/test_ec2_credentials.py
@@ -57,18 +57,19 @@
             self.creds.user_id,
             tenant_id=self.creds.tenant_id)["credential"]
         created_creds.append(creds1['access'])
+        self.addCleanup(
+            self.non_admin_users_client.delete_user_ec2_credential,
+            self.creds.user_id, creds1['access'])
+
         # create second ec2 credentials
         creds2 = self.non_admin_users_client.create_user_ec2_credential(
             self.creds.user_id,
             tenant_id=self.creds.tenant_id)["credential"]
         created_creds.append(creds2['access'])
-        # add credentials to be cleaned up
-        self.addCleanup(
-            self.non_admin_users_client.delete_user_ec2_credential,
-            self.creds.user_id, creds1['access'])
         self.addCleanup(
             self.non_admin_users_client.delete_user_ec2_credential,
             self.creds.user_id, creds2['access'])
+
         # get the list of user ec2 credentials
         resp = self.non_admin_users_client.list_user_ec2_credentials(
             self.creds.user_id)["credentials"]
diff --git a/tempest/api/network/test_dhcp_ipv6.py b/tempest/api/network/test_dhcp_ipv6.py
index 0730d58..399954c 100644
--- a/tempest/api/network/test_dhcp_ipv6.py
+++ b/tempest/api/network/test_dhcp_ipv6.py
@@ -135,7 +135,7 @@
             real_ip, eui_ip = self._get_ips_from_subnet(**kwargs)
             self._clean_network()
             self.assertEqual(eui_ip, real_ip,
-                             ('Real port IP %s shall be equal to EUI-64 %s'
+                             ('Real port IP %s shall be equal to EUI-64 %s '
                               'when ipv6_ra_mode=%s,ipv6_address_mode=%s') % (
                                  real_ip, eui_ip,
                                  ra_mode if ra_mode else "Off",
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index f2730b3..e218d5a 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -44,15 +44,14 @@
 def is_scheduler_filter_enabled(filter_name):
     """Check the list of enabled compute scheduler filters from config.
 
-    This function checks whether the given compute scheduler filter is
-    available and configured in the config file. If the
-    scheduler_available_filters option is set to 'all' (Default value. which
-    means default filters are configured in nova) in tempest.conf then, this
-    function returns True with assumption that requested filter 'filter_name'
-    is one of available filter in nova ("nova.scheduler.filters.all_filters").
+    This function checks whether the given compute scheduler filter is enabled
+    in the nova config file. If the scheduler_enabled_filters option is set to
+    'all' in tempest.conf, then this function returns True with the assumption
+    that the requested filter 'filter_name' is one of the enabled filters in
+    nova ("nova.scheduler.filters.all_filters").
     """
 
-    filters = CONF.compute_feature_enabled.scheduler_available_filters
+    filters = CONF.compute_feature_enabled.scheduler_enabled_filters
     if not filters:
         return False
     if 'all' in filters:
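For context, callers of this helper typically skip a test when the required filter is not enabled. A hedged sketch of such a caller (the test class and the SameHostFilter name are illustrative, and a configured Tempest environment is assumed)::

    import testtools

    from tempest.common import compute

    class SameHostSketch(testtools.TestCase):
        def test_needs_same_host_filter(self):
            # Skip unless SameHostFilter appears in
            # CONF.compute_feature_enabled.scheduler_enabled_filters, or the
            # option is set to 'all'.
            if not compute.is_scheduler_filter_enabled('SameHostFilter'):
                self.skipTest('SameHostFilter is not enabled.')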
diff --git a/tempest/config.py b/tempest/config.py
index c0a2d60..6c6ff58 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -472,20 +472,30 @@
     cfg.BoolOpt('config_drive',
                 default=True,
                 help='Enable special configuration drive with metadata.'),
-    cfg.ListOpt('scheduler_available_filters',
-                default=['all'],
-                help="A list of enabled filters that nova will accept as hints"
-                     " to the scheduler when creating a server. A special "
-                     "entry 'all' indicates all filters that are included "
-                     "with nova are enabled. Empty list indicates all filters "
-                     "are disabled. The full list of available filters is in "
-                     "nova.conf: filter_scheduler.enabled_filters. If the "
+    cfg.ListOpt('scheduler_enabled_filters',
+                default=["RetryFilter", "AvailabilityZoneFilter",
+                         "ComputeFilter", "ComputeCapabilitiesFilter",
+                         "ImagePropertiesFilter",
+                         "ServerGroupAntiAffinityFilter",
+                         "ServerGroupAffinityFilter"],
+                help="A list of enabled filters that Nova will accept as "
+                     "hints to the scheduler when creating a server. If the "
                      "default value is overridden in nova.conf by the test "
                      "environment (which means that a different set of "
                      "filters is enabled than what is included in Nova by "
-                     "default) then, this option must be configured to "
+                     "default), then this option must be configured to "
                      "contain the same filters that Nova uses in the test "
-                     "environment."),
+                     "environment. A special entry 'all' indicates all "
+                     "filters that are included with Nova are enabled. If "
+                     "using 'all', be sure to enable all filters in "
+                     "nova.conf, as tests can fail in unpredictable ways if "
+                     "Nova's and Tempest's enabled filters don't match. "
+                     "Empty list indicates all filters are disabled. The "
+                     "full list of enabled filters is in nova.conf: "
+                     "filter_scheduler.enabled_filters.",
+                deprecated_opts=[cfg.DeprecatedOpt(
+                    'scheduler_available_filters',
+                    group='compute-feature-enabled')]),
     cfg.BoolOpt('swap_volume',
                 default=False,
                 help='Does the test environment support in-place swapping of '
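The deprecated_opts entry above is what keeps existing scheduler_available_filters settings working. A standalone oslo.config sketch of that fallback (the group and option names mirror the change; the default value and project name are illustrative)::

    from oslo_config import cfg

    opts = [
        cfg.ListOpt('scheduler_enabled_filters',
                    default=['ComputeFilter'],
                    deprecated_opts=[cfg.DeprecatedOpt(
                        'scheduler_available_filters',
                        group='compute-feature-enabled')]),
    ]

    conf = cfg.ConfigOpts()
    conf.register_opts(opts, group='compute-feature-enabled')
    conf([], project='example')  # no config files given, so defaults apply
    # A config file that still sets scheduler_available_filters under
    # [compute-feature-enabled] would be read into the new option, with a
    # deprecation warning logged by oslo.config.
    print(conf['compute-feature-enabled'].scheduler_enabled_filters)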
diff --git a/tempest/lib/common/jsonschema_validator.py b/tempest/lib/common/jsonschema_validator.py
index 9a35b76..bbf5e89 100644
--- a/tempest/lib/common/jsonschema_validator.py
+++ b/tempest/lib/common/jsonschema_validator.py
@@ -12,9 +12,8 @@
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
-import base64
-
 import jsonschema
+from oslo_serialization import base64
 from oslo_utils import timeutils
 import six
 
@@ -46,9 +45,7 @@
     try:
         if isinstance(instance, six.text_type):
             instance = instance.encode('utf-8')
-        base64.decodestring(instance)
-    except base64.binascii.Error:
-        return False
+        base64.decode_as_bytes(instance)
     except TypeError:
         # The name must be string type. If instance isn't string type, the
         # TypeError will be raised at here.
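For reference, the checker being modified plugs into jsonschema's FormatChecker machinery. A self-contained sketch of that wiring (not Tempest's actual checker; the schema and sample value are illustrative)::

    import jsonschema
    from oslo_serialization import base64

    checker = jsonschema.FormatChecker()

    @checker.checks('base64', raises=(TypeError, ValueError))
    def _is_base64(instance):
        # decode_as_bytes() raises when the value cannot be base64-decoded;
        # the 'raises' tuple above turns that into a format violation.
        base64.decode_as_bytes(instance)
        return True

    schema = {'type': 'string', 'format': 'base64'}
    jsonschema.validate('dGVtcGVzdA==', schema, format_checker=checker)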
diff --git a/tempest/lib/services/volume/v3/backups_client.py b/tempest/lib/services/volume/v3/backups_client.py
index f2d2d21..fb64333 100644
--- a/tempest/lib/services/volume/v3/backups_client.py
+++ b/tempest/lib/services/volume/v3/backups_client.py
@@ -104,7 +104,12 @@
         return rest_client.ResponseBody(resp, body)
 
     def import_backup(self, **kwargs):
-        """Import backup metadata record."""
+        """Import backup metadata record.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://developer.openstack.org/api-ref/block-storage/v3/index.html#import-a-backup
+        """
         post_body = json.dumps({'backup-record': kwargs})
         resp, body = self.post("backups/import_record", post_body)
         body = json.loads(body)
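As a usage illustration, import_backup is normally fed the record produced by export_backup. A hypothetical helper (not part of this change; it assumes an already-authenticated backups_client instance)::

    def reimport_backup(backups_client, backup_id):
        # Export the backup's metadata record, then import it again. The
        # record carries 'backup_service' and 'backup_url', which become the
        # keyword arguments accepted by import_backup().
        record = backups_client.export_backup(backup_id)['backup-record']
        imported = backups_client.import_backup(
            backup_service=record['backup_service'],
            backup_url=record['backup_url'])['backup']
        return imported['id']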
diff --git a/tempest/scenario/test_network_v6.py b/tempest/scenario/test_network_v6.py
index 57a560c..438ee01 100644
--- a/tempest/scenario/test_network_v6.py
+++ b/tempest/scenario/test_network_v6.py
@@ -280,6 +280,7 @@
                                dualnet=True)
 
     @decorators.idempotent_id('9178ad42-10e4-47e9-8987-e02b170cc5cd')
+    @decorators.attr(type='slow')
     @utils.services('compute', 'network')
     def test_dualnet_multi_prefix_slaac(self):
         self._prepare_and_test(address6_mode='slaac', n_subnets6=2,