Merge "Added tempest subcommand for subunit_describe_calls"
diff --git a/.zuul.yaml b/.zuul.yaml
index 6685f3f..403c93d 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -100,6 +100,13 @@
devstack_localrc:
ENABLE_FILE_INJECTION: true
ENABLE_VOLUME_MULTIATTACH: true
+ devstack_services:
+ # NOTE(mriedem): Disable the cinder-backup service from tempest-full
+ # since tempest-full is in the integrated-gate project template but
+ # the backup tests do not really involve other services so they should
+ # be run in some more cinder-specific job, especially because the
+ # tests fail at a high rate (see bugs 1483434, 1813217, 1745168)
+ c-bak: false
- job:
name: tempest-full-oslo-master
@@ -169,6 +176,12 @@
s-object: false
s-proxy: false
# without Swift, c-bak cannot run (in the Gate at least)
+ # NOTE(mriedem): Disable the cinder-backup service from
+ # tempest-full-py3 since tempest-full-py3 is in the integrated-gate-py3
+ # project template but the backup tests do not really involve other
+ # services so they should be run in some more cinder-specific job,
+ # especially because the tests fail at a high rate (see bugs 1483434,
+ # 1813217, 1745168)
c-bak: false
- job:
@@ -339,14 +352,18 @@
vars:
devstack_localrc:
USE_PYTHON3: true
+ group-vars:
+ subnode:
+ devstack_localrc:
+ USE_PYTHON3: true
- job:
- name: tempest-full-py3-opensuse150
+ name: tempest-full-py3-opensuse15
parent: tempest-full-py3
- nodeset: devstack-single-node-opensuse-150
+ nodeset: devstack-single-node-opensuse-15
description: |
Base integration test with Neutron networking and py36 running
- on openSUSE Leap 15.0
+ on openSUSE Leap 15.x
voting: false
- job:
@@ -403,6 +420,10 @@
s-proxy: false
# without Swift, c-bak cannot run (in the Gate at least)
c-bak: false
+ group-vars:
+ subnode:
+ devstack_localrc:
+ USE_PYTHON3: true
- job:
name: tempest-full-train
@@ -677,8 +698,6 @@
irrelevant-files: *tempest-irrelevant-files
- neutron-grenade-multinode:
irrelevant-files: *tempest-irrelevant-files
- - neutron-grenade:
- irrelevant-files: *tempest-irrelevant-files
- grenade-py3:
irrelevant-files: *tempest-irrelevant-files
- devstack-plugin-ceph-tempest:
@@ -708,7 +727,8 @@
- tempest-full-test-account-no-admin-py3:
voting: false
irrelevant-files: *tempest-irrelevant-files
- - openstack-tox-bashate
+ - openstack-tox-bashate:
+ irrelevant-files: *tempest-irrelevant-files-2
gate:
jobs:
- tempest-slow-py3:
@@ -717,8 +737,6 @@
irrelevant-files: *tempest-irrelevant-files
- tempest-full:
irrelevant-files: *tempest-irrelevant-files
- - neutron-grenade:
- irrelevant-files: *tempest-irrelevant-files
- grenade-py3:
irrelevant-files: *tempest-irrelevant-files
- tempest-ipv6-only:
@@ -741,7 +759,7 @@
irrelevant-files: *tempest-irrelevant-files
- tempest-pg-full:
irrelevant-files: *tempest-irrelevant-files
- - tempest-full-py3-opensuse150:
+ - tempest-full-py3-opensuse15:
irrelevant-files: *tempest-irrelevant-files
periodic-stable:
jobs:
diff --git a/doc/source/overview.rst b/doc/source/overview.rst
index 423214d..e51b90b 100644
--- a/doc/source/overview.rst
+++ b/doc/source/overview.rst
@@ -116,7 +116,7 @@
$ stestr run --black-regex '\[.*\bslow\b.*\]' '^tempest\.(api|scenario)'
will run the same set of tests as the default gate jobs. Or you can
- use `unittest`_ compatible test runners such as `testr`_, `pytest`_ etc.
+ use `unittest`_ compatible test runners such as `stestr`_, `pytest`_ etc.
Tox also contains several existing job configurations. For example::
@@ -130,7 +130,6 @@
to run the tests tagged as smoke.
.. _unittest: https://docs.python.org/3/library/unittest.html
-.. _testr: https://testrepository.readthedocs.org/en/latest/MANUAL.html
.. _stestr: https://stestr.readthedocs.org/en/latest/MANUAL.html
.. _pytest: https://docs.pytest.org/en/latest/
@@ -269,14 +268,14 @@
will have a configuration file already set up to work with your
DevStack installation.
-Tempest is not tied to any single test runner, but `testr`_ is the most commonly
+Tempest is not tied to any single test runner, but `stestr`_ is the most commonly
used tool. Also, the nosetests test runner is **not** recommended to run Tempest.
After setting up your configuration file, you can execute the set of Tempest
-tests by using ``testr`` ::
+tests by using ``stestr``. By default, ``stestr`` runs tests in parallel ::
- $ testr run --parallel
+ $ stestr run
To run one single test serially ::
- $ testr run tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_reboot_non_existent_server
+ $ stestr run --serial tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_reboot_non_existent_server
diff --git a/releasenotes/notes/fix-1847749-2670b1d4f6097a1a.yaml b/releasenotes/notes/fix-1847749-2670b1d4f6097a1a.yaml
index d9ef626..4842f63 100644
--- a/releasenotes/notes/fix-1847749-2670b1d4f6097a1a.yaml
+++ b/releasenotes/notes/fix-1847749-2670b1d4f6097a1a.yaml
@@ -1,6 +1,6 @@
---
fixes:
- |
- Bug#1847749. This privides the workaround of Skip Exception raised instead of skipping
+ Bug#1847749. This provides a workaround for the Skip Exception being raised instead of skipping
the CLI tests. If you are running Tempest with stestr > 2.5.0 then use this fix.
Ref- https://github.com/testing-cabal/testtools/issues/272
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index f83e62c..92524fc 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -41,11 +41,6 @@
cls.prepare_instance_network()
super(BaseAttachVolumeTest, cls).setup_credentials()
- @classmethod
- def resource_setup(cls):
- super(BaseAttachVolumeTest, cls).resource_setup()
- cls.device = CONF.compute.volume_device_name
-
def _create_server(self):
# Start a server and wait for it to become ready
validation_resources = self.get_test_validation_resources(
@@ -84,15 +79,18 @@
# NOTE(andreaf) We need to ensure the ssh key has been
# injected in the guest before we power cycle
linux_client.validate_authentication()
+ disks_before_attach = linux_client.count_disks()
volume = self.create_volume()
# NOTE: As of the 12.0.0 Liberty release, the Nova libvirt driver
- # no longer honors a user-supplied device name, in that case
- # CONF.compute.volume_device_name must be set the equal value as
- # the libvirt auto-assigned one
- attachment = self.attach_volume(server, volume,
- device=('/dev/%s' % self.device))
+ # no longer honors a user-supplied device name, and there can be
+ # a mismatch between the libvirt-provided disk name and the actual
+ # disk name on the instance. Hence we no longer validate this test
+ # with the supplied device name; instead we count the number of
+ # disks before and after attach/detach to validate the test case.
+
+ attachment = self.attach_volume(server, volume)
self.servers_client.stop_server(server['id'])
waiters.wait_for_server_status(self.servers_client, server['id'],
@@ -103,9 +101,8 @@
'ACTIVE')
if CONF.validation.run_validation:
- disks = linux_client.get_disks()
- device_name_to_match = '\n' + self.device + ' '
- self.assertIn(device_name_to_match, disks)
+ disks_after_attach = linux_client.count_disks()
+ self.assertGreater(disks_after_attach, disks_before_attach)
self.servers_client.detach_volume(server['id'], attachment['volumeId'])
waiters.wait_for_volume_resource_status(
@@ -120,8 +117,8 @@
'ACTIVE')
if CONF.validation.run_validation:
- disks = linux_client.get_disks()
- self.assertNotIn(device_name_to_match, disks)
+ disks_after_detach = linux_client.count_disks()
+ self.assertEqual(disks_before_attach, disks_after_detach)
@decorators.idempotent_id('7fa563fe-f0f7-43eb-9e22-a1ece036b513')
def test_list_get_volume_attachments(self):
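
The rewritten test above validates the attach/detach cycle purely by comparing disk counts taken over SSH, which is also why the resource_setup() override and the CONF.compute.volume_device_name lookup could be dropped. A minimal sketch of that count-based pattern; FakeLinuxClient and the attach/detach callables are illustrative stand-ins, not Tempest APIs:

    class FakeLinuxClient(object):
        """Stand-in for RemoteClient; only count_disks() matters here."""
        def __init__(self, disks):
            self.disks = list(disks)

        def count_disks(self):
            return len(self.disks)

    def check_attach_detach(linux_client, attach, detach):
        # Count disks before, expect one more after attach, and the
        # original count again after detach.
        disks_before_attach = linux_client.count_disks()
        attach()
        assert linux_client.count_disks() > disks_before_attach
        detach()
        assert linux_client.count_disks() == disks_before_attach

    client = FakeLinuxClient(['vda'])
    check_attach_detach(client,
                        attach=lambda: client.disks.append('vdb'),
                        detach=lambda: client.disks.remove('vdb'))
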
diff --git a/tempest/cmd/cleanup.py b/tempest/cmd/cleanup.py
index 645a952..b4fb5a5 100644
--- a/tempest/cmd/cleanup.py
+++ b/tempest/cmd/cleanup.py
@@ -135,8 +135,6 @@
self.admin_project_id = ""
self._init_admin_ids()
- self.admin_role_added = []
-
# available services
self.project_services = cleanup_service.get_project_cleanup_services()
self.global_services = cleanup_service.get_global_cleanup_services()
@@ -170,7 +168,6 @@
# Loop through list of projects and clean them up.
for project in projects:
- self._add_admin(project['id'])
self._clean_project(project)
kwargs = {'data': self.dry_run_data,
@@ -188,15 +185,6 @@
f.write(json.dumps(self.dry_run_data, sort_keys=True,
indent=2, separators=(',', ': ')))
- self._remove_admin_user_roles()
-
- def _remove_admin_user_roles(self):
- project_ids = self.admin_role_added
- LOG.debug("Removing admin user roles where needed for projects: %s",
- project_ids)
- for project_id in project_ids:
- self._remove_admin_role(project_id)
-
def _clean_project(self, project):
print("Cleaning project: %s " % project['name'])
is_dry_run = self.options.dry_run
@@ -209,11 +197,6 @@
project_data = dry_run_data["_projects_to_clean"][project_id] = {}
project_data['name'] = project_name
- kwargs = {"username": CONF.auth.admin_username,
- "password": CONF.auth.admin_password,
- "project_name": project['name']}
- mgr = clients.Manager(credentials=credentials.get_credentials(
- **kwargs))
kwargs = {'data': project_data,
'is_dry_run': is_dry_run,
'saved_state_json': self.json_data,
@@ -222,7 +205,7 @@
'project_id': project_id,
'got_exceptions': self.GOT_EXCEPTIONS}
for service in self.project_services:
- svc = service(mgr, **kwargs)
+ svc = service(self.admin_mgr, **kwargs)
svc.run()
def _init_admin_ids(self):
@@ -272,46 +255,6 @@
def get_description(self):
return 'Cleanup after tempest run'
- def _add_admin(self, project_id):
- rl_cl = self.admin_mgr.roles_v3_client
- needs_role = True
- roles = rl_cl.list_user_roles_on_project(project_id,
- self.admin_id)['roles']
- for role in roles:
- if role['id'] == self.admin_role_id:
- needs_role = False
- LOG.debug("User already had admin privilege for this project")
- if needs_role:
- LOG.debug("Adding admin privilege for : %s", project_id)
- rl_cl.create_user_role_on_project(project_id, self.admin_id,
- self.admin_role_id)
- self.admin_role_added.append(project_id)
-
- def _remove_admin_role(self, project_id):
- LOG.debug("Remove admin user role for projectt: %s", project_id)
- # Must initialize Admin Manager for each user role
- # Otherwise authentication exception is thrown, weird
- id_cl = clients.Manager(
- credentials.get_configured_admin_credentials()).identity_client
- if (self._project_exists(project_id)):
- try:
- id_cl.delete_role_from_user_on_project(project_id,
- self.admin_id,
- self.admin_role_id)
- except Exception as ex:
- LOG.exception("Failed removing role from project which still "
- "exists, exception: %s", ex)
-
- def _project_exists(self, project_id):
- pr_cl = self.admin_mgr.projects_client
- try:
- p = pr_cl.show_project(project_id)
- LOG.debug("Project is: %s", str(p))
- return True
- except Exception as ex:
- LOG.debug("Project no longer exists? %s", ex)
- return False
-
def _init_state(self):
print("Initializing saved state.")
data = {}
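
With the per-project admin-role bookkeeping gone, every project cleanup service is now constructed with the single shared admin manager. A condensed sketch of the resulting flow, assuming the admin credentials are authorized for every project being cleaned; EchoService and clean_projects are hypothetical names, not part of tempest.cmd.cleanup:

    class EchoService(object):
        """Hypothetical stand-in for a Tempest cleanup service class."""
        def __init__(self, mgr, **kwargs):
            self.mgr = mgr
            self.kwargs = kwargs

        def run(self):
            print('cleaning project %s' % self.kwargs['project_id'])

    def clean_projects(admin_mgr, projects, project_services, **base_kwargs):
        # Every service receives the same admin manager; no admin role is
        # added to or removed from individual projects any more.
        for project in projects:
            for service in project_services:
                service(admin_mgr, project_id=project['id'],
                        **base_kwargs).run()

    clean_projects('admin-mgr', [{'id': 'p1'}, {'id': 'p2'}], [EchoService],
                   is_dry_run=False)
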
diff --git a/tempest/common/utils/linux/remote_client.py b/tempest/common/utils/linux/remote_client.py
index dad710c..5875da3 100644
--- a/tempest/common/utils/linux/remote_client.py
+++ b/tempest/common/utils/linux/remote_client.py
@@ -73,6 +73,13 @@
msg = "'TYPE' column is required but the output doesn't have it: "
raise tempest.lib.exceptions.TempestException(msg + output)
+ def count_disks(self):
+ disks_list = self.get_disks()
+ disks_list = [line[0] for line in
+ [device_name.split()
+ for device_name in disks_list.splitlines()][1:]]
+ return len(disks_list)
+
def get_boot_time(self):
cmd = 'cut -f1 -d. /proc/uptime'
boot_secs = self.exec_command(cmd)
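
As the unit test added below implies, get_disks() returns the lsblk header line plus one line per device whose TYPE is 'disk', and the new count_disks() simply counts the non-header lines. A standalone sketch of that parsing under the same assumption; count_type_disk and SAMPLE_LSBLK are illustrative names, not the RemoteClient implementation:

    SAMPLE_LSBLK = """\
    NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
    sda 8:0 0 128035676160 0 disk
    sdb 8:16 0 1000204886016 0 disk
    sr0 11:0 1 1073741312 0 rom"""

    def count_type_disk(lsblk_output):
        # Locate the TYPE column from the header, then count the rows
        # whose TYPE is 'disk' (sr0 above is a 'rom' and is excluded).
        lines = lsblk_output.splitlines()
        type_col = lines[0].split().index('TYPE')
        return sum(1 for line in lines[1:]
                   if line.split()[type_col] == 'disk')

    assert count_type_disk(SAMPLE_LSBLK) == 2
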
diff --git a/tempest/config.py b/tempest/config.py
index d67d3e0..32cebc5 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -667,7 +667,7 @@
default=28,
help="The mask bits for project ipv4 subnets"),
cfg.StrOpt('project_network_v6_cidr',
- default="2003::/48",
+ default="2001:db8::/48",
help="The cidr block to allocate project ipv6 subnets from"),
cfg.IntOpt('project_network_v6_mask_bits',
default=64,
diff --git a/tempest/lib/services/volume/v3/volumes_client.py b/tempest/lib/services/volume/v3/volumes_client.py
index 14a3c48..4fb6d2e 100644
--- a/tempest/lib/services/volume/v3/volumes_client.py
+++ b/tempest/lib/services/volume/v3/volumes_client.py
@@ -349,7 +349,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://docs.openstack.org/api-ref/block-storage/v3/index.html#force-delete-a-volume
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#force-detach-a-volume
"""
post_body = json.dumps({'os-force_detach': kwargs})
url = 'volumes/%s/action' % volume_id
diff --git a/tempest/tests/common/utils/linux/test_remote_client.py b/tempest/tests/common/utils/linux/test_remote_client.py
index 644a018..caad41c 100644
--- a/tempest/tests/common/utils/linux/test_remote_client.py
+++ b/tempest/tests/common/utils/linux/test_remote_client.py
@@ -106,6 +106,15 @@
self.assertEqual(self.conn.get_disks(), result)
self._assert_exec_called_with('lsblk -lb --nodeps')
+ def test_count_disk(self):
+ output_lsblk = """\
+NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
+sda 8:0 0 128035676160 0 disk
+sdb 8:16 0 1000204886016 0 disk
+sr0 11:0 1 1073741312 0 rom"""
+ self.ssh_mock.mock.exec_command.return_value = output_lsblk
+ self.assertEqual(self.conn.count_disks(), 2)
+
def test_get_boot_time(self):
booted_at = 10000
uptime_sec = 5000.02
diff --git a/tempest/tests/lib/services/volume/v3/test_backups_client.py b/tempest/tests/lib/services/volume/v3/test_backups_client.py
index 5412064..97e1132 100644
--- a/tempest/tests/lib/services/volume/v3/test_backups_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_backups_client.py
@@ -60,8 +60,11 @@
],
"name": "backup001",
"object_count": 22,
+ "os-backup-project-attr:project_id": "2c67a14be9314c5dae2ee6",
+ "user_id": "515ba0dd59f84f25a6a084a45d8d93b2",
"size": 1,
"status": "available",
+ "updated_at": "2013-04-02T10:35:27.000000",
"volume_id": "e5185058-943a-4cb4-96d9-72c184c337d6",
"is_incremental": True,
"has_dependent_backups": False
@@ -73,7 +76,16 @@
"backup": {
"id": "4c65c15f-a5c5-464b-b92a-90e4c04636a7",
"name": "fake-backup-name",
- "links": "fake-links"
+ "links": [
+ {
+ "href": "fake-url-1",
+ "rel": "self"
+ },
+ {
+ "href": "fake-url-2",
+ "rel": "bookmark"
+ }
+ ]
}
}
diff --git a/tempest/tests/lib/services/volume/v3/test_types_client.py b/tempest/tests/lib/services/volume/v3/test_types_client.py
index 7021a3f..336aa32 100644
--- a/tempest/tests/lib/services/volume/v3/test_types_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_types_client.py
@@ -46,10 +46,8 @@
FAKE_UPDATE_VOLUME_TYPE = {
'volume_type': {
'id': '6685584b-1eac-4da6-b5c3-555430cf68ff',
- 'qos_specs_id': None,
'name': 'volume-type-test',
'description': 'default volume type',
- 'os-volume-type-access:is_public': True,
'is_public': True,
'extra_specs': {
'volume_backend_name': 'rbd'
diff --git a/tempest/tests/lib/services/volume/v3/test_volumes_client.py b/tempest/tests/lib/services/volume/v3/test_volumes_client.py
index 1250536..56c1a35 100644
--- a/tempest/tests/lib/services/volume/v3/test_volumes_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_volumes_client.py
@@ -24,27 +24,25 @@
FAKE_VOLUME_SUMMARY = {
"volume-summary": {
- "total_size": 20,
- "total_count": 5
+ "total_size": 4,
+ "total_count": 4,
+ "metadata": {
+ "key1": ["value1", "value2"],
+ "key2": ["value2"]
+ }
}
}
FAKE_VOLUME_METADATA_ITEM = {
"meta": {
- "key1": "value1"
+ "name": "metadata1"
}
}
FAKE_VOLUME_IMAGE_METADATA = {
"metadata": {
- "container_format": "bare",
- "min_ram": "0",
- "disk_format": "raw",
- "image_name": "xly-ubuntu16-server",
- "image_id": "3e087b0c-10c5-4255-b147-6e8e9dbad6fc",
- "checksum": "008f5d22fe3cb825d714da79607a90f9",
- "min_disk": "0",
- "size": "8589934592"
+ "key1": "value1",
+ "key2": "value2"
}
}
diff --git a/tools/tempest-plugin-sanity.sh b/tools/tempest-plugin-sanity.sh
index a087a4c..b484a41 100644
--- a/tools/tempest-plugin-sanity.sh
+++ b/tools/tempest-plugin-sanity.sh
@@ -60,6 +60,9 @@
fi
}
+: ${UPPER_CONSTRAINTS_FILE:="https://releases.openstack.org/constraints/upper/master"}
+DEPS="-c${UPPER_CONSTRAINTS_FILE}"
+
# function to create virtualenv to perform sanity operation
function prepare_workspace {
SANITY_DIR=$(pwd)
@@ -73,10 +76,10 @@
# Function to install project
function install_project {
- "$TVENV" pip install "$SANITY_DIR"/"$1"
+ "$TVENV" pip install $DEPS "$SANITY_DIR"/"$1"
# Check for test-requirements.txt file in a project then install it.
if [ -e "$SANITY_DIR"/"$1"/test-requirements.txt ]; then
- "$TVENV" pip install -r "$SANITY_DIR"/"$1"/test-requirements.txt
+ "$TVENV" pip install $DEPS -r "$SANITY_DIR"/"$1"/test-requirements.txt
fi
}