Merge "Fixing ignored value of --config parameter in tempest account-generator"
diff --git a/.zuul.yaml b/.zuul.yaml
index f57e81e..8576455 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -1,7 +1,6 @@
- job:
name: devstack-tempest
parent: devstack
- nodeset: openstack-single-node
description: |
Base Tempest job.
@@ -38,13 +37,59 @@
post-run: playbooks/post-tempest.yaml
- job:
+ name: tempest-all
+ parent: devstack-tempest
+ description: |
+ Integration test that runs all tests.
+ Former name for this job was:
+ * legacy-periodic-tempest-dsvm-all-master
+ vars:
+ tox_envlist: all
+ tempest_test_regex: tempest
+ devstack_localrc:
+ ENABLE_FILE_INJECTION: true
+
+- job:
+ name: devstack-tempest-ipv6
+ parent: devstack-ipv6
+ description: |
+ Base Tempest IPv6 job.
+ required-projects:
+ - git.openstack.org/openstack/tempest
+ timeout: 7200
+ roles:
+ - zuul: git.openstack.org/openstack-dev/devstack
+ vars:
+ devstack_services:
+ tempest: true
+ devstack_local_conf:
+ test-config:
+ $TEMPEST_CONFIG:
+ compute:
+ min_compute_nodes: "{{ groups['compute'] | default(['controller']) | length }}"
+ test_results_stage_name: test_results
+ zuul_copy_output:
+ '{{ devstack_base_dir }}/tempest/etc/tempest.conf': logs
+ '{{ devstack_base_dir }}/tempest/etc/accounts.yaml': logs
+ '{{ devstack_base_dir }}/tempest/tempest.log': logs
+ '{{ stage_dir }}/{{ test_results_stage_name }}.subunit': logs
+ '{{ stage_dir }}/{{ test_results_stage_name }}.html': logs
+ '{{ stage_dir }}/stackviz': logs
+ extensions_to_txt:
+ conf: true
+ log: true
+ yaml: true
+ yml: true
+ run: playbooks/devstack-tempest.yaml
+ post-run: playbooks/post-tempest.yaml
+
+- job:
name: tempest-full
parent: devstack-tempest
# This currently works from stable/pike on.
- branches:
- - master
- - stable/queens
- - stable/pike
+ # Before stable/pike, the legacy version of tempest-full
+ # ('legacy-tempest-dsvm-neutron-full') is run.
+ branches: ^(?!stable/ocata).*$
description: |
Base integration test with Neutron networking and py27.
Former names for this job were:
@@ -56,6 +101,37 @@
ENABLE_FILE_INJECTION: true
- job:
+ name: tempest-full-oslo-master
+ parent: tempest-full
+ description: |
+ Integration test using current git of oslo libs.
+ This ensures that when oslo libs get released that they
+ do not break OpenStack server projects.
+
+ Former name for this job was
+ periodic-tempest-dsvm-oslo-latest-full-master.
+ timeout: 10800
+ required-projects:
+ - git.openstack.org/openstack/oslo.cache
+ - git.openstack.org/openstack/oslo.concurrency
+ - git.openstack.org/openstack/oslo.config
+ - git.openstack.org/openstack/oslo.context
+ - git.openstack.org/openstack/oslo.db
+ - git.openstack.org/openstack/oslo.i18n
+ - git.openstack.org/openstack/oslo.log
+ - git.openstack.org/openstack/oslo.messaging
+ - git.openstack.org/openstack/oslo.middleware
+ - git.openstack.org/openstack/oslo.policy
+ - git.openstack.org/openstack/oslo.privsep
+ - git.openstack.org/openstack/oslo.reports
+ - git.openstack.org/openstack/oslo.rootwrap
+ - git.openstack.org/openstack/oslo.serialization
+ - git.openstack.org/openstack/oslo.service
+ - git.openstack.org/openstack/oslo.utils
+ - git.openstack.org/openstack/oslo.versionedobjects
+ - git.openstack.org/openstack/oslo.vmware
+
+- job:
name: tempest-full-parallel
parent: tempest-full
voting: false
@@ -71,9 +147,10 @@
- job:
name: tempest-full-py3
parent: devstack-tempest
- branches:
- - master
- - stable/queens
+ # This currently works from stable/pike on.
+ # Before stable/pike, the legacy version of tempest-full
+ # ('legacy-tempest-dsvm-neutron-full') is run.
+ branches: ^(?!stable/ocata).*$
description: |
Base integration test with Neutron networking and py3.
Former names for this job were:
@@ -93,9 +170,31 @@
c-bak: false
- job:
+ name: tempest-full-py3-ipv6
+ parent: devstack-tempest-ipv6
+ # This currently works from stable/pike on.
+ # Before stable/pike, the legacy version of tempest-full
+ # ('legacy-tempest-dsvm-neutron-full') is run.
+ branches: ^(?!stable/ocata).*$
+ description: |
+ Base integration test with Neutron networking, IPv6 and py3.
+ vars:
+ tox_envlist: full
+ devstack_localrc:
+ USE_PYTHON3: true
+ FORCE_CONFIG_DRIVE: true
+ devstack_services:
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
+ # without Swift, c-bak cannot run (in the Gate at least)
+ c-bak: false
+
+- job:
name: tempest-multinode-full
parent: devstack-tempest
- nodeset: openstack-two-node
+ nodeset: openstack-two-node-bionic
# Until the devstack changes are backported, only run this on master
branches:
- master
@@ -123,6 +222,13 @@
LIVE_MIGRATION_AVAILABLE: true
USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: true
+- job:
+ name: tempest-multinode-full-py3
+ parent: tempest-multinode-full
+ vars:
+ devstack_localrc:
+ USE_PYTHON3: true
+
- nodeset:
name: openstack-bionic-node
nodes:
@@ -133,14 +239,23 @@
nodes:
- controller
+- nodeset:
+ name: openstack-opensuse150-node
+ nodes:
+ - name: controller
+ label: opensuse-150
+ groups:
+ - name: tempest
+ nodes:
+ - controller
+
- job:
- name: tempest-full-py36
+ name: tempest-full-py3-opensuse150
parent: tempest-full-py3
- nodeset: openstack-bionic-node
- branches:
- - master
+ nodeset: openstack-opensuse150-node
description: |
- Base integration test with Neutron networking and py36.
+ Base integration test with Neutron networking and py36 running
+ on openSUSE Leap 15.0
voting: false
- job:
@@ -151,7 +266,7 @@
description: |
This multinode integration job will run all the tests tagged as slow.
It enables the lvm multibackend setup to cover few scenario tests.
- This job will run only slow tests(API or Scenario) serially.
+ This job will run only slow tests (API or Scenario) serially.
Former names for this job were:
* legacy-tempest-dsvm-neutron-scenario-multinode-lvm-multibackend
@@ -164,28 +279,47 @@
tempest_concurrency: 2
- job:
+ name: tempest-slow-py3
+ parent: tempest-slow
+ vars:
+ devstack_localrc:
+ USE_PYTHON3: true
+ devstack_services:
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
+ # without Swift, c-bak cannot run (in the Gate at least)
+ c-bak: false
+
+- job:
name: tempest-full-rocky
parent: tempest-full
+ nodeset: openstack-single-node-xenial
override-checkout: stable/rocky
- job:
name: tempest-full-rocky-py3
parent: tempest-full-py3
+ nodeset: openstack-single-node-xenial
override-checkout: stable/rocky
- job:
name: tempest-full-queens
parent: tempest-full
+ nodeset: openstack-single-node-xenial
override-checkout: stable/queens
- job:
name: tempest-full-queens-py3
parent: tempest-full-py3
+ nodeset: openstack-single-node-xenial
override-checkout: stable/queens
- job:
name: tempest-full-pike
parent: tempest-full
+ nodeset: openstack-single-node-xenial
override-checkout: stable/pike
- job:
@@ -206,22 +340,26 @@
- ^tempest/hacking/.*$
- ^tempest/tests/.*$
required-projects:
+ - git.openstack.org/openstack/airship-tempest-plugin
- git.openstack.org/openstack/almanach
- git.openstack.org/openstack/aodh
- git.openstack.org/openstack/barbican-tempest-plugin
+ - git.openstack.org/openstack/blazar-tempest-plugin
- git.openstack.org/openstack/ceilometer
- - git.openstack.org/openstack/cinder
- - git.openstack.org/openstack/congress
+ - git.openstack.org/openstack/cinder-tempest-plugin
+ - git.openstack.org/openstack/cloudkitty-tempest-plugin
+ - git.openstack.org/openstack/congress-tempest-plugin
- git.openstack.org/openstack/designate-tempest-plugin
- - git.openstack.org/openstack/ec2-api
+ - git.openstack.org/openstack/ec2api-tempest-plugin
- git.openstack.org/openstack/freezer
- git.openstack.org/openstack/freezer-api
- git.openstack.org/openstack/freezer-tempest-plugin
+ - git.openstack.org/openstack/gabbi-tempest
- git.openstack.org/openstack/gce-api
- git.openstack.org/openstack/glare
- - git.openstack.org/openstack/heat
+ - git.openstack.org/openstack/heat-tempest-plugin
- git.openstack.org/openstack/intel-nfv-ci-tests
- - git.openstack.org/openstack/ironic
+ - git.openstack.org/openstack/ironic-tempest-plugin
- git.openstack.org/openstack/ironic-inspector
- git.openstack.org/openstack/keystone-tempest-plugin
- git.openstack.org/openstack/kingbird
@@ -230,19 +368,21 @@
- git.openstack.org/openstack/magnum-tempest-plugin
- git.openstack.org/openstack/manila
- git.openstack.org/openstack/manila-tempest-plugin
- - git.openstack.org/openstack/mistral
+ - git.openstack.org/openstack/mistral-tempest-plugin
- git.openstack.org/openstack/mogan
- git.openstack.org/openstack/monasca-api
- git.openstack.org/openstack/monasca-log-api
- - git.openstack.org/openstack/murano
+ - git.openstack.org/openstack/monasca-tempest-plugin
+ - git.openstack.org/openstack/murano-tempest-plugin
+ - git.openstack.org/openstack/networking-ansible
- git.openstack.org/openstack/networking-bgpvpn
- git.openstack.org/openstack/networking-cisco
- git.openstack.org/openstack/networking-fortinet
- git.openstack.org/openstack/networking-generic-switch
- - git.openstack.org/openstack/networking-l2gw
+ - git.openstack.org/openstack/networking-l2gw-tempest-plugin
- git.openstack.org/openstack/networking-midonet
- - git.openstack.org/openstack/networking-plumgrid
- git.openstack.org/openstack/networking-sfc
+ - git.openstack.org/openstack/networking-spp
- git.openstack.org/openstack/neutron
- git.openstack.org/openstack/neutron-dynamic-routing
- git.openstack.org/openstack/neutron-fwaas
@@ -251,21 +391,25 @@
- git.openstack.org/openstack/neutron-vpnaas
- git.openstack.org/openstack/nova-lxd
- git.openstack.org/openstack/novajoin-tempest-plugin
+ - git.openstack.org/openstack/octavia
- git.openstack.org/openstack/octavia-tempest-plugin
- git.openstack.org/openstack/oswin-tempest-plugin
- git.openstack.org/openstack/panko
- git.openstack.org/openstack/patrole
+ - git.openstack.org/openstack/python-watcherclient
- git.openstack.org/openstack/qinling
- git.openstack.org/openstack/requirements
- git.openstack.org/openstack/sahara-tests
- git.openstack.org/openstack/senlin
- git.openstack.org/openstack/senlin-tempest-plugin
- git.openstack.org/openstack/tap-as-a-service
+ - git.openstack.org/openstack/telemetry-tempest-plugin
- git.openstack.org/openstack/tempest-horizon
- - git.openstack.org/openstack/trio2o
- - git.openstack.org/openstack/trove
+ - git.openstack.org/openstack/tobiko
+ - git.openstack.org/openstack/tripleo-common-tempest-plugin
+ - git.openstack.org/openstack/trove-tempest-plugin
- git.openstack.org/openstack/valet
- - git.openstack.org/openstack/vitrage
+ - git.openstack.org/openstack/vitrage-tempest-plugin
- git.openstack.org/openstack/vmware-nsx-tempest-plugin
- git.openstack.org/openstack/watcher-tempest-plugin
- git.openstack.org/openstack/zaqar-tempest-plugin
@@ -284,11 +428,49 @@
devstack_localrc:
TEMPEST_VOLUME_TYPE: volumev2
+- job:
+ name: tempest-full-test-account-py3
+ parent: tempest-full-py3
+ description: |
+ This job runs the full set of tempest tests using pre-provisioned
+ credentials instead of dynamic credentials and py3.
+ Former names for this job were:
+ - legacy-tempest-dsvm-full-test-accounts
+ - legacy-tempest-dsvm-neutron-full-test-accounts
+ - legacy-tempest-dsvm-identity-v3-test-accounts
+ vars:
+ devstack_localrc:
+ TEMPEST_USE_TEST_ACCOUNTS: True
+
+- job:
+ name: tempest-full-test-account-no-admin-py3
+ parent: tempest-full-test-account-py3
+ description: |
+ This job runs the full set of tempest tests using pre-provisioned
+ credentials and py3 without having an admin account.
+ Former name for this job was:
+ - legacy-tempest-dsvm-neutron-full-non-admin
+
+ vars:
+ devstack_localrc:
+ TEMPEST_HAS_ADMIN: False
+
+- job:
+ name: tempest-pg-full
+ parent: tempest-full
+ description: |
+ Base integration test with Neutron networking and py27 and PostgreSQL.
+ Former name for this job was legacy-tempest-dsvm-neutron-pg-full.
+ vars:
+ devstack_localrc:
+ ENABLE_FILE_INJECTION: true
+ DATABASE_TYPE: postgresql
+
- project:
templates:
- check-requirements
- integrated-gate
- - integrated-gate-py35
+ - integrated-gate-py3
- openstack-cover-jobs
- openstack-python-jobs
- openstack-python35-jobs
@@ -302,8 +484,15 @@
- ^playbooks/
- ^roles/
- ^.zuul.yaml$
+ - devstack-tempest-ipv6:
+ voting: false
+ files:
+ - ^playbooks/
+ - ^roles/
+ - ^.zuul.yaml$
- nova-multiattach:
- irrelevant-files:
+ # Define list of irrelevant files to use everywhere else
+ irrelevant-files: &tempest-irrelevant-files
- ^(test-|)requirements.txt$
- ^.*\.rst$
- ^doc/.*$
@@ -312,77 +501,29 @@
- ^setup.cfg$
- ^tempest/hacking/.*$
- ^tempest/tests/.*$
+ - ^tools/.*$
- tempest-full-parallel:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - tempest-full-py36:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-py3:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-py3-ipv6:
+ voting: false
+ irrelevant-files: *tempest-irrelevant-files
- tempest-full-rocky:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ irrelevant-files: *tempest-irrelevant-files
- tempest-full-rocky-py3:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ irrelevant-files: *tempest-irrelevant-files
- tempest-full-queens:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ irrelevant-files: *tempest-irrelevant-files
- tempest-full-queens-py3:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ irrelevant-files: *tempest-irrelevant-files
- tempest-full-pike:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ irrelevant-files: *tempest-irrelevant-files
- tempest-multinode-full:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-multinode-full-py3:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-tox-plugin-sanity-check:
irrelevant-files:
- ^(test-|)requirements.txt$
- ^.*\.rst$
@@ -392,343 +533,86 @@
- ^setup.cfg$
- ^tempest/hacking/.*$
- ^tempest/tests/.*$
- - tempest-tox-plugin-sanity-check
+ # tools/ is not here since this relies on a script in tools/.
- tempest-slow:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - nova-cells-v1:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-slow-py3:
+ irrelevant-files: *tempest-irrelevant-files
- nova-live-migration:
voting: false
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ irrelevant-files: *tempest-irrelevant-files
- neutron-grenade-multinode:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ irrelevant-files: *tempest-irrelevant-files
- neutron-grenade:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ irrelevant-files: *tempest-irrelevant-files
+ - grenade-py3:
+ irrelevant-files: *tempest-irrelevant-files
- devstack-plugin-ceph-tempest:
voting: false
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ irrelevant-files: *tempest-irrelevant-files
- puppet-openstack-integration-4-scenario001-tempest-centos-7:
voting: false
- irrelevant-files:
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - ^test-requirements.txt$
+ irrelevant-files: *tempest-irrelevant-files
- puppet-openstack-integration-4-scenario002-tempest-centos-7:
voting: false
- irrelevant-files:
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - ^test-requirements.txt$
+ irrelevant-files: *tempest-irrelevant-files
- puppet-openstack-integration-4-scenario003-tempest-centos-7:
voting: false
- irrelevant-files:
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - ^test-requirements.txt$
+ irrelevant-files: *tempest-irrelevant-files
- puppet-openstack-integration-4-scenario004-tempest-centos-7:
voting: false
- irrelevant-files:
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - ^test-requirements.txt$
+ irrelevant-files: *tempest-irrelevant-files
- neutron-tempest-dvr:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ irrelevant-files: *tempest-irrelevant-files
- legacy-tempest-dsvm-neutron-full-ocata:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ irrelevant-files: *tempest-irrelevant-files
- tempest-full:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ irrelevant-files: *tempest-irrelevant-files
+ - interop-tempest-consistency:
+ voting: false
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-test-account-py3:
+ voting: false
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-test-account-no-admin-py3:
+ voting: false
+ irrelevant-files: *tempest-irrelevant-files
gate:
jobs:
- nova-multiattach:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - tempest-slow:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-slow-py3:
+ irrelevant-files: *tempest-irrelevant-files
- neutron-grenade-multinode:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - legacy-tempest-dsvm-neutron-full:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full:
+ irrelevant-files: *tempest-irrelevant-files
- neutron-grenade:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ irrelevant-files: *tempest-irrelevant-files
+ - grenade-py3:
+ irrelevant-files: *tempest-irrelevant-files
experimental:
jobs:
- tempest-cinder-v2-api:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - legacy-periodic-tempest-dsvm-all-master:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - legacy-tempest-dsvm-multinode-full:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-all:
+ irrelevant-files: *tempest-irrelevant-files
- legacy-tempest-dsvm-neutron-dvr-multinode-full:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ irrelevant-files: *tempest-irrelevant-files
- neutron-tempest-dvr-ha-multinode-full:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - legacy-tempest-dsvm-full-test-accounts:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - legacy-tempest-dsvm-neutron-full-test-accounts:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - legacy-tempest-dsvm-identity-v3-test-accounts:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - legacy-tempest-dsvm-neutron-full-non-admin:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ irrelevant-files: *tempest-irrelevant-files
+ - nova-cells-v1:
+ irrelevant-files: *tempest-irrelevant-files
- legacy-tempest-dsvm-nova-v20-api:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ irrelevant-files: *tempest-irrelevant-files
- legacy-tempest-dsvm-lvm-multibackend:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - legacy-tempest-dsvm-cinder-v1:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ irrelevant-files: *tempest-irrelevant-files
- devstack-plugin-ceph-tempest-py3:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - legacy-tempest-dsvm-neutron-pg-full:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - legacy-tempest-dsvm-neutron-full-opensuse-423:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-pg-full:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-py3-opensuse150:
+ irrelevant-files: *tempest-irrelevant-files
periodic-stable:
jobs:
- tempest-full-rocky
@@ -736,56 +620,8 @@
- tempest-full-queens
- tempest-full-queens-py3
- tempest-full-pike
- - legacy-periodic-tempest-dsvm-neutron-full-ocata:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
+ - legacy-periodic-tempest-dsvm-neutron-full-ocata
periodic:
jobs:
- - legacy-periodic-tempest-dsvm-full-test-accounts-master:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - legacy-periodic-tempest-dsvm-neutron-full-test-accounts-master:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - legacy-periodic-tempest-dsvm-neutron-full-non-admin-master:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - legacy-periodic-tempest-dsvm-all-master:
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
-
+ - tempest-all
+ - tempest-full-oslo-master
diff --git a/HACKING.rst b/HACKING.rst
index 5b9c0f1..eb6551a 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -6,7 +6,7 @@
- Step 2: Read on
Tempest Specific Commandments
-------------------------------
+-----------------------------
- [T102] Cannot import OpenStack python clients in tempest/api &
tempest/scenario tests
@@ -35,6 +35,30 @@
- Clean up test data at the completion of each test
- Use configuration files for values that will vary by environment
+Supported OpenStack Components
+------------------------------
+
+Tempest's :ref:`library` and :ref:`plugin interface <tempest_plugin>` can be
+leveraged to support integration testing for virtually any OpenStack component.
+
+However, Tempest only offers **in-tree** integration testing coverage for the
+following components:
+
+* Cinder
+* Glance
+* Keystone
+* Neutron
+* Nova
+* Swift
+
+Historically, Tempest offered in-tree testing for other components as well, but
+since the introduction of the `External Plugin Interface`_, Tempest's in-tree
+testing scope has been limited to the projects above. Integration tests for
+projects not included above should go into one of the
+`relevant plugin projects`_.
+
+.. _External Plugin Interface: https://specs.openstack.org/openstack/qa-specs/specs/tempest/implemented/tempest-external-plugin-interface.html
+.. _relevant plugin projects: https://docs.openstack.org/tempest/latest/plugin-registry.html#detected-plugins
Exception Handling
------------------
@@ -349,18 +373,19 @@
docstrings for the workflow in each test method can be used instead. A good
example of this would be::
- class TestVolumeBootPattern(manager.ScenarioTest):
- """
- This test case attempts to reproduce the following steps:
+ class TestServerBasicOps(manager.ScenarioTest):
- * Create in Cinder some bootable volume importing a Glance image
- * Boot an instance from the bootable volume
- * Write content to the volume
- * Delete an instance and Boot a new instance from the volume
- * Check written content in the instance
- * Create a volume snapshot while the instance is running
- * Boot an additional instance from the new snapshot based volume
- * Check written content in the instance booted from snapshot
+ """The test suite for server basic operations
+
+ This smoke test case follows this basic set of operations:
+ * Create a keypair for use in launching an instance
+ * Create a security group to control network access in instance
+ * Add simple permissive rules to the security group
+ * Launch an instance
+ * Perform ssh to instance
+ * Verify metadata service
+ * Verify metadata on config_drive
+ * Terminate the instance
"""
Test Identification with Idempotent ID
@@ -430,7 +455,7 @@
by modifying Tempest's `lib installation script`_ for previous branches
(because DevStack is branched).
-.. _lib installation script: http://git.openstack.org/cgit/openstack-dev/devstack/tree/lib/tempest
+.. _lib installation script: https://git.openstack.org/cgit/openstack-dev/devstack/tree/lib/tempest
2. Bug fix on core project needing Tempest changes
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/README.rst b/README.rst
index 307ceb3..73930f1 100644
--- a/README.rst
+++ b/README.rst
@@ -165,7 +165,7 @@
interface and when Z is incremented it's a bug fix release for the library.
Also note that both Y and Z are reset to 0 at each increment of X.
-.. _semver: http://semver.org/
+.. _semver: https://semver.org/
Configuration
-------------
@@ -218,7 +218,7 @@
argument is no longer required, however it may perform faster if included.
For more information on these options and details about stestr, please see the
-`stestr documentation <http://stestr.readthedocs.io/en/latest/MANUAL.html>`_.
+`stestr documentation <https://stestr.readthedocs.io/en/latest/MANUAL.html>`_.
Python 3.x
----------
diff --git a/REVIEWING.rst b/REVIEWING.rst
index 8a1e152..31fedce 100644
--- a/REVIEWING.rst
+++ b/REVIEWING.rst
@@ -36,8 +36,11 @@
For any change that adds new functionality to either common functionality or an
out-of-band tool, unit tests are required. This is to ensure we don't introduce
future regressions and to test conditions which we may not hit in the gate runs.
-Tests, and service clients aren't required to have unit tests since they should
-be self verifying by running them in the gate.
+API and scenario tests aren't required to have unit tests since they should
+be self-verifying by running them in the gate. All service clients, on the
+other hand, `must have`_ unit tests, as they belong to ``tempest/lib``.
+
+.. _must have: https://docs.openstack.org/tempest/latest/library.html#testing
API Stability
@@ -156,8 +159,32 @@
When to approve
---------------
-* Every patch needs two +2s before being approved.
-* It's ok to hold off on an approval until a subject matter expert reviews it
-* If a patch has already been approved but requires a trivial rebase to merge,
- you do not have to wait for a second +2, since the patch has already had
- two +2s.
+* It's OK to hold off on an approval until a subject matter expert reviews it.
+* Every patch needs two +2's before being approved.
+* However, a single Tempest core reviewer can approve patches without waiting
+ for another +2 in the following cases:
+
+ * If a patch has already been approved but requires a trivial rebase to
+ merge, then there is no need to wait for a second +2, since the patch has
+ already had two +2's.
+ * If any trivial patch set fixes one of the items below:
+
+ * Documentation or code comment typo
+ * Documentation ref link
+ * Example: `example`_
+
+ .. note::
+
+ Any other small documentation, CI job, or code change does not fall under
+ this category.
+
+ * If the patch **unblocks** a failing project gate, provided that:
+
+ * the project's PTL +1's the change
+ * the patch does not affect any other project's testing gates
+ * the patch does not cause any negative side effects
+
+ Note that such a policy should be used judiciously, as we should strive to
+ have two +2's on each patch set, prior to approval.
+
+.. _example: https://review.openstack.org/#/c/611032/
diff --git a/doc/source/data/tempest-plugins-registry.header b/doc/source/data/tempest-plugins-registry.header
index 0de12b7..831d8a6 100644
--- a/doc/source/data/tempest-plugins-registry.header
+++ b/doc/source/data/tempest-plugins-registry.header
@@ -3,9 +3,9 @@
job. You should edit the files data/tempest-plugins-registry.footer
and data/tempest-plugins-registry.header instead of this one.
-==========================
- Tempest Plugin Registry
-==========================
+=======================
+Tempest Plugin Registry
+=======================
Since we've created the external plugin mechanism, it's gotten used by
a lot of projects. The following is a list of plugins that currently
diff --git a/doc/source/plugin.rst b/doc/source/plugin.rst
index 9958792..dc0e94c 100644
--- a/doc/source/plugin.rst
+++ b/doc/source/plugin.rst
@@ -96,7 +96,7 @@
that users don't have to worry about inadvertently installing a Tempest plugin
when they install another package.
-.. _Branchless Tempest Spec: http://specs.openstack.org/openstack/qa-specs/specs/tempest/implemented/branchless-tempest.html
+.. _Branchless Tempest Spec: https://specs.openstack.org/openstack/qa-specs/specs/tempest/implemented/branchless-tempest.html
The sole advantage to integrating a plugin into an existing python project is
that it enables you to land code changes at the same time you land test changes
diff --git a/playbooks/devstack-tempest.yaml b/playbooks/devstack-tempest.yaml
index 01155a8..b51e701 100644
--- a/playbooks/devstack-tempest.yaml
+++ b/playbooks/devstack-tempest.yaml
@@ -7,6 +7,11 @@
# We run tests only on one node, regardless how many nodes are in the system
- hosts: tempest
+ environment:
+ # This environment variable is used by the optional tempest-gabbi
+ # job provided by the gabbi-tempest plugin. It can be safely ignored
+ # if that plugin is not being used.
+ GABBI_TEMPEST_PATH: "{{ gabbi_tempest_path }}"
roles:
- setup-tempest-run-dir
- setup-tempest-data-dir
diff --git a/releasenotes/notes/Placement-client-for-placement-based-minimum-bw-allocation-27ed0938118752b6.yaml b/releasenotes/notes/Placement-client-for-placement-based-minimum-bw-allocation-27ed0938118752b6.yaml
new file mode 100644
index 0000000..21b74a6
--- /dev/null
+++ b/releasenotes/notes/Placement-client-for-placement-based-minimum-bw-allocation-27ed0938118752b6.yaml
@@ -0,0 +1,17 @@
+---
+features:
+ - |
+ Add a basic read-only Placement client to Tempest to make it possible to
+ test the placement-based bandwidth allocation feature.
+ The following API calls are now available for Tempest:
+
+ * GET /allocation_candidates
+ * GET /allocations/{consumer_uuid}
+
+ Add new config group ``placement``, with the config options:
+
+ * ``endpoint_type`` to use for communication with placement service.
+ * ``catalog_type`` of the placement service.
+ * ``region`` as the placement region name to use.
+ * ``min_microversion`` and ``max_microversion`` defining the range of
+ microversions used when sending placement API requests.
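As a usage illustration only (the ``placement_client`` attribute and the method name below are assumptions inferred from the two GET calls listed above, not guaranteed by this note), a compute test could read placement data roughly like this::

    from tempest.api.compute import base
    from tempest.lib import decorators


    class PlacementReadOnlySketch(base.BaseV2ComputeAdminTest):
        """Sketch of exercising the read-only placement calls."""

        @decorators.idempotent_id('5b9a1f5e-0c2d-4a37-9f1e-2f54c7f1a001')
        def test_read_allocations_of_a_server(self):
            server = self.create_test_server(wait_until='ACTIVE')
            # Assumed method name wrapping GET /allocations/{consumer_uuid}.
            allocations = self.placement_client.list_allocations(server['id'])
            self.assertIn('allocations', allocations)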
diff --git a/releasenotes/notes/add-immutable-user-source-support-dd17772a997075e0.yaml b/releasenotes/notes/add-immutable-user-source-support-dd17772a997075e0.yaml
new file mode 100644
index 0000000..931d689
--- /dev/null
+++ b/releasenotes/notes/add-immutable-user-source-support-dd17772a997075e0.yaml
@@ -0,0 +1,11 @@
+---
+features:
+ - |
+ Add a new config setting ``immutable_user_source`` in the
+ ``[identity-feature-enabled]`` group that defaults to false.
+ This setting, combined with the usage of the ``@testtools.skipIf()``
+ decorator, will allow tests that require user creation, deletion,
+ or modification to skip instead of failing in environments that
+ are LDAP-backed. In such environments, the user source is read-only,
+ so this feature flag is needed to allow such tests to gracefully skip
+ without having to blacklist them.
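A minimal sketch of the intended pattern, assuming the flag is exposed as ``CONF.identity_feature_enabled.immutable_user_source`` (group and option names taken from the note above)::

    import testtools

    from tempest.api.identity import base
    from tempest import config
    from tempest.lib import decorators

    CONF = config.CONF


    class ImmutableUserSkipSketch(base.BaseIdentityV3AdminTest):

        @decorators.idempotent_id('7c64f8d1-4f0a-4a4e-bb2d-1f0f4f3d9a02')
        @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
                          'Skipped because the environment has an immutable '
                          'user source and only provides read-only access '
                          'to users.')
        def test_update_user_name(self):
            # Would create and modify a user here; skipped on read-only
            # (e.g. LDAP-backed) deployments instead of failing.
            pass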
diff --git a/releasenotes/notes/add-redirect-param-bea1f6fbce629c70.yaml b/releasenotes/notes/add-redirect-param-bea1f6fbce629c70.yaml
new file mode 100644
index 0000000..f245dcb
--- /dev/null
+++ b/releasenotes/notes/add-redirect-param-bea1f6fbce629c70.yaml
@@ -0,0 +1,16 @@
+---
+features:
+ - |
+ A new parameter ``follow_redirects`` has been added to the class
+ ``RestClient``, which is passed through to ``ClosingHttp`` or
+ ``ClosingProxyHttp`` respectively. The default value is ``True``
+ which corresponds to the previous behaviour of following up to five
+ redirections before returning a response. Setting
+ ``follow_redirects = False`` allows to disable this behaviour, so
+ that any redirect that is received is directly returned to the caller.
+ This allows tests to verify that an API is responding with a redirect.
+fixes:
+ - |
+ [`bug 1616892 <https://bugs.launchpad.net/tempest/+bug/1616892>`_]
+ Tempest now allows tests to verify that an API responds with a
+ redirect.
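A short sketch of how a service client might opt out of redirect handling; the new keyword is simply forwarded through ``RestClient.__init__``, and everything else here is illustrative::

    from tempest.lib.common import rest_client


    class NoRedirectClient(rest_client.RestClient):
        """Returns 3xx responses to the caller instead of following them."""

        def __init__(self, auth_provider, service, region, **kwargs):
            kwargs['follow_redirects'] = False
            super(NoRedirectClient, self).__init__(
                auth_provider, service, region, **kwargs)

        def get_redirected_resource(self, url):
            # With follow_redirects=False a 302 is returned as-is, so a
            # test can assert on resp.status and the Location header.
            resp, body = self.get(url)
            return resp, body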
diff --git a/releasenotes/notes/agents-client-delete-method-de1a7fb3f845999c.yaml b/releasenotes/notes/agents-client-delete-method-de1a7fb3f845999c.yaml
new file mode 100644
index 0000000..21068ec
--- /dev/null
+++ b/releasenotes/notes/agents-client-delete-method-de1a7fb3f845999c.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ Adds a new method to ``AgentsClient`` that implements agent deletion
+ according to the API reference [0].
+ [0] https://developer.openstack.org/api-ref/network/v2/index.html#delete-agent
+
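A hedged sketch of calling the new deletion method from an admin network test; the exact method signature is an assumption based on the referenced delete-agent API::

    from tempest.api.network import base


    class AgentDeletionSketch(base.BaseAdminNetworkTest):

        def test_delete_one_agent(self):
            agents = self.admin_agents_client.list_agents()['agents']
            # Assumed call shape for DELETE /v2.0/agents/{agent_id}.
            self.admin_agents_client.delete_agent(agent_id=agents[0]['id'])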
diff --git a/releasenotes/notes/bug-1791007-328a8b9a43bfb157.yaml b/releasenotes/notes/bug-1791007-328a8b9a43bfb157.yaml
new file mode 100644
index 0000000..a2e23fd
--- /dev/null
+++ b/releasenotes/notes/bug-1791007-328a8b9a43bfb157.yaml
@@ -0,0 +1,8 @@
+---
+fixes:
+ - |
+ Fixed bug #1791007. The ``tempest workspace register`` and ``tempest workspace rename``
+ CLI commands now raise an error if None or an empty string is passed as the --name
+ argument. Previously both commands accepted None or an empty string as the name,
+ which was confusing.
+
+
diff --git a/releasenotes/notes/bug-1799883-6ea95fc64f70c9ef.yaml b/releasenotes/notes/bug-1799883-6ea95fc64f70c9ef.yaml
new file mode 100644
index 0000000..630908f
--- /dev/null
+++ b/releasenotes/notes/bug-1799883-6ea95fc64f70c9ef.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Fixed bug #1799883. The ``tempest workspace register`` and ``tempest workspace move``
+ CLI commands now validate the value of the ``--path`` argument and raise an exception
+ if it is None or an empty string. Previously both commands accepted None or an empty
+ path, which was confusing.
diff --git a/releasenotes/notes/config-image-api-v1-default-to-false-39d5f2xafc534ab1.yaml b/releasenotes/notes/config-image-api-v1-default-to-false-39d5f2xafc534ab1.yaml
new file mode 100644
index 0000000..5efa4a9
--- /dev/null
+++ b/releasenotes/notes/config-image-api-v1-default-to-false-39d5f2xafc534ab1.yaml
@@ -0,0 +1,7 @@
+---
+upgrade:
+ - |
+ Changed the default value of the ``api_v1`` config option in the
+ ``image-feature-enabled`` group from ``True`` to ``False``, because
+ the glance v1 APIs are deprecated. Please set the option to ``True``
+ explicitly if you are still testing the glance v1 APIs.
diff --git a/releasenotes/notes/deprecate-scheduler-available-filters-cbca2017ba3cf2aa.yaml b/releasenotes/notes/deprecate-scheduler-available-filters-cbca2017ba3cf2aa.yaml
new file mode 100644
index 0000000..d0c3a7d
--- /dev/null
+++ b/releasenotes/notes/deprecate-scheduler-available-filters-cbca2017ba3cf2aa.yaml
@@ -0,0 +1,13 @@
+---
+deprecations:
+ - |
+ The ``scheduler_available_filters`` option is being deprecated in favor of
+ ``scheduler_enabled_filters``. The new name is more indicative of what the
+ option means. ``scheduler_enabled_filters``'s default value is set to the
+ default value of Nova's ``enabled_filters``.
+ ``scheduler_available_filters``'s default was `all`. There was confusion
+ around this value. Sometimes it was understood to mean the default Nova
+ filters are enabled, other times it was understood to mean all filters are
+ enabled. While `all` is still allowed for ``scheduler_enabled_filters`` for
+ backwards compatibility, it is strongly recommended to provide an explicit
+ list of filters that matches what's configured in nova.conf.
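For illustration, a test might gate on the new option roughly as below; note that the config group (``compute_feature_enabled``) is an assumption, since the note does not name it::

    from tempest.api.compute import base
    from tempest import config

    CONF = config.CONF


    class FilterDependentSketch(base.BaseV2ComputeAdminTest):

        @classmethod
        def skip_checks(cls):
            super(FilterDependentSketch, cls).skip_checks()
            filters = CONF.compute_feature_enabled.scheduler_enabled_filters
            # 'all' is still accepted for backwards compatibility, but an
            # explicit list matching nova.conf is recommended.
            if 'SameHostFilter' not in filters and 'all' not in filters:
                raise cls.skipException('SameHostFilter is not enabled.')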
diff --git a/releasenotes/notes/network-show-version-18e1707a4df0a3d3.yaml b/releasenotes/notes/network-show-version-18e1707a4df0a3d3.yaml
new file mode 100644
index 0000000..36a9710
--- /dev/null
+++ b/releasenotes/notes/network-show-version-18e1707a4df0a3d3.yaml
@@ -0,0 +1,7 @@
+---
+features:
+- |
+ Add a ``show_version`` function to the ``NetworkVersionsClient`` client.
+ This makes it possible to get the details of a Networking API version.
+
+ .. API reference: https://developer.openstack.org/api-ref/network/v2/index.html#show-api-v2-details
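A brief usage sketch; the argument format (``'v2.0'``) and the response key checked are assumptions based on the linked API reference::

    from tempest.api.network import base


    class NetworkVersionDetailsSketch(base.BaseNetworkTest):

        def test_show_api_v2_details(self):
            # show_version wraps GET /v2.0/ and returns the per-version
            # resource listing for the Networking API.
            body = self.network_versions_client.show_version('v2.0')
            self.assertIn('resources', body)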
diff --git a/releasenotes/notes/remove-deprecated-find-test-caller-f4525cd043bfd1b6.yaml b/releasenotes/notes/remove-deprecated-find-test-caller-f4525cd043bfd1b6.yaml
new file mode 100644
index 0000000..f22736f
--- /dev/null
+++ b/releasenotes/notes/remove-deprecated-find-test-caller-f4525cd043bfd1b6.yaml
@@ -0,0 +1,7 @@
+---
+upgrade:
+ - |
+ ``tempest.lib.common.utils.misc.find_test_caller`` was deprecated during
+ Kilo release cycle in favor of
+ ``tempest.lib.common.utils.test_utils.find_test_caller``. The deprecated
+ version of ``find_test_caller`` is removed.
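Callers only need to switch the import path; a minimal before/after sketch::

    # Removed location:
    #     from tempest.lib.common.utils import misc
    #     caller = misc.find_test_caller()

    # Supported location:
    from tempest.lib.common.utils import test_utils

    caller = test_utils.find_test_caller()  # typically used in log messages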
diff --git a/releasenotes/notes/tempest-default-run_validations-9640c41b6a4a9121.yaml b/releasenotes/notes/tempest-default-run_validations-9640c41b6a4a9121.yaml
new file mode 100644
index 0000000..8ff0b5c
--- /dev/null
+++ b/releasenotes/notes/tempest-default-run_validations-9640c41b6a4a9121.yaml
@@ -0,0 +1,10 @@
+---
+upgrade:
+ - |
+ ``CONF.validation.run_validation`` is now enabled by default.
+ This option needs to be set to ``true`` in order to run API tests
+ reliably when guest cooperation is required, for example when the
+ guest has to react to a Volume/Interface detach. The ssh test makes
+ sure the VM is alive and ready when the detach needs to happen.
+ The option has been enabled in the gate for a long time.
diff --git a/roles/run-tempest/README.rst b/roles/run-tempest/README.rst
index 71b8e4f..e1787b6 100644
--- a/roles/run-tempest/README.rst
+++ b/roles/run-tempest/README.rst
@@ -56,3 +56,14 @@
(?x) # Ignore comments and whitespaces
# Line with only a comment.
(tempest.api.identity).*$
+
+.. zuul:rolevar:: tox_extra_args
+ :default: ''
+
+ String of extra command line options to pass to tox.
+
+ Here is an example of running tox with the --sitepackages option:
+
+ ::
+ vars:
+ tox_extra_args: --sitepackages
diff --git a/roles/run-tempest/defaults/main.yaml b/roles/run-tempest/defaults/main.yaml
index c89eb93..06918b5 100644
--- a/roles/run-tempest/defaults/main.yaml
+++ b/roles/run-tempest/defaults/main.yaml
@@ -2,3 +2,4 @@
tempest_test_regex: ''
tox_envlist: smoke
tempest_black_regex: ''
+tox_extra_args: ''
diff --git a/roles/run-tempest/tasks/main.yaml b/roles/run-tempest/tasks/main.yaml
index 54ddc71..16086aa 100644
--- a/roles/run-tempest/tasks/main.yaml
+++ b/roles/run-tempest/tasks/main.yaml
@@ -35,7 +35,7 @@
when: blacklist_stat.stat.exists
- name: Run Tempest
- command: tox -e {{tox_envlist}} -- {{tempest_test_regex|quote}} {{blacklist_option|default('')}} \
+ command: tox -e {{tox_envlist}} {{tox_extra_args}} -- {{tempest_test_regex|quote}} {{blacklist_option|default('')}} \
--concurrency={{tempest_concurrency|default(default_concurrency)}} \
--black-regex={{tempest_black_regex|quote}}
args:
diff --git a/setup.cfg b/setup.cfg
index 96ee7ea..fcbe956 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -4,7 +4,7 @@
description-file =
README.rst
author = OpenStack
-author-email = openstack-dev@lists.openstack.org
+author-email = openstack-discuss@lists.openstack.org
home-page = https://docs.openstack.org/tempest/latest/
classifier =
Intended Audience :: Information Technology
diff --git a/tempest/README.rst b/tempest/README.rst
index a5f4a92..b345032 100644
--- a/tempest/README.rst
+++ b/tempest/README.rst
@@ -9,7 +9,7 @@
OpenStack clouds.
As such Tempest tests come in many flavors, each with their own rules
-and guidelines. Below is the overview of the Tempest respository structure
+and guidelines. Below is the overview of the Tempest repository structure
to make this clear.
.. code-block:: console
diff --git a/tempest/api/compute/admin/test_keypairs_v210.py b/tempest/api/compute/admin/test_keypairs_v210.py
index 24ea8a1..40ed532 100644
--- a/tempest/api/compute/admin/test_keypairs_v210.py
+++ b/tempest/api/compute/admin/test_keypairs_v210.py
@@ -56,7 +56,7 @@
self.assertEqual(first_keyname, keypair_detail['name'])
self.assertEqual(user_id, keypair_detail['user_id'],
"The fetched keypair is not for requested user!")
- # Create a admin keypair
+ # Create an admin keypair
admin_keypair = self.create_keypair(keypair_type='ssh',
client=self.client)
admin_keypair.pop('private_key', None)
diff --git a/tempest/api/compute/admin/test_live_migration.py b/tempest/api/compute/admin/test_live_migration.py
index 5a60dc6..b1a7c52 100644
--- a/tempest/api/compute/admin/test_live_migration.py
+++ b/tempest/api/compute/admin/test_live_migration.py
@@ -50,8 +50,6 @@
# a subnet so the instance being migrated has a single port, but
# we need that to make sure we are properly updating the port
# host bindings during the live migration.
- # TODO(mriedem): SSH validation before and after the instance is
- # live migrated would be a nice test wrinkle addition.
cls.set_network_resources(network=True, subnet=True)
super(LiveMigrationTestBase, cls).setup_credentials()
diff --git a/tempest/api/compute/admin/test_security_groups.py b/tempest/api/compute/admin/test_security_groups.py
index f0178aa..dfa801b 100644
--- a/tempest/api/compute/admin/test_security_groups.py
+++ b/tempest/api/compute/admin/test_security_groups.py
@@ -73,8 +73,17 @@
# search filter
fetched_list = (self.client.list_security_groups(all_tenants='true')
['security_groups'])
- # Now check if all created Security Groups are present in fetched list
- for sec_group in fetched_list:
- self.assertEqual(sec_group['tenant_id'], client_tenant_id,
- "Failed to get all security groups for "
- "non admin user.")
+ sec_group_id_list = [sg['id'] for sg in fetched_list]
+ # Now check that the 'all_tenants=true' filter for a non-admin user
+ # only returns the security groups created by that non-admin user,
+ # not all security groups, which would include security groups
+ # created by other users.
+ for sec_group in security_group_list:
+ if sec_group['tenant_id'] == client_tenant_id:
+ self.assertIn(sec_group['id'], sec_group_id_list,
+ "Failed to get all security groups for "
+ "non admin user.")
+ else:
+ self.assertNotIn(sec_group['id'], sec_group_id_list,
+ "Non admin user shouldn't get other user's "
+ "security groups.")
diff --git a/tempest/api/compute/admin/test_servers_on_multinodes.py b/tempest/api/compute/admin/test_servers_on_multinodes.py
index 5cd98f4..bebc8c5 100644
--- a/tempest/api/compute/admin/test_servers_on_multinodes.py
+++ b/tempest/api/compute/admin/test_servers_on_multinodes.py
@@ -105,7 +105,7 @@
asserts the servers are in the group and on different hosts.
"""
hosts = self._create_servers_with_group('anti-affinity')
- hostnames = hosts.values()
+ hostnames = list(hosts.values())
self.assertNotEqual(hostnames[0], hostnames[1],
'Servers are on the same host: %s' % hosts)
@@ -120,6 +120,6 @@
asserts the servers are in the group and on same host.
"""
hosts = self._create_servers_with_group('affinity')
- hostnames = hosts.values()
+ hostnames = list(hosts.values())
self.assertEqual(hostnames[0], hostnames[1],
'Servers are on the different hosts: %s' % hosts)
diff --git a/tempest/api/compute/admin/test_volume_swap.py b/tempest/api/compute/admin/test_volume_swap.py
index ed8cf20..6b58939 100644
--- a/tempest/api/compute/admin/test_volume_swap.py
+++ b/tempest/api/compute/admin/test_volume_swap.py
@@ -70,6 +70,7 @@
"""The test suite for swapping of volume with admin user.
The following is the scenario outline:
+
1. Create a volume "volume1" with non-admin.
2. Create a volume "volume2" with non-admin.
3. Boot an instance "instance1" with non-admin.
@@ -82,6 +83,11 @@
is attached to "instance1" and "volume2" is in available state.
"""
+ # NOTE(mriedem): This is an uncommon scenario to call the compute API
+ # to swap volumes directly; swap volume is primarily only for volume
+ # live migration and retype callbacks from the volume service, and is slow
+ # so it's marked as such.
+ @decorators.attr(type='slow')
@decorators.idempotent_id('1769f00d-a693-4d67-a631-6a3496773813')
@utils.services('volume')
def test_volume_swap(self):
@@ -136,6 +142,11 @@
if not CONF.compute_feature_enabled.volume_multiattach:
raise cls.skipException('Volume multi-attach is not available.')
+ # NOTE(mriedem): This is an uncommon scenario to call the compute API
+ # to swap volumes directly; swap volume is primarily only for volume
+ # live migration and retype callbacks from the volume service, and is slow
+ # so it's marked as such.
+ @decorators.attr(type='slow')
@decorators.idempotent_id('e8f8f9d1-d7b7-4cd2-8213-ab85ef697b6e')
@utils.services('volume')
def test_volume_swap_with_multiattach(self):
@@ -146,13 +157,19 @@
volume1 = self.create_volume(multiattach=True)
volume2 = self.create_volume(multiattach=True)
- # Boot server1
- server1 = self.create_test_server(wait_until='ACTIVE')
+ # Create two servers and wait for them to be ACTIVE.
+ reservation_id = self.create_test_server(
+ wait_until='ACTIVE', min_count=2,
+ return_reservation_id=True)['reservation_id']
+ # Get the servers using the reservation_id.
+ servers = self.servers_client.list_servers(
+ reservation_id=reservation_id)['servers']
+ self.assertEqual(2, len(servers))
# Attach volume1 to server1
+ server1 = servers[0]
self.attach_volume(server1, volume1)
- # Boot server2
- server2 = self.create_test_server(wait_until='ACTIVE')
# Attach volume1 to server2
+ server2 = servers[1]
self.attach_volume(server2, volume1)
# Swap volume1 to volume2 on server1, volume1 should remain attached
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index ff2f99c..624a99e 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -331,8 +331,7 @@
# The compute image proxy APIs were deprecated in 2.35 so
# use the images client directly if the API microversion being
# used is >=2.36.
- if api_version_utils.compare_version_header_to_response(
- "OpenStack-API-Version", "compute 2.36", image.response, "lt"):
+ if not cls.is_requested_microversion_compatible('2.35'):
client = cls.images_client
else:
client = cls.compute_images_client
@@ -341,6 +340,9 @@
if wait_until is not None:
try:
+ wait_until = wait_until.upper()
+ if not cls.is_requested_microversion_compatible('2.35'):
+ wait_until = wait_until.lower()
waiters.wait_for_image_status(client, image_id, wait_until)
except lib_exc.NotFound:
if wait_until.upper() == 'ACTIVE':
@@ -455,7 +457,7 @@
else:
msg = ('When validation.connect_method equals floating, '
'validation_resources cannot be None')
- raise exceptions.InvalidParam(invalid_param=msg)
+ raise lib_exc.InvalidParam(invalid_param=msg)
elif CONF.validation.connect_method == 'fixed':
addresses = server['addresses'][CONF.validation.network_for_ssh]
for address in addresses:
@@ -475,7 +477,7 @@
"""Create a volume and wait for it to become 'available'.
:param image_ref: Specify an image id to create a bootable volume.
- :**kwargs: other parameters to create volume.
+ :param kwargs: other parameters to create volume.
:returns: The available volume.
"""
if 'size' not in kwargs:
diff --git a/tempest/api/compute/images/test_images.py b/tempest/api/compute/images/test_images.py
index 29bd6da..c8221c2 100644
--- a/tempest/api/compute/images/test_images.py
+++ b/tempest/api/compute/images/test_images.py
@@ -38,7 +38,10 @@
@classmethod
def setup_clients(cls):
super(ImagesTestJSON, cls).setup_clients()
- cls.client = cls.compute_images_client
+ if cls.is_requested_microversion_compatible('2.35'):
+ cls.client = cls.compute_images_client
+ else:
+ cls.client = cls.images_client
@decorators.idempotent_id('aa06b52b-2db5-4807-b218-9441f75d74e3')
def test_delete_saving_image(self):
diff --git a/tempest/api/compute/images/test_images_negative.py b/tempest/api/compute/images/test_images_negative.py
index e292389..2400348 100644
--- a/tempest/api/compute/images/test_images_negative.py
+++ b/tempest/api/compute/images/test_images_negative.py
@@ -22,11 +22,11 @@
CONF = config.CONF
-class ImagesNegativeTestJSON(base.BaseV2ComputeTest):
+class ImagesNegativeTestBase(base.BaseV2ComputeTest):
@classmethod
def skip_checks(cls):
- super(ImagesNegativeTestJSON, cls).skip_checks()
+ super(ImagesNegativeTestBase, cls).skip_checks()
if not CONF.service_available.glance:
skip_msg = ("%s skipped as glance is not available" % cls.__name__)
raise cls.skipException(skip_msg)
@@ -37,9 +37,12 @@
@classmethod
def setup_clients(cls):
- super(ImagesNegativeTestJSON, cls).setup_clients()
+ super(ImagesNegativeTestBase, cls).setup_clients()
cls.client = cls.compute_images_client
+
+class ImagesNegativeTestJSON(ImagesNegativeTestBase):
+
@decorators.attr(type=['negative'])
@decorators.idempotent_id('6cd5a89d-5b47-46a7-93bc-3916f0d84973')
def test_create_image_from_deleted_server(self):
@@ -82,6 +85,10 @@
self.assertRaises(lib_exc.NotFound, self.client.create_image,
test_uuid, name=snapshot_name)
+
+class ImagesDeleteNegativeTestJSON(ImagesNegativeTestBase):
+ max_microversion = '2.35'
+
@decorators.attr(type=['negative'])
@decorators.idempotent_id('381acb65-785a-4942-94ce-d8f8c84f1f0f')
def test_delete_image_with_invalid_image_id(self):
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index 058e7e6..3c152c9 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -44,7 +44,10 @@
@classmethod
def setup_clients(cls):
super(ImagesOneServerTestJSON, cls).setup_clients()
- cls.client = cls.compute_images_client
+ if cls.is_requested_microversion_compatible('2.35'):
+ cls.client = cls.compute_images_client
+ else:
+ cls.client = cls.images_client
def _get_default_flavor_disk_size(self, flavor_id):
flavor = self.flavors_client.show_flavor(flavor_id)['flavor']
@@ -52,6 +55,13 @@
@decorators.idempotent_id('3731d080-d4c5-4872-b41a-64d0d0021314')
def test_create_delete_image(self):
+ if self.is_requested_microversion_compatible('2.35'):
+ MIN_DISK = 'minDisk'
+ MIN_RAM = 'minRam'
+ else:
+ MIN_DISK = 'min_disk'
+ MIN_RAM = 'min_ram'
+
# Create a new image
name = data_utils.rand_name('image')
meta = {'image_type': 'test'}
@@ -61,17 +71,22 @@
# Verify the image was created correctly
self.assertEqual(name, image['name'])
- self.assertEqual('test', image['metadata']['image_type'])
+ if self.is_requested_microversion_compatible('2.35'):
+ self.assertEqual('test', image['metadata']['image_type'])
+ else:
+ self.assertEqual('test', image['image_type'])
- original_image = self.client.show_image(self.image_ref)['image']
+ original_image = self.client.show_image(self.image_ref)
+ if self.is_requested_microversion_compatible('2.35'):
+ original_image = original_image['image']
# Verify minRAM is the same as the original image
- self.assertEqual(image['minRam'], original_image['minRam'])
+ self.assertEqual(image[MIN_RAM], original_image[MIN_RAM])
# Verify minDisk is the same as the original image or the flavor size
flavor_disk_size = self._get_default_flavor_disk_size(self.flavor_ref)
- self.assertIn(str(image['minDisk']),
- (str(original_image['minDisk']), str(flavor_disk_size)))
+ self.assertIn(str(image[MIN_DISK]),
+ (str(original_image[MIN_DISK]), str(flavor_disk_size)))
# Verify the image was deleted correctly
self.client.delete_image(image['id'])
@@ -86,7 +101,8 @@
# will return 400(Bad Request) if we attempt to send a name which has
# 4 byte utf-8 character.
utf8_name = data_utils.rand_name(b'\xe2\x82\xa1'.decode('utf-8'))
- body = self.client.create_image(self.server_id, name=utf8_name)
+ body = self.compute_images_client.create_image(
+ self.server_id, name=utf8_name)
if api_version_utils.compare_version_header_to_response(
"OpenStack-API-Version", "compute 2.45", body.response, "lt"):
image_id = body['image_id']
diff --git a/tempest/api/compute/images/test_images_oneserver_negative.py b/tempest/api/compute/images/test_images_oneserver_negative.py
index bebc6ca..512c9d2 100644
--- a/tempest/api/compute/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/images/test_images_oneserver_negative.py
@@ -72,7 +72,10 @@
@classmethod
def setup_clients(cls):
super(ImagesOneServerNegativeTestJSON, cls).setup_clients()
- cls.client = cls.compute_images_client
+ if cls.is_requested_microversion_compatible('2.35'):
+ cls.client = cls.compute_images_client
+ else:
+ cls.client = cls.images_client
@classmethod
def resource_setup(cls):
@@ -122,7 +125,8 @@
# Return an error if snapshot name over 255 characters is passed
snapshot_name = ('a' * 256)
- self.assertRaises(lib_exc.BadRequest, self.client.create_image,
+ self.assertRaises(lib_exc.BadRequest,
+ self.compute_images_client.create_image,
self.server_id, name=snapshot_name)
@decorators.attr(type=['negative'])
diff --git a/tempest/api/compute/images/test_list_image_filters.py b/tempest/api/compute/images/test_list_image_filters.py
index d83d8df..2ac7de3 100644
--- a/tempest/api/compute/images/test_list_image_filters.py
+++ b/tempest/api/compute/images/test_list_image_filters.py
@@ -31,6 +31,7 @@
class ListImageFiltersTestJSON(base.BaseV2ComputeTest):
+ max_microversion = '2.35'
@classmethod
def skip_checks(cls):
diff --git a/tempest/api/compute/images/test_list_image_filters_negative.py b/tempest/api/compute/images/test_list_image_filters_negative.py
index d37f8fc..81c59f9 100644
--- a/tempest/api/compute/images/test_list_image_filters_negative.py
+++ b/tempest/api/compute/images/test_list_image_filters_negative.py
@@ -22,6 +22,7 @@
class ListImageFiltersNegativeTestJSON(base.BaseV2ComputeTest):
+ max_microversion = '2.35'
@classmethod
def skip_checks(cls):
diff --git a/tempest/api/compute/images/test_list_images.py b/tempest/api/compute/images/test_list_images.py
index e2dbd72..cbb65bb 100644
--- a/tempest/api/compute/images/test_list_images.py
+++ b/tempest/api/compute/images/test_list_images.py
@@ -21,6 +21,7 @@
class ListImagesTestJSON(base.BaseV2ComputeTest):
+ max_microversion = '2.35'
@classmethod
def skip_checks(cls):
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index 0636ee4..bea23d9 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -23,6 +23,8 @@
from tempest.common.utils import net_utils
from tempest.common import waiters
from tempest import config
+from tempest.lib.common.utils.linux import remote_client
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
@@ -38,11 +40,16 @@
raise cls.skipException("Neutron is required")
if not CONF.compute_feature_enabled.interface_attach:
raise cls.skipException("Interface attachment is not available.")
+ if not CONF.validation.run_validation:
+ raise cls.skipException('Validation should be enabled to ensure '
+ 'guest OS is running and capable of '
+ 'processing ACPI events.')
@classmethod
def setup_credentials(cls):
# This test class requires network and subnet
- cls.set_network_resources(network=True, subnet=True)
+ cls.set_network_resources(network=True, subnet=True, router=True,
+ dhcp=True)
super(AttachInterfacesTestBase, cls).setup_credentials()
@classmethod
@@ -51,8 +58,27 @@
cls.subnets_client = cls.os_primary.subnets_client
cls.ports_client = cls.os_primary.ports_client
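+ # The helper below SSHes into the guest via RemoteClient to confirm it
+ # has fully booted before interfaces are hot-plugged; the guest must be
+ # up to process the resulting ACPI events.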
+ def _wait_for_validation(self, server, validation_resources):
+ linux_client = remote_client.RemoteClient(
+ self.get_server_ip(server, validation_resources),
+ self.image_ssh_user,
+ self.image_ssh_password,
+ validation_resources['keypair']['private_key'],
+ server=server,
+ servers_client=self.servers_client)
+ linux_client.validate_authentication()
+
def _create_server_get_interfaces(self):
- server = self.create_test_server(wait_until='ACTIVE')
+ validation_resources = self.get_test_validation_resources(
+ self.os_primary)
+ server = self.create_test_server(
+ validatable=True,
+ validation_resources=validation_resources,
+ wait_until='ACTIVE')
+ # NOTE(artom) self.create_test_server adds cleanups, but that is
+ # apparently not enough here, so add an explicit cleanup as well.
+ self.addCleanup(self.delete_server, server['id'])
+ self._wait_for_validation(server, validation_resources)
ifs = (self.interfaces_client.list_interfaces(server['id'])
['interfaceAttachments'])
body = waiters.wait_for_interface_status(
@@ -143,7 +169,9 @@
iface = self.interfaces_client.create_interface(
server['id'], net_id=network_id,
fixed_ips=fixed_ips)['interfaceAttachment']
- self.addCleanup(self.ports_client.delete_port, iface['port_id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.ports_client.delete_port,
+ iface['port_id'])
self._check_interface(iface, server_id=server['id'],
fixed_ip=ip_list[0])
return iface
@@ -228,23 +256,13 @@
if not (CONF.auth.use_dynamic_credentials and
CONF.auth.create_isolated_networks and
not CONF.network.shared_physical_network):
- raise self.skipException("Only owner network supports "
- "creating interface by fixed ip.")
+ raise self.skipException("Only owner network supports "
+ "creating interface by fixed ip.")
server, ifs = self._create_server_get_interfaces()
interface_count = len(ifs)
self.assertGreater(interface_count, 0)
- try:
- iface = self._test_create_interface(server)
- except lib_exc.BadRequest as e:
- msg = ('Multiple possible networks found, use a Network ID to be '
- 'more specific.')
- if not CONF.compute.fixed_network_name and six.text_type(e) == msg:
- raise
- else:
- ifs.append(iface)
-
iface = self._test_create_interface_by_fixed_ips(server, ifs)
ifs.append(iface)
@@ -274,15 +292,26 @@
port_id = port['port']['id']
self.addCleanup(self.ports_client.delete_port, port_id)
- # create two servers
- _, servers = compute.create_test_server(
- self.os_primary, tenant_network=network,
- wait_until='ACTIVE', min_count=2)
+ # NOTE(artom) We create two servers one at a time because
+ # create_test_server doesn't support multiple validatable servers.
+ validation_resources = self.get_test_validation_resources(
+ self.os_primary)
+
+ def _create_validatable_server():
+ _, servers = compute.create_test_server(
+ self.os_primary, tenant_network=network,
+ wait_until='ACTIVE', validatable=True,
+ validation_resources=validation_resources)
+ return servers[0]
+
+ servers = [_create_validatable_server(), _create_validatable_server()]
+
# add our cleanups for the servers since we bypassed the base class
for server in servers:
self.addCleanup(self.delete_server, server['id'])
for server in servers:
+ self._wait_for_validation(server, validation_resources)
# attach the port to the server
iface = self.interfaces_client.create_interface(
server['id'], port_id=port_id)['interfaceAttachment']
@@ -304,20 +333,61 @@
def test_add_remove_fixed_ip(self):
# Add and Remove the fixed IP to server.
server, ifs = self._create_server_get_interfaces()
- interface_count = len(ifs)
- self.assertGreater(interface_count, 0)
+ original_interface_count = len(ifs) # This is the number of ports.
+ self.assertGreater(original_interface_count, 0)
+ # Get the starting list of IPs on the server.
+ addresses = self.os_primary.servers_client.list_addresses(
+ server['id'])['addresses']
+ # There should be one entry for the single network mapped to a list of
+ # addresses, which at this point should have at least one entry.
+ # Note that we could start with two addresses depending on how tempest
+ # is configured for using floating IPs.
+ self.assertEqual(1, len(addresses), addresses) # number of networks
+ # Keep track of the original addresses so we can know which IP is new.
+ original_ips = [addr['addr'] for addr in list(addresses.values())[0]]
+ original_ip_count = len(original_ips)
+ self.assertGreater(original_ip_count, 0, addresses) # at least 1
network_id = ifs[0]['net_id']
+ # Add another fixed IP to the server. This should result in another
+ # fixed IP on the same network (and same port since we only have one
+ # port).
self.servers_client.add_fixed_ip(server['id'], networkId=network_id)
- # Remove the fixed IP from server.
+ # Wait for the IP count to increase by one.
+
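+ # call_until_true polls the predicate every CONF.compute.build_interval
+ # seconds until it returns True or CONF.compute.build_timeout expires.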
+ def _wait_for_ip_increase():
+ _addresses = self.os_primary.servers_client.list_addresses(
+ server['id'])['addresses']
+ return len(list(_addresses.values())[0]) == original_ip_count + 1
+
+ if not test_utils.call_until_true(
+ _wait_for_ip_increase, CONF.compute.build_timeout,
+ CONF.compute.build_interval):
+ raise lib_exc.TimeoutException(
+ 'Timed out while waiting for IP count to increase.')
+
+ # Remove the fixed IP that we just added.
server_detail = self.os_primary.servers_client.show_server(
server['id'])['server']
# Get the Fixed IP from server.
fixed_ip = None
for ip_set in server_detail['addresses']:
for ip in server_detail['addresses'][ip_set]:
- if ip['OS-EXT-IPS:type'] == 'fixed':
+ if (ip['OS-EXT-IPS:type'] == 'fixed' and
+ ip['addr'] not in original_ips):
fixed_ip = ip['addr']
break
if fixed_ip is not None:
break
self.servers_client.remove_fixed_ip(server['id'], address=fixed_ip)
+ # Wait for the IP count to decrease by one.
+
+ def _wait_for_ip_decrease():
+ _addresses = self.os_primary.servers_client.list_addresses(
+ server['id'])['addresses']
+ return len(list(_addresses.values())[0]) == original_ip_count
+
+ if not test_utils.call_until_true(
+ _wait_for_ip_decrease, CONF.compute.build_timeout,
+ CONF.compute.build_interval):
+ raise lib_exc.TimeoutException(
+ 'Timed out while waiting for IP count to decrease.')
diff --git a/tempest/api/compute/servers/test_delete_server.py b/tempest/api/compute/servers/test_delete_server.py
index 0093752..0263b81 100644
--- a/tempest/api/compute/servers/test_delete_server.py
+++ b/tempest/api/compute/servers/test_delete_server.py
@@ -107,11 +107,10 @@
@utils.services('volume')
def test_delete_server_while_in_attached_volume(self):
# Delete a server while a volume is attached to it
- device = '/dev/%s' % CONF.compute.volume_device_name
server = self.create_test_server(wait_until='ACTIVE')
volume = self.create_volume()
- self.attach_volume(server, volume, device=device)
+ self.attach_volume(server, volume)
self.client.delete_server(server['id'])
waiters.wait_for_server_termination(self.client, server['id'])
diff --git a/tempest/api/compute/servers/test_device_tagging.py b/tempest/api/compute/servers/test_device_tagging.py
index 40681cb..d40f937 100644
--- a/tempest/api/compute/servers/test_device_tagging.py
+++ b/tempest/api/compute/servers/test_device_tagging.py
@@ -138,6 +138,9 @@
except Exception:
return False
+ # NOTE(mriedem): This is really more like a scenario test and is slow so
+ # it's marked as such.
+ @decorators.attr(type='slow')
@decorators.idempotent_id('a2e65a6c-66f1-4442-aaa8-498c31778d96')
@utils.services('network', 'volume', 'image')
def test_tagged_boot_devices(self):
diff --git a/tempest/api/compute/servers/test_list_servers_negative.py b/tempest/api/compute/servers/test_list_servers_negative.py
index 18a78f0..f0915de 100644
--- a/tempest/api/compute/servers/test_list_servers_negative.py
+++ b/tempest/api/compute/servers/test_list_servers_negative.py
@@ -80,7 +80,7 @@
@decorators.idempotent_id('fcdf192d-0f74-4d89-911f-1ec002b822c4')
def test_list_servers_status_non_existing(self):
# When invalid status is specified, up to microversion 2.37,
- # an empty list is returnd, and starting from microversion 2.38,
+ # an empty list is returned, and starting from microversion 2.38,
# a 400 error is returned in that case.
if self.is_requested_microversion_compatible('2.37'):
body = self.client.list_servers(status='non_existing_status')
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index f6494b5..f3d7476 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -265,6 +265,11 @@
self.client.start_server(self.server_id)
+ # NOTE(mriedem): Marked as slow because while rebuild and volume-backed
+ # servers are common, we don't actually change the image (you can't with
+ # volume-backed rebuild), so this isn't testing much outside normal
+ # rebuild (and it's slow).
+ @decorators.attr(type='slow')
@decorators.idempotent_id('b68bd8d6-855d-4212-b59b-2e704044dace')
@utils.services('volume')
def test_rebuild_server_with_volume_attached(self):
diff --git a/tempest/api/compute/servers/test_server_personality.py b/tempest/api/compute/servers/test_server_personality.py
index 6f32b46..4f484e2 100644
--- a/tempest/api/compute/servers/test_server_personality.py
+++ b/tempest/api/compute/servers/test_server_personality.py
@@ -45,6 +45,10 @@
super(ServerPersonalityTestJSON, cls).setup_clients()
cls.client = cls.servers_client
+ # NOTE(mriedem): Marked as slow because personality (file injection) is
+ # deprecated in nova so we don't care as much about running this all the
+ # time (and it's slow).
+ @decorators.attr(type='slow')
@decorators.idempotent_id('3cfe87fd-115b-4a02-b942-7dc36a337fdf')
def test_create_server_with_personality(self):
file_contents = 'This is a test file.'
@@ -75,6 +79,10 @@
linux_client.exec_command(
'sudo cat %s' % file_path))
+ # NOTE(mriedem): Marked as slow because personality (file injection) is
+ # deprecated in nova so we don't care as much about running this all the
+ # time (and it's slow).
+ @decorators.attr(type='slow')
@decorators.idempotent_id('128966d8-71fc-443c-8cab-08e24114ecc9')
def test_rebuild_server_with_personality(self):
validation_resources = self.get_test_validation_resources(
@@ -117,6 +125,10 @@
self.assertRaises((lib_exc.Forbidden, lib_exc.OverLimit),
self.create_test_server, personality=personality)
+ # NOTE(mriedem): Marked as slow because personality (file injection) is
+ # deprecated in nova so we don't care as much about running this all the
+ # time (and it's slow).
+ @decorators.attr(type='slow')
@decorators.idempotent_id('52f12ee8-5180-40cc-b417-31572ea3d555')
def test_can_create_server_with_max_number_personality_files(self):
# Server should be created successfully if maximum allowed number of
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index b0ef3bc..6629794 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -24,11 +24,11 @@
CONF = config.CONF
-class ServerRescueTestJSON(base.BaseV2ComputeTest):
+class ServerRescueTestBase(base.BaseV2ComputeTest):
@classmethod
def skip_checks(cls):
- super(ServerRescueTestJSON, cls).skip_checks()
+ super(ServerRescueTestBase, cls).skip_checks()
if not CONF.compute_feature_enabled.rescue:
msg = "Server rescue not available."
raise cls.skipException(msg)
@@ -36,11 +36,11 @@
@classmethod
def setup_credentials(cls):
cls.set_network_resources(network=True, subnet=True, router=True)
- super(ServerRescueTestJSON, cls).setup_credentials()
+ super(ServerRescueTestBase, cls).setup_credentials()
@classmethod
def resource_setup(cls):
- super(ServerRescueTestJSON, cls).resource_setup()
+ super(ServerRescueTestBase, cls).resource_setup()
password = data_utils.rand_password()
server = cls.create_test_server(adminPass=password,
@@ -50,6 +50,9 @@
'RESCUE')
cls.rescued_server_id = server['id']
+
+class ServerRescueTestJSON(ServerRescueTestBase):
+
@decorators.idempotent_id('fd032140-714c-42e4-a8fd-adcd8df06be6')
def test_rescue_unrescue_instance(self):
password = data_utils.rand_password()
@@ -62,6 +65,15 @@
waiters.wait_for_server_status(self.servers_client, server['id'],
'ACTIVE')
+
+class ServerRescueTestJSONUnderV235(ServerRescueTestBase):
+
+ max_microversion = '2.35'
+
+ # TODO(zhufl): After 2.35 we should switch to the neutron client to
+ # create the floating IP, but that needs admin credentials, so the test
+ # cases will have to be added somewhere in the admin directory.
+
@decorators.idempotent_id('4842e0cf-e87d-4d9d-b61f-f4791da3cacc')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
diff --git a/tempest/api/compute/servers/test_server_rescue_negative.py b/tempest/api/compute/servers/test_server_rescue_negative.py
index 1260c6b..caceb64 100644
--- a/tempest/api/compute/servers/test_server_rescue_negative.py
+++ b/tempest/api/compute/servers/test_server_rescue_negative.py
@@ -43,7 +43,6 @@
@classmethod
def resource_setup(cls):
super(ServerRescueNegativeTestJSON, cls).resource_setup()
- cls.device = CONF.compute.volume_device_name
cls.password = data_utils.rand_password()
rescue_password = data_utils.rand_password()
# Server for negative tests
@@ -125,8 +124,7 @@
self.assertRaises(lib_exc.Conflict,
self.servers_client.attach_volume,
self.server_id,
- volumeId=volume['id'],
- device='/dev/%s' % self.device)
+ volumeId=volume['id'])
@decorators.idempotent_id('f56e465b-fe10-48bf-b75d-646cda3a8bc9')
@utils.services('volume')
@@ -136,7 +134,7 @@
# Attach the volume to the server
server = self.servers_client.show_server(self.server_id)['server']
- self.attach_volume(server, volume, device='/dev/%s' % self.device)
+ self.attach_volume(server, volume)
# Rescue the server
self.servers_client.rescue_server(self.server_id,
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index 0c1c05c..6cabf65 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -488,8 +488,11 @@
server = self.client.show_server(self.server_id)['server']
image_name = server['name'] + '-shelved'
- params = {'name': image_name}
- images = self.compute_images_client.list_images(**params)['images']
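+ # The image v1 client takes filters as plain keyword arguments, while
+ # the v2 client expects them wrapped in a 'params' dict.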
+ if CONF.image_feature_enabled.api_v1:
+ kwargs = {'name': image_name}
+ else:
+ kwargs = {'params': {'name': image_name}}
+ images = self.images_client.list_images(**kwargs)['images']
self.assertEqual(1, len(images))
self.assertEqual(image_name, images[0]['name'])
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index caa445d..f7b5b4b 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -84,6 +84,11 @@
linux_client.validate_authentication()
volume = self.create_volume()
+
+ # NOTE: As of the 12.0.0 Liberty release, the Nova libvirt driver
+ # no longer honors a user-supplied device name; in that case
+ # CONF.compute.volume_device_name must be set to the same value as
+ # the one auto-assigned by libvirt.
attachment = self.attach_volume(server, volume,
device=('/dev/%s' % self.device))
@@ -121,8 +126,7 @@
# List volume attachment of the server
server, _ = self._create_server()
volume_1st = self.create_volume()
- attachment_1st = self.attach_volume(server, volume_1st,
- device=('/dev/%s' % self.device))
+ attachment_1st = self.attach_volume(server, volume_1st)
body = self.servers_client.list_volume_attachments(
server['id'])['volumeAttachments']
self.assertEqual(1, len(body))
@@ -160,6 +164,9 @@
This test checks the attaching and detaching volumes from
a shelved or shelved offload instance.
+
+ Note that these are uncommon scenarios until blueprint detach-boot-volume
+ is implemented in the compute service.
"""
min_microversion = '2.20'
@@ -220,6 +227,9 @@
server, validation_resources)
self.assertEqual(number_of_volumes, counted_volumes)
+ # NOTE(mriedem): Marked as slow since this is an uncommon scenario until
+ # attach/detach root volume is supported in nova, and it's slow.
+ @decorators.attr(type='slow')
@decorators.idempotent_id('13a940b6-3474-4c3c-b03f-29b89112bfee')
def test_attach_volume_shelved_or_offload_server(self):
# Create server, count number of volumes on it, shelve
@@ -228,8 +238,7 @@
volume = self.create_volume()
num_vol = self._count_volumes(server, validation_resources)
self._shelve_server(server, validation_resources)
- attachment = self.attach_volume(server, volume,
- device=('/dev/%s' % self.device))
+ attachment = self.attach_volume(server, volume)
# Unshelve the instance and check that attached volume exists
self._unshelve_server_and_check_volumes(
@@ -245,6 +254,9 @@
# case of shelved_offloaded.
self.assertIsNotNone(volume_attachment['device'])
+ # NOTE(mriedem): Marked as slow since this is an uncommon scenario until
+ # attach/detach root volume is supported in nova, and it's slow.
+ @decorators.attr(type='slow')
@decorators.idempotent_id('b54e86dd-a070-49c4-9c07-59ae6dae15aa')
def test_detach_volume_shelved_or_offload_server(self):
# Count number of volumes on instance, shelve
@@ -255,7 +267,7 @@
self._shelve_server(server, validation_resources)
# Attach and then detach the volume
- self.attach_volume(server, volume, device=('/dev/%s' % self.device))
+ self.attach_volume(server, volume)
self.servers_client.detach_volume(server['id'], volume['id'])
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'available')
diff --git a/tempest/api/compute/volumes/test_attach_volume_negative.py b/tempest/api/compute/volumes/test_attach_volume_negative.py
index 8618148..6d08f90 100644
--- a/tempest/api/compute/volumes/test_attach_volume_negative.py
+++ b/tempest/api/compute/volumes/test_attach_volume_negative.py
@@ -35,9 +35,7 @@
def test_delete_attached_volume(self):
server = self.create_test_server(wait_until='ACTIVE')
volume = self.create_volume()
-
- path = "/dev/%s" % CONF.compute.volume_device_name
- self.attach_volume(server, volume, device=path)
+ self.attach_volume(server, volume)
self.assertRaises(lib_exc.BadRequest,
self.delete_volume, volume['id'])
diff --git a/tempest/api/identity/admin/v2/test_services.py b/tempest/api/identity/admin/v2/test_services.py
index e2ed5ef..03543ac 100644
--- a/tempest/api/identity/admin/v2/test_services.py
+++ b/tempest/api/identity/admin/v2/test_services.py
@@ -89,14 +89,10 @@
service = self.services_client.create_service(
name=name, type=s_type,
description=description)['OS-KSADM:service']
+ self.addCleanup(self.services_client.delete_service, service['id'])
services.append(service)
service_ids = [svc['id'] for svc in services]
- def delete_services():
- for service_id in service_ids:
- self.services_client.delete_service(service_id)
-
- self.addCleanup(delete_services)
# List and Verify Services
body = self.services_client.list_services()['OS-KSADM:services']
found = [serv for serv in body if serv['id'] in service_ids]
diff --git a/tempest/api/identity/admin/v2/test_tenants.py b/tempest/api/identity/admin/v2/test_tenants.py
index cda721c..f68754e 100644
--- a/tempest/api/identity/admin/v2/test_tenants.py
+++ b/tempest/api/identity/admin/v2/test_tenants.py
@@ -50,7 +50,7 @@
'been sent in response for create')
body = self.tenants_client.show_tenant(tenant_id)['tenant']
desc2 = body['description']
- self.assertEqual(desc2, tenant_desc, 'Description does not appear'
+ self.assertEqual(desc2, tenant_desc, 'Description does not appear '
'to be set')
self.tenants_client.delete_tenant(tenant_id)
diff --git a/tempest/api/identity/admin/v3/test_credentials.py b/tempest/api/identity/admin/v3/test_credentials.py
index ba19ff7..23fe788 100644
--- a/tempest/api/identity/admin/v3/test_credentials.py
+++ b/tempest/api/identity/admin/v3/test_credentials.py
@@ -20,6 +20,10 @@
class CredentialsTestJSON(base.BaseIdentityV3AdminTest):
+ # NOTE: force_tenant_isolation is true in the base class by default but
+ # overridden to false here to allow test execution for clouds using the
+ # pre-provisioned credentials provider.
+ force_tenant_isolation = False
@classmethod
def resource_setup(cls):
@@ -27,10 +31,6 @@
cls.projects = list()
cls.creds_list = [['project_id', 'user_id', 'id'],
['access', 'secret']]
- u_name = data_utils.rand_name('user')
- u_desc = '%s description' % u_name
- u_email = '%s@testmail.tm' % u_name
- u_password = data_utils.rand_password()
for _ in range(2):
project = cls.projects_client.create_project(
data_utils.rand_name('project'),
@@ -38,12 +38,8 @@
cls.addClassResourceCleanup(
cls.projects_client.delete_project, project['id'])
cls.projects.append(project['id'])
-
- cls.user_body = cls.users_client.create_user(
- name=u_name, description=u_desc, password=u_password,
- email=u_email, project_id=cls.projects[0])['user']
- cls.addClassResourceCleanup(
- cls.users_client.delete_user, cls.user_body['id'])
+ cls.user_body = cls.users_client.show_user(
+ cls.os_primary.credentials.user_id)['user']
def _delete_credential(self, cred_id):
self.creds_client.delete_credential(cred_id)
diff --git a/tempest/api/identity/admin/v3/test_default_project_id.py b/tempest/api/identity/admin/v3/test_default_project_id.py
index 302a0e5..a79cbc3 100644
--- a/tempest/api/identity/admin/v3/test_default_project_id.py
+++ b/tempest/api/identity/admin/v3/test_default_project_id.py
@@ -9,6 +9,8 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
+
from tempest.api.identity import base
from tempest import clients
from tempest import config
@@ -32,6 +34,10 @@
self.domains_client.update_domain(domain_id, enabled=False)
self.domains_client.delete_domain(domain_id)
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an '
+ 'immutable user source and solely '
+ 'provides read-only access to users.')
@decorators.idempotent_id('d6110661-6a71-49a7-a453-b5e26640ff6d')
def test_default_project_id(self):
# create a domain
diff --git a/tempest/api/identity/admin/v3/test_domain_configuration.py b/tempest/api/identity/admin/v3/test_domain_configuration.py
index c4e0622..c0b18ca 100644
--- a/tempest/api/identity/admin/v3/test_domain_configuration.py
+++ b/tempest/api/identity/admin/v3/test_domain_configuration.py
@@ -21,6 +21,10 @@
class DomainConfigurationTestJSON(base.BaseIdentityV3AdminTest):
+ # NOTE: force_tenant_isolation is true in the base class by default but
+ # overridden to false here to allow test execution for clouds using the
+ # pre-provisioned credentials provider.
+ force_tenant_isolation = False
custom_config = {
"identity": {
diff --git a/tempest/api/identity/admin/v3/test_domains_negative.py b/tempest/api/identity/admin/v3/test_domains_negative.py
index 56f7d32..b3c68fb 100644
--- a/tempest/api/identity/admin/v3/test_domains_negative.py
+++ b/tempest/api/identity/admin/v3/test_domains_negative.py
@@ -20,6 +20,10 @@
class DomainsNegativeTestJSON(base.BaseIdentityV3AdminTest):
+ # NOTE: force_tenant_isolation is true in the base class by default but
+ # overridden to false here to allow test execution for clouds using the
+ # pre-provisioned credentials provider.
+ force_tenant_isolation = False
@decorators.attr(type=['negative', 'gate'])
@decorators.idempotent_id('1f3fbff5-4e44-400d-9ca1-d953f05f609b')
diff --git a/tempest/api/identity/admin/v3/test_endpoint_groups.py b/tempest/api/identity/admin/v3/test_endpoint_groups.py
index eef93c2..625568d 100644
--- a/tempest/api/identity/admin/v3/test_endpoint_groups.py
+++ b/tempest/api/identity/admin/v3/test_endpoint_groups.py
@@ -20,6 +20,10 @@
class EndPointGroupsTest(base.BaseIdentityV3AdminTest):
+ # NOTE: force_tenant_isolation is true in the base class by default but
+ # overridden to false here to allow test execution for clouds using the
+ # pre-provisioned credentials provider.
+ force_tenant_isolation = False
@classmethod
def setup_clients(cls):
diff --git a/tempest/api/identity/admin/v3/test_list_projects.py b/tempest/api/identity/admin/v3/test_list_projects.py
index 148b368..50f3186 100644
--- a/tempest/api/identity/admin/v3/test_list_projects.py
+++ b/tempest/api/identity/admin/v3/test_list_projects.py
@@ -14,9 +14,12 @@
# under the License.
from tempest.api.identity import base
+from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
+CONF = config.CONF
+
class BaseListProjectsTestJSON(base.BaseIdentityV3AdminTest):
@@ -60,34 +63,12 @@
cls.p3['id'])
cls.project_ids.append(cls.p3['id'])
- @decorators.idempotent_id('1d830662-22ad-427c-8c3e-4ec854b0af44')
- def test_list_projects(self):
- # List projects
- list_projects = self.projects_client.list_projects()['projects']
-
- for p in self.project_ids:
- show_project = self.projects_client.show_project(p)['project']
- self.assertIn(show_project, list_projects)
-
- @decorators.idempotent_id('fab13f3c-f6a6-4b9f-829b-d32fd44fdf10')
- def test_list_projects_with_domains(self):
- # List projects with domain
- self._list_projects_with_params(
- [self.p1], [self.p2, self.p3], {'domain_id': self.domain['id']},
- 'domain_id')
-
@decorators.idempotent_id('0fe7a334-675a-4509-b00e-1c4b95d5dae8')
def test_list_projects_with_enabled(self):
# List the projects with enabled
self._list_projects_with_params(
[self.p1], [self.p2, self.p3], {'enabled': False}, 'enabled')
- @decorators.idempotent_id('fa178524-4e6d-4925-907c-7ab9f42c7e26')
- def test_list_projects_with_name(self):
- # List projects with name
- self._list_projects_with_params(
- [self.p1], [self.p2, self.p3], {'name': self.p1_name}, 'name')
-
@decorators.idempotent_id('6edc66f5-2941-4a17-9526-4073311c1fac')
def test_list_projects_with_parent(self):
# List projects with parent
@@ -97,3 +78,51 @@
self.assertNotEmpty(fetched_projects)
for project in fetched_projects:
self.assertEqual(self.p3['parent_id'], project['parent_id'])
+
+
+class ListProjectsStaticTestJSON(BaseListProjectsTestJSON):
+ # NOTE: force_tenant_isolation is true in the base class by default but
+ # overridden to false here to allow test execution for clouds using the
+ # pre-provisioned credentials provider.
+ force_tenant_isolation = False
+
+ @classmethod
+ def resource_setup(cls):
+ super(ListProjectsStaticTestJSON, cls).resource_setup()
+ cls.domain_id = CONF.identity.default_domain_id
+ cls.project_ids = list()
+ cls.p1_name = cls.os_primary.credentials.project_name
+ cls.p1 = cls.projects_client.show_project(
+ cls.os_primary.credentials.project_id)['project']
+ cls.project_ids.append(cls.p1['id'])
+ p2_name = data_utils.rand_name('project')
+ cls.p2 = cls.projects_client.create_project(
+ p2_name, domain_id=cls.domain_id)['project']
+ cls.addClassResourceCleanup(cls.projects_client.delete_project,
+ cls.p2['id'])
+ cls.project_ids.append(cls.p2['id'])
+
+ @decorators.idempotent_id('1d830662-22ad-427c-8c3e-4ec854b0af44')
+ def test_list_projects(self):
+ # List projects
+ list_projects = self.projects_client.list_projects()['projects']
+
+ for p in self.project_ids:
+ show_project = self.projects_client.show_project(p)['project']
+ self.assertIn(show_project, list_projects)
+
+ @decorators.idempotent_id('fa178524-4e6d-4925-907c-7ab9f42c7e26')
+ def test_list_projects_with_name(self):
+ # List projects with name
+ self._list_projects_with_params(
+ [self.p1], [self.p2], {'name': self.p1_name}, 'name')
+
+ @decorators.idempotent_id('fab13f3c-f6a6-4b9f-829b-d32fd44fdf10')
+ def test_list_projects_with_domains(self):
+ # List projects with domain
+ key = 'domain_id'
+ params = {key: self.domain_id}
+ # Verify both projects are in the self.domain_id which is the default
+ # domain
+ self._list_projects_with_params(
+ [self.p1, self.p2], [], params, key)
diff --git a/tempest/api/identity/admin/v3/test_oauth_consumers.py b/tempest/api/identity/admin/v3/test_oauth_consumers.py
index 062cce5..7a85f84 100644
--- a/tempest/api/identity/admin/v3/test_oauth_consumers.py
+++ b/tempest/api/identity/admin/v3/test_oauth_consumers.py
@@ -21,6 +21,10 @@
class OAUTHConsumersV3Test(base.BaseIdentityV3AdminTest):
+ # NOTE: force_tenant_isolation is true in the base class by default but
+ # overridden to false here to allow test execution for clouds using the
+ # pre-provisioned credentials provider.
+ force_tenant_isolation = False
def _create_consumer(self):
"""Creates a consumer with a random description."""
diff --git a/tempest/api/identity/admin/v3/test_project_tags.py b/tempest/api/identity/admin/v3/test_project_tags.py
index d05173b..b7878a8 100644
--- a/tempest/api/identity/admin/v3/test_project_tags.py
+++ b/tempest/api/identity/admin/v3/test_project_tags.py
@@ -25,6 +25,10 @@
class IdentityV3ProjectTagsTest(base.BaseIdentityV3AdminTest):
+ # NOTE: force_tenant_isolation is true in the base class by default but
+ # overridden to false here to allow test execution for clouds using the
+ # pre-provisioned credentials provider.
+ force_tenant_isolation = False
@decorators.idempotent_id('7c123aac-999d-416a-a0fb-84b915ab10de')
@testtools.skipUnless(CONF.identity_feature_enabled.project_tags,
diff --git a/tempest/api/identity/admin/v3/test_projects.py b/tempest/api/identity/admin/v3/test_projects.py
index 6ddf42e..f75edaa 100644
--- a/tempest/api/identity/admin/v3/test_projects.py
+++ b/tempest/api/identity/admin/v3/test_projects.py
@@ -31,7 +31,7 @@
'been sent in response for create')
body = self.projects_client.show_project(project_id)['project']
desc2 = body['description']
- self.assertEqual(desc2, project_desc, 'Description does not appear'
+ self.assertEqual(desc2, project_desc, 'Description does not appear '
'to be set')
@decorators.idempotent_id('5f50fe07-8166-430b-a882-3b2ee0abe26f')
diff --git a/tempest/api/identity/admin/v3/test_projects_negative.py b/tempest/api/identity/admin/v3/test_projects_negative.py
index 33a9c8c..12f1d4a 100644
--- a/tempest/api/identity/admin/v3/test_projects_negative.py
+++ b/tempest/api/identity/admin/v3/test_projects_negative.py
@@ -22,6 +22,22 @@
class ProjectsNegativeTestJSON(base.BaseIdentityV3AdminTest):
@decorators.attr(type=['negative'])
+ @decorators.idempotent_id('8d68c012-89e0-4394-8d6b-ccd7196def97')
+ def test_project_delete_by_unauthorized_user(self):
+ # Non-admin user should not be able to delete a project
+ project = self.setup_test_project()
+ self.assertRaises(
+ lib_exc.Forbidden, self.non_admin_projects_client.delete_project,
+ project['id'])
+
+
+class ProjectsNegativeStaticTestJSON(base.BaseIdentityV3AdminTest):
+ # NOTE: force_tenant_isolation is true in the base class by default but
+ # overridden to false here to allow test execution for clouds using the
+ # pre-provisioned credentials provider.
+ force_tenant_isolation = False
+
+ @decorators.attr(type=['negative'])
@decorators.idempotent_id('24c49279-45dd-4155-887a-cb738c2385aa')
def test_list_projects_by_unauthorized_user(self):
# Non-admin user should not be able to list projects
@@ -63,15 +79,6 @@
self.projects_client.create_project, project_name)
@decorators.attr(type=['negative'])
- @decorators.idempotent_id('8d68c012-89e0-4394-8d6b-ccd7196def97')
- def test_project_delete_by_unauthorized_user(self):
- # Non-admin user should not be able to delete a project
- project = self.setup_test_project()
- self.assertRaises(
- lib_exc.Forbidden, self.non_admin_projects_client.delete_project,
- project['id'])
-
- @decorators.attr(type=['negative'])
@decorators.idempotent_id('7965b581-60c1-43b7-8169-95d4ab7fc6fb')
def test_delete_non_existent_project(self):
# Attempt to delete a non existent project should fail
diff --git a/tempest/api/identity/admin/v3/test_tokens.py b/tempest/api/identity/admin/v3/test_tokens.py
index 8ae43d6..5f1b58d 100644
--- a/tempest/api/identity/admin/v3/test_tokens.py
+++ b/tempest/api/identity/admin/v3/test_tokens.py
@@ -42,10 +42,10 @@
user = self.create_test_user(password=user_password)
# Create a couple projects
- project1_name = data_utils.rand_name(name='project')
+ project1_name = data_utils.rand_name(name=self.__class__.__name__)
project1 = self.setup_test_project(name=project1_name)
- project2_name = data_utils.rand_name(name='project')
+ project2_name = data_utils.rand_name(name=self.__class__.__name__)
project2 = self.setup_test_project(name=project2_name)
self.addCleanup(self.projects_client.delete_project, project2['id'])
diff --git a/tempest/api/identity/admin/v3/test_trusts.py b/tempest/api/identity/admin/v3/test_trusts.py
index 2530072..54a5ab7 100644
--- a/tempest/api/identity/admin/v3/test_trusts.py
+++ b/tempest/api/identity/admin/v3/test_trusts.py
@@ -39,7 +39,6 @@
# Use alt_username as the trustee
self.trust_id = None
self.create_trustor_and_roles()
- self.addCleanup(self.cleanup_user_and_roles)
def tearDown(self):
if self.trust_id:
@@ -50,11 +49,13 @@
def create_trustor_and_roles(self):
# create a project that trusts will be granted on
- trustor_project_name = data_utils.rand_name(name='project')
+ trustor_project_name = data_utils.rand_name(
+ name=self.__class__.__name__)
project = self.projects_client.create_project(
trustor_project_name,
domain_id=CONF.identity.default_domain_id)['project']
self.trustor_project_id = project['id']
+ self.addCleanup(self.projects_client.delete_project, project['id'])
self.assertIsNotNone(self.trustor_project_id)
# Create a trustor User
@@ -69,6 +70,7 @@
email=u_email,
project_id=self.trustor_project_id,
domain_id=CONF.identity.default_domain_id)['user']
+ self.addCleanup(self.users_client.delete_user, user['id'])
self.trustor_user_id = user['id']
# And two roles, one we'll delegate and one we won't
@@ -76,10 +78,12 @@
self.not_delegated_role = data_utils.rand_name('NotDelegatedRole')
role = self.roles_client.create_role(name=self.delegated_role)['role']
+ self.addCleanup(self.roles_client.delete_role, role['id'])
self.delegated_role_id = role['id']
role = self.roles_client.create_role(
name=self.not_delegated_role)['role']
+ self.addCleanup(self.roles_client.delete_role, role['id'])
self.not_delegated_role_id = role['id']
# Assign roles to trustor
@@ -109,16 +113,6 @@
os = clients.Manager(credentials=creds)
self.trustor_client = os.trusts_client
- def cleanup_user_and_roles(self):
- if self.trustor_user_id:
- self.users_client.delete_user(self.trustor_user_id)
- if self.trustor_project_id:
- self.projects_client.delete_project(self.trustor_project_id)
- if self.delegated_role_id:
- self.roles_client.delete_role(self.delegated_role_id)
- if self.not_delegated_role_id:
- self.roles_client.delete_role(self.not_delegated_role_id)
-
def create_trust(self, impersonate=True, expires=None):
trust_create = self.trustor_client.create_trust(
diff --git a/tempest/api/identity/admin/v3/test_users.py b/tempest/api/identity/admin/v3/test_users.py
index 3813568..8955a93 100644
--- a/tempest/api/identity/admin/v3/test_users.py
+++ b/tempest/api/identity/admin/v3/test_users.py
@@ -28,6 +28,14 @@
class UsersV3TestJSON(base.BaseIdentityV3AdminTest):
+ @classmethod
+ def skip_checks(cls):
+ super(UsersV3TestJSON, cls).skip_checks()
+ if CONF.identity_feature_enabled.immutable_user_source:
+ raise cls.skipException('Skipped because environment has an '
+ 'immutable user source and solely '
+ 'provides read-only access to users.')
+
@decorators.idempotent_id('b537d090-afb9-4519-b95d-270b0708e87e')
def test_user_update(self):
# Test case to check if updating of user attributes is successful.
diff --git a/tempest/api/identity/v2/test_ec2_credentials.py b/tempest/api/identity/v2/test_ec2_credentials.py
index 237e728..9981ef8 100644
--- a/tempest/api/identity/v2/test_ec2_credentials.py
+++ b/tempest/api/identity/v2/test_ec2_credentials.py
@@ -57,18 +57,19 @@
self.creds.user_id,
tenant_id=self.creds.tenant_id)["credential"]
created_creds.append(creds1['access'])
+ self.addCleanup(
+ self.non_admin_users_client.delete_user_ec2_credential,
+ self.creds.user_id, creds1['access'])
+
# create second ec2 credentials
creds2 = self.non_admin_users_client.create_user_ec2_credential(
self.creds.user_id,
tenant_id=self.creds.tenant_id)["credential"]
created_creds.append(creds2['access'])
- # add credentials to be cleaned up
- self.addCleanup(
- self.non_admin_users_client.delete_user_ec2_credential,
- self.creds.user_id, creds1['access'])
self.addCleanup(
self.non_admin_users_client.delete_user_ec2_credential,
self.creds.user_id, creds2['access'])
+
# get the list of user ec2 credentials
resp = self.non_admin_users_client.list_user_ec2_credentials(
self.creds.user_id)["credentials"]
diff --git a/tempest/api/identity/v3/test_users.py b/tempest/api/identity/v3/test_users.py
index 6d6baca..13b5161 100644
--- a/tempest/api/identity/v3/test_users.py
+++ b/tempest/api/identity/v3/test_users.py
@@ -133,6 +133,13 @@
'Security compliance not available.')
@decorators.idempotent_id('a7ad8bbf-2cff-4520-8c1d-96332e151658')
def test_user_account_lockout(self):
+ if (CONF.identity.user_lockout_failure_attempts <= 0 or
+ CONF.identity.user_lockout_duration <= 0):
+ raise self.skipException(
+ "Both CONF.identity.user_lockout_failure_attempts and "
+ "CONF.identity.user_lockout_duration should be greater than "
+ "zero to test this feature")
+
password = self.creds.password
# First, we login using the correct credentials
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index aa57daf..c938cee 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -185,7 +185,7 @@
for disk_fmt in disk_fmts]
for (container_fmt, disk_fmt) in all_pairs[:6]:
- LOG.debug("Creating an image"
+ LOG.debug("Creating an image "
"(Container format: %s, Disk format: %s).",
container_fmt, disk_fmt)
cls._create_standard_image(container_fmt, disk_fmt)
diff --git a/tempest/api/image/v2/test_images_member.py b/tempest/api/image/v2/test_images_member.py
index 0208780..e19d8c8 100644
--- a/tempest/api/image/v2/test_images_member.py
+++ b/tempest/api/image/v2/test_images_member.py
@@ -33,8 +33,8 @@
self.assertIn(image_id, self._list_image_ids_as_alt())
body = self.image_member_client.list_image_members(image_id)
members = body['members']
- member = members[0]
self.assertEqual(len(members), 1, str(members))
+ member = members[0]
self.assertEqual(member['member_id'], self.alt_tenant_id)
self.assertEqual(member['image_id'], image_id)
self.assertEqual(member['status'], 'accepted')
diff --git a/tempest/api/network/admin/test_agent_management.py b/tempest/api/network/admin/test_agent_management.py
index 5068fc4..eaf477c 100644
--- a/tempest/api/network/admin/test_agent_management.py
+++ b/tempest/api/network/admin/test_agent_management.py
@@ -15,7 +15,9 @@
from tempest.api.network import base
from tempest.common import tempest_fixtures as fixtures
from tempest.common import utils
+from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
class AgentManagementTestJSON(base.BaseAdminNetworkTest):
@@ -46,11 +48,6 @@
agent.pop('configurations', None)
self.assertIn(self.agent, agents)
- @decorators.idempotent_id('e335be47-b9a1-46fd-be30-0874c0b751e6')
- def test_list_agents_non_admin(self):
- body = self.agents_client.list_agents()
- self.assertEmpty(body["agents"])
-
@decorators.idempotent_id('869bc8e8-0fda-4a30-9b71-f8a7cf58ca9f')
def test_show_agent(self):
body = self.admin_agents_client.show_agent(self.agent['id'])
@@ -86,3 +83,11 @@
origin_agent = {'description': description}
self.admin_agents_client.update_agent(agent_id=self.agent['id'],
agent=origin_agent)
+
+ @decorators.idempotent_id('b33af888-b6ac-4e68-a0ca-0444c2696cf9')
+ @decorators.attr(type=['negative'])
+ def test_delete_agent_negative(self):
+ non_existent_id = data_utils.rand_uuid()
+ self.assertRaises(
+ lib_exc.NotFound,
+ self.admin_agents_client.delete_agent, non_existent_id)
diff --git a/tempest/api/network/admin/test_negative_quotas.py b/tempest/api/network/admin/test_negative_quotas.py
index e79f8c3..a075b51 100644
--- a/tempest/api/network/admin/test_negative_quotas.py
+++ b/tempest/api/network/admin/test_negative_quotas.py
@@ -14,7 +14,9 @@
# under the License.
from tempest.api.network import base
+from tempest.common import identity
from tempest.common import utils
+from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
@@ -30,7 +32,6 @@
quota_driver = neutron.db.quota_db.DbQuotaDriver
"""
- force_tenant_isolation = True
@classmethod
def skip_checks(cls):
@@ -39,27 +40,36 @@
msg = "quotas extension not enabled."
raise cls.skipException(msg)
+ def setUp(self):
+ super(QuotasNegativeTest, self).setUp()
+ name = data_utils.rand_name('test_project_')
+ description = data_utils.rand_name('desc_')
+ self.project = identity.identity_utils(self.os_admin).create_project(
+ name=name, description=description)
+ self.addCleanup(identity.identity_utils(self.os_admin).delete_project,
+ self.project['id'])
+
@decorators.attr(type=['negative'])
@decorators.idempotent_id('644f4e1b-1bf9-4af0-9fd8-eb56ac0f51cf')
def test_network_quota_exceeding(self):
# Set the network quota to two
- self.admin_quotas_client.update_quotas(self.networks_client.tenant_id,
- network=2)
- self.addCleanup(self.admin_quotas_client.reset_quotas,
- self.networks_client.tenant_id)
+ self.admin_quotas_client.update_quotas(self.project['id'], network=2)
# Create two networks
- n1 = self.networks_client.create_network()
- self.addCleanup(self.networks_client.delete_network,
+ n1 = self.admin_networks_client.create_network(
+ tenant_id=self.project['id'])
+ self.addCleanup(self.admin_networks_client.delete_network,
n1['network']['id'])
- n2 = self.networks_client.create_network()
- self.addCleanup(self.networks_client.delete_network,
+ n2 = self.admin_networks_client.create_network(
+ tenant_id=self.project['id'])
+ self.addCleanup(self.admin_networks_client.delete_network,
n2['network']['id'])
# Try to create a third network while the quota is two
with self.assertRaisesRegex(
lib_exc.Conflict,
r"Quota exceeded for resources: \['network'\].*"):
- n3 = self.networks_client.create_network()
- self.addCleanup(self.networks_client.delete_network,
+ n3 = self.admin_networks_client.create_network(
+ tenant_id=self.project['id'])
+ self.addCleanup(self.admin_networks_client.delete_network,
n3['network']['id'])
diff --git a/tempest/api/network/admin/test_routers.py b/tempest/api/network/admin/test_routers.py
index a7355f3..6ce86fb 100644
--- a/tempest/api/network/admin/test_routers.py
+++ b/tempest/api/network/admin/test_routers.py
@@ -111,7 +111,8 @@
def _verify_gateway_port(self, router_id):
list_body = self.admin_ports_client.list_ports(
network_id=CONF.network.public_network_id,
- device_id=router_id)
+ device_id=router_id,
+ device_owner="network:router_gateway")
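+ # Filtering on device_owner ensures only the router's gateway port is
+ # returned from the port listing.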
self.assertEqual(len(list_body['ports']), 1)
gw_port = list_body['ports'][0]
fixed_ips = gw_port['fixed_ips']
diff --git a/tempest/api/network/test_agent_management_negative.py b/tempest/api/network/test_agent_management_negative.py
new file mode 100644
index 0000000..d1c02ce
--- /dev/null
+++ b/tempest/api/network/test_agent_management_negative.py
@@ -0,0 +1,28 @@
+# Copyright 2018 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.network import base
+from tempest.lib import decorators
+
+
+class AgentManagementNegativeTest(base.BaseNetworkTest):
+
+ @decorators.idempotent_id('e335be47-b9a1-46fd-be30-0874c0b751e6')
+ @decorators.attr(type=['negative'])
+ def test_list_agents_non_admin(self):
+ """Validate that non-admin user cannot list agents."""
+ # Listing agents requires admin_only permissions.
+ body = self.agents_client.list_agents()
+ self.assertEmpty(body["agents"])
diff --git a/tempest/api/network/test_dhcp_ipv6.py b/tempest/api/network/test_dhcp_ipv6.py
index 0730d58..3ab2909 100644
--- a/tempest/api/network/test_dhcp_ipv6.py
+++ b/tempest/api/network/test_dhcp_ipv6.py
@@ -135,7 +135,7 @@
real_ip, eui_ip = self._get_ips_from_subnet(**kwargs)
self._clean_network()
self.assertEqual(eui_ip, real_ip,
- ('Real port IP %s shall be equal to EUI-64 %s'
+ ('Real port IP %s shall be equal to EUI-64 %s '
'when ipv6_ra_mode=%s,ipv6_address_mode=%s') % (
real_ip, eui_ip,
ra_mode if ra_mode else "Off",
@@ -167,7 +167,7 @@
self._clean_network()
self.assertNotEqual(eui_ip, real_ip,
('Real port IP %s equal to EUI-64 %s when '
- 'ipv6_ra_mode=Off and ipv6_address_mode=Off,'
+ 'ipv6_ra_mode=Off and ipv6_address_mode=Off, '
'but shall be taken from fixed IPs') % (
real_ip, eui_ip))
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
index 8b03631..be3cf65 100644
--- a/tempest/api/network/test_routers.py
+++ b/tempest/api/network/test_routers.py
@@ -27,22 +27,6 @@
class RoutersTest(base.BaseNetworkTest):
- def _cleanup_router(self, router):
- self.delete_router(router)
-
- def _create_router(self, name=None, admin_state_up=False,
- external_network_id=None, enable_snat=None):
- # associate a cleanup with created routers to avoid quota limits
- router = self.create_router(name, admin_state_up,
- external_network_id, enable_snat)
- self.addCleanup(self._cleanup_router, router)
- return router
-
- def _create_subnet(self, network, gateway='', cidr=None):
- subnet = self.create_subnet(network, gateway, cidr)
- self.addCleanup(self.subnets_client.delete_subnet, subnet['id'])
- return subnet
-
def _add_router_interface_with_subnet_id(self, router_id, subnet_id):
interface = self.routers_client.add_router_interface(
router_id, subnet_id=subnet_id)
@@ -70,10 +54,11 @@
def test_create_show_list_update_delete_router(self):
# Create a router
router_name = data_utils.rand_name(self.__class__.__name__ + '-router')
- router = self._create_router(
- name=router_name,
+ router = self.create_router(
+ router_name,
admin_state_up=False,
external_network_id=CONF.network.public_network_id)
+ self.addCleanup(self.delete_router, router)
self.assertEqual(router['name'], router_name)
self.assertEqual(router['admin_state_up'], False)
self.assertEqual(
@@ -106,8 +91,10 @@
name=network_name)['network']
self.addCleanup(self.networks_client.delete_network,
network['id'])
- subnet = self._create_subnet(network)
- router = self._create_router()
+ subnet = self.create_subnet(network)
+ self.addCleanup(self.subnets_client.delete_subnet, subnet['id'])
+ router = self.create_router()
+ self.addCleanup(self.delete_router, router)
# Add router interface with subnet id
interface = self.routers_client.add_router_interface(
router['id'], subnet_id=subnet['id'])
@@ -129,8 +116,10 @@
name=network_name)['network']
self.addCleanup(self.networks_client.delete_network,
network['id'])
- self._create_subnet(network)
- router = self._create_router()
+ subnet = self.create_subnet(network)
+ self.addCleanup(self.subnets_client.delete_subnet, subnet['id'])
+ router = self.create_router()
+ self.addCleanup(self.delete_router, router)
port_body = self.ports_client.create_port(
network_id=network['id'])
# add router interface to port created above
@@ -188,7 +177,8 @@
test_routes = []
routes_num = 4
# Create a router
- router = self._create_router(admin_state_up=True)
+ router = self.create_router(admin_state_up=True)
+ self.addCleanup(self.delete_router, router)
self.addCleanup(
self._delete_extra_routes,
router['id'])
@@ -201,6 +191,7 @@
self.addCleanup(self.networks_client.delete_network,
network['id'])
subnet = self.create_subnet(network, cidr=next_cidr)
+ self.addCleanup(self.subnets_client.delete_subnet, subnet['id'])
next_cidr = next_cidr.next()
# Add router interface with subnet id
@@ -247,7 +238,8 @@
@decorators.idempotent_id('a8902683-c788-4246-95c7-ad9c6d63a4d9')
def test_update_router_admin_state(self):
- router = self._create_router()
+ router = self.create_router()
+ self.addCleanup(self.delete_router, router)
self.assertFalse(router['admin_state_up'])
# Update router admin state
update_body = self.routers_client.update_router(router['id'],
@@ -264,14 +256,18 @@
name=network_name)['network']
self.addCleanup(self.networks_client.delete_network,
network01['id'])
+ network_name = data_utils.rand_name(self.__class__.__name__)
network02 = self.networks_client.create_network(
- name=data_utils.rand_name(self.__class__.__name__))['network']
+ name=network_name)['network']
self.addCleanup(self.networks_client.delete_network,
network02['id'])
- subnet01 = self._create_subnet(network01)
+ subnet01 = self.create_subnet(network01)
+ self.addCleanup(self.subnets_client.delete_subnet, subnet01['id'])
sub02_cidr = self.cidr.next()
- subnet02 = self._create_subnet(network02, cidr=sub02_cidr)
- router = self._create_router()
+ subnet02 = self.create_subnet(network02, cidr=sub02_cidr)
+ self.addCleanup(self.subnets_client.delete_subnet, subnet02['id'])
+ router = self.create_router()
+ self.addCleanup(self.delete_router, router)
interface01 = self._add_router_interface_with_subnet_id(router['id'],
subnet01['id'])
self._verify_router_interface(router['id'], subnet01['id'],
@@ -288,8 +284,10 @@
name=network_name)['network']
self.addCleanup(self.networks_client.delete_network,
network['id'])
- subnet = self._create_subnet(network)
- router = self._create_router()
+ subnet = self.create_subnet(network)
+ self.addCleanup(self.subnets_client.delete_subnet, subnet['id'])
+ router = self.create_router()
+ self.addCleanup(self.delete_router, router)
fixed_ip = [{'subnet_id': subnet['id']}]
interface = self._add_router_interface_with_subnet_id(router['id'],
subnet['id'])
diff --git a/tempest/api/network/test_versions.py b/tempest/api/network/test_versions.py
index 2f01e50..020cb5c 100644
--- a/tempest/api/network/test_versions.py
+++ b/tempest/api/network/test_versions.py
@@ -29,7 +29,7 @@
"""
result = self.network_versions_client.list_versions()
- expected_versions = ('v2.0')
+ expected_versions = ('v2.0',)
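+ # ('v2.0') without the trailing comma is just a parenthesized string;
+ # the comma makes it a one-element tuple.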
expected_resources = ('id', 'links', 'status')
received_list = result.values()
@@ -38,3 +38,14 @@
for resource in expected_resources:
self.assertIn(resource, version)
self.assertIn(version['id'], expected_versions)
+
+ @decorators.attr(type='smoke')
+ @decorators.idempotent_id('e64b7216-3178-4263-967c-d389290988bf')
+ def test_show_api_v2_details(self):
+ """Test that GET /v2.0/ returns expected resources."""
+ current_version = 'v2.0'
+ expected_resources = ('subnet', 'network', 'port')
+ result = self.network_versions_client.show_version(current_version)
+ actual_resources = [r['name'] for r in result['resources']]
+ for resource in expected_resources:
+ self.assertIn(resource, actual_resources)
diff --git a/tempest/api/object_storage/test_container_acl.py b/tempest/api/object_storage/test_container_acl.py
index 765bc6d..e9ca0b1 100644
--- a/tempest/api/object_storage/test_container_acl.py
+++ b/tempest/api/object_storage/test_container_acl.py
@@ -38,9 +38,9 @@
def test_read_object_with_rights(self):
# attempt to read object using authorized user
# update X-Container-Read metadata ACL
- tenant_name = self.os_roles_operator_alt.credentials.tenant_name
- username = self.os_roles_operator_alt.credentials.username
- cont_headers = {'X-Container-Read': tenant_name + ':' + username}
+ tenant_id = self.os_roles_operator_alt.credentials.tenant_id
+ user_id = self.os_roles_operator_alt.credentials.user_id
+ cont_headers = {'X-Container-Read': tenant_id + ':' + user_id}
container_client = self.os_roles_operator.container_client
resp_meta, _ = (
container_client.create_update_or_delete_container_metadata(
@@ -66,9 +66,9 @@
def test_write_object_with_rights(self):
# attempt to write object using authorized user
# update X-Container-Write metadata ACL
- tenant_name = self.os_roles_operator_alt.credentials.tenant_name
- username = self.os_roles_operator_alt.credentials.username
- cont_headers = {'X-Container-Write': tenant_name + ':' + username}
+ tenant_id = self.os_roles_operator_alt.credentials.tenant_id
+ user_id = self.os_roles_operator_alt.credentials.user_id
+ cont_headers = {'X-Container-Write': tenant_id + ':' + user_id}
container_client = self.os_roles_operator.container_client
resp_meta, _ = (
container_client.create_update_or_delete_container_metadata(
diff --git a/tempest/api/object_storage/test_container_quotas.py b/tempest/api/object_storage/test_container_quotas.py
index 982c4a1..fcd9a7c 100644
--- a/tempest/api/object_storage/test_container_quotas.py
+++ b/tempest/api/object_storage/test_container_quotas.py
@@ -31,9 +31,10 @@
Quotas are set by adding meta values to the container,
and are validated when set:
- - X-Container-Meta-Quota-Bytes:
+
+ - X-Container-Meta-Quota-Bytes:
Maximum size of the container, in bytes.
- - X-Container-Meta-Quota-Count:
+ - X-Container-Meta-Quota-Count:
Maximum object count of the container.
"""
super(ContainerQuotasTest, self).setUp()
diff --git a/tempest/api/volume/admin/test_backends_capabilities.py b/tempest/api/volume/admin/test_backends_capabilities.py
index 607fc43..affed6b 100644
--- a/tempest/api/volume/admin/test_backends_capabilities.py
+++ b/tempest/api/volume/admin/test_backends_capabilities.py
@@ -72,8 +72,8 @@
]
# Returns a tuple of VOLUME_STATS values
- expected_list = list(map(operator.itemgetter(*VOLUME_STATS),
- cinder_pools))
- observed_list = list(map(operator.itemgetter(*VOLUME_STATS),
- capabilities))
+ expected_list = sorted(list(map(operator.itemgetter(*VOLUME_STATS),
+ cinder_pools)))
+ observed_list = sorted(list(map(operator.itemgetter(*VOLUME_STATS),
+ capabilities)))
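+ # Sort both lists so the comparison does not depend on the ordering
+ # returned by the API.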
self.assertEqual(expected_list, observed_list)
diff --git a/tempest/api/volume/admin/test_group_snapshots.py b/tempest/api/volume/admin/test_group_snapshots.py
index 731a055..f695f51 100644
--- a/tempest/api/volume/admin/test_group_snapshots.py
+++ b/tempest/api/volume/admin/test_group_snapshots.py
@@ -215,6 +215,7 @@
max_microversion = 'latest'
@decorators.idempotent_id('3b42c9b9-c984-4444-816e-ca2e1ed30b40')
+ @decorators.skip_because(bug='1770179')
def test_reset_group_snapshot_status(self):
# Create volume type
volume_type = self.create_volume_type()
diff --git a/tempest/api/volume/admin/test_volume_quotas_negative.py b/tempest/api/volume/admin/test_volume_quotas_negative.py
index 915aeec..5c7ab15 100644
--- a/tempest/api/volume/admin/test_volume_quotas_negative.py
+++ b/tempest/api/volume/admin/test_volume_quotas_negative.py
@@ -45,13 +45,6 @@
cls.addClassResourceCleanup(cls.admin_quotas_client.update_quota_set,
cls.demo_tenant_id, **cleanup_quota_set)
- cls.shared_quota_set = {'gigabytes': 2 * CONF.volume.volume_size,
- 'volumes': 1}
-
- cls.admin_quotas_client.update_quota_set(
- cls.demo_tenant_id,
- **cls.shared_quota_set)
-
# NOTE(gfidente): no need to delete in tearDown as
# they are created using utility wrapper methods.
cls.volume = cls.create_volume()
@@ -59,6 +52,8 @@
@decorators.attr(type='negative')
@decorators.idempotent_id('bf544854-d62a-47f2-a681-90f7a47d86b6')
def test_quota_volumes(self):
+ self.admin_quotas_client.update_quota_set(self.demo_tenant_id,
+ volumes=1, gigabytes=-1)
self.assertRaises(lib_exc.OverLimit,
self.volumes_client.create_volume,
size=CONF.volume.volume_size)
@@ -66,17 +61,18 @@
@decorators.attr(type='negative')
@decorators.idempotent_id('2dc27eee-8659-4298-b900-169d71a91374')
def test_quota_volume_gigabytes(self):
- # NOTE(gfidente): quota set needs to be changed for this test
- # or we may be limited by the volumes or snaps quota number, not by
- # actual gigs usage; next line ensures shared set is restored.
- self.addCleanup(self.admin_quotas_client.update_quota_set,
- self.demo_tenant_id,
- **self.shared_quota_set)
- new_quota_set = {'gigabytes': CONF.volume.volume_size,
- 'volumes': 2, 'snapshots': 1}
self.admin_quotas_client.update_quota_set(
- self.demo_tenant_id,
- **new_quota_set)
+ self.demo_tenant_id, gigabytes=CONF.volume.volume_size, volumes=-1)
self.assertRaises(lib_exc.OverLimit,
self.volumes_client.create_volume,
- size=CONF.volume.volume_size)
+ size=CONF.volume.volume_size * 2)
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('d321dc21-d8c6-401f-95fe-49f4845f1a6d')
+ def test_volume_extend_gigabytes_quota_deviation(self):
+ self.admin_quotas_client.update_quota_set(
+ self.demo_tenant_id, gigabytes=CONF.volume.volume_size)
+ self.assertRaises(lib_exc.OverLimit,
+ self.volumes_client.extend_volume,
+ self.volume['id'],
+ new_size=CONF.volume.volume_size * 2)
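Each negative test now sets only the quota it exercises, passing -1 (which Cinder treats as unlimited) for the other resource so the OverLimit error can only come from the dimension under test; the class-level cleanup registered earlier restores the original values. A hedged sketch of the pattern as a helper, assuming the usual tempest admin quotas client:

    def isolate_volume_count_quota(admin_quotas_client, tenant_id, add_cleanup):
        """Pin 'volumes' to 1 and leave capacity unlimited (-1)."""
        original = admin_quotas_client.show_quota_set(tenant_id)['quota_set']
        # Restore whatever was configured before the test touched it.
        add_cleanup(admin_quotas_client.update_quota_set, tenant_id,
                    volumes=original['volumes'],
                    gigabytes=original['gigabytes'])
        admin_quotas_client.update_quota_set(tenant_id,
                                             volumes=1, gigabytes=-1)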
diff --git a/tempest/api/volume/admin/test_volume_types.py b/tempest/api/volume/admin/test_volume_types.py
index 1077524..9e24176 100644
--- a/tempest/api/volume/admin/test_volume_types.py
+++ b/tempest/api/volume/admin/test_volume_types.py
@@ -193,8 +193,13 @@
'is_public': is_public}
updated_vol_type = self.admin_volume_types_client.update_volume_type(
volume_type['id'], **kwargs)['volume_type']
-
- # Verify volume type details were updated
self.assertEqual(name, updated_vol_type['name'])
self.assertEqual(description, updated_vol_type['description'])
self.assertEqual(is_public, updated_vol_type['is_public'])
+
+ # Verify volume type details were updated
+ fetched_volume_type = self.admin_volume_types_client.show_volume_type(
+ volume_type['id'])['volume_type']
+ self.assertEqual(name, fetched_volume_type['name'])
+ self.assertEqual(description, fetched_volume_type['description'])
+ self.assertEqual(is_public, fetched_volume_type['is_public'])
diff --git a/tempest/clients.py b/tempest/clients.py
index 204ce08..e5d5be1 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -82,10 +82,8 @@
self.schemas_client = self.image_v2.SchemasClient()
self.namespace_properties_client = \
self.image_v2.NamespacePropertiesClient()
- self.namespace_tags_client = \
- self.image_v2.NamespaceTagsClient()
- self.image_versions_client = \
- self.image_v2.VersionsClient()
+ self.namespace_tags_client = self.image_v2.NamespaceTagsClient()
+ self.image_versions_client = self.image_v2.VersionsClient()
def _set_compute_clients(self):
self.agents_client = self.compute.AgentsClient()
@@ -284,8 +282,7 @@
self.volume_v3.QuotaClassesClient()
self.volume_scheduler_stats_v2_client = \
self.volume_v3.SchedulerStatsClient()
- self.volume_transfers_v2_client = \
- self.volume_v3.TransfersClient()
+ self.volume_transfers_v2_client = self.volume_v3.TransfersClient()
self.volume_v2_availability_zone_client = \
self.volume_v3.AvailabilityZoneClient()
self.volume_v2_limits_client = self.volume_v3.LimitsClient()
diff --git a/tempest/cmd/account_generator.py b/tempest/cmd/account_generator.py
index ce91825..7ea0099 100755
--- a/tempest/cmd/account_generator.py
+++ b/tempest/cmd/account_generator.py
@@ -162,7 +162,6 @@
if CONF.service_available.swift:
spec.append([CONF.object_storage.operator_role])
spec.append([CONF.object_storage.reseller_admin_role])
- spec.append([CONF.object_storage.operator_role])
if admin:
spec.append('admin')
resources = []
@@ -195,7 +194,6 @@
if test_resource.network:
account['resources'] = {}
- if test_resource.network:
account['resources']['network'] = test_resource.network['name']
accounts.append(account)
if os.path.exists(account_file):
diff --git a/tempest/cmd/cleanup.py b/tempest/cmd/cleanup.py
index 29abd49..2f54f9a 100644
--- a/tempest/cmd/cleanup.py
+++ b/tempest/cmd/cleanup.py
@@ -279,7 +279,7 @@
self.admin_id,
self.admin_role_id)
except Exception as ex:
- LOG.exception("Failed removing role from project which still"
+ LOG.exception("Failed removing role from project which still "
"exists, exception: %s", ex)
def _project_exists(self, project_id):
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
index 83cf42c..1a08246 100644
--- a/tempest/cmd/cleanup_service.py
+++ b/tempest/cmd/cleanup_service.py
@@ -720,11 +720,11 @@
class ImageService(BaseService):
def __init__(self, manager, **kwargs):
super(ImageService, self).__init__(kwargs)
- self.client = manager.compute_images_client
+ self.client = manager.image_client_v2
def list(self):
client = self.client
- images = client.list_images({"all_tenants": True})['images']
+ images = client.list_images(params={"all_tenants": True})['images']
if not self.is_save_state:
images = [image for image in images if image['id']
not in self.saved_state_json['images'].keys()]
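The cleanup service now talks to Glance v2 directly instead of the deprecated Nova image proxy, and the v2 client takes its filters through a params dict. A hedged sketch of the call shape, assuming an image_client_v2 from a tempest client manager and admin credentials (without which 'all_tenants' has no effect):

    def purge_foreign_images(image_client_v2, saved_state):
        images = image_client_v2.list_images(
            params={"all_tenants": True})['images']
        for image in images:
            # Only remove images that were not present in the saved state.
            if image['id'] not in saved_state.get('images', {}):
                image_client_v2.delete_image(image['id'])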
diff --git a/tempest/cmd/run.py b/tempest/cmd/run.py
index 84c6d9a..3e84b82 100644
--- a/tempest/cmd/run.py
+++ b/tempest/cmd/run.py
@@ -243,8 +243,8 @@
parser.add_argument('--load-list', '--load_list',
help='Path to a non-regex whitelist file, '
'this file contains a separate test '
- 'on each newline. This command'
- 'supports files created by the tempest'
+ 'on each newline. This command '
+ 'supports files created by the tempest '
'run ``--list-tests`` command')
# list only args
parser.add_argument('--list-tests', '-l', action='store_true',
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
index 6c2fee8..d25d3ca 100644
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -365,11 +365,11 @@
catalog_type = getattr(cfg, 'catalog_type', None)
if not catalog_type:
continue
- else:
- if cfgname == 'identity':
- # Keystone is a required service for tempest
- continue
- if catalog_type not in services:
+ if cfgname == 'identity':
+ # Keystone is a required service for tempest
+ continue
+ if catalog_type not in services:
+ try:
if getattr(CONF.service_available, codename_match[cfgname]):
print('Endpoint type %s not found either disable service '
'%s or fix the catalog_type in the config file' % (
@@ -377,7 +377,13 @@
if update:
change_option(codename_match[cfgname],
'service_available', False)
- else:
+ except KeyError:
+ print('%s is a third party plugin and cannot be verified '
+ 'automatically; it is suggested to set it to '
+ 'False because the %s service is not available.' % (
+ cfgname, catalog_type))
+ else:
+ try:
if not getattr(CONF.service_available,
codename_match[cfgname]):
print('Endpoint type %s is available, service %s should be'
@@ -391,6 +397,11 @@
avail_services.append(codename_match[cfgname])
else:
avail_services.append(codename_match[cfgname])
+ except KeyError:
+ print('%s is a third party plugin and cannot be verified '
+ 'automatically; it is suggested to set it to '
+ 'True because the %s service is available.' % (
+ cfgname, catalog_type))
return avail_services
diff --git a/tempest/cmd/workspace.py b/tempest/cmd/workspace.py
index 929a584..d0c4b28 100644
--- a/tempest/cmd/workspace.py
+++ b/tempest/cmd/workspace.py
@@ -86,6 +86,7 @@
def rename_workspace(self, old_name, new_name):
self._populate()
self._name_exists(old_name)
+ self._invalid_name_check(new_name)
self._workspace_name_exists(new_name)
self.workspaces[new_name] = self.workspaces.pop(old_name)
self._write_file()
@@ -93,7 +94,7 @@
@lockutils.synchronized('workspaces', external=True)
def move_workspace(self, name, path):
self._populate()
- path = os.path.abspath(os.path.expanduser(path))
+ path = os.path.abspath(os.path.expanduser(path)) if path else path
self._name_exists(name)
self._validate_path(path)
self.workspaces[name] = path
@@ -114,6 +115,7 @@
@lockutils.synchronized('workspaces', external=True)
def remove_workspace_directory(self, workspace_path):
+ self._validate_path(workspace_path)
shutil.rmtree(workspace_path)
@lockutils.synchronized('workspaces', external=True)
@@ -128,7 +130,17 @@
name))
sys.exit(1)
+ def _invalid_name_check(self, name):
+ if not name:
+ print("None or empty name is specified."
+ " Please specify correct name for workspace.")
+ sys.exit(1)
+
def _validate_path(self, path):
+ if not path:
+ print("None or empty path is specified for workspace."
+ " Please specify correct workspace path.")
+ sys.exit(1)
if not os.path.exists(path):
print("Path does not exist.")
sys.exit(1)
@@ -137,10 +149,11 @@
def register_new_workspace(self, name, path, init=False):
"""Adds the new workspace and writes out the new workspace config"""
self._populate()
- path = os.path.abspath(os.path.expanduser(path))
+ path = os.path.abspath(os.path.expanduser(path)) if path else path
# This only happens when register is called from outside of init
if not init:
self._validate_path(path)
+ self._invalid_name_check(name)
self._workspace_name_exists(name)
self.workspaces[name] = path
self._write_file()
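The new guards reject empty or None workspace names and paths before anything is written or removed, which matters most for remove_workspace_directory since it ends in an rmtree. A minimal standalone sketch of the same validation idea:

    import os
    import sys

    def _require_nonempty(value, what):
        # Bail out early instead of writing a bogus entry or removing
        # a directory derived from an empty path.
        if not value:
            print("None or empty %s is specified."
                  " Please specify a correct %s." % (what, what))
            sys.exit(1)

    def register_workspace(workspaces, name, path):
        _require_nonempty(name, "workspace name")
        _require_nonempty(path, "workspace path")
        workspaces[name] = os.path.abspath(os.path.expanduser(path))
        return workspaces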
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index f2730b3..1489e60 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -44,15 +44,14 @@
def is_scheduler_filter_enabled(filter_name):
"""Check the list of enabled compute scheduler filters from config.
- This function checks whether the given compute scheduler filter is
- available and configured in the config file. If the
- scheduler_available_filters option is set to 'all' (Default value. which
- means default filters are configured in nova) in tempest.conf then, this
- function returns True with assumption that requested filter 'filter_name'
- is one of available filter in nova ("nova.scheduler.filters.all_filters").
+ This function checks whether the given compute scheduler filter is enabled
+ in the nova config file. If the scheduler_enabled_filters option is set to
+ 'all' in tempest.conf, then this function returns True with the
+ assumption that the requested filter 'filter_name' is one of the
+ enabled filters in nova ("nova.scheduler.filters.all_filters").
"""
- filters = CONF.compute_feature_enabled.scheduler_available_filters
+ filters = CONF.compute_feature_enabled.scheduler_enabled_filters
if not filters:
return False
if 'all' in filters:
@@ -79,23 +78,22 @@
:param wait_until: Server status to wait for the server to reach after
its creation.
:param volume_backed: Whether the server is volume backed or not.
- If this is true, a volume will be created and
- create server will be requested with
- 'block_device_mapping_v2' populated with below
- values:
- --------------------------------------------
- bd_map_v2 = [{
- 'uuid': volume['volume']['id'],
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'boot_index': 0,
- 'delete_on_termination': True}]
- kwargs['block_device_mapping_v2'] = bd_map_v2
- ---------------------------------------------
- If server needs to be booted from volume with other
- combination of bdm inputs than mentioned above, then
- pass the bdm inputs explicitly as kwargs and image_id
- as empty string ('').
+ If this is true, a volume will be created and the server create request
+ will be sent with 'block_device_mapping_v2' populated with the values
+ below:
+
+ .. code-block:: python
+
+ bd_map_v2 = [{
+ 'uuid': volume['volume']['id'],
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'boot_index': 0,
+ 'delete_on_termination': True}]
+ kwargs['block_device_mapping_v2'] = bd_map_v2
+
+ If the server needs to be booted from a volume with a different
+ combination of bdm inputs than mentioned above, then pass the bdm inputs
+ explicitly as kwargs and image_id as an empty string ('').
:param name: Name of the server to be provisioned. If not defined a random
string ending with '-instance' will be generated.
:param flavor: Flavor of the server to be provisioned. If not defined,
@@ -170,10 +168,19 @@
'imageRef': image_id,
'size': CONF.volume.volume_size}
volume = volumes_client.create_volume(**params)
- waiters.wait_for_volume_resource_status(volumes_client,
- volume['volume']['id'],
- 'available')
-
+ try:
+ waiters.wait_for_volume_resource_status(volumes_client,
+ volume['volume']['id'],
+ 'available')
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ try:
+ volumes_client.delete_volume(volume['volume']['id'])
+ volumes_client.wait_for_resource_deletion(
+ volume['volume']['id'])
+ except Exception as exc:
+ LOG.exception("Deleting volume %s failed, exception %s",
+ volume['volume']['id'], exc)
bd_map_v2 = [{
'uuid': volume['volume']['id'],
'source_type': 'volume',
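The try/except added around the wait guarantees that a bootable volume which never reaches 'available' is deleted before the original failure is re-raised, so it does not leak into later tests. A condensed sketch of that pattern, assuming tempest's waiters module and volumes client:

    from oslo_utils import excutils

    def create_boot_volume(volumes_client, waiters, image_id, size):
        volume = volumes_client.create_volume(imageRef=image_id, size=size)
        try:
            waiters.wait_for_volume_resource_status(
                volumes_client, volume['volume']['id'], 'available')
        except Exception:
            # Re-raise the wait failure, but best-effort delete the
            # volume first so it is not left behind.
            with excutils.save_and_reraise_exception():
                try:
                    volumes_client.delete_volume(volume['volume']['id'])
                    volumes_client.wait_for_resource_deletion(
                        volume['volume']['id'])
                except Exception:
                    pass
        return volume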
diff --git a/tempest/common/identity.py b/tempest/common/identity.py
index eaf651b..525110b 100644
--- a/tempest/common/identity.py
+++ b/tempest/common/identity.py
@@ -64,7 +64,8 @@
should not be used for testing identity features.
:param clients: a client manager.
- :return
+ :return: v2 or v3 of CredsClient
+ :rtype: V2CredsClient or V3CredsClient
"""
if CONF.identity.auth_version == 'v2':
client = clients.identity_client
diff --git a/tempest/common/utils/linux/remote_client.py b/tempest/common/utils/linux/remote_client.py
index 52ccfa9..49d9742 100644
--- a/tempest/common/utils/linux/remote_client.py
+++ b/tempest/common/utils/linux/remote_client.py
@@ -98,6 +98,7 @@
def get_nic_name_by_ip(self, address):
cmd = "ip -o addr | awk '/%s/ {print $2}'" % address
nic = self.exec_command(cmd)
+ LOG.debug('(get_nic_name_by_ip) Command result: %s', nic)
return nic.strip().strip(":").split('@')[0].lower()
def get_dns_servers(self):
diff --git a/tempest/config.py b/tempest/config.py
index 810e7f4..e431754 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -101,7 +101,7 @@
deprecated_group='identity'),
cfg.StrOpt('admin_domain_name',
default='Default',
- help="Admin domain name for authentication (Keystone V3)."
+ help="Admin domain name for authentication (Keystone V3). "
"The same domain applies to user and project",
deprecated_group='identity'),
]
@@ -170,15 +170,35 @@
cfg.IntOpt('user_lockout_failure_attempts',
default=2,
help="The number of unsuccessful login attempts the user is "
- "allowed before having the account locked."),
+ "allowed before having the account locked. This only "
+ "takes effect when identity-feature-enabled."
+ "security_compliance is set to 'True'. For more details, "
+ "refer to keystone config options keystone.conf:"
+ "security_compliance.lockout_failure_attempts. "
+ "This feature is disabled by default in keystone."),
cfg.IntOpt('user_lockout_duration',
default=5,
help="The number of seconds a user account will remain "
- "locked."),
+ "locked. This only takes "
+ "effect when identity-feature-enabled.security_compliance "
+ "is set to 'True'. For more details, refer to "
+ "keystone config options "
+ "keystone.conf:security_compliance.lockout_duration. "
+ "Setting this option will have no effect unless you also "
+ "set identity.user_lockout_failure_attempts."),
cfg.IntOpt('user_unique_last_password_count',
default=2,
help="The number of passwords for a user that must be unique "
- "before an old password can be reused."),
+ "before an old password can be reused. This only takes "
+ "effect when identity-feature-enabled.security_compliance "
+ "is set to 'True'. "
+ "This config option corresponds to keystone.conf: "
+ "security_compliance.unique_last_password_count, whose "
+ "default value is 0 meaning disabling this feature. "
+ "NOTE: This config option value must be same as "
+ "keystone.conf: security_compliance.unique_last_password_"
+ "count otherwise test might fail"
+ ),
]
service_clients_group = cfg.OptGroup(name='service-clients',
@@ -242,7 +262,13 @@
cfg.BoolOpt('application_credentials',
default=False,
help='Does the environment have application credentials '
- 'enabled?')
+ 'enabled?'),
+ cfg.BoolOpt('immutable_user_source',
+ default=False,
+ help='Set to True if the environment has a read-only '
+ 'user source. This will skip all tests that attempt to '
+ 'create, delete, or modify users. This should not be set '
+ 'to True if using dynamic credentials')
]
compute_group = cfg.OptGroup(name='compute',
@@ -339,6 +365,38 @@
"with format 'X.Y' or string 'latest'"),
]
+placement_group = cfg.OptGroup(name='placement',
+ title='Placement Service Options')
+
+PlacementGroup = [
+ cfg.StrOpt('endpoint_type',
+ default='public',
+ choices=['public', 'admin', 'internal'],
+ help="The endpoint type to use for the placement service."),
+ cfg.StrOpt('catalog_type',
+ default='placement',
+ help="Catalog type of the Placement service."),
+ cfg.StrOpt('region',
+ default='RegionOne',
+ help="The placement region name to use. If empty, the value "
+ "of [identity]/region is used instead. If no such region "
+ "is found in the service catalog, the first region found "
+ "is used."),
+ cfg.StrOpt('min_microversion',
+ default=None,
+ help="Lower version of the test target microversion range. "
+ "The format is 'X.Y', where 'X' and 'Y' are int values. "
+ "Valid values are string with format 'X.Y' or string "
+ "'latest'"),
+ cfg.StrOpt('max_microversion',
+ default=None,
+ help="Upper version of the test target microversion range. "
+ "The format is 'X.Y', where 'X' and 'Y' are int values. "
+ "Valid values are string with format 'X.Y' or string "
+ "'latest'"),
+]
+
+
compute_features_group = cfg.OptGroup(name='compute-feature-enabled',
title="Enabled Compute Service Features")
@@ -463,20 +521,30 @@
cfg.BoolOpt('config_drive',
default=True,
help='Enable special configuration drive with metadata.'),
- cfg.ListOpt('scheduler_available_filters',
- default=['all'],
- help="A list of enabled filters that nova will accept as hints"
- " to the scheduler when creating a server. A special "
- "entry 'all' indicates all filters that are included "
- "with nova are enabled. Empty list indicates all filters "
- "are disabled. The full list of available filters is in "
- "nova.conf: filter_scheduler.enabled_filters. If the "
+ cfg.ListOpt('scheduler_enabled_filters',
+ default=["RetryFilter", "AvailabilityZoneFilter",
+ "ComputeFilter", "ComputeCapabilitiesFilter",
+ "ImagePropertiesFilter",
+ "ServerGroupAntiAffinityFilter",
+ "ServerGroupAffinityFilter"],
+ help="A list of enabled filters that Nova will accept as "
+ "hints to the scheduler when creating a server. If the "
"default value is overridden in nova.conf by the test "
"environment (which means that a different set of "
"filters is enabled than what is included in Nova by "
- "default) then, this option must be configured to "
+ "default), then this option must be configured to "
"contain the same filters that Nova uses in the test "
- "environment."),
+ "environment. A special entry 'all' indicates all "
+ "filters that are included with Nova are enabled. If "
+ "using 'all', be sure to enable all filters in "
+ "nova.conf, as tests can fail in unpredictable ways if "
+ "Nova's and Tempest's enabled filters don't match. "
+ "Empty list indicates all filters are disabled. The "
+ "full list of enabled filters is in nova.conf: "
+ "filter_scheduler.enabled_filters.",
+ deprecated_opts=[cfg.DeprecatedOpt(
+ 'scheduler_available_filters',
+ group='compute-feature-enabled')]),
cfg.BoolOpt('swap_volume',
default=False,
help='Does the test environment support in-place swapping of '
@@ -547,7 +615,7 @@
'test v2 APIs only so this config option '
'will be removed.'),
cfg.BoolOpt('api_v1',
- default=True,
+ default=False,
help="Is the v1 image API enabled",
deprecated_for_removal=True,
deprecated_reason='Glance v1 APIs are deprecated and v2 APIs '
@@ -672,9 +740,11 @@
ValidationGroup = [
cfg.BoolOpt('run_validation',
- default=False,
+ default=True,
help='Enable ssh on created servers and creation of additional'
- ' validation resources to enable remote access'),
+ ' validation resources to enable remote access.'
+ ' In case the guest does not support ssh, set it'
+ ' to false.'),
cfg.BoolOpt('security_group',
default=True,
help='Enable/disable security groups.'),
@@ -1058,6 +1128,7 @@
(scenario_group, ScenarioGroup),
(service_available_group, ServiceAvailableGroup),
(debug_group, DebugGroup),
+ (placement_group, PlacementGroup),
(None, DefaultGroup)
]
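Once placement_group and PlacementGroup are registered at the end of config.py, tests and clients read the new [placement] options like any other tempest setting. An illustrative sketch of the lookups (values shown are defaults or assumptions, not requirements):

    from tempest import config

    CONF = config.CONF

    catalog_type = CONF.placement.catalog_type      # 'placement' by default
    endpoint_type = CONF.placement.endpoint_type    # 'public' unless overridden
    region = CONF.placement.region or CONF.identity.region
    max_microversion = CONF.placement.max_microversion  # e.g. '1.17' or 'latest'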
diff --git a/tempest/lib/api_schema/response/volume/qos.py b/tempest/lib/api_schema/response/volume/qos.py
new file mode 100644
index 0000000..d1b3910
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/qos.py
@@ -0,0 +1,123 @@
+# Copyright 2018 ZTE Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+show_qos = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'qos_specs': {
+ 'type': 'object',
+ 'properties': {
+ 'name': {'type': 'string'},
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'consumer': {'type': 'string'},
+ 'specs': {'type': ['object', 'null']},
+ },
+ 'additionalProperties': False,
+ 'required': ['name', 'id', 'specs']
+ },
+ 'links': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'href': {'type': 'string',
+ 'format': 'uri'},
+ 'rel': {'type': 'string'},
+ },
+ 'additionalProperties': False,
+ 'required': ['href', 'rel']
+ }
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['qos_specs', 'links']
+ }
+}
+
+delete_qos = {'status_code': [202]}
+
+list_qos = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'qos_specs': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'specs': {
+ 'type': 'object',
+ 'patternProperties': {'^.+$': {'type': 'string'}}
+ },
+ 'consumer': {'type': 'string'},
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'name': {'type': 'string'}
+ },
+ 'additionalProperties': False,
+ 'required': ['specs', 'id', 'name']
+ }
+ }
+ },
+ 'additionalProperties': False,
+ 'required': ['qos_specs']
+ }
+}
+
+set_qos_key = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'qos_specs': {
+ 'type': 'object',
+ 'patternProperties': {'^.+$': {'type': 'string'}}
+ },
+ },
+ 'additionalProperties': False,
+ 'required': ['qos_specs']
+ }
+}
+
+unset_qos_key = {'status_code': [202]}
+associate_qos = {'status_code': [202]}
+
+show_association_qos = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'qos_associations': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'association_type': {'type': 'string'},
+ 'id': {'type': 'string', 'format': 'uuid'},
+ 'name': {'type': 'string'}
+ },
+ 'additionalProperties': False,
+ 'required': ['association_type', 'id', 'name']
+ }
+ },
+ },
+ 'additionalProperties': False,
+ 'required': ['qos_associations']
+ }
+}
+
+disassociate_qos = {'status_code': [202]}
+disassociate_all_qos = {'status_code': [202]}
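The new schemas are plain JSON Schema documents, so they can be exercised directly with the jsonschema library before being wired into the QoS client further down. A small sketch validating a plausible (invented) show_qos body:

    import jsonschema

    from tempest.lib.api_schema.response.volume import qos

    sample_body = {
        'qos_specs': {
            'name': 'qos-1',
            'id': '11111111-2222-3333-4444-555555555555',
            'consumer': 'back-end',
            'specs': {'read_iops_sec': '2000'},
        },
        'links': [
            {'href': 'http://cinder.example.com/v3/qos-specs/1',
             'rel': 'self'},
        ],
    }

    # Raises jsonschema.ValidationError if the payload does not conform.
    jsonschema.validate(sample_body, qos.show_qos['response_body'])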
diff --git a/tempest/lib/auth.py b/tempest/lib/auth.py
index 2dd9d00..8e6d3d5 100644
--- a/tempest/lib/auth.py
+++ b/tempest/lib/auth.py
@@ -324,7 +324,7 @@
pass
if expiry is None:
raise ValueError(
- "time data '{data}' does not match any of the"
+ "time data '{data}' does not match any of the "
"expected formats: {formats}".format(
data=expiry_string, formats=self.EXPIRY_DATE_FORMATS))
return expiry
diff --git a/tempest/lib/cmd/check_uuid.py b/tempest/lib/cmd/check_uuid.py
index 82fcd0b..ac40eef 100755
--- a/tempest/lib/cmd/check_uuid.py
+++ b/tempest/lib/cmd/check_uuid.py
@@ -233,8 +233,8 @@
if self._is_test_case(module, node))
for node in test_cases:
for subnode in filter(self._is_test_method, node.body):
- test_name = '%s.%s' % (node.name, subnode.name)
- tests[module_name]['tests'][test_name] = subnode
+ test_name = '%s.%s' % (node.name, subnode.name)
+ tests[module_name]['tests'][test_name] = subnode
return tests
@staticmethod
diff --git a/tempest/lib/common/fixed_network.py b/tempest/lib/common/fixed_network.py
index 875a79d..926c3a4 100644
--- a/tempest/lib/common/fixed_network.py
+++ b/tempest/lib/common/fixed_network.py
@@ -24,7 +24,7 @@
"""Get a full network dict from just a network name
:param str name: the name of the network to use
- :param NetworksClient compute_networks_client: The network client
+ :param network.NetworksClient compute_networks_client: The network client
object to use for making the network lists api request
:return: The full dictionary for the network in question
:rtype: dict
diff --git a/tempest/lib/common/http.py b/tempest/lib/common/http.py
index 738c37f..8c1a802 100644
--- a/tempest/lib/common/http.py
+++ b/tempest/lib/common/http.py
@@ -19,7 +19,8 @@
class ClosingProxyHttp(urllib3.ProxyManager):
def __init__(self, proxy_url, disable_ssl_certificate_validation=False,
- ca_certs=None, timeout=None):
+ ca_certs=None, timeout=None, follow_redirects=True):
+ self.follow_redirects = follow_redirects
kwargs = {}
if disable_ssl_certificate_validation:
@@ -50,9 +51,14 @@
new_headers = dict(original_headers, connection='close')
new_kwargs = dict(kwargs, headers=new_headers)
- # Follow up to 5 redirections. Don't raise an exception if
- # it's exceeded but return the HTTP 3XX response instead.
- retry = urllib3.util.Retry(raise_on_redirect=False, redirect=5)
+ if self.follow_redirects:
+ # Follow up to 5 redirections. Don't raise an exception if
+ # it's exceeded but return the HTTP 3XX response instead.
+ retry = urllib3.util.Retry(raise_on_redirect=False, redirect=5)
+ else:
+ # Do not follow redirections. Don't raise an exception if
+ # a redirect is found, but return the HTTP 3XX response instead.
+ retry = urllib3.util.Retry(redirect=False)
r = super(ClosingProxyHttp, self).request(method, url, retries=retry,
*args, **new_kwargs)
return Response(r), r.data
@@ -60,7 +66,8 @@
class ClosingHttp(urllib3.poolmanager.PoolManager):
def __init__(self, disable_ssl_certificate_validation=False,
- ca_certs=None, timeout=None):
+ ca_certs=None, timeout=None, follow_redirects=True):
+ self.follow_redirects = follow_redirects
kwargs = {}
if disable_ssl_certificate_validation:
@@ -93,9 +100,14 @@
new_headers = dict(original_headers, connection='close')
new_kwargs = dict(kwargs, headers=new_headers)
- # Follow up to 5 redirections. Don't raise an exception if
- # it's exceeded but return the HTTP 3XX response instead.
- retry = urllib3.util.Retry(raise_on_redirect=False, redirect=5)
+ if self.follow_redirects:
+ # Follow up to 5 redirections. Don't raise an exception if
+ # it's exceeded but return the HTTP 3XX response instead.
+ retry = urllib3.util.Retry(raise_on_redirect=False, redirect=5)
+ else:
+ # Do not follow redirections. Don't raise an exception if
+ # a redirect is found, but return the HTTP 3XX response instead.
+ retry = urllib3.util.Retry(redirect=False)
r = super(ClosingHttp, self).request(method, url, retries=retry,
*args, **new_kwargs)
return Response(r), r.data
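With follow_redirects=False the pool manager hands the 3XX response back to the caller instead of chasing it, which lets a client assert on the redirect itself (for example its Location header). A hedged usage sketch, assuming the httplib2-style (url, method) call signature these classes expose:

    from tempest.lib.common.http import ClosingHttp

    # Redirects are surfaced to the caller instead of being followed.
    http_obj = ClosingHttp(follow_redirects=False)
    resp, body = http_obj.request('http://example.com/old-path', 'GET')
    # A 301/302 now comes back as-is, so the caller can inspect the
    # Location header rather than landing on the redirect target.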
diff --git a/tempest/lib/common/jsonschema_validator.py b/tempest/lib/common/jsonschema_validator.py
index 9a35b76..bbf5e89 100644
--- a/tempest/lib/common/jsonschema_validator.py
+++ b/tempest/lib/common/jsonschema_validator.py
@@ -12,9 +12,8 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-import base64
-
import jsonschema
+from oslo_serialization import base64
from oslo_utils import timeutils
import six
@@ -46,9 +45,7 @@
try:
if isinstance(instance, six.text_type):
instance = instance.encode('utf-8')
- base64.decodestring(instance)
- except base64.binascii.Error:
- return False
+ base64.decode_as_bytes(instance)
except TypeError:
# The name must be string type. If instance isn't string type, the
# TypeError will be raised at here.
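Switching to oslo.serialization keeps the base64 format check working on both text and bytes without reaching into binascii internals. A small standalone sketch of the check outside the jsonschema plumbing (inputs are illustrative):

    from oslo_serialization import base64

    def looks_like_base64(instance):
        # Non-string input raises TypeError and is rejected; anything
        # decodable is accepted, mirroring the format checker above.
        try:
            if isinstance(instance, str):
                instance = instance.encode('utf-8')
            base64.decode_as_bytes(instance)
        except TypeError:
            return False
        return True

    print(looks_like_base64('dGVtcGVzdA=='))  # True
    print(looks_like_base64(12345))           # False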
diff --git a/tempest/lib/common/rest_client.py b/tempest/lib/common/rest_client.py
index e2fd722..e612bd1 100644
--- a/tempest/lib/common/rest_client.py
+++ b/tempest/lib/common/rest_client.py
@@ -70,6 +70,7 @@
:param str http_timeout: Timeout in seconds to wait for the http request to
return
:param str proxy_url: http proxy url to use.
+ :param bool follow_redirects: Set to false to stop following redirects.
"""
# The version of the API this client implements
@@ -82,7 +83,7 @@
build_interval=1, build_timeout=60,
disable_ssl_certificate_validation=False, ca_certs=None,
trace_requests='', name=None, http_timeout=None,
- proxy_url=None):
+ proxy_url=None, follow_redirects=True):
self.auth_provider = auth_provider
self.service = service
self.region = region
@@ -107,11 +108,11 @@
self.http_obj = http.ClosingProxyHttp(
proxy_url,
disable_ssl_certificate_validation=dscv, ca_certs=ca_certs,
- timeout=http_timeout)
+ timeout=http_timeout, follow_redirects=follow_redirects)
else:
self.http_obj = http.ClosingHttp(
disable_ssl_certificate_validation=dscv, ca_certs=ca_certs,
- timeout=http_timeout)
+ timeout=http_timeout, follow_redirects=follow_redirects)
def get_headers(self, accept_type=None, send_type=None):
"""Return the default headers which will be used with outgoing requests
@@ -373,7 +374,7 @@
return self.request('COPY', url, extra_headers, headers)
def get_versions(self):
- """Get the versions on a endpoint from the keystone catalog
+ """Get the versions on an endpoint from the keystone catalog
This method will make a GET request on the baseurl from the keystone
catalog to return a list of API versions. It is expected that a GET
@@ -525,7 +526,7 @@
if (resp.status == 205 and
0 != len(set(resp.keys()) - set(('status',)) -
self.response_header_lc - self.general_header_lc)):
- raise exceptions.ResponseWithEntity()
+ raise exceptions.ResponseWithEntity()
# NOTE(afazekas)
# Now the swift sometimes (delete not empty container)
# returns with non json error response, we can create new rest class
diff --git a/tempest/lib/common/utils/data_utils.py b/tempest/lib/common/utils/data_utils.py
index c5df590..3483c51 100644
--- a/tempest/lib/common/utils/data_utils.py
+++ b/tempest/lib/common/utils/data_utils.py
@@ -50,8 +50,7 @@
(e.g. 'prefixfoo-namebar-154876201')
:rtype: string
"""
- randbits = str(random.randint(1, 0x7fffffff))
- rand_name = randbits
+ rand_name = str(random.randint(1, 0x7fffffff))
if name:
rand_name = name + '-' + rand_name
if prefix:
@@ -65,9 +64,9 @@
:param int length: The length of password that you expect to set
(If it's smaller than 3, it's same as 3.)
:return: a random password. The format is
- '<random upper letter>-<random number>-<random special character>
- -<random ascii letters or digit characters or special symbols>'
- (e.g. 'G2*ac8&lKFFgh%2')
+ ``'<random upper letter>-<random number>-<random special character>
+ -<random ascii letters or digit characters or special symbols>'``
+ (e.g. ``G2*ac8&lKFFgh%2``)
:rtype: string
"""
upper = random.choice(string.ascii_uppercase)
diff --git a/tempest/lib/common/utils/misc.py b/tempest/lib/common/utils/misc.py
index f13b4c8..2b0fcd5 100644
--- a/tempest/lib/common/utils/misc.py
+++ b/tempest/lib/common/utils/misc.py
@@ -14,8 +14,6 @@
# under the License.
from oslo_log import log as logging
-from tempest.lib.common.utils import test_utils
-
LOG = logging.getLogger(__name__)
@@ -28,10 +26,3 @@
instances[cls] = cls()
return instances[cls]
return getinstance
-
-
-def find_test_caller(*args, **kwargs):
- LOG.warning("tempest.lib.common.utils.misc.find_test_caller is deprecated "
- "in favor of tempest.lib.common.utils.test_utils."
- "find_test_caller")
- test_utils.find_test_caller(*args, **kwargs)
diff --git a/tempest/lib/services/clients.py b/tempest/lib/services/clients.py
index 833cfd6..90debd9 100644
--- a/tempest/lib/services/clients.py
+++ b/tempest/lib/services/clients.py
@@ -18,7 +18,6 @@
import importlib
import inspect
import sys
-import warnings
from debtcollector import removals
from oslo_log import log as logging
@@ -32,9 +31,9 @@
from tempest.lib.services import image
from tempest.lib.services import network
from tempest.lib.services import object_storage
+from tempest.lib.services import placement
from tempest.lib.services import volume
-warnings.simplefilter("once")
LOG = logging.getLogger(__name__)
@@ -46,6 +45,7 @@
"""
return {
'compute': compute,
+ 'placement': placement,
'identity.v2': identity.v2,
'identity.v3': identity.v3,
'image.v1': image.v1,
diff --git a/tempest/lib/services/identity/v3/inherited_roles_client.py b/tempest/lib/services/identity/v3/inherited_roles_client.py
index 691c7fd..3949437 100644
--- a/tempest/lib/services/identity/v3/inherited_roles_client.py
+++ b/tempest/lib/services/identity/v3/inherited_roles_client.py
@@ -114,8 +114,7 @@
def check_user_has_flag_on_inherited_to_project(
self, project_id, user_id, role_id):
- """Checks whether a user has a role assignment"""
- """with the inherited_to_projects flag on a project."""
+ """Check if user has an inherited project role on project"""
resp, body = self.head(
"OS-INHERIT/projects/%s/users/%s/roles/%s/inherited_to_projects"
% (project_id, user_id, role_id))
@@ -142,8 +141,7 @@
def check_group_has_flag_on_inherited_to_project(
self, project_id, group_id, role_id):
- """Checks whether a group has a role assignment"""
- """with the inherited_to_projects flag on a project."""
+ """Check if group has an inherited project role on project"""
resp, body = self.head(
"OS-INHERIT/projects/%s/groups/%s/roles/%s/inherited_to_projects"
% (project_id, group_id, role_id))
diff --git a/tempest/lib/services/network/agents_client.py b/tempest/lib/services/network/agents_client.py
index 5068121..9fa4672 100644
--- a/tempest/lib/services/network/agents_client.py
+++ b/tempest/lib/services/network/agents_client.py
@@ -37,6 +37,16 @@
uri = '/agents/%s' % agent_id
return self.show_resource(uri, **fields)
+ def delete_agent(self, agent_id):
+ """Delete agent.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/network/v2/index.html#delete-agent
+ """
+ uri = '/agents/%s' % agent_id
+ return self.delete_resource(uri)
+
def list_agents(self, **filters):
"""List all agents.
diff --git a/tempest/lib/services/network/versions_client.py b/tempest/lib/services/network/versions_client.py
index f87fe87..807f416 100644
--- a/tempest/lib/services/network/versions_client.py
+++ b/tempest/lib/services/network/versions_client.py
@@ -12,32 +12,36 @@
# License for the specific language governing permissions and limitations
# under the License.
-import time
-
from oslo_serialization import jsonutils as json
+from tempest.lib.common import rest_client
from tempest.lib.services.network import base
class NetworkVersionsClient(base.BaseNetworkClient):
def list_versions(self):
- """Do a GET / to fetch available API version information."""
+ """Do a GET / to fetch available API version information.
- version_url = self._get_base_version_url()
+ For more information, please refer to the official API reference:
+ https://developer.openstack.org/api-ref/network/v2/index.html#list-api-versions
+ """
- # Note: we do a raw_request here because we want to use
+ # Note: we do a self.get('/') here because we want to use
# an unversioned URL, not "v2/$project_id/".
- # Since raw_request doesn't log anything, we do that too.
- start = time.time()
- self._log_request_start('GET', version_url)
- response, body = self.raw_request(version_url, 'GET')
- self._error_checker(response, body)
- end = time.time()
- self._log_request('GET', version_url, response,
- secs=(end - start), resp_body=body)
-
- self.response_checker('GET', response, body)
- self.expected_success(200, response.status)
+ resp, body = self.get('/')
body = json.loads(body)
- return body
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def show_version(self, version):
+ """Do a GET /<version> to fetch available resources.
+
+ For more information, please refer to the official API reference:
+ https://developer.openstack.org/api-ref/network/v2/index.html#show-api-v2-details
+ """
+
+ resp, body = self.get(version + '/')
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
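After the rewrite the versions client goes through the standard self.get() path, so requests are logged and error-checked like every other tempest client, and the new show_version exposes the per-version resource index. A hedged usage sketch, assuming the client instance comes from a tempest client manager:

    def neutron_api_overview(versions_client):
        body = versions_client.list_versions()
        available = [v['id'] for v in body['versions']]   # e.g. ['v2.0']
        resources = []
        if 'v2.0' in available:
            index = versions_client.show_version('v2.0')
            resources = [r['name'] for r in index['resources']]
        return available, resources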
diff --git a/tempest/lib/services/placement/__init__.py b/tempest/lib/services/placement/__init__.py
new file mode 100644
index 0000000..5c20c57
--- /dev/null
+++ b/tempest/lib/services/placement/__init__.py
@@ -0,0 +1,18 @@
+# Copyright (c) 2019 Ericsson
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.placement.placement_client import \
+ PlacementClient
+
+__all__ = ['PlacementClient']
diff --git a/tempest/lib/services/placement/base_placement_client.py b/tempest/lib/services/placement/base_placement_client.py
new file mode 100644
index 0000000..505a515
--- /dev/null
+++ b/tempest/lib/services/placement/base_placement_client.py
@@ -0,0 +1,43 @@
+# Copyright (c) 2019 Ericsson
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.common import api_version_utils
+from tempest.lib.common import rest_client
+
+PLACEMENT_MICROVERSION = None
+
+
+class BasePlacementClient(rest_client.RestClient):
+
+ api_microversion_header_name = 'OpenStack-API-Version'
+ version_header_value = 'placement %s'
+
+ def get_headers(self):
+ headers = super(BasePlacementClient, self).get_headers()
+ if PLACEMENT_MICROVERSION:
+ headers[self.api_microversion_header_name] = \
+ self.version_header_value % PLACEMENT_MICROVERSION
+ return headers
+
+ def request(self, method, url, extra_headers=False, headers=None,
+ body=None, chunked=False):
+ resp, resp_body = super(BasePlacementClient, self).request(
+ method, url, extra_headers, headers, body, chunked)
+ if (PLACEMENT_MICROVERSION and
+ PLACEMENT_MICROVERSION != api_version_utils.LATEST_MICROVERSION):
+ api_version_utils.assert_version_header_matches_request(
+ self.api_microversion_header_name,
+ self.version_header_value % PLACEMENT_MICROVERSION,
+ resp)
+ return resp, resp_body
diff --git a/tempest/lib/services/placement/placement_client.py b/tempest/lib/services/placement/placement_client.py
new file mode 100644
index 0000000..2c6d919
--- /dev/null
+++ b/tempest/lib/services/placement/placement_client.py
@@ -0,0 +1,50 @@
+# Copyright (c) 2019 Ericsson
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_serialization import jsonutils as json
+from six.moves.urllib import parse as urllib
+
+from tempest.lib.common import rest_client
+from tempest.lib.services.placement import base_placement_client
+
+
+class PlacementClient(base_placement_client.BasePlacementClient):
+
+ def list_allocation_candidates(self, **params):
+ """List allocation candidates.
+
+ For full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/placement/#list-allocation-candidates
+ """
+ url = '/allocation_candidates'
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+ resp, body = self.get(url)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def list_allocations(self, consumer_uuid):
+ """List all allocation records for the consumer.
+
+ For full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/placement/#list-allocations
+ """
+ url = '/allocations/%s' % consumer_uuid
+ resp, body = self.get(url)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
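Both calls map directly onto the placement REST API; allocation candidate queries use the "CLASS:AMOUNT" resource syntax. A hedged sketch of how a test might drive the new client (client wiring, consumer uuid and amounts are illustrative):

    def summarize_placement(placement_client, consumer_uuid):
        candidates = placement_client.list_allocation_candidates(
            resources='VCPU:1,MEMORY_MB:512,DISK_GB:1')
        allocations = placement_client.list_allocations(
            consumer_uuid)['allocations']
        # 'allocations' is keyed by resource provider UUID.
        return len(candidates['allocation_requests']), list(allocations)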
diff --git a/tempest/lib/services/volume/v1/types_client.py b/tempest/lib/services/volume/v1/types_client.py
index 58a80b7..da9eb8b 100644
--- a/tempest/lib/services/volume/v1/types_client.py
+++ b/tempest/lib/services/volume/v1/types_client.py
@@ -149,10 +149,11 @@
extra_specs):
"""Update a volume_type extra spec.
- volume_type_id: Id of volume_type.
- extra_spec_name: Name of the extra spec to be updated.
- extra_spec: A dictionary of with key as extra_spec_name and the
- updated value.
+ :param volume_type_id: Id of volume_type.
+ :param extra_spec_name: Name of the extra spec to be updated.
+ :param extra_specs: A dictionary with the key as extra_spec_name and
+ the updated value.
+
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/block-storage/v2/#update-extra-specs-for-a-volume-type
diff --git a/tempest/lib/services/volume/v3/backups_client.py b/tempest/lib/services/volume/v3/backups_client.py
index f2d2d21..fb64333 100644
--- a/tempest/lib/services/volume/v3/backups_client.py
+++ b/tempest/lib/services/volume/v3/backups_client.py
@@ -104,7 +104,12 @@
return rest_client.ResponseBody(resp, body)
def import_backup(self, **kwargs):
- """Import backup metadata record."""
+ """Import backup metadata record.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/index.html#import-a-backup
+ """
post_body = json.dumps({'backup-record': kwargs})
resp, body = self.post("backups/import_record", post_body)
body = json.loads(body)
diff --git a/tempest/lib/services/volume/v3/qos_client.py b/tempest/lib/services/volume/v3/qos_client.py
index 8f4d37f..5205590 100644
--- a/tempest/lib/services/volume/v3/qos_client.py
+++ b/tempest/lib/services/volume/v3/qos_client.py
@@ -14,6 +14,7 @@
from oslo_serialization import jsonutils as json
+from tempest.lib.api_schema.response.volume import qos as schema
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
@@ -45,15 +46,15 @@
"""
post_body = json.dumps({'qos_specs': kwargs})
resp, body = self.post('qos-specs', post_body)
- self.expected_success(200, resp.status)
body = json.loads(body)
+ self.validate_response(schema.show_qos, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_qos(self, qos_id, force=False):
"""Delete the specified QoS specification."""
resp, body = self.delete(
"qos-specs/%s?force=%s" % (qos_id, force))
- self.expected_success(202, resp.status)
+ self.validate_response(schema.delete_qos, resp, body)
return rest_client.ResponseBody(resp, body)
def list_qos(self):
@@ -61,7 +62,7 @@
url = 'qos-specs'
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.list_qos, resp, body)
return rest_client.ResponseBody(resp, body)
def show_qos(self, qos_id):
@@ -69,7 +70,7 @@
url = "qos-specs/%s" % qos_id
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.show_qos, resp, body)
return rest_client.ResponseBody(resp, body)
def set_qos_key(self, qos_id, **kwargs):
@@ -82,7 +83,7 @@
put_body = json.dumps({"qos_specs": kwargs})
resp, body = self.put('qos-specs/%s' % qos_id, put_body)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.set_qos_key, resp, body)
return rest_client.ResponseBody(resp, body)
def unset_qos_key(self, qos_id, keys):
@@ -96,7 +97,7 @@
"""
put_body = json.dumps({'keys': keys})
resp, body = self.put('qos-specs/%s/delete_keys' % qos_id, put_body)
- self.expected_success(202, resp.status)
+ self.validate_response(schema.unset_qos_key, resp, body)
return rest_client.ResponseBody(resp, body)
def associate_qos(self, qos_id, vol_type_id):
@@ -104,7 +105,7 @@
url = "qos-specs/%s/associate" % qos_id
url += "?vol_type_id=%s" % vol_type_id
resp, body = self.get(url)
- self.expected_success(202, resp.status)
+ self.validate_response(schema.associate_qos, resp, body)
return rest_client.ResponseBody(resp, body)
def show_association_qos(self, qos_id):
@@ -112,7 +113,7 @@
url = "qos-specs/%s/associations" % qos_id
resp, body = self.get(url)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.validate_response(schema.show_association_qos, resp, body)
return rest_client.ResponseBody(resp, body)
def disassociate_qos(self, qos_id, vol_type_id):
@@ -120,12 +121,12 @@
url = "qos-specs/%s/disassociate" % qos_id
url += "?vol_type_id=%s" % vol_type_id
resp, body = self.get(url)
- self.expected_success(202, resp.status)
+ self.validate_response(schema.disassociate_qos, resp, body)
return rest_client.ResponseBody(resp, body)
def disassociate_all_qos(self, qos_id):
"""Disassociate the specified QoS with all associations."""
url = "qos-specs/%s/disassociate_all" % qos_id
resp, body = self.get(url)
- self.expected_success(202, resp.status)
+ self.validate_response(schema.disassociate_all_qos, resp, body)
return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v3/types_client.py b/tempest/lib/services/volume/v3/types_client.py
index 13ecd15..1405785 100644
--- a/tempest/lib/services/volume/v3/types_client.py
+++ b/tempest/lib/services/volume/v3/types_client.py
@@ -153,6 +153,7 @@
:param extra_spec_name: Name of the extra spec to be updated.
:param extra_specs: A dictionary of with key as extra_spec_name and the
updated value.
+
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/block-storage/v3/index.html#update-extra-specification-for-volume-type
diff --git a/tempest/lib/services/volume/v3/volumes_client.py b/tempest/lib/services/volume/v3/volumes_client.py
index 11c5767..fec2950 100644
--- a/tempest/lib/services/volume/v3/volumes_client.py
+++ b/tempest/lib/services/volume/v3/volumes_client.py
@@ -182,7 +182,7 @@
:param id: A checked resource id
:raises lib_exc.DeleteErrorException: If the specified resource is on
- the status the delete was failed.
+ the status where the delete failed.
"""
try:
volume = self.show_volume(id)
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index dff50a9..dbb9acc 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -225,8 +225,12 @@
if size is None:
size = CONF.volume.volume_size
if imageRef:
- image = self.compute_images_client.show_image(imageRef)['image']
- min_disk = image.get('minDisk')
+ if CONF.image_feature_enabled.api_v1:
+ resp = self.image_client.check_image(imageRef)
+ image = common_image.get_image_meta_from_headers(resp)
+ else:
+ image = self.image_client.show_image(imageRef)
+ min_disk = image.get('min_disk')
size = max(size, min_disk)
if name is None:
name = data_utils.rand_name(self.__class__.__name__ + "-volume")
@@ -374,12 +378,12 @@
server=None):
"""Get a SSH client to a remote server
- @param ip_address the server floating or fixed IP address to use
- for ssh validation
- @param username name of the Linux account on the remote server
- @param private_key the SSH private key to use
- @param server: server dict, used for debugging purposes
- @return a RemoteClient object
+ :param ip_address: the server floating or fixed IP address to use
+ for ssh validation
+ :param username: name of the Linux account on the remote server
+ :param private_key: the SSH private key to use
+ :param server: server dict, used for debugging purposes
+ :return: a RemoteClient object
"""
if username is None:
diff --git a/tempest/scenario/test_encrypted_cinder_volumes.py b/tempest/scenario/test_encrypted_cinder_volumes.py
index 8c210d5..008d1ae 100644
--- a/tempest/scenario/test_encrypted_cinder_volumes.py
+++ b/tempest/scenario/test_encrypted_cinder_volumes.py
@@ -29,11 +29,12 @@
For both LUKS and cryptsetup encryption types, this test performs
the following:
- * Creates an image in Glance
- * Boots an instance from the image
- * Creates an encryption type (as admin)
- * Creates a volume of that encryption type (as a regular user)
- * Attaches and detaches the encrypted volume to the instance
+
+ * Creates an image in Glance
+ * Boots an instance from the image
+ * Creates an encryption type (as admin)
+ * Creates a volume of that encryption type (as a regular user)
+ * Attaches and detaches the encrypted volume to the instance
"""
@classmethod
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index 8827610..4be2b29 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -230,6 +230,33 @@
self.assertNotEqual(src_host, dst_host)
+ @decorators.idempotent_id('03fd1562-faad-11e7-9ea0-fa163e65f5ce')
+ @testtools.skipUnless(CONF.compute_feature_enabled.live_migration,
+ 'Live migration is not available.')
+ @testtools.skipUnless(CONF.compute.min_compute_nodes > 1,
+ 'Less than 2 compute nodes, skipping multinode '
+ 'tests.')
+ @decorators.attr(type='slow')
+ @utils.services('compute', 'network')
+ def test_server_connectivity_live_migration(self):
+ keypair = self.create_keypair()
+ server = self._setup_server(keypair)
+ floating_ip = self._setup_network(server, keypair)
+ self._wait_server_status_and_check_network_connectivity(
+ server, keypair, floating_ip)
+
+ block_migration = (CONF.compute_feature_enabled.
+ block_migration_for_live_migration)
+ self.admin_servers_client.live_migrate_server(
+ server['id'], host=None, block_migration=block_migration,
+ disk_over_commit=False)
+ waiters.wait_for_server_status(self.servers_client,
+ server['id'], 'ACTIVE')
+
+ self._wait_server_status_and_check_network_connectivity(
+ server, keypair, floating_ip)
+
+ @decorators.skip_because(bug='1788403')
@decorators.idempotent_id('25b188d7-0183-4b1e-a11d-15840c8e2fd6')
@testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
'Cold migration is not available.')
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index c1132cf..7992585 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -374,39 +374,37 @@
def test_network_basic_ops(self):
"""Basic network operation test
- For a freshly-booted VM with an IP address ("port") on a given
- network:
+ For a freshly-booted VM with an IP address ("port") on a given network:
- the Tempest host can ping the IP address. This implies, but
- does not guarantee (see the ssh check that follows), that the
- VM has been assigned the correct IP address and has
- connectivity to the Tempest host.
+ does not guarantee (see the ssh check that follows), that the
+ VM has been assigned the correct IP address and has
+ connectivity to the Tempest host.
- the Tempest host can perform key-based authentication to an
- ssh server hosted at the IP address. This check guarantees
- that the IP address is associated with the target VM.
+ ssh server hosted at the IP address. This check guarantees
+ that the IP address is associated with the target VM.
- the Tempest host can ssh into the VM via the IP address and
- successfully execute the following:
+ successfully execute the following:
- - ping an external IP address, implying external connectivity.
+ - ping an external IP address, implying external connectivity.
- - ping an external hostname, implying that dns is correctly
- configured.
+ - ping an external hostname, implying that dns is correctly
+ configured.
- - ping an internal IP address, implying connectivity to another
- VM on the same network.
+ - ping an internal IP address, implying connectivity to another
+ VM on the same network.
- detach the floating-ip from the VM and verify that it becomes
- unreachable
+ unreachable
- associate detached floating ip to a new VM and verify connectivity.
- VMs are created with unique keypair so connectivity also asserts that
- floating IP is associated with the new VM instead of the old one
+ VMs are created with unique keypair so connectivity also asserts
+ that floating IP is associated with the new VM instead of the old
+ one
Verifies that floating IP status is updated correctly after each change
-
-
"""
self._setup_network_and_servers()
self._check_public_network_connectivity(should_connect=True)
@@ -445,30 +443,25 @@
def test_connectivity_between_vms_on_different_networks(self):
"""Test connectivity between VMs on different networks
- For a freshly-booted VM with an IP address ("port") on a given
- network:
+ For a freshly-booted VM with an IP address ("port") on a given network:
- the Tempest host can ping the IP address.
-
- the Tempest host can ssh into the VM via the IP address and
- successfully execute the following:
+ successfully execute the following:
- - ping an external IP address, implying external connectivity.
-
- - ping an external hostname, implying that dns is correctly
- configured.
-
- - ping an internal IP address, implying connectivity to another
- VM on the same network.
+ - ping an external IP address, implying external connectivity.
+ - ping an external hostname, implying that dns is correctly
+ configured.
+ - ping an internal IP address, implying connectivity to another
+ VM on the same network.
- Create another network on the same tenant with subnet, create
- an VM on the new network.
+ a VM on the new network.
- - Ping the new VM from previous VM failed since the new network
- was not attached to router yet.
-
- - Attach the new network to the router, Ping the new VM from
- previous VM succeed.
+ - Ping the new VM from the previous VM; this fails since the new
+ network is not attached to the router yet.
+ - Attach the new network to the router; pinging the new VM from the
+ previous VM now succeeds.
"""
self._setup_network_and_servers()
@@ -476,9 +469,14 @@
self._check_network_internal_connectivity(network=self.network)
self._check_network_external_connectivity()
self._create_new_network(create_gateway=True)
- self._create_server(self.new_net)
- self._check_network_internal_connectivity(network=self.new_net,
- should_connect=False)
+ new_server = self._create_server(self.new_net)
+ new_server_ips = [addr['addr'] for addr in
+ new_server['addresses'][self.new_net['name']]]
+
+ # Assert that pinging the new VM fails since the new network is not
+ # connected to a router
+ self._check_server_connectivity(self.floating_ip_tuple.floating_ip,
+ new_server_ips, should_connect=False)
router_id = self.router['id']
self.routers_client.add_router_interface(
router_id, subnet_id=self.new_subnet['id'])
@@ -486,8 +484,9 @@
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.routers_client.remove_router_interface,
router_id, subnet_id=self.new_subnet['id'])
- self._check_network_internal_connectivity(network=self.new_net,
- should_connect=True)
+
+ self._check_server_connectivity(self.floating_ip_tuple.floating_ip,
+ new_server_ips, should_connect=True)
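For context on the new assertions above: _check_server_connectivity logs into the VM behind the floating IP and pings each address of the new server from inside the tenant network. A minimal sketch of that contract (illustrative only; the ping callable stands in for the real SSH-based helper in the scenario manager):

    def check_server_connectivity(ping, address_list, should_connect=True):
        # ping is assumed to be a callable that pings one address from
        # inside the VM holding the floating IP and returns True on success.
        for ip in address_list:
            if ping(ip) != should_connect:
                raise AssertionError(
                    "Unexpected connectivity to %s (expected reachable=%s)"
                    % (ip, should_connect))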
@decorators.idempotent_id('c5adff73-e961-41f1-b4a9-343614f18cfa')
@testtools.skipUnless(CONF.compute_feature_enabled.interface_attach,
@@ -555,34 +554,42 @@
def test_subnet_details(self):
"""Tests that subnet's extra configuration details are affecting VMs.
- This test relies on non-shared, isolated tenant networks.
+ This test relies on non-shared, isolated tenant networks.
- NOTE: Neutron subnets push data to servers via dhcp-agent, so any
- update in subnet requires server to actively renew its DHCP lease.
+ NOTE: Neutron subnets push data to servers via dhcp-agent, so any
+ update in subnet requires server to actively renew its DHCP lease.
- 1. Configure subnet with dns nameserver
- 2. retrieve the VM's configured dns and verify it matches the one
- configured for the subnet.
- 3. update subnet's dns
- 4. retrieve the VM's configured dns and verify it matches the new one
- configured for the subnet.
+ 1. Configure subnet with dns nameserver
+ 2. retrieve the VM's configured dns and verify it matches the one
+ configured for the subnet.
+ 3. update subnet's dns
+ 4. retrieve the VM's configured dns and verify it matches the new one
+ configured for the subnet.
- TODO(yfried): add host_routes
+ TODO(yfried): add host_routes
- any resolution check would be testing either:
- * l3 forwarding (tested in test_network_basic_ops)
- * Name resolution of an external DNS nameserver - out of scope for
- Tempest
+ any resolution check would be testing either:
+
+ * l3 forwarding (tested in test_network_basic_ops)
+ * Name resolution of an external DNS nameserver - out of scope for
+ Tempest
"""
# this test check only updates (no actual resolution) so using
# arbitrary ip addresses as nameservers, instead of parsing CONF
initial_dns_server = '1.2.3.4'
alt_dns_server = '9.8.7.6'
- # renewal should be immediate.
- # Timeouts are suggested by salvatore-orlando in
+ # Original timeouts are suggested by salvatore-orlando in
# https://bugs.launchpad.net/neutron/+bug/1412325/comments/3
- renew_delay = CONF.network.build_interval
+ #
+ # Compared to that renew_delay was increased, because
+ # busybox's udhcpc accepts SIGUSR1 as a renew request. Internally
+ # it goes into RENEW_REQUESTED state. If it receives a 2nd SIGUSR1
+ # signal while in that state then it calls the deconfig script
+ # ("/sbin/cirros-dhcpc deconfig" in sufficiently new cirros versions)
+ # which leads to the address being transiently deconfigured, which
+ # is unwanted in our case.
+ renew_delay = 3 * CONF.network.build_interval
renew_timeout = CONF.network.build_timeout
self._setup_network_and_servers(dns_nameservers=[initial_dns_server])
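The tripled renew_delay only matters because the guest must be nudged to renew its DHCP lease between polls. A rough sketch of the renew-and-poll loop implied by the comment above (send_renew_signal and get_dns_servers are placeholder callables, not the real test helpers):

    import time

    def wait_for_dns_update(get_dns_servers, send_renew_signal,
                            expected_dns, renew_delay, renew_timeout):
        # Send one renew request, then wait at least renew_delay before the
        # next one so udhcpc has left RENEW_REQUESTED state; a second
        # SIGUSR1 in that state would transiently deconfigure the address.
        deadline = time.time() + renew_timeout
        while time.time() < deadline:
            send_renew_signal()  # e.g. SIGUSR1 to udhcpc inside the guest
            time.sleep(renew_delay)
            if expected_dns in get_dns_servers():
                return True
        return False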
@@ -742,7 +749,7 @@
2. Remove router from all l3-agents
3. Verify connectivity is down
4. Assign router to new l3-agent (or old one if no new agent is
- available)
+ available)
5. Verify connectivity
"""
@@ -823,7 +830,8 @@
prevents traffic to pass through the VM. Anti-spoof rules are not
required in cases where the VM routes traffic through it.
- The test steps are :
+ The test steps are:
+
1. Create a new network.
2. Connect (hotplug) the VM to a new network.
3. Check the VM can ping a server on the new network ("peer")
@@ -832,7 +840,7 @@
spoofed interface (VM cannot ping the peer).
6. Disable port-security of the spoofed port- set the flag to false.
7. Retest 3rd step and check that the Security Group allows pings via
- the spoofed interface.
+ the spoofed interface.
"""
spoof_mac = "00:00:00:00:00:01"
diff --git a/tempest/scenario/test_network_v6.py b/tempest/scenario/test_network_v6.py
index e4e39c3..438ee01 100644
--- a/tempest/scenario/test_network_v6.py
+++ b/tempest/scenario/test_network_v6.py
@@ -12,13 +12,18 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+
+from oslo_log import log as logging
+
from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
+from tempest.lib import exceptions
from tempest.scenario import manager
CONF = config.CONF
+LOG = logging.getLogger(__name__)
class TestGettingAddress(manager.NetworkScenarioTest):
@@ -154,8 +159,31 @@
% (network_id, ports))
mac6 = ports[0]
nic = ssh.get_nic_name_by_mac(mac6)
+ # NOTE(slaweq): on RHEL based OS ifcfg file for new interface is
+ # needed to make IPv6 working on it, so if
+ # /etc/sysconfig/network-scripts directory exists ifcfg-%(nic)s file
+ # should be added in it
+ if self._sysconfig_network_scripts_dir_exists(ssh):
+ try:
+ ssh.exec_command(
+ 'echo -e "DEVICE=%(nic)s\\nIPV6INIT=yes" | '
+ 'sudo tee /etc/sysconfig/network-scripts/ifcfg-%(nic)s; '
+ 'sudo /sbin/service network restart' % {'nic': nic})
+ except exceptions.SSHExecCommandFailed as e:
+ # NOTE(slaweq): Sometimes this SSH command fails because of
+ # an error from the network manager in the guest OS.
+ # Even then, running "ip link set up" below is fine and the
+ # IP address should be configured properly.
+ LOG.debug("Error during restarting %(nic)s interface on "
+ "instance. Error message: %(error)s",
+ {'nic': nic, 'error': e})
ssh.exec_command("sudo ip link set %s up" % nic)
+ def _sysconfig_network_scripts_dir_exists(self, ssh):
+ return "False" not in ssh.exec_command(
+ 'test -d /etc/sysconfig/network-scripts/ || echo "False"')
+
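Two details of the RHEL workaround above are easy to miss: the helper treats the directory as present unless the shell echoes "False" (i.e. only when "test -d" fails), and the generated ifcfg file is minimal. A small sketch of the equivalent check plus, in comments, what lands on the guest for an example nic name:

    def sysconfig_dir_exists(exec_command):
        # exec_command is assumed to run a shell command on the guest and
        # return its stdout, like the SSH client's exec_command above.
        out = exec_command(
            'test -d /etc/sysconfig/network-scripts/ || echo "False"')
        return "False" not in out

    # For nic = 'eth1' the workaround writes roughly:
    #   /etc/sysconfig/network-scripts/ifcfg-eth1 containing
    #       DEVICE=eth1
    #       IPV6INIT=yes
    # then restarts networking; even if that restart fails, the fallback
    # "sudo ip link set eth1 up" still runs.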
def _prepare_and_test(self, address6_mode, n_subnets6=1, dualnet=False):
net_list = self.prepare_network(address6_mode=address6_mode,
n_subnets6=n_subnets6,
@@ -252,6 +280,7 @@
dualnet=True)
@decorators.idempotent_id('9178ad42-10e4-47e9-8987-e02b170cc5cd')
+ @decorators.attr(type='slow')
@utils.services('compute', 'network')
def test_dualnet_multi_prefix_slaac(self):
self._prepare_and_test(address6_mode='slaac', n_subnets6=2,
diff --git a/tempest/scenario/test_object_storage_basic_ops.py b/tempest/scenario/test_object_storage_basic_ops.py
index cbe321e..b635ca0 100644
--- a/tempest/scenario/test_object_storage_basic_ops.py
+++ b/tempest/scenario/test_object_storage_basic_ops.py
@@ -24,15 +24,15 @@
def test_swift_basic_ops(self):
"""Test swift basic ops.
- * get swift stat.
- * create container.
- * upload a file to the created container.
- * list container's objects and assure that the uploaded file is
- present.
- * download the object and check the content
- * delete object from container.
- * list container's objects and assure that the deleted file is gone.
- * delete a container.
+ * get swift stat.
+ * create container.
+ * upload a file to the created container.
+ * list container's objects and assure that the uploaded file is
+ present.
+ * download the object and check the content
+ * delete object from container.
+ * list container's objects and assure that the deleted file is gone.
+ * delete a container.
"""
self.get_swift_stat()
container_name = self.create_container()
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index 28a2d64..2b7926a 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -63,28 +63,28 @@
a. a security group open to incoming ssh connection
b. a VM with a floating ip
5. create a general empty security group (same as "default", but
- without rules allowing in-tenant traffic)
+ without rules allowing in-tenant traffic)
tests:
1. _verify_network_details
2. _verify_mac_addr: for each access point verify that
- (subnet, fix_ip, mac address) are as defined in the port list
+ (subnet, fix_ip, mac address) are as defined in the port list
3. _test_in_tenant_block: test that in-tenant traffic is disabled
- without rules allowing it
+ without rules allowing it
4. _test_in_tenant_allow: test that in-tenant traffic is enabled
- once an appropriate rule has been created
+ once an appropriate rule has been created
5. _test_cross_tenant_block: test that cross-tenant traffic is disabled
- without a rule allowing it on destination tenant
+ without a rule allowing it on destination tenant
6. _test_cross_tenant_allow:
* test that cross-tenant traffic is enabled once an appropriate
- rule has been created on destination tenant.
+ rule has been created on destination tenant.
* test that reverse traffic is still blocked
* test that reverse traffic is enabled once an appropriate rule has
- been created on source tenant
- 7._test_port_update_new_security_group:
- * test that traffic is blocked with default security group
- * test that traffic is enabled after updating port with new security
- group having appropriate rule
+ been created on source tenant
+ 7. _test_port_update_new_security_group:
+ * test that traffic is blocked with default security group
+ * test that traffic is enabled after updating port with new
+ security group having appropriate rule
8. _test_multiple_security_groups: test multiple security groups can be
associated with the vm
@@ -93,11 +93,13 @@
2. Public network is defined and reachable from the Tempest host
3. Public router can either be:
* defined, in which case all tenants networks can connect directly
- to it, and cross tenant check will be done on the private IP of the
- destination tenant
+ to it, and cross tenant check will be done on the private IP of
+ the destination tenant
+
or
+
* not defined (empty string), in which case each tenant will have
- its own router connected to the public network
+ its own router connected to the public network
"""
credentials = ['primary', 'alt', 'admin']
diff --git a/tempest/test.py b/tempest/test.py
index f2babbb..c3c58dc 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -259,6 +259,7 @@
based on the result of an API call are discouraged.
The following checks are implemented in `test.py` already:
+
- check that alt credentials are available when requested by the test
- check that admin credentials are available when requested by the test
- check that the identity version specified by the test is marked as
@@ -310,6 +311,7 @@
`os_[type]`:
Valid values in `credentials` are:
+
- 'primary':
A normal user is provisioned.
It can be used only once. Multiple entries will be ignored.
@@ -581,7 +583,7 @@
def setUp(self):
super(BaseTestCase, self).setUp()
if not self.__setupclass_called:
- raise RuntimeError("setUpClass does not calls the super's"
+ raise RuntimeError("setUpClass does not calls the super's "
"setUpClass in the " +
self.__class__.__name__)
at_exit_set.add(self.__class__)
diff --git a/tempest/tests/api/compute/test_base.py b/tempest/tests/api/compute/test_base.py
index 47f4ad6..1593464 100644
--- a/tempest/tests/api/compute/test_base.py
+++ b/tempest/tests/api/compute/test_base.py
@@ -131,8 +131,13 @@
self.assertIn(fault, six.text_type(ex))
else:
self.assertNotIn(fault, six.text_type(ex))
+ if compute_base.BaseV2ComputeTest.is_requested_microversion_compatible(
+ '2.35'):
+ status = 'ACTIVE'
+ else:
+ status = 'active'
wait_for_image_status.assert_called_once_with(
- compute_images_client, image_id, 'active')
+ compute_images_client, image_id, status)
servers_client.show_server.assert_called_once_with(
mock.sentinel.server_id)
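A compact view of the branch introduced above: requests at compute microversion 2.35 or lower still go through the nova image proxy, which reports the uppercase status, while newer requests use glance v2 and its lowercase status (this rationale is inferred from the API history, not stated in the patch):

    def expected_image_status(microversion_at_most_2_35):
        # Mirrors the added test branch: 'ACTIVE' for the legacy compute
        # image proxy, 'active' for glance v2.
        return 'ACTIVE' if microversion_at_most_2_35 else 'active'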
diff --git a/tempest/tests/cmd/test_account_generator.py b/tempest/tests/cmd/test_account_generator.py
index fd9af08..b349bba 100644
--- a/tempest/tests/cmd/test_account_generator.py
+++ b/tempest/tests/cmd/test_account_generator.py
@@ -106,6 +106,8 @@
cp = account_generator.get_credential_provider(self.opts)
admin_creds = cp.default_admin_creds
self.assertEqual(self.opts.os_tenant_name, admin_creds.tenant_name)
+ self.assertEqual(self.opts.os_username, admin_creds.username)
+ self.assertEqual(self.opts.os_password, admin_creds.password)
class TestAccountGeneratorV3(TestAccountGeneratorV2):
@@ -208,9 +210,9 @@
resources = account_generator.generate_resources(
self.cred_provider, admin=True)
resource_types = [k for k, _ in resources]
- # all options on, expect six credentials
- self.assertEqual(6, len(resources))
- # Ensure create_user was invoked 6 times (6 distinct users)
+ # all options on, expect five credentials
+ self.assertEqual(5, len(resources))
+ # Ensure create_user was invoked 5 times (5 distinct users)
self.assertEqual(5, self.user_create_fixture.mock.call_count)
self.assertIn('primary', resource_types)
self.assertIn('alt', resource_types)
@@ -222,6 +224,30 @@
self.assertIsNotNone(resource[1].router)
self.assertIsNotNone(resource[1].subnet)
+ def test_generate_resources_swift_no_admin(self):
+ cfg.CONF.set_default('swift', True, group='service_available')
+ cfg.CONF.set_default('operator_role', 'fake_operator',
+ group='object-storage')
+ cfg.CONF.set_default('reseller_admin_role', 'fake_reseller',
+ group='object-storage')
+ resources = account_generator.generate_resources(
+ self.cred_provider, admin=False)
+ resource_types = [k for k, _ in resources]
+ # No Admin, swift, expect four credentials only
+ self.assertEqual(4, len(resources))
+ # Ensure create_user was invoked 4 times (4 distinct users)
+ self.assertEqual(4, self.user_create_fixture.mock.call_count)
+ self.assertIn('primary', resource_types)
+ self.assertIn('alt', resource_types)
+ self.assertNotIn('admin', resource_types)
+ self.assertIn(['fake_operator'], resource_types)
+ self.assertIn(['fake_reseller'], resource_types)
+ self.assertNotIn(['fake_owner'], resource_types)
+ for resource in resources:
+ self.assertIsNotNone(resource[1].network)
+ self.assertIsNotNone(resource[1].router)
+ self.assertIsNotNone(resource[1].subnet)
+
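The assertIn calls with list arguments above depend on the shape of what generate_resources returns: (name, credentials) pairs where the name is a plain string for the standard credential types and a list of role names for the role-based ones. A minimal illustration of that membership check (credentials replaced by None):

    resources = [
        ('primary', None),
        ('alt', None),
        (['fake_operator'], None),
        (['fake_reseller'], None),
    ]
    resource_types = [k for k, _ in resources]

    assert 'primary' in resource_types           # plain string entry
    assert ['fake_operator'] in resource_types   # role-list entry
    assert ['fake_owner'] not in resource_types  # no admin, so no owner role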
class TestGenerateResourcesV3(TestGenerateResourcesV2):
@@ -267,14 +293,14 @@
# Ordered args in [0], keyword args in [1]
accounts, f = yaml_dump_mock.call_args[0]
self.assertEqual(handle, f)
- self.assertEqual(6, len(accounts))
+ self.assertEqual(5, len(accounts))
if self.domain_is_in:
self.assertIn('domain_name', accounts[0].keys())
else:
self.assertNotIn('domain_name', accounts[0].keys())
self.assertEqual(1, len([x for x in accounts if
x.get('types') == ['admin']]))
- self.assertEqual(3, len([x for x in accounts if 'roles' in x]))
+ self.assertEqual(2, len([x for x in accounts if 'roles' in x]))
for account in accounts:
self.assertIn('resources', account)
self.assertIn('network', account.get('resources'))
@@ -298,14 +324,14 @@
# Ordered args in [0], keyword args in [1]
accounts, f = yaml_dump_mock.call_args[0]
self.assertEqual(handle, f)
- self.assertEqual(6, len(accounts))
+ self.assertEqual(5, len(accounts))
if self.domain_is_in:
self.assertIn('domain_name', accounts[0].keys())
else:
self.assertNotIn('domain_name', accounts[0].keys())
self.assertEqual(1, len([x for x in accounts if
x.get('types') == ['admin']]))
- self.assertEqual(3, len([x for x in accounts if 'roles' in x]))
+ self.assertEqual(2, len([x for x in accounts if 'roles' in x]))
for account in accounts:
self.assertIn('resources', account)
self.assertIn('network', account.get('resources'))
diff --git a/tempest/tests/cmd/test_cleanup_services.py b/tempest/tests/cmd/test_cleanup_services.py
new file mode 100644
index 0000000..495d127
--- /dev/null
+++ b/tempest/tests/cmd/test_cleanup_services.py
@@ -0,0 +1,563 @@
+# Copyright 2018 AT&T Corporation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+
+from oslo_serialization import jsonutils as json
+from tempest import clients
+from tempest.cmd import cleanup_service
+from tempest import config
+from tempest.tests import base
+from tempest.tests import fake_config
+from tempest.tests.lib import fake_credentials
+from tempest.tests.lib import fake_http
+
+
+class MockFunctionsBase(base.TestCase):
+
+ def _create_response(self, body, status, headers):
+ if status:
+ if body:
+ body = json.dumps(body)
+ resp = fake_http.fake_http_response(headers, status=status), body
+ return resp
+ else:
+ return body
+
+ def _create_fixtures(self, fixtures_to_make):
+ mocked_fixtures = []
+ for fixture in fixtures_to_make:
+ func, body, status = fixture
+ mocked_response = self._create_response(body, status, None)
+ if mocked_response == 'error':
+ mocked_func = self.useFixture(fixtures.MockPatch(
+ func, side_effect=Exception("error")))
+ else:
+ mocked_func = self.useFixture(fixtures.MockPatch(
+ func, return_value=mocked_response))
+ mocked_fixtures.append(mocked_func)
+ return mocked_fixtures
+
+ def run_function_with_mocks(self, function_to_run, functions_to_mock):
+ """Mock a service client function for testing.
+
+ :param function_to_run: The service client function to call.
+ :param functions_to_mock: a list of tuples containing the function
+ to mock, the response body, and the response status.
+ EX:
+ ('tempest.lib.common.rest_client.RestClient.get',
+ {'users': ['']},
+ 200)
+ """
+ mocked_fixtures = self._create_fixtures(functions_to_mock)
+ func_return = function_to_run()
+ return func_return, mocked_fixtures
+
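For orientation, a minimal sketch of how a test built on this helper drives a service call through mocked REST responses, using the (function, body, status) tuple format described in the docstring (self.fake_service is hypothetical; the concrete service classes below show the real wiring):

    def test_example_delete(self):
        # Illustrative only: GET returns a fake listing, DELETE returns 204.
        mocks = [
            ('tempest.lib.common.rest_client.RestClient.get',
             {'users': [{'id': 'fake-id', 'name': 'fake-user'}]}, 200),
            ('tempest.lib.common.rest_client.RestClient.delete', None, 204),
        ]
        resp, mocked = self.run_function_with_mocks(
            self.fake_service.run, mocks)
        for fixture in mocked:
            fixture.mock.assert_called_once()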
+
+class BaseCmdServiceTests(MockFunctionsBase):
+
+ def setUp(self):
+ super(BaseCmdServiceTests, self).setUp()
+ self.useFixture(fake_config.ConfigFixture())
+ self.patchobject(config, 'TempestConfigPrivate',
+ fake_config.FakePrivate)
+ self.useFixture(fixtures.MockPatch(
+ 'tempest.cmd.cleanup_service._get_network_id',
+ return_value=''))
+ cleanup_service.init_conf()
+ self.conf_values = {"flavors": cleanup_service.CONF_FLAVORS[0],
+ "images": cleanup_service.CONF_IMAGES[0],
+ "projects": cleanup_service.CONF_PROJECTS[0],
+ "users": cleanup_service.CONF_USERS[0],
+ }
+
+ # Static list to ensure global service saved items are not deleted
+ saved_state = {"users": {u'32rwef64245tgr20121qw324bgg': u'Lightning'},
+ "flavors": {u'42': u'm1.tiny'},
+ "images": {u'34yhwr-4t3q': u'stratus-0.3.2-x86_64-disk'},
+ "roles": {u'3efrt74r45hn': u'president'},
+ "projects": {u'f38ohgp93jj032': u'manhattan'},
+ "domains": {u'default': u'Default'}
+ }
+ # Mocked methods
+ get_method = 'tempest.lib.common.rest_client.RestClient.get'
+ delete_method = 'tempest.lib.common.rest_client.RestClient.delete'
+ log_method = 'tempest.cmd.cleanup_service.LOG.exception'
+ # Override parameters
+ service_class = 'BaseService'
+ response = None
+ service_name = 'default'
+
+ def _create_cmd_service(self, service_type, is_save_state=False,
+ is_preserve=False, is_dry_run=False):
+ creds = fake_credentials.FakeKeystoneV3Credentials()
+ os = clients.Manager(creds)
+ return getattr(cleanup_service, service_type)(
+ os,
+ is_save_state=is_save_state,
+ is_preserve=is_preserve,
+ is_dry_run=is_dry_run,
+ data={},
+ saved_state_json=self.saved_state
+ )
+
+ def _test_delete(self, mocked_fixture_tuple_list, fail=False):
+ serv = self._create_cmd_service(self.service_class)
+ resp, fixtures = self.run_function_with_mocks(
+ serv.run,
+ mocked_fixture_tuple_list,
+ )
+ for fixture in fixtures:
+ if fail is False and fixture.mock.return_value == 'exception':
+ fixture.mock.assert_not_called()
+ elif self.service_name in self.saved_state.keys():
+ fixture.mock.assert_called_once()
+ for key in self.saved_state[self.service_name].keys():
+ self.assertNotIn(key, fixture.mock.call_args[0][0])
+ else:
+ fixture.mock.assert_called_once()
+ self.assertFalse(serv.data)
+
+ def _test_dry_run_true(self, mocked_fixture_tuple_list):
+ serv = self._create_cmd_service(self.service_class, is_dry_run=True)
+ _, fixtures = self.run_function_with_mocks(
+ serv.run,
+ mocked_fixture_tuple_list
+ )
+ for fixture in fixtures:
+ if fixture.mock.return_value == 'delete':
+ fixture.mock.assert_not_called()
+ elif self.service_name in self.saved_state.keys():
+ fixture.mock.assert_called_once()
+ for key in self.saved_state[self.service_name].keys():
+ self.assertNotIn(key, fixture.mock.call_args[0][0])
+ else:
+ fixture.mock.assert_called_once()
+
+ def _test_saved_state_true(self, mocked_fixture_tuple_list):
+ serv = self._create_cmd_service(self.service_class, is_save_state=True)
+ _, fixtures = self.run_function_with_mocks(
+ serv.run,
+ mocked_fixture_tuple_list
+ )
+ for item in self.response[self.service_name]:
+ self.assertIn(item['id'],
+ serv.data[self.service_name])
+ for fixture in fixtures:
+ fixture.mock.assert_called_once()
+
+ def _test_is_preserve_true(self, mocked_fixture_tuple_list):
+ serv = self._create_cmd_service(self.service_class, is_preserve=True)
+ resp, fixtures = self.run_function_with_mocks(
+ serv.list,
+ mocked_fixture_tuple_list
+ )
+ for fixture in fixtures:
+ fixture.mock.assert_called_once()
+ self.assertIn(resp[0], self.response[self.service_name])
+ for rsp in resp:
+ self.assertNotIn(rsp['id'], self.conf_values.values())
+ self.assertNotIn(rsp['name'], self.conf_values.values())
+
+
+class TestDomainService(BaseCmdServiceTests):
+
+ service_class = 'DomainService'
+ service_name = 'domains'
+ response = {
+ "domains": [
+ {
+ "description": "Destroy all humans",
+ "enabled": True,
+ "id": "5a75994a3",
+ "links": {
+ "self": "http://example.com/identity/v3/domains/5a75994a3"
+ },
+ "name": "Sky_net"
+ },
+ {
+ "description": "Owns users and tenants on Identity API",
+ "enabled": False,
+ "id": "default",
+ "links": {
+ "self": "http://example.com/identity/v3/domains/default"
+ },
+ "name": "Default"
+ }
+ ]
+ }
+
+ mock_update = ("tempest.lib.services.identity.v3."
+ "domains_client.DomainsClient.update_domain")
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None),
+ (self.mock_update, 'update', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None),
+ (self.mock_update, 'update', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+
+class TestProjectsService(BaseCmdServiceTests):
+
+ service_class = 'ProjectService'
+ service_name = 'projects'
+ response = {
+ "projects": [
+ {
+ "is_domain": False,
+ "description": None,
+ "domain_id": "default",
+ "enabled": True,
+ "id": "f38ohgp93jj032",
+ "links": {
+ "self": "http://example.com/identity/v3/projects"
+ "/f38ohgp93jj032"
+ },
+ "name": "manhattan",
+ "parent_id": None
+ },
+ {
+ "is_domain": False,
+ "description": None,
+ "domain_id": "default",
+ "enabled": True,
+ "id": "098f89d3292ri4jf4",
+ "links": {
+ "self": "http://example.com/identity/v3/projects"
+ "/098f89d3292ri4jf4"
+ },
+ "name": "Apollo",
+ "parent_id": None
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+ def test_preserve_list(self):
+ self.response['projects'].append(
+ {
+ "is_domain": False,
+ "description": None,
+ "domain_id": "default",
+ "enabled": True,
+ "id": "r343q98h09f3092",
+ "links": {
+ "self": "http://example.com/identity/v3/projects"
+ "/r343q98h09f3092"
+ },
+ "name": cleanup_service.CONF_PROJECTS[0],
+ "parent_id": None
+ })
+ self._test_is_preserve_true([(self.get_method, self.response, 200)])
+
+
+class TestImagesService(BaseCmdServiceTests):
+
+ service_class = 'ImageService'
+ service_name = 'images'
+ response = {
+ "images": [
+ {
+ "status": "ACTIVE",
+ "name": "stratus-0.3.2-x86_64-disk",
+ "id": "34yhwr-4t3q",
+ "updated": "2014-11-03T16:40:10Z",
+ "links": [{
+ "href": "http://openstack.ex.com/v2/openstack/images/"
+ "34yhwr-4t3q",
+ "rel": "self"}],
+ "created": "2014-10-30T08:23:39Z",
+ "minDisk": 0,
+ "minRam": 0,
+ "progress": 0,
+ "metadata": {},
+ },
+ {
+ "status": "ACTIVE",
+ "name": "cirros-0.3.2-x86_64-disk",
+ "id": "1bea47ed-f6a9",
+ "updated": "2014-11-03T16:40:10Z",
+ "links": [{
+ "href": "http://openstack.ex.com/v2/openstack/images/"
+ "1bea47ed-f6a9",
+ "rel": "self"}],
+ "created": "2014-10-30T08:23:39Z",
+ "minDisk": 0,
+ "minRam": 0,
+ "progress": 0,
+ "metadata": {},
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+ def test_preserve_list(self):
+ self.response['images'].append(
+ {
+ "status": "ACTIVE",
+ "name": "cirros-0.3.2-x86_64-disk",
+ "id": cleanup_service.CONF_IMAGES[0],
+ "updated": "2014-11-03T16:40:10Z",
+ "links": [{
+ "href": "http://openstack.ex.com/v2/openstack/images/"
+ "None",
+ "rel": "self"}],
+ "created": "2014-10-30T08:23:39Z",
+ "minDisk": 0,
+ "minRam": 0,
+ "progress": 0,
+ "metadata": {},
+ })
+ self._test_is_preserve_true([(self.get_method, self.response, 200)])
+
+
+class TestFlavorService(BaseCmdServiceTests):
+
+ service_class = 'FlavorService'
+ service_name = 'flavors'
+ response = {
+ "flavors": [
+ {
+ "disk": 1,
+ "id": "42",
+ "links": [{
+ "href": "http://openstack.ex.com/v2/openstack/flavors/1",
+ "rel": "self"}, {
+ "href": "http://openstack.ex.com/openstack/flavors/1",
+ "rel": "bookmark"}],
+ "name": "m1.tiny",
+ "ram": 512,
+ "swap": 1,
+ "vcpus": 1
+ },
+ {
+ "disk": 2,
+ "id": "13",
+ "links": [{
+ "href": "http://openstack.ex.com/v2/openstack/flavors/2",
+ "rel": "self"}, {
+ "href": "http://openstack.ex.com/openstack/flavors/2",
+ "rel": "bookmark"}],
+ "name": "m1.tiny",
+ "ram": 512,
+ "swap": 1,
+ "vcpus": 1
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 202),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+ def test_preserve_list(self):
+ self.response['flavors'].append(
+ {
+ "disk": 3,
+ "id": cleanup_service.CONF_FLAVORS[0],
+ "links": [{
+ "href": "http://openstack.ex.com/v2/openstack/flavors/3",
+ "rel": "self"}, {
+ "href": "http://openstack.ex.com/openstack/flavors/3",
+ "rel": "bookmark"}],
+ "name": "m1.tiny",
+ "ram": 512,
+ "swap": 1,
+ "vcpus": 1
+ })
+ self._test_is_preserve_true([(self.get_method, self.response, 200)])
+
+
+class TestRoleService(BaseCmdServiceTests):
+
+ service_class = 'RoleService'
+ service_name = 'roles'
+ response = {
+ "roles": [
+ {
+ "domain_id": "FakeDomain",
+ "id": "3efrt74r45hn",
+ "name": "president",
+ "links": {
+ "self": "http://ex.com/identity/v3/roles/3efrt74r45hn"
+ }
+ },
+ {
+ "domain_id": 'FakeDomain',
+ "id": "39ruo5sdk040",
+ "name": "vice-p",
+ "links": {
+ "self": "http://ex.com/identity/v3/roles/39ruo5sdk040"
+ }
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+
+class TestUserService(BaseCmdServiceTests):
+
+ service_class = 'UserService'
+ service_name = 'users'
+ response = {
+ "users": [
+ {
+ "domain_id": "TempestDomain",
+ "enabled": True,
+ "id": "e812fb332456423fdv1b1320121qwe2",
+ "links": {
+ "self": "http://example.com/identity/v3/users/"
+ "e812fb332456423fdv1b1320121qwe2",
+ },
+ "name": "Thunder",
+ "password_expires_at": "3102-11-06T15:32:17.000000",
+ },
+ {
+ "domain_id": "TempestDomain",
+ "enabled": True,
+ "id": "32rwef64245tgr20121qw324bgg",
+ "links": {
+ "self": "http://example.com/identity/v3/users/"
+ "32rwef64245tgr20121qw324bgg",
+ },
+ "name": "Lightning",
+ "password_expires_at": "1893-11-06T15:32:17.000000",
+ }
+ ]
+ }
+
+ def test_delete_fail(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, 'error', None),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock, fail=True)
+
+ def test_delete_pass(self):
+ delete_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, None, 204),
+ (self.log_method, 'exception', None)]
+ self._test_delete(delete_mock)
+
+ def test_dry_run(self):
+ dry_mock = [(self.get_method, self.response, 200),
+ (self.delete_method, "delete", None)]
+ self._test_dry_run_true(dry_mock)
+
+ def test_save_state(self):
+ self._test_saved_state_true([(self.get_method, self.response, 200)])
+
+ def test_preserve_list(self):
+ self.response['users'].append(
+ {
+ "domain_id": "TempestDomain",
+ "enabled": True,
+ "id": "23ads5tg3rtrhe30121qwhyth",
+ "links": {
+ "self": "http://example.com/identity/v3/users/"
+ "23ads5tg3rtrhe30121qwhyth",
+ },
+ "name": cleanup_service.CONF_USERS[0],
+ "password_expires_at": "1893-11-06T15:32:17.000000",
+ })
+ self._test_is_preserve_true([(self.get_method, self.response, 200)])
diff --git a/tempest/tests/cmd/test_run.py b/tempest/tests/cmd/test_run.py
index f55df30..5091841 100644
--- a/tempest/tests/cmd/test_run.py
+++ b/tempest/tests/cmd/test_run.py
@@ -24,11 +24,16 @@
import six
from tempest.cmd import run
+from tempest.cmd import workspace
+from tempest import config
+from tempest.lib.common.utils import data_utils
from tempest.tests import base
DEVNULL = open(os.devnull, 'wb')
atexit.register(DEVNULL.close)
+CONF = config.CONF
+
class TestTempestRun(base.TestCase):
@@ -55,6 +60,12 @@
self.assertEqual(['i_am_a_fun_little_regex'],
self.run_cmd._build_regex(args))
+ def test__build_regex_smoke_regex(self):
+ args = mock.Mock(spec=argparse.Namespace)
+ setattr(args, "smoke", True)
+ setattr(args, 'regex', 'i_am_a_fun_little_regex')
+ self.assertEqual(['smoke'], self.run_cmd._build_regex(args))
+
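The new test pins down the precedence between --smoke and --regex. A simplified sketch of the contract it asserts (not the actual TempestRun._build_regex source):

    def build_regex(smoke, regex):
        # --smoke wins: it becomes the 'smoke' tag filter even when an
        # explicit regex is also supplied; otherwise the regex is used.
        if smoke:
            return ['smoke']
        if regex:
            return [regex]
        return None  # assumption: no filter when neither option is given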
class TestRunReturnCode(base.TestCase):
def setUp(self):
@@ -97,6 +108,27 @@
subprocess.call(['stestr', 'init'])
self.assertRunExit(['tempest', 'run', '--regex', 'passing'], 0)
+ def test_tempest_run_failing(self):
+ self.assertRunExit(['tempest', 'run', '--regex', 'failing'], 1)
+
+ def test_tempest_run_failing_with_stestr_repository(self):
+ subprocess.call(['stestr', 'init'])
+ self.assertRunExit(['tempest', 'run', '--regex', 'failing'], 1)
+
+ def test_tempest_run_blackregex_failing(self):
+ self.assertRunExit(['tempest', 'run', '--black-regex', 'failing'], 0)
+
+ def test_tempest_run_blackregex_failing_with_stestr_repository(self):
+ subprocess.call(['stestr', 'init'])
+ self.assertRunExit(['tempest', 'run', '--black-regex', 'failing'], 0)
+
+ def test_tempest_run_blackregex_passing(self):
+ self.assertRunExit(['tempest', 'run', '--black-regex', 'passing'], 1)
+
+ def test_tempest_run_blackregex_passing_with_stestr_repository(self):
+ subprocess.call(['stestr', 'init'])
+ self.assertRunExit(['tempest', 'run', '--black-regex', 'passing'], 1)
+
def test_tempest_run_fails(self):
self.assertRunExit(['tempest', 'run'], 1)
@@ -140,7 +172,57 @@
'--regex', 'passing'], 0)
+class TestConfigPathCheck(base.TestCase):
+ def setUp(self):
+ super(TestConfigPathCheck, self).setUp()
+ self.run_cmd = run.TempestRun(None, None)
+
+ def test_tempest_run_set_config_path(self):
+ # Note: (mbindlish) This test is created for the bug id: 1783751
+ # Check that TEMPEST_CONFIG_DIR and TEMPEST_CONFIG actually get
+ # set in the os environment when a config path is passed in.
+
+ self.run_cmd._set_env("/fakedir/fakefile")
+ self.assertEqual("/fakedir/fakefile", CONF._path)
+ self.assertIn('TEMPEST_CONFIG_DIR', os.environ)
+ self.assertEqual("/fakedir/fakefile",
+ os.path.join(os.environ['TEMPEST_CONFIG_DIR'],
+ os.environ['TEMPEST_CONFIG']))
+
+ def test_tempest_run_set_config_no_path(self):
+ # Note: (mbindlish) This test is created for the bug id: 1783751
+ # Check that TEMPEST_CONFIG_DIR and TEMPEST_CONFIG have no value
+ # in the os environment when no config path is passed in.
+
+ self.run_cmd._set_env("")
+ self.assertFalse(CONF._path)
+ self.assertNotIn('TEMPEST_CONFIG_DIR', os.environ)
+ self.assertNotIn('TEMPEST_CONFIG', os.environ)
+
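Together the two tests above fix the observable contract of _set_env: a non-empty config path is recorded on CONF and exported so that joining TEMPEST_CONFIG_DIR and TEMPEST_CONFIG reassembles the original path, while an empty value exports nothing. A minimal sketch of that contract (illustrative, not the code in tempest/cmd/run.py):

    import os

    def set_config_env(config_file):
        # Export the directory/file split so child processes can rebuild
        # the path; recording it on CONF._path is omitted here as a
        # tempest-internal detail.
        if config_file:
            os.environ['TEMPEST_CONFIG_DIR'] = os.path.dirname(config_file)
            os.environ['TEMPEST_CONFIG'] = os.path.basename(config_file)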
+
class TestTakeAction(base.TestCase):
+ def setUp(self):
+ super(TestTakeAction, self).setUp()
+ self.name = data_utils.rand_name('workspace')
+ self.path = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.path, ignore_errors=True)
+ store_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, store_dir, ignore_errors=True)
+ self.store_file = os.path.join(store_dir, 'workspace.yaml')
+ self.workspace_manager = workspace.WorkspaceManager(
+ path=self.store_file)
+ self.workspace_manager.register_new_workspace(self.name, self.path)
+
+ def _setup_test_dirs(self):
+ self.directory = tempfile.mkdtemp(prefix='tempest-unit')
+ self.addCleanup(shutil.rmtree, self.directory, ignore_errors=True)
+ self.test_dir = os.path.join(self.directory, 'tests')
+ os.mkdir(self.test_dir)
+ # Change directory, run wrapper and check result
+ self.addCleanup(os.chdir, os.path.abspath(os.curdir))
+ os.chdir(self.directory)
+
def test_workspace_not_registered(self):
class Exception_(Exception):
pass
@@ -169,18 +251,9 @@
self.assertIn(workspace, exit_msg)
def test_config_file_specified(self):
- # Setup test dirs
- self.directory = tempfile.mkdtemp(prefix='tempest-unit')
- self.addCleanup(shutil.rmtree, self.directory)
- self.test_dir = os.path.join(self.directory, 'tests')
- os.mkdir(self.test_dir)
- # Change directory, run wrapper and check result
- self.addCleanup(os.chdir, os.path.abspath(os.curdir))
- os.chdir(self.directory)
-
+ self._setup_test_dirs()
tempest_run = run.TempestRun(app=mock.Mock(), app_args=mock.Mock())
parsed_args = mock.Mock()
- parsed_args.config_file = []
parsed_args.workspace = None
parsed_args.state = None
@@ -191,3 +264,97 @@
m.return_value = 0
self.assertEqual(0, tempest_run.take_action(parsed_args))
m.assert_called()
+
+ def test_no_config_file_no_workspace_no_state(self):
+ self._setup_test_dirs()
+ tempest_run = run.TempestRun(app=mock.Mock(), app_args=mock.Mock())
+ parsed_args = mock.Mock()
+
+ parsed_args.workspace = None
+ parsed_args.state = None
+ parsed_args.list_tests = False
+ parsed_args.config_file = ''
+
+ with mock.patch('stestr.commands.run_command'):
+ self.assertRaises(SystemExit, tempest_run.take_action, parsed_args)
+
+ def test_config_file_workspace_registered(self):
+ self._setup_test_dirs()
+ tempest_run = run.TempestRun(app=mock.Mock(), app_args=mock.Mock())
+ parsed_args = mock.Mock()
+ parsed_args.workspace = self.name
+ parsed_args.workspace_path = self.store_file
+ parsed_args.state = None
+ parsed_args.list_tests = False
+ parsed_args.config_file = '.stestr.conf'
+
+ with mock.patch('stestr.commands.run_command') as m:
+ m.return_value = 0
+ self.assertEqual(0, tempest_run.take_action(parsed_args))
+ m.assert_called()
+
+ @mock.patch('tempest.cmd.run.TempestRun._init_state')
+ def test_workspace_registered_no_config_no_state(self, mock_init_state):
+ self._setup_test_dirs()
+ tempest_run = run.TempestRun(app=mock.Mock(), app_args=mock.Mock())
+ parsed_args = mock.Mock()
+ parsed_args.workspace = self.name
+ parsed_args.workspace_path = self.store_file
+ parsed_args.state = None
+ parsed_args.list_tests = False
+ parsed_args.config_file = ''
+
+ with mock.patch('stestr.commands.run_command') as m:
+ m.return_value = 0
+ self.assertEqual(0, tempest_run.take_action(parsed_args))
+ m.assert_called()
+ mock_init_state.assert_not_called()
+
+ @mock.patch('tempest.cmd.run.TempestRun._init_state')
+ def test_no_config_file_no_workspace_state_true(self, mock_init_state):
+ self._setup_test_dirs()
+ tempest_run = run.TempestRun(app=mock.Mock(), app_args=mock.Mock())
+ parsed_args = mock.Mock()
+
+ parsed_args.workspace = None
+ parsed_args.state = True
+ parsed_args.list_tests = False
+ parsed_args.config_file = ''
+
+ with mock.patch('stestr.commands.run_command'):
+ self.assertRaises(SystemExit, tempest_run.take_action, parsed_args)
+ mock_init_state.assert_not_called()
+
+ @mock.patch('tempest.cmd.run.TempestRun._init_state')
+ def test_workspace_registered_no_config_state_true(self, mock_init_state):
+ self._setup_test_dirs()
+ tempest_run = run.TempestRun(app=mock.Mock(), app_args=mock.Mock())
+ parsed_args = mock.Mock()
+ parsed_args.workspace = self.name
+ parsed_args.workspace_path = self.store_file
+ parsed_args.state = True
+ parsed_args.list_tests = False
+ parsed_args.config_file = ''
+
+ with mock.patch('stestr.commands.run_command') as m:
+ m.return_value = 0
+ self.assertEqual(0, tempest_run.take_action(parsed_args))
+ m.assert_called()
+ mock_init_state.assert_called()
+
+ @mock.patch('tempest.cmd.run.TempestRun._init_state')
+ def test_no_workspace_config_file_state_true(self, mock_init_state):
+ self._setup_test_dirs()
+ tempest_run = run.TempestRun(app=mock.Mock(), app_args=mock.Mock())
+ parsed_args = mock.Mock()
+ parsed_args.workspace = None
+ parsed_args.workspace_path = self.store_file
+ parsed_args.state = True
+ parsed_args.list_tests = False
+ parsed_args.config_file = '.stestr.conf'
+
+ with mock.patch('stestr.commands.run_command') as m:
+ m.return_value = 0
+ self.assertEqual(0, tempest_run.take_action(parsed_args))
+ m.assert_called()
+ mock_init_state.assert_called()
diff --git a/tempest/tests/cmd/test_verify_tempest_config.py b/tempest/tests/cmd/test_verify_tempest_config.py
index c260343..8dbba38 100644
--- a/tempest/tests/cmd/test_verify_tempest_config.py
+++ b/tempest/tests/cmd/test_verify_tempest_config.py
@@ -12,11 +12,14 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os
+
import fixtures
import mock
from oslo_serialization import jsonutils as json
from tempest import clients
+from tempest.cmd import init
from tempest.cmd import verify_tempest_config
from tempest.common import credentials_factory
from tempest import config
@@ -234,8 +237,8 @@
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_glance_api_versions(fake_os, True)
- print_mock.assert_called_once_with('api_v2', 'image-feature-enabled',
- False, True)
+ print_mock.assert_called_with('api_v2', 'image-feature-enabled',
+ False, True)
def test_verify_glance_version_no_v2_with_v1_0(self):
# This test verifies that wrong config api_v2 = True is detected
@@ -250,8 +253,8 @@
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_glance_api_versions(fake_os, True)
- print_mock.assert_called_once_with('api_v2', 'image-feature-enabled',
- False, True)
+ print_mock.assert_called_with('api_v2', 'image-feature-enabled',
+ False, True)
def test_verify_glance_version_no_v1(self):
# This test verifies that wrong config api_v1 = True is detected
@@ -271,8 +274,7 @@
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_glance_api_versions(fake_os, True)
- print_mock.assert_called_once_with('api_v1', 'image-feature-enabled',
- False, True)
+ print_mock.assert_not_called()
def test_verify_glance_version_no_version(self):
# This test verifies that wrong config api_v1 = True is detected
@@ -566,3 +568,64 @@
extensions_client = verify_tempest_config.get_extension_client(
os, service)
self.assertIsInstance(extensions_client, rest_client.RestClient)
+
+ def test_get_extension_client_sysexit(self):
+ creds = credentials_factory.get_credentials(
+ fill_in=False, username='fake_user', project_name='fake_project',
+ password='fake_password')
+ os = clients.Manager(creds)
+ self.assertRaises(SystemExit,
+ verify_tempest_config.get_extension_client,
+ os, 'fakeservice')
+
+ def test_get_config_file(self):
+ conf_dir = os.path.join(os.getcwd(), 'etc/')
+ conf_file = "tempest.conf.sample"
+ local_sample_conf_file = os.path.join(conf_dir, conf_file)
+
+ def fake_environ_get(key, default=None):
+ if key == 'TEMPEST_CONFIG_DIR':
+ return conf_dir
+ elif key == 'TEMPEST_CONFIG':
+ return 'tempest.conf.sample'
+ return default
+
+ with mock.patch('os.environ.get', side_effect=fake_environ_get,
+ autospec=True):
+ init_cmd = init.TempestInit(None, None)
+ init_cmd.generate_sample_config(os.path.join(conf_dir, os.pardir))
+ self.assertTrue(os.path.isfile(local_sample_conf_file),
+ local_sample_conf_file)
+
+ file_pointer = verify_tempest_config._get_config_file()
+ self.assertEqual(local_sample_conf_file, file_pointer.name)
+
+ with open(local_sample_conf_file, 'r+') as f:
+ local_sample_conf_contents = f.read()
+ self.assertEqual(local_sample_conf_contents, file_pointer.read())
+
+ if file_pointer:
+ file_pointer.close()
+
+ def test_print_and_or_update_true(self):
+ with mock.patch.object(
+ verify_tempest_config, 'change_option') as test_mock:
+ verify_tempest_config.print_and_or_update(
+ 'fakeservice', 'fake-service-available', False, True)
+ test_mock.assert_called_once_with(
+ 'fakeservice', 'fake-service-available', False)
+
+ def test_print_and_or_update_false(self):
+ with mock.patch.object(
+ verify_tempest_config, 'change_option') as test_mock:
+ verify_tempest_config.print_and_or_update(
+ 'fakeservice', 'fake-service-available', False, False)
+ test_mock.assert_not_called()
+
+ def test_contains_version_positive_data(self):
+ self.assertTrue(
+ verify_tempest_config.contains_version('v1.', ['v1.0', 'v2.0']))
+
+ def test_contains_version_negative_data(self):
+ self.assertFalse(
+ verify_tempest_config.contains_version('v5.', ['v1.0', 'v2.0']))
diff --git a/tempest/tests/cmd/test_workspace.py b/tempest/tests/cmd/test_workspace.py
index 3ed8a10..b4f6c5f 100644
--- a/tempest/tests/cmd/test_workspace.py
+++ b/tempest/tests/cmd/test_workspace.py
@@ -122,22 +122,157 @@
self.assertIsNone(self.workspace_manager.get_workspace(self.name))
self.assertIsNotNone(self.workspace_manager.get_workspace(new_name))
+ def test_workspace_manager_rename_no_name_exist(self):
+ no_name = ""
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.rename_workspace,
+ self.name, no_name)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "None or empty name is specified."
+ " Please specify correct name for workspace.\n")
+
+ def test_workspace_manager_rename_with_existing_name(self):
+ new_name = self.name
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.rename_workspace,
+ self.name, new_name)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "A workspace already exists with name: %s.\n"
+ % new_name)
+
+ def test_workspace_manager_rename_no_exist_old_name(self):
+ old_name = ""
+ new_name = data_utils.rand_uuid()
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.rename_workspace,
+ old_name, new_name)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "A workspace was not found with name: %s\n"
+ % old_name)
+
+ def test_workspace_manager_rename_integer_data(self):
+ old_name = self.name
+ new_name = 12345
+ self.workspace_manager.rename_workspace(old_name, new_name)
+ self.assertIsNone(self.workspace_manager.get_workspace(old_name))
+ self.assertIsNotNone(self.workspace_manager.get_workspace(new_name))
+
+ def test_workspace_manager_rename_alphanumeric_data(self):
+ old_name = self.name
+ new_name = 'abc123'
+ self.workspace_manager.rename_workspace(old_name, new_name)
+ self.assertIsNone(self.workspace_manager.get_workspace(old_name))
+ self.assertIsNotNone(self.workspace_manager.get_workspace(new_name))
+
def test_workspace_manager_move(self):
new_path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, new_path, ignore_errors=True)
self.workspace_manager.move_workspace(self.name, new_path)
self.assertEqual(
self.workspace_manager.get_workspace(self.name), new_path)
+ # NOTE(mbindlish): Also checking for the workspace that it
+ # shouldn't exist in old path
+ self.assertNotEqual(
+ self.workspace_manager.get_workspace(self.name), self.path)
+
+ def test_workspace_manager_move_wrong_path(self):
+ new_path = 'wrong/path'
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.move_workspace,
+ self.name, new_path)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "Path does not exist.\n")
+
+ def test_workspace_manager_move_wrong_workspace(self):
+ workspace_name = "wrong_workspace_name"
+ new_path = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, new_path, ignore_errors=True)
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.move_workspace,
+ workspace_name, new_path)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "A workspace was not found with name: %s\n"
+ % workspace_name)
+
+ def test_workspace_manager_move_no_workspace_name(self):
+ workspace_name = ""
+ new_path = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, new_path, ignore_errors=True)
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.move_workspace,
+ workspace_name, new_path)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "A workspace was not found with name: %s\n"
+ % workspace_name)
+
+ def test_workspace_manager_move_no_workspace_path(self):
+ new_path = ""
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.move_workspace,
+ self.name, new_path)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "None or empty path is specified for workspace."
+ " Please specify correct workspace path.\n")
def test_workspace_manager_remove_entry(self):
self.workspace_manager.remove_workspace_entry(self.name)
self.assertIsNone(self.workspace_manager.get_workspace(self.name))
+ def test_workspace_manager_remove_entry_no_name(self):
+ no_name = ""
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.
+ remove_workspace_entry,
+ no_name)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "A workspace was not found with name: %s\n"
+ % no_name)
+
+ def test_workspace_manager_remove_entry_wrong_name(self):
+ wrong_name = "wrong_name"
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.
+ remove_workspace_entry,
+ wrong_name)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "A workspace was not found with name: %s\n"
+ % wrong_name)
+
def test_workspace_manager_remove_directory(self):
path = self.workspace_manager.remove_workspace_entry(self.name)
self.workspace_manager.remove_workspace_directory(path)
self.assertIsNone(self.workspace_manager.get_workspace(self.name))
+ def test_workspace_manager_remove_directory_no_path(self):
+ no_path = ""
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.
+ remove_workspace_directory,
+ no_path)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "None or empty path is specified for workspace."
+ " Please specify correct workspace path.\n")
+
def test_path_expansion(self):
name = data_utils.rand_uuid()
path = os.path.join("~", name)
@@ -154,8 +289,11 @@
nonexistent_name)
self.assertEqual(1, ex.code)
self.assertEqual(mock_stdout.getvalue(),
- "A workspace was not found with name: %s\n" %
- nonexistent_name)
+ "A workspace was not found with name: %s\n"
+ % nonexistent_name)
+
+ def test_workspace_name_exists(self):
+ self.assertIsNone(self.workspace_manager._name_exists(self.name))
def test_workspace_name_already_exists(self):
duplicate_name = self.name
@@ -169,6 +307,11 @@
"A workspace already exists with name: %s.\n"
% duplicate_name)
+ def test_workspace_name_exists_check_new_name(self):
+ new_name = "fake_name"
+ self.assertIsNone(self.workspace_manager.
+ _workspace_name_exists(new_name))
+
def test_workspace_manager_path_not_exist(self):
fake_path = "fake_path"
with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
@@ -179,8 +322,55 @@
self.assertEqual(mock_stdout.getvalue(),
"Path does not exist.\n")
+ def test_validate_path_exists(self):
+ new_path = self.path
+ self.assertIsNone(self.workspace_manager.
+ _validate_path(new_path))
+
def test_workspace_manager_list_workspaces(self):
listed = self.workspace_manager.list_workspaces()
self.assertEqual(1, len(listed))
self.assertIn(self.name, listed)
self.assertEqual(self.path, listed.get(self.name))
+
+ def test_register_new_workspace_no_name(self):
+ no_name = ""
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.
+ register_new_workspace,
+ no_name, self.path)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "None or empty name is specified."
+ " Please specify correct name for workspace.\n")
+
+ def test_register_new_workspace_no_path(self):
+ no_path = ""
+ with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+ ex = self.assertRaises(SystemExit,
+ self.workspace_manager.
+ register_new_workspace,
+ self.name, no_path)
+ self.assertEqual(1, ex.code)
+ self.assertEqual(mock_stdout.getvalue(),
+ "None or empty path is specified for workspace."
+ " Please specify correct workspace path.\n")
+
+ def test_register_new_workspace_integer_data(self):
+ workspace_name = 12345
+ self.workspace_manager.register_new_workspace(
+ workspace_name, self.path)
+ self.assertIsNotNone(
+ self.workspace_manager.get_workspace(workspace_name))
+ self.assertEqual(
+ self.workspace_manager.get_workspace(workspace_name), self.path)
+
+ def test_register_new_workspace_alphanumeric_data(self):
+ workspace_name = 'abc123'
+ self.workspace_manager.register_new_workspace(
+ workspace_name, self.path)
+ self.assertIsNotNone(
+ self.workspace_manager.get_workspace(workspace_name))
+ self.assertEqual(
+ self.workspace_manager.get_workspace(workspace_name), self.path)
diff --git a/tempest/tests/fake_config.py b/tempest/tests/fake_config.py
index be54130..25e99d5 100644
--- a/tempest/tests/fake_config.py
+++ b/tempest/tests/fake_config.py
@@ -32,6 +32,7 @@
super(ConfigFixture, self).setUp()
self.conf.set_default('build_interval', 10, group='compute')
self.conf.set_default('build_timeout', 10, group='compute')
+ self.conf.set_default('image_ref', 'fake_image_id', group='compute')
self.conf.set_default('disable_ssl_certificate_validation', True,
group='identity')
self.conf.set_default('uri', 'http://fake_uri.com/auth',
diff --git a/tempest/tests/files/setup.cfg b/tempest/tests/files/setup.cfg
index bd68708..a81d31e 100644
--- a/tempest/tests/files/setup.cfg
+++ b/tempest/tests/files/setup.cfg
@@ -3,7 +3,7 @@
version = 1
summary = Fake Project for testing wrapper scripts
author = OpenStack
-author-email = openstack-dev@lists.openstack.org
+author-email = openstack-discuss@lists.openstack.org
home-page = https://docs.openstack.org/tempest/latest/
classifier =
Intended Audience :: Information Technology
diff --git a/tempest/tests/lib/common/test_http.py b/tempest/tests/lib/common/test_http.py
index 02436e0..a19153f 100644
--- a/tempest/tests/lib/common/test_http.py
+++ b/tempest/tests/lib/common/test_http.py
@@ -167,3 +167,24 @@
'%s://%s:%i' % (proxy.scheme,
proxy.host,
proxy.port))
+
+
+class TestClosingHttpRedirects(base.TestCase):
+ def test_redirect_default(self):
+ connection = http.ClosingHttp()
+ self.assertTrue(connection.follow_redirects)
+
+ def test_redirect_off(self):
+ connection = http.ClosingHttp(follow_redirects=False)
+ self.assertFalse(connection.follow_redirects)
+
+
+class TestClosingProxyHttpRedirects(base.TestCase):
+ def test_redirect_default(self):
+ connection = http.ClosingProxyHttp(proxy_url=PROXY_URL)
+ self.assertTrue(connection.follow_redirects)
+
+ def test_redirect_off(self):
+ connection = http.ClosingProxyHttp(follow_redirects=False,
+ proxy_url=PROXY_URL)
+ self.assertFalse(connection.follow_redirects)
diff --git a/tempest/tests/lib/services/image/v2/test_resource_types_client.py b/tempest/tests/lib/services/image/v2/test_resource_types_client.py
index 2e3b117..741b4eb 100644
--- a/tempest/tests/lib/services/image/v2/test_resource_types_client.py
+++ b/tempest/tests/lib/services/image/v2/test_resource_types_client.py
@@ -17,7 +17,7 @@
from tempest.tests.lib.services import base
-class TestResouceTypesClient(base.BaseServiceTest):
+class TestResourceTypesClient(base.BaseServiceTest):
FAKE_LIST_RESOURCETYPES = {
"resource_types": [
{
@@ -49,21 +49,21 @@
}
def setUp(self):
- super(TestResouceTypesClient, self).setUp()
+ super(TestResourceTypesClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = resource_types_client.ResourceTypesClient(fake_auth,
'image',
'regionOne')
- def _test_list_resouce_types(self, bytes_body=False):
+ def _test_list_resource_types(self, bytes_body=False):
self.check_service_client_function(
self.client.list_resource_types,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_LIST_RESOURCETYPES,
bytes_body)
- def test_list_resouce_types_with_str_body(self):
- self._test_list_resouce_types()
+ def test_list_resource_types_with_str_body(self):
+ self._test_list_resource_types()
- def test_list_resouce_types_with_bytes_body(self):
- self._test_list_resouce_types(bytes_body=True)
+ def test_list_resource_types_with_bytes_body(self):
+ self._test_list_resource_types(bytes_body=True)
diff --git a/tempest/tests/lib/services/network/test_agents_client.py b/tempest/tests/lib/services/network/test_agents_client.py
new file mode 100644
index 0000000..aabc6ce
--- /dev/null
+++ b/tempest/tests/lib/services/network/test_agents_client.py
@@ -0,0 +1,37 @@
+# Copyright 2018 AT&T Corporation.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.network import agents_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestAgentsClient(base.BaseServiceTest):
+
+ FAKE_AGENT_ID = "d32019d3-bc6e-4319-9c1d-6123f4135a88"
+
+ def setUp(self):
+ super(TestAgentsClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.agents_client = agents_client.AgentsClient(
+ fake_auth, "network", "regionOne")
+
+ def test_delete_agent(self):
+ self.check_service_client_function(
+ self.agents_client.delete_agent,
+ "tempest.lib.common.rest_client.RestClient.delete",
+ {},
+ status=204,
+ agent_id=self.FAKE_AGENT_ID)
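A one-line sketch of the call this test mocks out, assuming an agents_client built as in setUp() above; per the test, a successful delete returns a 204 with an empty body:

    agents_client.delete_agent(agent_id='d32019d3-bc6e-4319-9c1d-6123f4135a88')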
diff --git a/tempest/tests/lib/services/network/test_versions_client.py b/tempest/tests/lib/services/network/test_versions_client.py
index 026dc6d..188fc31 100644
--- a/tempest/tests/lib/services/network/test_versions_client.py
+++ b/tempest/tests/lib/services/network/test_versions_client.py
@@ -12,63 +12,92 @@
# License for the specific language governing permissions and limitations
# under the License.
-import copy
-
from tempest.lib.services.network import versions_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestNetworkVersionsClient(base.BaseServiceTest):
-
- FAKE_INIT_VERSION = {
- "version": {
- "id": "v2.0",
- "links": [
- {
- "href": "http://openstack.example.com/v2.0/",
- "rel": "self"
- },
- {
- "href": "http://docs.openstack.org/",
- "rel": "describedby",
- "type": "text/html"
- }
- ],
- "status": "CURRENT"
- }
- }
+ VERSION = "v2.0"
FAKE_VERSIONS_INFO = {
- "versions": [FAKE_INIT_VERSION["version"]]
- }
-
- FAKE_VERSION_INFO = copy.deepcopy(FAKE_INIT_VERSION)
-
- FAKE_VERSION_INFO["version"]["media-types"] = [
- {
- "base": "application/json",
- "type": "application/vnd.openstack.network+json;version=2.0"
- }
+ "versions": [
+ {
+ "id": "v2.0",
+ "links": [
+ {
+ "href": "http://openstack.example.com/%s/" % VERSION,
+ "rel": "self"
+ }
+ ],
+ "status": "CURRENT"
+ }
]
+ }
+
+ FAKE_VERSION_DETAILS = {
+ "resources": [
+ {
+ "collection": "subnets",
+ "links": [
+ {
+ "href": "http://openstack.example.com:9696/"
+ "%s/subnets" % VERSION,
+ "rel": "self"
+ }
+ ],
+ "name": "subnet"
+ },
+ {
+ "collection": "networks",
+ "links": [
+ {
+ "href": "http://openstack.example.com:9696/"
+ "%s/networks" % VERSION,
+ "rel": "self"
+ }
+ ],
+ "name": "network"
+ },
+ {
+ "collection": "ports",
+ "links": [
+ {
+ "href": "http://openstack.example.com:9696/"
+ "%s/ports" % VERSION,
+ "rel": "self"
+ }
+ ],
+ "name": "port"
+ }
+ ]
+ }
def setUp(self):
super(TestNetworkVersionsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
- self.versions_client = (
- versions_client.NetworkVersionsClient
- (fake_auth, 'compute', 'regionOne'))
+ self.versions_client = versions_client.NetworkVersionsClient(
+ fake_auth, 'compute', 'regionOne')
- def _test_versions_client(self, bytes_body=False):
+ def _test_versions_client(self, func, body, bytes_body=False, **kwargs):
self.check_service_client_function(
- self.versions_client.list_versions,
- 'tempest.lib.common.rest_client.RestClient.raw_request',
- self.FAKE_VERSIONS_INFO,
- bytes_body,
- 200)
+ func, 'tempest.lib.common.rest_client.RestClient.raw_request',
+ body, bytes_body, 200, **kwargs)
def test_list_versions_client_with_str_body(self):
- self._test_versions_client()
+ self._test_versions_client(self.versions_client.list_versions,
+ self.FAKE_VERSIONS_INFO)
def test_list_versions_client_with_bytes_body(self):
- self._test_versions_client(bytes_body=True)
+ self._test_versions_client(self.versions_client.list_versions,
+ self.FAKE_VERSIONS_INFO, bytes_body=True)
+
+ def test_show_version_client_with_str_body(self):
+ self._test_versions_client(self.versions_client.show_version,
+ self.FAKE_VERSION_DETAILS,
+ version=self.VERSION)
+
+ def test_show_version_client_with_bytes_body(self):
+ self._test_versions_client(self.versions_client.show_version,
+ self.FAKE_VERSION_DETAILS, bytes_body=True,
+ version=self.VERSION)
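For reference, a hedged sketch of the two calls covered above, assuming a versions_client constructed as in setUp(): list_versions() takes no arguments and returns a payload shaped like FAKE_VERSIONS_INFO, while show_version() takes the version identifier and returns a per-version resource listing shaped like FAKE_VERSION_DETAILS:

    versions = versions_client.list_versions()
    details = versions_client.show_version(version='v2.0')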
diff --git a/tempest/tests/lib/services/placement/__init__.py b/tempest/tests/lib/services/placement/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/tests/lib/services/placement/__init__.py
diff --git a/tempest/tests/lib/services/placement/test_placement_client.py b/tempest/tests/lib/services/placement/test_placement_client.py
new file mode 100644
index 0000000..1396a85
--- /dev/null
+++ b/tempest/tests/lib/services/placement/test_placement_client.py
@@ -0,0 +1,89 @@
+# Copyright (c) 2019 Ericsson
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.placement import placement_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestPlacementClient(base.BaseServiceTest):
+ FAKE_ALLOCATION_CANDIDATES = {
+ 'allocation_requests': [
+ {'allocations': {
+ 'rp-uuid': {'resources': {'VCPU': 42}}
+ }}
+ ],
+ 'provider_summaries': {
+ 'rp-uuid': {
+ 'resources': {
+ 'VCPU': {'used': 0, 'capacity': 64},
+ 'MEMORY_MB': {'capacity': 11196, 'used': 0},
+ 'DISK_GB': {'capacity': 19, 'used': 0}
+ },
+ 'traits': ["HW_CPU_X86_SVM"],
+ }
+ }
+ }
+
+ FAKE_ALLOCATIONS = {
+ 'allocations': {
+ 'rp-uuid-1': {
+ 'resources': {
+ 'NET_BW_IGR_KILOBIT_PER_SEC': 1
+ },
+ 'generation': 14
+ },
+ 'rp-uuid-2': {
+ 'resources': {
+ 'MEMORY_MB': 256,
+ 'VCPU': 1
+ },
+ 'generation': 9
+ }
+ }
+ }
+
+ def setUp(self):
+ super(TestPlacementClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.client = placement_client.PlacementClient(
+ fake_auth, 'placement', 'regionOne')
+
+ def _test_list_allocation_candidates(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_allocation_candidates,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_ALLOCATION_CANDIDATES,
+ to_utf=bytes_body,
+ **{'resources1': 'NET_BW_IGR_KILOBIT_PER_SEC:1'})
+
+ def test_list_allocation_candidates_with_str_body(self):
+ self._test_list_allocation_candidates()
+
+ def test_list_allocation_candidates_with_bytes_body(self):
+ self._test_list_allocation_candidates(bytes_body=True)
+
+ def _test_list_allocations(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_allocations,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_ALLOCATIONS,
+ to_utf=bytes_body,
+ **{'consumer_uuid': 'foo-bar'})
+
+ def test_list_allocations_with_str_body(self):
+ self._test_list_allocations()
+
+ def test_list_allocations_with_bytes_body(self):
+ self._test_list_allocations(bytes_body=True)
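A minimal sketch of the two client calls exercised above, assuming a client set up as in setUp(); the query values are the fake ones from the tests, and the keyword arguments are passed through as query-string parameters on the mocked GET request:

    candidates = client.list_allocation_candidates(
        **{'resources1': 'NET_BW_IGR_KILOBIT_PER_SEC:1'})
    allocations = client.list_allocations(consumer_uuid='foo-bar')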
diff --git a/tempest/tests/lib/services/volume/v3/test_hosts_client.py b/tempest/tests/lib/services/volume/v3/test_hosts_client.py
index 67ae4fd..09bc0b1 100644
--- a/tempest/tests/lib/services/volume/v3/test_hosts_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_hosts_client.py
@@ -18,7 +18,7 @@
from tempest.tests.lib.services import base
-class TestQuotasClient(base.BaseServiceTest):
+class TestHostsClient(base.BaseServiceTest):
FAKE_LIST_HOSTS = {
"hosts": [
{
@@ -66,7 +66,7 @@
}
def setUp(self):
- super(TestQuotasClient, self).setUp()
+ super(TestHostsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = hosts_client.HostsClient(fake_auth,
'volume',
diff --git a/tempest/tests/lib/services/volume/v3/test_types_client.py b/tempest/tests/lib/services/volume/v3/test_types_client.py
new file mode 100644
index 0000000..7021a3f
--- /dev/null
+++ b/tempest/tests/lib/services/volume/v3/test_types_client.py
@@ -0,0 +1,281 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.volume.v3 import types_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestTypesClient(base.BaseServiceTest):
+ FAKE_CREATE_VOLUME_TYPE = {
+ 'volume_type': {
+ 'id': '6685584b-1eac-4da6-b5c3-555430cf68ff',
+ 'name': 'vol-type-001',
+ 'description': 'volume type 0001',
+ 'is_public': True,
+ 'os-volume-type-access:is_public': True,
+ 'extra_specs': {
+ 'volume_backend_name': 'rbd'
+ }
+ }
+ }
+
+ FAKE_DEFAULT_VOLUME_TYPE_INFO = {
+ 'volume_type': {
+ 'id': '6685584b-1eac-4da6-b5c3-555430cf68ff',
+ 'qos_specs_id': None,
+ 'name': 'volume-type-test',
+ 'description': 'default volume type',
+ 'is_public': True,
+ 'os-volume-type-access:is_public': True,
+ 'extra_specs': {
+ 'volume_backend_name': 'rbd'
+ }
+ }
+ }
+
+ FAKE_UPDATE_VOLUME_TYPE = {
+ 'volume_type': {
+ 'id': '6685584b-1eac-4da6-b5c3-555430cf68ff',
+ 'qos_specs_id': None,
+ 'name': 'volume-type-test',
+ 'description': 'default volume type',
+ 'os-volume-type-access:is_public': True,
+ 'is_public': True,
+ 'extra_specs': {
+ 'volume_backend_name': 'rbd'
+ }
+ }
+ }
+
+ FAKE_VOLUME_TYPES = {
+ 'volume_types': [
+ {
+ 'name': 'volume_type01',
+ 'qos_specs_id': None,
+ 'extra_specs': {
+ 'volume_backend_name': 'lvmdriver-1'
+ },
+ 'os-volume-type-access:is_public': True,
+ 'is_public': True,
+ 'id': '6685584b-1eac-4da6-b5c3-555430cf68ff',
+ 'description': None
+ },
+ {
+ 'name': 'volume_type02',
+ 'qos_specs_id': None,
+ 'extra_specs': {
+ 'volume_backend_name': 'lvmdriver-1'
+ },
+ 'os-volume-type-access:is_public': True,
+ 'is_public': True,
+ 'id': '8eb69a46-df97-4e41-9586-9a40a7533803',
+ 'description': None
+ }
+ ]
+ }
+
+ FAKE_VOLUME_TYPE_EXTRA_SPECS = {
+ 'extra_specs': {
+ 'capabilities': 'gpu'
+ }
+ }
+
+ FAKE_SHOW_VOLUME_TYPE_EXTRA_SPECS = {
+ 'capabilities': 'gpu'
+ }
+
+ FAKE_VOLUME_TYPE_ACCESS = {
+ 'volume_type_access': [{
+ 'volume_type_id': '3c67e124-39ad-4ace-a507-8bb7bf510c26',
+ 'project_id': 'f270b245cb11498ca4031deb7e141cfa'
+ }]
+ }
+
+ def setUp(self):
+ super(TestTypesClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.client = types_client.TypesClient(fake_auth,
+ 'volume',
+ 'regionOne')
+
+ def _test_list_volume_types(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_volume_types,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_VOLUME_TYPES,
+ bytes_body)
+
+ def _test_show_volume_type(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.show_volume_type,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_DEFAULT_VOLUME_TYPE_INFO,
+ to_utf=bytes_body,
+ volume_type_id="6685584b-1eac-4da6-b5c3-555430cf68ff")
+
+ def _test_create_volume_type(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.create_volume_type,
+ 'tempest.lib.common.rest_client.RestClient.post',
+ self.FAKE_CREATE_VOLUME_TYPE,
+ to_utf=bytes_body,
+ name='volume-type-test')
+
+ def _test_delete_volume_type(self):
+ self.check_service_client_function(
+ self.client.delete_volume_type,
+ 'tempest.lib.common.rest_client.RestClient.delete',
+ {}, status=202,
+ volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff')
+
+ def _test_list_volume_types_extra_specs(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_volume_types_extra_specs,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_VOLUME_TYPE_EXTRA_SPECS,
+ to_utf=bytes_body,
+ volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff')
+
+ def _test_show_volume_type_extra_specs(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.show_volume_type_extra_specs,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_SHOW_VOLUME_TYPE_EXTRA_SPECS,
+ volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff',
+ extra_specs_name='capabilities',
+ to_utf=bytes_body)
+
+ def _test_create_volume_type_extra_specs(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.create_volume_type_extra_specs,
+ 'tempest.lib.common.rest_client.RestClient.post',
+ self.FAKE_VOLUME_TYPE_EXTRA_SPECS,
+ volume_type_id="6685584b-1eac-4da6-b5c3-555430cf68ff",
+ extra_specs=self.FAKE_VOLUME_TYPE_EXTRA_SPECS,
+ to_utf=bytes_body)
+
+ def _test_delete_volume_type_extra_specs(self):
+ self.check_service_client_function(
+ self.client.delete_volume_type_extra_specs,
+ 'tempest.lib.common.rest_client.RestClient.delete',
+ {}, status=202,
+ volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff',
+ extra_spec_name='volume_backend_name')
+
+ def _test_update_volume_type(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.update_volume_type,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ self.FAKE_UPDATE_VOLUME_TYPE,
+ volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff',
+ to_utf=bytes_body,
+ name='update-volume-type-test',
+ description='test update volume type description')
+
+ def _test_update_volume_type_extra_specs(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.update_volume_type_extra_specs,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ self.FAKE_SHOW_VOLUME_TYPE_EXTRA_SPECS,
+ extra_spec_name='capabilities',
+ volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff',
+ extra_specs=self.FAKE_SHOW_VOLUME_TYPE_EXTRA_SPECS,
+ to_utf=bytes_body)
+
+ def _test_add_type_access(self):
+ self.check_service_client_function(
+ self.client.add_type_access,
+ 'tempest.lib.common.rest_client.RestClient.post',
+ {}, status=202,
+ volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff')
+
+ def _test_remove_type_access(self):
+ self.check_service_client_function(
+ self.client.remove_type_access,
+ 'tempest.lib.common.rest_client.RestClient.post',
+ {}, status=202,
+ volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff')
+
+ def _test_list_type_access(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_type_access,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_VOLUME_TYPE_ACCESS,
+ volume_type_id='3c67e124-39ad-4ace-a507-8bb7bf510c26',
+ to_utf=bytes_body)
+
+ def test_list_volume_types_with_str_body(self):
+ self._test_list_volume_types()
+
+ def test_list_volume_types_with_bytes_body(self):
+ self._test_list_volume_types(bytes_body=True)
+
+ def test_show_volume_type_with_str_body(self):
+ self._test_show_volume_type()
+
+ def test_show_volume_type_with_bytes_body(self):
+ self._test_show_volume_type(bytes_body=True)
+
+ def test_create_volume_type_str_body(self):
+ self._test_create_volume_type()
+
+ def test_create_volume_type_with_bytes_body(self):
+ self._test_create_volume_type(bytes_body=True)
+
+ def test_list_volume_types_extra_specs_with_str_body(self):
+ self._test_list_volume_types_extra_specs()
+
+ def test_list_volume_types_extra_specs_with_bytes_body(self):
+ self._test_list_volume_types_extra_specs(bytes_body=True)
+
+ def test_show_volume_type_extra_specs_with_str_body(self):
+ self._test_show_volume_type_extra_specs()
+
+ def test_show_volume_type_extra_specs_with_bytes_body(self):
+ self._test_show_volume_type_extra_specs(bytes_body=True)
+
+ def test_create_volume_type_extra_specs_with_str_body(self):
+ self._test_create_volume_type_extra_specs()
+
+ def test_create_volume_type_extra_specs_with_bytes_body(self):
+ self._test_create_volume_type_extra_specs(bytes_body=True)
+
+ def test_delete_volume_type_extra_specs(self):
+ self._test_delete_volume_type_extra_specs()
+
+ def test_update_volume_type_with_str_body(self):
+ self._test_update_volume_type()
+
+ def test_update_volume_type_with_bytes_body(self):
+ self._test_update_volume_type(bytes_body=True)
+
+ def test_delete_volume_type(self):
+ self._test_delete_volume_type()
+
+ def test_update_volume_type_extra_specs_with_str_body(self):
+ self._test_update_volume_type_extra_specs()
+
+ def test_update_volume_type_extra_specs_with_bytes_body(self):
+ self._test_update_volume_type_extra_specs(bytes_body=True)
+
+ def test_add_type_access(self):
+ self._test_add_type_access()
+
+ def test_remove_type_access(self):
+ self._test_remove_type_access()
+
+ def test_list_type_access_with_str_body(self):
+ self._test_list_type_access()
+
+ def test_list_type_access_with_bytes_body(self):
+ self._test_list_type_access(bytes_body=True)
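Taken together, the helpers above walk the whole volume-type lifecycle. A hedged end-to-end sketch using the same client and the fake ID from the tests (against a real deployment the ID would come from the create response):

    type_id = '6685584b-1eac-4da6-b5c3-555430cf68ff'  # fake ID from the tests
    client.create_volume_type(name='volume-type-test')
    client.create_volume_type_extra_specs(volume_type_id=type_id,
                                          extra_specs={'capabilities': 'gpu'})
    client.show_volume_type_extra_specs(volume_type_id=type_id,
                                        extra_specs_name='capabilities')
    client.delete_volume_type_extra_specs(volume_type_id=type_id,
                                          extra_spec_name='capabilities')
    client.delete_volume_type(volume_type_id=type_id)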
diff --git a/tools/generate-tempest-plugins-list.py b/tools/generate-tempest-plugins-list.py
index 4eb78fb..3772774 100644
--- a/tools/generate-tempest-plugins-list.py
+++ b/tools/generate-tempest-plugins-list.py
@@ -74,7 +74,8 @@
# Gerrit prepends 4 garbage octets to the JSON, in order to counter
# cross-site scripting attacks. Therefore we must discard them so the
# json library won't choke.
-projects = sorted(filter(is_in_openstack_namespace, json.loads(r.read()[4:])))
+content = r.read().decode('utf-8')[4:]
+projects = sorted(filter(is_in_openstack_namespace, json.loads(content)))
# Retrieve projects having no deb, puppet, ui or spec namespace as those
# namespaces do not contain tempest plugins.
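The decode-before-slice change matters on Python 3, where r.read() returns bytes: depending on the interpreter version, slicing bytes and handing them to json.loads can fail, so the change decodes first. A standalone sketch of the same idea (the URL is illustrative, not the one the script actually builds):

    import json
    import urllib.request

    # Gerrit prefixes its JSON responses with a few anti-XSS bytes; decode
    # first, drop the first four characters, then parse what remains.
    with urllib.request.urlopen('https://review.opendev.org/projects/?d') as r:
        content = r.read().decode('utf-8')[4:]
    projects = sorted(json.loads(content))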
diff --git a/tools/generate-tempest-plugins-list.sh b/tools/generate-tempest-plugins-list.sh
index b27b23a..111c9ce 100755
--- a/tools/generate-tempest-plugins-list.sh
+++ b/tools/generate-tempest-plugins-list.sh
@@ -41,8 +41,6 @@
set -ex
(
-declare -A plugins
-
if [[ -r doc/source/data/tempest-plugins-registry.header ]]; then
cat doc/source/data/tempest-plugins-registry.header
fi
diff --git a/tools/tempest-plugin-sanity.sh b/tools/tempest-plugin-sanity.sh
index 8b4f913..661329b 100644
--- a/tools/tempest-plugin-sanity.sh
+++ b/tools/tempest-plugin-sanity.sh
@@ -47,7 +47,7 @@
# retrieve a list of projects having tempest plugins
PROJECT_LIST="$(python tools/generate-tempest-plugins-list.py)"
# List of projects whose tempest plugins have been stale or unmaintained for a long time
-BLACKLIST="trio2o"
+BLACKLIST="networking-plumgrid,trio2o"
# Function to clone project using zuul-cloner or from git
function clone_project() {
@@ -105,8 +105,8 @@
# Function to run sanity check on each project
function plugin_sanity_check() {
- clone_project "$1" && install_project "$1" && tempest_sanity "$1" \
- && uninstall_project "$1" && "$TVENV" pip install .
+ clone_project "$1" && install_project "$1" && tempest_sanity "$1" \
+ && uninstall_project "$1" && "$TVENV" pip install .
}
# Log status
diff --git a/tox.ini b/tox.ini
index 65960b0..4068054 100644
--- a/tox.ini
+++ b/tox.ini
@@ -20,7 +20,7 @@
OS_STDERR_CAPTURE=1
OS_TEST_TIMEOUT=160
PYTHONWARNINGS=default::DeprecationWarning,ignore::DeprecationWarning:distutils,ignore::DeprecationWarning:site
-passenv = OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_TEST_TIMEOUT OS_TEST_LOCK_PATH TEMPEST_CONFIG TEMPEST_CONFIG_DIR http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY ZUUL_CACHE_DIR REQUIREMENTS_PIP_LOCATION GENERATE_TEMPEST_PLUGIN_LIST
+passenv = OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_TEST_TIMEOUT OS_TEST_LOCK_PATH TEMPEST_CONFIG TEMPEST_CONFIG_DIR http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY ZUUL_CACHE_DIR REQUIREMENTS_PIP_LOCATION GENERATE_TEMPEST_PLUGIN_LIST GABBI_TEMPEST_PATH
usedevelop = True
install_command = pip install {opts} {packages}
whitelist_externals = *
@@ -48,6 +48,10 @@
coverage xml -o cover/coverage.xml
coverage report
+[testenv:debug]
+basepython = python3
+commands = oslo_debug_helper -t tempest/tests {posargs}
+
[testenv:all]
envdir = .tox/tempest
sitepackages = {[tempestenv]sitepackages}
@@ -61,6 +65,26 @@
tempest run --regex {posargs}
[testenv:all-plugin]
+# DEPRECATED
+# NOTE(andreaf) The all-plugin tox env uses sitepackages
+# so that plugins installed outside of the Tempest virtual environment
+# can be discovered. Now that the goal of moving Tempest plugins into
+# dedicated git repos has been completed during the Queens release
+# cycle, this environment should not be used anymore; "all" should be
+# used instead with the appropriate regex filtering.
+sitepackages = True
+# 'all' includes slow tests
+setenv =
+ {[tempestenv]setenv}
+ OS_TEST_TIMEOUT={env:OS_TEST_TIMEOUT:1200}
+deps = {[tempestenv]deps}
+commands =
+ echo "WARNING: The all-plugin env is deprecated and will be removed"
+ echo "WARNING Please use the 'all' environment for Tempest plugins."
+ find . -type f -name "*.pyc" -delete
+ tempest run --regex {posargs}
+
+[testenv:all-site-packages]
sitepackages = True
# 'all' includes slow tests
setenv =
@@ -162,6 +186,7 @@
commands = {posargs}
[testenv:docs]
+basepython = python3
deps =
-c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
-r{toxinidir}/requirements.txt
@@ -172,6 +197,7 @@
whitelist_externals = rm
[testenv:pep8]
+basepython = python3
commands =
flake8 {posargs}
check-uuid
@@ -196,6 +222,7 @@
import-order-style = pep8
[testenv:releasenotes]
+basepython = python3
deps =
-c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
-r{toxinidir}/requirements.txt