Merge "Document branch support policy for extended maint branches"
diff --git a/.zuul.yaml b/.zuul.yaml
index 8dcb935..3fee6ff 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -101,6 +101,19 @@
Base integration test with Neutron networking and py36.
voting: false
+# TODO(gmann): this job needs to be migrated to a zuulv3 native job
+- job:
+ name: tempest-scenario-multinode-lvm-multibackend
+ parent: legacy-dsvm-base-multinode
+ run: playbooks/tempest-scenario-multinode-lvm-multibackend/run.yaml
+ post-run: playbooks/tempest-scenario-multinode-lvm-multibackend/post.yaml
+ timeout: 10800
+ required-projects:
+ - openstack-infra/devstack-gate
+ - openstack/neutron
+ - openstack/tempest
+ nodeset: ubuntu-xenial-2-node
+
- job:
name: tempest-full-queens
parent: tempest-full
@@ -199,6 +212,19 @@
- openstack/zaqar-tempest-plugin
- openstack/zun-tempest-plugin
+- job:
+ name: tempest-cinder-v2-api
+ parent: devstack-tempest
+ branches:
+ - master
+ description: |
+      This job runs the cinder API tests against the v2 endpoint.
+ vars:
+ tox_envlist: all
+ tempest_test_regex: api.*volume
+ devstack_localrc:
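+        # Assumption: devstack consumes TEMPEST_VOLUME_TYPE to point the
+        # tempest volume tests at the cinder v2 (volumev2) endpoint.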
+ TEMPEST_VOLUME_TYPE: volumev2
+
- project:
check:
jobs:
@@ -259,6 +285,17 @@
- ^tempest/hacking/.*$
- ^tempest/tests/.*$
- tempest-tox-plugin-sanity-check
+ - tempest-scenario-multinode-lvm-multibackend:
+ voting: false
+ irrelevant-files:
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^doc/.*$
+ - ^etc/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tempest/hacking/.*$
+ - ^tempest/tests/.*$
gate:
jobs:
- nova-multiattach
@@ -284,6 +321,16 @@
- ^setup.cfg$
- ^tempest/hacking/.*$
- ^tempest/tests/.*$
+ - tempest-cinder-v2-api:
+ irrelevant-files:
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^doc/.*$
+ - ^etc/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tempest/hacking/.*$
+ - ^tempest/tests/.*$
periodic-stable:
jobs:
- tempest-full-queens
diff --git a/doc/source/microversion_testing.rst b/doc/source/microversion_testing.rst
index 3bc1d0c..fa76770 100644
--- a/doc/source/microversion_testing.rst
+++ b/doc/source/microversion_testing.rst
@@ -354,6 +354,10 @@
.. _2.48: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id43
+ * `2.49`_
+
+ .. _2.49: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id44
+
* `2.54`_
.. _2.54: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id4
diff --git a/playbooks/tempest-scenario-multinode-lvm-multibackend/post.yaml b/playbooks/tempest-scenario-multinode-lvm-multibackend/post.yaml
new file mode 100644
index 0000000..e07f551
--- /dev/null
+++ b/playbooks/tempest-scenario-multinode-lvm-multibackend/post.yaml
@@ -0,0 +1,15 @@
+- hosts: primary
+ tasks:
+
+ - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
+ synchronize:
+ src: '{{ ansible_user_dir }}/workspace/'
+ dest: '{{ zuul.executor.log_root }}'
+ mode: pull
+ copy_links: true
+ verify_host: true
+ rsync_opts:
+ - --include=/logs/**
+ - --include=*/
+ - --exclude=*
+ - --prune-empty-dirs
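+      # The include/exclude patterns above pull only the logs/ tree (and
+      # the directories leading to it) back to the executor's log root.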
diff --git a/playbooks/tempest-scenario-multinode-lvm-multibackend/run.yaml b/playbooks/tempest-scenario-multinode-lvm-multibackend/run.yaml
new file mode 100644
index 0000000..03f64f9
--- /dev/null
+++ b/playbooks/tempest-scenario-multinode-lvm-multibackend/run.yaml
@@ -0,0 +1,65 @@
+- hosts: primary
+ name: Autoconverted job tempest-scenario-multinode-lvm-multibackend
+ from old job gate-tempest-dsvm-neutron-scenario-multinode-lvm-multibackend-ubuntu-xenial-nv
+ tasks:
+
+ - name: Ensure legacy workspace directory
+ file:
+ path: '{{ ansible_user_dir }}/workspace'
+ state: directory
+
+ - shell:
+ cmd: |
+ set -e
+ set -x
+ cat > clonemap.yaml << EOF
+ clonemap:
+ - name: openstack-infra/devstack-gate
+ dest: devstack-gate
+ EOF
+ /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
+ git://git.openstack.org \
+ openstack-infra/devstack-gate
+ executable: /bin/bash
+ chdir: '{{ ansible_user_dir }}/workspace'
+ environment: '{{ zuul | zuul_legacy_vars }}'
+
+ - shell:
+ cmd: |
+ set -e
+ set -x
+ cat << 'EOF' >>"/tmp/dg-local.conf"
+ [[local|localrc]]
+ ENABLE_IDENTITY_V2=False
+ TEMPEST_USE_TEST_ACCOUNTS=True
+          # Enable multiple LVM backends to run the multibackend slow scenario tests.
+          # Note: the multibackend experimental job excludes the slow scenario tests.
+ CINDER_ENABLED_BACKENDS=lvm:lvmdriver-1,lvm:lvmdriver-2
+
+ EOF
+ executable: /bin/bash
+ chdir: '{{ ansible_user_dir }}/workspace'
+ environment: '{{ zuul | zuul_legacy_vars }}'
+
+ - shell:
+ cmd: |
+ set -e
+ set -x
+ export PYTHONUNBUFFERED=true
+ export DEVSTACK_GATE_TEMPEST=1
+ # Run scenario and nova migration tests with concurrency 2
+ export DEVSTACK_GATE_TEMPEST_REGEX='(^tempest\.(scenario|api\.compute\.admin\.test_(live_|)migration))'
+ export TEMPEST_CONCURRENCY=2
+ export DEVSTACK_GATE_NEUTRON=1
+ export DEVSTACK_GATE_TLSPROXY=1
+ export BRANCH_OVERRIDE=default
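+          # 'default' keeps the branch Zuul selected for this change; any
+          # other value would be forced via OVERRIDE_ZUUL_BRANCH below.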
+ if [ "$BRANCH_OVERRIDE" != "default" ] ; then
+ export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
+ fi
+ export DEVSTACK_GATE_TOPOLOGY="multinode"
+
+ cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
+ ./safe-devstack-vm-gate-wrap.sh
+ executable: /bin/bash
+ chdir: '{{ ansible_user_dir }}/workspace'
+ environment: '{{ zuul | zuul_legacy_vars }}'
diff --git a/tempest/README.rst b/tempest/README.rst
index 62821de..a5f4a92 100644
--- a/tempest/README.rst
+++ b/tempest/README.rst
@@ -12,12 +12,12 @@
 and guidelines. Below is an overview of the Tempest repository structure
to make this clear.
- .. code-block:: console
+.. code-block:: console
- tempest/
- api/ - API tests
- scenario/ - complex scenario tests
- tests/ - unit tests for Tempest internals
+ tempest/
+ api/ - API tests
+ scenario/ - complex scenario tests
+ tests/ - unit tests for Tempest internals
Each of these directories contains different types of tests. What
belongs in each directory, the rules and examples for good tests, are
diff --git a/tempest/api/compute/admin/test_quotas.py b/tempest/api/compute/admin/test_quotas.py
index c2bdf7e..df534bc 100644
--- a/tempest/api/compute/admin/test_quotas.py
+++ b/tempest/api/compute/admin/test_quotas.py
@@ -46,13 +46,19 @@
         # tenant; most of them should be skipped if we can't do that
cls.demo_tenant_id = cls.quotas_client.tenant_id
- cls.default_quota_set = set(('injected_file_content_bytes',
- 'metadata_items', 'injected_files',
- 'ram', 'floating_ips',
- 'fixed_ips', 'key_pairs',
- 'injected_file_path_bytes',
- 'instances', 'security_group_rules',
- 'cores', 'security_groups'))
+ cls.default_quota_set = set(('metadata_items', 'ram', 'key_pairs',
+ 'instances', 'cores',
+ 'server_group_members', 'server_groups'))
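+        # The network proxy quotas (fixed/floating IPs, security groups and
+        # rules) were dropped from the compute API in microversion 2.36, and
+        # the file injection quotas in 2.57, so only expect them when the
+        # requested microversion is old enough.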
+ if cls.is_requested_microversion_compatible('2.35'):
+ cls.default_quota_set = \
+ cls.default_quota_set | set(['fixed_ips', 'floating_ips',
+ 'security_group_rules',
+ 'security_groups'])
+ if cls.is_requested_microversion_compatible('2.56'):
+ cls.default_quota_set = \
+ cls.default_quota_set | set(['injected_file_content_bytes',
+ 'injected_file_path_bytes',
+ 'injected_files'])
@decorators.idempotent_id('3b0a7c8f-cf58-46b8-a60c-715a32a8ba7d')
def test_get_default_quotas(self):
@@ -69,13 +75,19 @@
# Admin can update all the resource quota limits for a tenant
default_quota_set = self.adm_client.show_default_quota_set(
self.demo_tenant_id)['quota_set']
- new_quota_set = {'injected_file_content_bytes': 20480,
- 'metadata_items': 256, 'injected_files': 10,
- 'ram': 10240, 'floating_ips': 20, 'fixed_ips': 10,
- 'key_pairs': 200, 'injected_file_path_bytes': 512,
- 'instances': 20, 'security_group_rules': 20,
- 'cores': 2, 'security_groups': 20,
- 'server_groups': 20, 'server_group_members': 20}
+ new_quota_set = {'metadata_items': 256, 'ram': 10240,
+ 'key_pairs': 200, 'instances': 20,
+ 'server_groups': 20,
+ 'server_group_members': 20, 'cores': 2}
+ if self.is_requested_microversion_compatible('2.35'):
+ new_quota_set.update({'fixed_ips': 10, 'floating_ips': 20,
+ 'security_group_rules': 20,
+ 'security_groups': 20})
+ if self.is_requested_microversion_compatible('2.56'):
+ new_quota_set.update({'injected_file_content_bytes': 20480,
+ 'injected_file_path_bytes': 512,
+ 'injected_files': 10})
+
# Update limits for all quota resources
quota_set = self.adm_client.update_quota_set(
self.demo_tenant_id,
diff --git a/tempest/api/compute/admin/test_quotas_negative.py b/tempest/api/compute/admin/test_quotas_negative.py
index 5ef7ee4..f90ff92 100644
--- a/tempest/api/compute/admin/test_quotas_negative.py
+++ b/tempest/api/compute/admin/test_quotas_negative.py
@@ -22,12 +22,12 @@
CONF = config.CONF
-class QuotasAdminNegativeTestJSON(base.BaseV2ComputeAdminTest):
+class QuotasAdminNegativeTestBase(base.BaseV2ComputeAdminTest):
force_tenant_isolation = True
@classmethod
def setup_clients(cls):
- super(QuotasAdminNegativeTestJSON, cls).setup_clients()
+ super(QuotasAdminNegativeTestBase, cls).setup_clients()
cls.client = cls.os_primary.quotas_client
cls.adm_client = cls.os_admin.quotas_client
cls.sg_client = cls.security_groups_client
@@ -35,7 +35,7 @@
@classmethod
def resource_setup(cls):
- super(QuotasAdminNegativeTestJSON, cls).resource_setup()
+ super(QuotasAdminNegativeTestBase, cls).resource_setup()
# NOTE(afazekas): these test cases should always create and use a new
         # tenant; most of them should be skipped if we can't do that
cls.demo_tenant_id = cls.client.tenant_id
@@ -51,6 +51,9 @@
self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
**{quota_item: default_quota_value})
+
+class QuotasAdminNegativeTest(QuotasAdminNegativeTestBase):
+
@decorators.attr(type=['negative'])
@decorators.idempotent_id('733abfe8-166e-47bb-8363-23dbd7ff3476')
def test_update_quota_normal_user(self):
@@ -85,6 +88,10 @@
self.assertRaises((lib_exc.Forbidden, lib_exc.OverLimit),
self.create_test_server)
+
+class QuotasSecurityGroupAdminNegativeTest(QuotasAdminNegativeTestBase):
+ max_microversion = '2.35'
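+    # Security groups are proxied to neutron; the compute proxy APIs were
+    # removed in microversion 2.36, hence the cap at 2.35.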
+
@decorators.skip_because(bug="1186354",
condition=CONF.service_available.neutron)
@decorators.attr(type=['negative'])
diff --git a/tempest/api/compute/admin/test_security_group_default_rules.py b/tempest/api/compute/admin/test_security_group_default_rules.py
index f2f3b57..bca6a22 100644
--- a/tempest/api/compute/admin/test_security_group_default_rules.py
+++ b/tempest/api/compute/admin/test_security_group_default_rules.py
@@ -23,6 +23,7 @@
class SecurityGroupDefaultRulesTest(base.BaseV2ComputeAdminTest):
+ max_microversion = '2.35'
@classmethod
     # TODO(GMann): Once Bug# 1311500 is fixed, these tests can run
diff --git a/tempest/api/compute/admin/test_security_groups.py b/tempest/api/compute/admin/test_security_groups.py
index ff9caa3..f0178aa 100644
--- a/tempest/api/compute/admin/test_security_groups.py
+++ b/tempest/api/compute/admin/test_security_groups.py
@@ -20,6 +20,7 @@
class SecurityGroupsTestAdminJSON(base.BaseV2ComputeAdminTest):
+ max_microversion = '2.35'
@classmethod
def setup_clients(cls):
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 975728c..b377c0c 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -501,7 +501,7 @@
# is already detached.
pass
- def attach_volume(self, server, volume, device=None):
+ def attach_volume(self, server, volume, device=None, tag=None):
"""Attaches volume to server and waits for 'in-use' volume status.
The volume will be detached when the test tears down.
@@ -510,10 +510,14 @@
:param volume: The volume to attach.
:param device: Optional mountpoint for the attached volume. Note that
this is not guaranteed for all hypervisors and is not recommended.
+ :param tag: Optional device role tag to apply to the volume.
"""
attach_kwargs = dict(volumeId=volume['id'])
if device:
attach_kwargs['device'] = device
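+        # NOTE: the 'tag' parameter is only accepted by the compute API
+        # from microversion 2.49 onwards.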
+ if tag:
+ attach_kwargs['tag'] = tag
+
attachment = self.servers_client.attach_volume(
server['id'], **attach_kwargs)['volumeAttachment']
# On teardown detach the volume and wait for it to be available. This
diff --git a/tempest/api/compute/security_groups/base.py b/tempest/api/compute/security_groups/base.py
index 54a6da8..49125d1 100644
--- a/tempest/api/compute/security_groups/base.py
+++ b/tempest/api/compute/security_groups/base.py
@@ -22,6 +22,7 @@
class BaseSecurityGroupsTest(base.BaseV2ComputeTest):
+ max_microversion = '2.35'
@classmethod
def skip_checks(cls):
diff --git a/tempest/api/compute/servers/test_device_tagging.py b/tempest/api/compute/servers/test_device_tagging.py
index b0d527c..3a85a86 100644
--- a/tempest/api/compute/servers/test_device_tagging.py
+++ b/tempest/api/compute/servers/test_device_tagging.py
@@ -13,12 +13,14 @@
# under the License.
import json
+import time
from oslo_log import log as logging
from tempest.api.compute import base
from tempest.common import utils
from tempest.common.utils.linux import remote_client
+from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
@@ -31,18 +33,11 @@
LOG = logging.getLogger(__name__)
-class DeviceTaggingTest(base.BaseV2ComputeTest):
-
- min_microversion = '2.32'
- # NOTE(mriedem): max_version looks odd but it's actually correct. Due to a
- # bug in the 2.32 microversion, tags on block devices only worked with the
- # 2.32 microversion specifically. And tags on networks only worked between
- # 2.32 and 2.36 inclusive; the 2.37 microversion broke tags for networks.
- max_microversion = '2.32'
+class DeviceTaggingBase(base.BaseV2ComputeTest):
@classmethod
def skip_checks(cls):
- super(DeviceTaggingTest, cls).skip_checks()
+ super(DeviceTaggingBase, cls).skip_checks()
if not CONF.service_available.neutron:
raise cls.skipException('Neutron is required')
if not CONF.validation.run_validation:
@@ -54,7 +49,7 @@
@classmethod
def setup_clients(cls):
- super(DeviceTaggingTest, cls).setup_clients()
+ super(DeviceTaggingBase, cls).setup_clients()
cls.networks_client = cls.os_primary.networks_client
cls.ports_client = cls.os_primary.ports_client
cls.subnets_client = cls.os_primary.subnets_client
@@ -64,7 +59,55 @@
def setup_credentials(cls):
cls.set_network_resources(network=True, subnet=True, router=True,
dhcp=True)
- super(DeviceTaggingTest, cls).setup_credentials()
+ super(DeviceTaggingBase, cls).setup_credentials()
+
+ def verify_metadata_from_api(self, server, ssh_client, verify_method):
+ md_url = 'http://169.254.169.254/openstack/latest/meta_data.json'
+ LOG.info('Attempting to verify tagged devices in server %s via '
+ 'the metadata service: %s', server['id'], md_url)
+
+ def get_and_verify_metadata():
+ try:
+ ssh_client.exec_command('curl -V')
+ except exceptions.SSHExecCommandFailed:
+ if not CONF.compute_feature_enabled.config_drive:
+ raise self.skipException('curl not found in guest '
+ 'and config drive is '
+ 'disabled')
+ LOG.warning('curl was not found in the guest, device '
+ 'tagging metadata was not checked in the '
+ 'metadata API')
+ return True
+ cmd = 'curl %s' % md_url
+ md_json = ssh_client.exec_command(cmd)
+ verify_method(md_json)
+ return True
+
+ if not test_utils.call_until_true(get_and_verify_metadata,
+ CONF.compute.build_timeout,
+ CONF.compute.build_interval):
+ raise exceptions.TimeoutException('Timeout while verifying '
+ 'metadata on server.')
+
+ def verify_metadata_on_config_drive(self, server, ssh_client,
+ verify_method):
+ LOG.info('Attempting to verify tagged devices in server %s via '
+ 'the config drive.', server['id'])
+ ssh_client.mount_config_drive()
+ cmd_md = 'sudo cat /mnt/openstack/latest/meta_data.json'
+ md_json = ssh_client.exec_command(cmd_md)
+ verify_method(md_json)
+ ssh_client.unmount_config_drive()
+
+
+class TaggedBootDevicesTest(DeviceTaggingBase):
+
+ min_microversion = '2.32'
+ # NOTE(mriedem): max_version looks odd but it's actually correct. Due to a
+ # bug in the 2.32 microversion, tags on block devices only worked with the
+ # 2.32 microversion specifically. And tags on networks only worked between
+ # 2.32 and 2.36 inclusive; the 2.37 microversion broke tags for networks.
+ max_microversion = '2.32'
def verify_device_metadata(self, md_json):
md_dict = json.loads(md_json)
@@ -92,7 +135,7 @@
@decorators.idempotent_id('a2e65a6c-66f1-4442-aaa8-498c31778d96')
@utils.services('network', 'volume', 'image')
- def test_device_tagging(self):
+ def test_tagged_boot_devices(self):
# Create volumes
# The create_volume methods waits for the volumes to be available and
# the base class will clean them up on tearDown.
@@ -134,7 +177,6 @@
self.addCleanup(self.ports_client.delete_port, self.port2['id'])
# Create server
- admin_pass = data_utils.rand_password()
config_drive_enabled = CONF.compute_feature_enabled.config_drive
validation_resources = self.get_test_validation_resources(
self.os_primary)
@@ -144,7 +186,6 @@
wait_until='ACTIVE',
validation_resources=validation_resources,
config_drive=config_drive_enabled,
- adminPass=admin_pass,
name=data_utils.rand_name('device-tagging-server'),
networks=[
# Validation network for ssh
@@ -209,11 +250,10 @@
self.addCleanup(self.delete_server, server['id'])
server = self.servers_client.show_server(server['id'])['server']
- self.ssh_client = remote_client.RemoteClient(
+ ssh_client = remote_client.RemoteClient(
self.get_server_ip(server, validation_resources),
CONF.validation.image_ssh_user,
- admin_pass,
- validation_resources['keypair']['private_key'],
+ pkey=validation_resources['keypair']['private_key'],
server=server,
servers_client=self.servers_client)
@@ -233,46 +273,104 @@
self.assertTrue(self.net_2_100_mac)
self.assertTrue(self.net_2_200_mac)
- # Verify metadata from metadata service
+ # Verify metadata from metadata API
if CONF.compute_feature_enabled.metadata_service:
- md_url = 'http://169.254.169.254/openstack/latest/meta_data.json'
- LOG.info('Attempting to verify tagged devices in server %s via '
- 'the metadata service: %s', server['id'], md_url)
-
- def get_and_verify_metadata():
- try:
- self.ssh_client.exec_command('curl -V')
- except exceptions.SSHExecCommandFailed:
- if not CONF.compute_feature_enabled.config_drive:
- raise self.skipException('curl not found in guest '
- 'and config drive is '
- 'disabled')
- LOG.warning('curl was not found in the guest, device '
- 'tagging metadata was not checked in the '
- 'metadata API')
- return True
- cmd = 'curl %s' % md_url
- md_json = self.ssh_client.exec_command(cmd)
- self.verify_device_metadata(md_json)
- return True
-
- if not test_utils.call_until_true(get_and_verify_metadata,
- CONF.compute.build_timeout,
- CONF.compute.build_interval):
- raise exceptions.TimeoutException('Timeout while verifying '
- 'metadata on server.')
+ self.verify_metadata_from_api(server, ssh_client,
+ self.verify_device_metadata)
# Verify metadata on config drive
if CONF.compute_feature_enabled.config_drive:
- LOG.info('Attempting to verify tagged devices in server %s via '
- 'the config drive.', server['id'])
- self.ssh_client.mount_config_drive()
- cmd_md = 'sudo cat /mnt/openstack/latest/meta_data.json'
- md_json = self.ssh_client.exec_command(cmd_md)
- self.verify_device_metadata(md_json)
- self.ssh_client.unmount_config_drive()
+ self.verify_metadata_on_config_drive(server, ssh_client,
+ self.verify_device_metadata)
-class DeviceTaggingTestV2_42(DeviceTaggingTest):
+class TaggedBootDevicesTest_v242(TaggedBootDevicesTest):
min_microversion = '2.42'
max_microversion = 'latest'
+
+
+class TaggedAttachmentsTest(DeviceTaggingBase):
+
+ min_microversion = '2.49'
+ max_microversion = 'latest'
+
+ @classmethod
+ def skip_checks(cls):
+ super(TaggedAttachmentsTest, cls).skip_checks()
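+        # The config drive is generated at boot, so attaching and detaching
+        # tagged devices can only be observed via the metadata API.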
+ if not CONF.compute_feature_enabled.metadata_service:
+ raise cls.skipException('Metadata API must be enabled')
+
+ def verify_device_metadata(self, md_json):
+ md_dict = json.loads(md_json)
+ found_devices = [d['tags'][0] for d in md_dict['devices']]
+ self.assertItemsEqual(found_devices, ['nic-tag', 'volume-tag'])
+
+ def verify_empty_devices(self, md_json):
+ md_dict = json.loads(md_json)
+ self.assertEmpty(md_dict['devices'])
+
+ @decorators.idempotent_id('3e41c782-2a89-4922-a9d2-9a188c4e7c7c')
+ @utils.services('network', 'volume', 'image')
+ def test_tagged_attachment(self):
+ # Create network
+ net = self.networks_client.create_network(
+ name=data_utils.rand_name(
+ 'tagged-attachments-test-net'))['network']
+ self.addCleanup(self.networks_client.delete_network, net['id'])
+
+ # Create subnet
+ subnet = self.subnets_client.create_subnet(
+ network_id=net['id'],
+ cidr='10.10.10.0/24',
+ ip_version=4)['subnet']
+ self.addCleanup(self.subnets_client.delete_subnet, subnet['id'])
+
+ # Create volume
+ volume = self.create_volume()
+
+ # Boot test server
+ config_drive_enabled = CONF.compute_feature_enabled.config_drive
+ validation_resources = self.get_test_validation_resources(
+ self.os_primary)
+
+ server = self.create_test_server(
+ validatable=True,
+ validation_resources=validation_resources,
+ config_drive=config_drive_enabled,
+ name=data_utils.rand_name('device-tagging-server'),
+ networks=[{'uuid': self.get_tenant_network()['id']}])
+ self.addCleanup(self.delete_server, server['id'])
+
+ # Attach tagged nic and volume
+ interface = self.interfaces_client.create_interface(
+ server['id'], net_id=net['id'],
+ tag='nic-tag')['interfaceAttachment']
+ self.attach_volume(server, volume, tag='volume-tag')
+
+ ssh_client = remote_client.RemoteClient(
+ self.get_server_ip(server, validation_resources),
+ CONF.validation.image_ssh_user,
+ pkey=validation_resources['keypair']['private_key'],
+ server=server,
+ servers_client=self.servers_client)
+
+ # NOTE(artom) The newly attached tagged nic won't appear in the
+ # metadata until the cache is refreshed. We wait 16 seconds since the
+ # default cache expiry is 15 seconds.
+ time.sleep(16)
+ self.verify_metadata_from_api(server, ssh_client,
+ self.verify_device_metadata)
+
+ # Detach tagged nic and volume
+ self.servers_client.detach_volume(server['id'], volume['id'])
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'available')
+ self.interfaces_client.delete_interface(server['id'],
+ interface['port_id'])
+ waiters.wait_for_interface_detach(self.interfaces_client,
+ server['id'],
+ interface['port_id'])
+ # NOTE(artom) More waiting until metadata cache is refreshed.
+ time.sleep(16)
+ self.verify_metadata_from_api(server, ssh_client,
+ self.verify_empty_devices)
diff --git a/tempest/api/compute/test_quotas.py b/tempest/api/compute/test_quotas.py
index 7cf90ae..a62492d 100644
--- a/tempest/api/compute/test_quotas.py
+++ b/tempest/api/compute/test_quotas.py
@@ -43,14 +43,19 @@
super(QuotasTestJSON, cls).resource_setup()
cls.tenant_id = cls.client.tenant_id
cls.user_id = cls.client.user_id
- cls.default_quota_set = set(('injected_file_content_bytes',
- 'metadata_items', 'injected_files',
- 'ram', 'floating_ips',
- 'fixed_ips', 'key_pairs',
- 'injected_file_path_bytes',
- 'instances', 'security_group_rules',
- 'cores', 'security_groups',
+ cls.default_quota_set = set(('metadata_items', 'ram', 'key_pairs',
+ 'instances', 'cores',
'server_group_members', 'server_groups'))
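+        # Same microversion-dependent membership as in the admin quotas
+        # test: proxy quotas are gone after 2.35, file injection quotas
+        # after 2.56.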
+ if cls.is_requested_microversion_compatible('2.35'):
+ cls.default_quota_set = \
+ cls.default_quota_set | set(['fixed_ips', 'floating_ips',
+ 'security_group_rules',
+ 'security_groups'])
+ if cls.is_requested_microversion_compatible('2.56'):
+ cls.default_quota_set = \
+ cls.default_quota_set | set(['injected_file_content_bytes',
+ 'injected_file_path_bytes',
+ 'injected_files'])
@decorators.idempotent_id('f1ef0a97-dbbb-4cca-adc5-c9fbc4f76107')
def test_get_quotas(self):
diff --git a/tempest/api/compute/volumes/test_attach_volume_negative.py b/tempest/api/compute/volumes/test_attach_volume_negative.py
index 7a74869..8618148 100644
--- a/tempest/api/compute/volumes/test_attach_volume_negative.py
+++ b/tempest/api/compute/volumes/test_attach_volume_negative.py
@@ -56,3 +56,16 @@
self.assertRaises(lib_exc.BadRequest,
self.attach_volume, server, volume)
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('ee37a796-2afb-11e7-bc0f-fa163e65f5ce')
+ def test_attach_attached_volume_to_different_server(self):
+ server1 = self.create_test_server(wait_until='ACTIVE')
+ volume = self.create_volume()
+
+ self.attach_volume(server1, volume)
+
+ # Create server2 and attach in-use volume
+ server2 = self.create_test_server(wait_until='ACTIVE')
+ self.assertRaises(lib_exc.BadRequest,
+ self.attach_volume, server2, volume)
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 08e2a12..0e86f05 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -287,3 +287,24 @@
raise lib_exc.TimeoutException(message)
return body
+
+
+def wait_for_interface_detach(client, server_id, port_id):
+ """Waits for an interface to be detached from a server."""
+ body = client.list_interfaces(server_id)['interfaceAttachments']
+ ports = [iface['port_id'] for iface in body]
+ start = int(time.time())
+
+ while port_id in ports:
+ time.sleep(client.build_interval)
+ body = client.list_interfaces(server_id)['interfaceAttachments']
+ ports = [iface['port_id'] for iface in body]
+ if port_id not in ports:
+ return body
+
+ timed_out = int(time.time()) - start >= client.build_timeout
+ if timed_out:
+ message = ('Interface %s failed to detach from server %s within '
+ 'the required time (%s s)' % (port_id, server_id,
+ client.build_timeout))
+ raise lib_exc.TimeoutException(message)
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index bc197b5..eb1e2b6 100644
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -72,3 +72,64 @@
mock_show.assert_has_calls([mock.call(volume_id),
mock.call(volume_id)])
mock_sleep.assert_called_once_with(1)
+
+
+class TestInterfaceWaiters(base.TestCase):
+ def setUp(self):
+ super(TestInterfaceWaiters, self).setUp()
+ self.client = mock.MagicMock()
+ self.client.build_timeout = 1
+ self.client.build_interval = 1
+
+ def _port_down(self):
+ return {'interfaceAttachment': {'port_state': 'DOWN'}}
+
+ def _port_active(self):
+ return {'interfaceAttachment': {'port_state': 'ACTIVE'}}
+
+ def test_wait_for_interface_status(self):
+ self.client.show_interface.side_effect = [self._port_down(),
+ self._port_active()]
+ with mock.patch.object(time, 'sleep') as sleep_mock:
+ start_time = int(time.time())
+ waiters.wait_for_interface_status(self.client, 'server_id',
+ 'port_id', 'ACTIVE')
+ end_time = int(time.time())
+ self.assertLess(end_time, (start_time + self.client.build_timeout))
+ sleep_mock.assert_called_once_with(self.client.build_interval)
+
+ def test_wait_for_interface_status_timeout(self):
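+        # generate_timeout_series(1) makes each time.time() call return an
+        # increasing value, so the one-second build_timeout is exceeded on
+        # the first check and the waiter raises TimeoutException.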
+ time_mock = self.patch('time.time')
+ time_mock.side_effect = utils.generate_timeout_series(1)
+
+ self.client.show_interface.return_value = self._port_down()
+ self.assertRaises(lib_exc.TimeoutException,
+ waiters.wait_for_interface_status,
+ self.client, 'server_id', 'port_id', 'ACTIVE')
+
+ def _one_interface(self):
+ return {'interfaceAttachments': [{'port_id': 'port_one'}]}
+
+ def _two_interfaces(self):
+ return {'interfaceAttachments': [{'port_id': 'port_one'},
+ {'port_id': 'port_two'}]}
+
+ def test_wait_for_interface_detach(self):
+ self.client.list_interfaces.side_effect = [self._two_interfaces(),
+ self._one_interface()]
+ with mock.patch.object(time, 'sleep') as sleep_mock:
+ start_time = int(time.time())
+ waiters.wait_for_interface_detach(self.client, 'server_id',
+ 'port_two')
+ end_time = int(time.time())
+ self.assertLess(end_time, (start_time + self.client.build_timeout))
+ sleep_mock.assert_called_once_with(self.client.build_interval)
+
+ def test_wait_for_interface_detach_timeout(self):
+ time_mock = self.patch('time.time')
+ time_mock.side_effect = utils.generate_timeout_series(1)
+
+ self.client.list_interfaces.return_value = self._one_interface()
+ self.assertRaises(lib_exc.TimeoutException,
+ waiters.wait_for_interface_detach,
+ self.client, 'server_id', 'port_one')