Merge "Remove grenade jobs from integrated-gate-networking"
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 3ac2b46..3b44ded 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -560,8 +560,17 @@
'tagging metadata was not checked in the '
'metadata API')
return True
+
cmd = 'curl %s' % md_url
- md_json = ssh_client.exec_command(cmd)
+ try:
+ md_json = ssh_client.exec_command(cmd)
+ except lib_exc.SSHExecCommandFailed:
+ # NOTE(eolivare): We cannot guarantee that the metadata service
+ # is available right after the VM is ssh-able, because it could
+ # obtain authorized ssh keys from config_drive, or it could use a
+ # password. Hence, retries may be needed.
+ LOG.exception('metadata service not available yet')
+ return False
return verify_method(md_json)
# NOTE(gmann) Keep refreshing the metadata info until the metadata
# cache is refreshed. For safer side, we will go with wait loop of
diff --git a/tempest/api/object_storage/test_account_quotas.py b/tempest/api/object_storage/test_account_quotas.py
index 37783b8..6c472a6 100644
--- a/tempest/api/object_storage/test_account_quotas.py
+++ b/tempest/api/object_storage/test_account_quotas.py
@@ -109,6 +109,58 @@
nafter = self._get_bytes_used()
self.assertEqual(nbefore, nafter)
+
+ @decorators.idempotent_id('aab68903-cc9f-493a-b17e-b387db3e4e44')
+ @utils.requires_ext(extension='account_quotas', service='object')
+ def test_storage_policy_quota_limit(self):
+ """Verify quota limits are enforced per storage policy"""
+ policy_names = [p["name"] for p in self.policies]
+ if 'silver' not in policy_names:
+ raise self.skipException("Missing storage policy 'silver'")
+
+ policy_quota = 10
+ policy_quota_header = {
+ "X-Account-Quota-Bytes-Policy-silver": str(policy_quota)
+ }
+ self.account_client.auth_provider.set_alt_auth_data(
+ request_part='headers',
+ auth_data=self.reselleradmin_auth_data
+ )
+ self.os_roles_operator.account_client.request(
+ "POST", url="", headers=policy_quota_header, body=""
+ )
+
+ # Create a new container using the "silver" storage policy
+ silver_container = data_utils.rand_name("silver-container")
+ headers = {'X-Storage-Policy': 'silver'}
+ self.container_client.create_container(
+ silver_container, **headers
+ )
+
+ # Try uploading an object larger than the quota
+ large_data = data_utils.arbitrary_string(size=policy_quota + 1)
+ object_name = data_utils.rand_name(name='large_object')
+ self.assertRaises(
+ lib_exc.OverLimit,
+ self.object_client.create_object,
+ silver_container,
+ object_name,
+ large_data
+ )
+
+ # Upload same large object to default container
+ default_container = data_utils.rand_name(
+ "default_container"
+ )
+ self.container_client.create_container(default_container)
+ default_object = data_utils.rand_name(name='default_object')
+ resp, _ = self.object_client.create_object(
+ default_container,
+ default_object,
+ large_data
+ )
+ self.assertHeaders(resp, 'Object', 'PUT')
+
@decorators.attr(type=["smoke"])
@decorators.idempotent_id('63f51f9f-5f1d-4fc6-b5be-d454d70949d6')
@utils.requires_ext(extension='account_quotas', service='object')
diff --git a/tempest/scenario/test_network_qos_placement.py b/tempest/scenario/test_network_qos_placement.py
index 055dcb6..faff6f9 100644
--- a/tempest/scenario/test_network_qos_placement.py
+++ b/tempest/scenario/test_network_qos_placement.py
@@ -152,21 +152,36 @@
min_kbps=self.BANDWIDTH_2
)
- def _create_network_and_qos_policies(self, policy_method):
- physnet_name = CONF.network_feature_enabled.qos_placement_physnet
- base_segm = \
- CONF.network_feature_enabled.provider_net_base_segmentation_id
-
- self.prov_network, _, _ = self.setup_network_subnet_with_router(
- networks_client=self.networks_client,
- routers_client=self.routers_client,
- subnets_client=self.subnets_client,
+ def _use_or_create_network_and_qos_policies(self, policy_method):
+ vlan_ext_nets = self.networks_client.list_networks(
**{
- 'shared': True,
'provider:network_type': 'vlan',
- 'provider:physical_network': physnet_name,
- 'provider:segmentation_id': base_segm
- })
+ 'router:external': True}
+ )['networks']
+ if vlan_ext_nets:
+ self.prov_network = vlan_ext_nets[0]
+ if not self.prov_network['shared']:
+ self.prov_network = self.networks_client.update_network(
+ self.prov_network['id'], shared=True)['network']
+ self.addClassResourceCleanup(
+ self.networks_client.update_network,
+ self.prov_network['id'],
+ shared=False)
+ else:
+ physnet_name = CONF.network_feature_enabled.qos_placement_physnet
+ base_segm = \
+ CONF.network_feature_enabled.provider_net_base_segmentation_id
+
+ self.prov_network, _, _ = self.setup_network_subnet_with_router(
+ networks_client=self.networks_client,
+ routers_client=self.routers_client,
+ subnets_client=self.subnets_client,
+ **{
+ 'shared': True,
+ 'provider:network_type': 'vlan',
+ 'provider:physical_network': physnet_name,
+ 'provider:segmentation_id': base_segm
+ })
policy_method()
@@ -261,7 +276,8 @@
* Create port with invalid QoS policy, and try to boot VM with that,
it should fail.
"""
- self._create_network_and_qos_policies(self._create_qos_basic_policies)
+ self._use_or_create_network_and_qos_policies(
+ self._create_qos_basic_policies)
server1, valid_port = self._boot_vm_with_min_bw(
qos_policy_id=self.qos_policy_valid['id'])
self._assert_allocation_is_as_expected(server1['id'],
@@ -297,7 +313,8 @@
* If the VM goes to ACTIVE state check that allocations are as
expected.
"""
- self._create_network_and_qos_policies(self._create_qos_basic_policies)
+ self._use_or_create_network_and_qos_policies(
+ self._create_qos_basic_policies)
server, valid_port = self._boot_vm_with_min_bw(
qos_policy_id=self.qos_policy_valid['id'])
self._assert_allocation_is_as_expected(server['id'],
@@ -335,7 +352,8 @@
* If the VM goes to ACTIVE state check that allocations are as
expected.
"""
- self._create_network_and_qos_policies(self._create_qos_basic_policies)
+ self._use_or_create_network_and_qos_policies(
+ self._create_qos_basic_policies)
server, valid_port = self._boot_vm_with_min_bw(
qos_policy_id=self.qos_policy_valid['id'])
self._assert_allocation_is_as_expected(server['id'],
@@ -378,7 +396,7 @@
if not utils.is_network_feature_enabled('update_port_qos'):
raise self.skipException("update_port_qos feature is not enabled")
- self._create_network_and_qos_policies(
+ self._use_or_create_network_and_qos_policies(
self._create_qos_policies_from_life)
port = self.create_port(
@@ -432,7 +450,7 @@
if not utils.is_network_feature_enabled('update_port_qos'):
raise self.skipException("update_port_qos feature is not enabled")
- self._create_network_and_qos_policies(
+ self._use_or_create_network_and_qos_policies(
self._create_qos_policies_from_life)
port = self.create_port(self.prov_network['id'])
@@ -457,7 +475,7 @@
if not utils.is_network_feature_enabled('update_port_qos'):
raise self.skipException("update_port_qos feature is not enabled")
- self._create_network_and_qos_policies(
+ self._use_or_create_network_and_qos_policies(
self._create_qos_policies_from_life)
port = self.create_port(
@@ -479,7 +497,7 @@
if not utils.is_network_feature_enabled('update_port_qos'):
raise self.skipException("update_port_qos feature is not enabled")
- self._create_network_and_qos_policies(
+ self._use_or_create_network_and_qos_policies(
self._create_qos_policies_from_life)
port1 = self.create_port(
@@ -506,7 +524,7 @@
if not utils.is_network_feature_enabled('update_port_qos'):
raise self.skipException("update_port_qos feature is not enabled")
- self._create_network_and_qos_policies(
+ self._use_or_create_network_and_qos_policies(
self._create_qos_policies_from_life)
port = self.create_port(
@@ -552,7 +570,7 @@
direction=self.EGRESS_DIRECTION,
)
- self._create_network_and_qos_policies(create_policies)
+ self._use_or_create_network_and_qos_policies(create_policies)
port = self.create_port(
self.prov_network['id'],
diff --git a/test-requirements.txt b/test-requirements.txt
index b925921..f599d53 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,4 +1,4 @@
hacking>=7.0.0,<7.1.0
coverage!=4.4,>=4.0 # Apache-2.0
oslotest>=3.2.0 # Apache-2.0
-flake8-import-order>=0.18.0,<0.19.0 # LGPLv3
+flake8-import-order>=0.19.0 # LGPLv3
diff --git a/tools/generate-tempest-plugins-list.py b/tools/generate-tempest-plugins-list.py
index 2e8ced5..0690d57 100644
--- a/tools/generate-tempest-plugins-list.py
+++ b/tools/generate-tempest-plugins-list.py
@@ -79,6 +79,10 @@
# No changes are merging in this
# https://review.opendev.org/q/project:x%252Fnetworking-fortinet
- 'x/networking-fortinet'
+ 'x/networking-fortinet',
+ # It is broken and it uses the retired plugin 'patrol'. The last
+ # change done in this plugin was 7 years ago.
+ # https://opendev.org/airship/tempest-plugin
+ 'airship/tempest-plugin'
]
url = 'https://review.opendev.org/projects/'
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
index 154e4e8..2fc7aea 100644
--- a/zuul.d/integrated-gate.yaml
+++ b/zuul.d/integrated-gate.yaml
@@ -82,8 +82,8 @@
Former names for this job where:
* legacy-tempest-dsvm-py35
* gate-tempest-dsvm-py35
- required-projects:
- - openstack/horizon
+ # required-projects:
+ # - openstack/horizon
vars:
# NOTE(gmann): Default concurrency is higher (number of cpu -2) which
# end up 6 in upstream CI. Higher concurrency means high parallel
@@ -101,7 +101,11 @@
neutron: https://opendev.org/openstack/neutron
devstack_services:
# Enable horizon so that we can run horizon test.
- horizon: true
+ # horizon: true
+ # FIXME(sean-k-mooney): restore horizon deployment
+ # once horizon does not depend on setuptools to provide
+ # pkg_resources or bug #2141277 is resolved by other means
+ horizon: false
- job:
name: tempest-full-centos-9-stream