Merge "Revert "Revert "[S-RBAC] Switch to new policies by default"""
diff --git a/neutron_tempest_plugin/api/admin/test_external_network_extension.py b/neutron_tempest_plugin/api/admin/test_external_network_extension.py
index cf6c44d..c4b55c9 100644
--- a/neutron_tempest_plugin/api/admin/test_external_network_extension.py
+++ b/neutron_tempest_plugin/api/admin/test_external_network_extension.py
@@ -53,7 +53,7 @@
self.client.create_rbac_policy(
object_type='network', object_id=net['id'],
action='access_as_external',
- target_tenant=self.client2.tenant_id)
+ target_tenant=self.client2.project_id)
body = self.client2.list_networks()
networks_list = [n['id'] for n in body['networks']]
self.assertIn(net['id'], networks_list)
@@ -107,7 +107,7 @@
# changing wildcard to specific tenant should be okay since it's the
# only one using the network
self.admin_client.update_rbac_policy(
- rbac_pol['id'], target_tenant=self.client2.tenant_id)
+ rbac_pol['id'], target_tenant=self.client2.project_id)
@decorators.idempotent_id('a5539002-5bdb-48b5-b124-e9eedd5975e6')
def test_external_conversion_on_policy_create(self):
@@ -115,7 +115,7 @@
self.admin_client.create_rbac_policy(
object_type='network', object_id=net_id,
action='access_as_external',
- target_tenant=self.client2.tenant_id)
+ target_tenant=self.client2.project_id)
body = self.admin_client.show_network(net_id)['network']
self.assertTrue(body['router:external'])
@@ -138,13 +138,13 @@
self.admin_client.create_rbac_policy(
object_type='network', object_id=net_id,
action='access_as_external',
- target_tenant=self.admin_client.tenant_id)
+ target_tenant=self.admin_client.project_id)
body = self.admin_client.show_network(net_id)['network']
self.assertTrue(body['router:external'])
policy2 = self.admin_client.create_rbac_policy(
object_type='network', object_id=net_id,
action='access_as_external',
- target_tenant=self.client2.tenant_id)
+ target_tenant=self.client2.project_id)
self.admin_client.delete_rbac_policy(policy2['rbac_policy']['id'])
body = self.admin_client.show_network(net_id)['network']
self.assertTrue(body['router:external'])
@@ -168,14 +168,14 @@
self.admin_client.create_rbac_policy(
object_type='network', object_id=net['id'],
action='access_as_external',
- target_tenant=self.admin_client.tenant_id)
+ target_tenant=self.admin_client.project_id)
self.create_subnet(net, client=self.admin_client, enable_dhcp=False)
with testtools.ExpectedException(lib_exc.NotFound):
self.create_floatingip(net['id'], client=self.client2)
self.admin_client.create_rbac_policy(
object_type='network', object_id=net['id'],
action='access_as_external',
- target_tenant=self.client2.tenant_id)
+ target_tenant=self.client2.project_id)
self.create_floatingip(net['id'], client=self.client2)
@decorators.idempotent_id('476be1e0-f72e-47dc-9a14-4435926bbe82')
@@ -185,7 +185,7 @@
self.admin_client.create_rbac_policy(
object_type='network', object_id=net['id'],
action='access_as_external',
- target_tenant=self.client2.tenant_id)
+ target_tenant=self.client2.project_id)
r = self.client2.create_router(
data_utils.rand_name('router'),
external_gateway_info={'network_id': net['id']})['router']
@@ -209,7 +209,7 @@
tenant = self.admin_client.create_rbac_policy(
object_type='network', object_id=net['id'],
action='access_as_external',
- target_tenant=self.client2.tenant_id)['rbac_policy']
+ target_tenant=self.client2.project_id)['rbac_policy']
# now we can delete the policy because the tenant has its own policy
# to allow it access
self.admin_client.delete_rbac_policy(wildcard['id'])
diff --git a/neutron_tempest_plugin/api/admin/test_networks.py b/neutron_tempest_plugin/api/admin/test_networks.py
index 74e72ef..17a8990 100644
--- a/neutron_tempest_plugin/api/admin/test_networks.py
+++ b/neutron_tempest_plugin/api/admin/test_networks.py
@@ -25,7 +25,7 @@
@decorators.idempotent_id('d3c76044-d067-4cb0-ae47-8cdd875c7f67')
@utils.requires_ext(extension="project-id", service="network")
def test_create_network_with_project(self):
- project_id = self.client.tenant_id # non-admin
+ project_id = self.client.project_id # non-admin
name = 'admin-created-with-project_id'
network = self.create_network(name, project_id=project_id,
@@ -43,7 +43,7 @@
@decorators.idempotent_id('8d21aaca-4364-4eb9-8b79-44b4fff6373b')
@utils.requires_ext(extension="project-id", service="network")
def test_create_network_with_project_and_tenant(self):
- project_id = self.client.tenant_id # non-admin
+ project_id = self.client.project_id # non-admin
name = 'created-with-project-and-tenant'
network = self.create_network(name, project_id=project_id,
@@ -62,7 +62,7 @@
@decorators.idempotent_id('08b92179-669d-45ee-8233-ef6611190809')
@utils.requires_ext(extension="project-id", service="network")
def test_create_network_with_project_and_other_tenant(self):
- project_id = self.client.tenant_id # non-admin
+ project_id = self.client.project_id # non-admin
other_tenant = uuidutils.generate_uuid()
name = 'created-with-project-and-other-tenant'
diff --git a/neutron_tempest_plugin/api/admin/test_shared_network_extension.py b/neutron_tempest_plugin/api/admin/test_shared_network_extension.py
index 1444b2d..5522194 100644
--- a/neutron_tempest_plugin/api/admin/test_shared_network_extension.py
+++ b/neutron_tempest_plugin/api/admin/test_shared_network_extension.py
@@ -214,7 +214,7 @@
@decorators.idempotent_id('86c3529b-1231-40de-803c-afffffff1fff')
def test_network_only_visible_to_policy_target(self):
net = self._make_admin_net_and_subnet_shared_to_project_id(
- self.client.tenant_id)['network']
+ self.client.project_id)['network']
self.client.show_network(net['id'])
with testtools.ExpectedException(lib_exc.NotFound):
# client2 has not been granted access
@@ -223,7 +223,7 @@
@decorators.idempotent_id('86c3529b-1231-40de-803c-afffffff2fff')
def test_subnet_on_network_only_visible_to_policy_target(self):
sub = self._make_admin_net_and_subnet_shared_to_project_id(
- self.client.tenant_id)['subnet']
+ self.client.project_id)['subnet']
self.client.show_subnet(sub['id'])
with testtools.ExpectedException(lib_exc.NotFound):
# client2 has not been granted access
@@ -232,11 +232,11 @@
@decorators.idempotent_id('86c3529b-1231-40de-803c-afffffff2eee')
def test_policy_target_update(self):
res = self._make_admin_net_and_subnet_shared_to_project_id(
- self.client.tenant_id)
+ self.client.project_id)
# change to client2
update_res = self.admin_client.update_rbac_policy(
- res['policy']['id'], target_tenant=self.client2.tenant_id)
- self.assertEqual(self.client2.tenant_id,
+ res['policy']['id'], target_tenant=self.client2.project_id)
+ self.assertEqual(self.client2.project_id,
update_res['rbac_policy']['target_tenant'])
# make sure everything else stayed the same
res['policy'].pop('target_tenant')
@@ -246,16 +246,17 @@
@decorators.idempotent_id('86c3529b-1231-40de-803c-affefefef321')
def test_duplicate_policy_error(self):
res = self._make_admin_net_and_subnet_shared_to_project_id(
- self.client.tenant_id)
+ self.client.project_id)
with testtools.ExpectedException(lib_exc.Conflict):
self.admin_client.create_rbac_policy(
object_type='network', object_id=res['network']['id'],
- action='access_as_shared', target_tenant=self.client.tenant_id)
+ action='access_as_shared',
+ target_tenant=self.client.project_id)
@decorators.idempotent_id('86c3529b-1231-40de-803c-afffffff3fff')
def test_port_presence_prevents_network_rbac_policy_deletion(self):
res = self._make_admin_net_and_subnet_shared_to_project_id(
- self.client.tenant_id)
+ self.client.project_id)
port = self.create_port(res['network'])
# a port on the network should prevent the deletion of a policy
# required for it to exist
@@ -282,7 +283,7 @@
net = self.create_network() # owned by self.client
self.client.create_rbac_policy(
object_type='network', object_id=net['id'],
- action='access_as_shared', target_tenant=self.client2.tenant_id)
+ action='access_as_shared', target_tenant=self.client2.project_id)
port = self.client2.create_port(network_id=net['id'])['port']
self.client.delete_port(port['id'])
@@ -300,7 +301,7 @@
self.client2.show_network(net['id'])
pol = self.client.create_rbac_policy(
object_type='network', object_id=net['id'],
- action='access_as_shared', target_tenant=self.client2.tenant_id)
+ action='access_as_shared', target_tenant=self.client2.project_id)
self.client2.show_network(net['id'])
self.assertIn(pol['rbac_policy'],
@@ -316,7 +317,7 @@
net = self.create_network()
self.client.create_rbac_policy(
object_type='network', object_id=net['id'],
- action='access_as_shared', target_tenant=self.client2.tenant_id)
+ action='access_as_shared', target_tenant=self.client2.project_id)
field_args = (('id',), ('id', 'action'), ('object_type', 'object_id'),
('project_id', 'target_tenant'))
for fields in field_args:
@@ -326,7 +327,7 @@
@decorators.idempotent_id('86c3529b-1231-40de-803c-afffffff5fff')
def test_policy_show(self):
res = self._make_admin_net_and_subnet_shared_to_project_id(
- self.client.tenant_id)
+ self.client.project_id)
p1 = res['policy']
p2 = self.admin_client.create_rbac_policy(
object_type='network', object_id=res['network']['id'],
@@ -344,11 +345,11 @@
pol1 = self.client.create_rbac_policy(
object_type='network', object_id=net['id'],
action='access_as_shared',
- target_tenant=self.client2.tenant_id)['rbac_policy']
+ target_tenant=self.client2.project_id)['rbac_policy']
pol2 = self.client.create_rbac_policy(
object_type='network', object_id=net['id'],
action='access_as_shared',
- target_tenant=self.client.tenant_id)['rbac_policy']
+ target_tenant=self.client.project_id)['rbac_policy']
res1 = self.client.list_rbac_policies(id=pol1['id'])['rbac_policies']
res2 = self.client.list_rbac_policies(id=pol2['id'])['rbac_policies']
self.assertEqual(1, len(res1))
@@ -359,16 +360,17 @@
@decorators.idempotent_id('86c3529b-1231-40de-803c-afffffff6fff')
def test_regular_client_blocked_from_sharing_anothers_network(self):
net = self._make_admin_net_and_subnet_shared_to_project_id(
- self.client.tenant_id)['network']
+ self.client.project_id)['network']
with testtools.ExpectedException(lib_exc.BadRequest):
self.client.create_rbac_policy(
object_type='network', object_id=net['id'],
- action='access_as_shared', target_tenant=self.client.tenant_id)
+ action='access_as_shared',
+ target_tenant=self.client.project_id)
@decorators.idempotent_id('c5f8f785-ce8d-4430-af7e-a236205862fb')
@utils.requires_ext(extension="quotas", service="network")
def test_rbac_policy_quota(self):
- quota = self.client.show_quotas(self.client.tenant_id)['quota']
+ quota = self.client.show_quotas(self.client.project_id)['quota']
max_policies = quota['rbac_policy']
self.assertGreater(max_policies, 0)
net = self.client.create_network(
@@ -391,7 +393,7 @@
# ensure it works on update as well
pol = self.client.create_rbac_policy(
object_type='network', object_id=net['id'],
- action='access_as_shared', target_tenant=self.client2.tenant_id)
+ action='access_as_shared', target_tenant=self.client2.project_id)
with testtools.ExpectedException(lib_exc.Forbidden):
self.client.update_rbac_policy(pol['rbac_policy']['id'],
target_tenant='*')
@@ -405,14 +407,14 @@
target_tenant=net['project_id'])['rbac_policy']
port = self.create_port(net)
self.client.update_rbac_policy(self_share['id'],
- target_tenant=self.client2.tenant_id)
+ target_tenant=self.client2.project_id)
self.client.delete_port(port['id'])
@utils.requires_ext(extension="standard-attr-revisions", service="network")
@decorators.idempotent_id('86c3529b-1231-40de-1234-89664291a4cb')
def test_rbac_bumps_network_revision(self):
resp = self._make_admin_net_and_subnet_shared_to_project_id(
- self.client.tenant_id)
+ self.client.project_id)
net_id = resp['network']['id']
rev = self.client.show_network(net_id)['network']['revision_number']
self.admin_client.create_rbac_policy(
@@ -426,7 +428,7 @@
@decorators.idempotent_id('86c3529b-1231-40de-803c-aeeeeeee7fff')
def test_filtering_works_with_rbac_records_present(self):
resp = self._make_admin_net_and_subnet_shared_to_project_id(
- self.client.tenant_id)
+ self.client.project_id)
net = resp['network']['id']
sub = resp['subnet']['id']
self.admin_client.create_rbac_policy(
diff --git a/neutron_tempest_plugin/api/base.py b/neutron_tempest_plugin/api/base.py
index 10821c1..b66fe0d 100644
--- a/neutron_tempest_plugin/api/base.py
+++ b/neutron_tempest_plugin/api/base.py
@@ -386,7 +386,7 @@
# Keep this network visible from current project
project_id = (kwargs.get('project_id') or
kwargs.get('tenant_id') or
- cls.client.tenant_id)
+ cls.client.project_id)
kwargs.update(project_id=project_id, tenant_id=project_id)
else:
# Use default client
diff --git a/neutron_tempest_plugin/api/test_address_groups.py b/neutron_tempest_plugin/api/test_address_groups.py
index 69f22d0..ee0064c 100644
--- a/neutron_tempest_plugin/api/test_address_groups.py
+++ b/neutron_tempest_plugin/api/test_address_groups.py
@@ -171,11 +171,11 @@
@decorators.idempotent_id('95f59a88-c47e-4dd9-a231-85f1782753a7')
def test_policy_target_update(self):
res = self._make_admin_ag_shared_to_project_id(
- self.client.tenant_id)
+ self.client.project_id)
# change to client2
update_res = self.admin_client.update_rbac_policy(
- res['rbac_policy']['id'], target_tenant=self.client2.tenant_id)
- self.assertEqual(self.client2.tenant_id,
+ res['rbac_policy']['id'], target_tenant=self.client2.project_id)
+ self.assertEqual(self.client2.project_id,
update_res['rbac_policy']['target_tenant'])
# make sure everything else stayed the same
res['rbac_policy'].pop('target_tenant')
@@ -185,7 +185,7 @@
@decorators.idempotent_id('35a214c9-5c99-468f-9242-34d0529cabfa')
def test_secgrprule_presence_prevents_policy_rbac_policy_deletion(self):
res = self._make_admin_ag_shared_to_project_id(
- self.client2.tenant_id)
+ self.client2.project_id)
ag_id = res['address_group']['id']
security_group = self.create_security_group(client=self.client2)
protocol = random.choice(list(base_security_groups.V4_PROTOCOL_NAMES))
@@ -213,7 +213,7 @@
rbac_policy = self.admin_client.create_rbac_policy(
object_type='address_group', object_id=ag['id'],
action='access_as_shared',
- target_tenant=self.client.tenant_id)['rbac_policy']
+ target_tenant=self.client.project_id)['rbac_policy']
self.client.show_address_group(ag['id'])
self.assertIn(rbac_policy,
@@ -228,7 +228,7 @@
ag = self._create_address_group()
self.admin_client.create_rbac_policy(
object_type='address_group', object_id=ag['id'],
- action='access_as_shared', target_tenant=self.client2.tenant_id)
+ action='access_as_shared', target_tenant=self.client2.project_id)
field_args = (('id',), ('id', 'action'), ('object_type', 'object_id'),
('project_id', 'target_tenant'))
for fields in field_args:
@@ -238,7 +238,7 @@
@decorators.idempotent_id('20b2706b-1cea-4724-ab72-d7452ecb1fc4')
def test_rbac_policy_show(self):
res = self._make_admin_ag_shared_to_project_id(
- self.client.tenant_id)
+ self.client.project_id)
p1 = res['rbac_policy']
p2 = self.admin_client.create_rbac_policy(
object_type='address_group',
@@ -257,11 +257,11 @@
rbac_pol1 = self.admin_client.create_rbac_policy(
object_type='address_group', object_id=ag['id'],
action='access_as_shared',
- target_tenant=self.client2.tenant_id)['rbac_policy']
+ target_tenant=self.client2.project_id)['rbac_policy']
rbac_pol2 = self.admin_client.create_rbac_policy(
object_type='address_group', object_id=ag['id'],
action='access_as_shared',
- target_tenant=self.admin_client.tenant_id)['rbac_policy']
+ target_tenant=self.admin_client.project_id)['rbac_policy']
res1 = self.admin_client.list_rbac_policies(id=rbac_pol1['id'])[
'rbac_policies']
res2 = self.admin_client.list_rbac_policies(id=rbac_pol2['id'])[
@@ -274,12 +274,12 @@
@decorators.idempotent_id('a0f3a01a-e2c7-47d6-9385-0cd7a7f0c996')
def test_regular_client_blocked_from_sharing_anothers_policy(self):
ag = self._make_admin_ag_shared_to_project_id(
- self.client.tenant_id)['address_group']
+ self.client.project_id)['address_group']
with testtools.ExpectedException(exceptions.BadRequest):
self.client.create_rbac_policy(
object_type='address_group', object_id=ag['id'],
action='access_as_shared',
- target_tenant=self.client2.tenant_id)
+ target_tenant=self.client2.project_id)
# make sure the rbac-policy is invisible to the tenant for which it's
# being shared
@@ -292,7 +292,7 @@
self.admin_client.create_rbac_policy(
object_type='address_group', object_id=ag['id'],
action='access_as_shared',
- target_tenant=self.client.tenant_id)['rbac_policy']
+ target_tenant=self.client.project_id)['rbac_policy']
self.client.show_address_group(ag['id'])
with testtools.ExpectedException(exceptions.NotFound):
self.client.update_address_group(ag['id'], name='new_name')
diff --git a/neutron_tempest_plugin/api/test_address_scopes.py b/neutron_tempest_plugin/api/test_address_scopes.py
index b8c143a..76592a0 100644
--- a/neutron_tempest_plugin/api/test_address_scopes.py
+++ b/neutron_tempest_plugin/api/test_address_scopes.py
@@ -86,8 +86,8 @@
show_addr_scope = body['address_scope']
self.assertIn('project_id', show_addr_scope)
self.assertIn('tenant_id', show_addr_scope)
- self.assertEqual(self.client.tenant_id, show_addr_scope['project_id'])
- self.assertEqual(self.client.tenant_id, show_addr_scope['tenant_id'])
+ self.assertEqual(self.client.project_id, show_addr_scope['project_id'])
+ self.assertEqual(self.client.project_id, show_addr_scope['tenant_id'])
@decorators.idempotent_id('85a259b2-ace6-4e32-9657-a9a392b452aa')
def test_tenant_update_address_scope(self):
@@ -142,11 +142,11 @@
@decorators.idempotent_id('038e999b-cd4b-4021-a9ff-ebb734f6e056')
def test_policy_target_update(self):
res = self._make_admin_as_shared_to_project_id(
- self.client.tenant_id)
+ self.client.project_id)
# change to client2
update_res = self.admin_client.update_rbac_policy(
- res['rbac_policy']['id'], target_tenant=self.client2.tenant_id)
- self.assertEqual(self.client2.tenant_id,
+ res['rbac_policy']['id'], target_tenant=self.client2.project_id)
+ self.assertEqual(self.client2.project_id,
update_res['rbac_policy']['target_tenant'])
# make sure everything else stayed the same
res['rbac_policy'].pop('target_tenant')
@@ -156,7 +156,7 @@
@decorators.idempotent_id('798ac6c6-96cc-49ce-ba5c-c6eced7a09d3')
def test_subnet_pool_presence_prevents_rbac_policy_deletion(self):
res = self._make_admin_as_shared_to_project_id(
- self.client2.tenant_id)
+ self.client2.project_id)
snp = self.create_subnetpool(
data_utils.rand_name("rbac-address-scope"),
default_prefixlen=24, prefixes=['10.0.0.0/8'],
@@ -183,7 +183,7 @@
rbac_policy = self.admin_client.create_rbac_policy(
object_type='address_scope', object_id=a_s['id'],
action='access_as_shared',
- target_tenant=self.client.tenant_id)['rbac_policy']
+ target_tenant=self.client.project_id)['rbac_policy']
self.client.show_address_scope(a_s['id'])
self.assertIn(rbac_policy,
@@ -198,7 +198,7 @@
a_s = self._create_address_scope(ip_version=4)
self.admin_client.create_rbac_policy(
object_type='address_scope', object_id=a_s['id'],
- action='access_as_shared', target_tenant=self.client2.tenant_id)
+ action='access_as_shared', target_tenant=self.client2.project_id)
field_args = (('id',), ('id', 'action'), ('object_type', 'object_id'),
('project_id', 'target_tenant'))
for fields in field_args:
@@ -208,7 +208,7 @@
@decorators.idempotent_id('19cbd62e-c6c3-4495-98b9-b9c6c6c9c127')
def test_rbac_policy_show(self):
res = self._make_admin_as_shared_to_project_id(
- self.client.tenant_id)
+ self.client.project_id)
p1 = res['rbac_policy']
p2 = self.admin_client.create_rbac_policy(
object_type='address_scope',
@@ -227,11 +227,11 @@
rbac_pol1 = self.admin_client.create_rbac_policy(
object_type='address_scope', object_id=a_s['id'],
action='access_as_shared',
- target_tenant=self.client2.tenant_id)['rbac_policy']
+ target_tenant=self.client2.project_id)['rbac_policy']
rbac_pol2 = self.admin_client.create_rbac_policy(
object_type='address_scope', object_id=a_s['id'],
action='access_as_shared',
- target_tenant=self.admin_client.tenant_id)['rbac_policy']
+ target_tenant=self.admin_client.project_id)['rbac_policy']
res1 = self.admin_client.list_rbac_policies(id=rbac_pol1['id'])[
'rbac_policies']
res2 = self.admin_client.list_rbac_policies(id=rbac_pol2['id'])[
@@ -244,12 +244,12 @@
@decorators.idempotent_id('222a638d-819e-41a7-a3fe-550265c06e79')
def test_regular_client_blocked_from_sharing_anothers_policy(self):
a_s = self._make_admin_as_shared_to_project_id(
- self.client.tenant_id)['address_scope']
+ self.client.project_id)['address_scope']
with testtools.ExpectedException(lib_exc.BadRequest):
self.client.create_rbac_policy(
object_type='address_scope', object_id=a_s['id'],
action='access_as_shared',
- target_tenant=self.client2.tenant_id)
+ target_tenant=self.client2.project_id)
# make sure the rbac-policy is invisible to the tenant for which it's
# being shared
diff --git a/neutron_tempest_plugin/api/test_networks.py b/neutron_tempest_plugin/api/test_networks.py
index c685256..d79b7ab 100644
--- a/neutron_tempest_plugin/api/test_networks.py
+++ b/neutron_tempest_plugin/api/test_networks.py
@@ -49,7 +49,7 @@
fields.append('mtu')
for key in fields:
self.assertEqual(network[key], self.network[key])
- project_id = self.client.tenant_id
+ project_id = self.client.project_id
self.assertEqual(project_id, network['tenant_id'])
if utils.is_extension_enabled('project-id', 'network'):
self.assertEqual(project_id, network['project_id'])
@@ -76,7 +76,7 @@
@decorators.idempotent_id('0cc0552f-afaf-4231-b7a7-c2a1774616da')
@utils.requires_ext(extension="project-id", service="network")
def test_create_network_with_project(self):
- project_id = self.client.tenant_id
+ project_id = self.client.project_id
name = 'created-with-project_id'
network = self.create_network(name, project_id=project_id)
diff --git a/neutron_tempest_plugin/api/test_qos.py b/neutron_tempest_plugin/api/test_qos.py
index 372bf1e..448f391 100644
--- a/neutron_tempest_plugin/api/test_qos.py
+++ b/neutron_tempest_plugin/api/test_qos.py
@@ -86,7 +86,7 @@
body = self.admin_client.show_qos_policy(policy['id'])
show_policy = body['policy']
self.assertIn('project_id', show_policy)
- self.assertEqual(self.admin_client.tenant_id,
+ self.assertEqual(self.admin_client.project_id,
show_policy['project_id'])
@decorators.idempotent_id('f8d20e92-f06d-4805-b54f-230f77715815')
@@ -134,10 +134,11 @@
@decorators.idempotent_id('8e88a54b-f0b2-4b7d-b061-a15d93c2c7d6')
def test_policy_update(self):
- policy = self.create_qos_policy(name=self.policy_name,
- description='',
- shared=False,
- project_id=self.admin_client.tenant_id)
+ policy = self.create_qos_policy(
+ name=self.policy_name,
+ description='',
+ shared=False,
+ project_id=self.admin_client.project_id)
self.admin_client.update_qos_policy(policy['id'],
description='test policy desc2',
shared=True)
@@ -153,7 +154,7 @@
policy = self.create_qos_policy(name=self.policy_name,
description='',
shared=False,
- project_id=self.client.tenant_id)
+ project_id=self.client.project_id)
self.assertRaises(
exceptions.Forbidden,
self.client.update_qos_policy,
@@ -161,10 +162,11 @@
@decorators.idempotent_id('4ecfd7e7-47b6-4702-be38-be9235901a87')
def test_policy_update_forbidden_for_regular_tenants_foreign_policy(self):
- policy = self.create_qos_policy(name=self.policy_name,
- description='',
- shared=False,
- project_id=self.admin_client.tenant_id)
+ policy = self.create_qos_policy(
+ name=self.policy_name,
+ description='',
+ shared=False,
+ project_id=self.admin_client.project_id)
self.assertRaises(
exceptions.NotFound,
self.client.update_qos_policy,
@@ -172,10 +174,11 @@
@decorators.idempotent_id('ee263db4-009a-4641-83e5-d0e83506ba4c')
def test_shared_policy_update(self):
- policy = self.create_qos_policy(name=self.policy_name,
- description='',
- shared=True,
- project_id=self.admin_client.tenant_id)
+ policy = self.create_qos_policy(
+ name=self.policy_name,
+ description='',
+ shared=True,
+ project_id=self.admin_client.project_id)
self.admin_client.update_qos_policy(policy['id'],
description='test policy desc2')
@@ -396,7 +399,7 @@
name='test-policy-shared',
description='shared policy',
shared=True,
- project_id=self.admin_client.tenant_id)
+ project_id=self.admin_client.project_id)
obtained_policy = self.client.show_qos_policy(policy['id'])['policy']
self.assertEqual(obtained_policy, policy)
@@ -438,11 +441,11 @@
def test_user_create_port_with_admin_qos_policy(self):
qos_policy = self.create_qos_policy(
name=self.policy_name,
- project_id=self.admin_client.tenant_id,
+ project_id=self.admin_client.project_id,
shared=False)
network = self.create_network(
'test network', client=self.admin_client,
- project_id=self.client.tenant_id,
+ project_id=self.client.project_id,
qos_policy_id=qos_policy['id'])
port = self.create_port(network)
self.assertEqual(network['id'], port['network_id'])
@@ -603,7 +606,7 @@
policy = self.create_qos_policy(name=self.policy_name,
description='test policy',
shared=False,
- project_id=self.client.tenant_id)
+ project_id=self.client.project_id)
rule = self._create_qos_bw_limit_rule(
policy['id'],
{'max_kbps': 1, 'max_burst_kbps': 1})
@@ -618,7 +621,7 @@
name=self.policy_name,
description='test policy',
shared=False,
- project_id=self.admin_client.tenant_id)
+ project_id=self.admin_client.project_id)
rule = self._create_qos_bw_limit_rule(
policy['id'], {'max_kbps': 1, 'max_burst_kbps': 1})
self.assertRaises(
@@ -797,7 +800,7 @@
qos_pol = self.create_qos_policy(
name=data_utils.rand_name('test-policy'),
description='test-shared-policy', shared=False,
- project_id=self.admin_client.tenant_id)
+ project_id=self.admin_client.project_id)
self.assertNotIn(qos_pol, self.client2.list_qos_policies()['policies'])
# test update shared False -> True
@@ -805,8 +808,8 @@
qos_pol['shared'] = True
self.client2.show_qos_policy(qos_pol['id'])
rbac_pol = {'target_tenant': '*',
- 'tenant_id': self.admin_client.tenant_id,
- 'project_id': self.admin_client.tenant_id,
+ 'tenant_id': self.admin_client.project_id,
+ 'project_id': self.admin_client.project_id,
'object_type': 'qos_policy',
'object_id': qos_pol['id'],
'action': 'access_as_shared'}
@@ -829,7 +832,7 @@
def _create_net_bound_qos_rbacs(self):
res = self._make_admin_policy_shared_to_project_id(
- self.client.tenant_id)
+ self.client.project_id)
qos_policy, rbac_for_client_tenant = res['policy'], res['rbac_policy']
# add a wildcard rbac rule - now the policy is globally shared
@@ -862,7 +865,7 @@
@decorators.idempotent_id('2ace9adc-da6e-11e5-aafe-54ee756c66df')
def test_policy_sharing_with_wildcard_and_project_id(self):
res = self._make_admin_policy_shared_to_project_id(
- self.client.tenant_id)
+ self.client.project_id)
qos_policy, rbac = res['policy'], res['rbac_policy']
qos_pol = self.client.show_qos_policy(qos_policy['id'])['policy']
self.assertTrue(qos_pol['shared'])
@@ -885,11 +888,11 @@
@decorators.idempotent_id('9f85c76a-a350-11e5-8ae5-54ee756c66df')
def test_policy_target_update(self):
res = self._make_admin_policy_shared_to_project_id(
- self.client.tenant_id)
+ self.client.project_id)
# change to client2
update_res = self.admin_client.update_rbac_policy(
- res['rbac_policy']['id'], target_tenant=self.client2.tenant_id)
- self.assertEqual(self.client2.tenant_id,
+ res['rbac_policy']['id'], target_tenant=self.client2.project_id)
+ self.assertEqual(self.client2.project_id,
update_res['rbac_policy']['target_tenant'])
# make sure everything else stayed the same
res['rbac_policy'].pop('target_tenant')
@@ -899,7 +902,7 @@
@decorators.idempotent_id('a9b39f46-a350-11e5-97c7-54ee756c66df')
def test_network_presence_prevents_policy_rbac_policy_deletion(self):
res = self._make_admin_policy_shared_to_project_id(
- self.client2.tenant_id)
+ self.client2.project_id)
qos_policy_id = res['policy']['id']
self._create_network(qos_policy_id, self.client2)
# a network with shared qos-policy should prevent the deletion of an
@@ -922,7 +925,7 @@
# we can't update the policy to a different tenant
with testtools.ExpectedException(exceptions.Conflict):
self.admin_client.update_rbac_policy(
- wild['id'], target_tenant=self.client2.tenant_id)
+ wild['id'], target_tenant=self.client2.project_id)
@decorators.idempotent_id('b0fe87e8-a350-11e5-9f08-54ee756c66df')
def test_regular_client_shares_to_another_regular_client(self):
@@ -933,7 +936,7 @@
rbac_policy = self.admin_client.create_rbac_policy(
object_type='qos_policy', object_id=policy['id'],
action='access_as_shared',
- target_tenant=self.client.tenant_id)['rbac_policy']
+ target_tenant=self.client.project_id)['rbac_policy']
self.client.show_qos_policy(policy['id'])
self.assertIn(rbac_policy,
@@ -948,7 +951,7 @@
policy = self._create_qos_policy()
self.admin_client.create_rbac_policy(
object_type='qos_policy', object_id=policy['id'],
- action='access_as_shared', target_tenant=self.client2.tenant_id)
+ action='access_as_shared', target_tenant=self.client2.project_id)
field_args = (('id',), ('id', 'action'), ('object_type', 'object_id'),
('project_id', 'target_tenant'))
for fields in field_args:
@@ -958,7 +961,7 @@
@decorators.idempotent_id('c10d993a-a350-11e5-9c7a-54ee756c66df')
def test_rbac_policy_show(self):
res = self._make_admin_policy_shared_to_project_id(
- self.client.tenant_id)
+ self.client.project_id)
p1 = res['rbac_policy']
p2 = self.admin_client.create_rbac_policy(
object_type='qos_policy', object_id=res['policy']['id'],
@@ -976,11 +979,11 @@
rbac_pol1 = self.admin_client.create_rbac_policy(
object_type='qos_policy', object_id=policy['id'],
action='access_as_shared',
- target_tenant=self.client2.tenant_id)['rbac_policy']
+ target_tenant=self.client2.project_id)['rbac_policy']
rbac_pol2 = self.admin_client.create_rbac_policy(
object_type='qos_policy', object_id=policy['id'],
action='access_as_shared',
- target_tenant=self.admin_client.tenant_id)['rbac_policy']
+ target_tenant=self.admin_client.project_id)['rbac_policy']
res1 = self.admin_client.list_rbac_policies(id=rbac_pol1['id'])[
'rbac_policies']
res2 = self.admin_client.list_rbac_policies(id=rbac_pol2['id'])[
@@ -993,12 +996,12 @@
@decorators.idempotent_id('cd7d755a-a350-11e5-a344-54ee756c66df')
def test_regular_client_blocked_from_sharing_anothers_policy(self):
qos_policy = self._make_admin_policy_shared_to_project_id(
- self.client.tenant_id)['policy']
+ self.client.project_id)['policy']
with testtools.ExpectedException(exceptions.BadRequest):
self.client.create_rbac_policy(
object_type='qos_policy', object_id=qos_policy['id'],
action='access_as_shared',
- target_tenant=self.client2.tenant_id)
+ target_tenant=self.client2.project_id)
# make sure the rbac-policy is invisible to the tenant for which it's
# being shared
@@ -1195,6 +1198,7 @@
dscp_policy_id = self.create_qos_policy(
name=self.policy_name,
description='test-qos-policy',
+ project_id=self.client.project_id,
shared=True)['id']
# Associate QoS to the network
diff --git a/neutron_tempest_plugin/api/test_revisions.py b/neutron_tempest_plugin/api/test_revisions.py
index 0d590f6..09bb3f1 100644
--- a/neutron_tempest_plugin/api/test_revisions.py
+++ b/neutron_tempest_plugin/api/test_revisions.py
@@ -344,7 +344,7 @@
def test_update_router_extra_attributes_bumps_revision(self):
# updates from CVR to CVR-HA are supported on every release,
# but only the admin can forcibly create a non-HA router
- router_args = {'tenant_id': self.client.tenant_id,
+ router_args = {'tenant_id': self.client.project_id,
'ha': False}
router = self.admin_client.create_router('r1', True,
**router_args)['router']
diff --git a/neutron_tempest_plugin/api/test_router_interface_fip.py b/neutron_tempest_plugin/api/test_router_interface_fip.py
index 4369838..5d8ab67 100644
--- a/neutron_tempest_plugin/api/test_router_interface_fip.py
+++ b/neutron_tempest_plugin/api/test_router_interface_fip.py
@@ -61,7 +61,7 @@
subnet1 = self.create_subnet(net1, cidr=cidr1)
self.create_router_interface(router1['id'], subnet1['id'])
net2 = self.admin_client.create_network(
- project_id=self.client.tenant_id,
+ project_id=self.client.project_id,
**{'router:external': True})['network']
self.networks.append(net2)
subnet2 = self.create_subnet(net2, cidr=cidr2)
diff --git a/neutron_tempest_plugin/api/test_routers.py b/neutron_tempest_plugin/api/test_routers.py
index 5e916f5..4179e6d 100644
--- a/neutron_tempest_plugin/api/test_routers.py
+++ b/neutron_tempest_plugin/api/test_routers.py
@@ -316,7 +316,7 @@
@decorators.idempotent_id('644d7a4a-01a1-4b68-bb8d-0c0042cb1729')
def test_convert_distributed_router_back_to_centralized(self):
# Convert a centralized router to distributed firstly
- router_args = {'tenant_id': self.client.tenant_id,
+ router_args = {'tenant_id': self.client.project_id,
'distributed': False, 'ha': False}
router = self._create_admin_router(
data_utils.rand_name('router'), admin_state_up=False,
@@ -348,7 +348,7 @@
@decorators.idempotent_id('0ffb9973-0c1a-4b76-a1f2-060178057661')
def test_convert_centralized_router_to_distributed_extended(self):
- router_args = {'tenant_id': self.client.tenant_id,
+ router_args = {'tenant_id': self.client.project_id,
'distributed': False, 'ha': False}
router = self._create_admin_router(
data_utils.rand_name('router'), admin_state_up=True,
@@ -371,7 +371,7 @@
@decorators.idempotent_id('e9a8f55b-c535-44b7-8b0a-20af6a7c2921')
def test_convert_distributed_router_to_centralized_extended(self):
- router_args = {'tenant_id': self.client.tenant_id,
+ router_args = {'tenant_id': self.client.project_id,
'distributed': True, 'ha': False}
router = self._create_admin_router(
data_utils.rand_name('router'), admin_state_up=True,
diff --git a/neutron_tempest_plugin/api/test_routers_negative.py b/neutron_tempest_plugin/api/test_routers_negative.py
index 9c83fc7..b51485b 100644
--- a/neutron_tempest_plugin/api/test_routers_negative.py
+++ b/neutron_tempest_plugin/api/test_routers_negative.py
@@ -124,7 +124,7 @@
@decorators.idempotent_id('5379fe06-e45e-4a4f-8b4a-9e28a924b451')
def test_router_update_distributed_returns_exception(self):
# create a centralized router
- router_args = {'tenant_id': self.client.tenant_id,
+ router_args = {'tenant_id': self.client.project_id,
'distributed': False}
router = self._create_admin_router(
data_utils.rand_name('router'), admin_state_up=True,
@@ -141,7 +141,7 @@
@decorators.idempotent_id('c277e945-3b39-442d-b149-e2e8cc6a2b40')
def test_router_update_centralized_returns_exception(self):
# create a centralized router
- router_args = {'tenant_id': self.client.tenant_id,
+ router_args = {'tenant_id': self.client.project_id,
'distributed': False}
router = self._create_admin_router(
data_utils.rand_name('router'), admin_state_up=True,
diff --git a/neutron_tempest_plugin/api/test_security_groups.py b/neutron_tempest_plugin/api/test_security_groups.py
index e7a9eae..14e0c66 100644
--- a/neutron_tempest_plugin/api/test_security_groups.py
+++ b/neutron_tempest_plugin/api/test_security_groups.py
@@ -71,7 +71,7 @@
protocol = random.choice(list(base_security_groups.V4_PROTOCOL_NAMES))
security_group_rule = self.create_security_group_rule(
security_group=security_group,
- project={'id': self.admin_client.tenant_id},
+ project={'id': self.admin_client.project_id},
client=self.admin_client,
protocol=protocol,
direction=constants.INGRESS_DIRECTION)
@@ -97,7 +97,7 @@
protocol = random.choice(list(base_security_groups.V4_PROTOCOL_NAMES))
security_group_rule = self.create_security_group_rule(
security_group=security_group,
- project={'id': self.admin_client.tenant_id},
+ project={'id': self.admin_client.project_id},
client=self.admin_client,
protocol=protocol,
direction=constants.INGRESS_DIRECTION)
@@ -105,7 +105,7 @@
# Create also other SG with some custom rule to check that regular user
# can't see this rule
sg_kwargs = {
- 'project': {'id': self.admin_client.tenant_id},
+ 'project': {'id': self.admin_client.project_id},
'client': self.admin_client
}
if self.stateless_sg:
@@ -113,7 +113,7 @@
admin_security_group = self.create_security_group(**sg_kwargs)
admin_security_group_rule = self.create_security_group_rule(
security_group=admin_security_group,
- project={'id': self.admin_client.tenant_id},
+ project={'id': self.admin_client.project_id},
client=self.admin_client,
protocol=protocol,
direction=constants.INGRESS_DIRECTION)
@@ -272,18 +272,18 @@
def _set_sg_quota(self, val):
sg_quota = self._get_sg_quota()
- project_id = self.client.tenant_id
+ project_id = self.client.project_id
self.admin_client.update_quotas(project_id, **{'security_group': val})
self.addCleanup(self.admin_client.update_quotas,
project_id, **{'security_group': sg_quota})
def _get_sg_quota(self):
- project_id = self.client.tenant_id
+ project_id = self.client.project_id
quotas = self.admin_client.show_quotas(project_id)
return quotas['quota']['security_group']
def _get_sg_amount(self):
- project_id = self.client.tenant_id
+ project_id = self.client.project_id
filter_query = {'project_id': project_id}
security_groups = self.client.list_security_groups(**filter_query)
return len(security_groups['security_groups'])
@@ -341,7 +341,7 @@
def _create_security_group_rules(self, amount, port_index=1):
for i in range(amount):
ingress_rule = self.create_security_group_rule(**{
- 'project_id': self.client.tenant_id,
+ 'project_id': self.client.project_id,
'direction': 'ingress',
'port_range_max': port_index + i,
'port_range_min': port_index + i,
@@ -364,18 +364,18 @@
return new_sg_rules_quota
def _set_sg_rules_quota(self, val):
- project_id = self.client.tenant_id
+ project_id = self.client.project_id
self.admin_client.update_quotas(project_id,
**{'security_group_rule': val})
LOG.info('Trying to update security group rule quota {} '.format(val))
def _get_sg_rules_quota(self):
- project_id = self.client.tenant_id
+ project_id = self.client.project_id
quotas = self.admin_client.show_quotas(project_id)
return quotas['quota']['security_group_rule']
def _get_sg_rules_amount(self):
- project_id = self.client.tenant_id
+ project_id = self.client.project_id
filter_query = {'project_id': project_id}
security_group_rules = self.client.list_security_group_rules(
**filter_query)
@@ -390,7 +390,7 @@
def setUp(self):
super(SecGroupRulesQuotaTest, self).setUp()
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- self.admin_client.reset_quotas, self.client.tenant_id)
+ self.admin_client.reset_quotas, self.client.project_id)
self._set_sg_rules_quota(10)
@decorators.idempotent_id('77ec038c-5638-11ea-8e2d-0242ac130003')
@@ -416,7 +416,7 @@
values, different values.
"""
sg_rules_quota = self._get_sg_rules_quota()
- project_id = self.client.tenant_id
+ project_id = self.client.project_id
self.addCleanup(self.admin_client.update_quotas,
project_id, **{'security_group_rule': sg_rules_quota})
values = [-1, 0, 10, 2147483647]
@@ -569,7 +569,7 @@
def _create_security_group(self):
return self.create_security_group(
name=data_utils.rand_name('test-sg'),
- project={'id': self.admin_client.tenant_id})
+ project={'id': self.admin_client.project_id})
def _make_admin_sg_shared_to_project_id(self, project_id):
sg = self._create_security_group()
@@ -584,11 +584,11 @@
@decorators.idempotent_id('2a41eb8f-2a35-11e9-bae9-acde48001122')
def test_policy_target_update(self):
res = self._make_admin_sg_shared_to_project_id(
- self.client.tenant_id)
+ self.client.project_id)
# change to client2
update_res = self.admin_client.update_rbac_policy(
- res['rbac_policy']['id'], target_tenant=self.client2.tenant_id)
- self.assertEqual(self.client2.tenant_id,
+ res['rbac_policy']['id'], target_tenant=self.client2.project_id)
+ self.assertEqual(self.client2.project_id,
update_res['rbac_policy']['target_tenant'])
# make sure everything else stayed the same
res['rbac_policy'].pop('target_tenant')
@@ -598,7 +598,7 @@
@decorators.idempotent_id('2a619a8a-2a35-11e9-90d9-acde48001122')
def test_port_presence_prevents_policy_rbac_policy_deletion(self):
res = self._make_admin_sg_shared_to_project_id(
- self.client2.tenant_id)
+ self.client2.project_id)
sg_id = res['security_group']['id']
net = self.create_network(client=self.client2)
port = self.client2.create_port(
@@ -623,7 +623,7 @@
rbac_policy = self.admin_client.create_rbac_policy(
object_type='security_group', object_id=sg['id'],
action='access_as_shared',
- target_tenant=self.client.tenant_id)['rbac_policy']
+ target_tenant=self.client.project_id)['rbac_policy']
self.client.show_security_group(sg['id'])
self.assertIn(rbac_policy,
@@ -638,7 +638,7 @@
sg = self._create_security_group()
self.admin_client.create_rbac_policy(
object_type='security_group', object_id=sg['id'],
- action='access_as_shared', target_tenant=self.client2.tenant_id)
+ action='access_as_shared', target_tenant=self.client2.project_id)
field_args = (('id',), ('id', 'action'), ('object_type', 'object_id'),
('project_id', 'target_tenant'))
for fields in field_args:
@@ -648,7 +648,7 @@
@decorators.idempotent_id('2abf8f9e-2a35-11e9-85f7-acde48001122')
def test_rbac_policy_show(self):
res = self._make_admin_sg_shared_to_project_id(
- self.client.tenant_id)
+ self.client.project_id)
p1 = res['rbac_policy']
p2 = self.admin_client.create_rbac_policy(
object_type='security_group',
@@ -667,11 +667,11 @@
rbac_pol1 = self.admin_client.create_rbac_policy(
object_type='security_group', object_id=sg['id'],
action='access_as_shared',
- target_tenant=self.client2.tenant_id)['rbac_policy']
+ target_tenant=self.client2.project_id)['rbac_policy']
rbac_pol2 = self.admin_client.create_rbac_policy(
object_type='security_group', object_id=sg['id'],
action='access_as_shared',
- target_tenant=self.admin_client.tenant_id)['rbac_policy']
+ target_tenant=self.admin_client.project_id)['rbac_policy']
res1 = self.admin_client.list_rbac_policies(id=rbac_pol1['id'])[
'rbac_policies']
res2 = self.admin_client.list_rbac_policies(id=rbac_pol2['id'])[
@@ -684,12 +684,12 @@
@decorators.idempotent_id('2aff3900-2a35-11e9-96b3-acde48001122')
def test_regular_client_blocked_from_sharing_anothers_policy(self):
sg = self._make_admin_sg_shared_to_project_id(
- self.client.tenant_id)['security_group']
+ self.client.project_id)['security_group']
with testtools.ExpectedException(exceptions.BadRequest):
self.client.create_rbac_policy(
object_type='security_group', object_id=sg['id'],
action='access_as_shared',
- target_tenant=self.client2.tenant_id)
+ target_tenant=self.client2.project_id)
# make sure the rbac-policy is invisible to the tenant for which it's
# being shared
diff --git a/neutron_tempest_plugin/api/test_security_groups_negative.py b/neutron_tempest_plugin/api/test_security_groups_negative.py
index 224558c..07fc606 100644
--- a/neutron_tempest_plugin/api/test_security_groups_negative.py
+++ b/neutron_tempest_plugin/api/test_security_groups_negative.py
@@ -182,7 +182,7 @@
def setUp(self):
super(NegativeSecGroupRulesQuotaTest, self).setUp()
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- self.admin_client.reset_quotas, self.client.tenant_id)
+ self.admin_client.reset_quotas, self.client.project_id)
self._set_sg_rules_quota(10)
@decorators.idempotent_id('8336e6ea-2e0a-4a1a-8673-a6f81b577d57')
diff --git a/neutron_tempest_plugin/api/test_subnetpools.py b/neutron_tempest_plugin/api/test_subnetpools.py
index 38c721f..eaaee33 100644
--- a/neutron_tempest_plugin/api/test_subnetpools.py
+++ b/neutron_tempest_plugin/api/test_subnetpools.py
@@ -145,8 +145,8 @@
show_subnetpool = body['subnetpool']
self.assertIn('project_id', show_subnetpool)
self.assertIn('tenant_id', show_subnetpool)
- self.assertEqual(self.client.tenant_id, show_subnetpool['project_id'])
- self.assertEqual(self.client.tenant_id, show_subnetpool['tenant_id'])
+ self.assertEqual(self.client.project_id, show_subnetpool['project_id'])
+ self.assertEqual(self.client.project_id, show_subnetpool['tenant_id'])
@decorators.idempotent_id('764f1b93-1c4a-4513-9e7b-6c2fc5e9270c')
def test_tenant_update_subnetpool(self):
@@ -446,11 +446,11 @@
@decorators.idempotent_id('71b35ad0-51cd-40da-985d-89a51c95ec6a')
def test_policy_target_update(self):
res = self._make_admin_snp_shared_to_project_id(
- self.client.tenant_id)
+ self.client.project_id)
# change to client2
update_res = self.admin_client.update_rbac_policy(
- res['rbac_policy']['id'], target_tenant=self.client2.tenant_id)
- self.assertEqual(self.client2.tenant_id,
+ res['rbac_policy']['id'], target_tenant=self.client2.project_id)
+ self.assertEqual(self.client2.project_id,
update_res['rbac_policy']['target_tenant'])
# make sure everything else stayed the same
res['rbac_policy'].pop('target_tenant')
@@ -460,7 +460,7 @@
@decorators.idempotent_id('451d9d38-65a0-4916-a805-1460d6a938d1')
def test_subnet_presence_prevents_rbac_policy_deletion(self):
res = self._make_admin_snp_shared_to_project_id(
- self.client2.tenant_id)
+ self.client2.project_id)
network = self.create_network(client=self.client2)
subnet = self.client2.create_subnet(
network_id=network['id'],
@@ -491,7 +491,7 @@
rbac_policy = self.admin_client.create_rbac_policy(
object_type='address_scope', object_id=a_s['id'],
action='access_as_shared',
- target_tenant=self.client.tenant_id)['rbac_policy']
+ target_tenant=self.client.project_id)['rbac_policy']
# Create subnet pool owned by client with shared AS
snp = self._create_subnetpool(address_scope_id=a_s["id"])
@@ -500,7 +500,7 @@
self.client.create_rbac_policy(
object_type='subnetpool', object_id=snp['id'],
action='access_as_shared',
- target_tenant=self.client2.tenant_id
+ target_tenant=self.client2.project_id
)
# cleanup
@@ -517,7 +517,7 @@
rbac_policy = self.admin_client.create_rbac_policy(
object_type='subnetpool', object_id=snp['id'],
action='access_as_shared',
- target_tenant=self.client.tenant_id)['rbac_policy']
+ target_tenant=self.client.project_id)['rbac_policy']
self.client.show_subnetpool(snp['id'])
self.assertIn(rbac_policy,
@@ -532,7 +532,7 @@
snp = self._create_subnetpool()
self.admin_client.create_rbac_policy(
object_type='subnetpool', object_id=snp['id'],
- action='access_as_shared', target_tenant=self.client2.tenant_id)
+ action='access_as_shared', target_tenant=self.client2.project_id)
field_args = (('id',), ('id', 'action'), ('object_type', 'object_id'),
('project_id', 'target_tenant'))
for fields in field_args:
@@ -542,7 +542,7 @@
@decorators.idempotent_id('e59e4502-4e6a-4e49-b446-a5d5642bbd69')
def test_rbac_policy_show(self):
res = self._make_admin_snp_shared_to_project_id(
- self.client.tenant_id)
+ self.client.project_id)
p1 = res['rbac_policy']
p2 = self.admin_client.create_rbac_policy(
object_type='subnetpool',
@@ -561,11 +561,11 @@
rbac_pol1 = self.admin_client.create_rbac_policy(
object_type='subnetpool', object_id=snp['id'],
action='access_as_shared',
- target_tenant=self.client2.tenant_id)['rbac_policy']
+ target_tenant=self.client2.project_id)['rbac_policy']
rbac_pol2 = self.admin_client.create_rbac_policy(
object_type='subnetpool', object_id=snp['id'],
action='access_as_shared',
- target_tenant=self.admin_client.tenant_id)['rbac_policy']
+ target_tenant=self.admin_client.project_id)['rbac_policy']
res1 = self.admin_client.list_rbac_policies(id=rbac_pol1['id'])[
'rbac_policies']
res2 = self.admin_client.list_rbac_policies(id=rbac_pol2['id'])[
@@ -578,12 +578,12 @@
@decorators.idempotent_id('63d9acbe-403c-4e77-9ffd-80e636a4621e')
def test_regular_client_blocked_from_sharing_anothers_policy(self):
snp = self._make_admin_snp_shared_to_project_id(
- self.client.tenant_id)['subnetpool']
+ self.client.project_id)['subnetpool']
with testtools.ExpectedException(lib_exc.BadRequest):
self.client.create_rbac_policy(
object_type='subnetpool', object_id=snp['id'],
action='access_as_shared',
- target_tenant=self.client2.tenant_id)
+ target_tenant=self.client2.project_id)
# make sure the rbac-policy is invisible to the tenant for which it's
# being shared
diff --git a/neutron_tempest_plugin/api/test_subnetpools_negative.py b/neutron_tempest_plugin/api/test_subnetpools_negative.py
index 1e222df..934d3cd 100644
--- a/neutron_tempest_plugin/api/test_subnetpools_negative.py
+++ b/neutron_tempest_plugin/api/test_subnetpools_negative.py
@@ -289,5 +289,5 @@
lib_exc.BadRequest,
self.admin_client.update_subnetpool,
subnetpool['id'],
- tenant_id=self.admin_client.tenant_id,
+ tenant_id=self.admin_client.project_id,
)
diff --git a/neutron_tempest_plugin/api/test_trunk.py b/neutron_tempest_plugin/api/test_trunk.py
index 26f8de8..1006617 100644
--- a/neutron_tempest_plugin/api/test_trunk.py
+++ b/neutron_tempest_plugin/api/test_trunk.py
@@ -48,7 +48,7 @@
if parent_network_type:
client = cls.admin_client
network_kwargs = {"provider:network_type": parent_network_type,
- "tenant_id": cls.client.tenant_id}
+ "tenant_id": cls.client.project_id}
network = cls.create_network(client=client, **network_kwargs)
parent_port = cls.create_port(network)
return cls.create_trunk(parent_port, subports, **kwargs)
@@ -98,7 +98,7 @@
observed_trunk = self._show_trunk(trunk)
for key in ['project_id', 'tenant_id']:
self.assertIn(key, observed_trunk)
- self.assertEqual(self.client.tenant_id, observed_trunk[key])
+ self.assertEqual(self.client.project_id, observed_trunk[key])
@decorators.idempotent_id('4ce46c22-a2b6-4659-bc5a-0ef2463cab32')
def test_create_update_trunk(self):
diff --git a/neutron_tempest_plugin/bgpvpn/api/test_bgpvpn.py b/neutron_tempest_plugin/bgpvpn/api/test_bgpvpn.py
index f3a7b11..4610686 100644
--- a/neutron_tempest_plugin/bgpvpn/api/test_bgpvpn.py
+++ b/neutron_tempest_plugin/bgpvpn/api/test_bgpvpn.py
@@ -54,21 +54,21 @@
@decorators.idempotent_id('709b23b0-9719-47df-9f53-b0812a5d5a48')
def test_delete_bgpvpn(self):
bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
- tenant_id=self.bgpvpn_client.tenant_id)
+ tenant_id=self.bgpvpn_client.project_id)
self.delete_bgpvpn(self.bgpvpn_admin_client, bgpvpn)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('596abfc2-fd89-491d-863d-25459db1df4b')
def test_delete_bgpvpn_as_non_admin_fail(self):
bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
- tenant_id=self.bgpvpn_client.tenant_id)
+ tenant_id=self.bgpvpn_client.project_id)
self.assertRaises(exceptions.Forbidden,
self.bgpvpn_client.delete_bgpvpn, bgpvpn['id'])
@decorators.idempotent_id('9fa29db8-35d0-4beb-a986-23c369499ab1')
def test_show_bgpvpn(self):
bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
- tenant_id=self.bgpvpn_client.tenant_id)
+ tenant_id=self.bgpvpn_client.project_id)
bgpvpn_details = self.bgpvpn_client.show_bgpvpn(bgpvpn['id'])['bgpvpn']
self.assertEqual(bgpvpn['id'], bgpvpn_details['id'])
@@ -76,14 +76,14 @@
@decorators.idempotent_id('b20110bb-393b-4342-8b30-6486cd2b4fc6')
def test_show_bgpvpn_as_non_owner_fail(self):
bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
- tenant_id=self.bgpvpn_client.tenant_id)
+ tenant_id=self.bgpvpn_client.project_id)
self.assertRaises(exceptions.NotFound,
self.bgpvpn_alt_client.show_bgpvpn, bgpvpn['id'])
@decorators.idempotent_id('7a7feca2-1c24-4f5d-ad4b-b0e5a712adb1')
def test_list_bgpvpn(self):
bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
- tenant_id=self.bgpvpn_client.tenant_id)
+ tenant_id=self.bgpvpn_client.project_id)
bgpvpns = self.bgpvpn_client.list_bgpvpns()['bgpvpns']
self.assertIn(bgpvpn['id'],
[bgpvpn_alt['id'] for bgpvpn_alt in bgpvpns])
@@ -92,7 +92,7 @@
@decorators.idempotent_id('4875e65d-0b65-40c0-9efd-309420686ab4')
def test_list_bgpvpn_as_non_owner_fail(self):
bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
- tenant_id=self.bgpvpn_client.tenant_id)
+ tenant_id=self.bgpvpn_client.project_id)
bgpvpns_alt = self.bgpvpn_alt_client.list_bgpvpns()['bgpvpns']
self.assertNotIn(bgpvpn['id'],
[bgpvpn_alt['id'] for bgpvpn_alt in bgpvpns_alt])
@@ -100,7 +100,7 @@
@decorators.idempotent_id('096281da-356d-4c04-bd55-784a26bb1b0c')
def test_list_show_network_association(self):
bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
- tenant_id=self.bgpvpn_client.tenant_id)
+ tenant_id=self.bgpvpn_client.project_id)
network = self.networks_client.create_network()['network']
association = self.bgpvpn_client.create_network_association(
@@ -118,7 +118,7 @@
@decorators.idempotent_id('57b0da93-8e37-459f-9aaf-f903acc36025')
def test_show_netassoc_as_non_owner_fail(self):
bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
- tenant_id=self.bgpvpn_client.tenant_id)
+ tenant_id=self.bgpvpn_client.project_id)
network = self.networks_client.create_network()['network']
net_assoc = self.bgpvpn_client.create_network_association(
@@ -132,7 +132,7 @@
@decorators.idempotent_id('2cbb10af-bf9c-4b32-b6a6-4066de783758')
def test_list_netassoc_as_non_owner_fail(self):
bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
- tenant_id=self.bgpvpn_client.tenant_id)
+ tenant_id=self.bgpvpn_client.project_id)
network = self.networks_client.create_network()['network']
self.bgpvpn_client.create_network_association(bgpvpn['id'],
@@ -144,7 +144,7 @@
@decorators.idempotent_id('51e1b079-aefa-4c37-8b1a-0567b3ef7954')
def test_associate_disassociate_network(self):
bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
- tenant_id=self.bgpvpn_client.tenant_id)
+ tenant_id=self.bgpvpn_client.project_id)
network = self.networks_client.create_network()
network_id = network['network']['id']
@@ -187,7 +187,7 @@
def test_update_route_target_non_admin_fail(self):
bgpvpn = self.create_bgpvpn(
self.bgpvpn_admin_client,
- tenant_id=self.bgpvpn_client.tenant_id,
+ tenant_id=self.bgpvpn_client.project_id,
route_targets=['64512:1'])
with ExpectedException(exceptions.Forbidden):
self.bgpvpn_client.update_bgpvpn(
@@ -206,21 +206,21 @@
"""
postdata = {
"name": "testbgpvpn",
- "tenant_id": self.bgpvpn_client.tenant_id,
+ "tenant_id": self.bgpvpn_client.project_id,
"route_targets": ["0"]
}
self.assertRaises(exceptions.BadRequest,
self.bgpvpn_admin_client.create_bgpvpn, **postdata)
postdata = {
"name": "testbgpvpn",
- "tenant_id": self.bgpvpn_client.tenant_id,
+ "tenant_id": self.bgpvpn_client.project_id,
"import_targets": ["test", " "]
}
self.assertRaises(exceptions.BadRequest,
self.bgpvpn_admin_client.create_bgpvpn, **postdata)
postdata = {
"name": "testbgpvpn",
- "tenant_id": self.bgpvpn_client.tenant_id,
+ "tenant_id": self.bgpvpn_client.project_id,
"export_targets": ["64512:1000000000000", "xyz"]
}
self.assertRaises(exceptions.BadRequest,
@@ -236,7 +236,7 @@
"""
postdata = {
"name": "testbgpvpn",
- "tenant_id": self.bgpvpn_client.tenant_id,
+ "tenant_id": self.bgpvpn_client.project_id,
}
bgpvpn = self.bgpvpn_admin_client.create_bgpvpn(**postdata)
updatedata = {
@@ -268,7 +268,7 @@
"""
postdata = {
"name": "testbgpvpn",
- "tenant_id": self.bgpvpn_client.tenant_id,
+ "tenant_id": self.bgpvpn_client.project_id,
}
bgpvpn = self.bgpvpn_admin_client.create_bgpvpn(**postdata)
network = self.networks_client.create_network()
@@ -290,7 +290,7 @@
"""
postdata = {
"name": "testbgpvpn",
- "tenant_id": self.bgpvpn_client.tenant_id,
+ "tenant_id": self.bgpvpn_client.project_id,
}
bgpvpn = self.bgpvpn_admin_client.create_bgpvpn(**postdata)
network = self.networks_client.create_network()
@@ -310,7 +310,7 @@
@decorators.idempotent_id('de8d94b0-0239-4a48-9574-c3a4a4f7cacb')
def test_associate_disassociate_router(self):
bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
- tenant_id=self.bgpvpn_client.tenant_id)
+ tenant_id=self.bgpvpn_client.project_id)
router = self.routers_client.create_router()
router_id = router['router']['id']
@@ -334,7 +334,7 @@
@decorators.idempotent_id('3ae91755-b1b6-4c62-a699-a44eeb4ee522')
def test_list_show_router_association(self):
bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
- tenant_id=self.bgpvpn_client.tenant_id)
+ tenant_id=self.bgpvpn_client.project_id)
router = self.routers_client.create_router()
router_id = router['router']['id']
@@ -355,7 +355,7 @@
# Create a first bgpvpn and associate a network with a subnet to it
bgpvpn_net = self.create_bgpvpn(
self.bgpvpn_admin_client,
- tenant_id=self.bgpvpn_client.tenant_id)
+ tenant_id=self.bgpvpn_client.project_id)
network = self.create_network()
subnet = self.create_subnet(network)
self.bgpvpn_client.create_network_association(
@@ -364,7 +364,7 @@
# Create a second bgpvpn and associate a router to it
bgpvpn_router = self.create_bgpvpn(
self.bgpvpn_admin_client,
- tenant_id=self.bgpvpn_client.tenant_id)
+ tenant_id=self.bgpvpn_client.project_id)
router = self.create_router(
router_name=data_utils.rand_name('test-bgpvpn-'))
diff --git a/neutron_tempest_plugin/bgpvpn/scenario/manager.py b/neutron_tempest_plugin/bgpvpn/scenario/manager.py
index 90c2bb1..398c764 100644
--- a/neutron_tempest_plugin/bgpvpn/scenario/manager.py
+++ b/neutron_tempest_plugin/bgpvpn/scenario/manager.py
@@ -147,9 +147,9 @@
def _create_router(self, client=None, tenant_id=None,
namestart='router-smoke'):
if not client:
- client = self.routers_client
+ client = self.admin_routers_client
if not tenant_id:
- tenant_id = client.tenant_id
+ tenant_id = client.project_id
name = data_utils.rand_name(namestart)
result = client.create_router(name=name,
admin_state_up=True,
diff --git a/neutron_tempest_plugin/bgpvpn/scenario/test_bgpvpn_basic.py b/neutron_tempest_plugin/bgpvpn/scenario/test_bgpvpn_basic.py
index 0142045..9cca602 100644
--- a/neutron_tempest_plugin/bgpvpn/scenario/test_bgpvpn_basic.py
+++ b/neutron_tempest_plugin/bgpvpn/scenario/test_bgpvpn_basic.py
@@ -108,6 +108,20 @@
self.RT3 = self.new_rt()
self.RT4 = self.new_rt()
+ @classmethod
+ def setup_clients(cls):
+ """This setup the service clients for the tests"""
+ super(TestBGPVPNBasic, cls).setup_clients()
+ cls.admin_security_group_client = cls.os_admin.security_groups_client
+ cls.admin_security_group_rule_client = (
+ cls.os_admin.security_group_rules_client)
+ cls.admin_routers_client = cls.os_admin.routers_client
+ cls.admin_ports_client = cls.os_admin.ports_client
+ cls.admin_networks_client = cls.os_admin.networks_client
+ cls.admin_subnets_client = cls.os_admin.subnets_client
+ cls.admin_fips_client = cls.os_admin.floating_ips_client
+ cls.admin_keys_client = cls.os_admin.keypairs_client
+
@decorators.idempotent_id('afdd6cad-871a-4343-b97b-6319c76c815d')
@utils.services('compute', 'network')
def test_bgpvpn_basic(self):
@@ -164,6 +178,7 @@
self._create_networks_and_subnets()
self._create_servers()
self.router_b = self._create_fip_router(
+ client=self.admin_routers_client,
subnet_id=self.subnets[NET_B][0]['id'])
self._create_l3_bgpvpn()
self._associate_all_nets_to_bgpvpn()
@@ -187,10 +202,13 @@
self._create_networks_and_subnets()
self._create_servers()
self.router_b = self._create_fip_router(
+ client=self.admin_routers_client,
subnet_id=self.subnets[NET_B][0]['id'])
self._create_l3_bgpvpn()
self._associate_all_nets_to_bgpvpn()
- self.delete_router(self.router_b)
+ self._delete_router(self.router_b,
+ routers_client=self.admin_routers_client,
+ ports_client=self.admin_ports_client)
self._associate_fip_and_check_l3_bgpvpn()
@decorators.idempotent_id('973ab26d-c7d8-4a32-9aa9-2d7e6f406135')
@@ -212,6 +230,7 @@
self._create_l3_bgpvpn()
self._associate_all_nets_to_bgpvpn()
self.router_b = self._create_fip_router(
+ client=self.admin_routers_client,
subnet_id=self.subnets[NET_B][0]['id'])
self._associate_fip_and_check_l3_bgpvpn()
@@ -231,6 +250,7 @@
"""
self._create_networks_and_subnets()
self.router_b = self._create_fip_router(
+ client=self.admin_routers_client,
subnet_id=self.subnets[NET_B][0]['id'])
self._create_l3_bgpvpn()
self._associate_all_nets_to_bgpvpn()
@@ -255,6 +275,7 @@
self._create_l3_bgpvpn()
self._associate_all_nets_to_bgpvpn()
self.router_b = self._create_fip_router(
+ client=self.admin_routers_client,
subnet_id=self.subnets[NET_B][0]['id'])
self._create_servers()
self._associate_fip_and_check_l3_bgpvpn()
@@ -344,10 +365,10 @@
0, self.subnets[NET_A][0])
self._create_l3_bgpvpn(rts=[], export_rts=[self.RT1],
import_rts=[self.RT2])
- self.bgpvpn_client.create_network_association(
+ self.bgpvpn_admin_client.create_network_association(
self.bgpvpn['id'], self.networks[NET_A]['id'])
self._check_l3_bgpvpn(should_succeed=False)
- self.bgpvpn_client.create_network_association(
+ self.bgpvpn_admin_client.create_network_association(
self.bgpvpn['id'], self.networks[NET_B]['id'])
self._check_l3_bgpvpn(should_succeed=False)
self._update_l3_bgpvpn(rts=[self.RT1], import_rts=[], export_rts=[])
@@ -388,13 +409,13 @@
[self.networks[NET_B], IP_B_S1_1],
[self.networks[NET_A], IP_A_S1_2],
[self.networks[NET_B], IP_B_S1_2]])
- self.bgpvpn_client.create_network_association(
+ self.bgpvpn_admin_client.create_network_association(
self.bgpvpn['id'], self.networks[NET_A]['id'])
self.router_a = self._create_router_and_associate_fip(
0, self.subnets[NET_A][0])
self._check_l3_bgpvpn(should_succeed=False)
self._check_l3_bgpvpn(self.servers[0], self.servers[2])
- self.bgpvpn_client.create_network_association(
+ self.bgpvpn_admin_client.create_network_association(
self.bgpvpn['id'], self.networks[NET_B]['id'])
self.router_b = self._create_router_and_associate_fip(
1, self.subnets[NET_B][0])
@@ -444,11 +465,11 @@
0, self.subnets[NET_A][0])
router_b = self._create_router_and_associate_fip(
3, self.subnets[NET_B][0])
- self.bgpvpn_client.create_network_association(
+ self.bgpvpn_admin_client.create_network_association(
self.bgpvpn['id'], self.networks[NET_A]['id'])
self._check_l3_bgpvpn(should_succeed=False)
self._check_l3_bgpvpn(self.servers[0], self.servers[2])
- self.bgpvpn_client.create_router_association(self.bgpvpn['id'],
+ self.bgpvpn_admin_client.create_router_association(self.bgpvpn['id'],
router_b['id'])
self._check_l3_bgpvpn(should_succeed=False)
self._check_l3_bgpvpn(self.servers[3], self.servers[1])
@@ -513,16 +534,16 @@
'local_pref': 100,
'prefix': NET_C_S1}]
- self.bgpvpn_client.create_network_association(
+ self.bgpvpn_admin_client.create_network_association(
self.bgpvpn['id'], self.networks[NET_A]['id'])
port_id_1 = self.ports[self.servers[1]['id']]['id']
- body = self.bgpvpn_client.create_port_association(
+ body = self.bgpvpn_admin_client.create_port_association(
self.bgpvpn['id'], port_id=port_id_1, routes=primary_port_routes)
port_association_1 = body['port_association']
port_id_2 = self.ports[self.servers[2]['id']]['id']
- body = self.bgpvpn_client.create_port_association(
+ body = self.bgpvpn_admin_client.create_port_association(
self.bgpvpn['id'], port_id=port_id_2, routes=alternate_port_routes)
port_association_2 = body['port_association']
@@ -535,10 +556,10 @@
to_server_ip=IP_C_S1_1,
validate_server=destination_srv_1)
- self.bgpvpn_client.update_port_association(
+ self.bgpvpn_admin_client.update_port_association(
self.bgpvpn['id'], port_association_1['id'],
routes=alternate_port_routes)
- self.bgpvpn_client.update_port_association(
+ self.bgpvpn_admin_client.update_port_association(
self.bgpvpn['id'], port_association_2['id'],
routes=primary_port_routes)
@@ -581,9 +602,9 @@
rts=[self.RT1])
bgpvpn_a_bis = self._create_l3_bgpvpn(name='test-l3-bgpvpn-a-bis',
rts=[self.RT2])
- self.bgpvpn_client.create_network_association(
+ self.bgpvpn_admin_client.create_network_association(
bgpvpn_a['id'], self.networks[NET_A]['id'])
- self.bgpvpn_client.create_network_association(
+ self.bgpvpn_admin_client.create_network_association(
bgpvpn_a_bis['id'], self.networks[NET_A_BIS]['id'])
self._create_servers([[self.networks[NET_A], IP_A_S1_1],
[self.networks[NET_A_BIS], IP_A_BIS_S1_2],
@@ -652,18 +673,17 @@
self._setup_ip_forwarding(1)
self._setup_ip_address(1, IP_C_S1_1)
- self.bgpvpn_client.create_network_association(
+ self.bgpvpn_admin_client.create_network_association(
self.bgpvpn['id'], self.networks[NET_A]['id'])
port_id = self.ports[self.servers[1]['id']]['id']
port_routes = [{'type': 'prefix',
'prefix': NET_C_S1}]
- body = self.bgpvpn_client.create_port_association(self.bgpvpn['id'],
- port_id=port_id,
- routes=port_routes)
+ body = self.bgpvpn_admin_client.create_port_association(
+ self.bgpvpn['id'], port_id=port_id, routes=port_routes)
port_association = body['port_association']
self._check_l3_bgpvpn_by_specific_ip(
to_server_ip=IP_C_S1_1)
- self.bgpvpn_client.update_port_association(
+ self.bgpvpn_admin_client.update_port_association(
self.bgpvpn['id'], port_association['id'], routes=[])
self._check_l3_bgpvpn_by_specific_ip(
should_succeed=False, to_server_ip=IP_C_S1_1)
@@ -717,16 +737,15 @@
self._setup_range_ip_address(1, LOOPBACKS)
- self.bgpvpn_client.create_network_association(
+ self.bgpvpn_admin_client.create_network_association(
self.bgpvpn['id'], self.networks[NET_A]['id'])
port_id = self.ports[self.servers[1]['id']]['id']
port_routes = [{'type': 'prefix',
'prefix': ip + "/32"}
for ip in LOOPBACKS]
- body = self.bgpvpn_client.create_port_association(self.bgpvpn['id'],
- port_id=port_id,
- routes=port_routes)
+ body = self.bgpvpn_admin_client.create_port_association(
+ self.bgpvpn['id'], port_id=port_id, routes=port_routes)
port_association = body['port_association']
for ip in random.sample(LOOPBACKS, SAMPLE_SIZE):
@@ -735,7 +754,7 @@
self._check_l3_bgpvpn_by_specific_ip(
to_server_ip=ip)
- self.bgpvpn_client.update_port_association(
+ self.bgpvpn_admin_client.update_port_association(
self.bgpvpn['id'], port_association['id'], routes=[])
for ip in SUB_LOOPBACKS:
@@ -782,18 +801,17 @@
self._setup_ip_forwarding(1)
self._setup_ip_address(1, IP_C_S1_1)
- self.bgpvpn_client.create_network_association(
+ self.bgpvpn_admin_client.create_network_association(
self.bgpvpn['id'], self.networks[NET_A]['id'])
port_id = self.ports[self.servers[1]['id']]['id']
port_routes = [{'type': 'prefix',
'prefix': NET_C_S1}]
- body = self.bgpvpn_client.create_port_association(self.bgpvpn['id'],
- port_id=port_id,
- routes=port_routes)
+ body = self.bgpvpn_admin_client.create_port_association(
+ self.bgpvpn['id'], port_id=port_id, routes=port_routes)
port_association = body['port_association']
self._check_l3_bgpvpn_by_specific_ip(
to_server_ip=IP_C_S1_1)
- self.bgpvpn_client.delete_port_association(
+ self.bgpvpn_admin_client.delete_port_association(
self.bgpvpn['id'], port_association['id'])
self._check_l3_bgpvpn_by_specific_ip(
should_succeed=False, to_server_ip=IP_C_S1_1)
@@ -866,21 +884,21 @@
self._setup_ip_forwarding(0)
# connect network A to its BGPVPN
- self.bgpvpn_client.create_network_association(
+ self.bgpvpn_admin_client.create_network_association(
bgpvpn_a['id'], self.networks[NET_A]['id'])
# connect network B to its BGPVPN
- self.bgpvpn_client.create_network_association(
+ self.bgpvpn_admin_client.create_network_association(
bgpvpn_b['id'], self.networks[NET_B]['id'])
# connect network C to its BGPVPN
- self.bgpvpn_client.create_network_association(
+ self.bgpvpn_admin_client.create_network_association(
bgpvpn_c['id'], self.networks[NET_C]['id'])
# create port associations for A->C traffic
# (leak routes imported by BGPVPN B -- which happen to include the
# routes net C -- into net A)
- self.bgpvpn_client.create_port_association(
+ self.bgpvpn_admin_client.create_port_association(
bgpvpn_to_a['id'],
port_id=self.ports[vm2['id']]['id'],
routes=[{'type': 'bgpvpn',
@@ -890,7 +908,7 @@
# create port associations for C->A traffic
# (leak routes imported by BGPVPN B -- which happen to include the
# routes from net A -- into net C)
- body = self.bgpvpn_client.create_port_association(
+ body = self.bgpvpn_admin_client.create_port_association(
bgpvpn_to_c['id'],
port_id=self.ports[vm2['id']]['id'],
routes=[{'type': 'bgpvpn',
@@ -914,7 +932,7 @@
should_succeed=True)
# remove port association 1
- self.bgpvpn_client.delete_port_association(self.bgpvpn['id'],
+ self.bgpvpn_admin_client.delete_port_association(self.bgpvpn['id'],
port_association['id'])
# check that connectivity is actually interrupted
@@ -938,7 +956,7 @@
"""
self._create_networks_and_subnets()
self._create_l3_bgpvpn()
- self.bgpvpn_client.create_network_association(
+ self.bgpvpn_admin_client.create_network_association(
self.bgpvpn['id'], self.networks[NET_A]['id'])
self._create_servers()
self._associate_fip_and_check_l3_bgpvpn(should_succeed=False)
@@ -1007,10 +1025,10 @@
"""
self._create_networks_and_subnets()
self._create_l3_bgpvpn()
- body = self.bgpvpn_client.create_network_association(
+ body = self.bgpvpn_admin_client.create_network_association(
self.bgpvpn['id'], self.networks[NET_A]['id'])
assoc_b = body['network_association']
- self.bgpvpn_client.create_network_association(
+ self.bgpvpn_admin_client.create_network_association(
self.bgpvpn['id'], self.networks[NET_B]['id'])
self._create_servers()
self._associate_fip_and_check_l3_bgpvpn()
@@ -1040,10 +1058,10 @@
router_b = self._create_fip_router(
subnet_id=self.subnets[NET_B][0]['id'])
self._create_l3_bgpvpn()
- self.bgpvpn_client.create_network_association(
+ self.bgpvpn_admin_client.create_network_association(
self.bgpvpn['id'], self.networks[NET_A]['id'])
- body = self.bgpvpn_client.create_router_association(self.bgpvpn['id'],
- router_b['id'])
+ body = self.bgpvpn_admin_client.create_router_association(
+ self.bgpvpn['id'], router_b['id'])
assoc_b = body['router_association']
self._create_servers()
self._associate_fip_and_check_l3_bgpvpn()
@@ -1104,7 +1122,9 @@
def _create_security_group_for_test(self):
self.security_group = self.create_security_group(
- project_id=self.bgpvpn_client.project_id)
+ project_id=self.bgpvpn_admin_client.project_id,
+ security_groups_client=self.admin_security_group_client,
+ security_group_rules_client=self.admin_security_group_rule_client)
def _create_networks_and_subnets(self, names=None, subnet_cidrs=None,
port_security=True):
@@ -1115,15 +1135,15 @@
for (name, subnet_cidrs) in zip(names, subnet_cidrs):
network = super(manager.NetworkScenarioTest,
self).create_network(namestart=name,
- port_security_enabled=port_security)
+ port_security_enabled=port_security,
+ networks_client=self.admin_networks_client)
self.networks[name] = network
self.subnets[name] = []
for (j, cidr) in enumerate(subnet_cidrs):
sub_name = "subnet-%s-%d" % (name, j + 1)
- subnet = self._create_subnet_with_cidr(network,
- namestart=sub_name,
- cidr=cidr,
- ip_version=4)
+ subnet = self._create_subnet_with_cidr(
+ network, namestart=sub_name, cidr=cidr, ip_version=4,
+ subnets_client=self.admin_subnets_client)
self.subnets[name].append(subnet)
def _create_subnet_with_cidr(self, network, subnets_client=None,
@@ -1146,6 +1166,8 @@
def _create_fip_router(self, client=None, public_network_id=None,
subnet_id=None):
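+ # fall back to the admin routers client set up in setup_clients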
+ if not client:
+ client = self.admin_routers_client
router = self._create_router(client, namestart='router-')
router_id = router['id']
if public_network_id is None:
@@ -1165,12 +1187,14 @@
server = self.servers[server_index]
fip = self.create_floating_ip(
server, external_network_id=CONF.network.public_network_id,
- port_id=self.ports[server['id']]['id'])
+ port_id=self.ports[server['id']]['id'],
+ client=self.admin_fips_client)
self.server_fips[server['id']] = fip
return fip
def _create_router_and_associate_fip(self, server_index, subnet):
- router = self._create_fip_router(subnet_id=subnet['id'])
+ router = self._create_fip_router(client=self.admin_routers_client,
+ subnet_id=subnet['id'])
self._associate_fip(server_index)
return router
@@ -1185,7 +1209,7 @@
port = super(manager.NetworkScenarioTest,
self).create_port(network_id=network['id'],
- client=clients.ports_client,
+ client=self.admin_ports_client,
**create_port_body)
create_server_kwargs = {
@@ -1205,7 +1229,7 @@
return server
def _create_servers(self, ports_config=None, port_security=True):
- keypair = self.create_keypair()
+ keypair = self.create_keypair(client=self.admin_keys_client)
security_group_ids = [self.security_group['id']]
if ports_config is None:
ports_config = [[self.networks[NET_A], IP_A_S1_1],
@@ -1214,7 +1238,7 @@
network = port_config[0]
server = self._create_server(
'server-' + str(i + 1), keypair, network, port_config[1],
- security_group_ids, self.os_primary, port_security)
+ security_group_ids, self.os_admin, port_security)
self.servers.append(server)
self.servers_keypairs[server['id']] = keypair
self.server_fixed_ips[server['id']] = (
@@ -1228,7 +1252,8 @@
import_rts = import_rts or []
export_rts = export_rts or []
self.bgpvpn = self.create_bgpvpn(
- self.bgpvpn_admin_client, tenant_id=self.bgpvpn_client.tenant_id,
+ self.bgpvpn_admin_client,
+ tenant_id=self.bgpvpn_admin_client.project_id,
name=name, route_targets=rts, export_targets=export_rts,
import_targets=import_rts)
return self.bgpvpn
@@ -1249,7 +1274,7 @@
def _associate_all_nets_to_bgpvpn(self, bgpvpn=None):
bgpvpn = bgpvpn or self.bgpvpn
for network in self.networks.values():
- self.bgpvpn_client.create_network_association(
+ self.bgpvpn_admin_client.create_network_association(
bgpvpn['id'], network['id'])
LOG.debug('BGPVPN network associations completed')
@@ -1360,3 +1385,16 @@
subnet = self.subnets[NET_A][0]
self.router = self._create_router_and_associate_fip(0, subnet)
self._check_l3_bgpvpn(should_succeed=should_succeed)
+
+ def _delete_router(self, router, routers_client=None, ports_client=None):
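+ # Remove all router interfaces first; a router with attached
+ # interfaces cannot be deleted, and NotFound is ignored so the
+ # cleanup stays idempotent.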
+ if not routers_client:
+ routers_client = self.routers_client
+ if not ports_client:
+ ports_client = self.ports_client
+ ports_rsp = ports_client.list_ports(device_id=router['id'])
+ interfaces = ports_rsp['ports']
+ for i in interfaces:
+ test_utils.call_and_ignore_notfound_exc(
+ routers_client.remove_router_interface, router['id'],
+ subnet_id=i['fixed_ips'][0]['subnet_id'])
+ routers_client.delete_router(router['id'])
diff --git a/neutron_tempest_plugin/common/ip.py b/neutron_tempest_plugin/common/ip.py
index e2f6a4a..07bbe69 100644
--- a/neutron_tempest_plugin/common/ip.py
+++ b/neutron_tempest_plugin/common/ip.py
@@ -391,9 +391,9 @@
def list_iptables(version=constants.IP_VERSION_4, namespace=None):
- cmd = ''
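+ # iptables-save needs root privileges even outside a namespace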
+ cmd = 'sudo '
if namespace:
- cmd = 'sudo ip netns exec %s ' % namespace
+ cmd += 'ip netns exec %s ' % namespace
cmd += ('iptables-save' if version == constants.IP_VERSION_4 else
'ip6tables-save')
return shell.execute(cmd).stdout
diff --git a/neutron_tempest_plugin/fwaas/api/test_fwaasv2_extensions.py b/neutron_tempest_plugin/fwaas/api/test_fwaasv2_extensions.py
index 0dd18f1..4341ec7 100644
--- a/neutron_tempest_plugin/fwaas/api/test_fwaasv2_extensions.py
+++ b/neutron_tempest_plugin/fwaas/api/test_fwaasv2_extensions.py
@@ -336,7 +336,10 @@
ports=[intf_2['port_id']])
updated_fwg = body["firewall_group"]
self.assertEqual([intf_2['port_id']], updated_fwg['ports'])
-
+ # Wait for the firewall resource to become ready
+ self._wait_until_ready(fwg_id)
+ # Disassociate all ports from this firewall group
+ self.firewall_groups_client.update_firewall_group(fwg_id, ports=[])
# Delete firewall_group
self.firewall_groups_client.delete_firewall_group(fwg_id)
diff --git a/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_manager.py b/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_manager.py
index 517c96e..9cc0a6a 100644
--- a/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_manager.py
+++ b/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_manager.py
@@ -67,7 +67,7 @@
if not client:
client = self.routers_client
if not tenant_id:
- tenant_id = client.tenant_id
+ tenant_id = client.project_id
name = data_utils.rand_name(namestart)
result = client.create_router(name=name,
admin_state_up=True,
diff --git a/neutron_tempest_plugin/neutron_dynamic_routing/scenario/test_simple_bgp.py b/neutron_tempest_plugin/neutron_dynamic_routing/scenario/test_simple_bgp.py
index 3ec231e..fe0f3fc 100644
--- a/neutron_tempest_plugin/neutron_dynamic_routing/scenario/test_simple_bgp.py
+++ b/neutron_tempest_plugin/neutron_dynamic_routing/scenario/test_simple_bgp.py
@@ -127,7 +127,7 @@
admin_state_up=True,
external_network_id=CONF.network.public_network_id,
enable_snat=False,
- project_id=cls.os_primary.network_client.tenant_id)
+ project_id=cls.os_primary.network_client.project_id)
network = cls.create_network(network_name='right-network')
subnet = cls.create_subnet(
network,
diff --git a/neutron_tempest_plugin/scenario/base.py b/neutron_tempest_plugin/scenario/base.py
index c8eddd1..9d53f79 100644
--- a/neutron_tempest_plugin/scenario/base.py
+++ b/neutron_tempest_plugin/scenario/base.py
@@ -74,16 +74,16 @@
return cmd
-def get_ncat_client_cmd(ip_address, port, protocol):
- udp = ''
- if protocol.lower() == neutron_lib_constants.PROTO_NAME_UDP:
- udp = '-u'
+def get_ncat_client_cmd(ip_address, port, protocol, ssh_client=None):
cmd = 'echo "knock knock" | nc '
- ncat_version = get_ncat_version()
+ ncat_version = get_ncat_version(ssh_client=ssh_client)
if ncat_version > packaging_version.Version('7.60'):
- cmd += '-z '
- cmd += '-w 1 %(udp)s %(host)s %(port)s' % {
- 'udp': udp, 'host': ip_address, 'port': port}
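+ # -d (delay between reads/writes) and -z (zero-I/O scan mode) are
+ # only understood by newer ncat releases, so gate them on the version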
+ cmd += '-d 1 '
+ if protocol.lower() == neutron_lib_constants.PROTO_NAME_UDP:
+ cmd += '-u '
+ if ncat_version > packaging_version.Version('7.60'):
+ cmd += '-z '
+ cmd += '-w 1 %(host)s %(port)s' % {'host': ip_address, 'port': port}
return cmd
@@ -636,9 +636,9 @@
If ssh_client is not given, it is executed locally on host where tests
are executed. Otherwise ssh_client object is used to execute it.
"""
- cmd = get_ncat_client_cmd(ip_address, port, protocol)
- result = shell.execute(cmd, ssh_client=ssh_client)
- self.assertEqual(0, result.exit_status)
+ cmd = get_ncat_client_cmd(ip_address, port, protocol,
+ ssh_client=ssh_client)
+ result = shell.execute(cmd, ssh_client=ssh_client, check=False)
return result.stdout
def _ensure_public_router(self, client=None, tenant_id=None):
@@ -653,7 +653,7 @@
if not client:
client = self.client
if not tenant_id:
- tenant_id = client.tenant_id
+ tenant_id = client.project_id
router_id = CONF.network.public_router_id
network_id = CONF.network.public_network_id
if router_id:
diff --git a/neutron_tempest_plugin/scenario/test_connectivity.py b/neutron_tempest_plugin/scenario/test_connectivity.py
index ca7d755..5608dae 100644
--- a/neutron_tempest_plugin/scenario/test_connectivity.py
+++ b/neutron_tempest_plugin/scenario/test_connectivity.py
@@ -211,7 +211,7 @@
network, cidr=str(subnet_cidr), gateway=str(gw_ip))
non_dvr_router = self.create_router_by_client(
- tenant_id=self.client.tenant_id,
+ tenant_id=self.client.project_id,
is_admin=True,
router_name=data_utils.rand_name("nondvr-2-routers-same-network"),
admin_state_up=True,
@@ -219,7 +219,7 @@
self.create_router_interface(non_dvr_router['id'], subnet['id'])
dvr_router = self.create_router_by_client(
- tenant_id=self.client.tenant_id,
+ tenant_id=self.client.project_id,
is_admin=True,
router_name=data_utils.rand_name("dvr-2-rotuers-same-network"),
admin_state_up=True,
diff --git a/neutron_tempest_plugin/scenario/test_dns_integration.py b/neutron_tempest_plugin/scenario/test_dns_integration.py
index 6f2756c..be9a477 100644
--- a/neutron_tempest_plugin/scenario/test_dns_integration.py
+++ b/neutron_tempest_plugin/scenario/test_dns_integration.py
@@ -17,9 +17,11 @@
import testtools
+from oslo_log import log
from tempest.common import utils
from tempest.common import waiters
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
@@ -30,6 +32,8 @@
CONF = config.CONF
+LOG = log.getLogger(__name__)
+
# Note(jh): Need to do a bit of juggling here in order to avoid failures
# when designate_tempest_plugin is not available
@@ -47,7 +51,8 @@
@classmethod
def setup_clients(cls):
super(BaseDNSIntegrationTests, cls).setup_clients()
- cls.dns_client = cls.os_tempest.dns_v2.ZonesClient()
+ cls.zone_client = cls.os_tempest.dns_v2.ZonesClient()
+ cls.recordset_client = cls.os_tempest.dns_v2.RecordsetClient()
cls.query_client.build_timeout = 30
@classmethod
@@ -63,11 +68,11 @@
@utils.requires_ext(extension="dns-integration", service="network")
def resource_setup(cls):
super(BaseDNSIntegrationTests, cls).resource_setup()
- _, cls.zone = cls.dns_client.create_zone()
- cls.addClassResourceCleanup(cls.dns_client.delete_zone,
+ cls.zone = cls.zone_client.create_zone()[1]
+ cls.addClassResourceCleanup(cls.zone_client.delete_zone,
cls.zone['id'], ignore_errors=lib_exc.NotFound)
dns_waiters.wait_for_zone_status(
- cls.dns_client, cls.zone['id'], 'ACTIVE')
+ cls.zone_client, cls.zone['id'], 'ACTIVE')
cls.network = cls.create_network(dns_domain=cls.zone['name'])
cls.subnet = cls.create_subnet(cls.network)
@@ -94,12 +99,79 @@
fip = self.create_floatingip(port=port)
return {'port': port, 'fip': fip, 'server': server}
+ def _check_type_in_recordsets(self, zone_id, rec_type):
+ types = [rec['type'] for rec in self.recordset_client.list_recordset(
+ zone_id)[1]['recordsets']]
+ return rec_type in types
+
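+ # Poll until a recordset of the requested type appears in the zone,
+ # bounded by the DNS query client build timeout.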
+ def _wait_for_type_in_recordsets(self, zone_id, rec_type):
+ test_utils.call_until_true(
+ func=self._check_type_in_recordsets, zone_id=zone_id,
+ rec_type=rec_type, duration=self.query_client.build_timeout,
+ sleep_for=5)
+
+ def _check_recordset_deleted(
+ self, recordset_client, zone_id, recordset_id):
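+ # show_recordset raising NotFound (swallowed by
+ # call_and_ignore_notfound_exc, which then returns None) means
+ # the recordset is gone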
+ return test_utils.call_and_ignore_notfound_exc(
+ recordset_client.show_recordset, zone_id, recordset_id) is None
+
+ def _verify_designate_recordset(
+ self, address, found=True, record_type='A'):
+ if found:
+ self._wait_for_type_in_recordsets(self.zone['id'], record_type)
+ recordsets = self.recordset_client.list_recordset(
+ self.zone['id'])[1]['recordsets']
+ relevant_type = [rec for rec in recordsets if
+ rec['type'] == record_type]
+ self.assertTrue(
+ relevant_type,
+ 'Failed: no {} type recordset has been detected in the '
+ 'Designate DNS DB'.format(record_type))
+ rec_id = [rec['id'] for rec in relevant_type if address in
+ str(rec['records'])][0]
+ self.assertTrue(
+ rec_id, 'Record of type:{} with IP:{} was not detected in '
+ 'the Designate DNS DB'.format(record_type, address))
+ dns_waiters.wait_for_recordset_status(
+ self.recordset_client, self.zone['id'], rec_id, 'ACTIVE')
+ else:
+ rec_id = None
+ recordsets = self.recordset_client.list_recordset(
+ self.zone['id'])[1]['recordsets']
+ relevant_type = [rec for rec in recordsets if
+ rec['type'] == record_type]
+ if relevant_type:
+ rec_id = [rec['id'] for rec in relevant_type if
+ address in str(rec['records'])][0]
+ if rec_id:
+ recordset_deleted = test_utils.call_until_true(
+ func=self._check_recordset_deleted,
+ recordset_client=self.recordset_client,
+ zone_id=self.zone['id'], recordset_id=rec_id,
+ duration=self.query_client.build_timeout, sleep_for=5)
+ self.assertTrue(
+ recordset_deleted,
+ 'Failed, recordset type:{} and ID:{} still exists in '
+ 'the Designate DNS DB'.format(record_type, rec_id))
+
def _verify_dns_records(self, address, name, found=True, record_type='A'):
client = self.query_client
forward = name + '.' + self.zone['name']
reverse = ipaddress.ip_address(address).reverse_pointer
- dns_waiters.wait_for_query(client, forward, record_type, found)
- dns_waiters.wait_for_query(client, reverse, 'PTR', found)
+ record_types_to_check = [record_type, 'PTR']
+ for rec_type in record_types_to_check:
+ try:
+ if rec_type == 'PTR':
+ dns_waiters.wait_for_query(
+ client, reverse, rec_type, found)
+ else:
+ dns_waiters.wait_for_query(
+ client, forward, rec_type, found)
+ except Exception as e:
+ LOG.error(e)
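+ # the direct DNS query failed; fall back to verifying the record
+ # through the Designate recordsets API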
+ self._verify_designate_recordset(address, found, rec_type)
if not found:
return
fwd_response = client.query(forward, record_type)
@@ -218,15 +290,15 @@
name = data_utils.rand_name('test-domain')
zone_name = "%s.%s.%s.zone." % (cls.client.user_id,
- cls.client.tenant_id,
+ cls.client.project_id,
name)
dns_domain_template = "<user_id>.<project_id>.%s.zone." % name
- _, cls.zone = cls.dns_client.create_zone(name=zone_name)
- cls.addClassResourceCleanup(cls.dns_client.delete_zone,
+ cls.zone = cls.zone_client.create_zone(name=zone_name)[1]
+ cls.addClassResourceCleanup(cls.zone_client.delete_zone,
cls.zone['id'], ignore_errors=lib_exc.NotFound)
dns_waiters.wait_for_zone_status(
- cls.dns_client, cls.zone['id'], 'ACTIVE')
+ cls.zone_client, cls.zone['id'], 'ACTIVE')
cls.network = cls.create_network(dns_domain=dns_domain_template)
cls.subnet = cls.create_subnet(cls.network,
diff --git a/neutron_tempest_plugin/scenario/test_dvr.py b/neutron_tempest_plugin/scenario/test_dvr.py
index fa2e9d4..a37abf4 100644
--- a/neutron_tempest_plugin/scenario/test_dvr.py
+++ b/neutron_tempest_plugin/scenario/test_dvr.py
@@ -64,7 +64,7 @@
The test is done by putting the SNAT port down on controller node.
"""
router = self.create_router_by_client(
- distributed=True, tenant_id=self.client.tenant_id, is_admin=True,
+ distributed=True, tenant_id=self.client.project_id, is_admin=True,
ha=False)
self.setup_network_and_server(router=router)
self._check_snat_port_connectivity()
diff --git a/neutron_tempest_plugin/scenario/test_migration.py b/neutron_tempest_plugin/scenario/test_migration.py
index 410c64e..3c0d384 100644
--- a/neutron_tempest_plugin/scenario/test_migration.py
+++ b/neutron_tempest_plugin/scenario/test_migration.py
@@ -125,7 +125,7 @@
def _test_migration(self, before_dvr, before_ha, after_dvr, after_ha):
router = self.create_router_by_client(
distributed=before_dvr, ha=before_ha,
- tenant_id=self.client.tenant_id, is_admin=True)
+ tenant_id=self.client.project_id, is_admin=True)
self.setup_network_and_server(router=router)
self._wait_until_router_ports_ready(
diff --git a/neutron_tempest_plugin/scenario/test_mtu.py b/neutron_tempest_plugin/scenario/test_mtu.py
index 31319ec..ea62fcf 100644
--- a/neutron_tempest_plugin/scenario/test_mtu.py
+++ b/neutron_tempest_plugin/scenario/test_mtu.py
@@ -101,7 +101,7 @@
def _create_setup(self):
self.admin_client = self.os_admin.network_client
- net_kwargs = {'tenant_id': self.client.tenant_id}
+ net_kwargs = {'tenant_id': self.client.project_id}
for net_type in ['vxlan', 'gre']:
net_kwargs['name'] = '-'.join([net_type, 'net'])
net_kwargs['provider:network_type'] = net_type
@@ -186,7 +186,7 @@
def _create_setup(self):
self.admin_client = self.os_admin.network_client
for test_net in self._get_network_params():
- test_net['tenant_id'] = self.client.tenant_id
+ test_net['tenant_id'] = self.client.project_id
test_net['name'] = data_utils.rand_name('net')
cidr = None if 'cidr' not in test_net else test_net.pop('cidr')
network = self.admin_client.create_network(**test_net)[
diff --git a/neutron_tempest_plugin/scenario/test_security_groups.py b/neutron_tempest_plugin/scenario/test_security_groups.py
index a74b7b7..3d075b4 100644
--- a/neutron_tempest_plugin/scenario/test_security_groups.py
+++ b/neutron_tempest_plugin/scenario/test_security_groups.py
@@ -17,6 +17,7 @@
from neutron_lib import constants
import testtools
+from oslo_log import log
from tempest.common import utils as tempest_utils
from tempest.common import waiters
from tempest.lib.common.utils import data_utils
@@ -32,7 +33,7 @@
from neutron_tempest_plugin.scenario import constants as const
CONF = config.CONF
-
+LOG = log.getLogger(__name__)
EPHEMERAL_PORT_RANGE = {'min': 32768, 'max': 65535}
@@ -96,10 +97,18 @@
def resource_setup(cls):
super(BaseNetworkSecGroupTest, cls).resource_setup()
# setup basic topology for servers we can log into it
+ cls.reserve_external_subnet_cidrs()
cls.network = cls.create_network()
cls.subnet = cls.create_subnet(cls.network)
cls.router = cls.create_router_by_client()
cls.create_router_interface(cls.router['id'], cls.subnet['id'])
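+ # when an IPv6 mode is configured, make the topology dual-stack by
+ # adding an IPv6 subnet and plugging it into the same router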
+ if cls.ipv6_mode:
+ cls.subnet_v6 = cls.create_subnet(
+ cls.network,
+ ip_version=constants.IP_VERSION_6,
+ ipv6_ra_mode=cls.ipv6_mode,
+ ipv6_address_mode=cls.ipv6_mode)
+ cls.create_router_interface(cls.router['id'], cls.subnet_v6['id'])
cls.keypair = cls.create_keypair()
def setUp(self):
@@ -107,6 +116,8 @@
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.network_client.reset_quotas, self.project_id)
self.network_client.update_quotas(self.project_id, security_group=-1)
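+ # a quota of -1 means unlimited; also lift the rule quota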
+ self.network_client.update_quotas(self.project_id,
+ security_group_rule=-1)
def create_vm_testing_sec_grp(self, num_servers=2, security_groups=None,
ports=None, network_id=None,
@@ -330,6 +341,7 @@
self.assertTrue(ext_net_ip)
self.check_remote_connectivity(server_ssh_clients[0], ext_net_ip,
servers=servers)
+ return server_ssh_clients, fips, servers
def _test_protocol_number_rule(self):
# protocol number is added instead of str in security rule creation
@@ -737,6 +749,7 @@
class StatefulNetworkSecGroupTest(BaseNetworkSecGroupTest):
stateless_sg = False
+ ipv6_mode = None
@decorators.idempotent_id('3d73ec1a-2ec6-45a9-b0f8-04a283d9d764')
def test_default_sec_grp_scenarios(self):
@@ -920,9 +933,10 @@
CONF.neutron_plugin_options.firewall_driver in ['openvswitch', 'None'],
"Firewall driver other than 'openvswitch' is required to use "
"stateless security groups.")
-class StatelessNetworkSecGroupTest(BaseNetworkSecGroupTest):
+class StatelessNetworkSecGroupIPv4Test(BaseNetworkSecGroupTest):
required_extensions = ['security-group', 'stateful-security-group']
stateless_sg = True
+ ipv6_mode = None
@decorators.idempotent_id('9e193e3f-56f2-4f4e-886c-988a147958ef')
def test_default_sec_grp_scenarios(self):
@@ -1102,12 +1116,16 @@
def test_fragmented_traffic_is_accepted(self):
ssh_clients, fips, servers, security_groups = (
self._create_client_and_server_vms(use_advanced_image=True))
+ if CONF.neutron_plugin_options.default_image_is_advanced:
+ username = CONF.validation.image_ssh_user
+ else:
+ username = CONF.neutron_plugin_options.advanced_image_ssh_user
# make sure tcp connectivity to vms works fine
for fip in fips.values():
self.check_connectivity(
fip['floating_ip_address'],
- CONF.neutron_plugin_options.advanced_image_ssh_user,
+ username,
self.keypair['private_key'])
# Check that ICMP packets bigger than MTU aren't working without
@@ -1121,3 +1139,65 @@
ssh_clients['client'], fips['server']['fixed_ip_address'],
mtu=self.network['mtu'] + 1, fragmentation=True,
should_succeed=True)
+
+
+@testtools.skipIf(
+ CONF.neutron_plugin_options.firewall_driver in ['openvswitch', 'None'],
+ "Firewall driver other than 'openvswitch' is required to use "
+ "stateless security groups.")
+class StatelessSecGroupDualStackSlaacTest(BaseNetworkSecGroupTest):
+ required_extensions = ['security-group', 'stateful-security-group']
+ stateless_sg = True
+ ipv6_mode = 'slaac'
+
+ def _get_port_cidrs(self, port):
+ ips = []
+ subnet_cidrs = {}
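+ # cache subnet CIDRs so each subnet is fetched from the API only once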
+ for fixed_ip in port['fixed_ips']:
+ subnet_id = fixed_ip['subnet_id']
+ subnet_cidr = subnet_cidrs.get(subnet_id)
+ if not subnet_cidr:
+ subnet = self.client.show_subnet(subnet_id)['subnet']
+ subnet_cidr = netaddr.IPNetwork(subnet['cidr'])
+ subnet_cidrs[subnet_id] = subnet_cidr
+ ips.append(
+ netaddr.IPNetwork(
+ "%s/%s" % (fixed_ip['ip_address'], subnet_cidr.prefixlen)))
+ LOG.debug("On port %s found IP cidrs: %s", port['id'], ips)
+ return ips
+
+ def _test_default_sec_grp_scenarios(self):
+ # Make "regular" test like for IPv4 case
+ server_ssh_clients, _, servers = (
+ super()._test_default_sec_grp_scenarios())
+
+ # And additionally ensure that IPv6 addresses are configured properly
+ # in the VM
+ for ssh_client, server in zip(server_ssh_clients, servers):
+ ip_cmd = ip.IPCommand(ssh_client=ssh_client)
+ ports = self.client.list_ports(
+ device_id=server['server']['id'])['ports']
+ for port in ports:
+ configured_cidrs = [ip.network for ip in
+ ip_cmd.list_addresses(port=port)]
+ for port_cidr in self._get_port_cidrs(port):
+ self.assertIn(port_cidr, configured_cidrs)
+
+ @decorators.idempotent_id('e7d64384-ea6a-40aa-b454-854f0990153c')
+ def test_default_sec_grp_scenarios(self):
+ self._test_default_sec_grp_scenarios()
+
+
+@testtools.skipIf(
+ CONF.neutron_plugin_options.firewall_driver in ['openvswitch', 'None'],
+ "Firewall driver other than 'openvswitch' is required to use "
+ "stateless security groups.")
+class StatelessSecGroupDualStackDHCPv6StatelessTest(
+ StatelessSecGroupDualStackSlaacTest):
+ required_extensions = ['security-group', 'stateful-security-group']
+ stateless_sg = True
+ ipv6_mode = 'dhcpv6-stateless'
+
+ @decorators.idempotent_id('c61c127c-e08f-4ddf-87a3-58b3c86e5476')
+ def test_default_sec_grp_scenarios(self):
+ self._test_default_sec_grp_scenarios()
diff --git a/neutron_tempest_plugin/scenario/test_trunk.py b/neutron_tempest_plugin/scenario/test_trunk.py
index b994775..2ba8f13 100644
--- a/neutron_tempest_plugin/scenario/test_trunk.py
+++ b/neutron_tempest_plugin/scenario/test_trunk.py
@@ -277,6 +277,7 @@
self._wait_for_trunk(vm.trunk)
self._assert_has_ssh_connectivity(vm1.ssh_client)
+ @test.unstable_test("bug 2033887 / bug 2024160")
@testtools.skipUnless(
(CONF.neutron_plugin_options.advanced_image_ref or
CONF.neutron_plugin_options.default_image_is_advanced),
diff --git a/neutron_tempest_plugin/sfc/tests/scenario/manager.py b/neutron_tempest_plugin/sfc/tests/scenario/manager.py
index 394fb02..4861931 100644
--- a/neutron_tempest_plugin/sfc/tests/scenario/manager.py
+++ b/neutron_tempest_plugin/sfc/tests/scenario/manager.py
@@ -89,7 +89,7 @@
if not client:
client = self.routers_client
if not tenant_id:
- tenant_id = client.tenant_id
+ tenant_id = client.project_id
name = data_utils.rand_name(namestart)
result = client.create_router(name=name,
admin_state_up=True,
diff --git a/neutron_tempest_plugin/tap_as_a_service/scenario/test_taas.py b/neutron_tempest_plugin/tap_as_a_service/scenario/test_taas.py
index 5598fbe..92f515a 100644
--- a/neutron_tempest_plugin/tap_as_a_service/scenario/test_taas.py
+++ b/neutron_tempest_plugin/tap_as_a_service/scenario/test_taas.py
@@ -82,7 +82,7 @@
if not networks_client:
networks_client = self.networks_client
if not tenant_id:
- tenant_id = networks_client.tenant_id
+ tenant_id = networks_client.project_id
name = data_utils.rand_name(namestart)
network_kwargs = dict(name=name, tenant_id=tenant_id)
# Neutron disables port security by default so we have to check the
diff --git a/neutron_tempest_plugin/vpnaas/api/test_vpnaas.py b/neutron_tempest_plugin/vpnaas/api/test_vpnaas.py
index ab48a2f..953360e 100644
--- a/neutron_tempest_plugin/vpnaas/api/test_vpnaas.py
+++ b/neutron_tempest_plugin/vpnaas/api/test_vpnaas.py
@@ -137,7 +137,7 @@
def _get_tenant_id(self):
"""Returns the tenant_id of the client current user"""
- return self.client.tenant_id
+ return self.client.project_id
@decorators.attr(type='smoke')
@decorators.idempotent_id('74dcf2d3-a40e-4a6c-a25a-747d764bee81')
diff --git a/playbooks/linuxbridge-scenario-pre-run.yaml b/playbooks/linuxbridge-scenario-pre-run.yaml
index 26586f6..bc80e76 100644
--- a/playbooks/linuxbridge-scenario-pre-run.yaml
+++ b/playbooks/linuxbridge-scenario-pre-run.yaml
@@ -4,4 +4,4 @@
# destination IP addresses in arp tables:
- include_role:
name: legacy_ebtables
- when: ansible_distribution_release | lower == 'focal'
+ when: ansible_distribution_release | lower in ['focal', 'jammy']
diff --git a/zuul.d/2023_1_jobs.yaml b/zuul.d/2023_1_jobs.yaml
index d6ba077..b9f293f 100644
--- a/zuul.d/2023_1_jobs.yaml
+++ b/zuul.d/2023_1_jobs.yaml
@@ -7,6 +7,8 @@
- dhcp_agent_scheduler
- local_ip
- qos-bw-minimum-ingress
+ - port-resource-request
+ - port-resource-request-groups
tempest_test_regex: "\
(^neutron_tempest_plugin.api)|\
(^neutron_tempest_plugin.scenario)|\
@@ -56,8 +58,6 @@
- network-segment-range
- pagination
- port-device-profile
- - port-resource-request
- - port-resource-request-groups
- port-mac-address-regenerate
- port-security
- port-security-groups-filtering
@@ -106,10 +106,13 @@
parent: neutron-tempest-plugin-openvswitch-iptables_hybrid
override-checkout: stable/2023.1
vars:
+ network_api_extensions_common: *api_extensions
network_api_extensions_openvswitch:
- dhcp_agent_scheduler
- local_ip
- logging
+ - port-resource-request
+ - port-resource-request-groups
network_available_features: *available_features
tempest_test_regex: "\
(^neutron_tempest_plugin.api)|\
@@ -157,6 +160,7 @@
parent: neutron-tempest-plugin-linuxbridge
override-checkout: stable/2023.1
vars:
+ network_api_extensions_common: *api_extensions
network_api_extensions_linuxbridge:
- dhcp_agent_scheduler
- vlan-transparent
diff --git a/zuul.d/2023_2_jobs.yaml b/zuul.d/2023_2_jobs.yaml
new file mode 100644
index 0000000..f5ece3d
--- /dev/null
+++ b/zuul.d/2023_2_jobs.yaml
@@ -0,0 +1,269 @@
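+# Stable/2023.2 variants of the master jobs. The YAML anchors defined in
+# the first job below are reused by the later job definitions.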
+- job:
+ name: neutron-tempest-plugin-openvswitch-2023-2
+ parent: neutron-tempest-plugin-openvswitch
+ override-checkout: stable/2023.2
+ vars:
+ network_api_extensions_openvswitch: &api_extensions_openvswitch
+ - dhcp_agent_scheduler
+ - local_ip
+ - qos-bw-minimum-ingress
+ tempest_test_regex: "\
+ (^neutron_tempest_plugin.api)|\
+ (^neutron_tempest_plugin.scenario)|\
+ (^tempest.api.compute.servers.test_attach_interfaces)|\
+ (^tempest.api.compute.servers.test_multiple_create)"
+ network_available_features: &available_features
+ - ipv6_metadata
+ network_api_extensions_common: &api_extensions
+ - address-group
+ - address-scope
+ - agent
+ - allowed-address-pairs
+ - auto-allocated-topology
+ - availability_zone
+ - binding
+ - default-subnetpools
+ - dns-domain-ports
+ - dns-integration
+ - dns-integration-domain-keywords
+ - empty-string-filtering
+ - expose-port-forwarding-in-fip
+ - expose-l3-conntrack-helper
+ - ext-gw-mode
+ - external-net
+ - extra_dhcp_opt
+ - extraroute
+ - extraroute-atomic
+ - filter-validation
+ - fip-port-details
+ - flavors
+ - floating-ip-port-forwarding
+ - floating-ip-port-forwarding-detail
+ - floatingip-pools
+ - ip-substring-filtering
+ - l3-conntrack-helper
+ - l3-ext-ndp-proxy
+ - l3-flavors
+ - l3-ha
+ - l3-ndp-proxy
+ - l3_agent_scheduler
+ - metering
+ - multi-provider
+ - net-mtu
+ - net-mtu-writable
+ - network-ip-availability
+ - network_availability_zone
+ - network-segment-range
+ - pagination
+ - port-device-profile
+ - port-mac-address-regenerate
+ - port-resource-request
+ - port-resource-request-groups
+ - port-security
+ - port-security-groups-filtering
+ - project-id
+ - provider
+ - qos
+ - qos-fip
+ - quotas
+ - quota_details
+ - rbac-address-group
+ - rbac-address-scope
+ - rbac-policies
+ - rbac-security-groups
+ - rbac-subnetpool
+ - router
+ - router_availability_zone
+ - security-group
+ - security-groups-remote-address-group
+ - segment
+ - service-type
+ - sorting
+ - standard-attr-description
+ - standard-attr-revisions
+ - standard-attr-segment
+ - standard-attr-tag
+ - standard-attr-timestamp
+ - stateful-security-group
+ - subnet_allocation
+ - subnet-dns-publish-fixed-ip
+ - subnet-service-types
+ - subnetpool-prefix-ops
+ - tag-ports-during-bulk-creation
+ - trunk
+ - trunk-details
+ - uplink-status-propagation
+ devstack_localrc:
+ NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_openvswitch) | join(',') }}"
+ devstack_local_conf:
+ test-config:
+ $TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: "{{ network_available_features | join(',') }}"
+
+- job:
+ name: neutron-tempest-plugin-openvswitch-iptables_hybrid-2023-2
+ parent: neutron-tempest-plugin-openvswitch-iptables_hybrid
+ override-checkout: stable/2023.2
+ vars:
+ network_api_extensions_common: *api_extensions
+ network_api_extensions_openvswitch: *api_extensions_openvswitch
+ network_available_features: *available_features
+ tempest_test_regex: "\
+ (^neutron_tempest_plugin.api)|\
+ (^neutron_tempest_plugin.scenario)|\
+ (^tempest.api.compute.servers.test_attach_interfaces)|\
+ (^tempest.api.compute.servers.test_multiple_create)"
+ # TODO(slaweq): remove the trunk subport_connectivity test from the
+ # exclude list when bug https://bugs.launchpad.net/neutron/+bug/1838760
+ # is fixed
+ # TODO(akatz): remove the established tcp session verification test when
+ # bug https://bugzilla.redhat.com/show_bug.cgi?id=1965036 is fixed
+ tempest_exclude_regex: "\
+ (^neutron_tempest_plugin.scenario.test_trunk.TrunkTest.test_subport_connectivity)|\
+ (^neutron_tempest_plugin.scenario.test_security_groups.StatefulNetworkSecGroupTest.test_established_tcp_session_after_re_attachinging_sg)|\
+ (^neutron_tempest_plugin.scenario.test_security_groups.StatelessNetworkSecGroupTest.test_established_tcp_session_after_re_attachinging_sg)"
+ devstack_localrc:
+ NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_openvswitch) | join(',') }}"
+ devstack_local_conf:
+ test-config:
+ $TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: "{{ network_available_features | join(',') }}"
+ neutron_plugin_options:
+ available_type_drivers: flat,vlan,local,vxlan
+ firewall_driver: iptables_hybrid
+
+- job:
+ name: neutron-tempest-plugin-openvswitch-enforce-scope-new-defaults-2023-2
+ parent: neutron-tempest-plugin-openvswitch-2023-2
+ override-checkout: stable/2023.2
+ vars:
+ devstack_localrc:
+ # Enabling scope checks and the new policy defaults for services.
+ # NOTE: (gmann) We need to keep the keystone scope check disabled, as
+ # services (except ironic) do not support the system scope and
+ # they need keystone to keep working with project scope. Until
+ # Keystone policies are changed to work with both system and
+ # project scopes, scope checking has to stay disabled for
+ # keystone.
+ NOVA_ENFORCE_SCOPE: true
+ GLANCE_ENFORCE_SCOPE: true
+ NEUTRON_ENFORCE_SCOPE: true
+
+- job:
+ name: neutron-tempest-plugin-linuxbridge-2023-2
+ parent: neutron-tempest-plugin-linuxbridge
+ override-checkout: stable/2023.2
+ vars:
+ network_api_extensions_common: *api_extensions
+ network_api_extensions_linuxbridge:
+ - dhcp_agent_scheduler
+ - vlan-transparent
+ network_available_features: *available_features
+ tempest_test_regex: "\
+ (^neutron_tempest_plugin.api)|\
+ (^neutron_tempest_plugin.scenario)|\
+ (^tempest.api.compute.servers.test_attach_interfaces)|\
+ (^tempest.api.compute.servers.test_multiple_create)"
+ # TODO(eolivare): remove VLAN Transparency tests from the exclude list
+ # when bug https://bugs.launchpad.net/neutron/+bug/1907548 is fixed
+ # TODO(slaweq): remove
+ # test_established_tcp_session_after_re_attachinging_sg from the
+ # exclude regex when bug https://bugs.launchpad.net/neutron/+bug/1936911
+ # is fixed
+ # TODO(slaweq): remove test_floatingip_port_details from the exclude
+ # regex when bug https://bugs.launchpad.net/neutron/+bug/1799790 is
+ # fixed
+ tempest_exclude_regex: "\
+ (^neutron_tempest_plugin.scenario.test_vlan_transparency.VlanTransparencyTest)|\
+ (^neutron_tempest_plugin.scenario.test_security_groups.StatefulNetworkSecGroupTest.test_established_tcp_session_after_re_attachinging_sg)|\
+ (^neutron_tempest_plugin.scenario.test_security_groups.StatelessNetworkSecGroupTest.test_established_tcp_session_after_re_attachinging_sg)|\
+ (^neutron_tempest_plugin.scenario.test_floatingip.FloatingIPPortDetailsTest.test_floatingip_port_details)"
+ devstack_localrc:
+ NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_linuxbridge) | join(',') }}"
+ devstack_local_conf:
+ test-config:
+ $TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: "{{ network_available_features | join(',') }}"
+ neutron_plugin_options:
+ available_type_drivers: flat,vlan,local,vxlan
+ q_agent: linuxbridge
+ firewall_driver: iptables
+
+- job:
+ name: neutron-tempest-plugin-ovn-2023-2
+ parent: neutron-tempest-plugin-ovn
+ override-checkout: stable/2023.2
+ vars:
+ network_api_extensions_ovn:
+ - vlan-transparent
+ tempest_test_regex: "\
+ (^neutron_tempest_plugin.api)|\
+ (^neutron_tempest_plugin.scenario)|\
+ (^tempest.api.compute.servers.test_attach_interfaces)|\
+ (^tempest.api.compute.servers.test_multiple_create)"
+ # TODO(jlibosva): Remove the NetworkWritableMtuTest test from the list
+ # once east/west fragmentation is supported in core OVN
+ tempest_exclude_regex: "\
+ (^neutron_tempest_plugin.scenario.test_mtu.NetworkWritableMtuTest)"
+ devstack_localrc:
+ NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_ovn) | join(',') }}"
+ devstack_local_conf:
+ test-config:
+ $TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: ""
+ neutron_plugin_options:
+ available_type_drivers: local,flat,vlan,geneve
+ is_igmp_snooping_enabled: True
+ firewall_driver: ovn
+
+- job:
+ name: neutron-tempest-plugin-dvr-multinode-scenario-2023-2
+ parent: neutron-tempest-plugin-dvr-multinode-scenario
+ override-checkout: stable/2023.2
+ vars:
+ network_api_extensions_common: *api_extensions
+ network_api_extensions_dvr:
+ - dhcp_agent_scheduler
+ - dvr
+ devstack_localrc:
+ NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_dvr) | join(',') }}"
+
+- job:
+ name: neutron-tempest-plugin-designate-scenario-2023-2
+ parent: neutron-tempest-plugin-designate-scenario
+ override-checkout: stable/2023.2
+ vars:
+ network_api_extensions_common: *api_extensions
+
+- job:
+ name: neutron-tempest-plugin-sfc-2023-2
+ parent: neutron-tempest-plugin-sfc
+ override-checkout: stable/2023.2
+
+- job:
+ name: neutron-tempest-plugin-bgpvpn-bagpipe-2023-2
+ parent: neutron-tempest-plugin-bgpvpn-bagpipe
+ override-checkout: stable/2023.2
+
+- job:
+ name: neutron-tempest-plugin-dynamic-routing-2023-2
+ parent: neutron-tempest-plugin-dynamic-routing
+ override-checkout: stable/2023.2
+
+- job:
+ name: neutron-tempest-plugin-fwaas-2023-2
+ parent: neutron-tempest-plugin-fwaas
+ override-checkout: stable/2023.2
+
+- job:
+ name: neutron-tempest-plugin-vpnaas-2023-2
+ parent: neutron-tempest-plugin-vpnaas
+ override-checkout: stable/2023.2
+
+- job:
+ name: neutron-tempest-plugin-tap-as-a-service-2023-2
+ parent: neutron-tempest-plugin-tap-as-a-service
+ override-checkout: stable/2023.2
diff --git a/zuul.d/base-nested-switch.yaml b/zuul.d/base-nested-switch.yaml
index a9f5750..b4570fe 100644
--- a/zuul.d/base-nested-switch.yaml
+++ b/zuul.d/base-nested-switch.yaml
@@ -8,26 +8,49 @@
nodes:
- controller
-# Base nested switch job for non EM releases
+- nodeset:
+ name: neutron-nested-virt-ubuntu-jammy
+ nodes:
+ - name: controller
+ label: nested-virt-ubuntu-jammy
+ groups:
+ - name: tempest
+ nodes:
+ - controller
+
+# Base nested switch job for 2023.1 and later
- job:
name: neutron-tempest-plugin-base-nested-switch
parent: neutron-tempest-plugin-base
abstract: true
- branches: ^(?!stable/(train|ussuri|victoria|wallaby)).*$
+ branches:
+ regex: ^(stable/(train|ussuri|victoria|wallaby|xena|yoga|zed)).*$
+ negate: true
# Comment nodeset and vars to switch back to non nested nodes
- nodeset: neutron-nested-virt-ubuntu-focal
- vars:
+ nodeset: neutron-nested-virt-ubuntu-jammy
+ vars: &nested_virt_vars
devstack_localrc:
LIBVIRT_TYPE: kvm
- # cirros 0.6.1 not booting when host-passthrough is used
+ # NOTE(ykarel): seeing issues with host-passthrough mode
+ # https://bugs.launchpad.net/neutron/+bug/2036603
# LIBVIRT_CPU_MODE: host-passthrough
- CIRROS_VERSION: 0.6.1
- DEFAULT_IMAGE_NAME: cirros-0.6.1-x86_64-disk
- DEFAULT_IMAGE_FILE_NAME: cirros-0.6.1-x86_64-disk.img
+ CIRROS_VERSION: 0.6.2
+ DEFAULT_IMAGE_NAME: cirros-0.6.2-x86_64-disk
+ DEFAULT_IMAGE_FILE_NAME: cirros-0.6.2-x86_64-disk.img
+
+# Base nested switch job for yoga and zed
+- job:
+ name: neutron-tempest-plugin-base-nested-switch
+ parent: neutron-tempest-plugin-base
+ abstract: true
+ branches: ^stable/(yoga|zed)$
+ # Comment out nodeset and vars to switch back to non-nested nodes
+ nodeset: neutron-nested-virt-ubuntu-focal
+ vars: *nested_virt_vars
# Base nested switch job for EM releases
- job:
name: neutron-tempest-plugin-base-nested-switch
parent: neutron-tempest-plugin-base
abstract: true
- branches: ^(stable/(train|ussuri|victoria|wallaby)).*$
+ branches: ^(stable/(train|ussuri|victoria|wallaby|xena)).*$
diff --git a/zuul.d/master_jobs.yaml b/zuul.d/master_jobs.yaml
index dacf3d5..7e1aacb 100644
--- a/zuul.d/master_jobs.yaml
+++ b/zuul.d/master_jobs.yaml
@@ -27,9 +27,9 @@
NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_tempest) | join(',') }}"
PHYSICAL_NETWORK: public
IMAGE_URLS: https://cloud-images.ubuntu.com/minimal/releases/focal/release/ubuntu-20.04-minimal-cloudimg-amd64.img
- CIRROS_VERSION: 0.6.1
- DEFAULT_IMAGE_NAME: cirros-0.6.1-x86_64-uec
- DEFAULT_IMAGE_FILE_NAME: cirros-0.6.1-x86_64-uec.tar.gz
+ CIRROS_VERSION: 0.6.2
+ DEFAULT_IMAGE_NAME: cirros-0.6.2-x86_64-uec
+ DEFAULT_IMAGE_FILE_NAME: cirros-0.6.2-x86_64-uec.tar.gz
ADVANCED_IMAGE_NAME: ubuntu-20.04-minimal-cloudimg-amd64
ADVANCED_INSTANCE_TYPE: ntp_image_256M
ADVANCED_INSTANCE_USER: ubuntu
@@ -38,11 +38,13 @@
# TODO(lucasagomes): Re-enable MOD_WSGI after
# https://bugs.launchpad.net/neutron/+bug/1912359 is implemented
NEUTRON_DEPLOY_MOD_WSGI: false
- # TODO(ralonsoh): remove OVN_BUILD_FROM_SOURCE once the OS packages
- # include at least OVN v20.12.0.
+ # TODO(ihrachys): remove OVN_BUILD_FROM_SOURCE once the OS packages
+ # include at least OVN v22.03.3.
OVN_BUILD_FROM_SOURCE: True
- OVN_BRANCH: "v21.03.0"
- OVS_BRANCH: "8dc1733eaea866dce033b3c44853e1b09bf59fc7"
+ # TODO(ihrachys): switch back to a tagged version when it's released
+ # OVN_BRANCH: "v22.03.3"
+ OVN_BRANCH: "36e3ab9b47e93af0599a818e9d6b2930e49473f0"
+ OVS_BRANCH: "2410b95597fcec5f733caf77febdb46f4ffacd27"
devstack_plugins:
neutron: https://opendev.org/openstack/neutron.git
neutron-tempest-plugin: https://opendev.org/openstack/neutron-tempest-plugin.git
@@ -268,7 +270,7 @@
neutron_plugin_options:
available_type_drivers: flat,vlan,local,vxlan
firewall_driver: openvswitch
- irrelevant-files: &openvswitch-scenario-irrelevant-files
+ irrelevant-files:
- ^\.pylintrc$
- ^(test-|)requirements.txt$
- lower-constraints.txt
@@ -303,7 +305,10 @@
- ^neutron_tempest_plugin/services/bgp/.*$
- ^rally-jobs/.*$
- ^roles/.*functional.*$
+ - ^playbooks/.*dvr-multinode.*$
+ - ^playbooks/.*dynamic-routing.*$
- ^playbooks/.*functional.*$
+ - ^playbooks/.*linuxbridge.*$
- ^vagrant/.*$
- ^zuul.d/(?!(project)).*\.yaml
@@ -404,7 +409,10 @@
- ^neutron_tempest_plugin/services/bgp/.*$
- ^rally-jobs/.*$
- ^roles/.*functional.*$
+ - ^playbooks/.*dvr-multinode.*$
+ - ^playbooks/.*dynamic-routing.*$
- ^playbooks/.*functional.*$
+ - ^playbooks/.*linuxbridge.*$
- ^vagrant/.*$
- ^zuul.d/(?!(project)).*\.yaml
@@ -571,6 +579,8 @@
- ^neutron_tempest_plugin/services/bgp/.*$
- ^rally-jobs/.*$
- ^roles/.*functional.*$
+ - ^playbooks/.*dvr-multinode.*$
+ - ^playbooks/.*dynamic-routing.*$
- ^playbooks/.*functional.*$
- ^vagrant/.*$
- ^zuul.d/(?!(project)).*\.yaml
@@ -605,11 +615,13 @@
OVN_DBS_LOG_LEVEL: dbg
ENABLE_TLS: True
OVN_IGMP_SNOOPING_ENABLE: True
- # TODO(eolivare): Remove OVN_BUILD_FROM_SOURCE once vlan-transparency
- # is included in an ovn released version
+ # TODO(ihrachys): remove OVN_BUILD_FROM_SOURCE once the OS packages
+ # include at least OVN v22.03.3.
OVN_BUILD_FROM_SOURCE: True
- OVN_BRANCH: "v21.06.0"
- OVS_BRANCH: "a4b04276ab5934d087669ff2d191a23931335c87"
+ # TODO(ihrachys): switch back to a tagged version when it's released
+ # OVN_BRANCH: "v22.03.3"
+ OVN_BRANCH: "36e3ab9b47e93af0599a818e9d6b2930e49473f0"
+ OVS_BRANCH: "2410b95597fcec5f733caf77febdb46f4ffacd27"
OVS_SYSCONFDIR: "/usr/local/etc/openvswitch"
devstack_services:
br-ex-tcpdump: true
@@ -701,7 +713,10 @@
- ^neutron_tempest_plugin/services/bgp/.*$
- ^rally-jobs/.*$
- ^roles/.*functional.*$
+ - ^playbooks/.*dvr-multinode.*$
+ - ^playbooks/.*dynamic-routing.*$
- ^playbooks/.*functional.*$
+ - ^playbooks/.*linuxbridge.*$
- ^vagrant/.*$
- ^zuul.d/(?!(project)).*\.yaml
@@ -741,9 +756,9 @@
USE_PYTHON3: true
NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_dvr) | join(',') }}"
PHYSICAL_NETWORK: default
- CIRROS_VERSION: 0.6.1
- DEFAULT_IMAGE_NAME: cirros-0.6.1-x86_64-uec
- DEFAULT_IMAGE_FILE_NAME: cirros-0.6.1-x86_64-uec.tar.gz
+ CIRROS_VERSION: 0.6.2
+ DEFAULT_IMAGE_NAME: cirros-0.6.2-x86_64-uec
+ DEFAULT_IMAGE_FILE_NAME: cirros-0.6.2-x86_64-uec.tar.gz
IMAGE_URLS: https://cloud-images.ubuntu.com/minimal/releases/focal/release/ubuntu-20.04-minimal-cloudimg-amd64.img
ADVANCED_IMAGE_NAME: ubuntu-20.04-minimal-cloudimg-amd64
ADVANCED_INSTANCE_TYPE: ntp_image_256M
@@ -894,7 +909,46 @@
agent_mode: dvr_snat
agent:
availability_zone: nova
- irrelevant-files: *openvswitch-scenario-irrelevant-files
+ irrelevant-files:
+ - ^\.pylintrc$
+ - ^(test-|)requirements.txt$
+ - lower-constraints.txt
+ - ^releasenotes/.*$
+ - ^doc/.*$
+ - ^setup.cfg$
+ - ^.*\.rst$
+ - ^.*\.conf\.sample$
+ - ^neutron/locale/.*$
+ - ^neutron/tests/unit/.*$
+ - ^neutron/tests/fullstack/.*
+ - ^neutron/tests/functional/.*
+ - ^tools/.*$
+ - ^tox.ini$
+ - ^plugin.spec$
+ - ^neutron/agent/ovn/.*$
+ - ^neutron/agent/windows/.*$
+ - ^neutron/plugins/ml2/drivers/linuxbridge/.*$
+ - ^neutron/plugins/ml2/drivers/macvtap/.*$
+ - ^neutron/plugins/ml2/drivers/mech_sriov/.*$
+ - ^neutron/plugins/ml2/drivers/ovn/.*$
+ - ^neutron/services/ovn_l3/.*$
+ - ^neutron/services/logapi/drivers/ovn/.*$
+ - ^neutron/services/portforwarding/drivers/ovn/.*$
+ - ^neutron/services/qos/drivers/linuxbridge/.*$
+ - ^neutron/services/qos/drivers/ovn/.*$
+ - ^neutron/services/trunk/drivers/linuxbridge/.*$
+ - ^neutron/services/trunk/drivers/ovn/.*$
+ - ^neutron/cmd/ovn/.*$
+ - ^neutron/common/ovn/.*$
+ - ^neutron_tempest_plugin/(bgpvpn|fwaas|neutron_dynamic_routing|sfc|tap_as_a_service|vpnaas).*$
+ - ^neutron_tempest_plugin/services/bgp/.*$
+ - ^rally-jobs/.*$
+ - ^roles/.*functional.*$
+ - ^playbooks/.*dynamic-routing.*$
+ - ^playbooks/.*functional.*$
+ - ^playbooks/.*linuxbridge.*$
+ - ^vagrant/.*$
+ - ^zuul.d/(?!(project)).*\.yaml
- job:
name: neutron-tempest-plugin-designate-scenario
@@ -949,7 +1003,10 @@
- ^neutron_tempest_plugin/services/bgp/.*$
- ^rally-jobs/.*$
- ^roles/.*functional.*$
+ - ^playbooks/.*dvr-multinode.*$
+ - ^playbooks/.*dynamic-routing.*$
- ^playbooks/.*functional.*$
+ - ^playbooks/.*linuxbridge.*$
- ^vagrant/.*$
- ^zuul.d/(?!(project)).*\.yaml
@@ -988,9 +1045,9 @@
devstack_localrc:
# TODO(slaweq): check why traceroute output is different in Cirros >
# 0.6.1 which is causing failures of the networking-sfc jobs
- CIRROS_VERSION: 0.5.1
- DEFAULT_IMAGE_NAME: cirros-0.5.1-x86_64-uec
- DEFAULT_IMAGE_FILE_NAME: cirros-0.5.1-x86_64-uec.tar.gz
+ CIRROS_VERSION: 0.5.2
+ DEFAULT_IMAGE_NAME: cirros-0.5.2-x86_64-uec
+ DEFAULT_IMAGE_FILE_NAME: cirros-0.5.2-x86_64-uec.tar.gz
Q_AGENT: openvswitch
Q_ML2_TENANT_NETWORK_TYPE: vxlan
Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
@@ -1013,6 +1070,8 @@
- ^neutron/tests/fullstack/.*
- ^neutron/tests/functional/.*
- ^neutron_tempest_plugin/api/test_.*$
+ - ^neutron_tempest_plugin/scenario/admin/.*$
+ - ^neutron_tempest_plugin/scenario/test_.*$
- ^neutron_tempest_plugin/(bgpvpn|fwaas|neutron_dynamic_routing|tap_as_a_service|vpnaas).*$
- ^neutron_tempest_plugin/services/bgp/.*$
- ^tools/.*$
@@ -1020,7 +1079,10 @@
- ^plugin.spec$
- ^rally-jobs/.*$
- ^roles/.*functional.*$
+ - ^playbooks/.*dvr-multinode.*$
+ - ^playbooks/.*dynamic-routing.*$
- ^playbooks/.*functional.*$
+ - ^playbooks/.*linuxbridge.*$
- ^vagrant/.*$
- ^zuul.d/(?!(project)).*\.yaml
@@ -1076,6 +1138,8 @@
- ^neutron/tests/fullstack/.*
- ^neutron/tests/functional/.*
- ^neutron_tempest_plugin/api/test_.*$
+ - ^neutron_tempest_plugin/scenario/admin/.*$
+ - ^neutron_tempest_plugin/scenario/test_.*$
- ^neutron_tempest_plugin/(fwaas|neutron_dynamic_routing|sfc|tap_as_a_service|vpnaas).*$
- ^neutron_tempest_plugin/services/bgp/.*$
- ^tools/.*$
@@ -1083,7 +1147,10 @@
- ^plugin.spec$
- ^rally-jobs/.*$
- ^roles/.*functional.*$
+ - ^playbooks/.*dvr-multinode.*$
+ - ^playbooks/.*dynamic-routing.*$
- ^playbooks/.*functional.*$
+ - ^playbooks/.*linuxbridge.*$
- ^vagrant/.*$
- ^zuul.d/(?!(project)).*\.yaml
@@ -1108,24 +1175,7 @@
- bgp_4byte_asn
devstack_localrc:
NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_bgp) | join(',') }}"
- Q_AGENT: openvswitch
- Q_ML2_TENANT_NETWORK_TYPE: vxlan
- Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
devstack_services:
- # Disable OVN services
- br-ex-tcpdump: false
- br-int-flows: false
- ovn-controller: false
- ovn-northd: false
- ovs-vswitchd: false
- ovsdb-server: false
- q-ovn-metadata-agent: false
- # Neutron services
- q-agt: true
- q-dhcp: true
- q-meta: true
- q-metering: true
- q-l3: true
neutron-dr: true
neutron-dr-agent: true
tempest_concurrency: 1
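
This hunk strips the OVS-specific agent settings and service toggles from the master dynamic-routing job, leaving it on its parent's defaults; the same block reappears below, pinned into the xena, yoga and zed variants that still run the openvswitch backend. devstack_services values are booleans that switch individual DevStack services on or off, with unlisted services inheriting the parent job's setting. A minimal sketch with a hypothetical job name:

- job:
    name: example-dynamic-routing-job   # hypothetical
    parent: neutron-tempest-plugin-dynamic-routing
    vars:
      devstack_services:
        neutron-dr: true                # dynamic-routing API service
        neutron-dr-agent: true          # BGP speaker agent
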
@@ -1144,13 +1194,17 @@
- ^neutron/tests/fullstack/.*
- ^neutron/tests/functional/.*
- ^neutron_tempest_plugin/api/test_.*$
+ - ^neutron_tempest_plugin/scenario/admin/.*$
+ - ^neutron_tempest_plugin/scenario/test_.*$
- ^neutron_tempest_plugin/(bgpvpn|fwaas|sfc|tap_as_a_service|vpnaas).*$
- ^tools/.*$
- ^tox.ini$
- ^plugin.spec$
- ^rally-jobs/.*$
- ^roles/.*functional.*$
+ - ^playbooks/.*dvr-multinode.*$
- ^playbooks/.*functional.*$
+ - ^playbooks/.*linuxbridge.*$
- ^vagrant/.*$
- ^zuul.d/(?!(project)).*\.yaml
@@ -1204,6 +1258,8 @@
- ^neutron/tests/fullstack/.*
- ^neutron/tests/functional/.*
- ^neutron_tempest_plugin/api/test_.*$
+ - ^neutron_tempest_plugin/scenario/admin/.*$
+ - ^neutron_tempest_plugin/scenario/test_.*$
- ^neutron_tempest_plugin/(bgpvpn|neutron_dynamic_routing|sfc|tap_as_a_service|vpnaas).*$
- ^neutron_tempest_plugin/services/bgp/.*$
- ^tools/.*$
@@ -1211,7 +1267,10 @@
- ^plugin.spec$
- ^rally-jobs/.*$
- ^roles/.*functional.*$
+ - ^playbooks/.*dvr-multinode.*$
+ - ^playbooks/.*dynamic-routing.*$
- ^playbooks/.*functional.*$
+ - ^playbooks/.*linuxbridge.*$
- ^vagrant/.*$
- ^zuul.d/(?!(project)).*\.yaml
@@ -1268,6 +1327,8 @@
- ^neutron/tests/fullstack/.*
- ^neutron/tests/functional/.*
- ^neutron_tempest_plugin/api/test_.*$
+ - ^neutron_tempest_plugin/scenario/admin/.*$
+ - ^neutron_tempest_plugin/scenario/test_.*$
- ^neutron_tempest_plugin/(bgpvpn|fwaas|neutron_dynamic_routing|sfc|tap_as_a_service).*$
- ^neutron_tempest_plugin/services/bgp/.*$
- ^tools/.*$
@@ -1275,7 +1336,10 @@
- ^plugin.spec$
- ^rally-jobs/.*$
- ^roles/.*functional.*$
+ - ^playbooks/.*dvr-multinode.*$
+ - ^playbooks/.*dynamic-routing.*$
- ^playbooks/.*functional.*$
+ - ^playbooks/.*linuxbridge.*$
- ^vagrant/.*$
- ^zuul.d/(?!(project)).*\.yaml
@@ -1374,6 +1438,8 @@
- ^neutron/tests/fullstack/.*
- ^neutron/tests/functional/.*
- ^neutron_tempest_plugin/api/test_.*$
+ - ^neutron_tempest_plugin/scenario/admin/.*$
+ - ^neutron_tempest_plugin/scenario/test_.*$
- ^neutron_tempest_plugin/(bgpvpn|fwaas|neutron_dynamic_routing|sfc|vpnaas).*$
- ^neutron_tempest_plugin/services/bgp/.*$
- ^tools/.*$
@@ -1381,6 +1447,9 @@
- ^plugin.spec$
- ^rally-jobs/.*$
- ^roles/.*functional.*$
+ - ^playbooks/.*dvr-multinode.*$
+ - ^playbooks/.*dynamic-routing.*$
- ^playbooks/.*functional.*$
+ - ^playbooks/.*linuxbridge.*$
- ^vagrant/.*$
- ^zuul.d/(?!(project)).*\.yaml
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index d1d9717..5d3f973 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -2,7 +2,6 @@
name: neutron-tempest-plugin-jobs
check:
jobs:
- - neutron-tempest-plugin-linuxbridge
- neutron-tempest-plugin-openvswitch
- neutron-tempest-plugin-openvswitch-iptables_hybrid
- neutron-tempest-plugin-openvswitch-enforce-scope-old-defaults
@@ -10,7 +9,6 @@
- neutron-tempest-plugin-designate-scenario
gate:
jobs:
- - neutron-tempest-plugin-linuxbridge
- neutron-tempest-plugin-openvswitch
- neutron-tempest-plugin-ovn
- neutron-tempest-plugin-openvswitch-iptables_hybrid
@@ -19,6 +17,7 @@
# the experimental queue once it is more stable
experimental:
jobs:
+ - neutron-tempest-plugin-linuxbridge
- neutron-tempest-plugin-dvr-multinode-scenario
- neutron-tempest-plugin-openvswitch-distributed-dhcp
- neutron-tempest-plugin-openvswitch-iptables_hybrid-distributed-dhcp
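
With this change the linuxbridge job no longer runs in check or gate and remains reachable only through the experimental pipeline, which in the OpenStack Zuul setup runs on demand (conventionally, by leaving a "check experimental" review comment) rather than on every patch set. A sketch of the pipeline split, with hypothetical job names:

- project:
    check:
      jobs:
        - fast-reliable-job      # runs on every new patch set
    gate:
      jobs:
        - fast-reliable-job      # must pass before merging
    experimental:
      jobs:
        - slow-or-flaky-job      # runs only when explicitly requested
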
@@ -176,47 +175,66 @@
jobs:
- neutron-tempest-plugin-dvr-multinode-scenario-2023-1
+- project-template:
+ name: neutron-tempest-plugin-jobs-2023-2
+ check:
+ jobs:
+ - neutron-tempest-plugin-openvswitch-2023-2
+ - neutron-tempest-plugin-openvswitch-iptables_hybrid-2023-2
+ - neutron-tempest-plugin-ovn-2023-2
+ - neutron-tempest-plugin-designate-scenario-2023-2
+ gate:
+ jobs:
+ - neutron-tempest-plugin-ovn-2023-2
+ # TODO(slaweq): Move neutron-tempest-plugin-dvr-multinode-scenario out of
+ # the experimental queue once it is more stable
+ experimental:
+ jobs:
+ - neutron-tempest-plugin-linuxbridge-2023-2
+ - neutron-tempest-plugin-dvr-multinode-scenario-2023-2
+
- project:
templates:
- build-openstack-docs-pti
- neutron-tempest-plugin-jobs
- - neutron-tempest-plugin-jobs-xena
- neutron-tempest-plugin-jobs-yoga
- neutron-tempest-plugin-jobs-zed
- neutron-tempest-plugin-jobs-2023-1
+ - neutron-tempest-plugin-jobs-2023-2
- check-requirements
- tempest-plugin-jobs
- release-notes-jobs-python3
check:
jobs:
- neutron-tempest-plugin-sfc
- - neutron-tempest-plugin-sfc-xena
- neutron-tempest-plugin-sfc-yoga
- neutron-tempest-plugin-sfc-zed
- neutron-tempest-plugin-sfc-2023-1
+ - neutron-tempest-plugin-sfc-2023-2
- neutron-tempest-plugin-bgpvpn-bagpipe
- - neutron-tempest-plugin-bgpvpn-bagpipe-xena
- neutron-tempest-plugin-bgpvpn-bagpipe-yoga
- neutron-tempest-plugin-bgpvpn-bagpipe-zed
- neutron-tempest-plugin-bgpvpn-bagpipe-2023-1
+ - neutron-tempest-plugin-bgpvpn-bagpipe-2023-2
- neutron-tempest-plugin-dynamic-routing
- - neutron-tempest-plugin-dynamic-routing-xena
- neutron-tempest-plugin-dynamic-routing-yoga
- neutron-tempest-plugin-dynamic-routing-zed
- neutron-tempest-plugin-dynamic-routing-2023-1
+ - neutron-tempest-plugin-dynamic-routing-2023-2
- neutron-tempest-plugin-fwaas
- neutron-tempest-plugin-fwaas-zed
- neutron-tempest-plugin-fwaas-2023-1
+ - neutron-tempest-plugin-fwaas-2023-2
- neutron-tempest-plugin-vpnaas
- - neutron-tempest-plugin-vpnaas-xena
- neutron-tempest-plugin-vpnaas-yoga
- neutron-tempest-plugin-vpnaas-zed
- neutron-tempest-plugin-vpnaas-2023-1
+ - neutron-tempest-plugin-vpnaas-2023-2
- neutron-tempest-plugin-tap-as-a-service
- - neutron-tempest-plugin-tap-as-a-service-xena
- neutron-tempest-plugin-tap-as-a-service-yoga
- neutron-tempest-plugin-tap-as-a-service-zed
- neutron-tempest-plugin-tap-as-a-service-2023-1
+ - neutron-tempest-plugin-tap-as-a-service-2023-2
gate:
jobs:
diff --git a/zuul.d/victoria_jobs.yaml b/zuul.d/victoria_jobs.yaml
index cfca0ea..1f611db 100644
--- a/zuul.d/victoria_jobs.yaml
+++ b/zuul.d/victoria_jobs.yaml
@@ -238,6 +238,8 @@
ADVANCED_INSTANCE_TYPE: ds512M
ADVANCED_INSTANCE_USER: ubuntu
CUSTOMIZE_IMAGE: false
+ OVN_BRANCH: "v21.06.0"
+ OVS_BRANCH: "a4b04276ab5934d087669ff2d191a23931335c87"
devstack_local_conf:
test-config:
$TEMPEST_CONFIG:
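
Setting OVN_BRANCH and OVS_BRANCH in devstack_localrc makes DevStack's OVN support build both projects from source at the given git refs instead of relying on distribution packages; pairing a tagged OVN release with the compatible OVS commit keeps this older stable branch compiling. A sketch of the pinning, reusing the values from the hunk above:

devstack_localrc:
  OVN_BRANCH: "v21.06.0"   # git ref checked out in the ovn repository
  OVS_BRANCH: "a4b04276ab5934d087669ff2d191a23931335c87"   # matching ovs commit
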
diff --git a/zuul.d/wallaby_jobs.yaml b/zuul.d/wallaby_jobs.yaml
index 9fce55b..92a3e50 100644
--- a/zuul.d/wallaby_jobs.yaml
+++ b/zuul.d/wallaby_jobs.yaml
@@ -175,6 +175,8 @@
network_api_extensions: *api_extensions
devstack_localrc:
NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
+ OVN_BRANCH: "v21.06.0"
+ OVS_BRANCH: "a4b04276ab5934d087669ff2d191a23931335c87"
devstack_local_conf:
test-config:
$TEMPEST_CONFIG:
diff --git a/zuul.d/xena_jobs.yaml b/zuul.d/xena_jobs.yaml
index 595b4d8..25d63a9 100644
--- a/zuul.d/xena_jobs.yaml
+++ b/zuul.d/xena_jobs.yaml
@@ -3,6 +3,11 @@
parent: neutron-tempest-plugin-base
nodeset: openstack-single-node-focal
override-checkout: stable/xena
+ required-projects: &required-projects-xena
+ - openstack/neutron
+ - name: openstack/neutron-tempest-plugin
+ override-checkout: 2.3.0
+ - openstack/tempest
vars:
tempest_concurrency: 4
tempest_test_regex: ^neutron_tempest_plugin\.api
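
required-projects controls which repositories Zuul prepares on the test node, and a per-project override-checkout takes precedence over the job-level one: openstack/neutron follows the job's stable/xena checkout while neutron-tempest-plugin itself is pinned to the 2.3.0 tag. A sketch of the precedence, with a hypothetical job name:

- job:
    name: example-xena-job              # hypothetical
    override-checkout: stable/xena      # default ref for required projects
    required-projects:
      - openstack/neutron               # checked out at stable/xena
      - name: openstack/neutron-tempest-plugin
        override-checkout: 2.3.0        # pinned tag wins over stable/xena
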
@@ -96,6 +101,7 @@
name: neutron-tempest-plugin-scenario-openvswitch-xena
parent: neutron-tempest-plugin-openvswitch
override-checkout: stable/xena
+ required-projects: *required-projects-xena
vars:
tempest_test_regex: "\
(^neutron_tempest_plugin.scenario)|\
@@ -115,6 +121,7 @@
name: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-xena
parent: neutron-tempest-plugin-openvswitch-iptables_hybrid
override-checkout: stable/xena
+ required-projects: *required-projects-xena
vars:
tempest_test_regex: "\
(^neutron_tempest_plugin.scenario)|\
@@ -134,6 +141,7 @@
name: neutron-tempest-plugin-scenario-linuxbridge-xena
parent: neutron-tempest-plugin-linuxbridge
override-checkout: stable/xena
+ required-projects: *required-projects-xena
vars:
tempest_test_regex: "\
(^neutron_tempest_plugin.scenario)|\
@@ -153,6 +161,7 @@
name: neutron-tempest-plugin-scenario-ovn-xena
parent: neutron-tempest-plugin-ovn
override-checkout: stable/xena
+ required-projects: *required-projects-xena
vars:
tempest_test_regex: "\
(^neutron_tempest_plugin.scenario)|\
@@ -172,6 +181,7 @@
parent: neutron-tempest-plugin-dvr-multinode-scenario
nodeset: openstack-two-node-focal
override-checkout: stable/xena
+ required-projects: *required-projects-xena
vars:
network_api_extensions_common: *api_extensions
@@ -179,6 +189,7 @@
name: neutron-tempest-plugin-designate-scenario-xena
parent: neutron-tempest-plugin-designate-scenario
override-checkout: stable/xena
+ required-projects: *required-projects-xena
vars:
network_api_extensions_common: *api_extensions
@@ -187,6 +198,7 @@
parent: neutron-tempest-plugin-sfc
nodeset: openstack-single-node-focal
override-checkout: stable/xena
+ required-projects: *required-projects-xena
vars:
network_api_extensions_common: *api_extensions
@@ -195,6 +207,7 @@
parent: neutron-tempest-plugin-bgpvpn-bagpipe
nodeset: openstack-single-node-focal
override-checkout: stable/xena
+ required-projects: *required-projects-xena
vars:
network_api_extensions: *api_extensions
@@ -203,14 +216,38 @@
parent: neutron-tempest-plugin-dynamic-routing
nodeset: openstack-single-node-focal
override-checkout: stable/xena
+ required-projects: *required-projects-xena
vars:
network_api_extensions_common: *api_extensions
+ devstack_localrc:
+ NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_bgp) | join(',') }}"
+ Q_AGENT: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
+ devstack_services:
+ # Disable OVN services
+ br-ex-tcpdump: false
+ br-int-flows: false
+ ovn-controller: false
+ ovn-northd: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ q-ovn-metadata-agent: false
+ # Neutron services
+ q-agt: true
+ q-dhcp: true
+ q-meta: true
+ q-metering: true
+ q-l3: true
+ neutron-dr: true
+ neutron-dr-agent: true
- job:
name: neutron-tempest-plugin-vpnaas-xena
parent: neutron-tempest-plugin-vpnaas
nodeset: openstack-single-node-focal
override-checkout: stable/xena
+ required-projects: *required-projects-xena
vars:
network_api_extensions_common: *api_extensions
@@ -219,5 +256,6 @@
parent: neutron-tempest-plugin-tap-as-a-service
nodeset: openstack-single-node-focal
override-checkout: stable/xena
+ required-projects: *required-projects-xena
vars:
network_api_extensions_common: *api_extensions
diff --git a/zuul.d/yoga_jobs.yaml b/zuul.d/yoga_jobs.yaml
index d47fc93..04c9ddd 100644
--- a/zuul.d/yoga_jobs.yaml
+++ b/zuul.d/yoga_jobs.yaml
@@ -98,6 +98,7 @@
name: neutron-tempest-plugin-scenario-openvswitch-yoga
parent: neutron-tempest-plugin-openvswitch
override-checkout: stable/yoga
+ nodeset: neutron-nested-virt-ubuntu-focal
vars:
tempest_test_regex: "\
(^neutron_tempest_plugin.scenario)|\
@@ -117,6 +118,7 @@
name: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-yoga
parent: neutron-tempest-plugin-openvswitch-iptables_hybrid
override-checkout: stable/yoga
+ nodeset: neutron-nested-virt-ubuntu-focal
vars:
tempest_test_regex: "\
(^neutron_tempest_plugin.scenario)|\
@@ -136,6 +138,7 @@
name: neutron-tempest-plugin-scenario-linuxbridge-yoga
parent: neutron-tempest-plugin-linuxbridge
override-checkout: stable/yoga
+ nodeset: neutron-nested-virt-ubuntu-focal
vars:
tempest_test_regex: "\
(^neutron_tempest_plugin.scenario)|\
@@ -155,6 +158,7 @@
name: neutron-tempest-plugin-scenario-ovn-yoga
parent: neutron-tempest-plugin-ovn
override-checkout: stable/yoga
+ nodeset: neutron-nested-virt-ubuntu-focal
vars:
tempest_test_regex: "\
(^neutron_tempest_plugin.scenario)|\
@@ -183,6 +187,7 @@
name: neutron-tempest-plugin-designate-scenario-yoga
parent: neutron-tempest-plugin-designate-scenario
override-checkout: stable/yoga
+ nodeset: neutron-nested-virt-ubuntu-focal
vars:
network_api_extensions_common: *api_extensions
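
The yoga scenario jobs move onto a nested-virt nodeset so their guest VMs get KVM acceleration instead of plain QEMU emulation. A nodeset simply maps a logical node name onto a Nodepool label; a sketch of a matching definition (assumed here, the real one lives elsewhere in this repository's Zuul configuration):

- nodeset:
    name: neutron-nested-virt-ubuntu-focal
    nodes:
      - name: controller
        label: nested-virt-ubuntu-focal   # label exposing VMX/SVM to guests
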
@@ -209,6 +214,28 @@
override-checkout: stable/yoga
vars:
network_api_extensions_common: *api_extensions
+ devstack_localrc:
+ NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_bgp) | join(',') }}"
+ Q_AGENT: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
+ devstack_services:
+ # Disable OVN services
+ br-ex-tcpdump: false
+ br-int-flows: false
+ ovn-controller: false
+ ovn-northd: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ q-ovn-metadata-agent: false
+ # Neutron services
+ q-agt: true
+ q-dhcp: true
+ q-meta: true
+ q-metering: true
+ q-l3: true
+ neutron-dr: true
+ neutron-dr-agent: true
- job:
name: neutron-tempest-plugin-vpnaas-yoga
diff --git a/zuul.d/zed_jobs.yaml b/zuul.d/zed_jobs.yaml
index a11c2c8..8c70a66 100644
--- a/zuul.d/zed_jobs.yaml
+++ b/zuul.d/zed_jobs.yaml
@@ -2,9 +2,12 @@
name: neutron-tempest-plugin-openvswitch-zed
parent: neutron-tempest-plugin-openvswitch
override-checkout: stable/zed
+ nodeset: neutron-nested-virt-ubuntu-focal
vars:
network_api_extensions_openvswitch:
- local_ip
+ - port-resource-request
+ - port-resource-request-groups
- qos-bw-minimum-ingress
tempest_test_regex: "\
(^neutron_tempest_plugin.api)|\
@@ -45,7 +48,6 @@
- l3-ha
- l3-ndp-proxy
- l3_agent_scheduler
- - logging
- metering
- multi-provider
- net-mtu
@@ -55,8 +57,6 @@
- network-segment-range
- pagination
- port-device-profile
- - port-resource-request
- - port-resource-request-groups
- port-mac-address-regenerate
- port-security
- port-security-groups-filtering
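
NETWORK_API_EXTENSIONS is assembled by concatenating the common extension list with a backend-specific one, so dropping logging and the port-resource-request pair from the common list here scopes them to the jobs that re-add them in their own network_api_extensions_openvswitch list below. A sketch of the composition pattern used throughout these jobs, with abbreviated lists:

vars:
  network_api_extensions_common:
    - port-security
  network_api_extensions_openvswitch:
    - local_ip
    - port-resource-request             # now enabled only where re-added
  devstack_localrc:
    NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_openvswitch) | join(',') }}"
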
@@ -107,10 +107,14 @@
name: neutron-tempest-plugin-openvswitch-iptables_hybrid-zed
parent: neutron-tempest-plugin-openvswitch-iptables_hybrid
override-checkout: stable/zed
+ nodeset: neutron-nested-virt-ubuntu-focal
vars:
+ network_api_extensions_common: *api_extensions
network_api_extensions_openvswitch:
- local_ip
- logging
+ - port-resource-request
+ - port-resource-request-groups
tempest_test_regex: "\
(^neutron_tempest_plugin.api)|\
(^neutron_tempest_plugin.scenario)|\
@@ -137,7 +141,9 @@
name: neutron-tempest-plugin-linuxbridge-zed
parent: neutron-tempest-plugin-linuxbridge
override-checkout: stable/zed
+ nodeset: neutron-nested-virt-ubuntu-focal
vars:
+ network_api_extensions_common: *api_extensions
network_api_extensions_linuxbridge:
- vlan-transparent
tempest_test_regex: "\
@@ -163,6 +169,7 @@
name: neutron-tempest-plugin-ovn-zed
parent: neutron-tempest-plugin-ovn
override-checkout: stable/zed
+ nodeset: neutron-nested-virt-ubuntu-focal
vars:
tempest_test_regex: "\
(^neutron_tempest_plugin.api)|\
@@ -194,6 +201,7 @@
name: neutron-tempest-plugin-designate-scenario-zed
parent: neutron-tempest-plugin-designate-scenario
override-checkout: stable/zed
+ nodeset: neutron-nested-virt-ubuntu-focal
vars:
network_api_extensions_common: *api_extensions
@@ -220,6 +228,28 @@
override-checkout: stable/zed
vars:
network_api_extensions_common: *api_extensions
+ devstack_localrc:
+ NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_bgp) | join(',') }}"
+ Q_AGENT: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
+ devstack_services:
+ # Disable OVN services
+ br-ex-tcpdump: false
+ br-int-flows: false
+ ovn-controller: false
+ ovn-northd: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ q-ovn-metadata-agent: false
+ # Neutron services
+ q-agt: true
+ q-dhcp: true
+ q-meta: true
+ q-metering: true
+ q-l3: true
+ neutron-dr: true
+ neutron-dr-agent: true
- job:
name: neutron-tempest-plugin-fwaas-zed