Merge "Add test_reuse_ip_address_with_other_fip..."
diff --git a/.zuul.yaml b/.zuul.yaml
index 0d22083..6e0a7c9 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -63,8 +63,10 @@
- qos-fip
- quotas
- quota_details
+ - rbac-address-scope
- rbac-policies
- rbac-security-groups
+ - rbac-subnetpool
- router
- router-admin-state-down-before-update
- router_availability_zone
@@ -78,7 +80,9 @@
- standard-attr-tag
- standard-attr-timestamp
- subnet_allocation
+ - subnet-dns-publish-fixed-ip
- subnetpool-prefix-ops
+ - tag-ports-during-bulk-creation
- trunk
- trunk-details
- uplink-status-propagation
@@ -103,13 +107,14 @@
neutron-network-segment-range: true
neutron-port-forwarding: true
neutron-conntrack-helper: true
+ neutron-tag-ports-during-bulk-creation: true
devstack_local_conf:
post-config:
$NEUTRON_CONF:
QUOTAS:
quota_router: 100
quota_floatingip: 500
- quota_security_group: 100
+ quota_security_group: 150
quota_security_group_rule: 1000
# NOTE(slaweq): We can get rid of this hardcoded absolute path when
# devstack-tempest job will be switched to use lib/neutron instead of
@@ -176,6 +181,12 @@
nodeset: openstack-single-node-xenial
parent: neutron-tempest-plugin-api
override-checkout: stable/queens
+ required-projects:
+ - openstack/devstack-gate
+ - openstack/neutron
+ - name: openstack/neutron-tempest-plugin
+ override-checkout: 0.3.0
+ - openstack/tempest
vars:
branch_override: stable/queens
# TODO(slaweq): find a way to put this list of extensions in
@@ -241,8 +252,16 @@
name: neutron-tempest-plugin-api-rocky
nodeset: openstack-single-node-xenial
parent: neutron-tempest-plugin-api
+ description: |
+      This job runs on py2 for the stable/rocky gate.
override-checkout: stable/rocky
- vars:
+ required-projects: &required-projects-rocky
+ - openstack/devstack-gate
+ - openstack/neutron
+ - name: openstack/neutron-tempest-plugin
+ override-checkout: 0.9.0
+ - openstack/tempest
+ vars: &api_vars_rocky
branch_override: stable/rocky
# TODO(slaweq): find a way to put this list of extensions in
# neutron repository and keep it different per branch,
@@ -309,6 +328,25 @@
USE_PYTHON3: false
NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_tempest) | join(',') }}"
TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
+    # NOTE(gmann): This job runs on py2 for the stable/rocky gate.
+ branches:
+ - stable/rocky
+
+
+- job:
+ name: neutron-tempest-plugin-api-rocky
+ nodeset: openstack-single-node-xenial
+ parent: neutron-tempest-plugin-api
+ description: |
+      This job runs on py3 for branches other than stable/rocky,
+      which is nothing but the neutron-tempest-plugin master gate.
+ override-checkout: stable/rocky
+ required-projects: *required-projects-rocky
+ vars:
+ <<: *api_vars_rocky
+ devstack_localrc:
+ USE_PYTHON3: True
+ branches: ^(?!stable/rocky).*$
- job:
name: neutron-tempest-plugin-api-stein
@@ -479,9 +517,7 @@
tempest_test_regex: ^neutron_tempest_plugin\.scenario
devstack_localrc:
PHYSICAL_NETWORK: default
- DOWNLOAD_DEFAULT_IMAGES: false
- IMAGE_URLS: "http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img,https://cloud-images.ubuntu.com/releases/xenial/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img"
- DEFAULT_IMAGE_NAME: cirros-0.3.4-i386-disk
+ IMAGE_URLS: https://cloud-images.ubuntu.com/releases/xenial/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img
ADVANCED_IMAGE_NAME: ubuntu-16.04-server-cloudimg-amd64-disk1
ADVANCED_INSTANCE_TYPE: ds512M
ADVANCED_INSTANCE_USER: ubuntu
@@ -522,12 +558,22 @@
parent: neutron-tempest-plugin-scenario-openvswitch
nodeset: openstack-single-node-xenial
override-checkout: stable/queens
+ required-projects:
+ - openstack/devstack-gate
+ - openstack/neutron
+ - name: openstack/neutron-tempest-plugin
+ override-checkout: 0.3.0
+ - openstack/tempest
vars:
branch_override: stable/queens
network_api_extensions: *api_extensions_queens
# TODO(slaweq): remove trunks subport_connectivity test from blacklist
# when bug https://bugs.launchpad.net/neutron/+bug/1838760 will be fixed
- tempest_black_regex: "(^neutron_tempest_plugin.scenario.test_trunk.TrunkTest.test_subport_connectivity)"
+ # NOTE(bcafarel): remove DNS test as queens pinned version does not have
+ # fix for https://bugs.launchpad.net/neutron/+bug/1826419
+ tempest_black_regex: "\
+ (^neutron_tempest_plugin.scenario.test_trunk.TrunkTest.test_subport_connectivity)|\
+ (^neutron_tempest_plugin.scenario.test_internal_dns.InternalDNSTest.test_dns_domain_and_name)"
devstack_localrc:
USE_PYTHON3: false
NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
@@ -536,15 +582,39 @@
- job:
name: neutron-tempest-plugin-scenario-openvswitch-rocky
parent: neutron-tempest-plugin-scenario-openvswitch
+ description: |
+    This job runs on py2 for the stable/rocky gate.
nodeset: openstack-single-node-xenial
override-checkout: stable/rocky
- vars:
+ required-projects: *required-projects-rocky
+ vars: &scenario_vars_rocky
branch_override: stable/rocky
network_api_extensions: *api_extensions_rocky
devstack_localrc:
USE_PYTHON3: false
NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
+ # NOTE(bcafarel): newer tests, unstable on rocky branch
+ tempest_black_regex: "\
+ (^neutron_tempest_plugin.scenario.test_port_forwardings.PortForwardingTestJSON.test_port_forwarding_to_2_servers)|\
+ (^neutron_tempest_plugin.scenario.test_security_groups.NetworkSecGroupTest.test_multiple_ports_portrange_remote)"
+ branches:
+ - stable/rocky
+
+- job:
+ name: neutron-tempest-plugin-scenario-openvswitch-rocky
+ parent: neutron-tempest-plugin-scenario-openvswitch
+ nodeset: openstack-single-node-xenial
+ description: |
+    This job runs on py3 for branches other than stable/rocky,
+    which is nothing but the neutron-tempest-plugin master gate.
+ override-checkout: stable/rocky
+ required-projects: *required-projects-rocky
+ vars:
+ <<: *scenario_vars_rocky
+ devstack_localrc:
+ USE_PYTHON3: True
+ branches: ^(?!stable/rocky).*$
- job:
name: neutron-tempest-plugin-scenario-openvswitch-stein
@@ -603,14 +673,41 @@
name: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-rocky
parent: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid
nodeset: openstack-single-node-xenial
+ description: |
+    This job runs on py2 for the stable/rocky gate.
override-checkout: stable/rocky
- vars:
+ required-projects: *required-projects-rocky
+ vars: &openvswitch_vars_rocky
branch_override: stable/rocky
network_api_extensions: *api_extensions_rocky
devstack_localrc:
USE_PYTHON3: false
NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
+ # TODO(bcafarel): remove trunks subport_connectivity test from blacklist
+ # when bug https://bugs.launchpad.net/neutron/+bug/1838760 will be fixed
+    # NOTE(bcafarel): others are newer tests, unstable on rocky branch
+ tempest_black_regex: "\
+ (^neutron_tempest_plugin.scenario.test_trunk.TrunkTest.test_subport_connectivity)|\
+ (^neutron_tempest_plugin.scenario.test_port_forwardings.PortForwardingTestJSON.test_port_forwarding_to_2_servers)|\
+ (^neutron_tempest_plugin.scenario.test_security_groups.NetworkSecGroupTest.test_multiple_ports_portrange_remote)"
+ branches:
+ - stable/rocky
+
+- job:
+ name: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-rocky
+ parent: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid
+ nodeset: openstack-single-node-xenial
+ description: |
+    This job runs on py3 for branches other than stable/rocky,
+    which is nothing but the neutron-tempest-plugin master gate.
+ override-checkout: stable/rocky
+ required-projects: *required-projects-rocky
+ vars:
+ <<: *openvswitch_vars_rocky
+ devstack_localrc:
+ USE_PYTHON3: True
+ branches: ^(?!stable/rocky).*$
- job:
name: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-stein
@@ -665,9 +762,19 @@
parent: neutron-tempest-plugin-scenario-linuxbridge
nodeset: openstack-single-node-xenial
override-checkout: stable/queens
+ required-projects:
+ - openstack/devstack-gate
+ - openstack/neutron
+ - name: openstack/neutron-tempest-plugin
+ override-checkout: 0.3.0
+ - openstack/tempest
vars:
branch_override: stable/queens
network_api_extensions: *api_extensions_queens
+ # NOTE(bcafarel): remove DNS test as queens pinned version does not have
+ # fix for https://bugs.launchpad.net/neutron/+bug/1826419
+ tempest_black_regex: "\
+ (^neutron_tempest_plugin.scenario.test_internal_dns.InternalDNSTest.test_dns_domain_and_name)"
devstack_localrc:
USE_PYTHON3: false
NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
@@ -684,9 +791,12 @@
- job:
name: neutron-tempest-plugin-scenario-linuxbridge-rocky
parent: neutron-tempest-plugin-scenario-linuxbridge
+ description: |
+    This job runs on py2 for the stable/rocky gate.
nodeset: openstack-single-node-xenial
override-checkout: stable/rocky
- vars:
+ required-projects: *required-projects-rocky
+ vars: &linuxbridge_vars_rocky
branch_override: stable/rocky
network_api_extensions: *api_extensions_rocky
devstack_localrc:
@@ -701,6 +811,27 @@
$TEMPEST_CONFIG:
neutron_plugin_options:
q_agent: None
+ # NOTE(bcafarel): newer tests, unstable on rocky branch
+ tempest_black_regex: "\
+ (^neutron_tempest_plugin.scenario.test_port_forwardings.PortForwardingTestJSON.test_port_forwarding_to_2_servers)|\
+ (^neutron_tempest_plugin.scenario.test_security_groups.NetworkSecGroupTest.test_multiple_ports_portrange_remote)"
+ branches:
+ - stable/rocky
+
+- job:
+ name: neutron-tempest-plugin-scenario-linuxbridge-rocky
+ parent: neutron-tempest-plugin-scenario-linuxbridge
+ nodeset: openstack-single-node-xenial
+ description: |
+    This job runs on py3 for branches other than stable/rocky,
+    which is nothing but the neutron-tempest-plugin master gate.
+ override-checkout: stable/rocky
+ required-projects: *required-projects-rocky
+ vars:
+ <<: *linuxbridge_vars_rocky
+ devstack_localrc:
+ USE_PYTHON3: True
+ branches: ^(?!stable/rocky).*$
- job:
name: neutron-tempest-plugin-scenario-linuxbridge-stein
@@ -747,9 +878,7 @@
USE_PYTHON3: true
NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_dvr) | join(',') }}"
PHYSICAL_NETWORK: default
- DOWNLOAD_DEFAULT_IMAGES: false
- IMAGE_URLS: "http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img,https://cloud-images.ubuntu.com/releases/xenial/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img"
- DEFAULT_IMAGE_NAME: cirros-0.3.4-i386-disk
+ IMAGE_URLS: https://cloud-images.ubuntu.com/releases/xenial/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img
ADVANCED_IMAGE_NAME: ubuntu-16.04-server-cloudimg-amd64-disk1
ADVANCED_INSTANCE_TYPE: ds512M
ADVANCED_INSTANCE_USER: ubuntu
@@ -831,6 +960,8 @@
neutron-trunk: true
neutron-log: true
neutron-port-forwarding: true
+ devstack_localrc:
+ USE_PYTHON3: true
devstack_local_conf:
post-config:
$NEUTRON_CONF:
@@ -859,12 +990,22 @@
parent: neutron-tempest-plugin-dvr-multinode-scenario
nodeset: openstack-two-node-xenial
override-checkout: stable/queens
+ required-projects:
+ - openstack/devstack-gate
+ - openstack/neutron
+ - name: openstack/neutron-tempest-plugin
+ override-checkout: 0.3.0
+ - openstack/tempest
vars:
branch_override: stable/queens
network_api_extensions_common: *api_extensions_queens
# TODO(slaweq): remove trunks subport_connectivity test from blacklist
# when bug https://bugs.launchpad.net/neutron/+bug/1838760 will be fixed
- tempest_black_regex: "(^neutron_tempest_plugin.scenario.test_trunk.TrunkTest.test_subport_connectivity)"
+ # NOTE(bcafarel): remove DNS test as queens pinned version does not have
+ # fix for https://bugs.launchpad.net/neutron/+bug/1826419
+ tempest_black_regex: "\
+ (^neutron_tempest_plugin.scenario.test_trunk.TrunkTest.test_subport_connectivity)|\
+ (^neutron_tempest_plugin.scenario.test_internal_dns.InternalDNSTest.test_dns_domain_and_name)"
devstack_localrc:
USE_PYTHON3: false
TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
@@ -872,14 +1013,42 @@
- job:
name: neutron-tempest-plugin-dvr-multinode-scenario-rocky
parent: neutron-tempest-plugin-dvr-multinode-scenario
+ description: |
+    This job runs on py2 for the stable/rocky gate.
nodeset: openstack-two-node-xenial
override-checkout: stable/rocky
- vars:
+ required-projects: *required-projects-rocky
+ vars: &multinode_scenario_vars_rocky
branch_override: stable/rocky
network_api_extensions_common: *api_extensions_rocky
devstack_localrc:
USE_PYTHON3: false
TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
+ # NOTE(bcafarel): newer tests, unstable on rocky branch
+ tempest_black_regex: "\
+ (^neutron_tempest_plugin.scenario.test_port_forwardings.PortForwardingTestJSON.test_port_forwarding_to_2_servers)|\
+ (^neutron_tempest_plugin.scenario.test_security_groups.NetworkSecGroupTest.test_multiple_ports_portrange_remote)"
+ branches:
+ - stable/rocky
+
+- job:
+ name: neutron-tempest-plugin-dvr-multinode-scenario-rocky
+ parent: neutron-tempest-plugin-dvr-multinode-scenario
+ nodeset: openstack-two-node-xenial
+ description: |
+    This job runs on py3 for branches other than stable/rocky,
+    which is nothing but the neutron-tempest-plugin master gate.
+ override-checkout: stable/rocky
+ vars:
+ <<: *multinode_scenario_vars_rocky
+ devstack_localrc:
+ USE_PYTHON3: True
+ required-projects: *required-projects-rocky
+ group-vars:
+ subnode:
+ devstack_localrc:
+ USE_PYTHON3: True
+ branches: ^(?!stable/rocky).*$
- job:
name: neutron-tempest-plugin-dvr-multinode-scenario-stein
@@ -909,8 +1078,6 @@
vars:
devstack_localrc:
DESIGNATE_BACKEND_DRIVER: bind9
- DOWNLOAD_DEFAULT_IMAGES: false
- IMAGE_URLS: http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img,
Q_AGENT: openvswitch
# In this job advanced image is not needed, so it's name should be
# empty
@@ -945,9 +1112,21 @@
parent: neutron-tempest-plugin-designate-scenario
nodeset: openstack-single-node-xenial
override-checkout: stable/queens
+ required-projects:
+ - openstack/devstack-gate
+ - openstack/neutron
+ - name: openstack/neutron-tempest-plugin
+ override-checkout: 0.3.0
+ - name: openstack/designate-tempest-plugin
+ override-checkout: 0.7.0
+ - openstack/tempest
vars:
branch_override: stable/queens
network_api_extensions_common: *api_extensions_queens
+ # NOTE(bcafarel): remove DNS test as queens pinned version does not have
+ # fix for https://bugs.launchpad.net/neutron/+bug/1826419
+ tempest_black_regex: "\
+ (^neutron_tempest_plugin.scenario.test_internal_dns.InternalDNSTest.test_dns_domain_and_name)"
devstack_localrc:
USE_PYTHON3: false
TEMPEST_PLUGINS: '"/opt/stack/designate-tempest-plugin /opt/stack/neutron-tempest-plugin"'
@@ -955,22 +1134,56 @@
- job:
name: neutron-tempest-plugin-designate-scenario-rocky
parent: neutron-tempest-plugin-designate-scenario
+ description: |
+    This job runs on py2 for the stable/rocky gate.
nodeset: openstack-single-node-xenial
override-checkout: stable/rocky
- vars:
+ required-projects:
+ - openstack/devstack-gate
+ - openstack/neutron
+ - name: openstack/neutron-tempest-plugin
+ override-checkout: 0.9.0
+ - name: openstack/designate-tempest-plugin
+ override-checkout: 0.7.0
+ - openstack/tempest
+ vars: &designate_scenario_vars_rocky
branch_override: stable/rocky
network_api_extensions_common: *api_extensions_rocky
devstack_localrc:
USE_PYTHON3: false
TEMPEST_PLUGINS: '"/opt/stack/designate-tempest-plugin /opt/stack/neutron-tempest-plugin"'
+ branches:
+ - stable/rocky
+
+- job:
+ name: neutron-tempest-plugin-designate-scenario-rocky
+ parent: neutron-tempest-plugin-designate-scenario
+ nodeset: openstack-single-node-xenial
+ description: |
+    This job runs on py3 for branches other than stable/rocky,
+    which is nothing but the neutron-tempest-plugin master gate.
+ override-checkout: stable/rocky
+ required-projects: *required-projects-rocky
+ vars:
+ <<: *designate_scenario_vars_rocky
+ devstack_localrc:
+ USE_PYTHON3: True
+ branches: ^(?!stable/rocky).*$
- job:
name: neutron-tempest-plugin-designate-scenario-stein
parent: neutron-tempest-plugin-designate-scenario
override-checkout: stable/stein
+ required-projects:
+ - openstack/devstack-gate
+ - openstack/neutron
+ - openstack/neutron-tempest-plugin
+ - name: openstack/designate-tempest-plugin
+ override-checkout: 0.7.0
+ - openstack/tempest
vars:
- network_api_extensions_common: *api_extensions_stein
branch_override: stable/stein
+ network_api_extensions_common: *api_extensions_stein
- job:
name: neutron-tempest-plugin-designate-scenario-train
@@ -1102,6 +1315,34 @@
tempest_concurrency: 1
tempest_test_regex: ^neutron_tempest_plugin\.neutron_dynamic_routing
+- job:
+ name: neutron-tempest-plugin-vpnaas
+ parent: neutron-tempest-plugin
+ timeout: 3900
+ required-projects:
+ - openstack/devstack-gate
+ - openstack/neutron
+ - openstack/neutron-vpnaas
+ - openstack/neutron-tempest-plugin
+ - openstack/tempest
+ vars:
+ tempest_test_regex: ^neutron_tempest_plugin\.vpnaas
+ tox_envlist: all-plugin
+ devstack_plugins:
+ neutron-vpnaas: https://opendev.org/openstack/neutron-vpnaas.git
+ neutron-tempest-plugin: https://opendev.org/openstack/neutron-tempest-plugin.git
+ network_api_extensions_common: *api_extensions_master
+ network_api_extensions_vpnaas:
+ - vpnaas
+ devstack_localrc:
+ IPSEC_PACKAGE: strongswan
+ NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_vpnaas) | join(',') }}"
+ irrelevant-files:
+ - ^.*\.rst$
+ - ^doc/.*$
+ - ^neutron_vpnaas/tests/unit/.*$
+ - ^releasenotes/.*$
+
- project-template:
name: neutron-tempest-plugin-jobs
check:
@@ -1178,8 +1419,6 @@
templates:
- build-openstack-docs-pti
- neutron-tempest-plugin-jobs
- - neutron-tempest-plugin-jobs-queens
- - neutron-tempest-plugin-jobs-rocky
- neutron-tempest-plugin-jobs-stein
- neutron-tempest-plugin-jobs-train
- check-requirements
@@ -1191,18 +1430,22 @@
- neutron-tempest-plugin-sfc-train
- neutron-tempest-plugin-bgpvpn-bagpipe
- neutron-tempest-plugin-bgpvpn-bagpipe-train
- - neutron-tempest-plugin-fwaas
- - neutron-tempest-plugin-fwaas-train
- - neutron-tempest-plugin-dynamic-routing:
+ - neutron-tempest-plugin-fwaas:
# TODO(slaweq): switch it to be voting when bug
- # https://bugs.launchpad.net/neutron/+bug/1850626 will be fixed
+ # https://bugs.launchpad.net/neutron/+bug/1858645 will be fixed
voting: false
+ - neutron-tempest-plugin-fwaas-train:
+ # TODO(slaweq): switch it to be voting when bug
+ # https://bugs.launchpad.net/neutron/+bug/1858645 will be fixed
+ voting: false
+ - neutron-tempest-plugin-dynamic-routing
+ - neutron-tempest-plugin-vpnaas
gate:
jobs:
- neutron-tempest-plugin-sfc
- neutron-tempest-plugin-bgpvpn-bagpipe
- - neutron-tempest-plugin-fwaas
# TODO(slaweq): bring it back to gate queue
- # https://bugs.launchpad.net/neutron/+bug/1850626 will be fixed
- # - neutron-tempest-plugin-dynamic-routing
+ # https://bugs.launchpad.net/neutron/+bug/1858645 will be fixed
+ # - neutron-tempest-plugin-fwaas
+ - neutron-tempest-plugin-dynamic-routing
diff --git a/neutron_tempest_plugin/api/admin/test_agent_management.py b/neutron_tempest_plugin/api/admin/test_agent_management.py
index 4a37904..f63e81b 100644
--- a/neutron_tempest_plugin/api/admin/test_agent_management.py
+++ b/neutron_tempest_plugin/api/admin/test_agent_management.py
@@ -13,7 +13,9 @@
# under the License.
from neutron_tempest_plugin.common import tempest_fixtures
+from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
from neutron_tempest_plugin.api import base
@@ -90,3 +92,10 @@
if self.agent['id'] != agent['id']:
return agent
raise self.skipException("This test requires at least two agents.")
+
+ @decorators.idempotent_id('b33af888-b6ac-4e68-a0ca-0444c2696cf9')
+ def test_delete_agent_negative(self):
+ non_existent_id = data_utils.rand_uuid()
+ self.assertRaises(
+ lib_exc.NotFound,
+ self.admin_client.delete_agent, non_existent_id)
diff --git a/neutron_tempest_plugin/api/admin/test_shared_network_extension.py b/neutron_tempest_plugin/api/admin/test_shared_network_extension.py
index eb902b9..1444b2d 100644
--- a/neutron_tempest_plugin/api/admin/test_shared_network_extension.py
+++ b/neutron_tempest_plugin/api/admin/test_shared_network_extension.py
@@ -104,8 +104,8 @@
port = self.create_port(self.shared_network)
self.addCleanup(self.admin_client.delete_port, port['id'])
# verify the tenant id of admin network and non admin port
- self.assertNotEqual(self.shared_network['tenant_id'],
- port['tenant_id'])
+ self.assertNotEqual(self.shared_network['project_id'],
+ port['project_id'])
@decorators.idempotent_id('3e39c4a6-9caf-4710-88f1-d20073c6dd76')
def test_create_bulk_shared_network(self):
@@ -183,7 +183,7 @@
super(RBACSharedNetworksTest, cls).resource_setup()
cls.client2 = cls.os_alt.network_client
- def _make_admin_net_and_subnet_shared_to_tenant_id(self, tenant_id):
+ def _make_admin_net_and_subnet_shared_to_project_id(self, project_id):
net = self.admin_client.create_network(
name=data_utils.rand_name('test-network'))['network']
self.addCleanup(self.admin_client.delete_network, net['id'])
@@ -191,7 +191,7 @@
# network is shared to first unprivileged client by default
pol = self.admin_client.create_rbac_policy(
object_type='network', object_id=net['id'],
- action='access_as_shared', target_tenant=tenant_id
+ action='access_as_shared', target_tenant=project_id
)['rbac_policy']
return {'network': net, 'subnet': subnet, 'policy': pol}
@@ -199,21 +199,21 @@
@decorators.idempotent_id('86c3529b-1231-40de-803c-bfffffff1eee')
def test_create_rbac_policy_with_target_tenant_none(self):
with testtools.ExpectedException(lib_exc.BadRequest):
- self._make_admin_net_and_subnet_shared_to_tenant_id(
- tenant_id=None)
+ self._make_admin_net_and_subnet_shared_to_project_id(
+ project_id=None)
@decorators.attr(type='smoke')
@decorators.idempotent_id('86c3529b-1231-40de-803c-bfffffff1fff')
def test_create_rbac_policy_with_target_tenant_too_long_id(self):
with testtools.ExpectedException(lib_exc.BadRequest):
- target_tenant = '1234' * 100
- self._make_admin_net_and_subnet_shared_to_tenant_id(
- tenant_id=target_tenant)
+ target_project = '1234' * 100
+ self._make_admin_net_and_subnet_shared_to_project_id(
+ project_id=target_project)
@decorators.attr(type='smoke')
@decorators.idempotent_id('86c3529b-1231-40de-803c-afffffff1fff')
def test_network_only_visible_to_policy_target(self):
- net = self._make_admin_net_and_subnet_shared_to_tenant_id(
+ net = self._make_admin_net_and_subnet_shared_to_project_id(
self.client.tenant_id)['network']
self.client.show_network(net['id'])
with testtools.ExpectedException(lib_exc.NotFound):
@@ -222,7 +222,7 @@
@decorators.idempotent_id('86c3529b-1231-40de-803c-afffffff2fff')
def test_subnet_on_network_only_visible_to_policy_target(self):
- sub = self._make_admin_net_and_subnet_shared_to_tenant_id(
+ sub = self._make_admin_net_and_subnet_shared_to_project_id(
self.client.tenant_id)['subnet']
self.client.show_subnet(sub['id'])
with testtools.ExpectedException(lib_exc.NotFound):
@@ -231,7 +231,7 @@
@decorators.idempotent_id('86c3529b-1231-40de-803c-afffffff2eee')
def test_policy_target_update(self):
- res = self._make_admin_net_and_subnet_shared_to_tenant_id(
+ res = self._make_admin_net_and_subnet_shared_to_project_id(
self.client.tenant_id)
# change to client2
update_res = self.admin_client.update_rbac_policy(
@@ -245,7 +245,7 @@
@decorators.idempotent_id('86c3529b-1231-40de-803c-affefefef321')
def test_duplicate_policy_error(self):
- res = self._make_admin_net_and_subnet_shared_to_tenant_id(
+ res = self._make_admin_net_and_subnet_shared_to_project_id(
self.client.tenant_id)
with testtools.ExpectedException(lib_exc.Conflict):
self.admin_client.create_rbac_policy(
@@ -254,7 +254,7 @@
@decorators.idempotent_id('86c3529b-1231-40de-803c-afffffff3fff')
def test_port_presence_prevents_network_rbac_policy_deletion(self):
- res = self._make_admin_net_and_subnet_shared_to_tenant_id(
+ res = self._make_admin_net_and_subnet_shared_to_project_id(
self.client.tenant_id)
port = self.create_port(res['network'])
# a port on the network should prevent the deletion of a policy
@@ -272,7 +272,7 @@
self_share = self.client.create_rbac_policy(
object_type='network', object_id=net['id'],
action='access_as_shared',
- target_tenant=net['tenant_id'])['rbac_policy']
+ target_tenant=net['project_id'])['rbac_policy']
port = self.create_port(net)
self.client.delete_rbac_policy(self_share['id'])
self.client.delete_port(port['id'])
@@ -318,14 +318,14 @@
object_type='network', object_id=net['id'],
action='access_as_shared', target_tenant=self.client2.tenant_id)
field_args = (('id',), ('id', 'action'), ('object_type', 'object_id'),
- ('tenant_id', 'target_tenant'))
+ ('project_id', 'target_tenant'))
for fields in field_args:
res = self.client.list_rbac_policies(fields=fields)
self.assertEqual(set(fields), set(res['rbac_policies'][0].keys()))
@decorators.idempotent_id('86c3529b-1231-40de-803c-afffffff5fff')
def test_policy_show(self):
- res = self._make_admin_net_and_subnet_shared_to_tenant_id(
+ res = self._make_admin_net_and_subnet_shared_to_project_id(
self.client.tenant_id)
p1 = res['policy']
p2 = self.admin_client.create_rbac_policy(
@@ -358,7 +358,7 @@
@decorators.idempotent_id('86c3529b-1231-40de-803c-afffffff6fff')
def test_regular_client_blocked_from_sharing_anothers_network(self):
- net = self._make_admin_net_and_subnet_shared_to_tenant_id(
+ net = self._make_admin_net_and_subnet_shared_to_project_id(
self.client.tenant_id)['network']
with testtools.ExpectedException(lib_exc.BadRequest):
self.client.create_rbac_policy(
@@ -402,7 +402,7 @@
self_share = self.client.create_rbac_policy(
object_type='network', object_id=net['id'],
action='access_as_shared',
- target_tenant=net['tenant_id'])['rbac_policy']
+ target_tenant=net['project_id'])['rbac_policy']
port = self.create_port(net)
self.client.update_rbac_policy(self_share['id'],
target_tenant=self.client2.tenant_id)
@@ -411,7 +411,7 @@
@utils.requires_ext(extension="standard-attr-revisions", service="network")
@decorators.idempotent_id('86c3529b-1231-40de-1234-89664291a4cb')
def test_rbac_bumps_network_revision(self):
- resp = self._make_admin_net_and_subnet_shared_to_tenant_id(
+ resp = self._make_admin_net_and_subnet_shared_to_project_id(
self.client.tenant_id)
net_id = resp['network']['id']
rev = self.client.show_network(net_id)['network']['revision_number']
@@ -425,7 +425,7 @@
@decorators.idempotent_id('86c3529b-1231-40de-803c-aeeeeeee7fff')
def test_filtering_works_with_rbac_records_present(self):
- resp = self._make_admin_net_and_subnet_shared_to_tenant_id(
+ resp = self._make_admin_net_and_subnet_shared_to_project_id(
self.client.tenant_id)
net = resp['network']['id']
sub = resp['subnet']['id']
diff --git a/neutron_tempest_plugin/api/base.py b/neutron_tempest_plugin/api/base.py
index 4441dd1..1b02211 100644
--- a/neutron_tempest_plugin/api/base.py
+++ b/neutron_tempest_plugin/api/base.py
@@ -378,16 +378,6 @@
return cls.create_network(name=network_name, shared=True, **kwargs)
@classmethod
- def create_network_keystone_v3(cls, network_name=None, project_id=None,
- tenant_id=None, client=None):
- params = {}
- if project_id:
- params['project_id'] = project_id
- if tenant_id:
- params['tenant_id'] = tenant_id
- return cls.create_network(name=network_name, client=client, **params)
-
- @classmethod
def create_subnet(cls, network, gateway='', cidr=None, mask_bits=None,
ip_version=None, client=None, reserve_cidr=True,
**kwargs):
@@ -743,10 +733,10 @@
@classmethod
def create_qos_policy(cls, name, description=None, shared=False,
- tenant_id=None, is_default=False):
+ project_id=None, is_default=False):
"""Wrapper utility that returns a test QoS policy."""
body = cls.admin_client.create_qos_policy(
- name, description, shared, tenant_id, is_default)
+ name, description, shared, project_id, is_default)
qos_policy = body['policy']
cls.qos_policies.append(qos_policy)
return qos_policy
@@ -799,12 +789,15 @@
return body['address_scope']
@classmethod
- def create_subnetpool(cls, name, is_admin=False, **kwargs):
+ def create_subnetpool(cls, name, is_admin=False, client=None, **kwargs):
+ if client is None:
+ client = cls.admin_client if is_admin else cls.client
+
if is_admin:
- body = cls.admin_client.create_subnetpool(name, **kwargs)
+ body = client.create_subnetpool(name, **kwargs)
cls.admin_subnetpools.append(body['subnetpool'])
else:
- body = cls.client.create_subnetpool(name, **kwargs)
+ body = client.create_subnetpool(name, **kwargs)
cls.subnetpools.append(body['subnetpool'])
return body['subnetpool']
diff --git a/neutron_tempest_plugin/api/test_address_scopes.py b/neutron_tempest_plugin/api/test_address_scopes.py
index 6cf0885..b8c143a 100644
--- a/neutron_tempest_plugin/api/test_address_scopes.py
+++ b/neutron_tempest_plugin/api/test_address_scopes.py
@@ -11,6 +11,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
from tempest.common import utils
from tempest.lib.common.utils import data_utils
@@ -115,3 +116,141 @@
address_scope = self._test_update_address_scope_helper(is_admin=True,
shared=True)
self.assertTrue(address_scope['shared'])
+
+
+class RbacAddressScopeTest(AddressScopeTestBase):
+
+ force_tenant_isolation = True
+ credentials = ['primary', 'alt', 'admin']
+ required_extensions = ['address-scope', 'rbac-address-scope']
+
+ @classmethod
+ def resource_setup(cls):
+ super(RbacAddressScopeTest, cls).resource_setup()
+ cls.client2 = cls.os_alt.network_client
+
+ def _make_admin_as_shared_to_project_id(self, project_id):
+ a_s = self._create_address_scope(ip_version=4, is_admin=True)
+ rbac_policy = self.admin_client.create_rbac_policy(
+ object_type='address_scope',
+ object_id=a_s['id'],
+ action='access_as_shared',
+ target_tenant=project_id,
+ )['rbac_policy']
+ return {'address_scope': a_s, 'rbac_policy': rbac_policy}
+
+ @decorators.idempotent_id('038e999b-cd4b-4021-a9ff-ebb734f6e056')
+ def test_policy_target_update(self):
+ res = self._make_admin_as_shared_to_project_id(
+ self.client.tenant_id)
+ # change to client2
+ update_res = self.admin_client.update_rbac_policy(
+ res['rbac_policy']['id'], target_tenant=self.client2.tenant_id)
+ self.assertEqual(self.client2.tenant_id,
+ update_res['rbac_policy']['target_tenant'])
+ # make sure everything else stayed the same
+ res['rbac_policy'].pop('target_tenant')
+ update_res['rbac_policy'].pop('target_tenant')
+ self.assertEqual(res['rbac_policy'], update_res['rbac_policy'])
+
+ @decorators.idempotent_id('798ac6c6-96cc-49ce-ba5c-c6eced7a09d3')
+ def test_subnet_pool_presence_prevents_rbac_policy_deletion(self):
+ res = self._make_admin_as_shared_to_project_id(
+ self.client2.tenant_id)
+ snp = self.create_subnetpool(
+ data_utils.rand_name("rbac-address-scope"),
+ default_prefixlen=24, prefixes=['10.0.0.0/8'],
+ address_scope_id=res['address_scope']['id'],
+ client=self.client2
+ )
+ self.addCleanup(
+ self.admin_client.delete_rbac_policy,
+ res['rbac_policy']['id']
+ )
+ self.addCleanup(self.client2.delete_subnetpool, snp['id'])
+
+        # a subnetpool created in the shared address scope should prevent
+        # the deletion of the rbac-policy required for it to be shared
+ with testtools.ExpectedException(lib_exc.Conflict):
+ self.admin_client.delete_rbac_policy(res['rbac_policy']['id'])
+
+ @decorators.idempotent_id('57da6ba2-6329-49c8-974c-9858fe187136')
+ def test_regular_client_shares_to_another_regular_client(self):
+ # owned by self.admin_client
+ a_s = self._create_address_scope(ip_version=4, is_admin=True)
+ with testtools.ExpectedException(lib_exc.NotFound):
+ self.client.show_address_scope(a_s['id'])
+ rbac_policy = self.admin_client.create_rbac_policy(
+ object_type='address_scope', object_id=a_s['id'],
+ action='access_as_shared',
+ target_tenant=self.client.tenant_id)['rbac_policy']
+ self.client.show_address_scope(a_s['id'])
+
+ self.assertIn(rbac_policy,
+ self.admin_client.list_rbac_policies()['rbac_policies'])
+ # ensure that 'client2' can't see the rbac-policy sharing the
+        # address scope, because the rbac-policy is not targeted at it
+ self.assertNotIn(rbac_policy['id'], [p['id'] for p in
+ self.client2.list_rbac_policies()['rbac_policies']])
+
+ @decorators.idempotent_id('051248e7-d66f-4c69-9022-2b73ee5b9e73')
+ def test_filter_fields(self):
+ a_s = self._create_address_scope(ip_version=4)
+ self.admin_client.create_rbac_policy(
+ object_type='address_scope', object_id=a_s['id'],
+ action='access_as_shared', target_tenant=self.client2.tenant_id)
+ field_args = (('id',), ('id', 'action'), ('object_type', 'object_id'),
+ ('project_id', 'target_tenant'))
+ for fields in field_args:
+ res = self.admin_client.list_rbac_policies(fields=fields)
+ self.assertEqual(set(fields), set(res['rbac_policies'][0].keys()))
+
+ @decorators.idempotent_id('19cbd62e-c6c3-4495-98b9-b9c6c6c9c127')
+ def test_rbac_policy_show(self):
+ res = self._make_admin_as_shared_to_project_id(
+ self.client.tenant_id)
+ p1 = res['rbac_policy']
+ p2 = self.admin_client.create_rbac_policy(
+ object_type='address_scope',
+ object_id=res['address_scope']['id'],
+ action='access_as_shared',
+ target_tenant='*')['rbac_policy']
+
+ self.assertEqual(
+ p1, self.admin_client.show_rbac_policy(p1['id'])['rbac_policy'])
+ self.assertEqual(
+ p2, self.admin_client.show_rbac_policy(p2['id'])['rbac_policy'])
+
+ @decorators.idempotent_id('88852ba0-8546-4ce7-8f79-4a9c840c881d')
+ def test_filter_rbac_policies(self):
+ a_s = self._create_address_scope(ip_version=4)
+ rbac_pol1 = self.admin_client.create_rbac_policy(
+ object_type='address_scope', object_id=a_s['id'],
+ action='access_as_shared',
+ target_tenant=self.client2.tenant_id)['rbac_policy']
+ rbac_pol2 = self.admin_client.create_rbac_policy(
+ object_type='address_scope', object_id=a_s['id'],
+ action='access_as_shared',
+ target_tenant=self.admin_client.tenant_id)['rbac_policy']
+ res1 = self.admin_client.list_rbac_policies(id=rbac_pol1['id'])[
+ 'rbac_policies']
+ res2 = self.admin_client.list_rbac_policies(id=rbac_pol2['id'])[
+ 'rbac_policies']
+ self.assertEqual(1, len(res1))
+ self.assertEqual(1, len(res2))
+ self.assertEqual(rbac_pol1['id'], res1[0]['id'])
+ self.assertEqual(rbac_pol2['id'], res2[0]['id'])
+
+ @decorators.idempotent_id('222a638d-819e-41a7-a3fe-550265c06e79')
+ def test_regular_client_blocked_from_sharing_anothers_policy(self):
+ a_s = self._make_admin_as_shared_to_project_id(
+ self.client.tenant_id)['address_scope']
+ with testtools.ExpectedException(lib_exc.BadRequest):
+ self.client.create_rbac_policy(
+ object_type='address_scope', object_id=a_s['id'],
+ action='access_as_shared',
+ target_tenant=self.client2.tenant_id)
+
+        # make sure the rbac-policy is invisible to the tenant to which
+        # the address scope is being shared
+ self.assertFalse(self.client.list_rbac_policies()['rbac_policies'])
diff --git a/neutron_tempest_plugin/api/test_ports.py b/neutron_tempest_plugin/api/test_ports.py
index 52783b9..8867eee 100644
--- a/neutron_tempest_plugin/api/test_ports.py
+++ b/neutron_tempest_plugin/api/test_ports.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
+
from tempest.common import utils
from tempest.lib import decorators
@@ -203,3 +205,62 @@
@decorators.idempotent_id('74293e59-d794-4a93-be09-38667199ef68')
def test_list_pagination_page_reverse_with_href_links(self):
self._test_list_pagination_page_reverse_with_href_links()
+
+
+class PortsTaggingOnCreationTestJSON(base.BaseNetworkTest):
+
+ _tags = [
+ ['tag-1', 'tag-2', 'tag-3'],
+ ['tag-1', 'tag-2'],
+ ['tag-1', 'tag-3'],
+ []
+ ]
+
+ @classmethod
+ def resource_setup(cls):
+ super(PortsTaggingOnCreationTestJSON, cls).resource_setup()
+ cls.network = cls.create_network()
+
+ def _create_ports_in_bulk(self, ports):
+ body = self.client.create_bulk_port(ports)
+ for port in body['ports']:
+ self.ports.append(port)
+ return body
+
+ def _create_ports_list(self):
+ num_ports = len(self._tags)
+ net_id = self.network['id']
+ port = {'port': {'network_id': net_id,
+ 'admin_state_up': True}}
+ return [copy.deepcopy(port) for x in range(num_ports)]
+
+ @decorators.idempotent_id('5cf26014-fdd3-4a6d-b94d-a05f0c55da89')
+ @utils.requires_ext(extension="tag-ports-during-bulk-creation",
+ service="network")
+ def test_tagging_ports_during_bulk_creation(self):
+ ports = self._create_ports_list()
+ ports_tags_map = {}
+ for port, tags in zip(ports, self._tags):
+ port['port']['tags'] = tags
+ port['port']['name'] = '-'.join(tags)
+ ports_tags_map[port['port']['name']] = tags
+ body = self._create_ports_in_bulk(ports)
+ for port in body['ports']:
+ self.assertEqual(ports_tags_map[port['name']], port['tags'])
+
+ @decorators.idempotent_id('33eda785-a08a-44a0-1bbb-fb50a2f1cd78')
+ @utils.requires_ext(extension="tag-ports-during-bulk-creation",
+ service="network")
+ def test_tagging_ports_during_bulk_creation_no_tags(self):
+ ports = self._create_ports_list()
+ body = self._create_ports_in_bulk(ports)
+ for port in body['ports']:
+ self.assertFalse(port['tags'])
+
+ @decorators.idempotent_id('6baa43bf-88fb-8bca-6051-97ea1a5e8f4f')
+ @utils.requires_ext(extension="tag-ports-during-bulk-creation",
+ service="network")
+ def test_tagging_ports_during_creation(self):
+ port = {'name': 'port', 'tags': self._tags[0]}
+ body = self.create_port(self.network, **port)
+ self.assertEqual(self._tags[0], body['tags'])
diff --git a/neutron_tempest_plugin/api/test_qos.py b/neutron_tempest_plugin/api/test_qos.py
index 25d2e81..0fc7b15 100644
--- a/neutron_tempest_plugin/api/test_qos.py
+++ b/neutron_tempest_plugin/api/test_qos.py
@@ -24,9 +24,10 @@
import testtools
from neutron_tempest_plugin.api import base
-
+from neutron_tempest_plugin import config
load_tests = testscenarios.load_tests_apply_scenarios
+CONF = config.CONF
class QosTestJSON(base.BaseAdminNetworkTest):
@@ -64,11 +65,8 @@
body = self.admin_client.show_qos_policy(policy['id'])
show_policy = body['policy']
self.assertIn('project_id', show_policy)
- self.assertIn('tenant_id', show_policy)
self.assertEqual(self.admin_client.tenant_id,
show_policy['project_id'])
- self.assertEqual(self.admin_client.tenant_id,
- show_policy['tenant_id'])
@decorators.idempotent_id('f8d20e92-f06d-4805-b54f-230f77715815')
def test_list_policy_filter_by_name(self):
@@ -89,7 +87,7 @@
policy = self.create_qos_policy(name='test-policy',
description='',
shared=False,
- tenant_id=self.admin_client.tenant_id)
+ project_id=self.admin_client.tenant_id)
self.admin_client.update_qos_policy(policy['id'],
description='test policy desc2',
shared=True)
@@ -105,7 +103,7 @@
policy = self.create_qos_policy(name='test-policy',
description='',
shared=False,
- tenant_id=self.client.tenant_id)
+ project_id=self.client.tenant_id)
self.assertRaises(
exceptions.Forbidden,
self.client.update_qos_policy,
@@ -116,7 +114,7 @@
policy = self.create_qos_policy(name='test-policy',
description='',
shared=False,
- tenant_id=self.admin_client.tenant_id)
+ project_id=self.admin_client.tenant_id)
self.assertRaises(
exceptions.NotFound,
self.client.update_qos_policy,
@@ -127,7 +125,7 @@
policy = self.create_qos_policy(name='test-policy',
description='',
shared=True,
- tenant_id=self.admin_client.tenant_id)
+ project_id=self.admin_client.tenant_id)
self.admin_client.update_qos_policy(policy['id'],
description='test policy desc2')
@@ -350,7 +348,7 @@
name='test-policy-shared',
description='shared policy',
shared=True,
- tenant_id=self.admin_client.tenant_id)
+ project_id=self.admin_client.tenant_id)
obtained_policy = self.client.show_qos_policy(policy['id'])['policy']
self.assertEqual(obtained_policy, policy)
@@ -365,7 +363,7 @@
def test_default_policy_creating_network_without_policy(self):
project_id = self.create_project()['id']
policy = self.create_qos_policy(name='test-policy',
- tenant_id=project_id,
+ project_id=project_id,
is_default=True)
network = self.create_network('test network', client=self.admin_client,
project_id=project_id)
@@ -377,10 +375,10 @@
def test_default_policy_creating_network_with_policy(self):
project_id = self.create_project()['id']
self.create_qos_policy(name='test-policy',
- tenant_id=project_id,
+ project_id=project_id,
is_default=True)
policy = self.create_qos_policy(name='test-policy',
- tenant_id=project_id)
+ project_id=project_id)
network = self.create_network('test network', client=self.admin_client,
project_id=project_id,
qos_policy_id=policy['id'])
@@ -392,7 +390,7 @@
def test_user_create_port_with_admin_qos_policy(self):
qos_policy = self.create_qos_policy(
name='test-policy',
- tenant_id=self.admin_client.tenant_id,
+ project_id=self.admin_client.tenant_id,
shared=False)
network = self.create_network(
'test network', client=self.admin_client,
@@ -535,7 +533,7 @@
policy = self.create_qos_policy(name='test-policy',
description='test policy',
shared=False,
- tenant_id=self.client.tenant_id)
+ project_id=self.client.tenant_id)
rule = self.create_qos_bandwidth_limit_rule(policy_id=policy['id'],
max_kbps=1,
max_burst_kbps=1,
@@ -550,7 +548,7 @@
policy = self.create_qos_policy(name='test-policy',
description='test policy',
shared=False,
- tenant_id=self.admin_client.tenant_id)
+ project_id=self.admin_client.tenant_id)
rule = self.create_qos_bandwidth_limit_rule(policy_id=policy['id'],
max_kbps=1,
max_burst_kbps=1,
@@ -585,10 +583,53 @@
self.assertIn(rule1['id'], rules_ids)
self.assertNotIn(rule2['id'], rules_ids)
+ @testtools.skipUnless(
+ CONF.neutron_plugin_options.create_shared_resources,
+ """Creation of shared resources should be allowed,
+ setting the create_shared_resources option as 'True' is needed""")
+ @decorators.idempotent_id('d911707e-fa2c-11e9-9553-5076af30bbf5')
+ def test_attach_and_detach_a_policy_by_a_tenant(self):
+        # As an admin, create a non-shared QoS policy, add a rule
+ # and associate it with a network
+ self.network = self.create_network()
+ policy = self.create_qos_policy(name='test-policy',
+ description='test policy for attach',
+ shared=False)
+
+ self.admin_client.create_bandwidth_limit_rule(
+ policy['id'], 1024, 1024)
+
+ self.admin_client.update_network(
+ self.network['id'], qos_policy_id=policy['id'])
+
+ # As a tenant, try to detach the policy from the network
+ # The operation should be forbidden
+ self.assertRaises(
+ exceptions.Forbidden,
+ self.client.update_network,
+ self.network['id'], qos_policy_id=None)
+
+ # As an admin, make the policy shared
+ self.admin_client.update_qos_policy(policy['id'], shared=True)
+
+ # As a tenant, try to detach the policy from the network
+ # The operation should be allowed
+ self.client.update_network(self.network['id'],
+ qos_policy_id=None)
+
+ retrieved_network = self.admin_client.show_network(self.network['id'])
+ self.assertIsNone(retrieved_network['network']['qos_policy_id'])
+
+        # As a tenant, try to delete the policy itself
+        # The operation should be forbidden
+ self.assertRaises(
+ exceptions.Forbidden,
+ self.client.delete_qos_policy,
+ policy['id'])
+
class QosBandwidthLimitRuleWithDirectionTestJSON(
QosBandwidthLimitRuleTestJSON):
-
required_extensions = (
QosBandwidthLimitRuleTestJSON.required_extensions +
['qos-bw-limit-direction']
@@ -598,6 +639,50 @@
('egress', {'direction': 'egress'}),
]
+ @classmethod
+ @base.require_qos_rule_type(qos_consts.RULE_TYPE_BANDWIDTH_LIMIT)
+ def resource_setup(cls):
+ super(QosBandwidthLimitRuleWithDirectionTestJSON, cls).resource_setup()
+
+ @decorators.idempotent_id('c8cbe502-0f7e-11ea-8d71-362b9e155667')
+ def test_create_policy_with_multiple_rules(self):
+ # Create a policy with multiple rules
+ policy = self.create_qos_policy(name='test-policy1',
+ description='test policy1',
+ shared=False)
+
+ rule1 = self.create_qos_bandwidth_limit_rule(policy_id=policy['id'],
+ max_kbps=1024,
+ max_burst_kbps=1024,
+ direction=n_constants.
+ EGRESS_DIRECTION)
+ rule2 = self.create_qos_bandwidth_limit_rule(policy_id=policy['id'],
+ max_kbps=1024,
+ max_burst_kbps=1024,
+ direction=n_constants.
+ INGRESS_DIRECTION)
+ # Check that the rules were added to the policy
+ rules = self.admin_client.list_bandwidth_limit_rules(
+ policy['id'])['bandwidth_limit_rules']
+ rules_ids = [rule['id'] for rule in rules]
+ self.assertIn(rule1['id'], rules_ids)
+ self.assertIn(rule2['id'], rules_ids)
+
+        # Check that rule creation fails for the same rule type and direction
+ self.assertRaises(exceptions.Conflict,
+ self.create_qos_bandwidth_limit_rule,
+ policy_id=policy['id'],
+ max_kbps=1025,
+ max_burst_kbps=1025,
+ direction=n_constants.EGRESS_DIRECTION)
+
+ self.assertRaises(exceptions.Conflict,
+ self.create_qos_bandwidth_limit_rule,
+ policy_id=policy['id'],
+ max_kbps=1025,
+ max_burst_kbps=1025,
+ direction=n_constants.INGRESS_DIRECTION)
+
class RbacSharedQosPoliciesTest(base.BaseAdminNetworkTest):
@@ -610,23 +695,23 @@
super(RbacSharedQosPoliciesTest, cls).resource_setup()
cls.client2 = cls.os_alt.network_client
- def _create_qos_policy(self, tenant_id=None):
+ def _create_qos_policy(self, project_id=None):
args = {'name': data_utils.rand_name('test-policy'),
'description': 'test policy',
'shared': False,
- 'tenant_id': tenant_id}
+ 'project_id': project_id}
qos_policy = self.admin_client.create_qos_policy(**args)['policy']
self.addCleanup(self.admin_client.delete_qos_policy, qos_policy['id'])
return qos_policy
- def _make_admin_policy_shared_to_tenant_id(self, tenant_id):
+ def _make_admin_policy_shared_to_project_id(self, project_id):
policy = self._create_qos_policy()
rbac_policy = self.admin_client.create_rbac_policy(
object_type='qos_policy',
object_id=policy['id'],
action='access_as_shared',
- target_tenant=tenant_id,
+ target_tenant=project_id,
)['rbac_policy']
return {'policy': policy, 'rbac_policy': rbac_policy}
@@ -645,7 +730,7 @@
qos_pol = self.create_qos_policy(
name=data_utils.rand_name('test-policy'),
description='test-shared-policy', shared=False,
- tenant_id=self.admin_client.tenant_id)
+ project_id=self.admin_client.tenant_id)
self.assertNotIn(qos_pol, self.client2.list_qos_policies()['policies'])
# test update shared False -> True
@@ -676,7 +761,7 @@
self.assertNotIn(qos_pol, self.client2.list_qos_policies()['policies'])
def _create_net_bound_qos_rbacs(self):
- res = self._make_admin_policy_shared_to_tenant_id(
+ res = self._make_admin_policy_shared_to_project_id(
self.client.tenant_id)
qos_policy, rbac_for_client_tenant = res['policy'], res['rbac_policy']
@@ -694,22 +779,22 @@
return rbac_for_client_tenant, rbac_wildcard
@decorators.idempotent_id('328b1f70-d424-11e5-a57f-54ee756c66df')
- def test_net_bound_shared_policy_wildcard_and_tenant_id_wild_remove(self):
+ def test_net_bound_shared_policy_wildcard_and_project_id_wild_remove(self):
client_rbac, wildcard_rbac = self._create_net_bound_qos_rbacs()
# globally unshare the qos-policy, the specific share should remain
self.admin_client.delete_rbac_policy(wildcard_rbac['id'])
self.client.list_rbac_policies(id=client_rbac['id'])
@decorators.idempotent_id('1997b00c-0c75-4e43-8ce2-999f9fa555ee')
- def test_net_bound_shared_policy_wildcard_and_tenant_id_wild_remains(self):
+ def test_net_bound_shared_policy_wildcard_and_projectid_wild_remains(self):
client_rbac, wildcard_rbac = self._create_net_bound_qos_rbacs()
# remove client_rbac policy the wildcard share should remain
self.admin_client.delete_rbac_policy(client_rbac['id'])
self.client.list_rbac_policies(id=wildcard_rbac['id'])
@decorators.idempotent_id('2ace9adc-da6e-11e5-aafe-54ee756c66df')
- def test_policy_sharing_with_wildcard_and_tenant_id(self):
- res = self._make_admin_policy_shared_to_tenant_id(
+ def test_policy_sharing_with_wildcard_and_project_id(self):
+ res = self._make_admin_policy_shared_to_project_id(
self.client.tenant_id)
qos_policy, rbac = res['policy'], res['rbac_policy']
qos_pol = self.client.show_qos_policy(qos_policy['id'])['policy']
@@ -732,7 +817,7 @@
@decorators.idempotent_id('9f85c76a-a350-11e5-8ae5-54ee756c66df')
def test_policy_target_update(self):
- res = self._make_admin_policy_shared_to_tenant_id(
+ res = self._make_admin_policy_shared_to_project_id(
self.client.tenant_id)
# change to client2
update_res = self.admin_client.update_rbac_policy(
@@ -746,7 +831,7 @@
@decorators.idempotent_id('a9b39f46-a350-11e5-97c7-54ee756c66df')
def test_network_presence_prevents_policy_rbac_policy_deletion(self):
- res = self._make_admin_policy_shared_to_tenant_id(
+ res = self._make_admin_policy_shared_to_project_id(
self.client2.tenant_id)
qos_policy_id = res['policy']['id']
self._create_network(qos_policy_id, self.client2)
@@ -798,14 +883,14 @@
object_type='qos_policy', object_id=policy['id'],
action='access_as_shared', target_tenant=self.client2.tenant_id)
field_args = (('id',), ('id', 'action'), ('object_type', 'object_id'),
- ('tenant_id', 'target_tenant'))
+ ('project_id', 'target_tenant'))
for fields in field_args:
res = self.admin_client.list_rbac_policies(fields=fields)
self.assertEqual(set(fields), set(res['rbac_policies'][0].keys()))
@decorators.idempotent_id('c10d993a-a350-11e5-9c7a-54ee756c66df')
def test_rbac_policy_show(self):
- res = self._make_admin_policy_shared_to_tenant_id(
+ res = self._make_admin_policy_shared_to_project_id(
self.client.tenant_id)
p1 = res['rbac_policy']
p2 = self.admin_client.create_rbac_policy(
@@ -840,7 +925,7 @@
@decorators.idempotent_id('cd7d755a-a350-11e5-a344-54ee756c66df')
def test_regular_client_blocked_from_sharing_anothers_policy(self):
- qos_policy = self._make_admin_policy_shared_to_tenant_id(
+ qos_policy = self._make_admin_policy_shared_to_project_id(
self.client.tenant_id)['policy']
with testtools.ExpectedException(exceptions.BadRequest):
self.client.create_rbac_policy(
diff --git a/neutron_tempest_plugin/api/test_security_groups.py b/neutron_tempest_plugin/api/test_security_groups.py
index 26a8c05..3c611eb 100644
--- a/neutron_tempest_plugin/api/test_security_groups.py
+++ b/neutron_tempest_plugin/api/test_security_groups.py
@@ -17,12 +17,16 @@
from neutron_lib import constants
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
import testtools
from neutron_tempest_plugin.api import base
from neutron_tempest_plugin.api import base_security_groups
+from oslo_log import log
+
+LOG = log.getLogger(__name__)
class SecGroupTest(base.BaseAdminNetworkTest):
@@ -76,6 +80,39 @@
self.assertIn(
security_group_rule['id'], observerd_security_group_rules_ids)
+ @decorators.idempotent_id('b5923b1a-4d33-44e1-af25-088dcb55b02b')
+ def test_list_security_group_rules_contains_all_rules(self):
+ """Test list security group rules.
+
+        This test checks if all SG rules which belong to the tenant OR
+        which belong to the tenant's security group are listed.
+ """
+ security_group = self.create_security_group()
+ protocol = random.choice(list(base_security_groups.V4_PROTOCOL_NAMES))
+ security_group_rule = self.create_security_group_rule(
+ security_group=security_group,
+ project={'id': self.admin_client.tenant_id},
+ client=self.admin_client,
+ protocol=protocol,
+ direction=constants.INGRESS_DIRECTION)
+
+ # Create also other SG with some custom rule to check that regular user
+ # can't see this rule
+ admin_security_group = self.create_security_group(
+ project={'id': self.admin_client.tenant_id},
+ client=self.admin_client)
+ admin_security_group_rule = self.create_security_group_rule(
+ security_group=admin_security_group,
+ project={'id': self.admin_client.tenant_id},
+ client=self.admin_client,
+ protocol=protocol,
+ direction=constants.INGRESS_DIRECTION)
+
+ rules = self.client.list_security_group_rules()['security_group_rules']
+ rules_ids = [rule['id'] for rule in rules]
+ self.assertIn(security_group_rule['id'], rules_ids)
+ self.assertNotIn(admin_security_group_rule['id'], rules_ids)
+
@decorators.idempotent_id('7c0ecb10-b2db-11e6-9b14-000c29248b0d')
def test_create_bulk_sec_groups(self):
# Creates 2 sec-groups in one request
@@ -91,6 +128,186 @@
self.assertIsNotNone(secgrp['id'])
+class BaseSecGroupQuota(base.BaseAdminNetworkTest):
+
+ def _create_max_allowed_sg_amount(self):
+ sg_amount = self._get_sg_amount()
+ sg_quota = self._get_sg_quota()
+ sg_to_create = sg_quota - sg_amount
+ self._create_security_groups(sg_to_create)
+
+ def _create_security_groups(self, amount):
+ for _ in range(amount):
+ sg = self.create_security_group()
+ self.addCleanup(self.delete_security_group, sg)
+
+ def _increase_sg_quota(self):
+ sg_quota = self._get_sg_quota()
+ new_sg_quota = 2 * sg_quota
+ self._set_sg_quota(new_sg_quota)
+ self.assertEqual(self._get_sg_quota(), new_sg_quota,
+ "Security group quota wasn't changed correctly")
+
+ def _decrease_sg_quota(self):
+ sg_quota = self._get_sg_quota()
+ new_sg_quota = sg_quota // 2
+ self._set_sg_quota(new_sg_quota)
+ self.assertEqual(self._get_sg_quota(), new_sg_quota,
+ "Security group quota wasn't changed correctly")
+
+ def _set_sg_quota(self, val):
+ sg_quota = self._get_sg_quota()
+ project_id = self.client.tenant_id
+ self.admin_client.update_quotas(project_id, **{'security_group': val})
+ self.addCleanup(self.admin_client.update_quotas,
+ project_id, **{'security_group': sg_quota})
+
+ def _get_sg_quota(self):
+ project_id = self.client.tenant_id
+ quotas = self.admin_client.show_quotas(project_id)
+ return quotas['quota']['security_group']
+
+ def _get_sg_amount(self):
+ project_id = self.client.tenant_id
+ filter_query = {'project_id': project_id}
+ security_groups = self.client.list_security_groups(**filter_query)
+ return len(security_groups['security_groups'])
+
+
+class SecGroupQuotaTest(BaseSecGroupQuota):
+
+ credentials = ['primary', 'admin']
+ required_extensions = ['security-group', 'quotas']
+
+ @decorators.idempotent_id('1826aa02-090d-4717-b43a-50ee449b02e7')
+ def test_sg_quota_values(self):
+ values = [-1, 0, 10, 2147483647]
+ for value in values:
+ self._set_sg_quota(value)
+ self.assertEqual(value, self._get_sg_quota())
+
+ @decorators.idempotent_id('df7981fb-b83a-4779-b13e-65494ef44a72')
+ def test_max_allowed_sg_amount(self):
+ self._create_max_allowed_sg_amount()
+ self.assertEqual(self._get_sg_quota(), self._get_sg_amount())
+
+ @decorators.idempotent_id('623d909c-6ef8-43d6-93ee-97086e2651e8')
+ def test_sg_quota_increased(self):
+ self._create_max_allowed_sg_amount()
+ self._increase_sg_quota()
+ self._create_max_allowed_sg_amount()
+ self.assertEqual(self._get_sg_quota(), self._get_sg_amount(),
+ "Amount of security groups doesn't match quota")
+
+ @decorators.idempotent_id('ba95676c-8d9a-4482-b4ec-74d51a4602a6')
+ def test_sg_quota_decrease_less_than_created(self):
+ self._create_max_allowed_sg_amount()
+ self._decrease_sg_quota()
+
+ @decorators.idempotent_id('d43cf1a7-aa7e-4c41-9340-627a1a6ab961')
+ def test_create_sg_when_quota_disabled(self):
+ sg_amount = self._get_sg_amount()
+ self._set_sg_quota(-1)
+ self._create_security_groups(10)
+ new_sg_amount = self._get_sg_amount()
+ self.assertGreater(new_sg_amount, sg_amount)
+
+
+class BaseSecGroupRulesQuota(base.BaseAdminNetworkTest):
+
+ def _create_max_allowed_sg_rules_amount(self, port_index=1):
+ sg_rules_amount = self._get_sg_rules_amount()
+ sg_rules_quota = self._get_sg_rules_quota()
+ sg_rules_to_create = sg_rules_quota - sg_rules_amount
+ port_index += sg_rules_to_create
+ self._create_security_group_rules(sg_rules_to_create,
+ port_index=port_index)
+
+ def _create_security_group_rules(self, amount, port_index=1):
+ for i in range(amount):
+ self.create_security_group_rule(**{
+ 'project_id': self.client.tenant_id,
+ 'direction': 'ingress',
+ 'port_range_max': port_index + i,
+ 'port_range_min': port_index + i,
+ 'protocol': 'tcp'})
+
+ def _increase_sg_rules_quota(self):
+ sg_rules_quota = self._get_sg_rules_quota()
+ new_sg_rules_quota = 2 * sg_rules_quota
+ self._set_sg_rules_quota(new_sg_rules_quota)
+ self.assertGreater(self._get_sg_rules_quota(), sg_rules_quota,
+ "Security group rule quota wasnt changed correctly")
+ return new_sg_rules_quota
+
+ def _decrease_sg_rules_quota(self):
+ sg_rules_quota = self._get_sg_rules_quota()
+ new_sg_rules_quota = sg_rules_quota // 2
+ self._set_sg_rules_quota(new_sg_rules_quota)
+ return new_sg_rules_quota
+
+ def _set_sg_rules_quota(self, val):
+ project_id = self.client.tenant_id
+ self.admin_client.update_quotas(project_id,
+ **{'security_group_rule': val})
+ LOG.info('Trying to update security group rule quota {} '.format(val))
+
+ def _get_sg_rules_quota(self):
+ project_id = self.client.tenant_id
+ quotas = self.admin_client.show_quotas(project_id)
+ return quotas['quota']['security_group_rule']
+
+ def _get_sg_rules_amount(self):
+ project_id = self.client.tenant_id
+ filter_query = {'project_id': project_id}
+ security_group_rules = self.client.list_security_group_rules(
+ **filter_query)
+ return len(security_group_rules['security_group_rules'])
+
+
+class SecGroupRulesQuotaTest(BaseSecGroupRulesQuota):
+
+ credentials = ['primary', 'admin']
+ required_extensions = ['security-group', 'quotas']
+
+ def setUp(self):
+ super(SecGroupRulesQuotaTest, self).setUp()
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.admin_client.reset_quotas, self.client.tenant_id)
+ self._set_sg_rules_quota(10)
+
+ @decorators.idempotent_id('77ec038c-5638-11ea-8e2d-0242ac130003')
+ def test_sg_rules_quota_increased(self):
+ """Test security group rules quota increased.
+
+ This test checks if it is possible to increase the SG rules Quota
+ value and creates security group rules according to new quota value.
+ """
+ self._create_max_allowed_sg_rules_amount()
+ new_quota = self._increase_sg_rules_quota()
+ port_index = new_quota
+ self._create_max_allowed_sg_rules_amount(port_index)
+ quota_set = self._get_sg_rules_quota()
+ self.assertEqual(quota_set, self._get_sg_rules_amount(),
+ "Amount of security groups rules doesn't match quota")
+
+ @decorators.idempotent_id('37508c8d-270b-4b93-8007-72876a1fec38')
+ def test_sg_rules_quota_values(self):
+ """Test security group rules quota values.
+
+ This test checks if it is possible to change the SG rules Quota
+ values, different values.
+ """
+ sg_rules_quota = self._get_sg_rules_quota()
+ project_id = self.client.tenant_id
+ self.addCleanup(self.admin_client.update_quotas,
+ project_id, **{'security_group_rule': sg_rules_quota})
+ values = [-1, 0, 10, 2147483647]
+ for value in values:
+ self._set_sg_rules_quota(value)
+ self.assertEqual(value, self._get_sg_rules_quota())
+
+
class SecGroupProtocolTest(base.BaseNetworkTest):
protocol_names = base_security_groups.V4_PROTOCOL_NAMES
@@ -184,19 +401,19 @@
name=data_utils.rand_name('test-sg'),
project={'id': self.admin_client.tenant_id})
- def _make_admin_sg_shared_to_tenant_id(self, tenant_id):
+ def _make_admin_sg_shared_to_project_id(self, project_id):
sg = self._create_security_group()
rbac_policy = self.admin_client.create_rbac_policy(
object_type='security_group',
object_id=sg['id'],
action='access_as_shared',
- target_tenant=tenant_id,
+ target_tenant=project_id,
)['rbac_policy']
return {'security_group': sg, 'rbac_policy': rbac_policy}
@decorators.idempotent_id('2a41eb8f-2a35-11e9-bae9-acde48001122')
def test_policy_target_update(self):
- res = self._make_admin_sg_shared_to_tenant_id(
+ res = self._make_admin_sg_shared_to_project_id(
self.client.tenant_id)
# change to client2
update_res = self.admin_client.update_rbac_policy(
@@ -210,7 +427,7 @@
@decorators.idempotent_id('2a619a8a-2a35-11e9-90d9-acde48001122')
def test_port_presence_prevents_policy_rbac_policy_deletion(self):
- res = self._make_admin_sg_shared_to_tenant_id(
+ res = self._make_admin_sg_shared_to_project_id(
self.client2.tenant_id)
sg_id = res['security_group']['id']
net = self.create_network(client=self.client2)
@@ -244,7 +461,7 @@
# ensure that 'client2' can't see the rbac-policy sharing the
# sg to it because the rbac-policy belongs to 'client'
self.assertNotIn(rbac_policy['id'], [p['id'] for p in
- self.client2.list_rbac_policies()['rbac_policies']])
+ self.client2.list_rbac_policies()['rbac_policies']])
@decorators.idempotent_id('2a9fd480-2a35-11e9-9cb6-acde48001122')
def test_filter_fields(self):
@@ -253,14 +470,14 @@
object_type='security_group', object_id=sg['id'],
action='access_as_shared', target_tenant=self.client2.tenant_id)
field_args = (('id',), ('id', 'action'), ('object_type', 'object_id'),
- ('tenant_id', 'target_tenant'))
+ ('project_id', 'target_tenant'))
for fields in field_args:
res = self.admin_client.list_rbac_policies(fields=fields)
self.assertEqual(set(fields), set(res['rbac_policies'][0].keys()))
@decorators.idempotent_id('2abf8f9e-2a35-11e9-85f7-acde48001122')
def test_rbac_policy_show(self):
- res = self._make_admin_sg_shared_to_tenant_id(
+ res = self._make_admin_sg_shared_to_project_id(
self.client.tenant_id)
p1 = res['rbac_policy']
p2 = self.admin_client.create_rbac_policy(
@@ -296,7 +513,7 @@
@decorators.idempotent_id('2aff3900-2a35-11e9-96b3-acde48001122')
def test_regular_client_blocked_from_sharing_anothers_policy(self):
- sg = self._make_admin_sg_shared_to_tenant_id(
+ sg = self._make_admin_sg_shared_to_project_id(
self.client.tenant_id)['security_group']
with testtools.ExpectedException(exceptions.BadRequest):
self.client.create_rbac_policy(
diff --git a/neutron_tempest_plugin/api/test_security_groups_negative.py b/neutron_tempest_plugin/api/test_security_groups_negative.py
index 1fcbd18..24e2289 100644
--- a/neutron_tempest_plugin/api/test_security_groups_negative.py
+++ b/neutron_tempest_plugin/api/test_security_groups_negative.py
@@ -15,11 +15,13 @@
from neutron_lib import constants
from neutron_lib.db import constants as db_const
+from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
from neutron_tempest_plugin.api import base
from neutron_tempest_plugin.api import base_security_groups
+from neutron_tempest_plugin.api import test_security_groups
LONG_NAME_NG = 'x' * (db_const.NAME_FIELD_SIZE + 1)
@@ -84,6 +86,41 @@
self.os_primary.network_client.delete_security_group,
security_group_id=security_group['id'])
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('867d67c3-7e26-4288-a27b-e3d0649ee54b')
+ def test_assign_sec_group_twice(self):
+ net = self.create_network()
+ port = self.create_port(net)
+ sg = self.create_security_group()
+ self.assertRaises(lib_exc.BadRequest,
+ self.update_port,
+ port,
+ **{'security_groups': [sg['id'], sg['id']]})
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('d5ecb408-eb7e-47c1-a56f-353967dbd1c2')
+ def test_assign_nonexistent_sec_group(self):
+ net = self.create_network()
+ port = self.create_port(net)
+ self.assertRaises(lib_exc.NotFound,
+ self.update_port,
+ port,
+ **{'security_groups': [data_utils.rand_uuid()]})
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('98ef378d-81a2-43f6-bb6f-735c04cdef91')
+ def test_no_sec_group_changes_after_assignment_failure(self):
+ net = self.create_network()
+ port = self.create_port(net)
+ sg_list_before_failure = port['security_groups']
+ self.assertRaises(lib_exc.NotFound,
+ self.update_port,
+ port,
+ **{'security_groups': [data_utils.rand_uuid()]})
+ port_details_new = self.client.show_port(port['id'])['port']
+ sg_list_after_failure = port_details_new['security_groups']
+ self.assertEqual(sg_list_before_failure, sg_list_after_failure)
+
class NegativeSecGroupIPv6Test(NegativeSecGroupTest):
_ip_version = constants.IP_VERSION_6
@@ -114,3 +151,22 @@
def test_create_security_group_rule_with_ipv6_protocol_integers(self):
self._test_create_security_group_rule_with_bad_protocols(
base_security_groups.V6_PROTOCOL_INTS)
+
+
+class NegativeSecGroupQuotaTest(test_security_groups.BaseSecGroupQuota):
+
+ credentials = ['primary', 'admin']
+ required_extensions = ['security-group', 'quotas']
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('63f00cba-fcf5-4000-a3ee-eca58a1795c1')
+ def test_create_excess_sg(self):
+ self._set_sg_quota(0)
+ self.assertRaises(lib_exc.Conflict, self.create_security_group)
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('90a83445-bbc2-49d8-8c85-a111c08cd7fb')
+ def test_sg_quota_incorrect_values(self):
+ values = [-2, 2147483648, "value"]
+ for value in values:
+ self.assertRaises(lib_exc.BadRequest, self._set_sg_quota, value)
diff --git a/neutron_tempest_plugin/api/test_subnetpools.py b/neutron_tempest_plugin/api/test_subnetpools.py
index 9d927cf..38c721f 100644
--- a/neutron_tempest_plugin/api/test_subnetpools.py
+++ b/neutron_tempest_plugin/api/test_subnetpools.py
@@ -12,10 +12,12 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
from tempest.common import utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
from neutron_tempest_plugin.api import base
@@ -418,3 +420,171 @@
self._test_list_validation_filters(self.list_kwargs)
self._test_list_validation_filters({
'unknown_filter': 'value'}, filter_is_valid=False)
+
+
+class RbacSubnetPoolTest(SubnetPoolsTestBase):
+
+ force_tenant_isolation = True
+ credentials = ['primary', 'alt', 'admin']
+ required_extensions = ['rbac-subnetpool']
+
+ @classmethod
+ def resource_setup(cls):
+ super(RbacSubnetPoolTest, cls).resource_setup()
+ cls.client2 = cls.os_alt.network_client
+
+ def _make_admin_snp_shared_to_project_id(self, project_id):
+ snp = self._create_subnetpool(is_admin=True)
+ rbac_policy = self.admin_client.create_rbac_policy(
+ object_type='subnetpool',
+ object_id=snp['id'],
+ action='access_as_shared',
+ target_tenant=project_id,
+ )['rbac_policy']
+ return {'subnetpool': snp, 'rbac_policy': rbac_policy}
+
+ @decorators.idempotent_id('71b35ad0-51cd-40da-985d-89a51c95ec6a')
+ def test_policy_target_update(self):
+ res = self._make_admin_snp_shared_to_project_id(
+ self.client.tenant_id)
+ # change to client2
+ update_res = self.admin_client.update_rbac_policy(
+ res['rbac_policy']['id'], target_tenant=self.client2.tenant_id)
+ self.assertEqual(self.client2.tenant_id,
+ update_res['rbac_policy']['target_tenant'])
+ # make sure everything else stayed the same
+ res['rbac_policy'].pop('target_tenant')
+ update_res['rbac_policy'].pop('target_tenant')
+ self.assertEqual(res['rbac_policy'], update_res['rbac_policy'])
+
+ @decorators.idempotent_id('451d9d38-65a0-4916-a805-1460d6a938d1')
+ def test_subnet_presence_prevents_rbac_policy_deletion(self):
+ res = self._make_admin_snp_shared_to_project_id(
+ self.client2.tenant_id)
+ network = self.create_network(client=self.client2)
+ subnet = self.client2.create_subnet(
+ network_id=network['id'],
+ ip_version=4,
+ subnetpool_id=res['subnetpool']['id'],
+ name=data_utils.rand_name("rbac-subnetpool"),
+ )["subnet"]
+ self.addCleanup(self.client2.delete_network, network['id'])
+ self.addCleanup(
+ self.admin_client.delete_subnetpool,
+ res['subnetpool']['id']
+ )
+ self.addCleanup(self.client2.delete_subnet, subnet['id'])
+
+        # a subnet allocated from the shared subnetpool should prevent
+        # the deletion of the rbac-policy required for it to be shared
+ with testtools.ExpectedException(lib_exc.Conflict):
+ self.admin_client.delete_rbac_policy(res['rbac_policy']['id'])
+
+ @decorators.idempotent_id('f74a71de-9abf-49c6-8199-4ac7f53e383b')
+ @utils.requires_ext(extension='rbac-address-scope', service='network')
+ def test_cannot_share_if_no_access_to_address_scope(self):
+ # Create Address Scope shared only to client but not to client2
+ a_s = self.admin_client.create_address_scope(
+ name=data_utils.rand_name("rbac-subnetpool"),
+ ip_version=4
+ )["address_scope"]
+ rbac_policy = self.admin_client.create_rbac_policy(
+ object_type='address_scope', object_id=a_s['id'],
+ action='access_as_shared',
+ target_tenant=self.client.tenant_id)['rbac_policy']
+
+ # Create subnet pool owned by client with shared AS
+ snp = self._create_subnetpool(address_scope_id=a_s["id"])
+
+ with testtools.ExpectedException(lib_exc.BadRequest):
+ self.client.create_rbac_policy(
+ object_type='subnetpool', object_id=snp['id'],
+ action='access_as_shared',
+ target_tenant=self.client2.tenant_id
+ )
+
+ # cleanup
+ self.client.delete_subnetpool(snp["id"])
+ self.admin_client.delete_rbac_policy(rbac_policy['id'])
+ self.admin_client.delete_address_scope(a_s['id'])
+
+ @decorators.idempotent_id('9cf8bba5-0163-4083-9397-678bb9b5f5a2')
+ def test_regular_client_shares_to_another_regular_client(self):
+ # owned by self.admin_client
+ snp = self._create_subnetpool(is_admin=True)
+ with testtools.ExpectedException(lib_exc.NotFound):
+ self.client.show_subnetpool(snp['id'])
+ rbac_policy = self.admin_client.create_rbac_policy(
+ object_type='subnetpool', object_id=snp['id'],
+ action='access_as_shared',
+ target_tenant=self.client.tenant_id)['rbac_policy']
+ self.client.show_subnetpool(snp['id'])
+
+ self.assertIn(rbac_policy,
+ self.admin_client.list_rbac_policies()['rbac_policies'])
+        # ensure that 'client2' can't see the rbac-policy sharing the
+        # snp to 'client' because 'client2' is neither its owner nor target
+ self.assertNotIn(rbac_policy['id'], [p['id'] for p in
+ self.client2.list_rbac_policies()['rbac_policies']])
+
+ @decorators.idempotent_id('17b2b437-a5fa-4340-ad98-912a986d0d7c')
+ def test_filter_fields(self):
+ snp = self._create_subnetpool()
+ self.admin_client.create_rbac_policy(
+ object_type='subnetpool', object_id=snp['id'],
+ action='access_as_shared', target_tenant=self.client2.tenant_id)
+ field_args = (('id',), ('id', 'action'), ('object_type', 'object_id'),
+ ('project_id', 'target_tenant'))
+ for fields in field_args:
+ res = self.admin_client.list_rbac_policies(fields=fields)
+ self.assertEqual(set(fields), set(res['rbac_policies'][0].keys()))
+
+ @decorators.idempotent_id('e59e4502-4e6a-4e49-b446-a5d5642bbd69')
+ def test_rbac_policy_show(self):
+ res = self._make_admin_snp_shared_to_project_id(
+ self.client.tenant_id)
+ p1 = res['rbac_policy']
+ p2 = self.admin_client.create_rbac_policy(
+ object_type='subnetpool',
+ object_id=res['subnetpool']['id'],
+ action='access_as_shared',
+ target_tenant='*')['rbac_policy']
+
+ self.assertEqual(
+ p1, self.admin_client.show_rbac_policy(p1['id'])['rbac_policy'])
+ self.assertEqual(
+ p2, self.admin_client.show_rbac_policy(p2['id'])['rbac_policy'])
+
+ @decorators.idempotent_id('1c24c28c-eb1e-466e-af29-255cf127653a')
+ def test_filter_rbac_policies(self):
+ snp = self._create_subnetpool()
+ rbac_pol1 = self.admin_client.create_rbac_policy(
+ object_type='subnetpool', object_id=snp['id'],
+ action='access_as_shared',
+ target_tenant=self.client2.tenant_id)['rbac_policy']
+ rbac_pol2 = self.admin_client.create_rbac_policy(
+ object_type='subnetpool', object_id=snp['id'],
+ action='access_as_shared',
+ target_tenant=self.admin_client.tenant_id)['rbac_policy']
+ res1 = self.admin_client.list_rbac_policies(id=rbac_pol1['id'])[
+ 'rbac_policies']
+ res2 = self.admin_client.list_rbac_policies(id=rbac_pol2['id'])[
+ 'rbac_policies']
+ self.assertEqual(1, len(res1))
+ self.assertEqual(1, len(res2))
+ self.assertEqual(rbac_pol1['id'], res1[0]['id'])
+ self.assertEqual(rbac_pol2['id'], res2[0]['id'])
+
+ @decorators.idempotent_id('63d9acbe-403c-4e77-9ffd-80e636a4621e')
+ def test_regular_client_blocked_from_sharing_anothers_policy(self):
+ snp = self._make_admin_snp_shared_to_project_id(
+ self.client.tenant_id)['subnetpool']
+ with testtools.ExpectedException(lib_exc.BadRequest):
+ self.client.create_rbac_policy(
+ object_type='subnetpool', object_id=snp['id'],
+ action='access_as_shared',
+ target_tenant=self.client2.tenant_id)
+
+ # make sure the rbac-policy is invisible to the tenant for which it's
+ # being shared
+ self.assertFalse(self.client.list_rbac_policies()['rbac_policies'])
diff --git a/neutron_tempest_plugin/api/test_subnetpools_negative.py b/neutron_tempest_plugin/api/test_subnetpools_negative.py
index 214a012..1e222df 100644
--- a/neutron_tempest_plugin/api/test_subnetpools_negative.py
+++ b/neutron_tempest_plugin/api/test_subnetpools_negative.py
@@ -12,6 +12,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
import netaddr
from oslo_utils import uuidutils
@@ -171,6 +172,10 @@
@decorators.attr(type='negative')
@decorators.idempotent_id('3396ec6c-cb80-4ebe-b897-84e904580bdf')
+ @testtools.skipIf(
+ utils.is_extension_enabled('rbac-address-scope', 'network'),
+ reason="Test is outdated starting from Ussuri release."
+ )
@utils.requires_ext(extension='address-scope', service='network')
def test_tenant_create_subnetpool_associate_shared_address_scope(self):
address_scope = self.create_address_scope(
diff --git a/neutron_tempest_plugin/api/test_trunk.py b/neutron_tempest_plugin/api/test_trunk.py
index 823a95d..1f83bd8 100644
--- a/neutron_tempest_plugin/api/test_trunk.py
+++ b/neutron_tempest_plugin/api/test_trunk.py
@@ -235,7 +235,9 @@
'segmentation_id': segmentation_id2}]
# Validate that subport got segmentation details from the network
- self.assertEqual(expected_subports, trunk['sub_ports'])
+ self.assertEqual(
+ sorted(expected_subports, key=lambda subport: subport['port_id']),
+ sorted(trunk['sub_ports'], key=lambda subport: subport['port_id']))
class TrunkTestMtusJSONBase(TrunkTestJSONBase):
diff --git a/neutron_tempest_plugin/common/ip.py b/neutron_tempest_plugin/common/ip.py
index 1702bd3..70a3dd5 100644
--- a/neutron_tempest_plugin/common/ip.py
+++ b/neutron_tempest_plugin/common/ip.py
@@ -15,6 +15,7 @@
# under the License.
import collections
+import re
import subprocess
import netaddr
@@ -299,6 +300,23 @@
netaddr.IPNetwork(subnet['cidr']).prefixlen)
+def arp_table():
+ # 192.168.0.16 0x1 0x2 dc:a6:32:06:56:51 * enp0s31f6
+ regex_str = (r"([^ ]+)\s+(0x\d+)\s+(0x\d+)\s+(\w{2}\:\w{2}\:\w{2}\:\w{2}\:"
+ r"\w{2}\:\w{2})\s+([\w+\*]+)\s+([\-\w]+)")
+ regex = re.compile(regex_str)
+ arp_table = []
+ with open('/proc/net/arp', 'r') as proc_file:
+ for line in proc_file.readlines():
+ m = regex.match(line)
+ if m:
+ arp_table.append(ARPregister(
+ ip_address=m.group(1), hw_type=m.group(2),
+ flags=m.group(3), mac_address=m.group(4),
+ mask=m.group(5), device=m.group(6)))
+ return arp_table
+
+
class Route(HasProperties,
collections.namedtuple('Route',
['dest', 'properties'])):
@@ -314,3 +332,40 @@
@property
def src_ip(self):
return netaddr.IPAddress(self.src)
+
+ def __str__(self):
+ properties_str = ' '.join('%s %s' % (k, v)
+ for k, v in self.properties.items())
+ return '%(dest)s %(properties)s' % {'dest': self.dest,
+ 'properties': properties_str}
+
+
+class ARPregister(collections.namedtuple(
+ 'ARPregister',
+ ['ip_address', 'hw_type', 'flags', 'mac_address', 'mask', 'device'])):
+
+ def __str__(self):
+ return '%s %s %s %s %s %s' % (self.ip_address, self.hw_type,
+ self.flags, self.mac_address, self.mask,
+ self.device)
+
+
+def find_valid_cidr(valid_cidr='10.0.0.0/8', used_cidr=None):
+ total_ips = netaddr.IPSet(netaddr.IPNetwork(valid_cidr))
+ if used_cidr:
+ used_network = netaddr.IPNetwork(used_cidr)
+ netmask = used_network.netmask.netmask_bits()
+ valid_ips = total_ips.difference(netaddr.IPSet(used_network))
+ else:
+ valid_ips = total_ips
+ netmask = 24
+
+ for ip in valid_ips:
+ valid_network = netaddr.IPNetwork('%s/%s' % (ip, netmask))
+ if valid_network in valid_ips:
+ return valid_network.cidr
+
+ exception_str = 'No valid CIDR found in %s' % valid_cidr
+ if used_cidr:
+ exception_str += ', used CIDR %s' % used_cidr
+ raise Exception(exception_str)
diff --git a/neutron_tempest_plugin/common/shell.py b/neutron_tempest_plugin/common/shell.py
index bd4a7a3..eebb07d 100644
--- a/neutron_tempest_plugin/common/shell.py
+++ b/neutron_tempest_plugin/common/shell.py
@@ -46,7 +46,7 @@
:param timeout: command execution timeout in seconds
- :param check: when False it doesn't raises ShellCommandError when
+ :param check: when False it doesn't raises ShellCommandFailed when
exit status is not zero. True by default
:returns: STDOUT text when command execution terminates with zero exit
@@ -57,7 +57,7 @@
try to read STDOUT and STDERR buffers (not fully implemented) before
raising the exception.
- :raises ShellCommandError: when command execution terminates with non-zero
+ :raises ShellCommandFailed: when command execution terminates with non-zero
exit status.
"""
ssh_client = ssh_client or SSH_PROXY_CLIENT
@@ -110,7 +110,7 @@
except lib_exc.SSHExecCommandFailed as ex:
# Please note class SSHExecCommandFailed has been re-based on
- # top of ShellCommandError
+ # top of ShellCommandFailed
stdout = ex.stdout
stderr = ex.stderr
exit_status = ex.exit_status
@@ -174,7 +174,7 @@
stdout=self.stdout)
elif self.exit_status != 0:
- raise exceptions.ShellCommandError(command=self.command,
- exit_status=self.exit_status,
- stderr=self.stderr,
- stdout=self.stdout)
+ raise exceptions.ShellCommandFailed(command=self.command,
+ exit_status=self.exit_status,
+ stderr=self.stderr,
+ stdout=self.stdout)
diff --git a/neutron_tempest_plugin/common/ssh.py b/neutron_tempest_plugin/common/ssh.py
index 96f0ef9..fa731d8 100644
--- a/neutron_tempest_plugin/common/ssh.py
+++ b/neutron_tempest_plugin/common/ssh.py
@@ -133,47 +133,20 @@
look_for_keys=look_for_keys, key_filename=key_file,
port=port, create_proxy_client=False, **kwargs)
- # attribute used to keep reference to opened client connection
- _client = None
-
def connect(self, *args, **kwargs):
"""Creates paramiko.SSHClient and connect it to remote SSH server
- In case this method is called more times it returns the same client
- and no new SSH connection is created until close method is called.
-
:returns: paramiko.Client connected to remote server.
:raises tempest.lib.exceptions.SSHTimeout: in case it fails to connect
to remote server.
"""
- client = self._client
- if client is None:
- client = super(Client, self)._get_ssh_connection(
- *args, **kwargs)
- self._client = client
-
- return client
-
- # This overrides superclass protected method to make sure exec_command
- # method is going to reuse the same SSH client and connection if called
- # more times
- _get_ssh_connection = connect
+ return super(Client, self)._get_ssh_connection(*args, **kwargs)
# This overrides superclass test_connection_auth method forbidding it to
# close connection
test_connection_auth = connect
- def close(self):
- """Closes connection to SSH server and cleanup resources."""
- client = self._client
- if client is not None:
- client.close()
- self._client = None
-
- def __exit__(self, _exception_type, _exception_value, _traceback):
- self.close()
-
def open_session(self):
"""Gets connection to SSH server and open a new paramiko.Channel
diff --git a/neutron_tempest_plugin/common/utils.py b/neutron_tempest_plugin/common/utils.py
index bd7a367..34e7464 100644
--- a/neutron_tempest_plugin/common/utils.py
+++ b/neutron_tempest_plugin/common/utils.py
@@ -18,7 +18,6 @@
"""Utilities and helper functions."""
-import functools
import threading
import time
try:
@@ -27,6 +26,7 @@
from urllib import parse as urlparse
import eventlet
+from tempest.lib import exceptions
SCHEMA_PORT_MAPPING = {
"http": 80,
@@ -83,22 +83,6 @@
raise WaitTimeout("Timed out after %d seconds" % timeout)
-# TODO(haleyb): move to neutron-lib
-# code copied from neutron repository - neutron/tests/base.py
-def unstable_test(reason):
- def decor(f):
- @functools.wraps(f)
- def inner(self, *args, **kwargs):
- try:
- return f(self, *args, **kwargs)
- except Exception as e:
- msg = ("%s was marked as unstable because of %s, "
- "failure was: %s") % (self.id(), reason, e)
- raise self.skipTest(msg)
- return inner
- return decor
-
-
def override_class(overriden_class, overrider_class):
"""Override class definition with a MixIn class
@@ -123,3 +107,30 @@
if scheme in SCHEMA_PORT_MAPPING and not port:
netloc = netloc + ":" + str(SCHEMA_PORT_MAPPING[scheme])
return urlparse.urlunparse((scheme, netloc, url, params, query, fragment))
+
+
+def kill_nc_process(ssh_client):
+ cmd = "killall -q nc"
+ try:
+ ssh_client.exec_command(cmd)
+ except exceptions.SSHExecCommandFailed:
+ pass
+
+
+def process_is_running(ssh_client, process_name):
+ try:
+ ssh_client.exec_command("pidof %s" % process_name)
+ return True
+ except exceptions.SSHExecCommandFailed:
+ return False
+
+
+def spawn_http_server(ssh_client, port, message):
+ cmd = ("(echo -e 'HTTP/1.1 200 OK\r\n'; echo '%(msg)s') "
+ "| sudo nc -lp %(port)d &" % {'msg': message, 'port': port})
+ ssh_client.exec_command(cmd)
+
+
+def call_url_remote(ssh_client, url):
+ cmd = "curl %s --retry 3 --connect-timeout 2" % url
+ return ssh_client.exec_command(cmd)
diff --git a/neutron_tempest_plugin/config.py b/neutron_tempest_plugin/config.py
index 54dc16e..28d6b76 100644
--- a/neutron_tempest_plugin/config.py
+++ b/neutron_tempest_plugin/config.py
@@ -67,7 +67,7 @@
# Multicast tests settings
cfg.StrOpt('multicast_group_range',
- default='224.0.0.120-224.0.0.250',
+ default='225.0.0.120-225.0.0.250',
help='Unallocated multi-cast IPv4 range, which will be used to '
'test the multi-cast support.'),
@@ -115,6 +115,20 @@
help='Name of ssh user to use with advanced image in tests. '
'This is required if advanced image has to be used in '
'tests.'),
+
+    # Option for creating QoS policies configured as "shared".
+ # The default is false in order to prevent undesired usage
+ # while testing in parallel.
+ cfg.BoolOpt('create_shared_resources',
+ default=False,
+ help='Allow creation of shared resources.'
+ 'The default value is false.'),
+ cfg.BoolOpt('is_igmp_snooping_enabled',
+ default=False,
+ help='Indicates whether IGMP snooping is enabled or not. '
+ 'If True, multicast test(s) will assert that multicast '
+ 'traffic is not being flooded to all ports. Defaults '
+ 'to False.'),
]
# TODO(amuller): Redo configuration options registration as part of the planned
diff --git a/neutron_tempest_plugin/exceptions.py b/neutron_tempest_plugin/exceptions.py
index 895cb40..398bc1c 100644
--- a/neutron_tempest_plugin/exceptions.py
+++ b/neutron_tempest_plugin/exceptions.py
@@ -96,5 +96,5 @@
exceptions.SSHExecCommandFailed, ShellCommandFailed)
# Above code created a new SSHExecCommandFailed class based on top
-# of ShellCommandError
+# of ShellCommandFailed
assert issubclass(exceptions.SSHExecCommandFailed, ShellCommandFailed)
diff --git a/neutron_tempest_plugin/scenario/admin/test_floatingip.py b/neutron_tempest_plugin/scenario/admin/test_floatingip.py
index 511452c..a08acc3 100644
--- a/neutron_tempest_plugin/scenario/admin/test_floatingip.py
+++ b/neutron_tempest_plugin/scenario/admin/test_floatingip.py
@@ -85,7 +85,7 @@
server_ssh_clients.append(ssh.Client(
fips[i]['floating_ip_address'], CONF.validation.image_ssh_user,
pkey=self.keypair['private_key']))
- return server_ssh_clients, fips
+ return servers, server_ssh_clients, fips
@decorators.idempotent_id('6bba729b-3fb6-494b-9e1e-82bbd89a1045')
def test_two_vms_fips(self):
@@ -99,6 +99,7 @@
hyper = self._list_hypervisors()[0]['hypervisor_hostname']
# Get availability zone list to pass it for vm creation
avail_zone = self._list_availability_zones()[0]['zoneName']
- server_ssh_clients, fips = self._create_vms(hyper, avail_zone)
+ servers, server_ssh_clients, fips = self._create_vms(hyper, avail_zone)
self.check_remote_connectivity(
- server_ssh_clients[0], fips[1]['floating_ip_address'])
+ server_ssh_clients[0], fips[1]['floating_ip_address'],
+ servers=servers)
diff --git a/neutron_tempest_plugin/scenario/base.py b/neutron_tempest_plugin/scenario/base.py
index 5a29aa1..9dd830c 100644
--- a/neutron_tempest_plugin/scenario/base.py
+++ b/neutron_tempest_plugin/scenario/base.py
@@ -12,6 +12,8 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import distutils
+import re
import subprocess
from debtcollector import removals
@@ -26,9 +28,12 @@
from tempest.lib import exceptions as lib_exc
from neutron_tempest_plugin.api import base as base_api
+from neutron_tempest_plugin.common import ip as ip_utils
from neutron_tempest_plugin.common import shell
from neutron_tempest_plugin.common import ssh
+from neutron_tempest_plugin.common import utils
from neutron_tempest_plugin import config
+from neutron_tempest_plugin import exceptions
from neutron_tempest_plugin.scenario import constants
CONF = config.CONF
@@ -36,6 +41,48 @@
LOG = log.getLogger(__name__)
+def get_ncat_version(ssh_client=None):
+ cmd = "ncat --version 2>&1"
+ try:
+ version_result = shell.execute(cmd, ssh_client=ssh_client).stdout
+ except exceptions.ShellCommandFailed:
+ m = None
+ else:
+ m = re.match(r"Ncat: Version ([\d.]+) *.", version_result)
+    # NOTE(slaweq): by default let's assume we have ncat 7.60 which is in
+ # 18.04 which is used on u/s gates
+ return distutils.version.StrictVersion(m.group(1) if m else '7.60')
+
+
+def get_ncat_server_cmd(port, protocol, msg=None):
+ udp = ''
+ if protocol.lower() == neutron_lib_constants.PROTO_NAME_UDP:
+ udp = '-u'
+ cmd = "nc %(udp)s -p %(port)s -lk " % {
+ 'udp': udp, 'port': port}
+ if msg:
+ if CONF.neutron_plugin_options.default_image_is_advanced:
+ cmd += "-c 'echo %s' &" % msg
+ else:
+ cmd += "-e echo %s &" % msg
+ else:
+ cmd += "< /dev/zero &"
+ return cmd
+
+
+def get_ncat_client_cmd(ip_address, port, protocol):
+ udp = ''
+ if protocol.lower() == neutron_lib_constants.PROTO_NAME_UDP:
+ udp = '-u'
+ cmd = 'echo "knock knock" | nc '
+ ncat_version = get_ncat_version()
+ if ncat_version > distutils.version.StrictVersion('7.60'):
+ cmd += '-z '
+ cmd += '-w 1 %(udp)s %(host)s %(port)s' % {
+ 'udp': udp, 'host': ip_address, 'port': port}
+ return cmd
+
+
class BaseTempestTestCase(base_api.BaseNetworkTest):
def create_server(self, flavor_ref, image_ref, key_name, networks,
@@ -219,6 +266,7 @@
except lib_exc.SSHTimeout as ssh_e:
LOG.debug(ssh_e)
self._log_console_output(servers)
+ self._log_local_network_status()
raise
def _log_console_output(self, servers=None):
@@ -229,6 +277,11 @@
servers = self.os_primary.servers_client.list_servers()
servers = servers['servers']
for server in servers:
+ # NOTE(slaweq): sometimes servers are passed in dictionary with
+ # "server" key as first level key and in other cases it may be that
+ # it is just the "inner" dict without "server" key. Lets try to
+ # handle both cases
+ server = server.get("server") or server
try:
console_output = (
self.os_primary.servers_client.get_console_output(
@@ -239,10 +292,16 @@
LOG.debug("Server %s disappeared(deleted) while looking "
"for the console log", server['id'])
+ def _log_local_network_status(self):
+ local_routes = ip_utils.IPCommand().list_routes()
+ LOG.debug('Local routes:\n%s', '\n'.join(str(r) for r in local_routes))
+ arp_table = ip_utils.arp_table()
+ LOG.debug('Local ARP table:\n%s', '\n'.join(str(r) for r in arp_table))
+
def _check_remote_connectivity(self, source, dest, count,
should_succeed=True,
nic=None, mtu=None, fragmentation=True,
- timeout=None):
+ timeout=None, pattern=None):
"""check ping server via source ssh connection
:param source: RemoteClient: an ssh connection from which to ping
@@ -253,12 +312,13 @@
:param mtu: mtu size for the packet to be sent
:param fragmentation: Flag for packet fragmentation
:param timeout: Timeout for all ping packet(s) to succeed
+ :param pattern: hex digits included in ICMP messages
:returns: boolean -- should_succeed == ping
:returns: ping is false if ping failed
"""
def ping_host(source, host, count,
size=CONF.validation.ping_size, nic=None, mtu=None,
- fragmentation=True):
+ fragmentation=True, pattern=None):
IP_VERSION_4 = neutron_lib_constants.IP_VERSION_4
IP_VERSION_6 = neutron_lib_constants.IP_VERSION_6
@@ -274,13 +334,16 @@
cmd += ' -M do'
size = str(net_utils.get_ping_payload_size(
mtu=mtu, ip_version=ip_version))
+ if pattern:
+ cmd += ' -p {pattern}'.format(pattern=pattern)
cmd += ' -c{0} -w{0} -s{1} {2}'.format(count, size, host)
return source.exec_command(cmd)
def ping_remote():
try:
result = ping_host(source, dest, count, nic=nic, mtu=mtu,
- fragmentation=fragmentation)
+ fragmentation=fragmentation,
+ pattern=pattern)
except lib_exc.SSHExecCommandFailed:
LOG.warning('Failed to ping IP: %s via a ssh connection '
@@ -301,12 +364,13 @@
def check_remote_connectivity(self, source, dest, should_succeed=True,
nic=None, mtu=None, fragmentation=True,
servers=None, timeout=None,
- ping_count=CONF.validation.ping_count):
+ ping_count=CONF.validation.ping_count,
+ pattern=None):
try:
self.assertTrue(self._check_remote_connectivity(
source, dest, ping_count, should_succeed, nic, mtu,
fragmentation,
- timeout=timeout))
+ timeout=timeout, pattern=pattern))
except lib_exc.SSHTimeout as ssh_e:
LOG.debug(ssh_e)
self._log_console_output(servers)
@@ -408,21 +472,31 @@
self._log_console_output(servers)
raise
- def nc_listen(self, server, ssh_client, port, protocol, echo_msg):
+ def ensure_nc_listen(self, ssh_client, port, protocol, echo_msg=None,
+ servers=None):
+ """Ensure that nc server listening on the given TCP/UDP port is up.
+
+ Listener is created always on remote host.
+ """
+ def spawn_and_check_process():
+ self.nc_listen(ssh_client, port, protocol, echo_msg, servers)
+ return utils.process_is_running(ssh_client, "nc")
+
+ utils.wait_until_true(spawn_and_check_process)
+
+ def nc_listen(self, ssh_client, port, protocol, echo_msg=None,
+ servers=None):
"""Create nc server listening on the given TCP/UDP port.
Listener is created always on remote host.
"""
- udp = ''
- if protocol.lower() == neutron_lib_constants.PROTO_NAME_UDP:
- udp = '-u'
- cmd = "sudo nc %(udp)s -p %(port)s -lk -e echo %(msg)s &" % {
- 'udp': udp, 'port': port, 'msg': echo_msg}
try:
- return ssh_client.exec_command(cmd)
+ return ssh_client.execute_script(
+ get_ncat_server_cmd(port, protocol, echo_msg),
+ become_root=True)
except lib_exc.SSHTimeout as ssh_e:
LOG.debug(ssh_e)
- self._log_console_output([server])
+ self._log_console_output(servers)
raise
def nc_client(self, ip_address, port, protocol):
@@ -430,11 +504,7 @@
Client is always executed locally on host where tests are executed.
"""
- udp = ''
- if protocol.lower() == neutron_lib_constants.PROTO_NAME_UDP:
- udp = '-u'
- cmd = 'echo "knock knock" | nc -w 1 %(udp)s %(host)s %(port)s' % {
- 'udp': udp, 'host': ip_address, 'port': port}
+ cmd = get_ncat_client_cmd(ip_address, port, protocol)
result = shell.execute_local_command(cmd)
self.assertEqual(0, result.exit_status)
return result.stdout
diff --git a/neutron_tempest_plugin/scenario/test_connectivity.py b/neutron_tempest_plugin/scenario/test_connectivity.py
index 78d8d95..1a7468a 100644
--- a/neutron_tempest_plugin/scenario/test_connectivity.py
+++ b/neutron_tempest_plugin/scenario/test_connectivity.py
@@ -13,11 +13,15 @@
# License for the specific language governing permissions and limitations
# under the License.
+import netaddr
+
+from neutron_lib import constants
from tempest.common import compute
from tempest.common import utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
+from neutron_tempest_plugin.common import ip as ip_utils
from neutron_tempest_plugin.common import ssh
from neutron_tempest_plugin import config
from neutron_tempest_plugin.scenario import base
@@ -62,6 +66,8 @@
for vm in vms:
self.wait_for_server_active(vm['server'])
+ return vms
+
@decorators.idempotent_id('8944b90d-1766-4669-bd8a-672b5d106bb7')
def test_connectivity_through_2_routers(self):
ap1_net = self.create_network()
@@ -105,7 +111,7 @@
routes=[{"destination": ap1_subnet['cidr'],
"nexthop": ap1_wan_port['fixed_ips'][0]['ip_address']}])
- self._create_servers(ap1_internal_port, ap2_internal_port)
+ servers = self._create_servers(ap1_internal_port, ap2_internal_port)
ap1_fip = self.create_and_associate_floatingip(
ap1_internal_port['id'])
@@ -114,7 +120,8 @@
pkey=self.keypair['private_key'])
self.check_remote_connectivity(
- ap1_sshclient, ap2_internal_port['fixed_ips'][0]['ip_address'])
+ ap1_sshclient, ap2_internal_port['fixed_ips'][0]['ip_address'],
+ servers=servers)
@decorators.idempotent_id('b72c3b77-3396-4144-b05d-9cd3c0099893')
def test_connectivity_router_east_west_traffic(self):
@@ -141,7 +148,7 @@
self.create_router_interface(router['id'], subnet_1['id'])
self.create_router_interface(router['id'], subnet_2['id'])
- self._create_servers(internal_port_1, internal_port_2)
+ servers = self._create_servers(internal_port_1, internal_port_2)
fip = self.create_and_associate_floatingip(
internal_port_1['id'])
@@ -151,7 +158,7 @@
self.check_remote_connectivity(
sshclient, internal_port_2['fixed_ips'][0]['ip_address'],
- ping_count=10)
+ ping_count=10, servers=servers)
@utils.requires_ext(extension="dvr", service="network")
@decorators.idempotent_id('69d3650a-5c32-40bc-ae56-5c4c849ddd37')
@@ -161,14 +168,14 @@
Subnet is connected to dvr and non-dvr routers in the same time, test
ensures that connectivity from VM to both routers is working.
- Test scenario:
+ Test scenario: (NOTE: 10.1.0.0/24 private CIDR is used as an example)
+----------------+ +------------+
| Non-dvr router | | DVR router |
| | | |
- | 10.0.0.1 | | 10.0.0.x |
+ | 10.1.0.1 | | 10.1.0.x |
+-------+--------+ +-----+------+
| |
- | 10.0.0.0/24 |
+ | 10.1.0.0/24 |
+----------------+----------------+
|
+-+-+
@@ -176,16 +183,28 @@
+---+
where:
- 10.0.0.1 - is subnet's gateway IP address,
- 10.0.0.x - is any other IP address taken from subnet's range
+ 10.1.0.1 - is subnet's gateway IP address,
+ 10.1.0.x - is any other IP address taken from subnet's range
- Test ensures that both 10.0.0.1 and 10.0.0.x IP addresses are
+ Test ensures that both 10.1.0.1 and 10.1.0.x IP addresses are
reachable from VM.
"""
+ ext_network = self.client.show_network(self.external_network_id)
+ for ext_subnetid in ext_network['network']['subnets']:
+ ext_subnet = self.os_admin.network_client.show_subnet(ext_subnetid)
+ ext_cidr = ext_subnet['subnet']['cidr']
+ if ext_subnet['subnet']['ip_version'] == constants.IP_VERSION_4:
+ break
+ else:
+ self.fail('No IPv4 subnet was found in external network %s' %
+ ext_network['network']['id'])
+
+ subnet_cidr = ip_utils.find_valid_cidr(used_cidr=ext_cidr)
+ gw_ip = netaddr.IPAddress(subnet_cidr.first + 1)
network = self.create_network()
subnet = self.create_subnet(
- network, cidr="10.0.0.0/24", gateway="10.0.0.1")
+ network, cidr=str(subnet_cidr), gateway=str(gw_ip))
non_dvr_router = self.create_router_by_client(
tenant_id=self.client.tenant_id,
@@ -222,7 +241,7 @@
pkey=self.keypair['private_key'])
self.check_remote_connectivity(
- sshclient, '10.0.0.1', ping_count=10)
+ sshclient, str(gw_ip), ping_count=10, servers=[vm])
self.check_remote_connectivity(
sshclient, dvr_router_port['fixed_ips'][0]['ip_address'],
- ping_count=10)
+ ping_count=10, servers=[vm])
diff --git a/neutron_tempest_plugin/scenario/test_dns_integration.py b/neutron_tempest_plugin/scenario/test_dns_integration.py
index 20062c1..533f043 100644
--- a/neutron_tempest_plugin/scenario/test_dns_integration.py
+++ b/neutron_tempest_plugin/scenario/test_dns_integration.py
@@ -23,6 +23,7 @@
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
+from neutron_tempest_plugin.api import base as base_api
from neutron_tempest_plugin import config
from neutron_tempest_plugin.scenario import base
from neutron_tempest_plugin.scenario import constants
@@ -40,19 +41,19 @@
DNSMixin = object
-class DNSIntegrationTests(base.BaseTempestTestCase, DNSMixin):
+class BaseDNSIntegrationTests(base.BaseTempestTestCase, DNSMixin):
credentials = ['primary']
@classmethod
def setup_clients(cls):
- super(DNSIntegrationTests, cls).setup_clients()
+ super(BaseDNSIntegrationTests, cls).setup_clients()
cls.dns_client = cls.os_tempest.zones_client
cls.query_client = cls.os_tempest.query_client
cls.query_client.build_timeout = 30
@classmethod
def skip_checks(cls):
- super(DNSIntegrationTests, cls).skip_checks()
+ super(BaseDNSIntegrationTests, cls).skip_checks()
if not ('designate' in CONF.service_available and
CONF.service_available.designate):
raise cls.skipException("Designate support is required")
@@ -62,7 +63,7 @@
@classmethod
@utils.requires_ext(extension="dns-integration", service="network")
def resource_setup(cls):
- super(DNSIntegrationTests, cls).resource_setup()
+ super(BaseDNSIntegrationTests, cls).resource_setup()
_, cls.zone = cls.dns_client.create_zone()
cls.addClassResourceCleanup(cls.dns_client.delete_zone,
cls.zone['id'], ignore_errors=lib_exc.NotFound)
@@ -71,6 +72,7 @@
cls.network = cls.create_network(dns_domain=cls.zone['name'])
cls.subnet = cls.create_subnet(cls.network)
+ cls.subnet_v6 = cls.create_subnet(cls.network, ip_version=6)
cls.router = cls.create_router_by_client()
cls.create_router_interface(cls.router['id'], cls.subnet['id'])
cls.keypair = cls.create_keypair()
@@ -93,13 +95,16 @@
fip = self.create_floatingip(port=port)
return {'port': port, 'fip': fip, 'server': server}
- def _verify_dns_records(self, address, name):
+ def _verify_dns_records(self, address, name, found=True, record_type='A'):
+ client = self.query_client
forward = name + '.' + self.zone['name']
reverse = ipaddress.ip_address(address).reverse_pointer
- dns_waiters.wait_for_query(self.query_client, forward, 'A')
- dns_waiters.wait_for_query(self.query_client, reverse, 'PTR')
- fwd_response = self.query_client.query(forward, 'A')
- rev_response = self.query_client.query(reverse, 'PTR')
+ dns_waiters.wait_for_query(client, forward, record_type, found)
+ dns_waiters.wait_for_query(client, reverse, 'PTR', found)
+ if not found:
+ return
+ fwd_response = client.query(forward, record_type)
+ rev_response = client.query(reverse, 'PTR')
for r in fwd_response:
for rr in r.answer:
self.assertIn(address, rr.to_text())
@@ -107,15 +112,81 @@
for rr in r.answer:
self.assertIn(forward, rr.to_text())
+
+class DNSIntegrationTests(BaseDNSIntegrationTests):
@decorators.idempotent_id('850ee378-4b5a-4f71-960e-0e7b12e03a34')
def test_server_with_fip(self):
name = data_utils.rand_name('server-test')
server = self._create_server(name=name)
server_ip = server['fip']['floating_ip_address']
self._verify_dns_records(server_ip, name)
+ self.delete_floatingip(server['fip'])
+ self._verify_dns_records(server_ip, name, found=False)
@decorators.idempotent_id('a8f2fade-8d5c-40f9-80f0-3de4b8d91985')
def test_fip(self):
name = data_utils.rand_name('fip-test')
fip = self._create_floatingip_with_dns(name)
- self._verify_dns_records(fip['floating_ip_address'], name)
+ addr = fip['floating_ip_address']
+ self._verify_dns_records(addr, name)
+ self.delete_floatingip(fip)
+ self._verify_dns_records(addr, name, found=False)
+
+
+class DNSIntegrationAdminTests(BaseDNSIntegrationTests,
+ base_api.BaseAdminNetworkTest):
+
+ credentials = ['primary', 'admin']
+
+ @classmethod
+ def resource_setup(cls):
+ super(DNSIntegrationAdminTests, cls).resource_setup()
+ # TODO(jh): We should add the segmentation_id as a tempest option
+ # so that it can be changed to match the deployment if needed
+ cls.network2 = cls.create_network(dns_domain=cls.zone['name'],
+ provider_network_type='vxlan',
+ provider_segmentation_id=12345)
+ cls.subnet2 = cls.create_subnet(cls.network2)
+
+ @decorators.idempotent_id('fa6477ce-a12b-41da-b671-5a3bbdafab07')
+ def test_port_on_special_network(self):
+ name = data_utils.rand_name('port-test')
+ port = self.create_port(self.network2,
+ dns_name=name)
+ addr = port['fixed_ips'][0]['ip_address']
+ self._verify_dns_records(addr, name)
+ self.client.delete_port(port['id'])
+ self._verify_dns_records(addr, name, found=False)
+
+
+class DNSIntegrationExtraTests(BaseDNSIntegrationTests):
+
+ required_extensions = ["subnet-dns-publish-fixed-ip"]
+
+ @classmethod
+ def resource_setup(cls):
+ super(DNSIntegrationExtraTests, cls).resource_setup()
+ cls.network2 = cls.create_network()
+ cls.subnet2 = cls.create_subnet(cls.network2)
+ cls.subnet2_v6 = cls.create_subnet(cls.network2,
+ ip_version=6,
+ dns_publish_fixed_ip=True)
+
+ @decorators.idempotent_id('e10e0e5d-69ac-4172-b39f-27ab344b7f99')
+ def test_port_with_publishing_subnet(self):
+ name = data_utils.rand_name('port-test')
+ port = self.create_port(self.network2,
+ dns_domain=self.zone['name'],
+ dns_name=name)
+ fixed_ips = port['fixed_ips']
+ if fixed_ips[1]['subnet_id'] == self.subnet2_v6['id']:
+ v6_index = 1
+ else:
+ v6_index = 0
+ addr_v4 = port['fixed_ips'][1 - v6_index]['ip_address']
+ addr_v6 = port['fixed_ips'][v6_index]['ip_address']
+ self._verify_dns_records(addr_v6, name, record_type='AAAA')
+ self._verify_dns_records(addr_v4, name, found=False)
+ self.client.delete_port(port['id'])
+ self._verify_dns_records(addr_v6, name, record_type='AAAA',
+ found=False)
diff --git a/neutron_tempest_plugin/scenario/test_floatingip.py b/neutron_tempest_plugin/scenario/test_floatingip.py
index 8b8a4a1..2a137b5 100644
--- a/neutron_tempest_plugin/scenario/test_floatingip.py
+++ b/neutron_tempest_plugin/scenario/test_floatingip.py
@@ -17,6 +17,7 @@
from neutron_lib import constants as lib_constants
from neutron_lib.services.qos import constants as qos_consts
+from neutron_lib.utils import test
from tempest.common import utils
from tempest.common import waiters
from tempest.lib.common.utils import data_utils
@@ -134,10 +135,12 @@
# Check connectivity
self.check_remote_connectivity(ssh_client,
- dest_server['port']['fixed_ips'][0]['ip_address'])
+ dest_server['port']['fixed_ips'][0]['ip_address'],
+ servers=[src_server, dest_server])
if self.dest_has_fip:
self.check_remote_connectivity(ssh_client,
- dest_server['fip']['floating_ip_address'])
+ dest_server['fip']['floating_ip_address'],
+ servers=[src_server, dest_server])
class FloatingIpSameNetwork(FloatingIpTestCasesMixin,
@@ -152,7 +155,7 @@
same_network = True
- @common_utils.unstable_test("bug 1717302")
+ @test.unstable_test("bug 1717302")
@decorators.idempotent_id('05c4e3b3-7319-4052-90ad-e8916436c23b')
def test_east_west(self):
self._test_east_west()
@@ -170,7 +173,7 @@
same_network = False
- @common_utils.unstable_test("bug 1717302")
+ @test.unstable_test("bug 1717302")
@decorators.idempotent_id('f18f0090-3289-4783-b956-a0f8ac511e8b')
def test_east_west(self):
self._test_east_west()
@@ -200,7 +203,8 @@
pkey=self.keypair['private_key'],
proxy_client=proxy_client)
self.check_remote_connectivity(ssh_client,
- gateway_external_ip)
+ gateway_external_ip,
+ servers=[proxy, src_server])
class FloatingIPPortDetailsTest(FloatingIpTestCasesMixin,
@@ -213,7 +217,7 @@
def resource_setup(cls):
super(FloatingIPPortDetailsTest, cls).resource_setup()
- @common_utils.unstable_test("bug 1815585")
+ @test.unstable_test("bug 1815585")
@decorators.idempotent_id('a663aeee-dd81-492b-a207-354fd6284dbe')
def test_floatingip_port_details(self):
"""Tests the following:
@@ -375,7 +379,6 @@
self.fip['id'])['floatingip']
self.assertEqual(policy_id, fip['qos_policy_id'])
- self._create_file_for_bw_tests(ssh_client)
common_utils.wait_until_true(lambda: self._check_bw(
ssh_client,
self.fip['floating_ip_address'],
@@ -418,7 +421,8 @@
self.fip = self.create_floatingip(port=ports[0])
self.check_connectivity(self.fip['floating_ip_address'],
CONF.validation.image_ssh_user,
- self.keypair['private_key'])
+ self.keypair['private_key'],
+ servers=servers)
self.client.update_floatingip(self.fip['id'], port_id=ports[1]['id'])
def _wait_for_fip_associated():
diff --git a/neutron_tempest_plugin/scenario/test_internal_dns.py b/neutron_tempest_plugin/scenario/test_internal_dns.py
index 13ca797..d19286c 100644
--- a/neutron_tempest_plugin/scenario/test_internal_dns.py
+++ b/neutron_tempest_plugin/scenario/test_internal_dns.py
@@ -69,11 +69,14 @@
# in very long boot times.
self.check_remote_connectivity(
ssh_client, leia_port['fixed_ips'][0]['ip_address'],
- timeout=CONF.validation.ping_timeout * 10)
+ timeout=CONF.validation.ping_timeout * 10,
+ servers=[self.server, leia])
resolv_conf = ssh_client.exec_command('cat /etc/resolv.conf')
self.assertIn('openstackgate.local', resolv_conf)
self.assertNotIn('starwars', resolv_conf)
- self.check_remote_connectivity(ssh_client, 'leia')
- self.check_remote_connectivity(ssh_client, 'leia.openstackgate.local')
+ self.check_remote_connectivity(ssh_client, 'leia',
+ servers=[self.server, leia])
+ self.check_remote_connectivity(ssh_client, 'leia.openstackgate.local',
+ servers=[self.server, leia])
diff --git a/neutron_tempest_plugin/scenario/test_migration.py b/neutron_tempest_plugin/scenario/test_migration.py
index f4b918c..410c64e 100644
--- a/neutron_tempest_plugin/scenario/test_migration.py
+++ b/neutron_tempest_plugin/scenario/test_migration.py
@@ -17,6 +17,7 @@
from neutron_lib.api.definitions import portbindings as pb
from neutron_lib import constants as const
+from neutron_lib.utils import test
from tempest.common import utils
from tempest.lib import decorators
import testtools
@@ -224,7 +225,7 @@
class NetworkMigrationFromDVRHA(NetworkMigrationTestBase):
- @common_utils.unstable_test("bug 1756301")
+ @test.unstable_test("bug 1756301")
@decorators.idempotent_id('1be9b2e2-379c-40a4-a269-6687b81df691')
@testtools.skipUnless(
CONF.neutron_plugin_options.l3_agent_mode == 'dvr_snat',
@@ -233,7 +234,7 @@
self._test_migration(before_dvr=True, before_ha=True,
after_dvr=False, after_ha=False)
- @common_utils.unstable_test("bug 1756301")
+ @test.unstable_test("bug 1756301")
@decorators.idempotent_id('55957267-4e84-4314-a2f7-7cd36a2df04b')
@testtools.skipUnless(
CONF.neutron_plugin_options.l3_agent_mode == 'dvr_snat',
@@ -242,7 +243,7 @@
self._test_migration(before_dvr=True, before_ha=True,
after_dvr=False, after_ha=True)
- @common_utils.unstable_test("bug 1756301")
+ @test.unstable_test("bug 1756301")
@decorators.idempotent_id('d6bedff1-72be-4a9a-8ea2-dc037cd838e0')
@testtools.skipUnless(
CONF.neutron_plugin_options.l3_agent_mode == 'dvr_snat',
diff --git a/neutron_tempest_plugin/scenario/test_mtu.py b/neutron_tempest_plugin/scenario/test_mtu.py
index df730c6..31319ec 100644
--- a/neutron_tempest_plugin/scenario/test_mtu.py
+++ b/neutron_tempest_plugin/scenario/test_mtu.py
@@ -129,7 +129,8 @@
for fip in (fip1, fip2):
self.check_connectivity(
fip['floating_ip_address'],
- self.username, self.keypair['private_key'])
+ self.username, self.keypair['private_key'],
+ servers=[server1, server2])
return server_ssh_client1, fip1, server_ssh_client2, fip2
@testtools.skipUnless(
diff --git a/neutron_tempest_plugin/scenario/test_multicast.py b/neutron_tempest_plugin/scenario/test_multicast.py
index d511b3b..566ac95 100644
--- a/neutron_tempest_plugin/scenario/test_multicast.py
+++ b/neutron_tempest_plugin/scenario/test_multicast.py
@@ -15,6 +15,7 @@
import netaddr
from neutron_lib import constants
+from neutron_lib.utils import test
from oslo_log import log
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
@@ -110,6 +111,14 @@
'result_file': result_file}
+def get_unregistered_script(group, result_file):
+ return """#!/bin/bash
+export LC_ALL=en_US.UTF-8
+tcpdump -i any -s0 -vv host %(group)s -vvneA -s0 -l &> %(result_file)s &
+ """ % {'group': group,
+ 'result_file': result_file}
+
+
class BaseMulticastTest(object):
credentials = ['primary']
@@ -124,6 +133,7 @@
multicast_message = "Big Bang"
receiver_output_file = "/tmp/receiver_mcast_out"
sender_output_file = "/tmp/sender_mcast_out"
+ unregistered_output_file = "/tmp/unregistered_mcast_out"
@classmethod
def skip_checks(cls):
@@ -197,16 +207,16 @@
server['ssh_client'] = ssh.Client(server['fip']['floating_ip_address'],
self.username,
pkey=self.keypair['private_key'])
- self._check_python_installed_on_server(server['ssh_client'],
- server['id'])
+ self._check_cmd_installed_on_server(server['ssh_client'],
+ server['id'], PYTHON3_BIN)
return server
- def _check_python_installed_on_server(self, ssh_client, server_id):
+ def _check_cmd_installed_on_server(self, ssh_client, server_id, cmd):
try:
- ssh_client.execute_script('which %s' % PYTHON3_BIN)
+ ssh_client.execute_script('which %s' % cmd)
except exceptions.SSHScriptFailed:
raise self.skipException(
- "%s is not available on server %s" % (PYTHON3_BIN, server_id))
+ "%s is not available on server %s" % (cmd, server_id))
def _prepare_sender(self, server, mcast_address):
check_script = get_sender_script(
@@ -225,11 +235,24 @@
server['fip']['floating_ip_address'],
self.username,
pkey=self.keypair['private_key'])
- self._check_python_installed_on_server(ssh_client, server['id'])
+ self._check_cmd_installed_on_server(ssh_client, server['id'],
+ PYTHON3_BIN)
server['ssh_client'].execute_script(
'echo "%s" > ~/multicast_traffic_receiver.py' % check_script)
- @utils.unstable_test("bug 1850288")
+ def _prepare_unregistered(self, server, mcast_address):
+ check_script = get_unregistered_script(
+ group=mcast_address, result_file=self.unregistered_output_file)
+ ssh_client = ssh.Client(
+ server['fip']['floating_ip_address'],
+ self.username,
+ pkey=self.keypair['private_key'])
+ self._check_cmd_installed_on_server(ssh_client, server['id'],
+ 'tcpdump')
+ server['ssh_client'].execute_script(
+ 'echo "%s" > ~/unregistered_traffic_receiver.sh' % check_script)
+
+ @test.unstable_test("bug 1850288")
@decorators.idempotent_id('113486fc-24c9-4be4-8361-03b1c9892867')
def test_multicast_between_vms_on_same_network(self):
"""Test multicast messaging between two servers on the same network
@@ -240,9 +263,26 @@
receivers = [self._create_server() for _ in range(1)]
# Sender can be also receiver of multicast traffic
receivers.append(sender)
- self._check_multicast_conectivity(sender=sender, receivers=receivers)
+ unregistered = self._create_server()
+ self._check_multicast_conectivity(sender=sender, receivers=receivers,
+ unregistered=unregistered)
- def _check_multicast_conectivity(self, sender, receivers):
+ def _is_multicast_traffic_expected(self, mcast_address):
+ """Checks if multicast traffic is expected to arrive.
+
+ Checks if multicast traffic is expected to arrive at the
+ unregistered VM.
+
+ If IGMP snooping is enabled, multicast traffic should not be
+ flooded unless the destination IP is in the range of 224.0.0.X
+ [0].
+
+ [0] https://tools.ietf.org/html/rfc4541 (See section 2.1.2)
+ """
+ return (mcast_address.startswith('224.0.0') or not
+ CONF.neutron_plugin_options.is_igmp_snooping_enabled)
+
+ def _check_multicast_conectivity(self, sender, receivers, unregistered):
"""Test multi-cast messaging between two servers
[Sender server] -> ... some network topology ... -> [Receiver server]
@@ -256,6 +296,12 @@
path=file_path))
return msg in result
+ self._prepare_unregistered(unregistered, mcast_address)
+
+ # Run the unregistered node script
+ unregistered['ssh_client'].execute_script(
+ "bash ~/unregistered_traffic_receiver.sh", become_root=True)
+
self._prepare_sender(sender, mcast_address)
receiver_ids = []
for receiver in receivers:
@@ -294,6 +340,18 @@
for receiver_id in receiver_ids:
self.assertIn(receiver_id, replies_result)
+ # Kill the tcpdump command running on the unregistered node so
+ # tcpdump flushes its output to the output file
+ unregistered['ssh_client'].execute_script(
+ "killall tcpdump && sleep 2", become_root=True)
+
+ unregistered_result = unregistered['ssh_client'].execute_script(
+ "cat {path} || echo '{path} not exists yet'".format(
+ path=self.unregistered_output_file))
+ num_of_pckt = (1 if self._is_multicast_traffic_expected(mcast_address)
+ else 0)
+ self.assertIn('%d packets captured' % num_of_pckt, unregistered_result)
+
class MulticastTestIPv4(BaseMulticastTest, base.BaseTempestTestCase):
diff --git a/neutron_tempest_plugin/scenario/test_port_forwardings.py b/neutron_tempest_plugin/scenario/test_port_forwardings.py
index 06f175b..ab04050 100644
--- a/neutron_tempest_plugin/scenario/test_port_forwardings.py
+++ b/neutron_tempest_plugin/scenario/test_port_forwardings.py
@@ -81,27 +81,32 @@
return servers
def _test_udp_port_forwarding(self, servers):
+
+ def _message_received(server, ssh_client, expected_msg):
+ self.nc_listen(ssh_client,
+ server['port_forwarding_udp']['internal_port'],
+ constants.PROTO_NAME_UDP,
+ expected_msg,
+ [server])
+ received_msg = self.nc_client(
+ self.fip['floating_ip_address'],
+ server['port_forwarding_udp']['external_port'],
+ constants.PROTO_NAME_UDP)
+ return expected_msg in received_msg
+
for server in servers:
- msg = "%s-UDP-test" % server['name']
+ expected_msg = "%s-UDP-test" % server['name']
ssh_client = ssh.Client(
self.fip['floating_ip_address'],
CONF.validation.image_ssh_user,
pkey=self.keypair['private_key'],
port=server['port_forwarding_tcp']['external_port'])
- self.nc_listen(server,
- ssh_client,
- server['port_forwarding_udp']['internal_port'],
- constants.PROTO_NAME_UDP,
- msg)
- for server in servers:
- expected_msg = "%s-UDP-test" % server['name']
- self.assertIn(
- expected_msg, self.nc_client(
- self.fip['floating_ip_address'],
- server['port_forwarding_udp']['external_port'],
- constants.PROTO_NAME_UDP))
+ utils.wait_until_true(
+ lambda: _message_received(server, ssh_client, expected_msg),
+ exception=RuntimeError(
+ "Timed out waiting for message from server {!r} ".format(
+ server['id'])))
- @utils.unstable_test("bug 1850800")
@decorators.idempotent_id('ab40fc48-ca8d-41a0-b2a3-f6679c847bfe')
def test_port_forwarding_to_2_servers(self):
udp_sg_rule = {'protocol': constants.PROTO_NAME_UDP,
diff --git a/neutron_tempest_plugin/scenario/test_qos.py b/neutron_tempest_plugin/scenario/test_qos.py
index ba8cc88..938d2b0 100644
--- a/neutron_tempest_plugin/scenario/test_qos.py
+++ b/neutron_tempest_plugin/scenario/test_qos.py
@@ -19,8 +19,8 @@
from neutron_lib.services.qos import constants as qos_consts
from oslo_log import log as logging
from tempest.common import utils as tutils
+from tempest.common import waiters
from tempest.lib import decorators
-from tempest.lib import exceptions
from neutron_tempest_plugin.api import base as base_api
from neutron_tempest_plugin.common import ssh
@@ -70,49 +70,26 @@
credentials = ['primary', 'admin']
force_tenant_isolation = False
- FILE_SIZE = 1024 * 1024
TOLERANCE_FACTOR = 1.5
BUFFER_SIZE = 512
- COUNT = FILE_SIZE / BUFFER_SIZE
LIMIT_BYTES_SEC = (constants.LIMIT_KILO_BITS_PER_SECOND * 1024 *
TOLERANCE_FACTOR / 8.0)
- FILE_PATH = "/tmp/img"
-
NC_PORT = 1234
- FILE_DOWNLOAD_TIMEOUT = 120
-
- def _create_file_for_bw_tests(self, ssh_client):
- cmd = ("(dd if=/dev/zero bs=%(bs)d count=%(count)d of=%(file_path)s) "
- % {'bs': self.BUFFER_SIZE, 'count': self.COUNT,
- 'file_path': self.FILE_PATH})
- ssh_client.exec_command(cmd, timeout=5)
- cmd = "stat -c %%s %s" % self.FILE_PATH
- filesize = ssh_client.exec_command(cmd, timeout=5)
- if int(filesize.strip()) != self.FILE_SIZE:
- raise sc_exceptions.FileCreationFailedException(
- file=self.FILE_PATH)
-
- @staticmethod
- def _kill_nc_process(ssh_client):
- cmd = "killall -q nc"
- try:
- ssh_client.exec_command(cmd, timeout=5)
- except exceptions.SSHExecCommandFailed:
- pass
+ DOWNLOAD_DURATION = 5
+ # NOTE(mjozefcz): This makes around 10 retries.
+ CHECK_TIMEOUT = DOWNLOAD_DURATION * 10
def _check_bw(self, ssh_client, host, port, expected_bw=LIMIT_BYTES_SEC):
- self._kill_nc_process(ssh_client)
- cmd = ("(nc -ll -p %(port)d < %(file_path)s > /dev/null &)" % {
- 'port': port, 'file_path': self.FILE_PATH})
- ssh_client.exec_command(cmd, timeout=5)
+ utils.kill_nc_process(ssh_client)
+ self.ensure_nc_listen(ssh_client, port, "tcp")
# Open TCP socket to remote VM and download big file
start_time = time.time()
- socket_timeout = self.FILE_SIZE * self.TOLERANCE_FACTOR / expected_bw
- client_socket = _connect_socket(host, port, socket_timeout)
+ client_socket = _connect_socket(
+ host, port, constants.SOCKET_CONNECT_TIMEOUT)
total_bytes_read = 0
try:
- while total_bytes_read < self.FILE_SIZE:
+ while time.time() - start_time < self.DOWNLOAD_DURATION:
data = client_socket.recv(self.BUFFER_SIZE)
total_bytes_read += len(data)
@@ -122,15 +99,17 @@
LOG.debug("time_elapsed = %(time_elapsed).16f, "
"total_bytes_read = %(total_bytes_read)d, "
- "bytes_per_second = %(bytes_per_second)d",
+ "bytes_per_second = %(bytes_per_second)d, "
+ "expected_bw = %(expected_bw)d.",
{'time_elapsed': time_elapsed,
'total_bytes_read': total_bytes_read,
- 'bytes_per_second': bytes_per_second})
+ 'bytes_per_second': bytes_per_second,
+ 'expected_bw': expected_bw})
return bytes_per_second <= expected_bw
except socket.timeout:
LOG.warning('Socket timeout while reading the remote file, bytes '
'read: %s', total_bytes_read)
- self._kill_nc_process(ssh_client)
+ utils.kill_nc_process(ssh_client)
return False
finally:
client_socket.close()
@@ -160,6 +139,47 @@
shared=True)
return policy['policy']['id']
+ def _create_server_by_port(self, port=None):
+ """Launch an instance using a port interface;
+
+ In case that the given port is None, a new port is created,
+ activated and configured with inbound SSH and TCP connection.
+ """
+ # Create and activate the port that will be assigned to the instance.
+ if port is None:
+ secgroup = self.create_security_group()
+ self.create_loginable_secgroup_rule(
+ secgroup_id=secgroup['id'])
+
+ secgroup_rules = [{'protocol': 'tcp',
+ 'direction': 'ingress',
+ 'port_range_min': self.NC_PORT,
+ 'port_range_max': self.NC_PORT,
+ 'remote_ip_prefix': '0.0.0.0/0'}]
+
+ self.create_secgroup_rules(secgroup_rules,
+ secgroup['id'])
+
+ port = self.create_port(self.network,
+ security_groups=[secgroup['id']])
+ self.fip = self.create_floatingip(port=port)
+
+ keypair = self.create_keypair()
+
+ server_kwargs = {
+ 'flavor_ref': CONF.compute.flavor_ref,
+ 'image_ref': CONF.compute.image_ref,
+ 'key_name': keypair['name'],
+ 'networks': [{'port': port['id']}],
+ }
+
+ server = self.create_server(**server_kwargs)
+ self.wait_for_server_active(server['server'])
+ self.check_connectivity(self.fip['floating_ip_address'],
+ CONF.validation.image_ssh_user,
+ keypair['private_key'])
+ return server, port
+
class QoSTest(QoSTestMixin, base.BaseTempestTestCase):
@classmethod
@@ -216,16 +236,13 @@
self.os_admin.network_client.update_network(
self.network['id'], qos_policy_id=bw_limit_policy_id)
- # Create file on VM
- self._create_file_for_bw_tests(ssh_client)
-
# Basic test, Check that actual BW while downloading file
# is as expected (Original BW)
utils.wait_until_true(lambda: self._check_bw(
ssh_client,
self.fip['floating_ip_address'],
port=self.NC_PORT),
- timeout=self.FILE_DOWNLOAD_TIMEOUT,
+ timeout=self.CHECK_TIMEOUT,
sleep=1)
# As admin user update QoS rule
@@ -242,7 +259,7 @@
self.fip['floating_ip_address'],
port=self.NC_PORT,
expected_bw=QoSTest.LIMIT_BYTES_SEC * 2),
- timeout=self.FILE_DOWNLOAD_TIMEOUT,
+ timeout=self.CHECK_TIMEOUT,
sleep=1)
# Create a new QoS policy
@@ -265,7 +282,7 @@
ssh_client,
self.fip['floating_ip_address'],
port=self.NC_PORT),
- timeout=self.FILE_DOWNLOAD_TIMEOUT,
+ timeout=self.CHECK_TIMEOUT,
sleep=1)
# As admin user update QoS rule
@@ -280,6 +297,98 @@
utils.wait_until_true(lambda: self._check_bw(
ssh_client,
self.fip['floating_ip_address'],
- port=self.NC_PORT, expected_bw=QoSTest.LIMIT_BYTES_SEC * 3),
- timeout=self.FILE_DOWNLOAD_TIMEOUT,
+ port=self.NC_PORT,
+ expected_bw=QoSTest.LIMIT_BYTES_SEC * 3),
+ timeout=self.CHECK_TIMEOUT,
sleep=1)
+
+ @decorators.idempotent_id('66e5673e-0522-11ea-8d71-362b9e155667')
+ def test_attach_previously_used_port_to_new_instance(self):
+ """The test spawns new instance using port with QoS policy.
+
+ Ports with attached QoS policy could be used multiple times.
+ The policy rules have to be enforced on the new machines.
+ """
+ self.network = self.create_network()
+ self.subnet = self.create_subnet(self.network)
+ self.router = self.create_router_by_client()
+ self.create_router_interface(self.router['id'], self.subnet['id'])
+
+ vm, vm_port = self._create_server_by_port()
+
+ port_policy = self.os_admin.network_client.create_qos_policy(
+ name='port-policy',
+ description='policy for attach',
+ shared=False)['policy']
+
+ rule = self.os_admin.network_client.create_bandwidth_limit_rule(
+ policy_id=port_policy['id'],
+ max_kbps=constants.LIMIT_KILO_BITS_PER_SECOND,
+ max_burst_kbps=constants.LIMIT_KILO_BITS_PER_SECOND)[
+ 'bandwidth_limit_rule']
+
+ self.os_admin.network_client.update_port(
+ vm_port['id'], qos_policy_id=port_policy['id'])
+
+ self.os_primary.servers_client.delete_server(vm['server']['id'])
+ waiters.wait_for_server_termination(
+ self.os_primary.servers_client,
+ vm['server']['id'])
+
+ # Launch a new server using the same port with attached policy
+ self._create_server_by_port(port=vm_port)
+
+ retrieved_port = self.os_admin.network_client.show_port(
+ vm_port['id'])
+ self.assertEqual(port_policy['id'],
+ retrieved_port['port']['qos_policy_id'],
+ """The expected policy ID is {0},
+ the actual value is {1}""".
+ format(port_policy['id'],
+ retrieved_port['port']['qos_policy_id']))
+
+ retrieved_policy = self.os_admin.network_client.show_qos_policy(
+ retrieved_port['port']['qos_policy_id'])
+
+ retrieved_rule_id = retrieved_policy['policy']['rules'][0]['id']
+ self.assertEqual(rule['id'],
+ retrieved_rule_id,
+ """The expected rule ID is {0},
+ the actual value is {1}""".
+ format(rule['id'], retrieved_rule_id))
+
+ @decorators.idempotent_id('4eee64da-5646-11ea-82b4-0242ac130003')
+ def test_create_instance_using_network_with_existing_policy(self):
+ network = self.create_network()
+
+ qos_policy = self.os_admin.network_client.create_qos_policy(
+ name='network-policy',
+ shared=False)['policy']
+
+ rule = self.os_admin.network_client.create_bandwidth_limit_rule(
+ policy_id=qos_policy['id'],
+ max_kbps=constants.LIMIT_KILO_BITS_PER_SECOND,
+ max_burst_kbps=constants.LIMIT_KILO_BITS_PER_SECOND)
+
+ network = self.os_admin.network_client.update_network(
+ network['id'],
+ qos_policy_id=qos_policy['id'])['network']
+ self.setup_network_and_server(network=network)
+ retrieved_net = self.client.show_network(network['id'])
+ self.assertEqual(qos_policy['id'],
+ retrieved_net['network']['qos_policy_id'],
+ """The expected policy ID is {0},
+ the actual value is {1}""".
+ format(qos_policy['id'],
+ retrieved_net['network']['qos_policy_id']))
+
+ retrieved_policy = self.os_admin.network_client.show_qos_policy(
+ retrieved_net['network']['qos_policy_id'])
+ retrieved_rule_id = retrieved_policy['policy']['rules'][0]['id']
+
+ self.assertEqual(rule['bandwidth_limit_rule']['id'],
+ retrieved_rule_id,
+ """The expected rule ID is {0},
+ the actual value is {1}""".
+ format(rule['bandwidth_limit_rule']['id'],
+ retrieved_rule_id))
diff --git a/neutron_tempest_plugin/scenario/test_security_groups.py b/neutron_tempest_plugin/scenario/test_security_groups.py
index 7b43a7e..dc14857 100644
--- a/neutron_tempest_plugin/scenario/test_security_groups.py
+++ b/neutron_tempest_plugin/scenario/test_security_groups.py
@@ -16,9 +16,11 @@
from tempest.common import waiters
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from neutron_tempest_plugin.common import ssh
+from neutron_tempest_plugin.common import utils
from neutron_tempest_plugin import config
from neutron_tempest_plugin.scenario import base
from neutron_tempest_plugin.scenario import constants as const
@@ -30,6 +32,39 @@
credentials = ['primary', 'admin']
required_extensions = ['router', 'security-group']
+ def _verify_http_connection(self, ssh_client, ssh_server,
+ test_ip, test_port, should_pass=True):
+ """Verify if HTTP connection works using remote hosts.
+
+ :param ssh.Client ssh_client: The client host active SSH client.
+ :param ssh.Client ssh_server: The HTTP server host active SSH client.
+ :param string test_ip: IP address of HTTP server
+ :param string test_port: Port of HTTP server
+ :param bool should_pass: Whether the test should pass or not.
+
+ :return: if passed or not
+ :rtype: bool
+ """
+ utils.kill_nc_process(ssh_server)
+ url = 'http://%s:%d' % (test_ip, test_port)
+ utils.spawn_http_server(ssh_server, port=test_port, message='foo_ok')
+ try:
+ ret = utils.call_url_remote(ssh_client, url)
+ if should_pass:
+ self.assertIn('foo_ok', ret)
+ return
+ self.assertNotIn('foo_ok', ret)
+ except Exception as e:
+ if not should_pass:
+ return
+ raise e
+
+ @classmethod
+ def setup_credentials(cls):
+ super(NetworkSecGroupTest, cls).setup_credentials()
+ cls.project_id = cls.os_primary.credentials.tenant_id
+ cls.network_client = cls.os_admin.network_client
+
@classmethod
def resource_setup(cls):
super(NetworkSecGroupTest, cls).resource_setup()
@@ -40,6 +75,12 @@
cls.create_router_interface(router['id'], cls.subnet['id'])
cls.keypair = cls.create_keypair()
+ def setUp(self):
+ super(NetworkSecGroupTest, self).setUp()
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.network_client.reset_quotas, self.project_id)
+ self.network_client.update_quotas(self.project_id, security_group=-1)
+
def create_vm_testing_sec_grp(self, num_servers=2, security_groups=None,
ports=None):
"""Create instance for security group testing
@@ -100,11 +141,12 @@
# make sure ICMP connectivity works
self.check_remote_connectivity(server_ssh_clients[0], fips[1][
- 'fixed_ip_address'], should_succeed=should_succeed)
+ 'fixed_ip_address'], should_succeed=should_succeed,
+ servers=servers)
@decorators.idempotent_id('3d73ec1a-2ec6-45a9-b0f8-04a283d9d764')
def test_default_sec_grp_scenarios(self):
- server_ssh_clients, fips, _ = self.create_vm_testing_sec_grp()
+ server_ssh_clients, fips, servers = self.create_vm_testing_sec_grp()
# Check ssh connectivity when you add sec group rule, enabling ssh
self.create_loginable_secgroup_rule(
self.os_primary.network_client.list_security_groups()[
@@ -121,7 +163,8 @@
# Check ICMP connectivity between VMs without specific rule for that
# It should work though the rule is not configured
self.check_remote_connectivity(
- server_ssh_clients[0], fips[1]['fixed_ip_address'])
+ server_ssh_clients[0], fips[1]['fixed_ip_address'],
+ servers=servers)
# Check ICMP connectivity from VM to external network
subnets = self.os_admin.network_client.list_subnets(
@@ -132,7 +175,8 @@
ext_net_ip = subnet['gateway_ip']
break
self.assertTrue(ext_net_ip)
- self.check_remote_connectivity(server_ssh_clients[0], ext_net_ip)
+ self.check_remote_connectivity(server_ssh_clients[0], ext_net_ip,
+ servers=servers)
@decorators.idempotent_id('3d73ec1a-2ec6-45a9-b0f8-04a283d9d864')
def test_protocol_number_rule(self):
@@ -256,7 +300,8 @@
rule_list, secgroup_id=ssh_secgrp['security_group']['id'])
# verify ICMP connectivity between instances works
self.check_remote_connectivity(
- server_ssh_clients[0], fips[1]['fixed_ip_address'])
+ server_ssh_clients[0], fips[1]['fixed_ip_address'],
+ servers=servers)
# make sure ICMP connectivity doesn't work from framework
self.ping_ip_address(fips[0]['floating_ip_address'],
should_succeed=False)
@@ -293,3 +338,65 @@
self.check_connectivity(fip['floating_ip_address'],
CONF.validation.image_ssh_user,
self.keypair['private_key'])
+
+ @decorators.idempotent_id('f07d0159-8f9e-4faa-87f5-a869ab0ad489')
+ def test_multiple_ports_portrange_remote(self):
+ ssh_clients, fips, servers = self.create_vm_testing_sec_grp(
+ num_servers=3)
+ secgroups = []
+ ports = []
+
+ # Create remote and test security groups
+ for i in range(0, 2):
+ secgroups.append(
+ self.create_security_group(name='secgrp-%d' % i))
+ # configure sec groups to support SSH connectivity
+ self.create_loginable_secgroup_rule(
+ secgroup_id=secgroups[-1]['id'])
+
+ # Configure security groups, first two servers as remotes
+ for i, server in enumerate(servers):
+ port = self.client.list_ports(
+ network_id=self.network['id'], device_id=server['server'][
+ 'id'])['ports'][0]
+ ports.append(port)
+ secgroup = secgroups[0 if i in range(0, 2) else 1]
+ self.client.update_port(port['id'], security_groups=[
+ secgroup['id']])
+
+ # verify SSH functionality
+ for fip in fips:
+ self.check_connectivity(fip['floating_ip_address'],
+ CONF.validation.image_ssh_user,
+ self.keypair['private_key'])
+
+ test_ip = ports[2]['fixed_ips'][0]['ip_address']
+
+ # verify that connections are not working
+ for port in range(80, 84):
+ self._verify_http_connection(
+ ssh_clients[0],
+ ssh_clients[2],
+ test_ip, port,
+ should_pass=False)
+
+ # add two remote-group rules with port-ranges
+ rule_list = [{'protocol': constants.PROTO_NUM_TCP,
+ 'direction': constants.INGRESS_DIRECTION,
+ 'port_range_min': '80',
+ 'port_range_max': '81',
+ 'remote_group_id': secgroups[0]['id']},
+ {'protocol': constants.PROTO_NUM_TCP,
+ 'direction': constants.INGRESS_DIRECTION,
+ 'port_range_min': '82',
+ 'port_range_max': '83',
+ 'remote_group_id': secgroups[0]['id']}]
+ self.create_secgroup_rules(
+ rule_list, secgroup_id=secgroups[1]['id'])
+
+ # verify that connections are working
+ for port in range(80, 84):
+ self._verify_http_connection(
+ ssh_clients[0],
+ ssh_clients[2],
+ test_ip, port)
diff --git a/neutron_tempest_plugin/scenario/test_trunk.py b/neutron_tempest_plugin/scenario/test_trunk.py
index 6d855f1..585af06 100644
--- a/neutron_tempest_plugin/scenario/test_trunk.py
+++ b/neutron_tempest_plugin/scenario/test_trunk.py
@@ -39,7 +39,7 @@
class TrunkTest(base.BaseTempestTestCase):
- credentials = ['primary']
+ credentials = ['primary', 'admin']
force_tenant_isolation = False
@classmethod
@@ -279,10 +279,72 @@
should_succeed=False)
# allow intra-security-group traffic
- self.create_pingable_secgroup_rule(self.security_group['id'])
+ sg_rule = self.create_pingable_secgroup_rule(self.security_group['id'])
+ self.addCleanup(
+ self.os_primary.network_client.delete_security_group_rule,
+ sg_rule['id'])
self.check_remote_connectivity(
vm1.ssh_client,
- vm2.subport['fixed_ips'][0]['ip_address'])
+ vm2.subport['fixed_ips'][0]['ip_address'],
+ servers=[vm1, vm2])
+
+ @testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
+ 'Cold migration is not available.')
+ @testtools.skipUnless(CONF.compute.min_compute_nodes > 1,
+ 'Less than 2 compute nodes, skipping multinode '
+ 'tests.')
+ @testtools.skipUnless(
+ (CONF.neutron_plugin_options.advanced_image_ref or
+ CONF.neutron_plugin_options.default_image_is_advanced),
+ "Advanced image is required to run this test.")
+ @decorators.attr(type='slow')
+ @decorators.idempotent_id('ecd7de30-1c90-4280-b97c-1bed776d5d07')
+ def test_trunk_vm_migration(self):
+ '''Test connectivity after migration of the server with trunk
+
+ A successfully migrated server shows a VERIFY_RESIZE status that
+ requires confirmation. Need to reconfigure VLAN interface on server
+ side after migration is finished as the configuration doesn't survive
+ the reboot.
+ '''
+ vlan_tag = 10
+ vlan_network = self.create_network()
+ vlan_subnet = self.create_subnet(vlan_network)
+ sg_rule = self.create_pingable_secgroup_rule(self.security_group['id'])
+ self.addCleanup(
+ self.os_primary.network_client.delete_security_group_rule,
+ sg_rule['id'])
+
+ use_advanced_image = (
+ not CONF.neutron_plugin_options.default_image_is_advanced)
+ servers = {}
+ for role in ['migrate', 'connection_test']:
+ servers[role] = self._create_server_with_trunk_port(
+ subport_network=vlan_network,
+ segmentation_id=vlan_tag,
+ use_advanced_image=use_advanced_image)
+ for role in ['migrate', 'connection_test']:
+ self.wait_for_server_active(servers[role].server)
+ self._configure_vlan_subport(vm=servers[role],
+ vlan_tag=vlan_tag,
+ vlan_subnet=vlan_subnet)
+
+ self.check_remote_connectivity(
+ servers['connection_test'].ssh_client,
+ servers['migrate'].subport['fixed_ips'][0]['ip_address'])
+
+ client = self.os_admin.compute.ServersClient()
+ client.migrate_server(servers['migrate'].server['id'])
+ self.wait_for_server_status(servers['migrate'].server,
+ 'VERIFY_RESIZE')
+ client.confirm_resize_server(servers['migrate'].server['id'])
+ self._configure_vlan_subport(vm=servers['migrate'],
+ vlan_tag=vlan_tag,
+ vlan_subnet=vlan_subnet)
+
+ self.check_remote_connectivity(
+ servers['connection_test'].ssh_client,
+ servers['migrate'].subport['fixed_ips'][0]['ip_address'])
@testtools.skipUnless(
(CONF.neutron_plugin_options.advanced_image_ref or
@@ -308,29 +370,35 @@
use_advanced_image=use_advanced_image)
normal_network_server = self._create_server_with_network(self.network)
vlan_network_server = self._create_server_with_network(vlan_network)
+ vms = [normal_network_server, vlan_network_server]
self._configure_vlan_subport(vm=trunk_network_server,
vlan_tag=vlan_tag,
vlan_subnet=vlan_subnet)
- for vm in [normal_network_server, vlan_network_server]:
+ for vm in vms:
self.wait_for_server_active(vm.server)
# allow ICMP traffic
- self.create_pingable_secgroup_rule(self.security_group['id'])
+ sg_rule = self.create_pingable_secgroup_rule(self.security_group['id'])
+ self.addCleanup(
+ self.os_primary.network_client.delete_security_group_rule,
+ sg_rule['id'])
# Ping from trunk_network_server to normal_network_server
# via parent port
self.check_remote_connectivity(
trunk_network_server.ssh_client,
normal_network_server.port['fixed_ips'][0]['ip_address'],
- should_succeed=True)
+ should_succeed=True,
+ servers=vms)
# Ping from trunk_network_server to vlan_network_server via VLAN
# interface should success
self.check_remote_connectivity(
trunk_network_server.ssh_client,
vlan_network_server.port['fixed_ips'][0]['ip_address'],
- should_succeed=True)
+ should_succeed=True,
+ servers=vms)
# Delete the trunk
self.delete_trunk(
@@ -344,7 +412,8 @@
self.check_remote_connectivity(
trunk_network_server.ssh_client,
normal_network_server.port['fixed_ips'][0]['ip_address'],
- should_succeed=True)
+ should_succeed=True,
+ servers=vms)
# Ping from trunk_network_server to vlan_network_server via VLAN
# interface should fail after trunk deleted
diff --git a/neutron_tempest_plugin/services/network/json/network_client.py b/neutron_tempest_plugin/services/network/json/network_client.py
index 0b7dd6d..f056c7f 100644
--- a/neutron_tempest_plugin/services/network/json/network_client.py
+++ b/neutron_tempest_plugin/services/network/json/network_client.py
@@ -601,7 +601,7 @@
return service_client.ResponseBody(resp, body)
def create_qos_policy(self, name, description=None, shared=False,
- tenant_id=None, is_default=False):
+ project_id=None, is_default=False):
uri = '%s/qos/policies' % self.uri_prefix
post_data = {
'policy': {
@@ -612,8 +612,8 @@
}
if description is not None:
post_data['policy']['description'] = description
- if tenant_id is not None:
- post_data['policy']['tenant_id'] = tenant_id
+ if project_id is not None:
+ post_data['policy']['project_id'] = project_id
resp, body = self.post(uri, self.serialize(post_data))
body = self.deserialize_single(body)
self.expected_success(201, resp.status)
@@ -893,6 +893,15 @@
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp, body)
+ def list_security_group_rules(self, **kwargs):
+ uri = '%s/security-group-rules' % self.uri_prefix
+ if kwargs:
+ uri += '?' + urlparse.urlencode(kwargs, doseq=1)
+ resp, body = self.get(uri)
+ self.expected_success(200, resp.status)
+ body = jsonutils.loads(body)
+ return service_client.ResponseBody(resp, body)
+
def create_security_group_rule(self, direction, security_group_id,
**kwargs):
post_body = {'security_group_rule': kwargs}
@@ -1074,21 +1083,6 @@
self.expected_success(204, resp.status)
service_client.ResponseBody(resp, body)
- def create_network_keystone_v3(self, name, project_id, tenant_id=None):
- uri = '%s/networks' % self.uri_prefix
- post_data = {
- 'network': {
- 'name': name,
- 'project_id': project_id
- }
- }
- if tenant_id is not None:
- post_data['network']['tenant_id'] = tenant_id
- resp, body = self.post(uri, self.serialize(post_data))
- body = self.deserialize_single(body)
- self.expected_success(201, resp.status)
- return service_client.ResponseBody(resp, body)
-
def list_extensions(self, **filters):
uri = self.get_uri("extensions")
if filters:
diff --git a/neutron_tempest_plugin/vpnaas/__init__.py b/neutron_tempest_plugin/vpnaas/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/neutron_tempest_plugin/vpnaas/__init__.py
diff --git a/neutron_tempest_plugin/vpnaas/api/__init__.py b/neutron_tempest_plugin/vpnaas/api/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/neutron_tempest_plugin/vpnaas/api/__init__.py
diff --git a/neutron_tempest_plugin/vpnaas/api/base_vpnaas.py b/neutron_tempest_plugin/vpnaas/api/base_vpnaas.py
new file mode 100644
index 0000000..0e54380
--- /dev/null
+++ b/neutron_tempest_plugin/vpnaas/api/base_vpnaas.py
@@ -0,0 +1,160 @@
+# Copyright 2012 OpenStack Foundation
+# Copyright 2016 Hewlett Packard Enterprise Development Company LP
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.common.utils import data_utils
+
+from neutron_tempest_plugin.api import base
+from neutron_tempest_plugin import config
+from neutron_tempest_plugin.vpnaas.services import clients_vpnaas
+
+CONF = config.CONF
+
+
+class BaseNetworkTest(base.BaseNetworkTest):
+ @classmethod
+ def resource_setup(cls):
+ super(BaseNetworkTest, cls).resource_setup()
+ cls.vpnservices = []
+ cls.ikepolicies = []
+ cls.ipsecpolicies = []
+ cls.ipsec_site_connections = []
+ cls.endpoint_groups = []
+
+ @classmethod
+ def get_client_manager(cls, credential_type=None, roles=None,
+ force_new=None):
+ manager = super(BaseNetworkTest, cls).get_client_manager(
+ credential_type=credential_type,
+ roles=roles,
+ force_new=force_new)
+ # Neutron uses a different clients manager than the one in the Tempest
+ return clients_vpnaas.Manager(manager.credentials)
+
+ @classmethod
+ def resource_cleanup(cls):
+ if CONF.service_available.neutron:
+ # Clean up ipsec connections
+ for ipsec_site_connection in cls.ipsec_site_connections:
+ cls._try_delete_resource(
+ cls.client.delete_ipsec_site_connection,
+ ipsec_site_connection['id'])
+
+ # Clean up ipsec endpoint group
+ for endpoint_group in cls.endpoint_groups:
+ cls._try_delete_resource(cls.client.delete_endpoint_group,
+ endpoint_group['id'])
+
+ # Clean up ipsec policies
+ for ipsecpolicy in cls.ipsecpolicies:
+ cls._try_delete_resource(cls.client.delete_ipsecpolicy,
+ ipsecpolicy['id'])
+ # Clean up ike policies
+ for ikepolicy in cls.ikepolicies:
+ cls._try_delete_resource(cls.client.delete_ikepolicy,
+ ikepolicy['id'])
+ # Clean up vpn services
+ for vpnservice in cls.vpnservices:
+ cls._try_delete_resource(cls.client.delete_vpnservice,
+ vpnservice['id'])
+ super(BaseNetworkTest, cls).resource_cleanup()
+
+ @classmethod
+ def create_vpnservice(cls, subnet_id, router_id, name=None):
+ """Wrapper utility that returns a test vpn service."""
+ if name is None:
+ name = data_utils.rand_name("vpnservice-")
+ body = cls.client.create_vpnservice(
+ subnet_id=subnet_id, router_id=router_id, admin_state_up=True,
+ name=name)
+ vpnservice = body['vpnservice']
+ cls.vpnservices.append(vpnservice)
+ return vpnservice
+
+ @classmethod
+ def create_vpnservice_no_subnet(cls, router_id):
+ """Wrapper utility that returns a test vpn service."""
+ body = cls.client.create_vpnservice(
+ router_id=router_id, admin_state_up=True,
+ name=data_utils.rand_name("vpnservice-"))
+ vpnservice = body['vpnservice']
+ cls.vpnservices.append(vpnservice)
+ return vpnservice
+
+ @classmethod
+ def create_ikepolicy(cls, name):
+ """Wrapper utility that returns a test ike policy."""
+ body = cls.client.create_ikepolicy(name=name)
+ ikepolicy = body['ikepolicy']
+ cls.ikepolicies.append(ikepolicy)
+ return ikepolicy
+
+ @classmethod
+ def create_ipsecpolicy(cls, name):
+ """Wrapper utility that returns a test ipsec policy."""
+ body = cls.client.create_ipsecpolicy(name=name)
+ ipsecpolicy = body['ipsecpolicy']
+ cls.ipsecpolicies.append(ipsecpolicy)
+ return ipsecpolicy
+
+ @classmethod
+ def create_ipsec_site_connection(cls, ikepolicy_id, ipsecpolicy_id,
+ vpnservice_id, psk="secret",
+ peer_address="172.24.4.233",
+ peer_id="172.24.4.233",
+ peer_cidrs=None,
+ name=None):
+ """Wrapper utility that returns a test vpn connection."""
+ if peer_cidrs is None:
+ peer_cidrs = ['1.1.1.0/24', '2.2.2.0/24']
+ if name is None:
+ name = data_utils.rand_name("ipsec_site_connection-")
+ body = cls.client.create_ipsec_site_connection(
+ psk=psk,
+ initiator="bi-directional",
+ ipsecpolicy_id=ipsecpolicy_id,
+ admin_state_up=True,
+ mtu=1500,
+ ikepolicy_id=ikepolicy_id,
+ vpnservice_id=vpnservice_id,
+ peer_address=peer_address,
+ peer_id=peer_id,
+ peer_cidrs=peer_cidrs,
+ name=name)
+ ipsec_site_connection = body['ipsec_site_connection']
+ cls.ipsec_site_connections.append(ipsec_site_connection)
+ return ipsec_site_connection
+
+ @classmethod
+ def create_endpoint_group(cls, name, type, endpoints):
+ """Wrapper utility that returns a test ipsec policy."""
+ body = cls.client.create_endpoint_group(
+ endpoints=endpoints,
+ type=type,
+ description='endpoint type:' + type,
+ name=name)
+ endpoint_group = body['endpoint_group']
+ cls.endpoint_groups.append(endpoint_group)
+ return endpoint_group
+
+
+class BaseAdminNetworkTest(BaseNetworkTest):
+ credentials = ['primary', 'admin']
+
+ @classmethod
+ def setup_clients(cls):
+ super(BaseAdminNetworkTest, cls).setup_clients()
+ cls.admin_client = cls.os_admin.network_client
+ cls.identity_admin_client = cls.os_admin.tenants_client
diff --git a/neutron_tempest_plugin/vpnaas/api/test_vpnaas.py b/neutron_tempest_plugin/vpnaas/api/test_vpnaas.py
new file mode 100644
index 0000000..ab48a2f
--- /dev/null
+++ b/neutron_tempest_plugin/vpnaas/api/test_vpnaas.py
@@ -0,0 +1,910 @@
+# Copyright 2012,2016 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from neutron_lib.db import constants as db_const
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+from tempest import test
+
+from neutron_tempest_plugin import config
+from neutron_tempest_plugin.vpnaas.api import base_vpnaas as base
+
+CONF = config.CONF
+
+_LONG_NAME = 'x' * (db_const.NAME_FIELD_SIZE + 1)
+_LONG_DESCRIPTION = 'y' * (db_const.DESCRIPTION_FIELD_SIZE + 1)
+
+
+class VPNaaSTestJSON(base.BaseAdminNetworkTest):
+
+ """VPNaaS API tests.
+
+ Tests the following operations in the Neutron API using the REST client for
+ Neutron:
+ List, Show, Create, Delete, and Update VPN Service
+ List, Show, Create, Delete, and Update IKE policy
+ List, Show, Create, Delete, and Update IPSec policy
+ """
+
+ @classmethod
+ def resource_setup(cls):
+ if not test.is_extension_enabled('vpnaas', 'network'):
+ msg = "vpnaas extension not enabled."
+ raise cls.skipException(msg)
+ super(VPNaaSTestJSON, cls).resource_setup()
+ cls.ext_net_id = CONF.network.public_network_id
+ network_name = data_utils.rand_name('network-')
+ cls.network = cls.create_network(network_name)
+ cls.subnet = cls.create_subnet(cls.network)
+ cls.router = cls.create_router(
+ data_utils.rand_name("router"),
+ external_network_id=CONF.network.public_network_id)
+ cls.create_router_interface(cls.router['id'], cls.subnet['id'])
+ cls.vpnservice = cls.create_vpnservice(cls.subnet['id'],
+ cls.router['id'])
+ vpnservice2 = cls.create_vpnservice_no_subnet(cls.router['id'])
+ cls.vpnservice_no_subnet = vpnservice2
+
+ cls.ikepolicy = cls.create_ikepolicy(
+ data_utils.rand_name("ike-policy-"))
+ cls.ipsecpolicy = cls.create_ipsecpolicy(
+ data_utils.rand_name("ipsec-policy-"))
+
+ cls.endpoint_group_local = cls.create_endpoint_group(
+ data_utils.rand_name("endpoint-group-local-"),
+ 'subnet',
+ cls.subnet['id'])
+
+ cls.endpoint_group_remote = cls.create_endpoint_group(
+ data_utils.rand_name("endpoint-group-remote-"),
+ 'cidr',
+ ["10.101.0.0/24", "10.102.0.0/24"])
+
+ cls.ipsec_site_connection = cls.create_ipsec_site_connection(
+ cls.ikepolicy['id'],
+ cls.ipsecpolicy['id'],
+ cls.vpnservice['id'])
+
+ def _delete_ike_policy(self, ike_policy_id):
+ # Deletes a ike policy and verifies if it is deleted or not
+ ike_list = list()
+ all_ike = self.client.list_ikepolicies()
+ for ike in all_ike['ikepolicies']:
+ ike_list.append(ike['id'])
+ if ike_policy_id in ike_list:
+ self.client.delete_ikepolicy(ike_policy_id)
+ # Asserting that the policy is not found in list after deletion
+ ikepolicies = self.client.list_ikepolicies()
+ ike_id_list = list()
+ for i in ikepolicies['ikepolicies']:
+ ike_id_list.append(i['id'])
+ self.assertNotIn(ike_policy_id, ike_id_list)
+
+ def _delete_ipsec_policy(self, ipsec_policy_id):
+        # Deletes an ipsec policy if it exists
+ try:
+ self.client.delete_ipsecpolicy(ipsec_policy_id)
+
+ except lib_exc.NotFound:
+ pass
+
+ def _delete_ipsec_site_connection(self, conn_id):
+ # Deletes an ipsec site connection if it exists
+ try:
+ self.client.delete_ipsec_site_connection(conn_id)
+ except lib_exc.NotFound:
+ pass
+
+ def _assertExpected(self, expected, actual):
+        # Check that the expected keys/values exist in the actual response body
+ for key, value in expected.items():
+ self.assertIn(key, actual)
+ self.assertEqual(value, actual[key])
+
+ def _delete_vpn_service(self, vpn_service_id):
+ self.client.delete_vpnservice(vpn_service_id)
+ # Asserting if vpn service is found in the list after deletion
+ body = self.client.list_vpnservices()
+ vpn_services = [vs['id'] for vs in body['vpnservices']]
+ self.assertNotIn(vpn_service_id, vpn_services)
+
+ def _delete_endpoint_group(self, endpoint_group_id):
+        # Deletes an endpoint-group and verifies that it is deleted
+ endpoint_group_list = list()
+ all_endpoint = self.client.list_endpoint_groups()
+ for endpoint in all_endpoint['endpoint_groups']:
+ endpoint_group_list.append(endpoint['id'])
+ if endpoint_group_id in endpoint_group_list:
+ self.client.delete_endpoint_group(endpoint_group_id)
+ # Asserting that the endpoint is not found in list after deletion
+            endpoint_group = self.client.list_endpoint_groups()
+            endpoint_group_list = [
+                e['id'] for e in endpoint_group['endpoint_groups']]
+            self.assertNotIn(endpoint_group_id, endpoint_group_list)
+
+ def _get_tenant_id(self):
+ """Returns the tenant_id of the client current user"""
+ return self.client.tenant_id
+
+ @decorators.attr(type='smoke')
+ @decorators.idempotent_id('74dcf2d3-a40e-4a6c-a25a-747d764bee81')
+ def test_admin_create_ipsec_policy_for_tenant(self):
+ tenant_id = self._get_tenant_id()
+ # Create IPSec policy for the newly created tenant
+ name = data_utils.rand_name('ipsec-policy')
+ body = (self.admin_client.
+ create_ipsecpolicy(name=name, tenant_id=tenant_id))
+ ipsecpolicy = body['ipsecpolicy']
+ self.assertIsNotNone(ipsecpolicy['id'])
+ self.addCleanup(self.admin_client.delete_ipsecpolicy,
+ ipsecpolicy['id'])
+
+ # Assert that created ipsec policy is found in API list call
+ body = self.client.list_ipsecpolicies()
+ ipsecpolicies = [policy['id'] for policy in body['ipsecpolicies']]
+ self.assertIn(ipsecpolicy['id'], ipsecpolicies)
+
+ @decorators.attr(type='smoke')
+ @decorators.idempotent_id('016b1861-fe55-4184-ba3c-e049ebbeb570')
+ def test_admin_create_vpn_service_for_tenant(self):
+ tenant_id = self._get_tenant_id()
+
+ # Create vpn service for the newly created tenant
+ network2 = self.create_network()
+ subnet2 = self.create_subnet(network2)
+ router2 = self.create_router(data_utils.rand_name('router-'),
+ external_network_id=self.ext_net_id)
+ self.create_router_interface(router2['id'], subnet2['id'])
+ name = data_utils.rand_name('vpn-service')
+ body = self.admin_client.create_vpnservice(
+ subnet_id=subnet2['id'],
+ router_id=router2['id'],
+ name=name,
+ admin_state_up=True,
+ tenant_id=tenant_id)
+ vpnservice = body['vpnservice']
+ self.assertIsNotNone(vpnservice['id'])
+ self.addCleanup(self.admin_client.delete_vpnservice, vpnservice['id'])
+ # Assert that created vpnservice is found in API list call
+ body = self.client.list_vpnservices()
+ vpn_services = [vs['id'] for vs in body['vpnservices']]
+ self.assertIn(vpnservice['id'], vpn_services)
+
+ @decorators.attr(type='smoke')
+ @decorators.idempotent_id('8f33c292-558d-4fdb-b32c-ab1677e8bdc8')
+ def test_admin_create_ike_policy_for_tenant(self):
+ tenant_id = self._get_tenant_id()
+
+ # Create IKE policy for the newly created tenant
+ name = data_utils.rand_name('ike-policy')
+ body = (self.admin_client.
+ create_ikepolicy(name=name, ike_version="v1",
+ encryption_algorithm="aes-128",
+ auth_algorithm="sha1",
+ tenant_id=tenant_id))
+ ikepolicy = body['ikepolicy']
+ self.assertIsNotNone(ikepolicy['id'])
+ self.addCleanup(self.admin_client.delete_ikepolicy, ikepolicy['id'])
+
+ # Assert that created ike policy is found in API list call
+ body = self.client.list_ikepolicies()
+ ikepolicies = [ikp['id'] for ikp in body['ikepolicies']]
+ self.assertIn(ikepolicy['id'], ikepolicies)
+
+ @decorators.attr(type='smoke')
+ @decorators.idempotent_id('2997641b-3f2f-4fdf-9af6-bfb9997ecd3b')
+ def test_list_vpn_services(self):
+ # Verify the VPN service exists in the list of all VPN services
+ body = self.client.list_vpnservices()
+ vpnservices = body['vpnservices']
+ self.assertIn(self.vpnservice['id'], [v['id'] for v in vpnservices])
+
+ @decorators.attr(type='smoke')
+ @decorators.idempotent_id('8780a28a-deb2-40b0-8433-66f37005c058')
+ def test_create_update_delete_vpn_service(self):
+ # Creates a VPN service and sets up deletion
+ network1 = self.create_network()
+ subnet1 = self.create_subnet(network1)
+ router1 = self.create_router(data_utils.rand_name('router-'),
+ external_network_id=self.ext_net_id)
+ self.create_router_interface(router1['id'], subnet1['id'])
+ name = data_utils.rand_name('vpn-service1')
+ body = self.client.create_vpnservice(subnet_id=subnet1['id'],
+ router_id=router1['id'],
+ name=name,
+ admin_state_up=True)
+ vpnservice = body['vpnservice']
+ self.addCleanup(self._delete_vpn_service, vpnservice['id'])
+ # Assert if created vpnservices are not found in vpnservices list
+ body = self.client.list_vpnservices()
+ vpn_services = [vs['id'] for vs in body['vpnservices']]
+ self.assertIsNotNone(vpnservice['id'])
+ self.assertIn(vpnservice['id'], vpn_services)
+
+ # TODO(raies): implement logic to update vpnservice
+ # VPNaaS client function to update is implemented.
+ # But precondition is that current state of vpnservice
+ # should be "ACTIVE" not "PENDING*"
+
+ @decorators.attr(type='smoke')
+ @decorators.idempotent_id('631d33ec-8d34-49e1-9e53-576959ea2c57')
+ def test_show_vpn_service(self):
+ # Verifies the details of a vpn service
+ body = self.client.show_vpnservice(self.vpnservice['id'])
+ vpnservice = body['vpnservice']
+ self.assertEqual(self.vpnservice['id'], vpnservice['id'])
+ self.assertEqual(self.vpnservice['name'], vpnservice['name'])
+ self.assertEqual(self.vpnservice['description'],
+ vpnservice['description'])
+ self.assertEqual(self.vpnservice['router_id'], vpnservice['router_id'])
+ self.assertEqual(self.vpnservice['subnet_id'], vpnservice['subnet_id'])
+ self.assertEqual(self.vpnservice['tenant_id'], vpnservice['tenant_id'])
+ valid_status = ["ACTIVE", "DOWN", "BUILD", "ERROR", "PENDING_CREATE",
+ "PENDING_UPDATE", "PENDING_DELETE"]
+ self.assertIn(vpnservice['status'], valid_status)
+
+ @decorators.attr(type='smoke')
+ @decorators.idempotent_id('b6a665cf-d3df-417f-9477-bd0ef2c56c36')
+ def test_list_ike_policies(self):
+ # Verify the ike policy exists in the list of all IKE policies
+ body = self.client.list_ikepolicies()
+ ikepolicies = body['ikepolicies']
+ self.assertIn(self.ikepolicy['id'], [i['id'] for i in ikepolicies])
+
+ @decorators.attr(type='smoke')
+ @decorators.idempotent_id('d9ecb858-7136-4ef5-abba-325c84c57d78')
+ def test_create_update_delete_ike_policy(self):
+ # Creates a IKE policy
+ name = data_utils.rand_name('ike-policy')
+ body = (self.client.create_ikepolicy(
+ name=name,
+ ike_version="v1",
+ encryption_algorithm="aes-128",
+ auth_algorithm="sha1"))
+ ikepolicy = body['ikepolicy']
+ self.assertIsNotNone(ikepolicy['id'])
+ self.addCleanup(self._delete_ike_policy, ikepolicy['id'])
+
+ # Update IKE Policy
+ new_ike = {'name': data_utils.rand_name("New-IKE"),
+ 'description': "Updated ike policy",
+ 'encryption_algorithm': "aes-256",
+ 'ike_version': "v2",
+ 'pfs': "group14",
+ 'lifetime': {'units': "seconds", 'value': 2000}}
+ self.client.update_ikepolicy(ikepolicy['id'], **new_ike)
+ # Confirm that update was successful by verifying using 'show'
+ body = self.client.show_ikepolicy(ikepolicy['id'])
+ ike_policy = body['ikepolicy']
+ for key, value in new_ike.items():
+ self.assertIn(key, ike_policy)
+ self.assertEqual(value, ike_policy[key])
+
+ # Verification of ike policy delete
+ self.client.delete_ikepolicy(ikepolicy['id'])
+ body = self.client.list_ikepolicies()
+ ikepolicies = [ikp['id'] for ikp in body['ikepolicies']]
+ self.assertNotIn(ike_policy['id'], ikepolicies)
+
+ @decorators.attr(type='smoke')
+ @decorators.idempotent_id('5efd625b-41f2-4b80-90e0-81786f3e3309')
+ def test_show_ike_policy(self):
+ # Verifies the details of a ike policy
+ body = self.client.show_ikepolicy(self.ikepolicy['id'])
+ ikepolicy = body['ikepolicy']
+ self.assertEqual(self.ikepolicy['id'], ikepolicy['id'])
+ self.assertEqual(self.ikepolicy['name'], ikepolicy['name'])
+ self.assertEqual(self.ikepolicy['description'],
+ ikepolicy['description'])
+ self.assertEqual(self.ikepolicy['encryption_algorithm'],
+ ikepolicy['encryption_algorithm'])
+ self.assertEqual(self.ikepolicy['auth_algorithm'],
+ ikepolicy['auth_algorithm'])
+ self.assertEqual(self.ikepolicy['tenant_id'],
+ ikepolicy['tenant_id'])
+ self.assertEqual(self.ikepolicy['pfs'],
+ ikepolicy['pfs'])
+ self.assertEqual(self.ikepolicy['phase1_negotiation_mode'],
+ ikepolicy['phase1_negotiation_mode'])
+ self.assertEqual(self.ikepolicy['ike_version'],
+ ikepolicy['ike_version'])
+
+ @decorators.attr(type='smoke')
+ @decorators.idempotent_id('c3f7a459-3ccb-42b3-8fab-5ad7a5a791b1')
+ def test_list_ipsec_policies(self):
+ # Verify the ipsec policy exists in the list of all ipsec policies
+ body = self.client.list_ipsecpolicies()
+ ipsecpolicies = body['ipsecpolicies']
+ self.assertIn(self.ipsecpolicy['id'], [i['id'] for i in ipsecpolicies])
+
+ @decorators.attr(type='smoke')
+ @decorators.idempotent_id('e3952941-9b9e-48fc-9f36-fc41707a16b4')
+ def test_create_update_delete_ipsec_policy(self):
+ # Creates an ipsec policy
+ ipsec_policy_body = {'name': data_utils.rand_name('ipsec-policy'),
+ 'pfs': 'group5',
+ 'encryption_algorithm': "aes-128",
+ 'auth_algorithm': 'sha1'}
+ resp_body = self.client.create_ipsecpolicy(**ipsec_policy_body)
+ ipsecpolicy = resp_body['ipsecpolicy']
+ self.addCleanup(self._delete_ipsec_policy, ipsecpolicy['id'])
+ self._assertExpected(ipsec_policy_body, ipsecpolicy)
+ # Verification of ipsec policy update
+ new_ipsec = {'description': 'Updated ipsec policy',
+ 'pfs': 'group2',
+ 'name': data_utils.rand_name("New-IPSec"),
+ 'encryption_algorithm': "aes-256",
+ 'lifetime': {'units': "seconds", 'value': '2000'}}
+ body = self.client.update_ipsecpolicy(ipsecpolicy['id'],
+ **new_ipsec)
+ updated_ipsec_policy = body['ipsecpolicy']
+ self._assertExpected(new_ipsec, updated_ipsec_policy)
+ # Verification of ipsec policy delete
+ self.client.delete_ipsecpolicy(ipsecpolicy['id'])
+ self.assertRaises(lib_exc.NotFound,
+ self.client.delete_ipsecpolicy, ipsecpolicy['id'])
+
+ @decorators.attr(type='smoke')
+ @decorators.idempotent_id('86d68b8d-5935-46db-b096-235f70825678')
+ def test_show_ipsec_policy(self):
+ # Verifies the details of an ipsec policy
+ body = self.client.show_ipsecpolicy(self.ipsecpolicy['id'])
+ ipsecpolicy = body['ipsecpolicy']
+ self._assertExpected(self.ipsecpolicy, ipsecpolicy)
+
+ @decorators.attr(type=['negative', 'smoke'])
+ @decorators.idempotent_id('95a4a55a-3a10-4f53-9e8a-45be359a245e')
+ def test_create_vpnservice_long_name(self):
+ """Test excessively long name.
+
+ Without REST checks, this call would return 500 INTERNAL SERVER
+ error on internal db failure instead.
+ """
+ name = _LONG_NAME
+ self.assertRaises(
+ lib_exc.BadRequest, self.client.create_vpnservice,
+ subnet_id=self.subnet['id'], router_id=self.router['id'],
+ name=name, admin_state_up=True)
+
+ @decorators.attr(type=['negative', 'smoke'])
+ @decorators.idempotent_id('6e88b4fb-de5e-464e-97db-a04c68070589')
+ def test_create_vpnservice_long_description(self):
+ name = data_utils.rand_name('vpn-service1')
+ description = _LONG_DESCRIPTION
+ self.assertRaises(
+ lib_exc.BadRequest, self.client.create_vpnservice,
+ subnet_id=self.subnet['id'], router_id=self.router['id'],
+ name=name, description=description, admin_state_up=True)
+
+ @decorators.attr(type='smoke')
+ @decorators.idempotent_id('fdb5a215-0671-449b-91e4-dd58fb223947')
+ def test_list_vpn_connections(self):
+ # Verify the VPN service exists in the list of all VPN services
+ body = self.client.list_ipsec_site_connections()
+ ipsec_site_connections = body['ipsec_site_connections']
+ self.assertIn(self.ipsec_site_connection['id'],
+ [v['id'] for v in ipsec_site_connections])
+
+ @decorators.attr(type='smoke')
+ @decorators.idempotent_id('92cfbb15-4299-4fe9-a4e1-f11167e3b057')
+ def test_create_delete_vpn_connection_with_legacy_mode(self):
+ # Verify create VPN connection
+ name = data_utils.rand_name("ipsec_site_connection-")
+ body = self.client.create_ipsec_site_connection(
+ ipsecpolicy_id=self.ipsecpolicy['id'],
+ ikepolicy_id=self.ikepolicy['id'],
+ vpnservice_id=self.vpnservice['id'],
+ peer_address="172.24.4.233",
+ peer_id="172.24.4.233",
+ peer_cidrs=['10.1.1.0/24', '10.2.2.0/24'],
+ name=name,
+ mtu=1500,
+ admin_state_up=True,
+ initiator="bi-directional",
+ psk="secret")
+ ipsec_site_connection = body['ipsec_site_connection']
+ self.assertEqual(ipsec_site_connection['name'], name)
+ self.assertEqual(ipsec_site_connection['mtu'], 1500)
+ self.addCleanup(self._delete_ipsec_site_connection,
+ ipsec_site_connection['id'])
+
+ # Verification of IPsec connection delete
+ self.client.delete_ipsec_site_connection(ipsec_site_connection['id'])
+ body = self.client.list_ipsec_site_connections()
+ ipsec_site_connections = body['ipsec_site_connections']
+ self.assertNotIn(ipsec_site_connection['id'],
+ [v['id'] for v in ipsec_site_connections])
+
+ @decorators.attr(type=['negative', 'smoke'])
+ @decorators.idempotent_id('e9977b38-9cd8-4aa6-8bd5-ba15f41c368c')
+ def test_create_vpn_connection_missing_peer_cidr(self):
+ # Verify create VPN connection with JSON missing peer cidr
+ # in legacy mode
+ name = data_utils.rand_name("ipsec_site_connection-")
+ self.assertRaises(
+ lib_exc.BadRequest,
+ self.client.create_ipsec_site_connection,
+ ipsecpolicy_id=self.ipsecpolicy['id'],
+ ikepolicy_id=self.ikepolicy['id'],
+ vpnservice_id=self.vpnservice['id'],
+ peer_address="172.24.4.233",
+ peer_id="172.24.4.233",
+ name=name,
+ mtu=1500,
+ admin_state_up=True,
+ initiator="bi-directional",
+ psk="secret")
+
+ @decorators.attr(type=['negative', 'smoke'])
+ @decorators.idempotent_id('06a268fa-c53b-4cd1-9350-b83892e4a394')
+ def test_create_vpn_service_subnet_not_on_router(self):
+ # Verify create VPN service with a subnet not on router
+ tenant_id = self._get_tenant_id()
+
+ # Create vpn service for the newly created tenant
+ network2 = self.create_network()
+ subnet2 = self.create_subnet(network2)
+ router2 = self.create_router(data_utils.rand_name('router-'),
+ external_network_id=self.ext_net_id)
+ self.addCleanup(self.admin_client.delete_router, router2['id'])
+ self.addCleanup(self.admin_client.delete_network, network2['id'])
+ name = data_utils.rand_name('vpn-service')
+ self.assertRaises(
+ lib_exc.BadRequest,
+ self.admin_client.create_vpnservice,
+ subnet_id=subnet2['id'],
+ router_id=router2['id'],
+ name=name,
+ admin_state_up=True,
+ tenant_id=tenant_id)
+
+ @decorators.attr(type=['negative', 'smoke'])
+ @decorators.idempotent_id('7678798a-fc20-46cd-ad78-b6b3d599de18')
+ def test_create_vpn_connection_small_MTU(self):
+ # Verify create VPN connection with small MTU
+ name = data_utils.rand_name("ipsec_site_connection-")
+ self.assertRaises(
+ lib_exc.BadRequest,
+ self.client.create_ipsec_site_connection,
+ ipsecpolicy_id=self.ipsecpolicy['id'],
+ ikepolicy_id=self.ikepolicy['id'],
+ vpnservice_id=self.vpnservice['id'],
+ peer_address="172.24.4.233",
+ peer_id="172.24.4.233",
+ peer_cidrs=['10.1.1.0/24', '10.2.2.0/24'],
+ name=name,
+ mtu=63,
+ admin_state_up=True,
+ initiator="bi-directional",
+ psk="secret")
+
+ @decorators.attr(type=['negative', 'smoke'])
+ @decorators.idempotent_id('d9e1af94-1fbf-4183-b857-82328d4f4b97')
+ def test_create_vpn_connection_small_dpd(self):
+ # Verify create VPN connection with small dpd
+ name = data_utils.rand_name("ipsec_site_connection-")
+ self.assertRaises(
+ lib_exc.BadRequest,
+ self.client.create_ipsec_site_connection,
+ ipsecpolicy_id=self.ipsecpolicy['id'],
+ ikepolicy_id=self.ikepolicy['id'],
+ vpnservice_id=self.vpnservice['id'],
+ peer_address="172.24.4.233",
+ peer_id="172.24.4.233",
+ peer_cidrs=['10.1.1.0/24', '10.2.2.0/24'],
+ name=name,
+ dpd=59,
+ admin_state_up=True,
+ initiator="bi-directional",
+ psk="secret")
+
+ @decorators.attr(type=['negative', 'smoke'])
+ @decorators.idempotent_id('ea6ae270-9ea4-4446-9022-b7ef7d986762')
+ def test_create_vpn_connection_wrong_peer_cidr(self):
+ # Verify create VPN connection with wrong peer cidr
+ name = data_utils.rand_name("ipsec_site_connection-")
+ self.assertRaises(
+ lib_exc.BadRequest,
+ self.client.create_ipsec_site_connection,
+ ipsecpolicy_id=self.ipsecpolicy['id'],
+ ikepolicy_id=self.ikepolicy['id'],
+ vpnservice_id=self.vpnservice['id'],
+ peer_address="172.24.4.233",
+ peer_id="172.24.4.233",
+ peer_cidrs=['1.0.0.0/33'],
+ name=name,
+ mtu=1500,
+ admin_state_up=True,
+ initiator="bi-directional",
+ psk="secret")
+
+ @decorators.attr(type=['negative', 'smoke'])
+ @decorators.idempotent_id('68e17a9b-5b33-4d3e-90f2-01a6728d2c26')
+ def test_create_connection_with_cidr_and_endpoint_group(self):
+ tenant_id = self._get_tenant_id()
+ # Create endpoint group for the newly created tenant
+ name = data_utils.rand_name('endpoint_group')
+ subnet_id = self.subnet['id']
+ body = self.client.create_endpoint_group(
+ tenant_id=tenant_id,
+ name=name,
+ type='subnet',
+ endpoints=subnet_id)
+ endpoint_group_local = body['endpoint_group']
+ self.addCleanup(self._delete_endpoint_group,
+ endpoint_group_local['id'])
+ name = data_utils.rand_name('endpoint_group')
+ body = self.client.create_endpoint_group(
+ tenant_id=tenant_id,
+ name=name,
+ type='cidr',
+ endpoints=["10.103.0.0/24", "10.104.0.0/24"])
+ endpoint_group_remote = body['endpoint_group']
+ self.addCleanup(self._delete_endpoint_group,
+ endpoint_group_remote['id'])
+ # Create connections
+ name = data_utils.rand_name("ipsec_site_connection-")
+ self.assertRaises(
+ lib_exc.BadRequest,
+ self.client.create_ipsec_site_connection,
+ ipsecpolicy_id=self.ipsecpolicy['id'],
+ ikepolicy_id=self.ikepolicy['id'],
+ vpnservice_id=self.vpnservice_no_subnet['id'],
+ peer_address="172.24.4.233",
+ peer_id="172.24.4.233",
+ peer_cidr="10.1.0.0/24",
+ peer_ep_group_id=endpoint_group_local['id'],
+ local_ep_group_id=endpoint_group_remote['id'],
+ name=name,
+ admin_state_up=True,
+ initiator="bi-directional",
+ psk="secret")
+
+ @decorators.attr(type=['negative', 'smoke'])
+ @decorators.idempotent_id('d101a6a7-67d3-4418-9d9e-65c4507a9148')
+ def test_create_vpn_connection_with_missing_remote_endpoint_group(self):
+ # Verify create VPN connection without subnet in vpnservice
+ # and has only local endpoint group
+ tenant_id = self._get_tenant_id()
+ # Create endpoint group for the newly created tenant
+ tenant_id = self._get_tenant_id()
+ name = data_utils.rand_name('endpoint_group')
+ subnet_id = self.subnet['id']
+ body = self.client.create_endpoint_group(
+ tenant_id=tenant_id,
+ name=name,
+ type='subnet',
+ endpoints=subnet_id)
+ endpoint_group = body['endpoint_group']
+ self.addCleanup(self._delete_endpoint_group, endpoint_group['id'])
+ # Create connections
+ name = data_utils.rand_name("ipsec_site_connection-")
+ self.assertRaises(
+ lib_exc.BadRequest,
+ self.client.create_ipsec_site_connection,
+ ipsecpolicy_id=self.ipsecpolicy['id'],
+ ikepolicy_id=self.ikepolicy['id'],
+ vpnservice_id=self.vpnservice_no_subnet['id'],
+ peer_address="172.24.4.233",
+ peer_id="172.24.4.233",
+ local_ep_group_id=endpoint_group['id'],
+ name=name,
+ admin_state_up=True,
+ initiator="bi-directional",
+ psk="secret")
+
+ @decorators.attr(type=['negative', 'smoke'])
+ @decorators.idempotent_id('b5dd8d1e-587b-4e26-84f0-dcb814c65e57')
+ def test_create_vpn_connection_with_missing_local_endpoint_group(self):
+ # Verify create VPN connection without subnet in vpnservice
+ # and has only a peer endpoint group
+ tenant_id = self._get_tenant_id()
+ # Create endpoint group for the newly created tenant
+ tenant_id = self._get_tenant_id()
+ name = data_utils.rand_name('endpoint_group')
+ body = self.client.create_endpoint_group(
+ tenant_id=tenant_id,
+ name=name,
+ type='cidr',
+ endpoints=["10.101.0.0/24", "10.102.0.0/24"])
+ endpoint_group = body['endpoint_group']
+ self.addCleanup(self._delete_endpoint_group, endpoint_group['id'])
+ # Create connections
+ name = data_utils.rand_name("ipsec_site_connection-")
+ self.assertRaises(
+ lib_exc.BadRequest,
+ self.client.create_ipsec_site_connection,
+ ipsecpolicy_id=self.ipsecpolicy['id'],
+ ikepolicy_id=self.ikepolicy['id'],
+ vpnservice_id=self.vpnservice_no_subnet['id'],
+ peer_address="172.24.4.233",
+ peer_id="172.24.4.233",
+ peer_ep_group_id=endpoint_group['id'],
+ name=name,
+ admin_state_up=True,
+ initiator="bi-directional",
+ psk="secret")
+
+ @decorators.attr(type=['negative', 'smoke'])
+ @decorators.idempotent_id('427bbc1b-4040-42e4-b661-6395a0bd8762')
+ def test_create_connection_with_mix_ip_endpoint_group(self):
+ tenant_id = self._get_tenant_id()
+ # Create endpoint group for the newly created tenant
+ name = data_utils.rand_name('endpoint_group')
+ subnet_id = self.subnet['id']
+ body = self.client.create_endpoint_group(
+ tenant_id=tenant_id,
+ name=name,
+ type='subnet',
+ endpoints=subnet_id)
+ endpoint_group_local = body['endpoint_group']
+ self.addCleanup(self._delete_endpoint_group,
+ endpoint_group_local['id'])
+ name_v6 = data_utils.rand_name('endpoint_group')
+ body_v6 = self.client.create_endpoint_group(
+ tenant_id=tenant_id,
+ name=name_v6,
+ type='cidr',
+ endpoints=["fec0:101::/64", "fec0:102::/64"])
+ endpoint_group_remote = body_v6['endpoint_group']
+ self.addCleanup(self._delete_endpoint_group,
+ endpoint_group_remote['id'])
+ # Create connections
+ name = data_utils.rand_name("ipsec_site_connection-")
+ self.assertEqual(endpoint_group_local['type'], 'subnet')
+ self.assertEqual(endpoint_group_remote['type'], 'cidr')
+ self.assertRaises(
+ lib_exc.BadRequest,
+ self.client.create_ipsec_site_connection,
+ ipsecpolicy_id=self.ipsecpolicy['id'],
+ ikepolicy_id=self.ikepolicy['id'],
+ vpnservice_id=self.vpnservice_no_subnet['id'],
+ peer_address="172.24.4.233",
+ peer_id="172.24.4.233",
+ peer_ep_group_id=endpoint_group_local['id'],
+ local_ep_group_id=endpoint_group_remote['id'],
+ name=name,
+ admin_state_up=True,
+ initiator="bi-directional",
+ psk="secret")
+
+ @decorators.attr(type=['negative', 'smoke'])
+ @decorators.idempotent_id('eac56769-ec2d-4817-ba45-ca101d676bfc')
+ def test_create_connection_with_subnet_and_remote_endpoint_group(self):
+ tenant_id = self._get_tenant_id()
+ # Create endpoint group for the newly created tenant
+ name = data_utils.rand_name('endpoint_group')
+ body = self.client.create_endpoint_group(
+ tenant_id=tenant_id,
+ name=name,
+ type='cidr',
+ endpoints=["10.101.0.0/24", "10.102.0.0/24"])
+ endpoint_group = body['endpoint_group']
+ self.addCleanup(self._delete_endpoint_group, endpoint_group['id'])
+ # Create connections
+ name = data_utils.rand_name("ipsec_site_connection-")
+ self.assertRaises(
+ lib_exc.BadRequest,
+ self.client.create_ipsec_site_connection,
+ ipsecpolicy_id=self.ipsecpolicy['id'],
+ ikepolicy_id=self.ikepolicy['id'],
+ vpnservice_id=self.vpnservice['id'],
+ peer_address="172.24.4.233",
+ peer_ep_group_id=endpoint_group['id'],
+ name=name,
+ admin_state_up=True,
+ initiator="bi-directional",
+ psk="secret")
+
+ @decorators.attr(type=['negative', 'smoke'])
+ @decorators.idempotent_id('92cd1221-3c7c-47ea-adad-8d8c2fc0f247')
+ def test_create_connection_with_subnet_and_local_endpoint_group(self):
+ tenant_id = self._get_tenant_id()
+ # Create endpoint group for the newly created tenant
+ name = data_utils.rand_name('endpoint_group')
+ subnet_id = self.subnet['id']
+ body = self.client.create_endpoint_group(
+ tenant_id=tenant_id,
+ name=name,
+ type='subnet',
+ endpoints=subnet_id)
+ endpoint_group = body['endpoint_group']
+ self.addCleanup(self._delete_endpoint_group, endpoint_group['id'])
+ # Create connections
+ name = data_utils.rand_name("ipsec_site_connection-")
+ self.assertRaises(
+ lib_exc.BadRequest,
+ self.client.create_ipsec_site_connection,
+ ipsecpolicy_id=self.ipsecpolicy['id'],
+ ikepolicy_id=self.ikepolicy['id'],
+ vpnservice_id=self.vpnservice['id'],
+ peer_address="172.24.4.233",
+ local_ep_group_id=endpoint_group['id'],
+ name=name,
+ admin_state_up=True,
+ initiator="bi-directional",
+ psk="secret")
+
+ @decorators.attr(type='smoke')
+ @decorators.idempotent_id('e0cb784d-ebd9-4ad1-84e1-0cba915784d0')
+ def test_create_update_delete_endpoint_group(self):
+ # Creates an endpoint group
+ name = data_utils.rand_name('endpoint_group')
+ body = (self.client.create_endpoint_group(
+ name=name,
+ type='cidr',
+ endpoints=["10.2.0.0/24", "10.3.0.0/24"]))
+ endpoint_group = body['endpoint_group']
+ self.assertIsNotNone(endpoint_group['id'])
+ self.addCleanup(self._delete_endpoint_group, endpoint_group['id'])
+ # Update endpoint-group
+ body = {'name': data_utils.rand_name("new_endpoint_group")}
+ self.client.update_endpoint_group(endpoint_group['id'],
+ name=name)
+ # Confirm that update was successful by verifying using 'show'
+ body = self.client.show_endpoint_group(endpoint_group['id'])
+ endpoint_group = body['endpoint_group']
+ self.assertEqual(name, endpoint_group['name'])
+ # Verification of endpoint-group delete
+ endpoint_group_id = endpoint_group['id']
+ self.client.delete_endpoint_group(endpoint_group['id'])
+ body = self.client.list_endpoint_groups()
+ endpoint_group = [enp['id'] for enp in body['endpoint_groups']]
+ self.assertNotIn(endpoint_group_id, endpoint_group)
+
+ @decorators.attr(type='smoke')
+ @decorators.idempotent_id('69ee29e9-f72c-4d84-b6ab-c3f5576440fc')
+ def test_admin_create_endpoint_group_for_tenant(self):
+ # Create endpoint group for the newly created tenant
+ tenant_id = self._get_tenant_id()
+ name = data_utils.rand_name('endpoint_group')
+ body = (self.client.
+ create_endpoint_group(
+ name=name,
+ type='cidr',
+ endpoints=["10.2.0.0/24", "10.3.0.0/24"],
+ tenant_id=tenant_id))
+ endpoint_group = body['endpoint_group']
+ self.assertIsNotNone(endpoint_group['id'])
+ self.addCleanup(self._delete_endpoint_group, endpoint_group['id'])
+ # Verify endpoint-group delete: it must no longer appear in the list
+ endpoint_group_id = endpoint_group['id']
+ self.client.delete_endpoint_group(endpoint_group['id'])
+ body = self.client.list_endpoint_groups()
+ endpoint_group = [enp['id'] for enp in body['endpoint_groups']]
+ self.assertNotIn(endpoint_group_id, endpoint_group)
+
+ @decorators.attr(type='smoke')
+ @decorators.idempotent_id('7a584d9c-0f64-4d39-a66c-261bfa46e369')
+ def test_show_endpoint_group(self):
+ # Verifies the details of the local endpoint group
+ body = self.client.show_endpoint_group(self.endpoint_group_local['id'])
+ endpoint_group = body['endpoint_group']
+ self.assertEqual(self.endpoint_group_local['id'], endpoint_group['id'])
+ self.assertEqual(self.endpoint_group_local['name'],
+ endpoint_group['name'])
+ self.assertEqual(self.endpoint_group_local['description'],
+ endpoint_group['description'])
+ self.assertEqual(self.endpoint_group_local['tenant_id'],
+ endpoint_group['tenant_id'])
+ self.assertEqual(self.endpoint_group_local['type'],
+ endpoint_group['type'])
+ self.assertEqual(self.endpoint_group_local['endpoints'],
+ endpoint_group['endpoints'])
+ # Verifies the details of the remote endpoint group
+ body = self.client.show_endpoint_group(
+ self.endpoint_group_remote['id'])
+ endpoint_group = body['endpoint_group']
+ # endpoint_group_remote = endpoint_group['id']
+ self.assertEqual(self.endpoint_group_remote['id'],
+ endpoint_group['id'])
+ self.assertEqual(self.endpoint_group_remote['name'],
+ endpoint_group['name'])
+ self.assertEqual(self.endpoint_group_remote['description'],
+ endpoint_group['description'])
+ self.assertEqual(self.endpoint_group_remote['tenant_id'],
+ endpoint_group['tenant_id'])
+ self.assertEqual(self.endpoint_group_remote['type'],
+ endpoint_group['type'])
+ self.assertEqual(self.endpoint_group_remote['endpoints'],
+ endpoint_group['endpoints'])
+
+ @decorators.attr(type='smoke')
+ @decorators.idempotent_id('386f703e-445e-4208-abd2-df66066fd876')
+ def test_create_delete_vpn_connection_with_ep_group(self):
+ # Creates an endpoint group with type cidr
+ name = data_utils.rand_name('endpoint_group')
+ body = self.client.create_endpoint_group(
+ name=name,
+ type='cidr',
+ endpoints=["10.2.0.0/24", "10.3.0.0/24"])
+ endpoint_group_remote = body['endpoint_group']
+ self.addCleanup(self._delete_endpoint_group,
+ endpoint_group_remote['id'])
+ # Creates an endpoint group with type subnet
+ name = data_utils.rand_name('endpoint_group')
+ subnet_id = self.subnet['id']
+ body2 = self.client.create_endpoint_group(
+ name=name,
+ type='subnet',
+ endpoints=subnet_id)
+ endpoint_group_local = body2['endpoint_group']
+ self.addCleanup(self._delete_endpoint_group,
+ endpoint_group_local['id'])
+ # Verify create VPN connection
+ name = data_utils.rand_name("ipsec_site_connection-")
+ body = self.client.create_ipsec_site_connection(
+ ipsecpolicy_id=self.ipsecpolicy['id'],
+ ikepolicy_id=self.ikepolicy['id'],
+ vpnservice_id=self.vpnservice_no_subnet['id'],
+ peer_ep_group_id=endpoint_group_remote['id'],
+ local_ep_group_id=endpoint_group_local['id'],
+ name=name,
+ mtu=1500,
+ admin_state_up=True,
+ initiator="bi-directional",
+ peer_address="172.24.4.233",
+ peer_id="172.24.4.233",
+ psk="secret")
+ ipsec_site_connection = body['ipsec_site_connection']
+ self.assertEqual(ipsec_site_connection['name'], name)
+ self.assertEqual(ipsec_site_connection['mtu'], 1500)
+ self.addCleanup(self._delete_ipsec_site_connection,
+ ipsec_site_connection['id'])
+
+ # Verification of IPsec connection delete
+ self.client.delete_ipsec_site_connection(ipsec_site_connection['id'])
+ body = self.client.list_ipsec_site_connections()
+ ipsec_site_connections = body['ipsec_site_connections']
+ self.assertNotIn(ipsec_site_connection['id'],
+ [v['id'] for v in ipsec_site_connections])
+
+ @decorators.attr(type=['negative', 'smoke'])
+ @decorators.idempotent_id('0ef1b2b0-9b4b-49f1-9473-cb4a0b625543')
+ def test_fail_create_endpoint_group_when_wrong_type(self):
+ # Creates an endpoint group with the wrong type
+ name = data_utils.rand_name('endpoint_group')
+ self.assertRaises(
+ lib_exc.BadRequest,
+ self.client.create_endpoint_group,
+ name=name,
+ type='subnet',
+ endpoints=["10.2.0.0/24", "10.3.0.0/24"])
+
+ @decorators.attr(type=['negative', 'smoke'])
+ @decorators.idempotent_id('670844d2-f7b0-466c-8fd7-7d2ad6e832ee')
+ def test_fail_create_endpoint_group_when_provide_subnet_id_with_cidr(self):
+ # Creates an endpoint group providing a subnet id with type cidr
+ name = data_utils.rand_name('endpoint_group')
+ subnet_id = self.subnet['id']
+ self.assertRaises(
+ lib_exc.BadRequest,
+ self.client.create_endpoint_group,
+ name=name,
+ type='cidr',
+ endpoints=subnet_id)
+
+ @decorators.attr(type=['negative', 'smoke'])
+ @decorators.idempotent_id('d7516513-f2a2-42bd-8cea-baba73b93a22')
+ def test_fail_create_endpoint_group_with_mixed_IP_version(self):
+ # Creates an endpoint group with mixed IP versions
+ name = data_utils.rand_name('endpoint_group')
+ self.assertRaises(
+ lib_exc.BadRequest,
+ self.client.create_endpoint_group,
+ name=name,
+ type='cidr',
+ endpoints=["10.2.0.0/24", "2000::1"])
diff --git a/neutron_tempest_plugin/vpnaas/scenario/__init__.py b/neutron_tempest_plugin/vpnaas/scenario/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/neutron_tempest_plugin/vpnaas/scenario/__init__.py
diff --git a/neutron_tempest_plugin/vpnaas/scenario/base_vpnaas.py b/neutron_tempest_plugin/vpnaas/scenario/base_vpnaas.py
new file mode 100644
index 0000000..c09b40e
--- /dev/null
+++ b/neutron_tempest_plugin/vpnaas/scenario/base_vpnaas.py
@@ -0,0 +1,21 @@
+# Copyright (c) 2017 Midokura SARL
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from neutron_tempest_plugin.scenario import base
+from neutron_tempest_plugin.vpnaas.api import base_vpnaas as base_api
+
+
+class BaseTempestTestCase(base_api.BaseNetworkTest, base.BaseTempestTestCase):
+ pass
diff --git a/neutron_tempest_plugin/vpnaas/scenario/test_vpnaas.py b/neutron_tempest_plugin/vpnaas/scenario/test_vpnaas.py
new file mode 100644
index 0000000..1e5c60a
--- /dev/null
+++ b/neutron_tempest_plugin/vpnaas/scenario/test_vpnaas.py
@@ -0,0 +1,297 @@
+# Copyright (c) 2017 Midokura SARL
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import netaddr
+from oslo_config import cfg
+import testtools
+
+from tempest.common import utils
+from tempest.common import waiters
+from tempest.lib.common import ssh
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+
+from neutron_tempest_plugin import config
+from neutron_tempest_plugin.scenario import constants
+from neutron_tempest_plugin.vpnaas.scenario import base_vpnaas as base
+
+
+CONF = config.CONF
+
+# NOTE(huntxu): This is a workaround due to an upstream bug [1].
+# VPNaaS 4in6 and 6in4 is not working properly with LibreSwan 3.19+.
+# In OpenStack zuul checks the base CentOS 7 node is using Libreswan 3.20 on
+# CentOS 7.4. So we need to provide a way to skip the 4in6 and 6in4 test cases
+# for zuul.
+#
+# Once the upstream bug gets fixed and the base node uses a newer version of
+# Libreswan with that fix, we can remove this.
+#
+# [1] https://github.com/libreswan/libreswan/issues/175
+CONF.register_opt(
+ cfg.BoolOpt('skip_4in6_6in4_tests',
+ default=False,
+ help='Whether to skip 4in6 and 6in4 test cases.'),
+ 'neutron_vpnaas_plugin_options'
+)
+
+
+class Vpnaas(base.BaseTempestTestCase):
+ """Test the following topology
+
+ +-------------------+
+ | public |
+ | network |
+ | |
+ +-+---------------+-+
+ | |
+ | |
+ +-------+-+ +-+-------+
+ | LEFT | | RIGHT |
+ | router | <--VPN--> | router |
+ | | | |
+ +----+----+ +----+----+
+ | |
+ +----+----+ +----+----+
+ | LEFT | | RIGHT |
+ | network | | network |
+ | | | |
+ +---------+ +---------+
+ """
+
+ credentials = ['primary', 'admin']
+ inner_ipv6 = False
+ outer_ipv6 = False
+
+ @classmethod
+ @utils.requires_ext(extension="vpnaas", service="network")
+ def resource_setup(cls):
+ super(Vpnaas, cls).resource_setup()
+
+ # common
+ cls.keypair = cls.create_keypair()
+ cls.secgroup = cls.os_primary.network_client.create_security_group(
+ name=data_utils.rand_name('secgroup-'))['security_group']
+ cls.security_groups.append(cls.secgroup)
+ cls.create_loginable_secgroup_rule(secgroup_id=cls.secgroup['id'])
+ cls.create_pingable_secgroup_rule(secgroup_id=cls.secgroup['id'])
+ cls.ikepolicy = cls.create_ikepolicy(
+ data_utils.rand_name("ike-policy-"))
+ cls.ipsecpolicy = cls.create_ipsecpolicy(
+ data_utils.rand_name("ipsec-policy-"))
+
+ cls.extra_subnet_attributes = {}
+ if cls.inner_ipv6:
+ cls.create_v6_pingable_secgroup_rule(
+ secgroup_id=cls.secgroup['id'])
+ cls.extra_subnet_attributes['ipv6_address_mode'] = 'slaac'
+ cls.extra_subnet_attributes['ipv6_ra_mode'] = 'slaac'
+
+ # LEFT
+ cls.router = cls.create_router(
+ data_utils.rand_name('left-router'),
+ admin_state_up=True,
+ external_network_id=CONF.network.public_network_id)
+ cls.network = cls.create_network(network_name='left-network')
+ ip_version = 6 if cls.inner_ipv6 else 4
+ v4_cidr = netaddr.IPNetwork('10.20.0.0/24')
+ v6_cidr = netaddr.IPNetwork('2001:db8:0:2::/64')
+ cidr = v6_cidr if cls.inner_ipv6 else v4_cidr
+ cls.subnet = cls.create_subnet(
+ cls.network, ip_version=ip_version, cidr=cidr, name='left-subnet',
+ **cls.extra_subnet_attributes)
+ cls.create_router_interface(cls.router['id'], cls.subnet['id'])
+
+ # Gives an internal IPv4 subnet for floating IP to the left server,
+ # we use it to ssh into the left server.
+ if cls.inner_ipv6:
+ v4_subnet = cls.create_subnet(
+ cls.network, ip_version=4, name='left-v4-subnet')
+ cls.create_router_interface(cls.router['id'], v4_subnet['id'])
+
+ # RIGHT
+ cls._right_network, cls._right_subnet, cls._right_router = \
+ cls._create_right_network()
+
+ @classmethod
+ def create_v6_pingable_secgroup_rule(cls, secgroup_id=None, client=None):
+ # NOTE(huntxu): This method should be moved into the base class, along
+ # with the v4 version.
+ """This rule is intended to permit inbound ping6"""
+
+ rule_list = [{'protocol': 'ipv6-icmp',
+ 'direction': 'ingress',
+ 'port_range_min': 128, # type
+ 'port_range_max': 0, # code
+ 'ethertype': 'IPv6',
+ 'remote_ip_prefix': '::/0'}]
+ client = client or cls.os_primary.network_client
+ cls.create_secgroup_rules(rule_list, client=client,
+ secgroup_id=secgroup_id)
+
+ @classmethod
+ def _create_right_network(cls):
+ router = cls.create_router(
+ data_utils.rand_name('right-router'),
+ admin_state_up=True,
+ external_network_id=CONF.network.public_network_id)
+ network = cls.create_network(network_name='right-network')
+ v4_cidr = netaddr.IPNetwork('10.10.0.0/24')
+ v6_cidr = netaddr.IPNetwork('2001:db8:0:1::/64')
+ cidr = v6_cidr if cls.inner_ipv6 else v4_cidr
+ ip_version = 6 if cls.inner_ipv6 else 4
+ subnet = cls.create_subnet(
+ network, ip_version=ip_version, cidr=cidr, name='right-subnet',
+ **cls.extra_subnet_attributes)
+ cls.create_router_interface(router['id'], subnet['id'])
+
+ return network, subnet, router
+
+ def _create_server(self, create_floating_ip=True, network=None):
+ if network is None:
+ network = self.network
+ port = self.create_port(network, security_groups=[self.secgroup['id']])
+ if create_floating_ip:
+ fip = self.create_and_associate_floatingip(port['id'])
+ else:
+ fip = None
+ server = self.create_server(
+ flavor_ref=CONF.compute.flavor_ref,
+ image_ref=CONF.compute.image_ref,
+ key_name=self.keypair['name'],
+ networks=[{'port': port['id']}])['server']
+ waiters.wait_for_server_status(self.os_primary.servers_client,
+ server['id'],
+ constants.SERVER_STATUS_ACTIVE)
+ return {'port': port, 'fip': fip, 'server': server}
+
+ def _setup_vpn(self):
+ sites = [
+ dict(name="left", network=self.network, subnet=self.subnet,
+ router=self.router),
+ dict(name="right", network=self._right_network,
+ subnet=self._right_subnet, router=self._right_router),
+ ]
+ psk = data_utils.rand_name('mysecret')
+ for i in range(0, 2):
+ site = sites[i]
+ site['vpnservice'] = self.create_vpnservice(
+ site['subnet']['id'], site['router']['id'],
+ name=data_utils.rand_name('%s-vpnservice' % site['name']))
+ for i in range(0, 2):
+ site = sites[i]
+ vpnservice = site['vpnservice']
+ peer = sites[1 - i]
+ if self.outer_ipv6:
+ peer_address = peer['vpnservice']['external_v6_ip']
+ if not peer_address:
+ msg = "Public network must have an IPv6 subnet."
+ raise self.skipException(msg)
+ else:
+ peer_address = peer['vpnservice']['external_v4_ip']
+ self.create_ipsec_site_connection(
+ self.ikepolicy['id'],
+ self.ipsecpolicy['id'],
+ vpnservice['id'],
+ peer_address=peer_address,
+ peer_id=peer_address,
+ peer_cidrs=[peer['subnet']['cidr']],
+ psk=psk,
+ name=data_utils.rand_name(
+ '%s-ipsec-site-connection' % site['name']))
+
+ def _get_ip_on_subnet_for_port(self, port, subnet_id):
+ for fixed_ip in port['fixed_ips']:
+ if fixed_ip['subnet_id'] == subnet_id:
+ return fixed_ip['ip_address']
+ msg = "Cannot get IP address on specified subnet %s for port %r." % (
+ subnet_id, port)
+ raise self.fail(msg)
+
+ def _test_vpnaas(self):
+ # RIGHT
+ right_server = self._create_server(network=self._right_network,
+ create_floating_ip=False)
+ right_ip = self._get_ip_on_subnet_for_port(
+ right_server['port'], self._right_subnet['id'])
+
+ # LEFT
+ left_server = self._create_server()
+ ssh_client = ssh.Client(left_server['fip']['floating_ip_address'],
+ CONF.validation.image_ssh_user,
+ pkey=self.keypair['private_key'])
+
+ # check LEFT -> RIGHT connectivity via VPN
+ self.check_remote_connectivity(ssh_client, right_ip,
+ should_succeed=False)
+ self._setup_vpn()
+ self.check_remote_connectivity(ssh_client, right_ip)
+
+ # Check VPN and floating IP traffic don't interfere with each other.
+ if not self.inner_ipv6:
+ # Assign a floating-ip and check connectivity.
+ # This is NOT via VPN.
+ fip = self.create_and_associate_floatingip(
+ right_server['port']['id'])
+ self.check_remote_connectivity(ssh_client,
+ fip['floating_ip_address'])
+
+ # check LEFT -> RIGHT connectivity via VPN again, to ensure
+ # the above floating-ip doesn't interfere with the traffic.
+ self.check_remote_connectivity(ssh_client, right_ip)
+
+
+class Vpnaas4in4(Vpnaas):
+
+ @decorators.idempotent_id('aa932ab2-63aa-49cf-a2a0-8ae71ac2bc24')
+ def test_vpnaas(self):
+ self._test_vpnaas()
+
+
+class Vpnaas4in6(Vpnaas):
+ outer_ipv6 = True
+
+ @decorators.idempotent_id('2d5f18dc-6186-4deb-842b-051325bd0466')
+ @testtools.skipUnless(CONF.network_feature_enabled.ipv6,
+ 'IPv6 tests are disabled.')
+ @testtools.skipIf(
+ CONF.neutron_vpnaas_plugin_options.skip_4in6_6in4_tests,
+ 'VPNaaS 4in6 test is skipped.')
+ def test_vpnaas_4in6(self):
+ self._test_vpnaas()
+
+
+class Vpnaas6in4(Vpnaas):
+ inner_ipv6 = True
+
+ @decorators.idempotent_id('10febf33-c5b7-48af-aa13-94b4fb585a55')
+ @testtools.skipUnless(CONF.network_feature_enabled.ipv6,
+ 'IPv6 tests are disabled.')
+ @testtools.skipIf(
+ CONF.neutron_vpnaas_plugin_options.skip_4in6_6in4_tests,
+ 'VPNaaS 6in4 test is skipped.')
+ def test_vpnaas_6in4(self):
+ self._test_vpnaas()
+
+
+class Vpnaas6in6(Vpnaas):
+ inner_ipv6 = True
+ outer_ipv6 = True
+
+ @decorators.idempotent_id('8b503ffc-aeb0-4938-8dba-73c7323e276d')
+ @testtools.skipUnless(CONF.network_feature_enabled.ipv6,
+ 'IPv6 tests are disabled.')
+ def test_vpnaas_6in6(self):
+ self._test_vpnaas()
diff --git a/neutron_tempest_plugin/vpnaas/services/__init__.py b/neutron_tempest_plugin/vpnaas/services/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/neutron_tempest_plugin/vpnaas/services/__init__.py
diff --git a/neutron_tempest_plugin/vpnaas/services/clients_vpnaas.py b/neutron_tempest_plugin/vpnaas/services/clients_vpnaas.py
new file mode 100644
index 0000000..06abd4f
--- /dev/null
+++ b/neutron_tempest_plugin/vpnaas/services/clients_vpnaas.py
@@ -0,0 +1,70 @@
+# Copyright 2012 OpenStack Foundation
+# Copyright 2016 Hewlett Packard Enterprise Development Company
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from neutron_tempest_plugin.api import clients as manager
+from neutron_tempest_plugin import config
+from neutron_tempest_plugin.services.network.json import network_client
+
+
+CONF = config.CONF
+
+
+class NetworkClient(network_client.NetworkClientJSON):
+
+ def pluralize(self, resource_name):
+
+ resource_plural_map = {
+ 'ikepolicy': 'ikepolicies',
+ 'ipsecpolicy': 'ipsecpolicies'
+ }
+
+ if resource_name in resource_plural_map:
+ return resource_plural_map.get(resource_name)
+
+ return super(NetworkClient, self).pluralize(resource_name)
+
+ def get_uri(self, plural_name):
+ # get service prefix from resource name
+
+ service_resource_prefix_list = [
+ 'vpnservices',
+ 'ikepolicies',
+ 'ipsecpolicies',
+ 'ipsec_site_connections',
+ 'endpoint_groups',
+ ]
+
+ if plural_name in service_resource_prefix_list:
+ plural_name = plural_name.replace("_", "-")
+ service_prefix = 'vpn'
+ uri = '%s/%s/%s' % (self.uri_prefix, service_prefix,
+ plural_name)
+ return uri
+
+ return super(NetworkClient, self).get_uri(plural_name)
+
+
+class Manager(manager.Manager):
+ def __init__(self, credentials=None, service=None):
+ super(Manager, self).__init__(credentials=credentials)
+ self.network_client = NetworkClient(
+ self.auth_provider,
+ CONF.network.catalog_type,
+ CONF.network.region or CONF.identity.region,
+ endpoint_type=CONF.network.endpoint_type,
+ build_interval=CONF.network.build_interval,
+ build_timeout=CONF.network.build_timeout,
+ **self.default_params)
diff --git a/releasenotes/notes/drop-py-2-7-74b8379cab4cdc5a.yaml b/releasenotes/notes/drop-py-2-7-74b8379cab4cdc5a.yaml
new file mode 100644
index 0000000..7d49171
--- /dev/null
+++ b/releasenotes/notes/drop-py-2-7-74b8379cab4cdc5a.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+ - |
+ Python 2.7 support has been dropped. Last release of neutron-tempest-plugin
+ to support py2.7 is OpenStack Train. The minimum version of Python now
+ supported by neutron-tempest-plugin is Python 3.6.
diff --git a/releasenotes/notes/igmp-snooping-8d6d85608df8880a.yaml b/releasenotes/notes/igmp-snooping-8d6d85608df8880a.yaml
new file mode 100644
index 0000000..032be1f
--- /dev/null
+++ b/releasenotes/notes/igmp-snooping-8d6d85608df8880a.yaml
@@ -0,0 +1,11 @@
+---
+features:
+ - |
+ Enhanced the ``test_multicast_between_vms_on_same_network`` adding
+ IGMP test coverage to it. A new VM running tcpdump is spawned as
+ part of the test to verify whether the traffic is reaching it or not.
+upgrade:
+ - |
+ Add a new configuration option called ``is_igmp_snooping_enabled``
+ to enable/disable IGMP testing as part of the
+ ``test_multicast_between_vms_on_same_network`` test case.
diff --git a/requirements.txt b/requirements.txt
index 9a5e99f..d3fa3eb 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -5,7 +5,6 @@
pbr!=2.1.0,>=2.0.0 # Apache-2.0
neutron-lib>=1.25.0 # Apache-2.0
oslo.config>=5.2.0 # Apache-2.0
-ipaddress>=1.0.17;python_version<'3.3' # PSF
netaddr>=0.7.18 # BSD
os-ken>=0.3.0 # Apache-2.0
oslo.log>=3.36.0 # Apache-2.0
diff --git a/setup.cfg b/setup.cfg
index ff12b10..1ac729c 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -6,6 +6,7 @@
author = OpenStack
author-email = openstack-discuss@lists.openstack.org
home-page = https://opendev.org/openstack/neutron-tempest-plugin
+requires-python = >=3.6
classifier =
Environment :: OpenStack
Intended Audience :: Information Technology
@@ -13,8 +14,6 @@
License :: OSI Approved :: Apache Software License
Operating System :: POSIX :: Linux
Programming Language :: Python
- Programming Language :: Python :: 2
- Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.6
diff --git a/test-requirements.txt b/test-requirements.txt
index 8b251f6..905420c 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -7,8 +7,7 @@
coverage!=4.4,>=4.0 # Apache-2.0
flake8-import-order==0.12 # LGPLv3
python-subunit>=1.0.0 # Apache-2.0/BSD
-sphinx!=1.6.6,!=1.6.7,>=1.6.2,<2.0.0;python_version=='2.7' # BSD
-sphinx!=1.6.6,!=1.6.7,!=2.1.0,>=1.6.2;python_version>='3.4' # BSD
+sphinx!=1.6.6,!=1.6.7,!=2.1.0,>=1.6.2 # BSD
oslotest>=3.2.0 # Apache-2.0
stestr>=1.0.0 # Apache-2.0
testtools>=2.2.0 # MIT
diff --git a/tox.ini b/tox.ini
index 95352a2..19e006a 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,9 +1,11 @@
[tox]
-minversion = 2.0
+minversion = 3.1
envlist = pep8
skipsdist = True
+ignore_basepython_conflict = True
[testenv]
+basepython = python3
usedevelop = True
setenv =
VIRTUAL_ENV={envdir}
@@ -19,7 +21,6 @@
commands = stestr run --slowest {posargs}
[testenv:pep8]
-basepython = python3
commands =
sh ./tools/misc-sanity-checks.sh
flake8
@@ -27,11 +28,9 @@
sh
[testenv:venv]
-basepython = python3
commands = {posargs}
[testenv:cover]
-basepython = python3
setenv =
{[testenv]setenv}
PYTHON=coverage run --source neutron_tempest_plugin --parallel-mode
@@ -42,16 +41,13 @@
coverage xml -o cover/coverage.xml
[testenv:docs]
-basepython = python3
commands = python setup.py build_sphinx
[testenv:releasenotes]
-basepython = python3
commands =
sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
[testenv:debug]
-basepython = python3
commands = oslo_debug_helper -t neutron_tempest_plugin/ {posargs}
[flake8]