Merge "Add tempest tests for DVR router state management"
diff --git a/.zuul.yaml b/.zuul.yaml
index f5343d2..17c9e95 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -26,6 +26,7 @@
- dns-domain-ports
- dns-integration
- empty-string-filtering
+ - expose-port-forwarding-in-fip
- ext-gw-mode
- external-net
- extra_dhcp_opt
@@ -33,6 +34,7 @@
- filter-validation
- fip-port-details
- flavors
+ - floating-ip-port-forwarding
- floatingip-pools
- ip-substring-filtering
- l3-flavors
@@ -73,6 +75,7 @@
- standard-attr-tag
- standard-attr-timestamp
- subnet_allocation
+ - subnetpool-prefix-ops
- trunk
- trunk-details
- uplink-status-propagation
@@ -95,6 +98,7 @@
neutron-trunk: true
neutron-uplink-status-propagation: true
neutron-network-segment-range: true
+ neutron-port-forwarding: true
devstack_local_conf:
post-config:
$NEUTRON_CONF:
@@ -250,6 +254,7 @@
- dns-domain-ports
- dns-integration
- empty-string-filtering
+ - expose-port-forwarding-in-fip
- ext-gw-mode
- external-net
- extra_dhcp_opt
@@ -258,6 +263,7 @@
- fip-port-details
- flavors
- floatingip-pools
+ - floating-ip-port-forwarding
- ip-substring-filtering
- l3-flavors
- l3-ha
@@ -326,12 +332,14 @@
- dns-domain-ports
- dns-integration
- empty-string-filtering
+ - expose-port-forwarding-in-fip
- ext-gw-mode
- external-net
- extra_dhcp_opt
- extraroute
- fip-port-details
- flavors
+ - floating-ip-port-forwarding
- ip-substring-filtering
- l3-flavors
- l3-ha
@@ -386,7 +394,7 @@
devstack_localrc:
PHYSICAL_NETWORK: default
DOWNLOAD_DEFAULT_IMAGES: false
- IMAGE_URLS: "http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img,http://cloud-images.ubuntu.com/releases/16.04/release-20180622/ubuntu-16.04-server-cloudimg-amd64-disk1.img"
+ IMAGE_URLS: "http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img,https://cloud-images.ubuntu.com/releases/xenial/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img"
DEFAULT_IMAGE_NAME: cirros-0.3.4-i386-disk
ADVANCED_IMAGE_NAME: ubuntu-16.04-server-cloudimg-amd64-disk1
ADVANCED_INSTANCE_TYPE: ds512M
@@ -501,7 +509,7 @@
NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_dvr) | join(',') }}"
PHYSICAL_NETWORK: default
DOWNLOAD_DEFAULT_IMAGES: false
- IMAGE_URLS: "http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img,http://cloud-images.ubuntu.com/releases/16.04/release-20180622/ubuntu-16.04-server-cloudimg-amd64-disk1.img"
+ IMAGE_URLS: "http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img,https://cloud-images.ubuntu.com/releases/xenial/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img"
DEFAULT_IMAGE_NAME: cirros-0.3.4-i386-disk
ADVANCED_IMAGE_NAME: ubuntu-16.04-server-cloudimg-amd64-disk1
ADVANCED_INSTANCE_TYPE: ds512M
@@ -520,6 +528,7 @@
neutron-segments: true
neutron-trunk: true
neutron-log: true
+ neutron-port-forwarding: true
cinder: true
devstack_local_conf:
post-config:
@@ -582,6 +591,7 @@
neutron-qos: true
neutron-trunk: true
neutron-log: true
+ neutron-port-forwarding: true
devstack_local_conf:
post-config:
$NEUTRON_CONF:
@@ -715,6 +725,52 @@
files:
- ^neutron_tempest_plugin/sfc/.*$
+- job:
+ name: neutron-tempest-plugin-bgpvpn-bagpipe
+ parent: neutron-tempest-plugin
+ required-projects:
+ - openstack/networking-bagpipe
+ - openstack/networking-bgpvpn
+ vars:
+ tempest_test_regex: ^neutron_tempest_plugin\.bgpvpn
+ network_api_extensions: *api_extensions_master
+ network_api_extensions_bgpvpn:
+ - bgpvpn
+ - bgpvpn-routes-control
+ devstack_localrc:
+ NETWORKING_BGPVPN_DRIVER: "BGPVPN:BaGPipe:networking_bgpvpn.neutron.services.service_drivers.bagpipe.bagpipe_v2.BaGPipeBGPVPNDriver:default"
+ BAGPIPE_DATAPLANE_DRIVER_IPVPN: "ovs"
+ BAGPIPE_BGP_PEERS: "-"
+ USE_PYTHON3: false
+ NETWORK_API_EXTENSIONS: "{{ (network_api_extensions + network_api_extensions_bgpvpn) | join(',') }}"
+ devstack_plugins:
+ networking-bgpvpn: https://git.openstack.org/openstack/networking-bgpvpn
+ networking-bagpipe: https://git.openstack.org/openstack/networking-bagpipe
+
+- job:
+ name: neutron-tempest-plugin-fwaas
+ parent: neutron-tempest-plugin
+ timeout: 10800
+ required-projects:
+ - openstack/devstack-gate
+ - openstack/neutron-fwaas
+ - openstack/neutron
+ - openstack/neutron-tempest-plugin
+ - openstack/tempest
+ vars:
+ tempest_test_regex: ^neutron_tempest_plugin\.fwaas
+ tox_envlist: all-plugin
+ devstack_plugins:
+ neutron-fwaas: https://opendev.org/openstack/neutron-fwaas.git
+ neutron-tempest-plugin: https://opendev.org/openstack/neutron-tempest-plugin.git
+ network_api_extensions_common: *api_extensions_master
+ network_api_extensions_fwaas:
+ - fwaas_v2
+ devstack_localrc:
+ NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_fwaas) | join(',') }}"
+ files:
+ - ^neutron_tempest_plugin/fwaas/.*$
+
- project-template:
name: neutron-tempest-plugin-jobs
check:
@@ -777,3 +833,9 @@
check:
jobs:
- neutron-tempest-plugin-sfc
+ - neutron-tempest-plugin-bgpvpn-bagpipe
+ - neutron-tempest-plugin-fwaas
+ gate:
+ jobs:
+ - neutron-tempest-plugin-bgpvpn-bagpipe
+ - neutron-tempest-plugin-fwaas
diff --git a/HACKING.rst b/HACKING.rst
index cd3c49c..3392185 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -1,4 +1,4 @@
openstack Style Commandments
-===============================================
+============================
Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/
diff --git a/doc/source/conf.py b/doc/source/conf.py
index c3cdb16..3a4cff9 100755
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -23,7 +23,6 @@
extensions = [
'sphinx.ext.autodoc',
'openstackdocstheme',
- #'sphinx.ext.intersphinx',
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
@@ -37,7 +36,6 @@
master_doc = 'index'
# General information about the project.
-project = u'openstack'
copyright = u'2017, OpenStack Developers'
# openstackdocstheme options
@@ -65,15 +63,15 @@
html_theme = 'openstackdocs'
# Output file base name for HTML help builder.
-htmlhelp_basename = '%sdoc' % project
+htmlhelp_basename = 'openstackdoc'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
- '%s.tex' % project,
- u'%s Documentation' % project,
+ 'openstack.tex',
+ u'openstack Documentation',
u'OpenStack Developers', 'manual'),
]
diff --git a/neutron_tempest_plugin/api/base.py b/neutron_tempest_plugin/api/base.py
index 7b91d94..79ac4a6 100644
--- a/neutron_tempest_plugin/api/base.py
+++ b/neutron_tempest_plugin/api/base.py
@@ -117,6 +117,7 @@
cls.ports = []
cls.routers = []
cls.floating_ips = []
+ cls.port_forwardings = []
cls.metering_labels = []
cls.service_profiles = []
cls.flavors = []
@@ -144,6 +145,10 @@
for trunk in cls.trunks:
cls._try_delete_resource(cls.delete_trunk, trunk)
+ # Clean up port forwardings
+ for pf in cls.port_forwardings:
+ cls._try_delete_resource(cls.delete_port_forwarding, pf)
+
# Clean up floating IPs
for floating_ip in cls.floating_ips:
cls._try_delete_resource(cls.delete_floatingip, floating_ip)
@@ -652,6 +657,66 @@
client.delete_floatingip(floating_ip['id'])
@classmethod
+ def create_port_forwarding(cls, fip_id, internal_port_id,
+ internal_port, external_port,
+ internal_ip_address=None, protocol="tcp",
+ client=None):
+ """Creates a port forwarding.
+
+ Create a port forwarding and schedule it for later deletion.
+ If a client is passed, then it is used for deleting the PF too.
+
+ :param fip_id: The ID of the floating IP address.
+
+ :param internal_port_id: The ID of the Neutron port associated to
+ the floating IP port forwarding.
+
+ :param internal_port: The TCP/UDP/other protocol port number of the
+ Neutron port fixed IP address associated to the floating ip
+ port forwarding.
+
+ :param external_port: The TCP/UDP/other protocol port number of
+ the port forwarding floating IP address.
+
+ :param internal_ip_address: The fixed IPv4 address of the Neutron
+ port associated to the floating IP port forwarding.
+
+ :param protocol: The IP protocol used in the floating IP port
+ forwarding.
+
+ :param client: network client to be used for creating and cleaning up
+ the floating IP port forwarding.
+ """
+
+ client = client or cls.client
+
+ pf = client.create_port_forwarding(
+ fip_id, internal_port_id, internal_port, external_port,
+ internal_ip_address, protocol)['port_forwarding']
+
+ # save ID of floating IP associated with port forwarding for final
+ # cleanup
+ pf['floatingip_id'] = fip_id
+
+ # save client to be used later in cls.delete_port_forwarding
+ # for final cleanup
+ pf['client'] = client
+ cls.port_forwardings.append(pf)
+ return pf
+
+ @classmethod
+ def delete_port_forwarding(cls, pf, client=None):
+ """Delete port forwarding
+
+ :param client: Client to be used
+ If client is not given it will use the client used to create
+ the port forwarding, or cls.client if unknown.
+ """
+
+ client = client or pf.get('client') or cls.client
+ client.delete_port_forwarding(pf['floatingip_id'], pf['id'])
+
+ @classmethod
def create_router_interface(cls, router_id, subnet_id):
"""Wrapper utility that returns a router interface."""
interface = cls.client.add_router_interface_with_subnet_id(
@@ -1111,8 +1176,10 @@
def get_bare_url(self, url):
base_url = self.client.base_url
- self.assertTrue(url.startswith(base_url))
- return url[len(base_url):]
+ base_url_normalized = utils.normalize_url(base_url)
+ url_normalized = utils.normalize_url(url)
+ self.assertTrue(url_normalized.startswith(base_url_normalized))
+ return url_normalized[len(base_url_normalized):]
@classmethod
def _extract_resources(cls, body):
diff --git a/neutron_tempest_plugin/api/base_security_groups.py b/neutron_tempest_plugin/api/base_security_groups.py
index ca2c17a..952de95 100644
--- a/neutron_tempest_plugin/api/base_security_groups.py
+++ b/neutron_tempest_plugin/api/base_security_groups.py
@@ -47,8 +47,6 @@
for k, v in constants.IP_PROTOCOL_MAP.items()
if k in V4_PROTOCOL_NAMES}
-V6_PROTOCOL_LEGACY = {constants.PROTO_NAME_IPV6_ICMP_LEGACY}
-
V6_PROTOCOL_NAMES = {
'ipv6-encap',
'ipv6-frag',
@@ -60,4 +58,4 @@
V6_PROTOCOL_INTS = {v
for k, v in constants.IP_PROTOCOL_MAP.items()
- if k in (V6_PROTOCOL_NAMES | V6_PROTOCOL_LEGACY)}
+ if k in V6_PROTOCOL_NAMES}
diff --git a/neutron_tempest_plugin/api/test_port_forwardings.py b/neutron_tempest_plugin/api/test_port_forwardings.py
new file mode 100644
index 0000000..82c3a34
--- /dev/null
+++ b/neutron_tempest_plugin/api/test_port_forwardings.py
@@ -0,0 +1,196 @@
+# Copyright 2019 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.common import utils
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions
+
+from neutron_tempest_plugin.api import base
+from neutron_tempest_plugin import config
+
+CONF = config.CONF
+
+
+class PortForwardingTestJSON(base.BaseNetworkTest):
+
+ required_extensions = ['router', 'floating-ip-port-forwarding']
+
+ @classmethod
+ def resource_setup(cls):
+ super(PortForwardingTestJSON, cls).resource_setup()
+ cls.ext_net_id = CONF.network.public_network_id
+
+ # Create network, subnet, router and add interface
+ cls.network = cls.create_network()
+ cls.subnet = cls.create_subnet(cls.network)
+ cls.router = cls.create_router(data_utils.rand_name('router'),
+ external_network_id=cls.ext_net_id)
+ cls.create_router_interface(cls.router['id'], cls.subnet['id'])
+
+ @decorators.idempotent_id('829a446e-46bc-41ce-b442-6e428aeb3c19')
+ def test_port_forwarding_life_cycle(self):
+ fip = self.create_floatingip()
+ port = self.create_port(self.network)
+ # Create port forwarding for one TCP port
+ created_pf = self.create_port_forwarding(
+ fip['id'],
+ internal_port_id=port['id'],
+ internal_ip_address=port['fixed_ips'][0]['ip_address'],
+ internal_port=1111, external_port=2222, protocol="tcp")
+ self.assertEqual(1111, created_pf['internal_port'])
+ self.assertEqual(2222, created_pf['external_port'])
+ self.assertEqual('tcp', created_pf['protocol'])
+ self.assertEqual(port['fixed_ips'][0]['ip_address'],
+ created_pf['internal_ip_address'])
+
+ # Show created port forwarding
+ body = self.client.get_port_forwarding(
+ fip['id'], created_pf['id'])
+ pf = body['port_forwarding']
+ self.assertEqual(1111, pf['internal_port'])
+ self.assertEqual(2222, pf['external_port'])
+ self.assertEqual('tcp', pf['protocol'])
+ self.assertEqual(port['fixed_ips'][0]['ip_address'],
+ pf['internal_ip_address'])
+
+ # Update port forwarding
+ body = self.client.update_port_forwarding(
+ fip['id'], pf['id'], internal_port=3333)
+ pf = body['port_forwarding']
+ self.assertEqual(3333, pf['internal_port'])
+ self.assertEqual(2222, pf['external_port'])
+ self.assertEqual('tcp', pf['protocol'])
+ self.assertEqual(port['fixed_ips'][0]['ip_address'],
+ pf['internal_ip_address'])
+
+        # Now let's try to remove the Floating IP with an existing port
+        # forwarding; this should fail
+ self.assertRaises(exceptions.Conflict,
+ self.delete_floatingip, fip)
+
+ # Delete port forwarding
+ self.client.delete_port_forwarding(fip['id'], pf['id'])
+ self.assertRaises(exceptions.NotFound,
+ self.client.get_port_forwarding,
+ fip['id'], pf['id'])
+
+ # Now Floating IP should be deleted properly
+ self.delete_floatingip(fip)
+ self.assertRaises(exceptions.NotFound,
+ self.client.get_floatingip, fip['id'])
+
+ @decorators.idempotent_id('aa842070-39ef-4b09-9df9-e723934f96f8')
+ @utils.requires_ext(extension="expose-port-forwarding-in-fip",
+ service="network")
+ def test_port_forwarding_info_in_fip_details(self):
+ fip = self.create_floatingip()
+ port = self.create_port(self.network)
+
+        # Ensure that the FIP doesn't have information about any port
+ fip = self.client.show_floatingip(fip['id'])['floatingip']
+ self.assertEqual(0, len(fip['port_forwardings']))
+
+ # Now create port forwarding and ensure that it is visible in FIP's
+ # details
+ pf = self.create_port_forwarding(
+ fip['id'],
+ internal_port_id=port['id'],
+ internal_ip_address=port['fixed_ips'][0]['ip_address'],
+ internal_port=1111, external_port=2222, protocol="tcp")
+ fip = self.client.show_floatingip(fip['id'])['floatingip']
+ self.assertEqual(1, len(fip['port_forwardings']))
+ self.assertEqual(1111, fip['port_forwardings'][0]['internal_port'])
+ self.assertEqual(2222, fip['port_forwardings'][0]['external_port'])
+ self.assertEqual('tcp', fip['port_forwardings'][0]['protocol'])
+ self.assertEqual(port['fixed_ips'][0]['ip_address'],
+ fip['port_forwardings'][0]['internal_ip_address'])
+
+ # Delete port forwarding and ensure that it's not in FIP's details
+ # anymore
+ self.client.delete_port_forwarding(fip['id'], pf['id'])
+ fip = self.client.show_floatingip(fip['id'])['floatingip']
+ self.assertEqual(0, len(fip['port_forwardings']))
+
+ @decorators.idempotent_id('8202cded-7e82-4420-9585-c091105404f6')
+ def test_associate_2_port_forwardings_to_floating_ip(self):
+ fip = self.create_floatingip()
+ forwardings_data = [(1111, 2222), (3333, 4444)]
+ created_pfs = []
+ for data in forwardings_data:
+ internal_port = data[0]
+ external_port = data[1]
+ port = self.create_port(self.network)
+ created_pf = self.create_port_forwarding(
+ fip['id'],
+ internal_port_id=port['id'],
+ internal_ip_address=port['fixed_ips'][0]['ip_address'],
+ internal_port=internal_port, external_port=external_port,
+ protocol="tcp")
+ self.assertEqual(internal_port, created_pf['internal_port'])
+ self.assertEqual(external_port, created_pf['external_port'])
+ self.assertEqual('tcp', created_pf['protocol'])
+ self.assertEqual(port['fixed_ips'][0]['ip_address'],
+ created_pf['internal_ip_address'])
+ created_pfs.append(created_pf)
+
+ # Check that all PFs are visible in Floating IP details
+ fip = self.client.show_floatingip(fip['id'])['floatingip']
+ self.assertEqual(len(forwardings_data), len(fip['port_forwardings']))
+ for pf in created_pfs:
+ expected_pf = {
+ 'external_port': pf['external_port'],
+ 'internal_port': pf['internal_port'],
+ 'protocol': pf['protocol'],
+ 'internal_ip_address': pf['internal_ip_address']}
+ self.assertIn(expected_pf, fip['port_forwardings'])
+
+ # Test list of port forwardings
+ port_forwardings = self.client.list_port_forwardings(
+ fip['id'])['port_forwardings']
+ self.assertEqual(len(forwardings_data), len(port_forwardings))
+ for pf in created_pfs:
+ expected_pf = pf.copy()
+ expected_pf.pop('client')
+ expected_pf.pop('floatingip_id')
+ self.assertIn(expected_pf, port_forwardings)
+
+ @decorators.idempotent_id('6a34e811-66d1-4f63-aa4d-9013f15deb62')
+ def test_associate_port_forwarding_to_used_floating_ip(self):
+ port_for_fip = self.create_port(self.network)
+ fip = self.create_floatingip(port=port_for_fip)
+ port = self.create_port(self.network)
+ self.assertRaises(
+ exceptions.Conflict,
+ self.create_port_forwarding,
+ fip['id'],
+ internal_port_id=port['id'],
+ internal_ip_address=port['fixed_ips'][0]['ip_address'],
+ internal_port=1111, external_port=2222,
+ protocol="tcp")
+
+ @decorators.idempotent_id('4ca72d40-93e4-485f-a876-76caf33c1fe6')
+ def test_associate_port_forwarding_to_port_with_fip(self):
+ port = self.create_port(self.network)
+ self.create_floatingip(port=port)
+ fip_for_pf = self.create_floatingip()
+ self.assertRaises(
+ exceptions.Conflict,
+ self.create_port_forwarding,
+ fip_for_pf['id'],
+ internal_port_id=port['id'],
+ internal_ip_address=port['fixed_ips'][0]['ip_address'],
+ internal_port=1111, external_port=2222,
+ protocol="tcp")
diff --git a/neutron_tempest_plugin/api/test_security_groups.py b/neutron_tempest_plugin/api/test_security_groups.py
index d44ba50..26a8c05 100644
--- a/neutron_tempest_plugin/api/test_security_groups.py
+++ b/neutron_tempest_plugin/api/test_security_groups.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import random
+
from neutron_lib import constants
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
@@ -23,7 +25,7 @@
from neutron_tempest_plugin.api import base_security_groups
-class SecGroupTest(base.BaseNetworkTest):
+class SecGroupTest(base.BaseAdminNetworkTest):
required_extensions = ['security-group']
@@ -55,6 +57,25 @@
self.assertEqual(observed_security_group['description'],
new_description)
+ @decorators.idempotent_id('1fff0d57-bb6c-4528-9c1d-2326dce1c087')
+ def test_show_security_group_contains_all_rules(self):
+ security_group = self.create_security_group()
+ protocol = random.choice(list(base_security_groups.V4_PROTOCOL_NAMES))
+ security_group_rule = self.create_security_group_rule(
+ security_group=security_group,
+ project={'id': self.admin_client.tenant_id},
+ client=self.admin_client,
+ protocol=protocol,
+ direction=constants.INGRESS_DIRECTION)
+
+ observed_security_group = self.client.show_security_group(
+ security_group['id'])['security_group']
+        observed_security_group_rules_ids = [
+            sgr['id'] for sgr in
+            observed_security_group['security_group_rules']]
+        self.assertIn(
+            security_group_rule['id'], observed_security_group_rules_ids)
+
@decorators.idempotent_id('7c0ecb10-b2db-11e6-9b14-000c29248b0d')
def test_create_bulk_sec_groups(self):
# Creates 2 sec-groups in one request
@@ -109,12 +130,42 @@
_ip_version = constants.IP_VERSION_6
protocol_names = base_security_groups.V6_PROTOCOL_NAMES
protocol_ints = base_security_groups.V6_PROTOCOL_INTS
- protocol_legacy_names = base_security_groups.V6_PROTOCOL_LEGACY
@decorators.idempotent_id('c7d17b41-3b4e-4add-bb3b-6af59baaaffa')
- def test_security_group_rule_protocol_legacy_names(self):
- self._test_security_group_rule_protocols(
- protocols=self.protocol_legacy_names)
+ def test_security_group_rule_protocol_legacy_icmpv6(self):
+ # These legacy protocols can be used to create security groups,
+ # but they could be shown either with their passed protocol name,
+        # or a canonicalized version, depending on the neutron version.
+ # So we check against a list of possible values.
+ # TODO(haleyb): Remove once these legacy names are deprecated
+ protocols = {constants.PROTO_NAME_IPV6_ICMP_LEGACY:
+ constants.PROTO_NAME_IPV6_ICMP,
+ constants.PROTO_NAME_ICMP:
+ constants.PROTO_NAME_IPV6_ICMP}
+ for key, value in protocols.items():
+ self._test_security_group_rule_legacy(
+ protocol_list=[str(key), str(value)],
+ protocol=str(key),
+ direction=constants.INGRESS_DIRECTION,
+ ethertype=self.ethertype)
+
+ def _test_security_group_rule_legacy(self, protocol_list, **kwargs):
+ security_group = self.create_security_group()
+ security_group_rule = self.create_security_group_rule(
+ security_group=security_group, **kwargs)
+ observed_security_group_rule = self.client.show_security_group_rule(
+ security_group_rule['id'])['security_group_rule']
+ for key, value in kwargs.items():
+ if key == 'protocol':
+ self.assertIn(security_group_rule[key], protocol_list,
+ "{!r} does not match.".format(key))
+ self.assertIn(observed_security_group_rule[key], protocol_list,
+ "{!r} does not match.".format(key))
+ else:
+ self.assertEqual(value, security_group_rule[key],
+ "{!r} does not match.".format(key))
+ self.assertEqual(value, observed_security_group_rule[key],
+ "{!r} does not match.".format(key))
class RbacSharedSecurityGroupTest(base.BaseAdminNetworkTest):
diff --git a/neutron_tempest_plugin/api/test_subnetpool_prefix_ops.py b/neutron_tempest_plugin/api/test_subnetpool_prefix_ops.py
new file mode 100644
index 0000000..49cce5b
--- /dev/null
+++ b/neutron_tempest_plugin/api/test_subnetpool_prefix_ops.py
@@ -0,0 +1,97 @@
+# Copyright 2019 SUSE LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import netaddr
+from tempest.common import utils
+from tempest.lib import decorators
+
+from neutron_tempest_plugin.api import test_subnetpools
+
+SUBNETPOOL_NAME = 'smoke-subnetpool'
+SUBNET_NAME = 'smoke-subnet'
+
+
+class SubnetPoolPrefixOpsTestMixin(object):
+
+ def _compare_prefix_lists(self, list_expected, list_observed):
+ expected_set = netaddr.IPSet(iterable=list_expected)
+ observed_set = netaddr.IPSet(iterable=list_observed)
+
+ # compact the IPSet's
+ expected_set.compact()
+ observed_set.compact()
+
+ self.assertEqual(expected_set, observed_set)
+
+ @decorators.idempotent_id('b1d56d1f-2818-44ee-b6a3-3c1327c25318')
+ @utils.requires_ext(extension='subnetpool-prefix-ops', service='network')
+ def test_add_remove_prefix(self):
+ created_subnetpool = self._create_subnetpool()
+ req_body = {'prefixes': self.prefixes_to_add}
+
+ # Add a prefix to the subnet pool
+ resp = self.client.add_subnetpool_prefix(created_subnetpool['id'],
+ **req_body)
+ self._compare_prefix_lists(self.prefixes + self.prefixes_to_add,
+ resp['prefixes'])
+
+ # Remove the prefix from the subnet pool
+ resp = self.client.remove_subnetpool_prefix(created_subnetpool['id'],
+ **req_body)
+ self._compare_prefix_lists(self.prefixes, resp['prefixes'])
+
+ @decorators.idempotent_id('a36c18fc-10b5-4ebc-ab79-914f826c5bf5')
+ @utils.requires_ext(extension='subnetpool-prefix-ops', service='network')
+ def test_add_overlapping_prefix(self):
+ created_subnetpool = self._create_subnetpool()
+ req_body = {'prefixes': self.overlapping_prefixes}
+
+ # Add an overlapping prefix to the subnet pool
+ resp = self.client.add_subnetpool_prefix(created_subnetpool['id'],
+ **req_body)
+ self._compare_prefix_lists(self.prefixes + self.overlapping_prefixes,
+ resp['prefixes'])
+
+
+class SubnetPoolPrefixOpsIpv4Test(test_subnetpools.SubnetPoolsTestBase,
+ SubnetPoolPrefixOpsTestMixin):
+
+ prefixes = ['192.168.1.0/24', '10.10.10.0/24']
+ prefixes_to_add = ['192.168.2.0/24']
+ overlapping_prefixes = ['10.10.0.0/16']
+ min_prefixlen = 16
+ ip_version = 4
+
+ @classmethod
+ def resource_setup(cls):
+ super(SubnetPoolPrefixOpsIpv4Test, cls).resource_setup()
+ cls._subnetpool_data = {'prefixes': cls.prefixes,
+ 'min_prefixlen': cls.min_prefixlen}
+
+
+class SubnetPoolPrefixOpsIpv6Test(test_subnetpools.SubnetPoolsTestBase,
+ SubnetPoolPrefixOpsTestMixin):
+
+ prefixes = ['2001:db8:1234::/48', '2001:db8:1235::/48']
+ prefixes_to_add = ['2001:db8:4321::/48']
+ overlapping_prefixes = ['2001:db8:1234:1111::/64']
+ min_prefixlen = 48
+ ip_version = 6
+
+ @classmethod
+ def resource_setup(cls):
+ super(SubnetPoolPrefixOpsIpv6Test, cls).resource_setup()
+ cls._subnetpool_data = {'prefixes': cls.prefixes,
+ 'min_prefixlen': cls.min_prefixlen}
diff --git a/neutron_tempest_plugin/bgpvpn/__init__.py b/neutron_tempest_plugin/bgpvpn/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/neutron_tempest_plugin/bgpvpn/__init__.py
diff --git a/neutron_tempest_plugin/bgpvpn/api/__init__.py b/neutron_tempest_plugin/bgpvpn/api/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/neutron_tempest_plugin/bgpvpn/api/__init__.py
diff --git a/neutron_tempest_plugin/bgpvpn/api/test_bgpvpn.py b/neutron_tempest_plugin/bgpvpn/api/test_bgpvpn.py
new file mode 100644
index 0000000..f3a7b11
--- /dev/null
+++ b/neutron_tempest_plugin/bgpvpn/api/test_bgpvpn.py
@@ -0,0 +1,380 @@
+# Copyright (c) 2015 Ericsson.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_utils import uuidutils
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions
+from testtools import ExpectedException
+
+from neutron_tempest_plugin.bgpvpn.base import BaseBgpvpnTest as base
+
+
+class BgpvpnTest(base):
+ """Tests the following operations in the Neutron API:
+
+ create bgpvpn
+ delete bgpvpn
+ show bgpvpn
+ list bgpvpns
+ associate network to bgpvpn
+ disassociate network from bgpvpn
+ show network association
+ list network associations
+ update route targets
+
+ v2.0 of the Neutron API is assumed. It is also assumed that the following
+ options are defined in the [network] section of etc/tempest.conf:
+
+ ...
+ """
+
+ @decorators.idempotent_id('4f90deb2-eb8e-4e7d-9d68-c5b5cc657f7e')
+ def test_create_bgpvpn(self):
+ self.create_bgpvpn(self.bgpvpn_admin_client)
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('0a911d61-d908-4c21-a11e-e403ac0d8e38')
+ def test_create_bgpvpn_as_non_admin_fail(self):
+ self.assertRaises(exceptions.Forbidden,
+ self.create_bgpvpn, self.bgpvpn_client)
+
+ @decorators.idempotent_id('709b23b0-9719-47df-9f53-b0812a5d5a48')
+ def test_delete_bgpvpn(self):
+ bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
+ tenant_id=self.bgpvpn_client.tenant_id)
+ self.delete_bgpvpn(self.bgpvpn_admin_client, bgpvpn)
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('596abfc2-fd89-491d-863d-25459db1df4b')
+ def test_delete_bgpvpn_as_non_admin_fail(self):
+ bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
+ tenant_id=self.bgpvpn_client.tenant_id)
+ self.assertRaises(exceptions.Forbidden,
+ self.bgpvpn_client.delete_bgpvpn, bgpvpn['id'])
+
+ @decorators.idempotent_id('9fa29db8-35d0-4beb-a986-23c369499ab1')
+ def test_show_bgpvpn(self):
+ bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
+ tenant_id=self.bgpvpn_client.tenant_id)
+ bgpvpn_details = self.bgpvpn_client.show_bgpvpn(bgpvpn['id'])['bgpvpn']
+ self.assertEqual(bgpvpn['id'], bgpvpn_details['id'])
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('b20110bb-393b-4342-8b30-6486cd2b4fc6')
+ def test_show_bgpvpn_as_non_owner_fail(self):
+ bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
+ tenant_id=self.bgpvpn_client.tenant_id)
+ self.assertRaises(exceptions.NotFound,
+ self.bgpvpn_alt_client.show_bgpvpn, bgpvpn['id'])
+
+ @decorators.idempotent_id('7a7feca2-1c24-4f5d-ad4b-b0e5a712adb1')
+ def test_list_bgpvpn(self):
+ bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
+ tenant_id=self.bgpvpn_client.tenant_id)
+ bgpvpns = self.bgpvpn_client.list_bgpvpns()['bgpvpns']
+ self.assertIn(bgpvpn['id'],
+ [bgpvpn_alt['id'] for bgpvpn_alt in bgpvpns])
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('4875e65d-0b65-40c0-9efd-309420686ab4')
+ def test_list_bgpvpn_as_non_owner_fail(self):
+ bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
+ tenant_id=self.bgpvpn_client.tenant_id)
+ bgpvpns_alt = self.bgpvpn_alt_client.list_bgpvpns()['bgpvpns']
+ self.assertNotIn(bgpvpn['id'],
+ [bgpvpn_alt['id'] for bgpvpn_alt in bgpvpns_alt])
+
+ @decorators.idempotent_id('096281da-356d-4c04-bd55-784a26bb1b0c')
+ def test_list_show_network_association(self):
+ bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
+ tenant_id=self.bgpvpn_client.tenant_id)
+ network = self.networks_client.create_network()['network']
+
+ association = self.bgpvpn_client.create_network_association(
+ bgpvpn['id'], network['id'])['network_association']
+ net_assocs = self.bgpvpn_client\
+ .list_network_associations(bgpvpn['id'])['network_associations']
+ self.assertIn(association['id'],
+ [net_assoc['id'] for net_assoc in net_assocs])
+ net_assoc_details = self.bgpvpn_client\
+ .show_network_association(bgpvpn['id'],
+ association['id'])['network_association']
+ self.assertEqual(association['id'], net_assoc_details['id'])
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('57b0da93-8e37-459f-9aaf-f903acc36025')
+ def test_show_netassoc_as_non_owner_fail(self):
+ bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
+ tenant_id=self.bgpvpn_client.tenant_id)
+ network = self.networks_client.create_network()['network']
+
+ net_assoc = self.bgpvpn_client.create_network_association(
+ bgpvpn['id'], network['id'])['network_association']
+
+ self.assertRaises(exceptions.NotFound,
+ self.bgpvpn_alt_client.show_network_association,
+ bgpvpn['id'], net_assoc['id'])
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('2cbb10af-bf9c-4b32-b6a6-4066de783758')
+ def test_list_netassoc_as_non_owner_fail(self):
+ bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
+ tenant_id=self.bgpvpn_client.tenant_id)
+ network = self.networks_client.create_network()['network']
+
+ self.bgpvpn_client.create_network_association(bgpvpn['id'],
+ network['id'])
+ net_assocs_alt = self.bgpvpn_alt_client\
+ .list_network_associations(bgpvpn['id'])
+ self.assertFalse(net_assocs_alt['network_associations'])
+
+ @decorators.idempotent_id('51e1b079-aefa-4c37-8b1a-0567b3ef7954')
+ def test_associate_disassociate_network(self):
+ bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
+ tenant_id=self.bgpvpn_client.tenant_id)
+ network = self.networks_client.create_network()
+ network_id = network['network']['id']
+
+ # Associate the network to the bgpvpn resource
+ association = self.bgpvpn_client.create_network_association(
+ bgpvpn['id'], network_id)
+ self.assertEqual(association['network_association']['network_id'],
+ network_id)
+ updated_bgpvpn = self.bgpvpn_client.show_bgpvpn(bgpvpn['id'])
+ self.assertEqual(updated_bgpvpn['bgpvpn']['networks'], [network_id])
+
+ # Disassociate the network from the bgpvpn resource
+ self.bgpvpn_client.delete_network_association(
+ bgpvpn['id'],
+ association['network_association']['id'])
+ updated_bgpvpn = self.bgpvpn_client.show_bgpvpn(bgpvpn['id'])
+ self.assertEqual(updated_bgpvpn['bgpvpn']['networks'], [])
+
+ self.networks_client.delete_network(network_id)
+
+ @decorators.idempotent_id('559013fd-1e34-4fde-9599-f8aafe9ae716')
+ def test_update_route_target(self):
+ bgpvpn = self.create_bgpvpn(
+ self.bgpvpn_admin_client,
+ route_targets=['64512:1'],
+ import_targets=['64512:2'],
+ export_targets=['64512:3'])
+ bgpvpn = self.bgpvpn_admin_client.update_bgpvpn(
+ bgpvpn['id'],
+ route_targets=['64512:4'],
+ import_targets=['64512:5'],
+ export_targets=['64512:6']
+ )['bgpvpn']
+ self.assertEqual(['64512:4'], bgpvpn['route_targets'])
+ self.assertEqual(['64512:5'], bgpvpn['import_targets'])
+ self.assertEqual(['64512:6'], bgpvpn['export_targets'])
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('e35eb9be-fe1f-406c-b36b-fc1879328313')
+ def test_update_route_target_non_admin_fail(self):
+ bgpvpn = self.create_bgpvpn(
+ self.bgpvpn_admin_client,
+ tenant_id=self.bgpvpn_client.tenant_id,
+ route_targets=['64512:1'])
+ with ExpectedException(exceptions.Forbidden):
+ self.bgpvpn_client.update_bgpvpn(
+ bgpvpn['id'],
+ route_targets=['64512:2'],
+ import_targets=['64512:3'],
+ export_targets=['64512:4'])
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('464ca6f9-86e4-4ee3-9c65-f1edae93223d')
+ def test_create_bgpvpn_with_invalid_routetargets(self):
+        """Create a bgpvpn with invalid route targets
+
+        This test verifies that invalid route targets, import targets,
+        and export targets are rejected by the Create API
+        """
+ postdata = {
+ "name": "testbgpvpn",
+ "tenant_id": self.bgpvpn_client.tenant_id,
+ "route_targets": ["0"]
+ }
+ self.assertRaises(exceptions.BadRequest,
+ self.bgpvpn_admin_client.create_bgpvpn, **postdata)
+ postdata = {
+ "name": "testbgpvpn",
+ "tenant_id": self.bgpvpn_client.tenant_id,
+ "import_targets": ["test", " "]
+ }
+ self.assertRaises(exceptions.BadRequest,
+ self.bgpvpn_admin_client.create_bgpvpn, **postdata)
+ postdata = {
+ "name": "testbgpvpn",
+ "tenant_id": self.bgpvpn_client.tenant_id,
+ "export_targets": ["64512:1000000000000", "xyz"]
+ }
+ self.assertRaises(exceptions.BadRequest,
+ self.bgpvpn_admin_client.create_bgpvpn, **postdata)
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('7d4e9b87-e1ab-47a7-a8d6-9d179365556a')
+ def test_update_bgpvpn_invalid_routetargets(self):
+        """Update the bgpvpn with invalid route targets
+
+        This test verifies that invalid route targets, import targets
+        and export targets are rejected by the Update API
+        """
+ postdata = {
+ "name": "testbgpvpn",
+ "tenant_id": self.bgpvpn_client.tenant_id,
+ }
+ bgpvpn = self.bgpvpn_admin_client.create_bgpvpn(**postdata)
+ updatedata = {
+ "route_targets": ["0"]
+ }
+ self.assertRaises(exceptions.BadRequest,
+ self.bgpvpn_admin_client.update_bgpvpn,
+ bgpvpn['bgpvpn']['id'], **updatedata)
+ updatedata = {
+ "import_targets": ["test", " "]
+ }
+ self.assertRaises(exceptions.BadRequest,
+ self.bgpvpn_admin_client.update_bgpvpn,
+ bgpvpn['bgpvpn']['id'], **updatedata)
+ updatedata = {
+ "export_targets": ["64512:1000000000000", "xyz"],
+ }
+ self.assertRaises(exceptions.BadRequest,
+ self.bgpvpn_admin_client.update_bgpvpn,
+ bgpvpn['bgpvpn']['id'], **updatedata)
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('f049ce21-d239-47c0-b13f-fb57a2a558ce')
+ def test_associate_invalid_network(self):
+        """Associate an invalid network with a bgpvpn
+
+        This test verifies that an invalid network id or bgpvpn id
+        is rejected by the associate API
+        """
+ postdata = {
+ "name": "testbgpvpn",
+ "tenant_id": self.bgpvpn_client.tenant_id,
+ }
+ bgpvpn = self.bgpvpn_admin_client.create_bgpvpn(**postdata)
+ network = self.networks_client.create_network()
+ self.assertRaises(exceptions.NotFound,
+ self.bgpvpn_client.create_network_association,
+ bgpvpn['bgpvpn']['id'], uuidutils.generate_uuid())
+ self.assertRaises(exceptions.NotFound,
+ self.bgpvpn_client.create_network_association,
+ uuidutils.generate_uuid(),
+ network['network']['id'])
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('078b2660-4adb-4c4c-abf0-b77bf0bface5')
+ def test_disassociate_invalid_network(self):
+        """Disassociate an invalid network from a bgpvpn
+
+        This test verifies that an invalid network id or
+        bgpvpn id is rejected by the disassociate API
+        """
+ postdata = {
+ "name": "testbgpvpn",
+ "tenant_id": self.bgpvpn_client.tenant_id,
+ }
+ bgpvpn = self.bgpvpn_admin_client.create_bgpvpn(**postdata)
+ network = self.networks_client.create_network()
+ association = self.bgpvpn_client.create_network_association(
+ bgpvpn['bgpvpn']['id'], network['network']['id'])
+ self.assertEqual(association['network_association'][
+ 'network_id'], network['network']['id'])
+ self.assertRaises(exceptions.NotFound,
+ self.bgpvpn_client.delete_network_association,
+ bgpvpn['bgpvpn']['id'],
+ uuidutils.generate_uuid())
+ self.assertRaises(exceptions.NotFound,
+ self.bgpvpn_client.delete_network_association,
+ uuidutils.generate_uuid(),
+ association['network_association']['id'])
+
+ @decorators.idempotent_id('de8d94b0-0239-4a48-9574-c3a4a4f7cacb')
+ def test_associate_disassociate_router(self):
+ bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
+ tenant_id=self.bgpvpn_client.tenant_id)
+ router = self.routers_client.create_router()
+ router_id = router['router']['id']
+
+        # Associate the router to the bgpvpn resource
+ association = self.bgpvpn_client.create_router_association(
+ bgpvpn['id'], router_id)
+ self.assertEqual(association['router_association']['router_id'],
+ router_id)
+ updated_bgpvpn = self.bgpvpn_client.show_bgpvpn(bgpvpn['id'])
+ self.assertEqual(updated_bgpvpn['bgpvpn']['routers'], [router_id])
+
+        # Disassociate the router from the bgpvpn resource
+ self.bgpvpn_client.delete_router_association(
+ bgpvpn['id'],
+ association['router_association']['id'])
+ updated_bgpvpn = self.bgpvpn_client.show_bgpvpn(bgpvpn['id'])
+ self.assertEqual(updated_bgpvpn['bgpvpn']['routers'], [])
+
+ self.routers_client.delete_router(router_id)
+
+ @decorators.idempotent_id('3ae91755-b1b6-4c62-a699-a44eeb4ee522')
+ def test_list_show_router_association(self):
+ bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
+ tenant_id=self.bgpvpn_client.tenant_id)
+ router = self.routers_client.create_router()
+ router_id = router['router']['id']
+
+ association = self.bgpvpn_client.create_router_association(
+ bgpvpn['id'], router_id)['router_association']
+ rtr_assocs = self.bgpvpn_client\
+ .list_router_associations(bgpvpn['id'])['router_associations']
+ self.assertIn(association['id'],
+ [rtr_assoc['id'] for rtr_assoc in rtr_assocs])
+ rtr_assoc_details = self.bgpvpn_client\
+ .show_router_association(bgpvpn['id'],
+ association['id'])['router_association']
+ self.assertEqual(association['id'], rtr_assoc_details['id'])
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('4be1f073-fe57-4858-b7b9-9a189e90b770')
+ def test_attach_associated_subnet_to_associated_router(self):
+ # Create a first bgpvpn and associate a network with a subnet to it
+ bgpvpn_net = self.create_bgpvpn(
+ self.bgpvpn_admin_client,
+ tenant_id=self.bgpvpn_client.tenant_id)
+ network = self.create_network()
+ subnet = self.create_subnet(network)
+ self.bgpvpn_client.create_network_association(
+ bgpvpn_net['id'], network['id'])
+
+ # Create a second bgpvpn and associate a router to it
+ bgpvpn_router = self.create_bgpvpn(
+ self.bgpvpn_admin_client,
+ tenant_id=self.bgpvpn_client.tenant_id)
+
+ router = self.create_router(
+ router_name=data_utils.rand_name('test-bgpvpn-'))
+ self.bgpvpn_client.create_router_association(
+ bgpvpn_router['id'],
+ router['id'])
+
+ # Attach the subnet of the network to the router
+ subnet_data = {'subnet_id': subnet['id']}
+ self.assertRaises(exceptions.Conflict,
+ self.routers_client.add_router_interface,
+ router['id'],
+ **subnet_data)
diff --git a/neutron_tempest_plugin/bgpvpn/base.py b/neutron_tempest_plugin/bgpvpn/base.py
new file mode 100644
index 0000000..b436a5d
--- /dev/null
+++ b/neutron_tempest_plugin/bgpvpn/base.py
@@ -0,0 +1,95 @@
+# Copyright (c) 2015 Ericsson.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import tempest.api.network.base as test
+from tempest.common import utils
+from tempest import config
+from tempest.lib.common.utils import data_utils
+
+from neutron_tempest_plugin.bgpvpn.services import bgpvpn_client
+
+CONF = config.CONF
+
+
+class BaseBgpvpnTest(test.BaseNetworkTest):
+ """Base class for the Bgpvpn tests that use the Tempest Neutron REST client
+
+ """
+
+ credentials = ['primary', 'admin', 'alt']
+ bgpvpn_client = None
+ bgpvpn_admin_client = None
+ bgpvpn_alt_client = None
+
+ @classmethod
+ def resource_cleanup(cls):
+ for bgpvpn in cls.bgpvpns:
+ cls.bgpvpn_admin_client.delete_bgpvpn(bgpvpn['id'])
+ super(BaseBgpvpnTest, cls).resource_cleanup()
+
+ @classmethod
+ def resource_setup(cls):
+ cls.route_distinguishers = []
+ cls.bgpvpns = []
+ cls.bgpvpn_client = bgpvpn_client.BgpvpnClient(
+ cls.os_primary.auth_provider,
+ CONF.network.catalog_type,
+ CONF.network.region or CONF.identity.region,
+ endpoint_type=CONF.network.endpoint_type,
+ build_interval=CONF.network.build_interval,
+ build_timeout=CONF.network.build_timeout,
+ **cls.os_primary.default_params)
+ cls.bgpvpn_admin_client = bgpvpn_client.BgpvpnClient(
+ cls.os_admin.auth_provider,
+ CONF.network.catalog_type,
+ CONF.network.region or CONF.identity.region,
+ endpoint_type=CONF.network.endpoint_type,
+ build_interval=CONF.network.build_interval,
+ build_timeout=CONF.network.build_timeout,
+ **cls.os_admin.default_params)
+ cls.bgpvpn_alt_client = bgpvpn_client.BgpvpnClient(
+ cls.os_alt.auth_provider,
+ CONF.network.catalog_type,
+ CONF.network.region or CONF.identity.region,
+ endpoint_type=CONF.network.endpoint_type,
+ build_interval=CONF.network.build_interval,
+ build_timeout=CONF.network.build_timeout,
+ **cls.os_alt.default_params)
+ super(BaseBgpvpnTest, cls).resource_setup()
+
+ @classmethod
+ def skip_checks(cls):
+ super(BaseBgpvpnTest, cls).skip_checks()
+ msg = None
+ if not utils.is_extension_enabled('bgpvpn', 'network'):
+ msg = "Bgpvpn extension not enabled."
+ elif not CONF.bgpvpn.run_bgpvpn_tests:
+ msg = ("Running of bgpvpn related tests is disabled in "
+ "plugin configuration.")
+ if msg:
+ raise cls.skipException(msg)
+
+ def create_bgpvpn(self, client, **kwargs):
+ if 'name' not in kwargs:
+ kwargs['name'] = data_utils.rand_name('test-bgpvpn-')
+
+ body = client.create_bgpvpn(**kwargs)
+ bgpvpn = body['bgpvpn']
+ self.bgpvpns.append(bgpvpn)
+ return bgpvpn
+
+ def delete_bgpvpn(self, client, bgpvpn):
+ client.delete_bgpvpn(bgpvpn['id'])
+ self.bgpvpns.remove(bgpvpn)
diff --git a/neutron_tempest_plugin/bgpvpn/scenario/__init__.py b/neutron_tempest_plugin/bgpvpn/scenario/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/neutron_tempest_plugin/bgpvpn/scenario/__init__.py
diff --git a/neutron_tempest_plugin/bgpvpn/scenario/manager.py b/neutron_tempest_plugin/bgpvpn/scenario/manager.py
new file mode 100644
index 0000000..8a5f9f2
--- /dev/null
+++ b/neutron_tempest_plugin/bgpvpn/scenario/manager.py
@@ -0,0 +1,879 @@
+# Copyright 2012 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import subprocess
+
+import netaddr
+from oslo_log import log
+from oslo_utils import netutils
+
+from tempest.common import compute
+from tempest.common import utils
+from tempest.common.utils.linux import remote_client
+from tempest.common.utils import net_utils
+from tempest.common import waiters
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib import exceptions as lib_exc
+import tempest.test
+
+CONF = config.CONF
+
+LOG = log.getLogger(__name__)
+
+
+class ScenarioTest(tempest.test.BaseTestCase):
+ """Base class for scenario tests. Uses tempest own clients. """
+
+ credentials = ['primary']
+
+ @classmethod
+ def setup_clients(cls):
+ super(ScenarioTest, cls).setup_clients()
+ # Clients (in alphabetical order)
+ cls.keypairs_client = cls.os_primary.keypairs_client
+ cls.servers_client = cls.os_primary.servers_client
+ # Neutron network client
+ cls.networks_client = cls.os_primary.networks_client
+ cls.ports_client = cls.os_primary.ports_client
+ cls.routers_client = cls.os_primary.routers_client
+ cls.subnets_client = cls.os_primary.subnets_client
+ cls.floating_ips_client = cls.os_primary.floating_ips_client
+ cls.security_groups_client = cls.os_primary.security_groups_client
+ cls.security_group_rules_client = (
+ cls.os_primary.security_group_rules_client)
+
+ # ## Test functions library
+ #
+ # The create_[resource] functions only return body and discard the
+ # resp part which is not used in scenario tests
+
+ def _create_port(self, network_id, client=None, namestart='port-quotatest',
+ **kwargs):
+ if not client:
+ client = self.ports_client
+ name = data_utils.rand_name(namestart)
+ result = client.create_port(
+ name=name,
+ network_id=network_id,
+ **kwargs)
+ self.assertIsNotNone(result, 'Unable to allocate port')
+ port = result['port']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ client.delete_port, port['id'])
+ return port
+
+ def create_keypair(self, client=None):
+ if not client:
+ client = self.keypairs_client
+ name = data_utils.rand_name(self.__class__.__name__)
+ # We don't need to create a keypair by pubkey in scenario
+ body = client.create_keypair(name=name)
+ self.addCleanup(client.delete_keypair, name)
+ return body['keypair']
+
+ def create_server(self, name=None, image_id=None, flavor=None,
+ validatable=False, wait_until='ACTIVE',
+ clients=None, **kwargs):
+ """Wrapper utility that returns a test server.
+
+ This wrapper utility calls the common create test server and
+ returns a test server. The purpose of this wrapper is to minimize
+ the impact on the code of the tests already using this
+ function.
+ """
+
+ # NOTE(jlanoux): As a first step, ssh checks in the scenario
+ # tests need to be run regardless of the run_validation and
+ # validatable parameters and thus until the ssh validation job
+ # becomes voting in CI. The test resources management and IP
+ # association are taken care of in the scenario tests.
+ # Therefore, the validatable parameter is set to false in all
+ # those tests. In this way create_server just return a standard
+ # server and the scenario tests always perform ssh checks.
+
+ # Needed for the cross_tenant_traffic test:
+ if clients is None:
+ clients = self.os_primary
+
+ if name is None:
+ name = data_utils.rand_name(self.__class__.__name__ + "-server")
+
+ vnic_type = CONF.network.port_vnic_type
+
+ # If vnic_type is configured create port for
+ # every network
+ if vnic_type:
+ ports = []
+
+ create_port_body = {'binding:vnic_type': vnic_type,
+ 'namestart': 'port-smoke'}
+ if kwargs:
+ # Convert security group names to security group ids
+ # to pass to create_port
+ if 'security_groups' in kwargs:
+ security_groups = \
+ clients.security_groups_client.list_security_groups(
+ ).get('security_groups')
+ sec_dict = dict([(s['name'], s['id'])
+ for s in security_groups])
+
+ sec_groups_names = [s['name'] for s in kwargs.pop(
+ 'security_groups')]
+ security_groups_ids = [sec_dict[s]
+ for s in sec_groups_names]
+
+ if security_groups_ids:
+ create_port_body[
+ 'security_groups'] = security_groups_ids
+ networks = kwargs.pop('networks', [])
+ else:
+ networks = []
+
+ # If there are no networks passed to us we look up
+ # for the project's private networks and create a port.
+ # The same behaviour as we would expect when passing
+ # the call to the clients with no networks
+ if not networks:
+ networks = clients.networks_client.list_networks(
+ **{'router:external': False, 'fields': 'id'})['networks']
+
+ # It's net['uuid'] if networks come from kwargs
+ # and net['id'] if they come from
+ # clients.networks_client.list_networks
+ for net in networks:
+ net_id = net.get('uuid', net.get('id'))
+ if 'port' not in net:
+ port = self._create_port(network_id=net_id,
+ client=clients.ports_client,
+ **create_port_body)
+ ports.append({'port': port['id']})
+ else:
+ ports.append({'port': net['port']})
+ if ports:
+ kwargs['networks'] = ports
+ self.ports = ports
+
+ tenant_network = self.get_tenant_network()
+
+ body, servers = compute.create_test_server(
+ clients,
+ tenant_network=tenant_network,
+ wait_until=wait_until,
+ name=name, flavor=flavor,
+ image_id=image_id, **kwargs)
+
+ self.addCleanup(waiters.wait_for_server_termination,
+ clients.servers_client, body['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ clients.servers_client.delete_server, body['id'])
+ server = clients.servers_client.show_server(body['id'])['server']
+ return server
+
+ def get_remote_client(self, ip_address, username=None, private_key=None):
+ """Get a SSH client to a remote server
+
+ @param ip_address the server floating or fixed IP address to use
+ for ssh validation
+ @param username name of the Linux account on the remote server
+ @param private_key the SSH private key to use
+ @return a RemoteClient object
+ """
+
+ if username is None:
+ username = CONF.validation.image_ssh_user
+ # Set this with 'keypair' or others to log in with keypair or
+ # username/password.
+ if CONF.validation.auth_method == 'keypair':
+ password = None
+ if private_key is None:
+ private_key = self.keypair['private_key']
+ else:
+ password = CONF.validation.image_ssh_password
+ private_key = None
+ linux_client = remote_client.RemoteClient(ip_address, username,
+ pkey=private_key,
+ password=password)
+ try:
+ linux_client.validate_authentication()
+ except Exception as e:
+ message = ('Initializing SSH connection to %(ip)s failed. '
+ 'Error: %(error)s' % {'ip': ip_address,
+ 'error': e})
+ caller = test_utils.find_test_caller()
+ if caller:
+ message = '(%s) %s' % (caller, message)
+ LOG.exception(message)
+ self._log_console_output()
+ raise
+
+ return linux_client
+
+ def _log_console_output(self, servers=None):
+ if not CONF.compute_feature_enabled.console_output:
+ LOG.debug('Console output not supported, cannot log')
+ return
+ if not servers:
+ servers = self.servers_client.list_servers()
+ servers = servers['servers']
+ for server in servers:
+ try:
+ console_output = self.servers_client.get_console_output(
+ server['id'])['output']
+ LOG.debug('Console output for %s\nbody=\n%s',
+ server['id'], console_output)
+ except lib_exc.NotFound:
+ LOG.debug("Server %s disappeared(deleted) while looking "
+ "for the console log", server['id'])
+
+ def _log_net_info(self, exc):
+ # network debug is called as part of ssh init
+ if not isinstance(exc, lib_exc.SSHTimeout):
+ LOG.debug('Network information on a devstack host')
+
+ def ping_ip_address(self, ip_address, should_succeed=True,
+ ping_timeout=None, mtu=None):
+ timeout = ping_timeout or CONF.validation.ping_timeout
+ cmd = ['ping', '-c1', '-w1']
+
+ if mtu:
+ cmd += [
+ # don't fragment
+ '-M', 'do',
+ # ping receives just the size of ICMP payload
+ '-s', str(net_utils.get_ping_payload_size(mtu, 4))
+ ]
+ cmd.append(ip_address)
+
+ def ping():
+ proc = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc.communicate()
+
+ return (proc.returncode == 0) == should_succeed
+
+ caller = test_utils.find_test_caller()
+ LOG.debug('%(caller)s begins to ping %(ip)s in %(timeout)s sec and the'
+ ' expected result is %(should_succeed)s', {
+ 'caller': caller, 'ip': ip_address, 'timeout': timeout,
+ 'should_succeed':
+ 'reachable' if should_succeed else 'unreachable'
+ })
+ result = test_utils.call_until_true(ping, timeout, 1)
+ LOG.debug('%(caller)s finishes ping %(ip)s in %(timeout)s sec and the '
+ 'ping result is %(result)s', {
+ 'caller': caller, 'ip': ip_address, 'timeout': timeout,
+ 'result': 'expected' if result else 'unexpected'
+ })
+ return result
+
+ def check_vm_connectivity(self, ip_address,
+ username=None,
+ private_key=None,
+ should_connect=True,
+ mtu=None):
+ """Check server connectivity
+
+ :param ip_address: server to test against
+ :param username: server's ssh username
+ :param private_key: server's ssh private key to be used
+ :param should_connect: True/False indicates positive/negative test
+ positive - attempt ping and ssh
+ negative - attempt ping and fail if succeed
+ :param mtu: network MTU to use for connectivity validation
+
+ :raises: AssertError if the result of the connectivity check does
+ not match the value of the should_connect param
+ """
+ if should_connect:
+ msg = "Timed out waiting for %s to become reachable" % ip_address
+ else:
+ msg = "ip address %s is reachable" % ip_address
+ self.assertTrue(self.ping_ip_address(ip_address,
+ should_succeed=should_connect,
+ mtu=mtu),
+ msg=msg)
+ if should_connect:
+ # no need to check ssh for negative connectivity
+ self.get_remote_client(ip_address, username, private_key)
+
+ def check_public_network_connectivity(self, ip_address, username,
+ private_key, should_connect=True,
+ msg=None, servers=None, mtu=None):
+ # The target login is assumed to have been configured for
+ # key-based authentication by cloud-init.
+ LOG.debug('checking network connections to IP %s with user: %s',
+ ip_address, username)
+ try:
+ self.check_vm_connectivity(ip_address,
+ username,
+ private_key,
+ should_connect=should_connect,
+ mtu=mtu)
+ except Exception:
+ ex_msg = 'Public network connectivity check failed'
+ if msg:
+ ex_msg += ": " + msg
+ LOG.exception(ex_msg)
+ self._log_console_output(servers)
+ raise
+
+
+class NetworkScenarioTest(ScenarioTest):
+ """Base class for network scenario tests.
+
+    This class provides helpers for network scenario tests, using the neutron
+ API. Helpers from ancestor which use the nova network API are overridden
+ with the neutron API.
+
+    This class also enforces using Neutron instead of nova-network.
+ Subclassed tests will be skipped if Neutron is not enabled
+
+ """
+
+ credentials = ['primary', 'admin']
+
+ @classmethod
+ def skip_checks(cls):
+ super(NetworkScenarioTest, cls).skip_checks()
+ if not CONF.service_available.neutron:
+ raise cls.skipException('Neutron not available')
+ if not utils.is_extension_enabled('bgpvpn', 'network'):
+ msg = "Bgpvpn extension not enabled."
+ raise cls.skipException(msg)
+
+ def _create_network(self, networks_client=None,
+ tenant_id=None,
+ namestart='network-smoke-',
+ port_security_enabled=True):
+ if not networks_client:
+ networks_client = self.networks_client
+ if not tenant_id:
+ tenant_id = networks_client.tenant_id
+ name = data_utils.rand_name(namestart)
+ network_kwargs = dict(name=name, tenant_id=tenant_id)
+ # Neutron disables port security by default so we have to check the
+ # config before trying to create the network with port_security_enabled
+ if CONF.network_feature_enabled.port_security:
+ network_kwargs['port_security_enabled'] = port_security_enabled
+ result = networks_client.create_network(**network_kwargs)
+ network = result['network']
+
+ self.assertEqual(network['name'], name)
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ networks_client.delete_network,
+ network['id'])
+ return network
+
+ def _create_subnet(self, network, subnets_client=None,
+ routers_client=None, namestart='subnet-smoke',
+ **kwargs):
+ """Create a subnet for the given network
+
+ within the cidr block configured for tenant networks.
+ """
+ if not subnets_client:
+ subnets_client = self.subnets_client
+ if not routers_client:
+ routers_client = self.routers_client
+
+ def cidr_in_use(cidr, tenant_id):
+ """Check cidr existence
+
+            :returns: True if a subnet with the cidr already exists in the
+                      tenant, False otherwise
+ """
+ cidr_in_use = self.os_admin.subnets_client.list_subnets(
+ tenant_id=tenant_id, cidr=cidr)['subnets']
+ return len(cidr_in_use) != 0
+
+ ip_version = kwargs.pop('ip_version', 4)
+
+ if ip_version == 6:
+ tenant_cidr = netaddr.IPNetwork(
+ CONF.network.project_network_v6_cidr)
+ num_bits = CONF.network.project_network_v6_mask_bits
+ else:
+ tenant_cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
+ num_bits = CONF.network.project_network_mask_bits
+
+ result = None
+ str_cidr = None
+ # Repeatedly attempt subnet creation with sequential cidr
+ # blocks until an unallocated block is found.
+ for subnet_cidr in tenant_cidr.subnet(num_bits):
+ str_cidr = str(subnet_cidr)
+ if cidr_in_use(str_cidr, tenant_id=network['tenant_id']):
+ continue
+
+ subnet = dict(
+ name=data_utils.rand_name(namestart),
+ network_id=network['id'],
+ tenant_id=network['tenant_id'],
+ cidr=str_cidr,
+ ip_version=ip_version,
+ **kwargs
+ )
+ try:
+ result = subnets_client.create_subnet(**subnet)
+ break
+ except lib_exc.Conflict as e:
+ is_overlapping_cidr = 'overlaps with another subnet' in str(e)
+ if not is_overlapping_cidr:
+ raise
+ self.assertIsNotNone(result, 'Unable to allocate tenant network')
+
+ subnet = result['subnet']
+ self.assertEqual(subnet['cidr'], str_cidr)
+
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ subnets_client.delete_subnet, subnet['id'])
+
+ return subnet
+
+ def _get_server_port_id_and_ip4(self, server, ip_addr=None):
+ ports = self.os_admin.ports_client.list_ports(
+ device_id=server['id'], fixed_ip=ip_addr)['ports']
+ # A port can have more than one IP address in some cases.
+ # If the network is dual-stack (IPv4 + IPv6), this port is associated
+ # with 2 subnets
+ p_status = ['ACTIVE']
+ # NOTE(vsaienko) With Ironic, instances live on separate hardware
+ # servers. Neutron does not bind ports for Ironic instances, as a
+ # result the port remains in the DOWN state.
+ # TODO(vsaienko) remove once bug: #1599836 is resolved.
+ if getattr(CONF.service_available, 'ironic', False):
+ p_status.append('DOWN')
+ port_map = [(p["id"], fxip["ip_address"])
+ for p in ports
+ for fxip in p["fixed_ips"]
+ if netutils.is_valid_ipv4(fxip["ip_address"])
+ and p['status'] in p_status]
+ inactive = [p for p in ports if p['status'] != 'ACTIVE']
+ if inactive:
+ LOG.warning("Instance has ports that are not ACTIVE: %s", inactive)
+
+ self.assertNotEqual(0, len(port_map),
+ "No IPv4 addresses found in: %s" % ports)
+ self.assertEqual(len(port_map), 1,
+ "Found multiple IPv4 addresses: %s. "
+ "Unable to determine which port to target."
+ % port_map)
+ return port_map[0]
+
+ def _get_network_by_name(self, network_name):
+ net = self.os_admin.networks_client.list_networks(
+ name=network_name)['networks']
+ self.assertNotEqual(len(net), 0,
+ "Unable to get network by name: %s" % network_name)
+ return net[0]
+
+ def create_floating_ip(self, thing, external_network_id=None,
+ port_id=None, client=None):
+        """Create a floating IP and associate it to a resource/port on Neutron"""
+ if not external_network_id:
+ external_network_id = CONF.network.public_network_id
+ if not client:
+ client = self.floating_ips_client
+ if not port_id:
+ port_id, ip4 = self._get_server_port_id_and_ip4(thing)
+ else:
+ ip4 = None
+ result = client.create_floatingip(
+ floating_network_id=external_network_id,
+ port_id=port_id,
+ tenant_id=thing['tenant_id'],
+ fixed_ip_address=ip4
+ )
+ floating_ip = result['floatingip']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ client.delete_floatingip,
+ floating_ip['id'])
+ return floating_ip
+
+ def _associate_floating_ip(self, floating_ip, server):
+ port_id, _ = self._get_server_port_id_and_ip4(server)
+ kwargs = dict(port_id=port_id)
+ floating_ip = self.floating_ips_client.update_floatingip(
+ floating_ip['id'], **kwargs)['floatingip']
+ self.assertEqual(port_id, floating_ip['port_id'])
+ return floating_ip
+
+ def _disassociate_floating_ip(self, floating_ip):
+ """:param floating_ip: floating_ips_client.create_floatingip"""
+ kwargs = dict(port_id=None)
+ floating_ip = self.floating_ips_client.update_floatingip(
+ floating_ip['id'], **kwargs)['floatingip']
+ self.assertIsNone(floating_ip['port_id'])
+ return floating_ip
+
+ def check_floating_ip_status(self, floating_ip, status):
+ """Verifies floatingip reaches the given status
+
+ :param dict floating_ip: floating IP dict to check status
+ :param status: target status
+ :raises: AssertionError if status doesn't match
+ """
+ floatingip_id = floating_ip['id']
+
+ def refresh():
+ result = (self.floating_ips_client.
+ show_floatingip(floatingip_id)['floatingip'])
+ return status == result['status']
+
+ test_utils.call_until_true(refresh,
+ CONF.network.build_timeout,
+ CONF.network.build_interval)
+ floating_ip = self.floating_ips_client.show_floatingip(
+ floatingip_id)['floatingip']
+ self.assertEqual(status, floating_ip['status'],
+ message="FloatingIP: {fp} is at status: {cst}. "
+ "failed to reach status: {st}"
+ .format(fp=floating_ip, cst=floating_ip['status'],
+ st=status))
+ LOG.info("FloatingIP: {fp} is at status: {st}"
+ .format(fp=floating_ip, st=status))
+
+ def _check_tenant_network_connectivity(self, server,
+ username,
+ private_key,
+ should_connect=True,
+ servers_for_debug=None):
+ if not CONF.network.project_networks_reachable:
+ msg = 'Tenant networks not configured to be reachable.'
+ LOG.info(msg)
+ return
+ # The target login is assumed to have been configured for
+ # key-based authentication by cloud-init.
+ try:
+ for net_name, ip_addresses in server['addresses'].items():
+ for ip_address in ip_addresses:
+ self.check_vm_connectivity(ip_address['addr'],
+ username,
+ private_key,
+ should_connect=should_connect)
+ except Exception as e:
+ LOG.exception('Tenant network connectivity check failed')
+ self._log_console_output(servers_for_debug)
+ self._log_net_info(e)
+ raise
+
+ def _check_remote_connectivity(self, source, dest, should_succeed=True,
+ nic=None):
+ """check ping server via source ssh connection
+
+ :param source: RemoteClient: an ssh connection from which to ping
+        :param dest: an IP to ping against
+ :param should_succeed: boolean should ping succeed or not
+ :param nic: specific network interface to ping from
+        :returns: boolean -- True when should_succeed matches the actual
+          ping outcome (the ping outcome is False if the ping failed)
+ """
+ def ping_remote():
+ try:
+ source.ping_host(dest, nic=nic)
+ except lib_exc.SSHExecCommandFailed:
+ LOG.warning('Failed to ping IP: %s via a ssh connection '
+ 'from: %s.', dest, source.ssh_client.host)
+ return not should_succeed
+ return should_succeed
+
+ return test_utils.call_until_true(ping_remote,
+ CONF.validation.ping_timeout,
+ 1)
+
+ def _create_security_group(self, security_group_rules_client=None,
+ tenant_id=None,
+ namestart='secgroup-smoke',
+ security_groups_client=None):
+ if security_group_rules_client is None:
+ security_group_rules_client = self.security_group_rules_client
+ if security_groups_client is None:
+ security_groups_client = self.security_groups_client
+ if tenant_id is None:
+ tenant_id = security_groups_client.tenant_id
+ secgroup = self._create_empty_security_group(
+ namestart=namestart, client=security_groups_client,
+ tenant_id=tenant_id)
+
+ # Add rules to the security group
+ rules = self._create_loginable_secgroup_rule(
+ security_group_rules_client=security_group_rules_client,
+ secgroup=secgroup,
+ security_groups_client=security_groups_client)
+ for rule in rules:
+ self.assertEqual(tenant_id, rule['tenant_id'])
+ self.assertEqual(secgroup['id'], rule['security_group_id'])
+ return secgroup
+
+ def _create_empty_security_group(self, client=None, tenant_id=None,
+ namestart='secgroup-smoke'):
+ """Create a security group without rules.
+
+ Default rules will be created:
+ - IPv4 egress to any
+ - IPv6 egress to any
+
+ :param tenant_id: secgroup will be created in this tenant
+ :returns: the created security group
+ """
+ if client is None:
+ client = self.security_groups_client
+ if not tenant_id:
+ tenant_id = client.tenant_id
+ sg_name = data_utils.rand_name(namestart)
+ sg_desc = sg_name + " description"
+ sg_dict = dict(name=sg_name,
+ description=sg_desc)
+ sg_dict['tenant_id'] = tenant_id
+ result = client.create_security_group(**sg_dict)
+
+ secgroup = result['security_group']
+ self.assertEqual(secgroup['name'], sg_name)
+ self.assertEqual(tenant_id, secgroup['tenant_id'])
+ self.assertEqual(secgroup['description'], sg_desc)
+
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ client.delete_security_group, secgroup['id'])
+ return secgroup
+
+ def _default_security_group(self, client=None, tenant_id=None):
+ """Get default secgroup for given tenant_id.
+
+ :returns: default secgroup for given tenant
+ """
+ if client is None:
+ client = self.security_groups_client
+ if not tenant_id:
+ tenant_id = client.tenant_id
+ sgs = [
+ sg for sg in list(client.list_security_groups().values())[0]
+ if sg['tenant_id'] == tenant_id and sg['name'] == 'default'
+ ]
+ msg = "No default security group for tenant %s." % (tenant_id)
+ self.assertGreater(len(sgs), 0, msg)
+ return sgs[0]
+
+ def _create_security_group_rule(self, secgroup=None,
+ sec_group_rules_client=None,
+ tenant_id=None,
+ security_groups_client=None, **kwargs):
+ """Create a rule from a dictionary of rule parameters.
+
+        Create a rule in a secgroup. If secgroup is not defined, search for
+        the default secgroup in tenant_id.
+
+ :param secgroup: the security group.
+ :param tenant_id: if secgroup not passed -- the tenant in which to
+ search for default secgroup
+ :param kwargs: a dictionary containing rule parameters:
+ for example, to allow incoming ssh:
+ rule = {
+ direction: 'ingress'
+ protocol:'tcp',
+ port_range_min: 22,
+ port_range_max: 22
+ }
+ """
+ if sec_group_rules_client is None:
+ sec_group_rules_client = self.security_group_rules_client
+ if security_groups_client is None:
+ security_groups_client = self.security_groups_client
+ if not tenant_id:
+ tenant_id = security_groups_client.tenant_id
+ if secgroup is None:
+ secgroup = self._default_security_group(
+ client=security_groups_client, tenant_id=tenant_id)
+
+ ruleset = dict(security_group_id=secgroup['id'],
+ tenant_id=secgroup['tenant_id'])
+ ruleset.update(kwargs)
+
+ sg_rule = sec_group_rules_client.create_security_group_rule(**ruleset)
+ sg_rule = sg_rule['security_group_rule']
+
+ self.assertEqual(secgroup['tenant_id'], sg_rule['tenant_id'])
+ self.assertEqual(secgroup['id'], sg_rule['security_group_id'])
+
+ return sg_rule
+
+ def _create_loginable_secgroup_rule(self, security_group_rules_client=None,
+ secgroup=None,
+ security_groups_client=None):
+ """Create loginable security group rule
+
+ This function will create:
+ 1. egress and ingress tcp port 22 allow rule in order to allow ssh
+ access for ipv4.
+ 2. egress and ingress tcp port 80 allow rule in order to allow http
+ access for ipv4.
+ 3. egress and ingress ipv6 icmp allow rule, in order to allow icmpv6.
+ 4. egress and ingress ipv4 icmp allow rule, in order to allow icmpv4.
+ """
+
+ if security_group_rules_client is None:
+ security_group_rules_client = self.security_group_rules_client
+ if security_groups_client is None:
+ security_groups_client = self.security_groups_client
+ rules = []
+ rulesets = [
+ dict(
+ # ssh
+ protocol='tcp',
+ port_range_min=22,
+ port_range_max=22,
+ ),
+ dict(
+ # http
+ protocol='tcp',
+ port_range_min=80,
+ port_range_max=80,
+ ),
+ dict(
+ # ping
+ protocol='icmp',
+ ),
+ dict(
+ # ipv6-icmp for ping6
+ protocol='icmp',
+ ethertype='IPv6',
+ )
+ ]
+ sec_group_rules_client = security_group_rules_client
+ for ruleset in rulesets:
+ for r_direction in ['ingress', 'egress']:
+ ruleset['direction'] = r_direction
+ try:
+ sg_rule = self._create_security_group_rule(
+ sec_group_rules_client=sec_group_rules_client,
+ secgroup=secgroup,
+ security_groups_client=security_groups_client,
+ **ruleset)
+ except lib_exc.Conflict as ex:
+ # if rule already exist - skip rule and continue
+ msg = 'Security group rule already exists'
+ if msg not in ex._error_string:
+ raise ex
+ else:
+ self.assertEqual(r_direction, sg_rule['direction'])
+ rules.append(sg_rule)
+
+ return rules
+
+ def _get_router(self, client=None, tenant_id=None):
+ """Retrieve a router for the given tenant id.
+
+ If a public router has been configured, it will be returned.
+
+ If a public router has not been configured, but a public
+ network has, a tenant router will be created and returned that
+ routes traffic to the public network.
+ """
+ if not client:
+ client = self.routers_client
+ if not tenant_id:
+ tenant_id = client.tenant_id
+ router_id = CONF.network.public_router_id
+ network_id = CONF.network.public_network_id
+ if router_id:
+ body = client.show_router(router_id)
+ return body['router']
+ elif network_id:
+ router = self._create_router(client, tenant_id)
+ kwargs = {'external_gateway_info': dict(network_id=network_id)}
+ router = client.update_router(router['id'], **kwargs)['router']
+ return router
+ else:
+ raise Exception("Neither of 'public_router_id' or "
+ "'public_network_id' has been defined.")
+
+ def _create_router(self, client=None, tenant_id=None,
+ namestart='router-smoke'):
+ if not client:
+ client = self.routers_client
+ if not tenant_id:
+ tenant_id = client.tenant_id
+ name = data_utils.rand_name(namestart)
+ result = client.create_router(name=name,
+ admin_state_up=True,
+ tenant_id=tenant_id)
+ router = result['router']
+ self.assertEqual(router['name'], name)
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ client.delete_router,
+ router['id'])
+ return router
+
+ def _update_router_admin_state(self, router, admin_state_up):
+ kwargs = dict(admin_state_up=admin_state_up)
+ router = self.routers_client.update_router(
+ router['id'], **kwargs)['router']
+ self.assertEqual(admin_state_up, router['admin_state_up'])
+
+ def create_networks(self, networks_client=None,
+ routers_client=None, subnets_client=None,
+ tenant_id=None, dns_nameservers=None,
+ port_security_enabled=True):
+ """Create a network with a subnet connected to a router.
+
+ The baremetal driver is a special case since all nodes are
+ on the same shared network.
+
+ :param tenant_id: id of tenant to create resources in.
+ :param dns_nameservers: list of dns servers to send to subnet.
+ :returns: network, subnet, router
+ """
+ if CONF.network.shared_physical_network:
+ # NOTE(Shrews): This exception is for environments where tenant
+ # credential isolation is available, but network separation is
+ # not (the current baremetal case). Likely can be removed when
+ # test account mgmt is reworked:
+ # https://blueprints.launchpad.net/tempest/+spec/test-accounts
+ if not CONF.compute.fixed_network_name:
+ m = 'fixed_network_name must be specified in config'
+ raise lib_exc.InvalidConfiguration(m)
+ network = self._get_network_by_name(
+ CONF.compute.fixed_network_name)
+ router = None
+ subnet = None
+ else:
+ network = self._create_network(
+ networks_client=networks_client,
+ tenant_id=tenant_id,
+ port_security_enabled=port_security_enabled)
+ router = self._get_router(client=routers_client,
+ tenant_id=tenant_id)
+ subnet_kwargs = dict(network=network,
+ subnets_client=subnets_client,
+ routers_client=routers_client)
+ # use explicit check because empty list is a valid option
+ if dns_nameservers is not None:
+ subnet_kwargs['dns_nameservers'] = dns_nameservers
+ subnet = self._create_subnet(**subnet_kwargs)
+ if not routers_client:
+ routers_client = self.routers_client
+ router_id = router['id']
+ routers_client.add_router_interface(router_id,
+ subnet_id=subnet['id'])
+
+ # save a cleanup job to remove this association between
+ # router and subnet
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ routers_client.remove_router_interface, router_id,
+ subnet_id=subnet['id'])
+ return network, subnet, router
diff --git a/neutron_tempest_plugin/bgpvpn/scenario/test_bgpvpn_basic.py b/neutron_tempest_plugin/bgpvpn/scenario/test_bgpvpn_basic.py
new file mode 100644
index 0000000..937b0dc
--- /dev/null
+++ b/neutron_tempest_plugin/bgpvpn/scenario/test_bgpvpn_basic.py
@@ -0,0 +1,1354 @@
+# Copyright 2016 Cisco Systems, Inc.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import random
+
+import netaddr
+from neutron_lib.utils import test
+from oslo_concurrency import lockutils
+from oslo_log import log as logging
+from tempest.common import compute
+from tempest.common import utils
+from tempest.common import waiters
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib import decorators
+
+from neutron_tempest_plugin.bgpvpn import base
+from neutron_tempest_plugin.bgpvpn.scenario import manager
+
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+NET_A = 'A'
+NET_A_BIS = 'A-Bis'
+NET_B = 'B'
+NET_C = 'C'
+
+if "SUBNETPOOL_PREFIX_V4" in os.environ:
+ subnet_base = netaddr.IPNetwork(os.environ['SUBNETPOOL_PREFIX_V4'])
+ if subnet_base.prefixlen > 21:
+ raise Exception("if SUBNETPOOL_PREFIX_V4 is set, it needs to offer "
+ "space for at least 8 /24 subnets")
+else:
+ subnet_base = netaddr.IPNetwork("10.100.0.0/16")
+
+
+def assign_24(idx):
+ # how many addresses in a /24:
+ range_size = 2 ** (32 - 24)
+ return netaddr.cidr_merge(
+ subnet_base[range_size * idx:range_size * (idx + 1)])[0]
+
+
+S1A = assign_24(1)
+S2A = assign_24(2)
+S1B = assign_24(4)
+S2B = assign_24(6)
+S1C = assign_24(6)
+NET_A_S1 = str(S1A)
+NET_A_S2 = str(S2A)
+NET_B_S1 = str(S1B)
+NET_B_S2 = str(S2B)
+NET_C_S1 = str(S1C)
+IP_A_S1_1 = str(S1A[10])
+IP_B_S1_1 = str(S1B[20])
+IP_C_S1_1 = str(S1C[30])
+IP_A_S1_2 = str(S1A[30])
+IP_B_S1_2 = str(S1B[40])
+IP_A_S1_3 = str(S1A[50])
+IP_B_S1_3 = str(S1B[60])
+IP_A_S2_1 = str(S2A[50])
+IP_B_S2_1 = str(S2B[60])
+IP_A_BIS_S1_1 = IP_A_S1_1
+IP_A_BIS_S1_2 = IP_A_S1_2
+IP_A_BIS_S1_3 = IP_A_S1_3
+IP_A_BIS_S2_1 = IP_A_S2_1
+
+
+class TestBGPVPNBasic(base.BaseBgpvpnTest, manager.NetworkScenarioTest):
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestBGPVPNBasic, cls).setUpClass()
+ cls._rt_index = 0
+
+ @classmethod
+ @lockutils.synchronized('bgpvpn')
+ def new_rt(cls):
+ cls._rt_index += 1
+ return "64512:%d" % cls._rt_index
+
+ def setUp(self):
+ super(TestBGPVPNBasic, self).setUp()
+ self.servers_keypairs = {}
+ self.servers = []
+ self.server_fixed_ips = {}
+ self.ports = {}
+ self.networks = {}
+ self.subnets = {}
+ self.server_fips = {}
+ self._create_security_group_for_test()
+ self.RT1 = self.new_rt()
+ self.RT2 = self.new_rt()
+ self.RT3 = self.new_rt()
+ self.RT4 = self.new_rt()
+
+ @decorators.idempotent_id('afdd6cad-871a-4343-b97b-6319c76c815d')
+ @utils.services('compute', 'network')
+ def test_bgpvpn_basic(self):
+ """This test checks basic BGPVPN.
+
+ 1. Create networks A and B with their respective subnets
+ 2. Start up server 1 in network A
+ 3. Start up server 2 in network B
+ 4. Associate network A and network B to a given L3 BGPVPN
+ 5. Create router and connect it to network A
+ 6. Give a FIP to server 1
+ 7. Check that server 1 can ping server 2
+ """
+
+ self._create_networks_and_subnets()
+ self._create_servers()
+ self._create_l3_bgpvpn()
+ self._associate_all_nets_to_bgpvpn()
+ self._associate_fip_and_check_l3_bgpvpn()
+
+ @decorators.idempotent_id('8a5a6fac-313c-464b-9c5c-29d4e1c0a51e')
+ @utils.services('compute', 'network')
+ def test_bgpvpn_variant1(self):
+ """This test checks basic BGPVPN.
+
+ 1. Create networks A and B with their respective subnets
+ 2. Associate network A and network B to a given L3 BGPVPN
+ 3. Start up server 1 in network A
+ 4. Start up server 2 in network B
+ 5. Create router and connect it to network A
+ 6. Give a FIP to server 1
+ 7. Check that server 1 can ping server 2
+ """
+ self._create_networks_and_subnets()
+ self._create_l3_bgpvpn()
+ self._associate_all_nets_to_bgpvpn()
+ self._create_servers()
+ self._associate_fip_and_check_l3_bgpvpn()
+
+ @decorators.idempotent_id('e7468636-0816-4092-82ca-3590680ed00b')
+ @utils.services('compute', 'network')
+ def test_bgpvpn_variant2(self):
+ """This test checks basic BGPVPN.
+
+ 1. Create networks A and B with their respective subnets
+ 2. Start up server 1 in network A
+ 3. Start up server 2 in network B
+ 4. Create router and associate to network B
+ 5. Associate network A and network B to a given L3 BGPVPN
+ 6. Create router and connect it to network A
+ 7. Give a FIP to server 1
+ 8. Check that server 1 can ping server 2
+ """
+ self._create_networks_and_subnets()
+ self._create_servers()
+ self.router_b = self._create_fip_router(
+ subnet_id=self.subnets[NET_B][0]['id'])
+ self._create_l3_bgpvpn()
+ self._associate_all_nets_to_bgpvpn()
+ self._associate_fip_and_check_l3_bgpvpn()
+
+ @decorators.idempotent_id('7c66aa31-fb3a-4e15-8808-46eb361f153a')
+ @utils.services('compute', 'network')
+ def test_bgpvpn_variant3(self):
+ """This test checks basic BGPVPN.
+
+ 1. Create networks A and B with their respective subnets
+ 2. Start up server 1 in network A
+ 3. Start up server 2 in network B
+ 4. Create router and connect it to network B
+ 5. Associate network A and network B to a given L3 BGPVPN
+ 6. Delete router associated to network B
+ 7. Create router and connect it to network A
+ 8. Give a FIP to server 1
+ 9. Check that server 1 can ping server 2
+ """
+ self._create_networks_and_subnets()
+ self._create_servers()
+ self.router_b = self._create_fip_router(
+ subnet_id=self.subnets[NET_B][0]['id'])
+ self._create_l3_bgpvpn()
+ self._associate_all_nets_to_bgpvpn()
+ self.delete_router(self.router_b)
+ self._associate_fip_and_check_l3_bgpvpn()
+
+ @decorators.idempotent_id('973ab26d-c7d8-4a32-9aa9-2d7e6f406135')
+ @utils.services('compute', 'network')
+ def test_bgpvpn_variant4(self):
+ """This test checks basic BGPVPN.
+
+ 1. Create networks A and B with their respective subnets
+ 2. Start up server 1 in network A
+ 3. Start up server 2 in network B
+ 4. Associate network A and network B to a given L3 BGPVPN
+ 5. Create router and connect it to network B
+ 6. Create router and connect it to network A
+ 7. Give a FIP to server 1
+ 8. Check that server 2 can ping server 1
+ """
+ self._create_networks_and_subnets()
+ self._create_servers()
+ self._create_l3_bgpvpn()
+ self._associate_all_nets_to_bgpvpn()
+ self.router_b = self._create_fip_router(
+ subnet_id=self.subnets[NET_B][0]['id'])
+ self._associate_fip_and_check_l3_bgpvpn()
+
+ @decorators.idempotent_id('2ac0696b-e828-4299-9e94-5f9c4988d961')
+ @utils.services('compute', 'network')
+ def test_bgpvpn_variant5(self):
+ """This test checks basic BGPVPN.
+
+ 1. Create networks A and B with their respective subnets
+ 2. Create router and connect it to network B
+ 3. Associate network A and network B to a given L3 BGPVPN
+ 4. Start up server 1 in network A
+ 5. Start up server 2 in network B
+ 6. Create router and connect it to network A
+ 7. Give a FIP to server 1
+ 8. Check that server 2 can ping server 1
+ """
+ self._create_networks_and_subnets()
+ self.router_b = self._create_fip_router(
+ subnet_id=self.subnets[NET_B][0]['id'])
+ self._create_l3_bgpvpn()
+ self._associate_all_nets_to_bgpvpn()
+ self._create_servers()
+ self._associate_fip_and_check_l3_bgpvpn()
+
+ @decorators.idempotent_id('9081338e-a52e-46bb-a40e-bda24ec4b1bd')
+ @utils.services('compute', 'network')
+ def test_bgpvpn_variant6(self):
+ """This test checks basic BGPVPN.
+
+ 1. Create networks A and B with their respective subnets
+ 2. Associate network A and network B to a given L3 BGPVPN
+ 3. Create router and connect it to network B
+ 4. Start up server 1 in network A
+ 5. Start up server 2 in network B
+ 6. Create router and connect it to network A
+ 7. Give a FIP to server 1
+ 8. Check that server 2 can ping server 1
+ """
+ self._create_networks_and_subnets()
+ self._create_l3_bgpvpn()
+ self._associate_all_nets_to_bgpvpn()
+ self.router_b = self._create_fip_router(
+ subnet_id=self.subnets[NET_B][0]['id'])
+ self._create_servers()
+ self._associate_fip_and_check_l3_bgpvpn()
+
+ @decorators.idempotent_id('133497a1-2788-40f7-be01-b3b64b5ef8cd')
+ @utils.services('compute', 'network')
+ def test_bgpvpn_update_route_targets_disjoint_targets(self):
+ """This test checks basic BGPVPN route targets update.
+
+ 1. Create networks A and B with their respective subnets
+ 2. Start up server 1 in network A
+ 3. Start up server 2 in network B
+ 4. Create L3 BGPVPN with only RT defined
+ 5. Associate network A to a given L3 BGPVPN
+ 6. Create router and connect it to network A
+ 7. Give a FIP to server 1
+ 8. Check that server 1 can ping server 2
+        9. Update L3 BGPVPN to have eRT<>iRT and no RT, which is insufficient
+ for proper connectivity between network A and B
+ 10. Check that server 1 cannot ping server 2
+ 11. Update L3 BGPVPN to have again only RT defined
+ 12. Check that server 1 can ping again server 2
+ """
+ self._create_networks_and_subnets()
+ self._create_servers()
+ self._create_l3_bgpvpn(rts=[self.RT1], import_rts=[],
+ export_rts=[])
+ self._associate_all_nets_to_bgpvpn()
+ self._associate_fip_and_check_l3_bgpvpn()
+ self._update_l3_bgpvpn(rts=[], import_rts=[self.RT1],
+ export_rts=[self.RT2])
+ self._check_l3_bgpvpn(should_succeed=False)
+ self._update_l3_bgpvpn(rts=[self.RT1], import_rts=[], export_rts=[])
+ self._check_l3_bgpvpn()
+
+ @decorators.idempotent_id('bf417cad-0bc4-446a-b367-850aa619ca4f')
+ @utils.services('compute', 'network')
+ def test_bgpvpn_update_route_targets_common_target(self):
+ """This test checks basic BGPVPN route targets update.
+
+ 1. Create networks A and B with their respective subnets
+ 2. Start up server 1 in network A
+ 3. Start up server 2 in network B
+ 4. Create L3 BGPVPN with only RT defined
+ 5. Associate network A to a given L3 BGPVPN
+ 6. Create router and connect it to network A
+ 7. Give a FIP to server 1
+ 8. Check that server 1 can ping server 2
+ 9. Update L3 BGPVPN to have eRT<>iRT and RT=iRT
+ 10. Check that server 1 can ping server 2
+ 11. Update L3 BGPVPN to have again only RT defined
+ 12. Check that server 1 can ping again server 2
+ """
+ self._create_networks_and_subnets()
+ self._create_servers()
+ self._create_l3_bgpvpn(rts=[self.RT1], import_rts=[], export_rts=[])
+ self._associate_all_nets_to_bgpvpn()
+ self._associate_fip_and_check_l3_bgpvpn()
+ self._update_l3_bgpvpn(rts=[self.RT1], import_rts=[self.RT1],
+ export_rts=[self.RT2])
+ self._check_l3_bgpvpn()
+ self._update_l3_bgpvpn(rts=[self.RT1], import_rts=[], export_rts=[])
+ self._check_l3_bgpvpn()
+
+ @decorators.idempotent_id('08d4f40e-3cec-485b-9da2-76e67fbd9881')
+ @utils.services('compute', 'network')
+ def test_bgpvpn_update_route_targets_and_unassociated_net(self):
+ """This test checks basic BGPVPN route targets update.
+
+ 1. Create networks A and B with their respective subnets
+ 2. Start up server 1 in network A
+ 3. Start up server 2 in network B
+ 4. Create invalid L3 BGPVPN with eRT<>iRT that is insufficient
+ for proper connectivity between network A and B
+ 5. Associate network A to a given L3 BGPVPN
+ 6. Create router and connect it to network A
+ 7. Give a FIP to server 1
+ 8. Check that server 1 cannot ping server 2
+ 9. Associate network B to a given L3 BGPVPN
+ 10. Check that server 1 cannot ping server 2
+ 11. Update L3 BGPVPN to have only RT defined
+ 12. Check that server 1 can ping server 2
+ """
+ self._create_networks_and_subnets()
+ self._create_servers()
+ self.router = self._create_router_and_associate_fip(
+ 0, self.subnets[NET_A][0])
+ self._create_l3_bgpvpn(rts=[], export_rts=[self.RT1],
+ import_rts=[self.RT2])
+ self.bgpvpn_client.create_network_association(
+ self.bgpvpn['id'], self.networks[NET_A]['id'])
+ self._check_l3_bgpvpn(should_succeed=False)
+ self.bgpvpn_client.create_network_association(
+ self.bgpvpn['id'], self.networks[NET_B]['id'])
+ self._check_l3_bgpvpn(should_succeed=False)
+ self._update_l3_bgpvpn(rts=[self.RT1], import_rts=[], export_rts=[])
+ self._check_l3_bgpvpn()
+
+ @decorators.idempotent_id('c8bfd695-f731-47a6-86e3-3dfa492e08e0')
+ @utils.services('compute', 'network')
+ def test_bgpvpn_update_rt_and_keep_local_connectivity_variant1(self):
+ """This test checks basic BGPVPN route targets update.
+
+ 1. Create networks A and B with their respective subnets
+ 2. Start up server 1 in network A
+ 3. Start up server 2 in network B
+ 4. Start up server 3 in network A
+ 5. Start up server 4 in network B
+ 6. Create invalid L3 BGPVPN with eRT<>iRT that is insufficient
+ for proper connectivity between network A and B
+ 7. Associate network A to a given L3 BGPVPN
+ 8. Create router A and connect it to network A
+ 9. Give a FIP to server 1
+ 10. Check that server 1 cannot ping server 2
+ 11. Check that server 1 can ping server 3
+ 12. Associate network B to a given L3 BGPVPN
+ 13. Create router B and connect it to network B
+ 14. Give a FIP to server 2
+ 15. Check that server 1 still cannot ping server 2
+ 16. Check that server 2 can ping server 4
+ 17. Update L3 BGPVPN to have now only RT defined
+ 18. Check that server 1 can now ping server 2
+ 19. Check that server 1 still can ping server 3
+ 20. Check that server 2 still can ping server 4
+ """
+ self._create_networks_and_subnets()
+ self._create_l3_bgpvpn(rts=[], import_rts=[self.RT1],
+ export_rts=[self.RT2])
+ self._create_servers([[self.networks[NET_A], IP_A_S1_1],
+ [self.networks[NET_B], IP_B_S1_1],
+ [self.networks[NET_A], IP_A_S1_2],
+ [self.networks[NET_B], IP_B_S1_2]])
+ self.bgpvpn_client.create_network_association(
+ self.bgpvpn['id'], self.networks[NET_A]['id'])
+ self.router_a = self._create_router_and_associate_fip(
+ 0, self.subnets[NET_A][0])
+ self._check_l3_bgpvpn(should_succeed=False)
+ self._check_l3_bgpvpn(self.servers[0], self.servers[2])
+ self.bgpvpn_client.create_network_association(
+ self.bgpvpn['id'], self.networks[NET_B]['id'])
+ self.router_b = self._create_router_and_associate_fip(
+ 1, self.subnets[NET_B][0])
+ self._check_l3_bgpvpn(should_succeed=False)
+ self._check_l3_bgpvpn(self.servers[1], self.servers[3])
+ self._update_l3_bgpvpn(rts=[self.RT1], import_rts=[], export_rts=[])
+ self._check_l3_bgpvpn()
+ self._check_l3_bgpvpn(self.servers[0], self.servers[2])
+ self._check_l3_bgpvpn(self.servers[1], self.servers[3])
+
+ @decorators.idempotent_id('758a8731-5070-4b1e-9a66-d6ff05bb5be1')
+ @utils.services('compute', 'network')
+ def test_bgpvpn_update_rt_and_keep_local_connectivity_variant2(self):
+ """This test checks basic BGPVPN route targets update.
+
+ 1. Create networks A and B with their respective subnets
+ 2. Start up server 1 in network A
+ 3. Start up server 2 in network B
+ 4. Start up server 3 in network A
+ 5. Start up server 4 in network B
+ 6. Create invalid L3 BGPVPN with eRT<>iRT that is insufficient
+ for proper connectivity between network A and B
+ 7. Create router A and connect it to network A
+ 8. Give a FIP to server 1
+ 9. Create router B and connect it to network B
+ 10. Give a FIP to server 4
+ 11. Associate network A to a given L3 BGPVPN
+ 12. Check that server 1 cannot ping server 2
+ 13. Check that server 1 can ping server 3
+ 14. Associate router B to a given L3 BGPVPN
+ 15. Check that server 1 still cannot ping server 2
+ 16. Check that server 4 can ping server 2
+ 17. Update L3 BGPVPN to have now only RT defined
+ 18. Check that server 1 can now ping server 2
+ 19. Check that server 1 still can ping server 3
+ 20. Check that server 4 still can ping server 2
+ """
+ self._create_networks_and_subnets()
+ self._create_l3_bgpvpn(rts=[], import_rts=[self.RT1],
+ export_rts=[self.RT2])
+ self._create_servers([[self.networks[NET_A], IP_A_S1_1],
+ [self.networks[NET_B], IP_B_S1_1],
+ [self.networks[NET_A], IP_A_S1_2],
+ [self.networks[NET_B], IP_B_S1_2]])
+ self._create_router_and_associate_fip(
+ 0, self.subnets[NET_A][0])
+ router_b = self._create_router_and_associate_fip(
+ 3, self.subnets[NET_B][0])
+ self.bgpvpn_client.create_network_association(
+ self.bgpvpn['id'], self.networks[NET_A]['id'])
+ self._check_l3_bgpvpn(should_succeed=False)
+ self._check_l3_bgpvpn(self.servers[0], self.servers[2])
+ self.bgpvpn_client.create_router_association(self.bgpvpn['id'],
+ router_b['id'])
+ self._check_l3_bgpvpn(should_succeed=False)
+ self._check_l3_bgpvpn(self.servers[3], self.servers[1])
+ self._update_l3_bgpvpn(rts=[self.RT1], import_rts=[], export_rts=[])
+ self._check_l3_bgpvpn()
+ self._check_l3_bgpvpn(self.servers[0], self.servers[2])
+ self._check_l3_bgpvpn(self.servers[3], self.servers[1])
+
+ @decorators.idempotent_id('876b49bc-f34a-451b-ba3c-d74295838130')
+ @utils.services('compute', 'network')
+ @utils.requires_ext(extension='bgpvpn-routes-control', service='network')
+ def test_bgpvpn_port_association_local_pref(self):
+ """This test checks port association in BGPVPN.
+
+ 1. Create networks A and B with their respective subnets
+ 2. Create L3 BGPVPN
+ 3. Start up server 1 in network A
+ 4. Start up server 2 in network B
+ 5. Start up server 3 in network B
+ 6. Create router and connect it to network A
+ 7. Create router and connect it to network B
+ 8. Give a FIP to all servers
+ 9. Setup dummy HTTP service on server 2 and 3
+ 10. Configure ip forwarding on server 2
+ 11. Configure ip forwarding on server 3
+ 12. Configure alternative loopback address on server 2
+ 13. Configure alternative loopback address on server 3
+ 14. Associate network A to a given L3 BGPVPN
+ 15. Associate port of server 2 to a given L3 BGPVPN
+ with higher local_pref value
+ 16. Associate port of server 3 to a given L3 BGPVPN
+ with lower local_pref value
+ 17. Check that server 1 pings server's 2 alternative ip
+ 18. Update port association of server 2 to have now
+ lower local_pref value
+ 19. Update port association of server 3 to have now
+ higher local_pref value
+ 20. Check that server 1 pings now server's 3 alternative ip
+ """
+ self._create_networks_and_subnets(port_security=False)
+ self._create_l3_bgpvpn()
+ self._create_servers([[self.networks[NET_A], IP_A_S1_1],
+ [self.networks[NET_B], IP_B_S1_1],
+ [self.networks[NET_B], IP_B_S1_2]],
+ port_security=False)
+ self._create_fip_router(subnet_id=self.subnets[NET_A][0]['id'])
+ self._create_fip_router(subnet_id=self.subnets[NET_B][0]['id'])
+ self._associate_fip(0)
+ self._associate_fip(1)
+ self._associate_fip(2)
+ self._setup_http_server(1)
+ self._setup_http_server(2)
+ self._setup_ip_forwarding(1)
+ self._setup_ip_forwarding(2)
+ self._setup_ip_address(1, IP_C_S1_1)
+ self._setup_ip_address(2, IP_C_S1_1)
+
+ primary_port_routes = [{'type': 'prefix',
+ 'local_pref': 200,
+ 'prefix': NET_C_S1}]
+ alternate_port_routes = [{'type': 'prefix',
+ 'local_pref': 100,
+ 'prefix': NET_C_S1}]
+
+ self.bgpvpn_client.create_network_association(
+ self.bgpvpn['id'], self.networks[NET_A]['id'])
+
+ port_id_1 = self.ports[self.servers[1]['id']]['id']
+ body = self.bgpvpn_client.create_port_association(
+ self.bgpvpn['id'], port_id=port_id_1, routes=primary_port_routes)
+ port_association_1 = body['port_association']
+
+ port_id_2 = self.ports[self.servers[2]['id']]['id']
+ body = self.bgpvpn_client.create_port_association(
+ self.bgpvpn['id'], port_id=port_id_2, routes=alternate_port_routes)
+ port_association_2 = body['port_association']
+
+ destination_srv_1 = '%s:%s' % (self.servers[1]['name'],
+ self.servers[1]['id'])
+ destination_srv_2 = '%s:%s' % (self.servers[2]['name'],
+ self.servers[2]['id'])
+
+ self._check_l3_bgpvpn_by_specific_ip(
+ to_server_ip=IP_C_S1_1,
+ validate_server=destination_srv_1)
+
+ self.bgpvpn_client.update_port_association(
+ self.bgpvpn['id'], port_association_1['id'],
+ routes=alternate_port_routes)
+ self.bgpvpn_client.update_port_association(
+ self.bgpvpn['id'], port_association_2['id'],
+ routes=primary_port_routes)
+
+ self._check_l3_bgpvpn_by_specific_ip(
+ to_server_ip=IP_C_S1_1,
+ validate_server=destination_srv_2)
+
+ @decorators.idempotent_id('f762e6ac-920e-4d0f-aa67-02bdd4ab8433')
+ @utils.services('compute', 'network')
+ def test_bgpvpn_tenant_separation_and_local_connectivity(self):
+ """This test checks tenant separation for BGPVPN.
+
+ 1. Create networks A with subnet S1 and S2
+ 2. Create networks A-Bis with subnet S1 and S2 (like for network A)
+ 3. Create L3 BGPVPN for network A with self.RT1
+ 4. Create L3 BGPVPN for network A-Bis with self.RT2
+ 5. Associate network A to a given L3 BGPVPN
+ 6. Associate network A-Bis to a given L3 BGPVPN
+ 7. Start up server 1 in network A and subnet S1
+ 8. Start up server 2 in network A-Bis and subnet S1
+ 9. Start up server 3 in network A and subnet S1
+ 10. Start up server 4 in network A-Bis and subnet S1
+ 11. Start up server 5 in network A and subnet S1
+ 12. Create router A and connect it to network A
+ 13. Create router A-Bis and connect it to network A-Bis
+ 14. Give a FIP to all servers
+ 15. Setup dummy HTTP service on server 2 and 3
+ 16. Check that server 1 pings server 3 instead of server 2
+ 17. Check that server 1 can ping server 3
+ 18. Check that server 2 cannot ping server 1
+ 19. Check that server 2 pings itself instead of server 3
+ 20. Check that server 2 can ping server 4
+ 21. Check that server 2 pings server 4 instead of server 5
+ """
+ self._create_networks_and_subnets([NET_A, NET_A_BIS],
+ [[NET_A_S1, NET_A_S2],
+ [NET_A_S1, NET_A_S2]])
+ bgpvpn_a = self._create_l3_bgpvpn(name='test-l3-bgpvpn-a',
+ rts=[self.RT1])
+ bgpvpn_a_bis = self._create_l3_bgpvpn(name='test-l3-bgpvpn-a-bis',
+ rts=[self.RT2])
+ self.bgpvpn_client.create_network_association(
+ bgpvpn_a['id'], self.networks[NET_A]['id'])
+ self.bgpvpn_client.create_network_association(
+ bgpvpn_a_bis['id'], self.networks[NET_A_BIS]['id'])
+ self._create_servers([[self.networks[NET_A], IP_A_S1_1],
+ [self.networks[NET_A_BIS], IP_A_BIS_S1_2],
+ [self.networks[NET_A], IP_A_S1_2],
+ [self.networks[NET_A_BIS], IP_A_BIS_S1_3],
+ [self.networks[NET_A], IP_A_S1_3]])
+ self._create_fip_router(subnet_id=self.subnets[NET_A][0]['id'])
+ self._create_fip_router(subnet_id=self.subnets[NET_A_BIS][0]['id'])
+ self._associate_fip(0)
+ self._associate_fip(1)
+ self._associate_fip(2)
+ self._associate_fip(3)
+ self._associate_fip(4)
+ self._setup_http_server(1)
+ self._setup_http_server(2)
+ self._setup_http_server(3)
+ self._setup_http_server(4)
+ self._check_l3_bgpvpn(self.servers[0], self.servers[1],
+ should_succeed=False, validate_server=True)
+ self._check_l3_bgpvpn(self.servers[0], self.servers[2],
+ validate_server=True)
+ self._check_l3_bgpvpn(self.servers[1], self.servers[0],
+ should_succeed=False)
+ self._check_l3_bgpvpn(self.servers[1], self.servers[2],
+ should_succeed=False, validate_server=True)
+ self._check_l3_bgpvpn(self.servers[1], self.servers[3],
+ validate_server=True)
+ self._check_l3_bgpvpn(self.servers[1], self.servers[4],
+ should_succeed=False, validate_server=True)
+
+ @decorators.idempotent_id('3b44b2f4-f514-4004-8623-2682bc46bb07')
+ @utils.services('compute', 'network')
+ @utils.requires_ext(extension='bgpvpn-routes-control', service='network')
+ def test_bgpvpn_port_association_create_and_update(self):
+ """This test checks port association in BGPVPN.
+
+ 1. Create networks A and B with their respective subnets
+ 2. Create L3 BGPVPN
+ 3. Create router and connect it to network A
+ 4. Create router and connect it to network B
+ 5. Start up server 1 in network A
+ 6. Start up server 2 in network B
+ 7. Give a FIP to all servers
+ 8. Configure ip forwarding on server 2
+ 9. Configure alternative loopback address on server 2
+ 10. Associate network A to a given L3 BGPVPN
+ 11. Associate port of server 2 to a given L3 BGPVPN
+ 12. Check that server 1 can ping server's 2 alternative ip
+ 13. Update created before port association by routes removal
+ 14. Check that server 1 cannot ping server's 2 alternative ip
+ """
+ self._create_networks_and_subnets(port_security=False)
+ self._create_l3_bgpvpn()
+ self._create_servers([[self.networks[NET_A], IP_A_S1_1],
+ [self.networks[NET_B], IP_B_S1_1]],
+ port_security=False)
+ self._create_fip_router(subnet_id=self.subnets[NET_A][0]['id'])
+ self._create_fip_router(subnet_id=self.subnets[NET_B][0]['id'])
+ self._associate_fip(0)
+ self._associate_fip(1)
+
+ # preliminary check that no connectivity to 192.168.0.1 initially
+ # exists
+ self._check_l3_bgpvpn_by_specific_ip(
+ should_succeed=False, to_server_ip=IP_C_S1_1)
+
+ self._setup_ip_forwarding(1)
+ self._setup_ip_address(1, IP_C_S1_1)
+ self.bgpvpn_client.create_network_association(
+ self.bgpvpn['id'], self.networks[NET_A]['id'])
+ port_id = self.ports[self.servers[1]['id']]['id']
+ port_routes = [{'type': 'prefix',
+ 'prefix': NET_C_S1}]
+ body = self.bgpvpn_client.create_port_association(self.bgpvpn['id'],
+ port_id=port_id,
+ routes=port_routes)
+ port_association = body['port_association']
+ self._check_l3_bgpvpn_by_specific_ip(
+ to_server_ip=IP_C_S1_1)
+ self.bgpvpn_client.update_port_association(
+ self.bgpvpn['id'], port_association['id'], routes=[])
+ self._check_l3_bgpvpn_by_specific_ip(
+ should_succeed=False, to_server_ip=IP_C_S1_1)
+
+ @decorators.idempotent_id('d92a8a18-c4d0-40d5-8592-713d7dae7d25')
+ @utils.services('compute', 'network')
+ @utils.requires_ext(extension='bgpvpn-routes-control', service='network')
+ @test.unstable_test("bug 1780205")
+ def test_port_association_many_bgpvpn_routes(self):
+        """This test checks port association with many routes in BGPVPN.
+
+        1. Create networks A and B with their respective subnets
+        2. Create L3 BGPVPN
+        3. Create router and connect it to network A
+        4. Create router and connect it to network B
+        5. Start up server 1 in network A
+        6. Start up server 2 in network B
+        7. Give a FIP to all servers
+        8. Configure ip forwarding on server 2
+        9. Configure many alternative loopback addresses on server 2
+        10. Associate network A to a given L3 BGPVPN
+        11. Associate port of server 2 to the BGPVPN with a route per loopback
+        12. Check that server 1 can ping a sample of server's 2 alternative ips
+        13. Update created before port association by routes removal
+        14. Check that server 1 cannot ping server's 2 alternative ips
+ """
+ AMOUNT_LOOPBACKS = 90
+ START_IP_LOOPBACKS = 31
+ SAMPLE_SIZE = 10
+ LOOPBACKS = [str(ip) for ip in
+ S1C[START_IP_LOOPBACKS:
+ START_IP_LOOPBACKS + AMOUNT_LOOPBACKS]]
+ SUB_LOOPBACKS = [LOOPBACKS[0], LOOPBACKS[-1]]
+
+ self._create_networks_and_subnets(port_security=False)
+ self._create_l3_bgpvpn()
+ self._create_servers([[self.networks[NET_A], IP_A_S1_1],
+ [self.networks[NET_B], IP_B_S1_1]],
+ port_security=False)
+ self._create_fip_router(subnet_id=self.subnets[NET_A][0]['id'])
+ self._create_fip_router(subnet_id=self.subnets[NET_B][0]['id'])
+ self._associate_fip(0)
+ self._associate_fip(1)
+
+ for ip in SUB_LOOPBACKS:
+            LOG.debug("Preliminary check that no connectivity exists")
+ self._check_l3_bgpvpn_by_specific_ip(
+ should_succeed=False, to_server_ip=ip)
+
+ self._setup_ip_forwarding(1)
+
+ self._setup_range_ip_address(1, LOOPBACKS)
+
+ self.bgpvpn_client.create_network_association(
+ self.bgpvpn['id'], self.networks[NET_A]['id'])
+ port_id = self.ports[self.servers[1]['id']]['id']
+ port_routes = [{'type': 'prefix',
+ 'prefix': ip + "/32"}
+ for ip in LOOPBACKS]
+
+ body = self.bgpvpn_client.create_port_association(self.bgpvpn['id'],
+ port_id=port_id,
+ routes=port_routes)
+ port_association = body['port_association']
+
+ for ip in random.sample(LOOPBACKS, SAMPLE_SIZE):
+ LOG.debug("Check that server 1 can "
+ "ping server 2 alternative ip %s", ip)
+ self._check_l3_bgpvpn_by_specific_ip(
+ to_server_ip=ip)
+
+ self.bgpvpn_client.update_port_association(
+ self.bgpvpn['id'], port_association['id'], routes=[])
+
+ for ip in SUB_LOOPBACKS:
+ LOG.debug("Check that server 1 can't "
+ "ping server 2 alternative ip %s", ip)
+ self._check_l3_bgpvpn_by_specific_ip(
+ should_succeed=False, to_server_ip=ip)
+
+ @decorators.idempotent_id('9c3280b5-0b32-4562-800c-0b50d9d52bfd')
+ @utils.services('compute', 'network')
+ @utils.requires_ext(extension='bgpvpn-routes-control', service='network')
+ def test_bgpvpn_port_association_create_and_delete(self):
+ """This test checks port association in BGPVPN.
+
+ 1. Create networks A and B with their respective subnets
+ 2. Create L3 BGPVPN
+ 3. Create router and connect it to network A
+ 4. Create router and connect it to network B
+ 5. Start up server 1 in network A
+ 6. Start up server 2 in network B
+ 7. Give a FIP to all servers
+ 8. Configure ip forwarding on server 2
+ 9. Configure alternative loopback address on server 2
+ 10. Associate network A to a given L3 BGPVPN
+ 11. Associate port of server 2 to a given L3 BGPVPN
+ 12. Check that server 1 can ping server's 2 alternative ip
+ 13. Remove created before port association
+ 14. Check that server 1 cannot ping server's 2 alternative ip
+ """
+ self._create_networks_and_subnets(port_security=False)
+ self._create_l3_bgpvpn()
+ self._create_servers([[self.networks[NET_A], IP_A_S1_1],
+ [self.networks[NET_B], IP_B_S1_1]],
+ port_security=False)
+ self._create_fip_router(subnet_id=self.subnets[NET_A][0]['id'])
+ self._create_fip_router(subnet_id=self.subnets[NET_B][0]['id'])
+ self._associate_fip(0)
+ self._associate_fip(1)
+
+ # preliminary check that no connectivity to 192.168.0.1 initially
+ # exists
+ self._check_l3_bgpvpn_by_specific_ip(
+ should_succeed=False, to_server_ip=IP_C_S1_1)
+
+ self._setup_ip_forwarding(1)
+ self._setup_ip_address(1, IP_C_S1_1)
+ self.bgpvpn_client.create_network_association(
+ self.bgpvpn['id'], self.networks[NET_A]['id'])
+ port_id = self.ports[self.servers[1]['id']]['id']
+ port_routes = [{'type': 'prefix',
+ 'prefix': NET_C_S1}]
+ body = self.bgpvpn_client.create_port_association(self.bgpvpn['id'],
+ port_id=port_id,
+ routes=port_routes)
+ port_association = body['port_association']
+ self._check_l3_bgpvpn_by_specific_ip(
+ to_server_ip=IP_C_S1_1)
+ self.bgpvpn_client.delete_port_association(
+ self.bgpvpn['id'], port_association['id'])
+ self._check_l3_bgpvpn_by_specific_ip(
+ should_succeed=False, to_server_ip=IP_C_S1_1)
+
+ @decorators.idempotent_id('73f629fa-fdae-40fc-8a7e-da3aedcf797a')
+ @utils.services('compute', 'network')
+ @utils.requires_ext(extension='bgpvpn-routes-control', service='network')
+ def test_bgpvpn_port_association_bgpvpn_route(self):
+ """Test port association routes of type 'bgpvpn'
+
+ In this test we use a Port Association with a 'bgpvpn' route
+ to have VM 1 in network A, reach a VM 3 in network C via the Port
+ of a VM 2 (on network B), and vice-versa. For A->C traffic, one Port
+ Association associates the port of VM 2 to a BGPVPN, with a route of
+ type 'bgpvpn' redistributing routes from network C. For C->A traffic,
+ another Port Association associates the port of VM 2 to a BGPVPN, with
+ a route of type 'bgpvpn' redistributing routes from network A.
+
+ The use of RT in this test is a bit complex, and would be much
+ simpler if we were using a VM with two interfaces.
+
+ We confirm that VM 1 can join VM 3, and we confirm that the traffic
+        actually goes through VM 2, by turning ip_forwarding off then on in VM2.
+ """
+ # create networks A, B and C with their respective subnets
+ self._create_networks_and_subnets(port_security=False)
+
+ RT_to_A = self.RT1
+ RT_from_A = self.RT2
+ RT_to_C = self.RT3
+ RT_from_C = self.RT4
+
+ # Create L3 BGPVPN for network A
+ bgpvpn_a = self._create_l3_bgpvpn(name="test-vpn-a",
+ import_rts=[RT_to_A],
+ export_rts=[RT_from_A])
+
+ # Create L3 BGPVPN for network C
+ bgpvpn_c = self._create_l3_bgpvpn(name="test-vpn-c",
+ import_rts=[RT_to_C],
+ export_rts=[RT_from_C])
+
+ # Create L3 BGPVPN for network B
+ bgpvpn_b = self._create_l3_bgpvpn(name="test-vpn-b",
+ import_rts=[RT_from_C, RT_from_A])
+
+ # BGPVPNs to only export into A and C
+ bgpvpn_to_a = self._create_l3_bgpvpn(name="test-vpn-to-a",
+ export_rts=[RT_to_A])
+ bgpvpn_to_c = self._create_l3_bgpvpn(name="test-vpn-to-c",
+ export_rts=[RT_to_C])
+
+ # create the three VMs
+ self._create_servers([[self.networks[NET_A], IP_A_S1_1],
+ [self.networks[NET_B], IP_B_S1_1],
+ [self.networks[NET_C], IP_C_S1_1]],
+ port_security=False)
+ vm1, vm2, vm3 = self.servers
+
+ # Create one router for each of network A, B, C and give floating
+ # IPs to servers 1, 2, 3
+ self._create_fip_router(subnet_id=self.subnets[NET_A][0]['id'])
+ self._create_fip_router(subnet_id=self.subnets[NET_B][0]['id'])
+ self._create_fip_router(subnet_id=self.subnets[NET_C][0]['id'])
+ self._associate_fip(0)
+ self._associate_fip(1)
+ self._associate_fip(2)
+
+        # NOTE(review): enables forwarding on VM1 (index 0); VM2 stays off — verify intent
+ self._setup_ip_forwarding(0)
+
+ # connect network A to its BGPVPN
+ self.bgpvpn_client.create_network_association(
+ bgpvpn_a['id'], self.networks[NET_A]['id'])
+
+ # connect network B to its BGPVPN
+ self.bgpvpn_client.create_network_association(
+ bgpvpn_b['id'], self.networks[NET_B]['id'])
+
+ # connect network C to its BGPVPN
+ self.bgpvpn_client.create_network_association(
+ bgpvpn_c['id'], self.networks[NET_C]['id'])
+
+ # create port associations for A->C traffic
+ # (leak routes imported by BGPVPN B -- which happen to include the
+ # routes net C -- into net A)
+ self.bgpvpn_client.create_port_association(
+ bgpvpn_to_a['id'],
+ port_id=self.ports[vm2['id']]['id'],
+ routes=[{'type': 'bgpvpn',
+ 'bgpvpn_id': bgpvpn_b['id']},
+ ])
+
+ # create port associations for C->A traffic
+ # (leak routes imported by BGPVPN B -- which happen to include the
+ # routes from net A -- into net C)
+ body = self.bgpvpn_client.create_port_association(
+ bgpvpn_to_c['id'],
+ port_id=self.ports[vm2['id']]['id'],
+ routes=[{'type': 'bgpvpn',
+ 'bgpvpn_id': bgpvpn_a['id']}])
+
+ port_association = body['port_association']
+
+ # check that we don't have connectivity
+ # (because destination is supposed to be reachable via VM2, which
+ # still has ip_forwarding disabled)
+ self._check_l3_bgpvpn_by_specific_ip(from_server=vm1,
+ to_server_ip=IP_C_S1_1,
+ should_succeed=False)
+
+ # enable IP forwarding on VM2
+ self._setup_ip_forwarding(1)
+
+        # VM1 should now be able to reach VM3 (via VM2)
+ self._check_l3_bgpvpn_by_specific_ip(from_server=vm1,
+ to_server_ip=IP_C_S1_1,
+ should_succeed=True)
+
+        # remove the port association for C->A traffic
+        self.bgpvpn_client.delete_port_association(bgpvpn_to_c['id'],
+                                                   port_association['id'])
+
+ # check that connectivity is actually interrupted
+ self._check_l3_bgpvpn_by_specific_ip(from_server=vm1,
+ to_server_ip=IP_C_S1_1,
+ should_succeed=False)
+
+ @decorators.idempotent_id('8478074e-22df-4234-b02b-61257b475b18')
+ @utils.services('compute', 'network')
+ def test_bgpvpn_negative_ping_to_unassociated_net(self):
+ """This test checks basic BGPVPN.
+
+ 1. Create networks A and B with their respective subnets
+ 2. Create L3 BGPVPN
+ 3. Associate network A to a given L3 BGPVPN
+ 4. Start up server 1 in network A
+ 5. Start up server 2 in network B
+ 6. Create router and connect it to network A
+ 7. Give a FIP to server 1
+ 8. Check that server 1 cannot ping server 2
+ """
+ self._create_networks_and_subnets()
+ self._create_l3_bgpvpn()
+ self.bgpvpn_client.create_network_association(
+ self.bgpvpn['id'], self.networks[NET_A]['id'])
+ self._create_servers()
+ self._associate_fip_and_check_l3_bgpvpn(should_succeed=False)
+
+ @decorators.idempotent_id('b6d219b2-90bb-431f-a566-bf6a780d1578')
+ @utils.services('compute', 'network')
+ def test_bgpvpn_negative_disjoint_import_export(self):
+ """This test checks basic BGPVPN.
+
+ 1. Create networks A and B with their respective subnets
+ 2. Create invalid L3 BGPVPN with eRT<>iRT that is insufficient
+ for proper connectivity between network A and B
+ 3. Associate network A and B to a given L3 BGPVPN
+ 4. Start up server 1 in network A
+ 5. Start up server 2 in network B
+ 6. Create router and connect it to network A
+ 7. Give a FIP to server 1
+ 8. Check that server 1 cannot ping server 2
+ """
+ self._create_networks_and_subnets()
+ self._create_l3_bgpvpn(rts=[], import_rts=[self.RT1],
+ export_rts=[self.RT2])
+ self._associate_all_nets_to_bgpvpn()
+ self._create_servers()
+ self._associate_fip_and_check_l3_bgpvpn(should_succeed=False)
+
+ @decorators.idempotent_id('dc92643f-1b2c-4a3e-bc5e-ea780d721ef7')
+ @utils.services('compute', 'network')
+ def test_bgpvpn_negative_delete_bgpvpn(self):
+ """This test checks BGPVPN delete.
+
+ 1. Create networks A and B with their respective subnets
+ 2. Create L3 BGPVPN
+ 3. Associate network A and network B to a given L3 BGPVPN
+ 4. Start up server 1 in network A
+ 5. Start up server 2 in network B
+ 6. Create router and connect it to network A
+ 7. Give a FIP to server 1
+ 8. Check that server 1 can ping server 2
+ 9. Delete L3 BGPVPN
+ 10. Check that server 1 cannot ping server 2
+ """
+ self._create_networks_and_subnets()
+ self._create_l3_bgpvpn()
+ self._associate_all_nets_to_bgpvpn()
+ self._create_servers()
+ self._associate_fip_and_check_l3_bgpvpn()
+ self.delete_bgpvpn(self.bgpvpn_admin_client, self.bgpvpn)
+ self._check_l3_bgpvpn(should_succeed=False)
+
+ @decorators.idempotent_id('2e6bf893-1410-4ef6-9948-1877f3a8f3d1')
+ @utils.services('compute', 'network')
+ def test_bgpvpn_negative_delete_net_association(self):
+ """This test checks BGPVPN net association delete.
+
+ 1. Create networks A and B with their respective subnets
+ 2. Create L3 BGPVPN
+ 3. Associate network A and network B to a given L3 BGPVPN
+ 4. Start up server 1 in network A
+ 5. Start up server 2 in network B
+ 6. Create router and connect it to network A
+ 7. Give a FIP to server 1
+ 8. Check that server 1 can ping server 2
+ 9. Delete association of network A
+ 10. Check that server 1 cannot ping server 2
+ """
+ self._create_networks_and_subnets()
+ self._create_l3_bgpvpn()
+        body = self.bgpvpn_client.create_network_association(
+            self.bgpvpn['id'], self.networks[NET_A]['id'])
+        assoc_a = body['network_association']
+        self.bgpvpn_client.create_network_association(
+            self.bgpvpn['id'], self.networks[NET_B]['id'])
+        self._create_servers()
+        self._associate_fip_and_check_l3_bgpvpn()
+        self.bgpvpn_admin_client.delete_network_association(self.bgpvpn['id'],
+                                                            assoc_a['id'])
+ self._check_l3_bgpvpn(should_succeed=False)
+
+ @decorators.idempotent_id('9ebf4342-4448-4d63-98f9-44d3a606b6cf')
+ @utils.services('compute', 'network')
+ def test_bgpvpn_negative_delete_router_association(self):
+ """This test checks BGPVPN router association delete.
+
+ 1. Create networks A and B with their respective subnets
+ 2. Create router and connect it to network B
+ 3. Create L3 BGPVPN
+ 4. Associate network A to a given L3 BGPVPN
+ 5. Associate router B to a given L3 BGPVPN
+ 6. Start up server 1 in network A
+ 7. Start up server 2 in network B
+ 8. Create router and connect it to network A
+ 9. Give a FIP to server 1
+ 10. Check that server 1 can ping server 2
+ 11. Delete association of router B
+ 12. Check that server 1 cannot ping server 2
+ """
+ self._create_networks_and_subnets()
+ router_b = self._create_fip_router(
+ subnet_id=self.subnets[NET_B][0]['id'])
+ self._create_l3_bgpvpn()
+ self.bgpvpn_client.create_network_association(
+ self.bgpvpn['id'], self.networks[NET_A]['id'])
+ body = self.bgpvpn_client.create_router_association(self.bgpvpn['id'],
+ router_b['id'])
+ assoc_b = body['router_association']
+ self._create_servers()
+ self._associate_fip_and_check_l3_bgpvpn()
+ self.bgpvpn_admin_client.delete_router_association(self.bgpvpn['id'],
+ assoc_b['id'])
+ self._check_l3_bgpvpn(should_succeed=False)
+
+ @decorators.idempotent_id('be4471b3-5f57-4022-b719-b45a673a728b')
+ @utils.services('compute', 'network')
+ def test_bgpvpn_negative_update_to_disjoint_import_export(self):
+ """This test checks basic BGPVPN route targets update.
+
+ 1. Create networks A and B with their respective subnets
+ 2. Create L3 BGPVPN with only RT defined
+ 3. Start up server 1 in network A
+ 4. Start up server 2 in network B
+ 5. Associate network A to a given L3 BGPVPN
+ 6. Create router and connect it to network A
+ 7. Give a FIP to server 1
+ 8. Check that server 1 can ping server 2
+ 9. Update L3 BGPVPN to have eRT<>iRT and no RT what is insufficient
+ for proper connectivity between network A and B
+ 10. Check that server 1 cannot ping server 2
+ """
+ self._create_networks_and_subnets()
+ self._create_l3_bgpvpn(rts=[self.RT1], import_rts=[], export_rts=[])
+ self._create_servers()
+ self._associate_all_nets_to_bgpvpn()
+ self._associate_fip_and_check_l3_bgpvpn()
+ self._update_l3_bgpvpn(rts=[], import_rts=[self.RT1],
+ export_rts=[self.RT2])
+ self._check_l3_bgpvpn(should_succeed=False)
+
+ @decorators.idempotent_id('fb37a546-7263-4ffe-a42c-77eca377ff1a')
+ @utils.services('compute', 'network')
+ def test_bgpvpn_negative_update_to_empty_rt_list(self):
+ """This test checks basic BGPVPN route targets update.
+
+ 1. Create networks A and B with their respective subnets
+ 2. Create L3 BGPVPN with only RT defined
+ 3. Start up server 1 in network A
+ 4. Start up server 2 in network B
+ 5. Associate network A and B to a given L3 BGPVPN
+ 6. Create router and connect it to network A
+ 7. Give a FIP to server 1
+ 8. Check that server 1 can ping server 2
+ 9. Update L3 BGPVPN to empty RT list what is insufficient
+ for proper connectivity between network A and B
+ 10. Check that server 1 cannot ping server 2
+ """
+ self._create_networks_and_subnets()
+ self._create_l3_bgpvpn(rts=[self.RT1], import_rts=[], export_rts=[])
+ self._create_servers()
+ self._associate_all_nets_to_bgpvpn()
+ self._associate_fip_and_check_l3_bgpvpn()
+ self._update_l3_bgpvpn(rts=[], import_rts=[], export_rts=[])
+ self._check_l3_bgpvpn(should_succeed=False)
+
+ def _create_security_group_for_test(self):
+ self.security_group = self._create_security_group(
+ tenant_id=self.bgpvpn_client.tenant_id)
+
+ def _create_networks_and_subnets(self, names=None, subnet_cidrs=None,
+ port_security=True):
+ if not names:
+ names = [NET_A, NET_B, NET_C]
+ if not subnet_cidrs:
+ subnet_cidrs = [[NET_A_S1], [NET_B_S1], [NET_C_S1]]
+        for (name, cidrs) in zip(names, subnet_cidrs):
+            network = self._create_network(
+                namestart=name, port_security_enabled=port_security)
+            self.networks[name] = network
+            self.subnets[name] = []
+            for (j, cidr) in enumerate(cidrs):
+ sub_name = "subnet-%s-%d" % (name, j + 1)
+ subnet = self._create_subnet_with_cidr(network,
+ namestart=sub_name,
+ cidr=cidr,
+ ip_version=4)
+ self.subnets[name].append(subnet)
+
+ def _create_subnet_with_cidr(self, network, subnets_client=None,
+ namestart='subnet-smoke', **kwargs):
+ if not subnets_client:
+ subnets_client = self.subnets_client
+ tenant_cidr = kwargs.get('cidr')
+ subnet = dict(
+ name=data_utils.rand_name(namestart),
+ network_id=network['id'],
+ tenant_id=network['tenant_id'],
+ **kwargs)
+ result = subnets_client.create_subnet(**subnet)
+ self.assertIsNotNone(result, 'Unable to allocate tenant network')
+ subnet = result['subnet']
+ self.assertEqual(subnet['cidr'], tenant_cidr)
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ subnets_client.delete_subnet, subnet['id'])
+ return subnet
+
+ def _create_fip_router(self, client=None, public_network_id=None,
+ subnet_id=None):
+ router = self._create_router(client, namestart='router-')
+ router_id = router['id']
+ if public_network_id is None:
+ public_network_id = CONF.network.public_network_id
+ if client is None:
+ client = self.routers_client
+ kwargs = {'external_gateway_info': {'network_id': public_network_id}}
+ router = client.update_router(router_id, **kwargs)['router']
+ if subnet_id is not None:
+ client.add_router_interface(router_id, subnet_id=subnet_id)
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ client.remove_router_interface, router_id,
+ subnet_id=subnet_id)
+ return router
+
+ def _associate_fip(self, server_index):
+ server = self.servers[server_index]
+ fip = self.create_floating_ip(
+ server, external_network_id=CONF.network.public_network_id,
+ port_id=self.ports[server['id']]['id'])
+ self.server_fips[server['id']] = fip
+ return fip
+
+ def _create_router_and_associate_fip(self, server_index, subnet):
+ router = self._create_fip_router(subnet_id=subnet['id'])
+ self._associate_fip(server_index)
+ return router
+
+ def _create_server(self, name, keypair, network, ip_address,
+ security_group_ids, clients, port_security):
+ security_groups = []
+ if port_security:
+ security_groups = security_group_ids
+ create_port_body = {'fixed_ips': [{'ip_address': ip_address}],
+ 'namestart': 'port-smoke',
+ 'security_groups': security_groups}
+ port = self._create_port(network_id=network['id'],
+ client=clients.ports_client,
+ **create_port_body)
+ create_server_kwargs = {
+ 'key_name': keypair['name'],
+ 'networks': [{'uuid': network['id'], 'port': port['id']}]
+ }
+ body, servers = compute.create_test_server(
+ clients, wait_until='ACTIVE', name=name, **create_server_kwargs)
+ self.addCleanup(waiters.wait_for_server_termination,
+ clients.servers_client, body['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ clients.servers_client.delete_server, body['id'])
+ server = clients.servers_client.show_server(body['id'])['server']
+ LOG.debug('Created server: %s with status: %s', server['id'],
+ server['status'])
+ self.ports[server['id']] = port
+ return server
+
+ def _create_servers(self, ports_config=None, port_security=True):
+ keypair = self.create_keypair()
+ security_group_ids = [self.security_group['id']]
+ if ports_config is None:
+ ports_config = [[self.networks[NET_A], IP_A_S1_1],
+ [self.networks[NET_B], IP_B_S1_1]]
+ for (i, port_config) in enumerate(ports_config):
+ network = port_config[0]
+ server = self._create_server(
+ 'server-' + str(i + 1), keypair, network, port_config[1],
+ security_group_ids, self.os_primary, port_security)
+ self.servers.append(server)
+ self.servers_keypairs[server['id']] = keypair
+ self.server_fixed_ips[server['id']] = (
+ server['addresses'][network['name']][0]['addr'])
+ self.assertTrue(self.servers_keypairs)
+
+ def _create_l3_bgpvpn(self, name='test-l3-bgpvpn', rts=None,
+ import_rts=None, export_rts=None):
+ if rts is None and import_rts is None and export_rts is None:
+ rts = [self.RT1]
+ import_rts = import_rts or []
+ export_rts = export_rts or []
+ self.bgpvpn = self.create_bgpvpn(
+ self.bgpvpn_admin_client, tenant_id=self.bgpvpn_client.tenant_id,
+ name=name, route_targets=rts, export_targets=export_rts,
+ import_targets=import_rts)
+ return self.bgpvpn
+
+ def _update_l3_bgpvpn(self, rts=None, import_rts=None, export_rts=None,
+ bgpvpn=None):
+ bgpvpn = bgpvpn or self.bgpvpn
+ if rts is None:
+ rts = [self.RT1]
+ import_rts = import_rts or []
+ export_rts = export_rts or []
+ LOG.debug('Updating targets in BGPVPN %s', bgpvpn['id'])
+ self.bgpvpn_admin_client.update_bgpvpn(bgpvpn['id'],
+ route_targets=rts,
+ export_targets=export_rts,
+ import_targets=import_rts)
+
+ def _associate_all_nets_to_bgpvpn(self, bgpvpn=None):
+ bgpvpn = bgpvpn or self.bgpvpn
+ for network in self.networks.values():
+ self.bgpvpn_client.create_network_association(
+ bgpvpn['id'], network['id'])
+ LOG.debug('BGPVPN network associations completed')
+
+ def _setup_ssh_client(self, server):
+ server_fip = self.server_fips[server['id']][
+ 'floating_ip_address']
+ private_key = self.servers_keypairs[server['id']][
+ 'private_key']
+ ssh_client = self.get_remote_client(server_fip,
+ private_key=private_key)
+ return ssh_client
+
+ def _setup_http_server(self, server_index):
+ server = self.servers[server_index]
+ ssh_client = self._setup_ssh_client(server)
+ ssh_client.exec_command("sudo nc -kl -p 80 -e echo '%s:%s' &"
+ % (server['name'], server['id']))
+
+ def _setup_ip_forwarding(self, server_index):
+ server = self.servers[server_index]
+ ssh_client = self._setup_ssh_client(server)
+ ssh_client.exec_command("sudo sysctl -w net.ipv4.ip_forward=1")
+
+ def _setup_ip_address(self, server_index, cidr, device=None):
+        self._setup_range_ip_address(server_index, [cidr], device=device)
+
+ def _setup_range_ip_address(self, server_index, cidrs, device=None):
+ MAX_CIDRS = 50
+ if device is None:
+ device = 'lo'
+ server = self.servers[server_index]
+ ssh_client = self._setup_ssh_client(server)
+ for i in range(0, len(cidrs), MAX_CIDRS):
+ ips = ' '.join(cidrs[i:i + MAX_CIDRS])
+ ssh_client.exec_command(
+ ("for ip in {ips}; do sudo ip addr add $ip "
+ "dev {dev}; done").format(ips=ips, dev=device))
+
+ def _check_l3_bgpvpn(self, from_server=None, to_server=None,
+ should_succeed=True, validate_server=False):
+ to_server = to_server or self.servers[1]
+ destination_srv = None
+ if validate_server:
+ destination_srv = '%s:%s' % (to_server['name'], to_server['id'])
+ destination_ip = self.server_fixed_ips[to_server['id']]
+ self._check_l3_bgpvpn_by_specific_ip(from_server=from_server,
+ to_server_ip=destination_ip,
+ should_succeed=should_succeed,
+ validate_server=destination_srv)
+
+ def _check_l3_bgpvpn_by_specific_ip(self, from_server=None,
+ to_server_ip=None,
+ should_succeed=True,
+ validate_server=None,
+ repeat_validate_server=10):
+ from_server = from_server or self.servers[0]
+ from_server_ip = self.server_fips[from_server['id']][
+ 'floating_ip_address']
+ if to_server_ip is None:
+ to_server_ip = self.server_fixed_ips[self.servers[1]['id']]
+ ssh_client = self._setup_ssh_client(from_server)
+ check_reachable = should_succeed or validate_server
+ msg = ""
+ if check_reachable:
+ msg = "Timed out waiting for {ip} to become reachable".format(
+ ip=to_server_ip)
+ else:
+ msg = ("Unexpected ping response from VM with IP address "
+ "{dest} originated from VM with IP address "
+ "{src}").format(dest=to_server_ip, src=from_server_ip)
+ try:
+ result = self._check_remote_connectivity(ssh_client,
+ to_server_ip,
+ check_reachable)
+ # if a negative connectivity check was unsuccessful (unexpected
+ # ping reply) then try to know more:
+ if not check_reachable and not result:
+ try:
+ content = ssh_client.exec_command(
+ "nc %s 80" % to_server_ip).strip()
+ LOG.warning("Can connect to %s: %s", to_server_ip, content)
+ except Exception:
+ LOG.warning("Could ping %s, but no http", to_server_ip)
+
+ self.assertTrue(result, msg)
+
+ if validate_server and result:
+ # repeating multiple times gives increased odds of avoiding
+ # false positives in the case where the dataplane does
+ # equal-cost multipath
+ for i in range(0, repeat_validate_server):
+ real_dest = ssh_client.exec_command(
+ "nc %s 80" % to_server_ip).strip()
+ result = real_dest == validate_server
+ self.assertTrue(
+ should_succeed == result,
+ ("Destination server name is '%s', expected is '%s'" %
+ (real_dest, validate_server)))
+ LOG.info("nc server name check %d successful", i)
+ except Exception:
+ LOG.exception("Error validating connectivity to %s "
+ "from VM with IP address %s: %s",
+ to_server_ip, from_server_ip, msg)
+ raise
+
+ def _associate_fip_and_check_l3_bgpvpn(self, should_succeed=True):
+ subnet = self.subnets[NET_A][0]
+ self.router = self._create_router_and_associate_fip(0, subnet)
+ self._check_l3_bgpvpn(should_succeed=should_succeed)
diff --git a/neutron_tempest_plugin/bgpvpn/services/__init__.py b/neutron_tempest_plugin/bgpvpn/services/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/neutron_tempest_plugin/bgpvpn/services/__init__.py
diff --git a/neutron_tempest_plugin/bgpvpn/services/bgpvpn_client.py b/neutron_tempest_plugin/bgpvpn/services/bgpvpn_client.py
new file mode 100644
index 0000000..dc40d86
--- /dev/null
+++ b/neutron_tempest_plugin/bgpvpn/services/bgpvpn_client.py
@@ -0,0 +1,115 @@
+# Copyright (c) 2015 Ericsson.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.network import base
+
+# This is the representation of the bgpvpn
+# client of networking-bgpvpn
+
+BGPVPN_OBJECT_PATH = '/bgpvpn/bgpvpns'
+BGPVPN_RESOURCE_PATH = '/bgpvpn/bgpvpns/%s'
+BGPVPN_NETWORK_ASSOCIATION_PATH = '/bgpvpn/bgpvpns/%s/network_associations'
+BGPVPN_ROUTER_ASSOCIATION_PATH = '/bgpvpn/bgpvpns/%s/router_associations'
+BGPVPN_PORT_ASSOCIATION_PATH = '/bgpvpn/bgpvpns/%s/port_associations'
+
+
+class BgpvpnClient(base.BaseNetworkClient):
+
+ def create_bgpvpn(self, **kwargs):
+ uri = BGPVPN_OBJECT_PATH
+ post_data = {'bgpvpn': kwargs}
+ return self.create_resource(uri, post_data)
+
+ def delete_bgpvpn(self, bgpvpn_id):
+ uri = BGPVPN_RESOURCE_PATH % bgpvpn_id
+ return self.delete_resource(uri)
+
+ def show_bgpvpn(self, bgpvpn_id, **fields):
+ uri = BGPVPN_RESOURCE_PATH % bgpvpn_id
+ return self.show_resource(uri, **fields)
+
+ def list_bgpvpns(self, **filters):
+ uri = BGPVPN_OBJECT_PATH
+ return self.list_resources(uri, **filters)
+
+ def update_bgpvpn(self, bgpvpn_id, **kwargs):
+ uri = BGPVPN_RESOURCE_PATH % bgpvpn_id
+ post_data = {'bgpvpn': kwargs}
+ return self.update_resource(uri, post_data)
+
+ def create_network_association(self, bgpvpn_id, network_id):
+ uri = BGPVPN_NETWORK_ASSOCIATION_PATH % bgpvpn_id
+ post_data = {"network_association":
+ {"network_id": network_id}}
+ return self.create_resource(uri, post_data)
+
+ def delete_network_association(self, bgpvpn_id, association_id):
+ uri_pattern = BGPVPN_NETWORK_ASSOCIATION_PATH + "/%s"
+ uri = uri_pattern % (bgpvpn_id, association_id)
+ return self.delete_resource(uri)
+
+ def show_network_association(self, bgpvpn_id, association_id, **fields):
+ uri_pattern = BGPVPN_NETWORK_ASSOCIATION_PATH + "/%s"
+ uri = uri_pattern % (bgpvpn_id, association_id)
+ return self.show_resource(uri, **fields)
+
+ def list_network_associations(self, bgpvpn_id, **filters):
+ uri = BGPVPN_NETWORK_ASSOCIATION_PATH % bgpvpn_id
+ return self.list_resources(uri, **filters)
+
+ def create_router_association(self, bgpvpn_id, router_id):
+ uri = BGPVPN_ROUTER_ASSOCIATION_PATH % bgpvpn_id
+ post_data = {"router_association":
+ {"router_id": router_id}}
+ return self.create_resource(uri, post_data)
+
+ def delete_router_association(self, bgpvpn_id, association_id):
+ uri_pattern = BGPVPN_ROUTER_ASSOCIATION_PATH + "/%s"
+ uri = uri_pattern % (bgpvpn_id, association_id)
+ return self.delete_resource(uri)
+
+ def show_router_association(self, bgpvpn_id, association_id, **fields):
+ uri_pattern = BGPVPN_ROUTER_ASSOCIATION_PATH + "/%s"
+ uri = uri_pattern % (bgpvpn_id, association_id)
+ return self.show_resource(uri, **fields)
+
+ def list_router_associations(self, bgpvpn_id, **filters):
+ uri = BGPVPN_ROUTER_ASSOCIATION_PATH % bgpvpn_id
+ return self.list_resources(uri, **filters)
+
+ def create_port_association(self, bgpvpn_id, **kwargs):
+ uri = BGPVPN_PORT_ASSOCIATION_PATH % bgpvpn_id
+ post_data = {"port_association": kwargs}
+ return self.create_resource(uri, post_data)
+
+ def update_port_association(self, bgpvpn_id, association_id, **kwargs):
+ uri_pattern = BGPVPN_PORT_ASSOCIATION_PATH + "/%s"
+ uri = uri_pattern % (bgpvpn_id, association_id)
+ post_data = {"port_association": kwargs}
+ return self.update_resource(uri, post_data)
+
+ def delete_port_association(self, bgpvpn_id, association_id):
+ uri_pattern = BGPVPN_PORT_ASSOCIATION_PATH + "/%s"
+ uri = uri_pattern % (bgpvpn_id, association_id)
+ return self.delete_resource(uri)
+
+ def show_port_association(self, bgpvpn_id, association_id, **fields):
+ uri_pattern = BGPVPN_PORT_ASSOCIATION_PATH + "/%s"
+ uri = uri_pattern % (bgpvpn_id, association_id)
+ return self.show_resource(uri, **fields)
+
+ def list_port_associations(self, bgpvpn_id, **filters):
+ uri = BGPVPN_PORT_ASSOCIATION_PATH % bgpvpn_id
+ return self.list_resources(uri, **filters)
diff --git a/neutron_tempest_plugin/common/socat.py b/neutron_tempest_plugin/common/socat.py
deleted file mode 100644
index 6bd1fdc..0000000
--- a/neutron_tempest_plugin/common/socat.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright 2018 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-COMMAND = 'socat'
-
-
-class SocatAddress(object):
-
- def __init__(self, address, args=None, options=None):
- self.address = address
- self.args = args
- self.options = options
-
- @classmethod
- def udp_datagram(cls, host, port, options=None, ip_version=None):
- address = 'UDP{}-DATAGRAM'.format(ip_version or '')
- return cls(address, (host, int(port)), options)
-
- @classmethod
- def udp_recvfrom(cls, port, options=None, ip_version=None):
- address = 'UDP{}-RECVFROM'.format(ip_version or '')
- return cls(address, (int(port),), options)
-
- @classmethod
- def stdio(cls):
- return cls('STDIO')
-
- def __str__(self):
- address = self.address
- if self.args:
- address += ':' + ':'.join(str(a) for a in self.args)
- if self.options:
- address += ',' + ','.join(str(o) for o in self.options)
- return address
-
- def format(self, *args, **kwargs):
- return str(self).format(*args, **kwargs)
-
-
-STDIO = SocatAddress.stdio()
-
-
-class SocatOption(object):
-
- def __init__(self, name, *args):
- self.name = name
- self.args = args
-
- @classmethod
- def bind(cls, host):
- return cls('bind', host)
-
- @classmethod
- def fork(cls):
- return cls('fork')
-
- @classmethod
- def ip_multicast_ttl(cls, ttl):
- return cls('ip-multicast-ttl', int(ttl))
-
- @classmethod
- def ip_multicast_if(cls, interface_address):
- return cls('ip-multicast-if', interface_address)
-
- @classmethod
- def ip_add_membership(cls, multicast_address, interface_address):
- return cls('ip-add-membership', multicast_address, interface_address)
-
- def __str__(self):
- result = self.name
- args = self.args
- if args:
- result += '=' + ':'.join(str(a) for a in args)
- return result
-
-
-class SocatCommand(object):
-
- def __init__(self, source=STDIO, destination=STDIO, command=COMMAND):
- self.source = source
- self.destination = destination
- self.command = command
-
- def __str__(self):
- words = [self.command, self.source, self.destination]
- return ' '.join(str(obj) for obj in words)
-
-
-def socat_command(source=STDIO, destination=STDIO, command=COMMAND):
- command = SocatCommand(source=source, destination=destination,
- command=command)
- return str(command)
diff --git a/neutron_tempest_plugin/common/utils.py b/neutron_tempest_plugin/common/utils.py
index 3649cb6..bd7a367 100644
--- a/neutron_tempest_plugin/common/utils.py
+++ b/neutron_tempest_plugin/common/utils.py
@@ -21,9 +21,18 @@
import functools
import threading
import time
+try:
+ import urlparse
+except ImportError:
+ from urllib import parse as urlparse
import eventlet
+SCHEMA_PORT_MAPPING = {
+ "http": 80,
+ "https": 443,
+}
+
class classproperty(object):
def __init__(self, f):
@@ -102,3 +111,15 @@
bases = (overrider_class, overriden_class)
overriden_class = type(name, bases, {})
return overriden_class
+
+
+def normalize_url(url):
+    """Normalize a URL by appending the scheme's default port if absent.
+
+    """
+ parse_result = urlparse.urlparse(url)
+ (scheme, netloc, url, params, query, fragment) = parse_result
+ port = parse_result.port
+ if scheme in SCHEMA_PORT_MAPPING and not port:
+ netloc = netloc + ":" + str(SCHEMA_PORT_MAPPING[scheme])
+ return urlparse.urlunparse((scheme, netloc, url, params, query, fragment))
diff --git a/neutron_tempest_plugin/config.py b/neutron_tempest_plugin/config.py
index 05142d8..54dc16e 100644
--- a/neutron_tempest_plugin/config.py
+++ b/neutron_tempest_plugin/config.py
@@ -65,6 +65,12 @@
choices=['None', 'linuxbridge', 'ovs', 'sriov'],
help='Agent used for devstack@q-agt.service'),
+ # Multicast tests settings
+ cfg.StrOpt('multicast_group_range',
+ default='224.0.0.120-224.0.0.250',
+ help='Unallocated multi-cast IPv4 range, which will be used to '
+ 'test the multi-cast support.'),
+
# Option for feature to connect via SSH to VMs using an intermediate SSH
# server
cfg.StrOpt('ssh_proxy_jump_host',
@@ -116,6 +122,71 @@
for opt in NeutronPluginOptions:
CONF.register_opt(opt, 'neutron_plugin_options')
+# TODO(slaweq): This config option is added to avoid running bgpvpn tests twice
+# on stable branches till stable/stein. We need to remove this config option
+# once stable/stein is EOL. Bgpvpn tempest plugin has been merged into
+# neutron-tempest-plugin from Train. Train onwards bgpvpn tests will run from
+# neutron-tempest-plugins.
+BgpvpnGroup = [
+ cfg.BoolOpt('run_bgpvpn_tests',
+ default=True,
+ help=("If it is set to False bgpvpn api and scenario tests "
+ "will be skipped")),
+ cfg.IntOpt('min_asn',
+ default=100,
+ help=("Minimum number for the range of "
+ "autonomous system number for distinguishers.")),
+ cfg.IntOpt('min_nn',
+ default=100,
+ help=("Minimum number for the range of "
+ "assigned number for distinguishers.")),
+ cfg.IntOpt('max_asn',
+ default=200,
+ help=("Maximum number for the range of "
+ "autonomous system number for distinguishers.")),
+ cfg.IntOpt('max_nn',
+ default=200,
+ help=("Maximum number for the range of "
+ "assigned number for distinguishers.")),
+]
+
+bgpvpn_group = cfg.OptGroup(name="bgpvpn", title=("Networking-Bgpvpn Service "
+ "Options"))
+CONF.register_group(bgpvpn_group)
+CONF.register_opts(BgpvpnGroup, group="bgpvpn")
+
+# TODO(slaweq): This config option is added to avoid running fwaas tests twice
+# on stable branches till stable/stein. We need to remove this config option
+# once stable/stein is EOL. Fwaas tempest plugin has been merged into
+# neutron-tempest-plugin from Train. Train onwards fwaas tests will run from
+# neutron-tempest-plugins.
+FwaasGroup = [
+ cfg.BoolOpt('run_fwaas_tests',
+ default=True,
+ help=("If it is set to False fwaas api and scenario tests "
+ "will be skipped")),
+]
+
+fwaas_group = cfg.OptGroup(
+ name="fwaas", title=("Neutron-fwaas Service Options"))
+CONF.register_group(fwaas_group)
+CONF.register_opts(FwaasGroup, group="fwaas")
+
+# TODO(slaweq): This config option is added to avoid running SFC tests twice
+# on stable branches till stable/stein. We need to remove this config option
+# once stable/stein is EOL. SFC tempest plugin has been merged into
+# neutron-tempest-plugin from Train. Train onwards SFC tests will run from
+# neutron-tempest-plugins.
+SfcGroup = [
+ cfg.BoolOpt('run_sfc_tests',
+ default=True,
+ help=("If it is set to False SFC api and scenario tests "
+ "will be skipped")),
+]
+
+sfc_group = cfg.OptGroup(name="sfc", title=("Networking-sfc Service Options"))
+CONF.register_group(sfc_group)
+CONF.register_opts(SfcGroup, group="sfc")
config_opts_translator = {
'project_network_cidr': 'tenant_network_cidr',
diff --git a/neutron_tempest_plugin/fwaas/__init__.py b/neutron_tempest_plugin/fwaas/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/neutron_tempest_plugin/fwaas/__init__.py
diff --git a/neutron_tempest_plugin/fwaas/api/__init__.py b/neutron_tempest_plugin/fwaas/api/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/neutron_tempest_plugin/fwaas/api/__init__.py
diff --git a/neutron_tempest_plugin/fwaas/api/fwaas_v2_base.py b/neutron_tempest_plugin/fwaas/api/fwaas_v2_base.py
new file mode 100644
index 0000000..f4f63ec
--- /dev/null
+++ b/neutron_tempest_plugin/fwaas/api/fwaas_v2_base.py
@@ -0,0 +1,33 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.network import base
+from tempest import config
+
+from neutron_tempest_plugin.fwaas.common import fwaas_v2_client
+
+CONF = config.CONF
+
+
+class BaseFWaaSTest(fwaas_v2_client.FWaaSClientMixin, base.BaseNetworkTest):
+
+ @classmethod
+ def skip_checks(cls):
+ super(BaseFWaaSTest, cls).skip_checks()
+ msg = None
+ if not CONF.fwaas.run_fwaas_tests:
+ msg = ("Running of fwaas related tests is disabled in "
+ "plugin configuration.")
+ if msg:
+ raise cls.skipException(msg)
diff --git a/neutron_tempest_plugin/fwaas/api/test_fwaasv2_extensions.py b/neutron_tempest_plugin/fwaas/api/test_fwaasv2_extensions.py
new file mode 100644
index 0000000..f085e6d
--- /dev/null
+++ b/neutron_tempest_plugin/fwaas/api/test_fwaasv2_extensions.py
@@ -0,0 +1,358 @@
+# Copyright 2016
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import netaddr
+import six
+
+from tempest.common import utils
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+
+from neutron_tempest_plugin.fwaas.api import fwaas_v2_base as v2_base
+
+
+CONF = config.CONF
+DEFAULT_FWG = 'default'
+
+
+class FWaaSv2ExtensionTestJSON(v2_base.BaseFWaaSTest):
+
+ """List of tests
+
+ Tests the following operations in the Neutron API using the REST client
+ for Neutron:
+
+ List firewall rules
+ Create firewall rule
+ Update firewall rule
+ Delete firewall rule
+ Show firewall rule
+ List firewall policies
+ Create firewall policy
+ Update firewall policy
+ Insert firewall rule to policy
+ Remove firewall rule from policy
+ Insert firewall rule after/before rule in policy
+ Update firewall policy audited attribute
+ Delete firewall policy
+ Show firewall policy
+ List firewall group
+ Create firewall group
+ Update firewall group
+ Delete firewall group
+ Show firewall group
+ """
+
+ @classmethod
+ def resource_setup(cls):
+ super(FWaaSv2ExtensionTestJSON, cls).resource_setup()
+ if not utils.is_extension_enabled('fwaas_v2', 'network'):
+ msg = "FWaaS v2 Extension not enabled."
+ raise cls.skipException(msg)
+
+ def setUp(self):
+ super(FWaaSv2ExtensionTestJSON, self).setUp()
+ self.fw_rule_1 = self.create_firewall_rule(action="allow",
+ protocol="tcp")
+ self.fw_rule_2 = self.create_firewall_rule(action="deny",
+ protocol="udp")
+ self.fw_policy_1 = self.create_firewall_policy(
+ firewall_rules=[self.fw_rule_1['id']])
+ self.fw_policy_2 = self.create_firewall_policy(
+ firewall_rules=[self.fw_rule_2['id']])
+
+ def _create_router_interfaces(self):
+ network_1 = self.create_network()
+ network_2 = self.create_network()
+
+ cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
+ mask_bits = CONF.network.project_network_mask_bits
+
+ subnet_cidr_1 = list(cidr.subnet(mask_bits))[-1]
+ subnet_cidr_2 = list(cidr.subnet(mask_bits))[-2]
+ subnet_1 = self.create_subnet(network_1, cidr=subnet_cidr_1,
+ mask_bits=mask_bits)
+ subnet_2 = self.create_subnet(network_2, cidr=subnet_cidr_2,
+ mask_bits=mask_bits)
+
+ router = self.create_router(
+ data_utils.rand_name('router-'),
+ admin_state_up=True)
+ self.addCleanup(self._try_delete_router, router)
+
+ intf_1 = self.routers_client.add_router_interface(router['id'],
+ subnet_id=subnet_1['id'])
+ intf_2 = self.routers_client.add_router_interface(router['id'],
+ subnet_id=subnet_2['id'])
+
+ return intf_1, intf_2
+
+ def _try_delete_router(self, router):
+ # delete router, if it exists
+ try:
+ self.delete_router(router)
+ # if router is not found, this means it was deleted in the test
+ except lib_exc.NotFound:
+ pass
+
+ def _try_delete_policy(self, policy_id):
+ # delete policy, if it exists
+ try:
+ self.firewall_policies_client.delete_firewall_policy(policy_id)
+ # if policy is not found, this means it was deleted in the test
+ except lib_exc.NotFound:
+ pass
+
+ def _try_delete_rule(self, rule_id):
+ # delete rule, if it exists
+ try:
+ self.firewall_rules_client.delete_firewall_rule(rule_id)
+ # if rule is not found, this means it was deleted in the test
+ except lib_exc.NotFound:
+ pass
+
+ def _try_delete_firewall_group(self, fwg_id):
+ # delete firewall group, if it exists
+ try:
+ self.firewall_groups_client.delete_firewall_group(fwg_id)
+        # if the group is not found, it was already deleted in the test
+ except lib_exc.NotFound:
+ pass
+
+ self.firewall_groups_client.wait_for_resource_deletion(fwg_id)
+
+ def _wait_until_ready(self, fwg_id):
+ target_states = ('ACTIVE', 'CREATED')
+
+ def _wait():
+ firewall_group = self.firewall_groups_client.show_firewall_group(
+ fwg_id)
+ firewall_group = firewall_group['firewall_group']
+ return firewall_group['status'] in target_states
+
+ if not test_utils.call_until_true(_wait, CONF.network.build_timeout,
+ CONF.network.build_interval):
+ m = ("Timed out waiting for firewall_group %s to reach %s "
+ "state(s)" %
+ (fwg_id, target_states))
+ raise lib_exc.TimeoutException(m)
+
+ def _wait_until_deleted(self, fwg_id):
+ def _wait():
+ try:
+ fwg = self.firewall_groups_client.show_firewall_group(fwg_id)
+ except lib_exc.NotFound:
+ return True
+
+ fwg_status = fwg['firewall_group']['status']
+ if fwg_status == 'ERROR':
+ raise lib_exc.DeleteErrorException(resource_id=fwg_id)
+
+ if not test_utils.call_until_true(_wait, CONF.network.build_timeout,
+ CONF.network.build_interval):
+ m = ("Timed out waiting for firewall_group %s deleted" % fwg_id)
+ raise lib_exc.TimeoutException(m)
+
+ @decorators.idempotent_id('ddccfa87-4af7-48a6-9e50-0bd0ad1348cb')
+ def test_list_firewall_rules(self):
+ # List firewall rules
+ fw_rules = self.firewall_rules_client.list_firewall_rules()
+ fw_rules = fw_rules['firewall_rules']
+ self.assertIn((self.fw_rule_1['id'],
+ self.fw_rule_1['name'],
+ self.fw_rule_1['action'],
+ self.fw_rule_1['protocol'],
+ self.fw_rule_1['ip_version'],
+ self.fw_rule_1['enabled']),
+ [(m['id'],
+ m['name'],
+ m['action'],
+ m['protocol'],
+ m['ip_version'],
+ m['enabled']) for m in fw_rules])
+
+ @decorators.idempotent_id('ffc009fa-cd17-4029-8025-c4b81a7dd923')
+ def test_create_update_delete_firewall_rule(self):
+ # Create firewall rule
+ body = self.firewall_rules_client.create_firewall_rule(
+ name=data_utils.rand_name("fw-rule"),
+ action="allow",
+ protocol="tcp")
+ fw_rule_id = body['firewall_rule']['id']
+ self.addCleanup(self._try_delete_rule, fw_rule_id)
+
+ # Update firewall rule
+ body = self.firewall_rules_client.update_firewall_rule(fw_rule_id,
+ action="deny")
+ self.assertEqual("deny", body["firewall_rule"]['action'])
+
+ # Delete firewall rule
+ self.firewall_rules_client.delete_firewall_rule(fw_rule_id)
+ # Confirm deletion
+ fw_rules = self.firewall_rules_client.list_firewall_rules()
+ self.assertNotIn(fw_rule_id,
+ [m['id'] for m in fw_rules['firewall_rules']])
+
+ @decorators.idempotent_id('76b07afc-444e-4bb9-abec-9b8c5f994dcd')
+ def test_show_firewall_rule(self):
+ # show a created firewall rule
+ fw_rule = self.firewall_rules_client.show_firewall_rule(
+ self.fw_rule_1['id'])
+ for key, value in six.iteritems(fw_rule['firewall_rule']):
+ if key != 'firewall_policy_id':
+ self.assertEqual(self.fw_rule_1[key], value)
+ # This check is placed because we cannot modify policy during
+ # Create/Update of Firewall Rule but we can see the association
+ # of a Firewall Rule with the policies it belongs to.
+
+ @decorators.idempotent_id('f6b83902-746f-4e74-9403-2ec9899583a3')
+ def test_list_firewall_policies(self):
+ fw_policies = self.firewall_policies_client.list_firewall_policies()
+ fw_policies = fw_policies['firewall_policies']
+ self.assertIn((self.fw_policy_1['id'],
+ self.fw_policy_1['name'],
+ self.fw_policy_1['firewall_rules']),
+ [(m['id'],
+ m['name'],
+ m['firewall_rules']) for m in fw_policies])
+
+ @decorators.idempotent_id('6ef9bd02-7349-4d61-8d1f-80479f64d904')
+ def test_create_update_delete_firewall_policy(self):
+ # Create firewall policy
+ body = self.firewall_policies_client.create_firewall_policy(
+ name=data_utils.rand_name("fw-policy"))
+ fw_policy_id = body['firewall_policy']['id']
+ self.addCleanup(self._try_delete_policy, fw_policy_id)
+
+ # Update firewall policy
+ body = self.firewall_policies_client.update_firewall_policy(
+ fw_policy_id,
+ name="updated_policy")
+ updated_fw_policy = body["firewall_policy"]
+ self.assertEqual("updated_policy", updated_fw_policy['name'])
+
+ # Delete firewall policy
+ self.firewall_policies_client.delete_firewall_policy(fw_policy_id)
+ # Confirm deletion
+ fw_policies = self.firewall_policies_client.list_firewall_policies()
+ fw_policies = fw_policies['firewall_policies']
+ self.assertNotIn(fw_policy_id, [m['id'] for m in fw_policies])
+
+ @decorators.idempotent_id('164381de-61f4-483f-9a5a-48105b8e70e2')
+ def test_show_firewall_policy(self):
+ # show a created firewall policy
+ fw_policy = self.firewall_policies_client.show_firewall_policy(
+ self.fw_policy_1['id'])
+ fw_policy = fw_policy['firewall_policy']
+ for key, value in six.iteritems(fw_policy):
+ self.assertEqual(self.fw_policy_1[key], value)
+
+ @decorators.idempotent_id('48dfcd75-3924-479d-bb65-b3ed33397663')
+ def test_create_show_delete_firewall_group(self):
+ # create router and add interfaces
+ intf_1, intf_2 = self._create_router_interfaces()
+
+ # Create firewall_group
+ body = self.firewall_groups_client.create_firewall_group(
+ name=data_utils.rand_name("firewall_group"),
+ ingress_firewall_policy_id=self.fw_policy_1['id'],
+ egress_firewall_policy_id=self.fw_policy_2['id'],
+ ports=[intf_1['port_id'], intf_2['port_id']])
+ created_firewall_group = body['firewall_group']
+ fwg_id = created_firewall_group['id']
+
+ # Wait for the firewall resource to become ready
+ self._wait_until_ready(fwg_id)
+
+ # show created firewall_group
+ firewall_group = self.firewall_groups_client.show_firewall_group(
+ fwg_id)
+ fwg = firewall_group['firewall_group']
+
+ for key, value in six.iteritems(fwg):
+ if key == 'status':
+ continue
+ self.assertEqual(created_firewall_group[key], value)
+
+ # list firewall_groups
+ firewall_groups = self.firewall_groups_client.list_firewall_groups()
+ fwgs = firewall_groups['firewall_groups']
+ self.assertIn((created_firewall_group['id'],
+ created_firewall_group['name'],
+ created_firewall_group['ingress_firewall_policy_id'],
+ created_firewall_group['egress_firewall_policy_id']),
+ [(m['id'],
+ m['name'],
+ m['ingress_firewall_policy_id'],
+ m['egress_firewall_policy_id']) for m in fwgs])
+
+ # Disassociate all port with this firewall group
+ self.firewall_groups_client.update_firewall_group(fwg_id, ports=[])
+ # Delete firewall_group
+ self.firewall_groups_client.delete_firewall_group(fwg_id)
+
+ # Wait for the firewall group to be deleted
+ self._wait_until_deleted(fwg_id)
+
+ # Confirm deletion
+ firewall_groups = self.firewall_groups_client.list_firewall_groups()
+ fwgs = firewall_groups['firewall_groups']
+ self.assertNotIn(fwg_id, [m['id'] for m in fwgs])
+
+ @decorators.idempotent_id('e021baab-d4f7-4bad-b382-bde4946e0e0b')
+ def test_update_firewall_group(self):
+ # create router and add interfaces
+ intf_1, intf_2 = self._create_router_interfaces()
+
+ # Create firewall_group
+ body = self.firewall_groups_client.create_firewall_group(
+ name=data_utils.rand_name("firewall_group"),
+ ingress_firewall_policy_id=self.fw_policy_1['id'],
+ egress_firewall_policy_id=self.fw_policy_2['id'],
+ ports=[intf_1['port_id']])
+ created_firewall_group = body['firewall_group']
+ fwg_id = created_firewall_group['id']
+ self.addCleanup(self._try_delete_firewall_group, fwg_id)
+
+ # Wait for the firewall resource to become ready
+ self._wait_until_ready(fwg_id)
+
+ # Update firewall group
+ body = self.firewall_groups_client.update_firewall_group(
+ fwg_id,
+ ports=[intf_2['port_id']])
+ updated_fwg = body["firewall_group"]
+ self.assertEqual([intf_2['port_id']], updated_fwg['ports'])
+
+ # Delete firewall_group
+ self.firewall_groups_client.delete_firewall_group(fwg_id)
+
+ @decorators.idempotent_id('a1f524d8-5336-4769-aa7b-0830bb4df6c8')
+ def test_error_on_create_firewall_group_name_default(self):
+ try:
+ # Create firewall_group with name == reserved default fwg
+ self.firewall_groups_client.create_firewall_group(
+ name=DEFAULT_FWG)
+ except lib_exc.Conflict:
+ pass
+
+ @decorators.idempotent_id('fd24db16-c8cb-4cb4-ba60-b0cd18a66b83')
+ def test_default_fwg_created_on_list_firewall_groups(self):
+ fw_groups = self.firewall_groups_client.list_firewall_groups()
+ fw_groups = fw_groups['firewall_groups']
+ self.assertIn(DEFAULT_FWG,
+ [g['name'] for g in fw_groups])
diff --git a/neutron_tempest_plugin/fwaas/common/__init__.py b/neutron_tempest_plugin/fwaas/common/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/neutron_tempest_plugin/fwaas/common/__init__.py
diff --git a/neutron_tempest_plugin/fwaas/common/fwaas_v2_client.py b/neutron_tempest_plugin/fwaas/common/fwaas_v2_client.py
new file mode 100644
index 0000000..767afc0
--- /dev/null
+++ b/neutron_tempest_plugin/fwaas/common/fwaas_v2_client.py
@@ -0,0 +1,162 @@
+# Copyright (c) 2015 Midokura SARL
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import time
+
+from neutron_lib import constants as nl_constants
+from tempest import config
+from tempest import exceptions
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib import exceptions as lib_exc
+
+from neutron_tempest_plugin.fwaas.services import v2_client
+
+
+CONF = config.CONF
+
+
+class FWaaSClientMixin(object):
+
+ @classmethod
+ def resource_setup(cls):
+ super(FWaaSClientMixin, cls).resource_setup()
+ manager = cls.os_primary
+ cls.firewall_groups_client = v2_client.FirewallGroupsClient(
+ manager.auth_provider,
+ CONF.network.catalog_type,
+ CONF.network.region or CONF.identity.region,
+ endpoint_type=CONF.network.endpoint_type,
+ build_interval=CONF.network.build_interval,
+ build_timeout=CONF.network.build_timeout,
+ **manager.default_params)
+ cls.firewall_policies_client = v2_client.FirewallPoliciesClient(
+ manager.auth_provider,
+ CONF.network.catalog_type,
+ CONF.network.region or CONF.identity.region,
+ endpoint_type=CONF.network.endpoint_type,
+ build_interval=CONF.network.build_interval,
+ build_timeout=CONF.network.build_timeout,
+ **manager.default_params)
+ cls.firewall_rules_client = v2_client.FirewallRulesClient(
+ manager.auth_provider,
+ CONF.network.catalog_type,
+ CONF.network.region or CONF.identity.region,
+ endpoint_type=CONF.network.endpoint_type,
+ build_interval=CONF.network.build_interval,
+ build_timeout=CONF.network.build_timeout,
+ **manager.default_params)
+
+ def create_firewall_rule(self, **kwargs):
+ body = self.firewall_rules_client.create_firewall_rule(
+ name=data_utils.rand_name("fw-rule"),
+ **kwargs)
+ fw_rule = body['firewall_rule']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.firewall_rules_client.delete_firewall_rule,
+ fw_rule['id'])
+ return fw_rule
+
+ def create_firewall_policy(self, **kwargs):
+ body = self.firewall_policies_client.create_firewall_policy(
+ name=data_utils.rand_name("fw-policy"),
+ **kwargs)
+ fw_policy = body['firewall_policy']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.firewall_policies_client.delete_firewall_policy,
+ fw_policy['id'])
+ return fw_policy
+
+ def create_firewall_group(self, **kwargs):
+ body = self.firewall_groups_client.create_firewall_group(
+ name=data_utils.rand_name("fwg"),
+ **kwargs)
+ fwg = body['firewall_group']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.delete_firewall_group_and_wait,
+ fwg['id'])
+ return fwg
+
+ def delete_firewall_group_and_wait(self, firewall_group_id):
+ self.firewall_groups_client.delete_firewall_group(firewall_group_id)
+ self._wait_firewall_group_while(firewall_group_id,
+ [nl_constants.PENDING_DELETE],
+ not_found_ok=True)
+
+ def insert_firewall_rule_in_policy_and_wait(self,
+ firewall_group_id,
+ firewall_policy_id,
+ firewall_rule_id, **kwargs):
+ self.firewall_policies_client.insert_firewall_rule_in_policy(
+ firewall_policy_id=firewall_policy_id,
+ firewall_rule_id=firewall_rule_id,
+ **kwargs)
+ self.addCleanup(
+ self._call_and_ignore_exceptions,
+ (lib_exc.NotFound, lib_exc.BadRequest),
+ self.remove_firewall_rule_from_policy_and_wait,
+ firewall_group_id=firewall_group_id,
+ firewall_policy_id=firewall_policy_id,
+ firewall_rule_id=firewall_rule_id)
+ self._wait_firewall_group_ready(firewall_group_id)
+
+ def remove_firewall_rule_from_policy_and_wait(self,
+ firewall_group_id,
+ firewall_policy_id,
+ firewall_rule_id):
+ self.firewall_policies_client.remove_firewall_rule_from_policy(
+ firewall_policy_id=firewall_policy_id,
+ firewall_rule_id=firewall_rule_id)
+ self._wait_firewall_group_ready(firewall_group_id)
+
+ @staticmethod
+ def _call_and_ignore_exceptions(exc_list, func, *args, **kwargs):
+ """Call the given function and pass if a given exception is raised."""
+
+ try:
+ return func(*args, **kwargs)
+ except exc_list:
+ pass
+
+ def _wait_firewall_group_ready(self, firewall_group_id):
+ self._wait_firewall_group_while(firewall_group_id,
+ [nl_constants.PENDING_CREATE,
+ nl_constants.PENDING_UPDATE])
+
+ def _wait_firewall_group_while(self, firewall_group_id, statuses,
+ not_found_ok=False):
+ start = int(time.time())
+ if not_found_ok:
+ expected_exceptions = (lib_exc.NotFound)
+ else:
+ expected_exceptions = ()
+ while True:
+ try:
+ fwg = self.firewall_groups_client.show_firewall_group(
+ firewall_group_id)
+ except expected_exceptions:
+ break
+ status = fwg['firewall_group']['status']
+ if status not in statuses:
+ break
+ if (int(time.time()) - start >=
+ self.firewall_groups_client.build_timeout):
+ msg = ("Firewall Group %(firewall_group)s failed to reach "
+ "non PENDING status (current %(status)s)") % {
+ "firewall_group": firewall_group_id,
+ "status": status,
+ }
+ raise exceptions.TimeoutException(msg)
+ time.sleep(1)
diff --git a/neutron_tempest_plugin/fwaas/scenario/__init__.py b/neutron_tempest_plugin/fwaas/scenario/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/neutron_tempest_plugin/fwaas/scenario/__init__.py
diff --git a/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_base.py b/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_base.py
new file mode 100644
index 0000000..00cdf2c
--- /dev/null
+++ b/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_base.py
@@ -0,0 +1,69 @@
+# Copyright (c) 2015 Midokura SARL
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest import config
+from tempest.lib.common import ssh
+from tempest.lib import exceptions as lib_exc
+
+from neutron_tempest_plugin.fwaas.common import fwaas_v2_client
+from neutron_tempest_plugin.fwaas.scenario import fwaas_v2_manager as manager
+
+CONF = config.CONF
+
+
+class FWaaSScenarioTestBase(object):
+ def check_connectivity(self, ip_address, username=None, private_key=None,
+ should_connect=True,
+ check_icmp=True, check_ssh=True,
+ check_reverse_icmp_ip=None,
+ should_reverse_connect=True):
+ if should_connect:
+ msg = "Timed out waiting for %s to become reachable" % ip_address
+ else:
+ msg = "ip address %s is reachable" % ip_address
+ if check_icmp:
+ ok = self.ping_ip_address(ip_address,
+ should_succeed=should_connect)
+ self.assertTrue(ok, msg=msg)
+ if check_ssh:
+ connect_timeout = CONF.validation.connect_timeout
+ kwargs = {}
+ if not should_connect:
+ # Use a shorter timeout for negative case
+ kwargs['timeout'] = 1
+ try:
+ client = ssh.Client(ip_address, username, pkey=private_key,
+ channel_timeout=connect_timeout,
+ **kwargs)
+ client.test_connection_auth()
+ self.assertTrue(should_connect, "Unexpectedly reachable")
+ if check_reverse_icmp_ip:
+ cmd = 'ping -c1 -w1 %s' % check_reverse_icmp_ip
+ try:
+ client.exec_command(cmd)
+ self.assertTrue(should_reverse_connect,
+ "Unexpectedly reachable (reverse)")
+ except lib_exc.SSHExecCommandFailed:
+ if should_reverse_connect:
+ raise
+ except lib_exc.SSHTimeout:
+ if should_connect:
+ raise
+
+
+class FWaaSScenarioTest_V2(fwaas_v2_client.FWaaSClientMixin,
+ FWaaSScenarioTestBase,
+ manager.NetworkScenarioTest):
+ pass
diff --git a/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_manager.py b/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_manager.py
new file mode 100644
index 0000000..5ead2a7
--- /dev/null
+++ b/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_manager.py
@@ -0,0 +1,877 @@
+# Copyright 2012 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import subprocess
+
+import netaddr
+from oslo_log import log
+from oslo_utils import netutils
+
+from tempest.common import compute
+from tempest.common.utils.linux import remote_client
+from tempest.common.utils import net_utils
+from tempest.common import waiters
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib import exceptions as lib_exc
+import tempest.test
+
+CONF = config.CONF
+
+LOG = log.getLogger(__name__)
+
+
+class ScenarioTest(tempest.test.BaseTestCase):
+    """Base class for scenario tests. Uses tempest's own clients."""
+
+ credentials = ['primary']
+
+ @classmethod
+ def skip_checks(cls):
+ super(ScenarioTest, cls).skip_checks()
+ msg = None
+ if not CONF.fwaas.run_fwaas_tests:
+ msg = ("Running of fwaas related tests is disabled in "
+ "plugin configuration.")
+ if msg:
+ raise cls.skipException(msg)
+
+ @classmethod
+ def setup_clients(cls):
+ super(ScenarioTest, cls).setup_clients()
+ # Clients (in alphabetical order)
+ cls.keypairs_client = cls.os_primary.keypairs_client
+ cls.servers_client = cls.os_primary.servers_client
+ # Neutron network client
+ cls.networks_client = cls.os_primary.networks_client
+ cls.ports_client = cls.os_primary.ports_client
+ cls.routers_client = cls.os_primary.routers_client
+ cls.subnets_client = cls.os_primary.subnets_client
+ cls.floating_ips_client = cls.os_primary.floating_ips_client
+ cls.security_groups_client = cls.os_primary.security_groups_client
+ cls.security_group_rules_client = (
+ cls.os_primary.security_group_rules_client)
+
+ # Test functions library
+ #
+ # The create_[resource] functions only return body and discard the
+ # resp part which is not used in scenario tests
+
+ def _create_port(self, network_id, client=None, namestart='port-quotatest',
+ **kwargs):
+ if not client:
+ client = self.ports_client
+ name = data_utils.rand_name(namestart)
+ result = client.create_port(
+ name=name,
+ network_id=network_id,
+ **kwargs)
+ self.assertIsNotNone(result, 'Unable to allocate port')
+ port = result['port']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ client.delete_port, port['id'])
+ return port
+
+ def create_keypair(self, client=None):
+ if not client:
+ client = self.keypairs_client
+ name = data_utils.rand_name(self.__class__.__name__)
+ # We don't need to create a keypair by pubkey in scenario
+ body = client.create_keypair(name=name)
+ self.addCleanup(client.delete_keypair, name)
+ return body['keypair']
+
+ def create_server(self, name=None, image_id=None, flavor=None,
+ validatable=False, wait_until='ACTIVE',
+ clients=None, **kwargs):
+ """Wrapper utility that returns a test server.
+
+ This wrapper utility calls the common create test server and
+ returns a test server. The purpose of this wrapper is to minimize
+ the impact on the code of the tests already using this
+ function.
+ """
+
+ # NOTE(jlanoux): As a first step, ssh checks in the scenario
+ # tests need to be run regardless of the run_validation and
+ # validatable parameters and thus until the ssh validation job
+ # becomes voting in CI. The test resources management and IP
+ # association are taken care of in the scenario tests.
+ # Therefore, the validatable parameter is set to false in all
+ # those tests. In this way create_server just return a standard
+ # server and the scenario tests always perform ssh checks.
+
+ # Needed for the cross_tenant_traffic test:
+ if clients is None:
+ clients = self.os_primary
+
+ if name is None:
+ name = data_utils.rand_name(self.__class__.__name__ + "-server")
+
+ vnic_type = CONF.network.port_vnic_type
+
+ # If vnic_type is configured create port for
+ # every network
+ if vnic_type:
+ ports = []
+
+ create_port_body = {'binding:vnic_type': vnic_type,
+ 'namestart': 'port-smoke'}
+ if kwargs:
+ # Convert security group names to security group ids
+ # to pass to create_port
+ if 'security_groups' in kwargs:
+ security_groups = \
+ clients.security_groups_client.list_security_groups(
+ ).get('security_groups')
+ sec_dict = dict([(s['name'], s['id'])
+ for s in security_groups])
+
+ sec_groups_names = [s['name'] for s in kwargs.pop(
+ 'security_groups')]
+ security_groups_ids = [sec_dict[s]
+ for s in sec_groups_names]
+
+ if security_groups_ids:
+ create_port_body[
+ 'security_groups'] = security_groups_ids
+ networks = kwargs.pop('networks', [])
+ else:
+ networks = []
+
+ # If there are no networks passed to us we look up
+ # for the project's private networks and create a port.
+ # The same behaviour as we would expect when passing
+ # the call to the clients with no networks
+ if not networks:
+ networks = clients.networks_client.list_networks(
+ **{'router:external': False, 'fields': 'id'})['networks']
+
+ # It's net['uuid'] if networks come from kwargs
+ # and net['id'] if they come from
+ # clients.networks_client.list_networks
+ for net in networks:
+ net_id = net.get('uuid', net.get('id'))
+ if 'port' not in net:
+ port = self._create_port(network_id=net_id,
+ client=clients.ports_client,
+ **create_port_body)
+ ports.append({'port': port['id']})
+ else:
+ ports.append({'port': net['port']})
+ if ports:
+ kwargs['networks'] = ports
+ self.ports = ports
+
+ tenant_network = self.get_tenant_network()
+
+ body, servers = compute.create_test_server(
+ clients,
+ tenant_network=tenant_network,
+ wait_until=wait_until,
+ name=name, flavor=flavor,
+ image_id=image_id, **kwargs)
+
+ self.addCleanup(waiters.wait_for_server_termination,
+ clients.servers_client, body['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ clients.servers_client.delete_server, body['id'])
+ server = clients.servers_client.show_server(body['id'])['server']
+ return server
+
+ def get_remote_client(self, ip_address, username=None, private_key=None):
+ """Get a SSH client to a remote server
+
+ @param ip_address the server floating or fixed IP address to use
+ for ssh validation
+ @param username name of the Linux account on the remote server
+ @param private_key the SSH private key to use
+ @return a RemoteClient object
+ """
+
+ if username is None:
+ username = CONF.validation.image_ssh_user
+ # Set this with 'keypair' or others to log in with keypair or
+ # username/password.
+ if CONF.validation.auth_method == 'keypair':
+ password = None
+ if private_key is None:
+ private_key = self.keypair['private_key']
+ else:
+ password = CONF.validation.image_ssh_password
+ private_key = None
+ linux_client = remote_client.RemoteClient(ip_address, username,
+ pkey=private_key,
+ password=password)
+ try:
+ linux_client.validate_authentication()
+ except Exception as e:
+ message = ('Initializing SSH connection to %(ip)s failed. '
+ 'Error: %(error)s' % {'ip': ip_address,
+ 'error': e})
+ caller = test_utils.find_test_caller()
+ if caller:
+ message = '(%s) %s' % (caller, message)
+ LOG.exception(message)
+ self._log_console_output()
+ raise
+
+ return linux_client
+
+ def _log_console_output(self, servers=None):
+ if not CONF.compute_feature_enabled.console_output:
+ LOG.debug('Console output not supported, cannot log')
+ return
+ if not servers:
+ servers = self.servers_client.list_servers()
+ servers = servers['servers']
+ for server in servers:
+ try:
+ console_output = self.servers_client.get_console_output(
+ server['id'])['output']
+ LOG.debug('Console output for %s\nbody=\n%s',
+ server['id'], console_output)
+ except lib_exc.NotFound:
+ LOG.debug("Server %s disappeared(deleted) while looking "
+ "for the console log", server['id'])
+
+ def _log_net_info(self, exc):
+ # network debug is called as part of ssh init
+ if not isinstance(exc, lib_exc.SSHTimeout):
+ LOG.debug('Network information on a devstack host')
+
+ def ping_ip_address(self, ip_address, should_succeed=True,
+ ping_timeout=None, mtu=None):
+ timeout = ping_timeout or CONF.validation.ping_timeout
+ cmd = ['ping', '-c1', '-w1']
+
+ if mtu:
+ cmd += [
+ # don't fragment
+ '-M', 'do',
+ # ping receives just the size of ICMP payload
+ '-s', str(net_utils.get_ping_payload_size(mtu, 4))
+ ]
+ cmd.append(ip_address)
+
+ def ping():
+ proc = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc.communicate()
+
+ return (proc.returncode == 0) == should_succeed
+
+ caller = test_utils.find_test_caller()
+ LOG.debug('%(caller)s begins to ping %(ip)s in %(timeout)s sec and the'
+ ' expected result is %(should_succeed)s', {
+ 'caller': caller, 'ip': ip_address, 'timeout': timeout,
+ 'should_succeed':
+ 'reachable' if should_succeed else 'unreachable'
+ })
+ result = test_utils.call_until_true(ping, timeout, 1)
+ LOG.debug('%(caller)s finishes ping %(ip)s in %(timeout)s sec and the '
+ 'ping result is %(result)s', {
+ 'caller': caller, 'ip': ip_address, 'timeout': timeout,
+ 'result': 'expected' if result else 'unexpected'
+ })
+ return result
+
+ def check_vm_connectivity(self, ip_address,
+ username=None,
+ private_key=None,
+ should_connect=True,
+ mtu=None):
+ """Check server connectivity
+
+ :param ip_address: server to test against
+ :param username: server's ssh username
+ :param private_key: server's ssh private key to be used
+ :param should_connect: True/False indicates positive/negative test
+ positive - attempt ping and ssh
+ negative - attempt ping and fail if succeed
+ :param mtu: network MTU to use for connectivity validation
+
+ :raises: AssertError if the result of the connectivity check does
+ not match the value of the should_connect param
+ """
+ if should_connect:
+ msg = "Timed out waiting for %s to become reachable" % ip_address
+ else:
+ msg = "ip address %s is reachable" % ip_address
+ self.assertTrue(self.ping_ip_address(ip_address,
+ should_succeed=should_connect,
+ mtu=mtu),
+ msg=msg)
+ if should_connect:
+ # no need to check ssh for negative connectivity
+ self.get_remote_client(ip_address, username, private_key)
+
+ def check_public_network_connectivity(self, ip_address, username,
+ private_key, should_connect=True,
+ msg=None, servers=None, mtu=None):
+ # The target login is assumed to have been configured for
+ # key-based authentication by cloud-init.
+ LOG.debug('checking network connections to IP %s with user: %s',
+ ip_address, username)
+ try:
+ self.check_vm_connectivity(ip_address,
+ username,
+ private_key,
+ should_connect=should_connect,
+ mtu=mtu)
+ except Exception:
+ ex_msg = 'Public network connectivity check failed'
+ if msg:
+ ex_msg += ": " + msg
+ LOG.exception(ex_msg)
+ self._log_console_output(servers)
+ raise
+
+
+class NetworkScenarioTest(ScenarioTest):
+ """Base class for network scenario tests.
+
+    This class provides helpers for network scenario tests, using the neutron
+ API. Helpers from ancestor which use the nova network API are overridden
+ with the neutron API.
+
+ This Class also enforces using Neutron instead of novanetwork.
+ Subclassed tests will be skipped if Neutron is not enabled
+
+ """
+
+ credentials = ['primary', 'admin']
+
+ @classmethod
+ def skip_checks(cls):
+ super(NetworkScenarioTest, cls).skip_checks()
+ if not CONF.service_available.neutron:
+ raise cls.skipException('Neutron not available')
+
+ def _create_network(self, networks_client=None,
+ tenant_id=None,
+ namestart='network-smoke-',
+ port_security_enabled=True):
+ if not networks_client:
+ networks_client = self.networks_client
+ if not tenant_id:
+ tenant_id = networks_client.tenant_id
+ name = data_utils.rand_name(namestart)
+ network_kwargs = dict(name=name, tenant_id=tenant_id)
+ # Neutron disables port security by default so we have to check the
+ # config before trying to create the network with port_security_enabled
+ if CONF.network_feature_enabled.port_security:
+ network_kwargs['port_security_enabled'] = port_security_enabled
+ result = networks_client.create_network(**network_kwargs)
+ network = result['network']
+
+ self.assertEqual(network['name'], name)
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ networks_client.delete_network,
+ network['id'])
+ return network
+
+ def _create_subnet(self, network, subnets_client=None,
+ routers_client=None, namestart='subnet-smoke',
+ **kwargs):
+ """Create a subnet for the given network
+
+ within the cidr block configured for tenant networks.
+ """
+ if not subnets_client:
+ subnets_client = self.subnets_client
+ if not routers_client:
+ routers_client = self.routers_client
+
+ def cidr_in_use(cidr, tenant_id):
+ """Check cidr existence
+
+ :returns: True if subnet with cidr already exist in tenant
+ False else
+ """
+ cidr_in_use = self.os_admin.subnets_client.list_subnets(
+ tenant_id=tenant_id, cidr=cidr)['subnets']
+ return len(cidr_in_use) != 0
+
+ ip_version = kwargs.pop('ip_version', 4)
+
+ if ip_version == 6:
+ tenant_cidr = netaddr.IPNetwork(
+ CONF.network.project_network_v6_cidr)
+ num_bits = CONF.network.project_network_v6_mask_bits
+ else:
+ tenant_cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
+ num_bits = CONF.network.project_network_mask_bits
+
+ result = None
+ str_cidr = None
+ # Repeatedly attempt subnet creation with sequential cidr
+ # blocks until an unallocated block is found.
+ for subnet_cidr in tenant_cidr.subnet(num_bits):
+ str_cidr = str(subnet_cidr)
+ if cidr_in_use(str_cidr, tenant_id=network['tenant_id']):
+ continue
+
+ subnet = dict(
+ name=data_utils.rand_name(namestart),
+ network_id=network['id'],
+ tenant_id=network['tenant_id'],
+ cidr=str_cidr,
+ ip_version=ip_version,
+ **kwargs
+ )
+ try:
+ result = subnets_client.create_subnet(**subnet)
+ break
+ except lib_exc.Conflict as e:
+ is_overlapping_cidr = 'overlaps with another subnet' in str(e)
+ if not is_overlapping_cidr:
+ raise
+ self.assertIsNotNone(result, 'Unable to allocate tenant network')
+
+ subnet = result['subnet']
+ self.assertEqual(subnet['cidr'], str_cidr)
+
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ subnets_client.delete_subnet, subnet['id'])
+
+ return subnet
+
+ def _get_server_port_id_and_ip4(self, server, ip_addr=None):
+ ports = self.os_admin.ports_client.list_ports(
+ device_id=server['id'], fixed_ip=ip_addr)['ports']
+ # A port can have more than one IP address in some cases.
+ # If the network is dual-stack (IPv4 + IPv6), this port is associated
+ # with 2 subnets
+ p_status = ['ACTIVE']
+ # NOTE(vsaienko) With Ironic, instances live on separate hardware
+ # servers. Neutron does not bind ports for Ironic instances, as a
+ # result the port remains in the DOWN state.
+ # TODO(vsaienko) remove once bug: #1599836 is resolved.
+ if getattr(CONF.service_available, 'ironic', False):
+ p_status.append('DOWN')
+ port_map = [(p["id"], fxip["ip_address"])
+ for p in ports
+ for fxip in p["fixed_ips"]
+ if (netutils.is_valid_ipv4(fxip["ip_address"]) and
+ p['status'] in p_status)]
+ inactive = [p for p in ports if p['status'] != 'ACTIVE']
+ if inactive:
+ LOG.warning("Instance has ports that are not ACTIVE: %s", inactive)
+
+ self.assertNotEqual(0, len(port_map),
+ "No IPv4 addresses found in: %s" % ports)
+ self.assertEqual(len(port_map), 1,
+ "Found multiple IPv4 addresses: %s. "
+ "Unable to determine which port to target."
+ % port_map)
+ return port_map[0]
+
+ def _get_network_by_name(self, network_name):
+ net = self.os_admin.networks_client.list_networks(
+ name=network_name)['networks']
+ self.assertNotEqual(len(net), 0,
+ "Unable to get network by name: %s" % network_name)
+ return net[0]
+
+ def create_floating_ip(self, thing, external_network_id=None,
+ port_id=None, client=None):
+ """Create a floating IP and associates to a resource/port on Neutron"""
+ if not external_network_id:
+ external_network_id = CONF.network.public_network_id
+ if not client:
+ client = self.floating_ips_client
+ if not port_id:
+ port_id, ip4 = self._get_server_port_id_and_ip4(thing)
+ else:
+ ip4 = None
+ result = client.create_floatingip(
+ floating_network_id=external_network_id,
+ port_id=port_id,
+ tenant_id=thing['tenant_id'],
+ fixed_ip_address=ip4
+ )
+ floating_ip = result['floatingip']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ client.delete_floatingip,
+ floating_ip['id'])
+ return floating_ip
+
+ def _associate_floating_ip(self, floating_ip, server):
+ port_id, _ = self._get_server_port_id_and_ip4(server)
+ kwargs = dict(port_id=port_id)
+ floating_ip = self.floating_ips_client.update_floatingip(
+ floating_ip['id'], **kwargs)['floatingip']
+ self.assertEqual(port_id, floating_ip['port_id'])
+ return floating_ip
+
+ def _disassociate_floating_ip(self, floating_ip):
+ """:param floating_ip: floating_ips_client.create_floatingip"""
+ kwargs = dict(port_id=None)
+ floating_ip = self.floating_ips_client.update_floatingip(
+ floating_ip['id'], **kwargs)['floatingip']
+ self.assertIsNone(floating_ip['port_id'])
+ return floating_ip
+
+ def check_floating_ip_status(self, floating_ip, status):
+ """Verifies floatingip reaches the given status
+
+ :param dict floating_ip: floating IP dict to check status
+ :param status: target status
+ :raises: AssertionError if status doesn't match
+ """
+ floatingip_id = floating_ip['id']
+
+ def refresh():
+ result = (self.floating_ips_client.
+ show_floatingip(floatingip_id)['floatingip'])
+ return status == result['status']
+
+ test_utils.call_until_true(refresh,
+ CONF.network.build_timeout,
+ CONF.network.build_interval)
+ floating_ip = self.floating_ips_client.show_floatingip(
+ floatingip_id)['floatingip']
+ self.assertEqual(status, floating_ip['status'],
+ message="FloatingIP: {fp} is at status: {cst}. "
+ "failed to reach status: {st}"
+ .format(fp=floating_ip, cst=floating_ip['status'],
+ st=status))
+ LOG.info("FloatingIP: {fp} is at status: {st}"
+ .format(fp=floating_ip, st=status))
+
+ def _check_tenant_network_connectivity(self, server,
+ username,
+ private_key,
+ should_connect=True,
+ servers_for_debug=None):
+ if not CONF.network.project_networks_reachable:
+ msg = 'Tenant networks not configured to be reachable.'
+ LOG.info(msg)
+ return
+ # The target login is assumed to have been configured for
+ # key-based authentication by cloud-init.
+ try:
+ for net_name, ip_addresses in server['addresses'].items():
+ for ip_address in ip_addresses:
+ self.check_vm_connectivity(ip_address['addr'],
+ username,
+ private_key,
+ should_connect=should_connect)
+ except Exception as e:
+ LOG.exception('Tenant network connectivity check failed')
+ self._log_console_output(servers_for_debug)
+ self._log_net_info(e)
+ raise
+
+ def _check_remote_connectivity(self, source, dest, should_succeed=True,
+ nic=None):
+ """check ping server via source ssh connection
+
+ :param source: RemoteClient: an ssh connection from which to ping
+ :param dest: and IP to ping against
+ :param should_succeed: boolean should ping succeed or not
+ :param nic: specific network interface to ping from
+ :returns: boolean -- should_succeed == ping
+ :returns: ping is false if ping failed
+ """
+ def ping_remote():
+ try:
+ source.ping_host(dest, nic=nic)
+ except lib_exc.SSHExecCommandFailed:
+ LOG.warning('Failed to ping IP: %s via a ssh connection '
+ 'from: %s.', dest, source.ssh_client.host)
+ return not should_succeed
+ return should_succeed
+
+ return test_utils.call_until_true(ping_remote,
+ CONF.validation.ping_timeout,
+ 1)
+
+ def _create_security_group(self, security_group_rules_client=None,
+ tenant_id=None,
+ namestart='secgroup-smoke',
+ security_groups_client=None):
+ if security_group_rules_client is None:
+ security_group_rules_client = self.security_group_rules_client
+ if security_groups_client is None:
+ security_groups_client = self.security_groups_client
+ if tenant_id is None:
+ tenant_id = security_groups_client.tenant_id
+ secgroup = self._create_empty_security_group(
+ namestart=namestart, client=security_groups_client,
+ tenant_id=tenant_id)
+
+ # Add rules to the security group
+ rules = self._create_loginable_secgroup_rule(
+ security_group_rules_client=security_group_rules_client,
+ secgroup=secgroup,
+ security_groups_client=security_groups_client)
+ for rule in rules:
+ self.assertEqual(tenant_id, rule['tenant_id'])
+ self.assertEqual(secgroup['id'], rule['security_group_id'])
+ return secgroup
+
+ def _create_empty_security_group(self, client=None, tenant_id=None,
+ namestart='secgroup-smoke'):
+ """Create a security group without rules.
+
+ Default rules will be created:
+ - IPv4 egress to any
+ - IPv6 egress to any
+
+ :param tenant_id: secgroup will be created in this tenant
+ :returns: the created security group
+ """
+ if client is None:
+ client = self.security_groups_client
+ if not tenant_id:
+ tenant_id = client.tenant_id
+ sg_name = data_utils.rand_name(namestart)
+ sg_desc = sg_name + " description"
+ sg_dict = dict(name=sg_name,
+ description=sg_desc)
+ sg_dict['tenant_id'] = tenant_id
+ result = client.create_security_group(**sg_dict)
+
+ secgroup = result['security_group']
+ self.assertEqual(secgroup['name'], sg_name)
+ self.assertEqual(tenant_id, secgroup['tenant_id'])
+ self.assertEqual(secgroup['description'], sg_desc)
+
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ client.delete_security_group, secgroup['id'])
+ return secgroup
+
+ def _default_security_group(self, client=None, tenant_id=None):
+ """Get default secgroup for given tenant_id.
+
+ :returns: default secgroup for given tenant
+ """
+ if client is None:
+ client = self.security_groups_client
+ if not tenant_id:
+ tenant_id = client.tenant_id
+ sgs = [
+ sg for sg in list(client.list_security_groups().values())[0]
+ if sg['tenant_id'] == tenant_id and sg['name'] == 'default'
+ ]
+ msg = "No default security group for tenant %s." % (tenant_id)
+ self.assertGreater(len(sgs), 0, msg)
+ return sgs[0]
+
+ def _create_security_group_rule(self, secgroup=None,
+ sec_group_rules_client=None,
+ tenant_id=None,
+ security_groups_client=None, **kwargs):
+ """Create a rule from a dictionary of rule parameters.
+
+ Create a rule in a secgroup. if secgroup not defined will search for
+ default secgroup in tenant_id.
+
+ :param secgroup: the security group.
+ :param tenant_id: if secgroup not passed -- the tenant in which to
+ search for default secgroup
+ :param kwargs: a dictionary containing rule parameters:
+ for example, to allow incoming ssh:
+ rule = {
+ direction: 'ingress'
+ protocol:'tcp',
+ port_range_min: 22,
+ port_range_max: 22
+ }
+ """
+ if sec_group_rules_client is None:
+ sec_group_rules_client = self.security_group_rules_client
+ if security_groups_client is None:
+ security_groups_client = self.security_groups_client
+ if not tenant_id:
+ tenant_id = security_groups_client.tenant_id
+ if secgroup is None:
+ secgroup = self._default_security_group(
+ client=security_groups_client, tenant_id=tenant_id)
+
+ ruleset = dict(security_group_id=secgroup['id'],
+ tenant_id=secgroup['tenant_id'])
+ ruleset.update(kwargs)
+
+ sg_rule = sec_group_rules_client.create_security_group_rule(**ruleset)
+ sg_rule = sg_rule['security_group_rule']
+
+ self.assertEqual(secgroup['tenant_id'], sg_rule['tenant_id'])
+ self.assertEqual(secgroup['id'], sg_rule['security_group_id'])
+
+ return sg_rule
+
+ def _create_loginable_secgroup_rule(self, security_group_rules_client=None,
+ secgroup=None,
+ security_groups_client=None):
+ """Create loginable security group rule
+
+ This function will create:
+ 1. egress and ingress tcp port 22 allow rule in order to allow ssh
+ access for ipv4.
+ 2. egress and ingress ipv6 icmp allow rule, in order to allow icmpv6.
+ 3. egress and ingress ipv4 icmp allow rule, in order to allow icmpv4.
+ """
+
+ if security_group_rules_client is None:
+ security_group_rules_client = self.security_group_rules_client
+ if security_groups_client is None:
+ security_groups_client = self.security_groups_client
+ rules = []
+ rulesets = [
+ dict(
+ # ssh
+ protocol='tcp',
+ port_range_min=22,
+ port_range_max=22,
+ ),
+ dict(
+ # ping
+ protocol='icmp',
+ ),
+ dict(
+ # ipv6-icmp for ping6
+ protocol='icmp',
+ ethertype='IPv6',
+ )
+ ]
+ sec_group_rules_client = security_group_rules_client
+ for ruleset in rulesets:
+ for r_direction in ['ingress', 'egress']:
+ ruleset['direction'] = r_direction
+ try:
+ sg_rule = self._create_security_group_rule(
+ sec_group_rules_client=sec_group_rules_client,
+ secgroup=secgroup,
+ security_groups_client=security_groups_client,
+ **ruleset)
+ except lib_exc.Conflict as ex:
+ # if rule already exist - skip rule and continue
+ msg = 'Security group rule already exists'
+ if msg not in ex._error_string:
+ raise ex
+ else:
+ self.assertEqual(r_direction, sg_rule['direction'])
+ rules.append(sg_rule)
+
+ return rules
+
+ def _get_router(self, client=None, tenant_id=None):
+ """Retrieve a router for the given tenant id.
+
+ If a public router has been configured, it will be returned.
+
+ If a public router has not been configured, but a public
+ network has, a tenant router will be created and returned that
+ routes traffic to the public network.
+ """
+ if not client:
+ client = self.routers_client
+ if not tenant_id:
+ tenant_id = client.tenant_id
+ router_id = CONF.network.public_router_id
+ network_id = CONF.network.public_network_id
+ if router_id:
+ body = client.show_router(router_id)
+ return body['router']
+ elif network_id:
+ router = self._create_router(client, tenant_id)
+ kwargs = {'external_gateway_info': dict(network_id=network_id)}
+ router = client.update_router(router['id'], **kwargs)['router']
+ return router
+ else:
+ raise Exception("Neither of 'public_router_id' or "
+ "'public_network_id' has been defined.")
+
+ def _create_router(self, client=None, tenant_id=None,
+ namestart='router-smoke'):
+ if not client:
+ client = self.routers_client
+ if not tenant_id:
+ tenant_id = client.tenant_id
+ name = data_utils.rand_name(namestart)
+ result = client.create_router(name=name,
+ admin_state_up=True,
+ tenant_id=tenant_id)
+ router = result['router']
+ self.assertEqual(router['name'], name)
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ client.delete_router,
+ router['id'])
+ return router
+
+ def _update_router_admin_state(self, router, admin_state_up):
+ kwargs = dict(admin_state_up=admin_state_up)
+ router = self.routers_client.update_router(
+ router['id'], **kwargs)['router']
+ self.assertEqual(admin_state_up, router['admin_state_up'])
+
+ def create_networks(self, networks_client=None,
+ routers_client=None, subnets_client=None,
+ tenant_id=None, dns_nameservers=None,
+ port_security_enabled=True):
+ """Create a network with a subnet connected to a router.
+
+ The baremetal driver is a special case since all nodes are
+ on the same shared network.
+
+ :param tenant_id: id of tenant to create resources in.
+ :param dns_nameservers: list of dns servers to send to subnet.
+ :returns: network, subnet, router
+ """
+ if CONF.network.shared_physical_network:
+ # NOTE(Shrews): This exception is for environments where tenant
+ # credential isolation is available, but network separation is
+ # not (the current baremetal case). Likely can be removed when
+ # test account mgmt is reworked:
+ # https://blueprints.launchpad.net/tempest/+spec/test-accounts
+ if not CONF.compute.fixed_network_name:
+ m = 'fixed_network_name must be specified in config'
+ raise lib_exc.InvalidConfiguration(m)
+ network = self._get_network_by_name(
+ CONF.compute.fixed_network_name)
+ router = None
+ subnet = None
+ else:
+ network = self._create_network(
+ networks_client=networks_client,
+ tenant_id=tenant_id,
+ port_security_enabled=port_security_enabled)
+ router = self._get_router(client=routers_client,
+ tenant_id=tenant_id)
+ subnet_kwargs = dict(network=network,
+ subnets_client=subnets_client,
+ routers_client=routers_client)
+ # use explicit check because empty list is a valid option
+ if dns_nameservers is not None:
+ subnet_kwargs['dns_nameservers'] = dns_nameservers
+ subnet = self._create_subnet(**subnet_kwargs)
+ if not routers_client:
+ routers_client = self.routers_client
+ router_id = router['id']
+ routers_client.add_router_interface(router_id,
+ subnet_id=subnet['id'])
+
+ # save a cleanup job to remove this association between
+ # router and subnet
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ routers_client.remove_router_interface, router_id,
+ subnet_id=subnet['id'])
+ return network, subnet, router
diff --git a/neutron_tempest_plugin/fwaas/scenario/test_fwaas_v2.py b/neutron_tempest_plugin/fwaas/scenario/test_fwaas_v2.py
new file mode 100644
index 0000000..e9dad0b
--- /dev/null
+++ b/neutron_tempest_plugin/fwaas/scenario/test_fwaas_v2.py
@@ -0,0 +1,303 @@
+# Copyright (c) 2016 Juniper Networks
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import testscenarios
+
+from oslo_log import log as logging
+from tempest.common import utils
+from tempest import config
+from tempest.lib.common.utils import test_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+
+from neutron_tempest_plugin.fwaas.scenario import fwaas_v2_base as base
+
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+load_tests = testscenarios.load_tests_apply_scenarios
+
+
+class TestFWaaS_v2(base.FWaaSScenarioTest_V2):
+
+ """Config Requirement in tempest.conf:
+
+ - project_network_cidr_bits- specifies the subnet range for each network
+ - project_network_cidr
+ - public_network_id
+ """
+
+ def setUp(self):
+ LOG.debug("Initializing FWaaSScenarioTest Setup")
+ super(TestFWaaS_v2, self).setUp()
+ required_exts = ['fwaas_v2', 'security-group', 'router']
+ # if self.router_insertion:
+ # required_exts.append('fwaasrouterinsertion')
+ for ext in required_exts:
+ if not utils.is_extension_enabled(ext, 'network'):
+ msg = "%s Extension not enabled." % ext
+ raise self.skipException(msg)
+ LOG.debug("FWaaSScenarioTest Setup done.")
+
+ def _create_server(self, network, security_group=None):
+ keys = self.create_keypair()
+ kwargs = {}
+ if security_group is not None:
+ kwargs['security_groups'] = [{'name': security_group['name']}]
+ server = self.create_server(
+ key_name=keys['name'],
+ networks=[{'uuid': network['id']}],
+ wait_until='ACTIVE',
+ **kwargs)
+ return server, keys
+
+ def _check_connectivity_between_internal_networks(
+ self, floating_ip1, keys1, network2, server2, should_connect=True):
+ internal_ips = (p['fixed_ips'][0]['ip_address'] for p in
+ self.os_admin.ports_client.list_ports(
+ tenant_id=server2['tenant_id'],
+ network_id=network2['id'])['ports']
+ if p['device_owner'].startswith('network'))
+ self._check_server_connectivity(
+ floating_ip1, keys1, internal_ips, should_connect)
+
+ def _check_server_connectivity(self, floating_ip, keys1, address_list,
+ should_connect=True):
+ ip_address = floating_ip['floating_ip_address']
+ private_key = keys1
+ ssh_source = self.get_remote_client(
+ ip_address, private_key=private_key)
+
+ for remote_ip in address_list:
+ if should_connect:
+ msg = ("Timed out waiting for %s to become "
+ "reachable") % remote_ip
+ else:
+ msg = "ip address %s is reachable" % remote_ip
+ try:
+ self.assertTrue(self._check_remote_connectivity
+ (ssh_source, remote_ip, should_connect),
+ msg)
+ except Exception:
+ LOG.exception("Unable to access {dest} via ssh to "
+ "floating-ip {src}".format(dest=remote_ip,
+ src=floating_ip))
+ raise
+
+ def _check_remote_connectivity(self, source, dest, should_succeed=True,
+ nic=None):
+ """check ping server via source ssh connection
+
+ :param source: RemoteClient: an ssh connection from which to ping
+        :param dest: an IP to ping against
+ :param should_succeed: boolean should ping succeed or not
+ :param nic: specific network interface to ping from
+ :returns: boolean -- should_succeed == ping
+ :returns: ping is false if ping failed
+ """
+ def ping_remote():
+ try:
+ source.ping_host(dest, nic=nic)
+ except lib_exc.SSHExecCommandFailed:
+ LOG.warning('Failed to ping IP: %s via a ssh connection '
+ 'from: %s.', dest, source.ssh_client.host)
+ return not should_succeed
+ return should_succeed
+
+ return test_utils.call_until_true(ping_remote,
+ CONF.validation.ping_timeout,
+ 1)
+
+ def _add_router_interface(self, router_id, subnet_id):
+ resp = self.routers_client.add_router_interface(
+ router_id, subnet_id=subnet_id)
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.routers_client.remove_router_interface, router_id,
+ subnet_id=subnet_id)
+ return resp
+
+ def _create_network_subnet(self):
+ network = self._create_network()
+ subnet_kwargs = dict(network=network)
+ subnet = self._create_subnet(**subnet_kwargs)
+ return network, subnet
+
+ def _create_test_server(self, network, security_group):
+ pub_network_id = CONF.network.public_network_id
+ server, keys = self._create_server(
+ network, security_group=security_group)
+ private_key = keys['private_key']
+ server_floating_ip = self.create_floating_ip(server, pub_network_id)
+ fixed_ip = list(server['addresses'].values())[0][0]['addr']
+ return server, private_key, fixed_ip, server_floating_ip
+
+ def _create_topology(self):
+ """Topology diagram:
+
+ +--------+ +-------------+
+ |"server"| | "subnet" |
+ | VM-1 +-------------+ "network-1" |
+ +--------+ +----+--------+
+ |
+ | router interface port
+ +----+-----+
+ | "router" |
+ +----+-----+
+ | router interface port
+ |
+ |
+ +--------+ +-------------+
+ |"server"| | "subnet" |
+ | VM-2 +-------------+ "network-2" |
+ +--------+ +----+--------+
+ """
+
+ LOG.debug('Starting Topology Creation')
+ resp = {}
+ # Create Network1 and Subnet1.
+ network1, subnet1 = self._create_network_subnet()
+ resp['network1'] = network1
+ resp['subnet1'] = subnet1
+
+ # Create Network2 and Subnet2.
+ network2, subnet2 = self._create_network_subnet()
+ resp['network2'] = network2
+ resp['subnet2'] = subnet2
+
+ # Create a router and attach Network1, Network2 and External Networks
+ # to it.
+ router = self._create_router(namestart='SCENARIO-TEST-ROUTER')
+ pub_network_id = CONF.network.public_network_id
+ kwargs = {'external_gateway_info': dict(network_id=pub_network_id)}
+ router = self.routers_client.update_router(
+ router['id'], **kwargs)['router']
+ router_id = router['id']
+ resp_add_intf = self._add_router_interface(
+ router_id, subnet_id=subnet1['id'])
+ router_portid_1 = resp_add_intf['port_id']
+ resp_add_intf = self._add_router_interface(
+ router_id, subnet_id=subnet2['id'])
+ router_portid_2 = resp_add_intf['port_id']
+ resp['router'] = router
+ resp['router_portid_1'] = router_portid_1
+ resp['router_portid_2'] = router_portid_2
+
+ # Create a VM on each of the network and assign it a floating IP.
+ security_group = self._create_security_group()
+ server1, private_key1, server_fixed_ip_1, server_floating_ip_1 = (
+ self._create_test_server(network1, security_group))
+ server2, private_key2, server_fixed_ip_2, server_floating_ip_2 = (
+ self._create_test_server(network2, security_group))
+ resp['server1'] = server1
+ resp['private_key1'] = private_key1
+ resp['server_fixed_ip_1'] = server_fixed_ip_1
+ resp['server_floating_ip_1'] = server_floating_ip_1
+ resp['server2'] = server2
+ resp['private_key2'] = private_key2
+ resp['server_fixed_ip_2'] = server_fixed_ip_2
+ resp['server_floating_ip_2'] = server_floating_ip_2
+
+ return resp
+
+ @decorators.idempotent_id('77fdf3ea-82c1-453d-bfec-f7efe335625d')
+ def test_icmp_reachability_scenarios(self):
+ topology = self._create_topology()
+ ssh_login = CONF.validation.image_ssh_user
+
+ self.check_vm_connectivity(
+ ip_address=topology['server_floating_ip_1']['floating_ip_address'],
+ username=ssh_login,
+ private_key=topology['private_key1'])
+ self.check_vm_connectivity(
+ ip_address=topology['server_floating_ip_2']['floating_ip_address'],
+ username=ssh_login,
+ private_key=topology['private_key2'])
+
+ # Scenario 1: Add allow ICMP rules between the two VMs.
+ fw_allow_icmp_rule = self.create_firewall_rule(action="allow",
+ protocol="icmp")
+ fw_allow_ssh_rule = self.create_firewall_rule(action="allow",
+ protocol="tcp",
+ destination_port=22)
+ fw_policy = self.create_firewall_policy(
+ firewall_rules=[fw_allow_icmp_rule['id'], fw_allow_ssh_rule['id']])
+ fw_group = self.create_firewall_group(
+ ports=[
+ topology['router_portid_1'],
+ topology['router_portid_2']],
+ ingress_firewall_policy_id=fw_policy['id'],
+ egress_firewall_policy_id=fw_policy['id'])
+ self._wait_firewall_group_ready(fw_group['id'])
+ LOG.debug('fw_allow_icmp_rule: %s\nfw_allow_ssh_rule: %s\n'
+ 'fw_policy: %s\nfw_group: %s\n',
+ fw_allow_icmp_rule, fw_allow_ssh_rule, fw_policy, fw_group)
+
+ # Check the connectivity between VM1 and VM2. It should Pass.
+ self._check_server_connectivity(
+ topology['server_floating_ip_1'],
+ topology['private_key1'],
+ address_list=[topology['server_fixed_ip_2']],
+ should_connect=True)
+
+ # Scenario 2: Now remove the allow_icmp rule add a deny_icmp rule and
+ # check that ICMP gets blocked
+ fw_deny_icmp_rule = self.create_firewall_rule(action="deny",
+ protocol="icmp")
+ self.remove_firewall_rule_from_policy_and_wait(
+ firewall_group_id=fw_group['id'],
+ firewall_rule_id=fw_allow_icmp_rule['id'],
+ firewall_policy_id=fw_policy['id'])
+ self.insert_firewall_rule_in_policy_and_wait(
+ firewall_group_id=fw_group['id'],
+ firewall_rule_id=fw_deny_icmp_rule['id'],
+ firewall_policy_id=fw_policy['id'])
+ self._check_server_connectivity(
+ topology['server_floating_ip_1'],
+ topology['private_key1'],
+ address_list=[topology['server_fixed_ip_2']],
+ should_connect=False)
+
+ # Scenario 3: Create a rule allowing ICMP only from server_fixed_ip_1
+ # to server_fixed_ip_2 and check that traffic from opposite direction
+ # is blocked.
+ fw_allow_unidirectional_icmp_rule = self.create_firewall_rule(
+ action="allow", protocol="icmp",
+ source_ip_address=topology['server_fixed_ip_1'],
+ destination_ip_address=topology['server_fixed_ip_2'])
+
+ self.remove_firewall_rule_from_policy_and_wait(
+ firewall_group_id=fw_group['id'],
+ firewall_rule_id=fw_deny_icmp_rule['id'],
+ firewall_policy_id=fw_policy['id'])
+ self.insert_firewall_rule_in_policy_and_wait(
+ firewall_group_id=fw_group['id'],
+ firewall_rule_id=fw_allow_unidirectional_icmp_rule['id'],
+ firewall_policy_id=fw_policy['id'])
+
+ self._check_server_connectivity(
+ topology['server_floating_ip_1'],
+ topology['private_key1'],
+ address_list=[topology['server_fixed_ip_2']],
+ should_connect=True)
+ self._check_server_connectivity(
+ topology['server_floating_ip_2'],
+ topology['private_key2'],
+ address_list=[topology['server_fixed_ip_1']],
+ should_connect=False)
+
+ # Disassociate ports of this firewall group for cleanup resources
+ self.firewall_groups_client.update_firewall_group(
+ fw_group['id'], ports=[])
diff --git a/neutron_tempest_plugin/fwaas/services/__init__.py b/neutron_tempest_plugin/fwaas/services/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/neutron_tempest_plugin/fwaas/services/__init__.py
diff --git a/neutron_tempest_plugin/fwaas/services/v2_client.py b/neutron_tempest_plugin/fwaas/services/v2_client.py
new file mode 100644
index 0000000..6660418
--- /dev/null
+++ b/neutron_tempest_plugin/fwaas/services/v2_client.py
@@ -0,0 +1,123 @@
+# Copyright (c) 2016
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib import exceptions as lib_exc
+from tempest.lib.services.network import base
+
+
+class FirewallGroupsClient(base.BaseNetworkClient):
+
+ def create_firewall_group(self, **kwargs):
+ uri = '/fwaas/firewall_groups'
+ post_data = {'firewall_group': kwargs}
+ return self.create_resource(uri, post_data)
+
+ def update_firewall_group(self, firewall_group_id, **kwargs):
+ uri = '/fwaas/firewall_groups/%s' % firewall_group_id
+ post_data = {'firewall_group': kwargs}
+ return self.update_resource(uri, post_data)
+
+ def show_firewall_group(self, firewall_group_id, **fields):
+ uri = '/fwaas/firewall_groups/%s' % firewall_group_id
+ return self.show_resource(uri, **fields)
+
+ def delete_firewall_group(self, firewall_group_id):
+ uri = '/fwaas/firewall_groups/%s' % firewall_group_id
+ return self.delete_resource(uri)
+
+ def list_firewall_groups(self, **filters):
+ uri = '/fwaas/firewall_groups'
+ return self.list_resources(uri, **filters)
+
+ def is_resource_deleted(self, id):
+ try:
+ self.show_firewall_group(id)
+ except lib_exc.NotFound:
+ return True
+ return False
+
+ @property
+ def resource_type(self):
+ """Returns the primary type of resource this client works with."""
+ return 'firewall_group'
+
+
+class FirewallRulesClient(base.BaseNetworkClient):
+
+ def create_firewall_rule(self, **kwargs):
+ uri = '/fwaas/firewall_rules'
+ post_data = {'firewall_rule': kwargs}
+ return self.create_resource(uri, post_data)
+
+ def update_firewall_rule(self, firewall_rule_id, **kwargs):
+ uri = '/fwaas/firewall_rules/%s' % firewall_rule_id
+ post_data = {'firewall_rule': kwargs}
+ return self.update_resource(uri, post_data)
+
+ def show_firewall_rule(self, firewall_rule_id, **fields):
+ uri = '/fwaas/firewall_rules/%s' % firewall_rule_id
+ return self.show_resource(uri, **fields)
+
+ def delete_firewall_rule(self, firewall_rule_id):
+ uri = '/fwaas/firewall_rules/%s' % firewall_rule_id
+ return self.delete_resource(uri)
+
+ def list_firewall_rules(self, **filters):
+ uri = '/fwaas/firewall_rules'
+ return self.list_resources(uri, **filters)
+
+
+class FirewallPoliciesClient(base.BaseNetworkClient):
+
+ def create_firewall_policy(self, **kwargs):
+ uri = '/fwaas/firewall_policies'
+ post_data = {'firewall_policy': kwargs}
+ return self.create_resource(uri, post_data)
+
+ def update_firewall_policy(self, firewall_policy_id, **kwargs):
+ uri = '/fwaas/firewall_policies/%s' % firewall_policy_id
+ post_data = {'firewall_policy': kwargs}
+ return self.update_resource(uri, post_data)
+
+ def show_firewall_policy(self, firewall_policy_id, **fields):
+ uri = '/fwaas/firewall_policies/%s' % firewall_policy_id
+ return self.show_resource(uri, **fields)
+
+ def delete_firewall_policy(self, firewall_policy_id):
+ uri = '/fwaas/firewall_policies/%s' % firewall_policy_id
+ return self.delete_resource(uri)
+
+ def list_firewall_policies(self, **filters):
+ uri = '/fwaas/firewall_policies'
+ return self.list_resources(uri, **filters)
+
+ def insert_firewall_rule_in_policy(self, firewall_policy_id,
+ firewall_rule_id, insert_after='',
+ insert_before=''):
+ uri = '/fwaas/firewall_policies/%s/insert_rule' % firewall_policy_id
+ data = {
+ 'firewall_rule_id': firewall_rule_id,
+ 'insert_after': insert_after,
+ 'insert_before': insert_before,
+ }
+ return self.update_resource(uri, data)
+
+ def remove_firewall_rule_from_policy(self, firewall_policy_id,
+ firewall_rule_id):
+ uri = '/fwaas/firewall_policies/%s/remove_rule' % firewall_policy_id
+ data = {
+ 'firewall_rule_id': firewall_rule_id,
+ }
+ return self.update_resource(uri, data)
diff --git a/neutron_tempest_plugin/scenario/base.py b/neutron_tempest_plugin/scenario/base.py
index ffb4dbd..f24c82b 100644
--- a/neutron_tempest_plugin/scenario/base.py
+++ b/neutron_tempest_plugin/scenario/base.py
@@ -238,21 +238,24 @@
LOG.debug("Server %s disappeared(deleted) while looking "
"for the console log", server['id'])
- def _check_remote_connectivity(self, source, dest, should_succeed=True,
+ def _check_remote_connectivity(self, source, dest, count,
+ should_succeed=True,
nic=None, mtu=None, fragmentation=True,
timeout=None):
"""check ping server via source ssh connection
:param source: RemoteClient: an ssh connection from which to ping
:param dest: and IP to ping against
+ :param count: Number of ping packet(s) to send
:param should_succeed: boolean should ping succeed or not
:param nic: specific network interface to ping from
:param mtu: mtu size for the packet to be sent
:param fragmentation: Flag for packet fragmentation
+ :param timeout: Timeout for all ping packet(s) to succeed
:returns: boolean -- should_succeed == ping
:returns: ping is false if ping failed
"""
- def ping_host(source, host, count=CONF.validation.ping_count,
+ def ping_host(source, host, count,
size=CONF.validation.ping_size, nic=None, mtu=None,
fragmentation=True):
IP_VERSION_4 = neutron_lib_constants.IP_VERSION_4
@@ -275,7 +278,7 @@
def ping_remote():
try:
- result = ping_host(source, dest, nic=nic, mtu=mtu,
+ result = ping_host(source, dest, count, nic=nic, mtu=mtu,
fragmentation=fragmentation)
except lib_exc.SSHExecCommandFailed:
@@ -296,10 +299,12 @@
def check_remote_connectivity(self, source, dest, should_succeed=True,
nic=None, mtu=None, fragmentation=True,
- servers=None, timeout=None):
+ servers=None, timeout=None,
+ ping_count=CONF.validation.ping_count):
try:
self.assertTrue(self._check_remote_connectivity(
- source, dest, should_succeed, nic, mtu, fragmentation,
+ source, dest, ping_count, should_succeed, nic, mtu,
+ fragmentation,
timeout=timeout))
except lib_exc.SSHTimeout as ssh_e:
LOG.debug(ssh_e)
diff --git a/neutron_tempest_plugin/scenario/test_connectivity.py b/neutron_tempest_plugin/scenario/test_connectivity.py
index 3385a04..78d8d95 100644
--- a/neutron_tempest_plugin/scenario/test_connectivity.py
+++ b/neutron_tempest_plugin/scenario/test_connectivity.py
@@ -47,14 +47,20 @@
'image_ref': CONF.compute.image_ref,
'key_name': self.keypair['name']
}
- vm1 = self.create_server(networks=[{'port': port_1['id']}], **params)
+ vms = []
+ vms.append(
+ self.create_server(networks=[{'port': port_1['id']}], **params))
if (CONF.compute.min_compute_nodes > 1 and
compute.is_scheduler_filter_enabled("DifferentHostFilter")):
params['scheduler_hints'] = {
- 'different_host': [vm1['server']['id']]}
+ 'different_host': [vms[0]['server']['id']]}
- self.create_server(networks=[{'port': port_2['id']}], **params)
+ vms.append(
+ self.create_server(networks=[{'port': port_2['id']}], **params))
+
+ for vm in vms:
+ self.wait_for_server_active(vm['server'])
@decorators.idempotent_id('8944b90d-1766-4669-bd8a-672b5d106bb7')
def test_connectivity_through_2_routers(self):
@@ -83,8 +89,6 @@
ap1_wan_port = self.create_port(wan_net)
ap2_wan_port = self.create_port(wan_net)
- self._create_servers(ap1_internal_port, ap2_internal_port)
-
self.client.add_router_interface_with_port_id(
ap1_rt['id'], ap1_wan_port['id'])
self.client.add_router_interface_with_port_id(
@@ -101,6 +105,8 @@
routes=[{"destination": ap1_subnet['cidr'],
"nexthop": ap1_wan_port['fixed_ips'][0]['ip_address']}])
+ self._create_servers(ap1_internal_port, ap2_internal_port)
+
ap1_fip = self.create_and_associate_floatingip(
ap1_internal_port['id'])
ap1_sshclient = ssh.Client(
@@ -109,3 +115,114 @@
self.check_remote_connectivity(
ap1_sshclient, ap2_internal_port['fixed_ips'][0]['ip_address'])
+
+ @decorators.idempotent_id('b72c3b77-3396-4144-b05d-9cd3c0099893')
+ def test_connectivity_router_east_west_traffic(self):
+        """This case is intended to test router east west traffic
+
+ The case can be used in various scenarios: legacy/distributed router,
+ same/different host.
+ """
+ net_1 = self.create_network()
+ net_2 = self.create_network()
+ subnet_1 = self.create_subnet(net_1, cidr="10.10.1.0/24")
+ subnet_2 = self.create_subnet(net_2, cidr="10.10.2.0/24")
+
+ router = self.create_router(
+ router_name=data_utils.rand_name("east_west_traffic_router"),
+ admin_state_up=True,
+ external_network_id=CONF.network.public_network_id)
+
+ internal_port_1 = self.create_port(
+ net_1, security_groups=[self.secgroup['id']])
+ internal_port_2 = self.create_port(
+ net_2, security_groups=[self.secgroup['id']])
+
+ self.create_router_interface(router['id'], subnet_1['id'])
+ self.create_router_interface(router['id'], subnet_2['id'])
+
+ self._create_servers(internal_port_1, internal_port_2)
+
+ fip = self.create_and_associate_floatingip(
+ internal_port_1['id'])
+ sshclient = ssh.Client(
+ fip['floating_ip_address'], CONF.validation.image_ssh_user,
+ pkey=self.keypair['private_key'])
+
+ self.check_remote_connectivity(
+ sshclient, internal_port_2['fixed_ips'][0]['ip_address'],
+ ping_count=10)
+
+ @utils.requires_ext(extension="dvr", service="network")
+ @decorators.idempotent_id('69d3650a-5c32-40bc-ae56-5c4c849ddd37')
+ def test_connectivity_dvr_and_no_dvr_routers_in_same_subnet(self):
+ """This test case tests connectivity between vm and 2 routers.
+
+ Subnet is connected to dvr and non-dvr routers in the same time, test
+ ensures that connectivity from VM to both routers is working.
+
+ Test scenario:
+ +----------------+ +------------+
+ | Non-dvr router | | DVR router |
+ | | | |
+ | 10.0.0.1 | | 10.0.0.x |
+ +-------+--------+ +-----+------+
+ | |
+ | 10.0.0.0/24 |
+ +----------------+----------------+
+ |
+ +-+-+
+ |VM |
+ +---+
+
+ where:
+ 10.0.0.1 - is subnet's gateway IP address,
+ 10.0.0.x - is any other IP address taken from subnet's range
+
+ Test ensures that both 10.0.0.1 and 10.0.0.x IP addresses are
+ reachable from VM.
+ """
+
+ network = self.create_network()
+ subnet = self.create_subnet(
+ network, cidr="10.0.0.0/24", gateway="10.0.0.1")
+
+ non_dvr_router = self.create_router_by_client(
+ tenant_id=self.client.tenant_id,
+ is_admin=True,
+ router_name=data_utils.rand_name("nondvr-2-routers-same-network"),
+ admin_state_up=True,
+ distributed=False)
+ self.create_router_interface(non_dvr_router['id'], subnet['id'])
+
+ dvr_router = self.create_router_by_client(
+ tenant_id=self.client.tenant_id,
+ is_admin=True,
+ router_name=data_utils.rand_name("dvr-2-rotuers-same-network"),
+ admin_state_up=True,
+ distributed=True)
+ dvr_router_port = self.create_port(network)
+ self.client.add_router_interface_with_port_id(
+ dvr_router['id'], dvr_router_port['id'])
+
+ vm = self.create_server(
+ flavor_ref=CONF.compute.flavor_ref,
+ image_ref=CONF.compute.image_ref,
+ key_name=self.keypair['name'],
+ networks=[{'uuid': network['id']}],
+ security_groups=[{'name': self.secgroup['name']}])
+ self.wait_for_server_active(vm['server'])
+
+ vm_port = self.client.list_ports(
+ network_id=network['id'], device_id=vm['server']['id'])['ports'][0]
+ fip = self.create_and_associate_floatingip(vm_port['id'])
+
+ sshclient = ssh.Client(
+ fip['floating_ip_address'], CONF.validation.image_ssh_user,
+ pkey=self.keypair['private_key'])
+
+ self.check_remote_connectivity(
+ sshclient, '10.0.0.1', ping_count=10)
+ self.check_remote_connectivity(
+ sshclient, dvr_router_port['fixed_ips'][0]['ip_address'],
+ ping_count=10)
diff --git a/neutron_tempest_plugin/scenario/test_internal_dns.py b/neutron_tempest_plugin/scenario/test_internal_dns.py
index fadabb0..13ca797 100644
--- a/neutron_tempest_plugin/scenario/test_internal_dns.py
+++ b/neutron_tempest_plugin/scenario/test_internal_dns.py
@@ -35,7 +35,6 @@
2.1) ping the other VM's internal IP
2.2) ping the other VM's hostname
"""
-
network = self.create_network(dns_domain='starwars.')
self.setup_network_and_server(network=network, server_name='luke')
self.create_pingable_secgroup_rule(
@@ -71,8 +70,10 @@
self.check_remote_connectivity(
ssh_client, leia_port['fixed_ips'][0]['ip_address'],
timeout=CONF.validation.ping_timeout * 10)
- self.assertIn(
- 'starwars', ssh_client.exec_command('cat /etc/resolv.conf'))
+
+ resolv_conf = ssh_client.exec_command('cat /etc/resolv.conf')
+ self.assertIn('openstackgate.local', resolv_conf)
+ self.assertNotIn('starwars', resolv_conf)
self.check_remote_connectivity(ssh_client, 'leia')
- self.check_remote_connectivity(ssh_client, 'leia.starwars')
+ self.check_remote_connectivity(ssh_client, 'leia.openstackgate.local')
diff --git a/neutron_tempest_plugin/scenario/test_multicast.py b/neutron_tempest_plugin/scenario/test_multicast.py
new file mode 100644
index 0000000..cfaa73f
--- /dev/null
+++ b/neutron_tempest_plugin/scenario/test_multicast.py
@@ -0,0 +1,297 @@
+# Copyright 2018 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import netaddr
+from neutron_lib import constants
+from oslo_log import log
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+
+from neutron_tempest_plugin.common import ssh
+from neutron_tempest_plugin.common import utils
+from neutron_tempest_plugin import config
+from neutron_tempest_plugin.scenario import base
+
+
+CONF = config.CONF
+LOG = log.getLogger(__name__)
+
+
+def get_receiver_script(group, port, hello_message, ack_message, result_file):
+
+ return """
+import socket
+import struct
+import sys
+
+multicast_group = '%(group)s'
+server_address = ('', %(port)s)
+
+# Create the socket
+sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
+
+# Bind to the server address
+sock.bind(server_address)
+
+# Tell the operating system to add the socket to the multicast group
+# on all interfaces.
+group = socket.inet_aton(multicast_group)
+mreq = struct.pack('4sL', group, socket.INADDR_ANY)
+sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
+
+# Receive/respond loop
+with open('%(result_file)s', 'w') as f:
+ f.write('%(hello_message)s')
+ f.flush()
+ data, address = sock.recvfrom(1024)
+ f.write('received ' + str(len(data)) + ' bytes from ' + str(address))
+ f.write(str(data))
+sock.sendto(b'%(ack_message)s', address)
+ """ % {'group': group,
+ 'port': port,
+ 'hello_message': hello_message,
+ 'ack_message': ack_message,
+ 'result_file': result_file}
+
+
+def get_sender_script(group, port, message, result_file):
+
+ return """
+import socket
+import sys
+
+message = b'%(message)s'
+multicast_group = ('%(group)s', %(port)s)
+
+# Create the datagram socket
+sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
+# Set the time-to-live for messages to 1 so they do not go past the
+# local network segment.
+sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 1)
+
+# Set a timeout so the socket does not block indefinitely when trying
+# to receive data.
+sock.settimeout(1)
+
+with open('%(result_file)s', 'w') as f:
+ try:
+ # Send data to the multicast group
+ sent = sock.sendto(message, multicast_group)
+
+ # Look for responses from all recipients
+ while True:
+ try:
+ data, server = sock.recvfrom(1024)
+ except socket.timeout:
+ f.write('timed out, no more responses')
+ break
+ else:
+ f.write('received reply ' + str(data) + ' from ' + str(server))
+ finally:
+ sys.stdout.write('closing socket')
+ sock.close()
+ """ % {'group': group,
+ 'port': port,
+ 'message': message,
+ 'result_file': result_file}
+
+
+class BaseMulticastTest(object):
+
+ credentials = ['primary']
+ force_tenant_isolation = False
+
+ # Import configuration options
+ available_type_drivers = (
+ CONF.neutron_plugin_options.available_type_drivers)
+
+ hello_message = "I am waiting..."
+ multicast_port = 5007
+ multicast_message = "Big Bang"
+ receiver_output_file = "/tmp/receiver_mcast_out"
+ sender_output_file = "/tmp/sender_mcast_out"
+
+ @classmethod
+ def skip_checks(cls):
+ super(BaseMulticastTest, cls).skip_checks()
+ advanced_image_available = (
+ CONF.neutron_plugin_options.advanced_image_ref or
+ CONF.neutron_plugin_options.default_image_is_advanced)
+ if not advanced_image_available:
+ skip_reason = "This test require advanced tools for this test"
+ raise cls.skipException(skip_reason)
+
+ @classmethod
+ def resource_setup(cls):
+ super(BaseMulticastTest, cls).resource_setup()
+
+ if CONF.neutron_plugin_options.default_image_is_advanced:
+ cls.flavor_ref = CONF.compute.flavor_ref
+ cls.image_ref = CONF.compute.image_ref
+ cls.username = CONF.validation.image_ssh_user
+ else:
+ cls.flavor_ref = (
+ CONF.neutron_plugin_options.advanced_image_flavor_ref)
+ cls.image_ref = CONF.neutron_plugin_options.advanced_image_ref
+ cls.username = CONF.neutron_plugin_options.advanced_image_ssh_user
+
+ # setup basic topology for servers we can log into it
+ cls.network = cls.create_network()
+ cls.subnet = cls.create_subnet(cls.network)
+ cls.router = cls.create_router_by_client()
+ cls.create_router_interface(cls.router['id'], cls.subnet['id'])
+
+ cls.keypair = cls.create_keypair()
+
+ cls.secgroup = cls.os_primary.network_client.create_security_group(
+ name='secgroup_mtu')
+ cls.security_groups.append(cls.secgroup['security_group'])
+ cls.create_loginable_secgroup_rule(
+ secgroup_id=cls.secgroup['security_group']['id'])
+ cls.create_pingable_secgroup_rule(
+ secgroup_id=cls.secgroup['security_group']['id'])
+ # Create security group rule for UDP (multicast traffic)
+ cls.create_secgroup_rules(
+ rule_list=[dict(protocol=constants.PROTO_NAME_UDP,
+ direction=constants.INGRESS_DIRECTION,
+ remote_ip_prefix=cls.any_addresses,
+ ethertype=cls.ethertype)],
+ secgroup_id=cls.secgroup['security_group']['id'])
+
+        # Multicast IP range to be used for multicast group IP assignment
+ if '-' in cls.multicast_group_range:
+ multicast_group_range = netaddr.IPRange(
+ *cls.multicast_group_range.split('-'))
+ else:
+ multicast_group_range = netaddr.IPNetwork(
+ cls.multicast_group_range)
+ cls.multicast_group_iter = iter(multicast_group_range)
+
+ def _create_server(self):
+ name = data_utils.rand_name("multicast-server")
+ server = self.create_server(
+ flavor_ref=self.flavor_ref,
+ image_ref=self.image_ref,
+ key_name=self.keypair['name'], name=name,
+ networks=[{'uuid': self.network['id']}],
+ security_groups=[{'name': self.secgroup['security_group']['name']}]
+ )['server']
+ self.wait_for_server_active(server)
+ port = self.client.list_ports(
+ network_id=self.network['id'], device_id=server['id'])['ports'][0]
+ server['fip'] = self.create_floatingip(port=port)
+ return server
+
+ def _prepare_sender(self, server, mcast_address):
+ check_script = get_sender_script(
+ group=mcast_address, port=self.multicast_port,
+ message=self.multicast_message,
+ result_file=self.sender_output_file)
+ ssh_client = ssh.Client(server['fip']['floating_ip_address'],
+ self.username,
+ pkey=self.keypair['private_key'])
+
+ ssh_client.execute_script(
+ 'echo "%s" > ~/multicast_traffic_sender.py' % check_script)
+ return ssh_client
+
+ def _prepare_receiver(self, server, mcast_address):
+ check_script = get_receiver_script(
+ group=mcast_address, port=self.multicast_port,
+ hello_message=self.hello_message, ack_message=server['id'],
+ result_file=self.receiver_output_file)
+ ssh_client = ssh.Client(
+ server['fip']['floating_ip_address'],
+ self.username,
+ pkey=self.keypair['private_key'])
+ ssh_client.execute_script(
+ 'echo "%s" > ~/multicast_traffic_receiver.py' % check_script)
+ return ssh_client
+
+ @decorators.idempotent_id('113486fc-24c9-4be4-8361-03b1c9892867')
+ def test_multicast_between_vms_on_same_network(self):
+ """Test multicast messaging between two servers on the same network
+
+ [Sender server] -> (Multicast network) -> [Receiver server]
+ """
+ sender = self._create_server()
+ receivers = [self._create_server() for _ in range(1)]
+ # Sender can be also receiver of multicast traffic
+ receivers.append(sender)
+ self._check_multicast_conectivity(sender=sender, receivers=receivers)
+
+ def _check_multicast_conectivity(self, sender, receivers):
+ """Test multi-cast messaging between two servers
+
+ [Sender server] -> ... some network topology ... -> [Receiver server]
+ """
+ mcast_address = next(self.multicast_group_iter)
+ LOG.debug("Multicast group address: %s", mcast_address)
+
+ def _message_received(client, msg, file_path):
+ result = client.execute_script(
+ "cat {path} || echo '{path} not exists yet'".format(
+ path=file_path))
+ return msg in result
+
+ sender_ssh_client = self._prepare_sender(sender, mcast_address)
+ receiver_ssh_clients = []
+ receiver_ids = []
+ for receiver in receivers:
+ receiver_ssh_client = self._prepare_receiver(
+ receiver, mcast_address)
+ receiver_ssh_client.execute_script(
+ "python3 ~/multicast_traffic_receiver.py &", shell="bash")
+ utils.wait_until_true(
+ lambda: _message_received(
+ receiver_ssh_client, self.hello_message,
+ self.receiver_output_file),
+ exception=RuntimeError(
+ "Receiver script didn't start properly on server "
+ "{!r}.".format(receiver['id'])))
+
+ receiver_ssh_clients.append(receiver_ssh_client)
+ receiver_ids.append(receiver['id'])
+
+ # Now lets run scripts on sender
+ sender_ssh_client.execute_script(
+ "python3 ~/multicast_traffic_sender.py")
+
+ # And check if message was received
+ for receiver_ssh_client in receiver_ssh_clients:
+ utils.wait_until_true(
+ lambda: _message_received(
+ receiver_ssh_client, self.multicast_message,
+ self.receiver_output_file),
+ exception=RuntimeError(
+ "Receiver {!r} didn't get multicast message".format(
+ receiver['id'])))
+
+        # TODO(slaweq): add validation of answers on the sender server
+ replies_result = sender_ssh_client.execute_script(
+ "cat {path} || echo '{path} not exists yet'".format(
+ path=self.sender_output_file))
+ for receiver_id in receiver_ids:
+ self.assertIn(receiver_id, replies_result)
+
+
+class MulticastTestIPv4(BaseMulticastTest, base.BaseTempestTestCase):
+
+ # Import configuration options
+ multicast_group_range = CONF.neutron_plugin_options.multicast_group_range
+
+ # IP version specific parameters
+ _ip_version = constants.IP_VERSION_4
+ any_addresses = constants.IPv4_ANY
diff --git a/neutron_tempest_plugin/scenario/test_port_forwardings.py b/neutron_tempest_plugin/scenario/test_port_forwardings.py
new file mode 100644
index 0000000..1d1fe94
--- /dev/null
+++ b/neutron_tempest_plugin/scenario/test_port_forwardings.py
@@ -0,0 +1,88 @@
+# Copyright 2019 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+
+from neutron_tempest_plugin.common import ssh
+from neutron_tempest_plugin import config
+from neutron_tempest_plugin.scenario import base
+
+CONF = config.CONF
+
+LOG = log.getLogger(__name__)
+
+
+class PortForwardingTestJSON(base.BaseTempestTestCase):
+
+ required_extensions = ['router', 'floating-ip-port-forwarding']
+
+ @classmethod
+ def resource_setup(cls):
+ super(PortForwardingTestJSON, cls).resource_setup()
+ cls.network = cls.create_network()
+ cls.subnet = cls.create_subnet(cls.network)
+ cls.router = cls.create_router_by_client()
+ cls.create_router_interface(cls.router['id'], cls.subnet['id'])
+ cls.fip = cls.create_floatingip()
+ cls.secgroup = cls.create_security_group(
+ name=data_utils.rand_name("test_port_secgroup"))
+ cls.create_loginable_secgroup_rule(secgroup_id=cls.secgroup['id'])
+ cls.keypair = cls.create_keypair()
+
+ @decorators.idempotent_id('ab40fc48-ca8d-41a0-b2a3-f6679c847bfe')
+ def test_port_forwarding_to_2_servers(self):
+ internal_tcp_port = 22
+ servers = []
+ for i in range(1, 3):
+ external_tcp_port = 1000 + i
+ name = data_utils.rand_name("server-%s" % i)
+ port = self.create_port(
+ self.network,
+ security_groups=[self.secgroup['id']])
+ server = self.create_server(
+ flavor_ref=CONF.compute.flavor_ref,
+ image_ref=CONF.compute.image_ref,
+ key_name=self.keypair['name'], name=name,
+ networks=[{'port': port['id']}])['server']
+ server['name'] = name
+ self.wait_for_server_active(server)
+ server['port_forwarding'] = self.create_port_forwarding(
+ self.fip['id'],
+ internal_port_id=port['id'],
+ internal_ip_address=port['fixed_ips'][0]['ip_address'],
+ internal_port=internal_tcp_port,
+ external_port=external_tcp_port,
+ protocol="tcp")
+ servers.append(server)
+
+ try:
+ for server in servers:
+ ssh_client = ssh.Client(
+ self.fip['floating_ip_address'],
+ CONF.validation.image_ssh_user,
+ pkey=self.keypair['private_key'],
+ port=server['port_forwarding']['external_port'])
+ self.assertIn(server['name'],
+ ssh_client.exec_command('hostname'))
+ except lib_exc.SSHTimeout as ssh_e:
+ LOG.debug(ssh_e)
+ self._log_console_output(servers)
+ raise
+ except AssertionError:
+ self._log_console_output(servers)
+ raise
diff --git a/neutron_tempest_plugin/services/network/json/network_client.py b/neutron_tempest_plugin/services/network/json/network_client.py
index 25fc8c1..11ba8ef 100644
--- a/neutron_tempest_plugin/services/network/json/network_client.py
+++ b/neutron_tempest_plugin/services/network/json/network_client.py
@@ -227,6 +227,23 @@
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
+ def add_subnetpool_prefix(self, id, **kwargs):
+ return self._subnetpool_prefix_operation(id, 'add_prefixes', kwargs)
+
+ def remove_subnetpool_prefix(self, id, **kwargs):
+ return self._subnetpool_prefix_operation(id,
+ 'remove_prefixes',
+ kwargs)
+
+ def _subnetpool_prefix_operation(self, id, operation, op_body):
+ uri = self.get_uri("subnetpools")
+ op_prefix_uri = '%s/%s/%s' % (uri, id, operation)
+ body = jsonutils.dumps(op_body)
+ resp, body = self.put(op_prefix_uri, body)
+ body = jsonutils.loads(body)
+ self.expected_success(200, resp.status)
+ return service_client.ResponseBody(resp, body)
+
# Common methods that are hard to automate
def create_bulk_network(self, names, shared=False):
network_list = [{'name': name, 'shared': shared} for name in names]
@@ -938,6 +955,55 @@
body = jsonutils.loads(resp_body)
return service_client.ResponseBody(put_resp, body)
+ def create_port_forwarding(self, fip_id, internal_port_id,
+ internal_port, external_port,
+ internal_ip_address=None, protocol='tcp'):
+ post_body = {'port_forwarding': {
+ 'protocol': protocol,
+ 'internal_port_id': internal_port_id,
+ 'internal_port': int(internal_port),
+ 'external_port': int(external_port)}}
+ if internal_ip_address:
+ post_body['port_forwarding']['internal_ip_address'] = (
+ internal_ip_address)
+ body = jsonutils.dumps(post_body)
+ uri = '%s/floatingips/%s/port_forwardings' % (self.uri_prefix, fip_id)
+ resp, body = self.post(uri, body)
+ self.expected_success(201, resp.status)
+ body = jsonutils.loads(body)
+ return service_client.ResponseBody(resp, body)
+
+ def get_port_forwarding(self, fip_id, pf_id):
+ uri = '%s/floatingips/%s/port_forwardings/%s' % (self.uri_prefix,
+ fip_id, pf_id)
+ get_resp, get_resp_body = self.get(uri)
+ self.expected_success(200, get_resp.status)
+ body = jsonutils.loads(get_resp_body)
+ return service_client.ResponseBody(get_resp, body)
+
+ def list_port_forwardings(self, fip_id):
+ uri = '%s/floatingips/%s/port_forwardings' % (self.uri_prefix, fip_id)
+ resp, body = self.get(uri)
+ self.expected_success(200, resp.status)
+ body = jsonutils.loads(body)
+ return service_client.ResponseBody(resp, body)
+
+ def update_port_forwarding(self, fip_id, pf_id, **kwargs):
+ uri = '%s/floatingips/%s/port_forwardings/%s' % (self.uri_prefix,
+ fip_id, pf_id)
+ put_body = jsonutils.dumps({'port_forwarding': kwargs})
+ put_resp, resp_body = self.put(uri, put_body)
+ self.expected_success(200, put_resp.status)
+ body = jsonutils.loads(resp_body)
+ return service_client.ResponseBody(put_resp, body)
+
+ def delete_port_forwarding(self, fip_id, pf_id):
+ uri = '%s/floatingips/%s/port_forwardings/%s' % (self.uri_prefix,
+ fip_id, pf_id)
+ resp, body = self.delete(uri)
+ self.expected_success(204, resp.status)
+ service_client.ResponseBody(resp, body)
+
def create_network_keystone_v3(self, name, project_id, tenant_id=None):
uri = '%s/networks' % self.uri_prefix
post_data = {
diff --git a/neutron_tempest_plugin/sfc/tests/api/base.py b/neutron_tempest_plugin/sfc/tests/api/base.py
index 732e2dc..606aed6 100644
--- a/neutron_tempest_plugin/sfc/tests/api/base.py
+++ b/neutron_tempest_plugin/sfc/tests/api/base.py
@@ -18,17 +18,31 @@
import netaddr
from tempest.api.network import base
from tempest.common import utils
+from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
from neutron_tempest_plugin.sfc.tests import flowclassifier_client
from neutron_tempest_plugin.sfc.tests import sfc_client
+CONF = config.CONF
+
class BaseFlowClassifierTest(
flowclassifier_client.FlowClassifierClientMixin,
base.BaseAdminNetworkTest
):
+
+ @classmethod
+ def skip_checks(cls):
+ super(BaseFlowClassifierTest, cls).skip_checks()
+ msg = None
+ if not CONF.sfc.run_sfc_tests:
+ msg = ("Running of SFC related tests is disabled in "
+ "plugin configuration.")
+ if msg:
+ raise cls.skipException(msg)
+
@classmethod
def resource_setup(cls):
super(BaseFlowClassifierTest, cls).resource_setup()
diff --git a/neutron_tempest_plugin/sfc/tests/scenario/base.py b/neutron_tempest_plugin/sfc/tests/scenario/base.py
index d4cff18..44b5cd2 100644
--- a/neutron_tempest_plugin/sfc/tests/scenario/base.py
+++ b/neutron_tempest_plugin/sfc/tests/scenario/base.py
@@ -30,6 +30,17 @@
sfc_client.SfcClientMixin,
manager.NetworkScenarioTest
):
+
+ @classmethod
+ def skip_checks(cls):
+ super(SfcScenarioTest, cls).skip_checks()
+ msg = None
+ if not CONF.sfc.run_sfc_tests:
+ msg = ("Running of SFC related tests is disabled in "
+ "plugin configuration.")
+ if msg:
+ raise cls.skipException(msg)
+
def _check_connectivity(
self, source_ip, destination_ip, routes=None,
username=None, private_key=None
diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py
index 66d6a9e..770396a 100644
--- a/releasenotes/source/conf.py
+++ b/releasenotes/source/conf.py
@@ -52,7 +52,6 @@
master_doc = 'index'
# General information about the project.
-project = u'Neutron Tempest Plugin Release Notes'
copyright = u'2017, Neutron Tempest Plugin Developers'
# openstackdocstheme options
@@ -60,16 +59,6 @@
bug_project = 'neutron'
bug_tag = ''
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-# The full version, including alpha/beta/rc tags.
-release = ''
-# The short X.Y version.
-version = ''
-
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
@@ -196,17 +185,6 @@
# -- Options for LaTeX output ---------------------------------------------
-latex_elements = {
- # The paper size ('letterpaper' or 'a4paper').
- # 'papersize': 'letterpaper',
-
- # The font size ('10pt', '11pt' or '12pt').
- # 'pointsize': '10pt',
-
- # Additional stuff for the LaTeX preamble.
- # 'preamble': '',
-}
-
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 5648f19..7486090 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -1,6 +1,6 @@
-======================================
- Neutron Tempest Plugin Release Notes
-======================================
+====================================
+Neutron Tempest Plugin Release Notes
+====================================
.. toctree::
:maxdepth: 1
diff --git a/test-requirements.txt b/test-requirements.txt
index c0546cf..8b251f6 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -7,10 +7,11 @@
coverage!=4.4,>=4.0 # Apache-2.0
flake8-import-order==0.12 # LGPLv3
python-subunit>=1.0.0 # Apache-2.0/BSD
-sphinx!=1.6.6,!=1.6.7,>=1.6.2 # BSD
+sphinx!=1.6.6,!=1.6.7,>=1.6.2,<2.0.0;python_version=='2.7' # BSD
+sphinx!=1.6.6,!=1.6.7,!=2.1.0,>=1.6.2;python_version>='3.4' # BSD
oslotest>=3.2.0 # Apache-2.0
stestr>=1.0.0 # Apache-2.0
testtools>=2.2.0 # MIT
-openstackdocstheme>=1.18.1 # Apache-2.0
+openstackdocstheme>=1.20.0 # Apache-2.0
# releasenotes
reno>=2.5.0 # Apache-2.0
diff --git a/tox.ini b/tox.ini
index 6f8cd38..daf728a 100644
--- a/tox.ini
+++ b/tox.ini
@@ -63,8 +63,10 @@
ignore = E125,E126,E128,E129,N530
# H106: Don't put vim configuration in source files
# H203: Use assertIs(Not)None to check for None
+# H204: Use assert(Not)Equal to check for equality
+# H205: Use assert(Greater|Less)(Equal) for comparison
# H904: Delay string interpolations at logging calls
-enable-extensions = H106,H203,H904
+enable-extensions = H106,H203,H204,H205,H904
show-source = true
exclude = ./.*,build,dist,doc,*egg*,releasenotes
import-order-style = pep8