Migrate networking-sfc tests to neutron-tempest-plugin
As discussed in the neutron_ci meeting [1] the QA team would like to
move the tempest tests for the stadium projects from their repos to
repos specific to being tempest plugins. This is the first part of a
two stage move, by copying over the tempest tests to the
neutron-tempest-plugin repo [2] rather than spawning new repos to be
separate.
Some test UUIDs were also updated to fix duplicate idempotent ids.
[1] http://eavesdrop.openstack.org/meetings/neutron_ci/2019/neutron_ci.2019-03-12-16.01.log.html#l-94
[2] https://etherpad.openstack.org/p/neutron_stadium_move_to_tempest_plugin_repo
Change-Id: I80ff2daac44bd3a4ee179c7a6cf1d62a8fd2004c
diff --git a/.zuul.yaml b/.zuul.yaml
index 3e2318c..9a5ac25 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -690,6 +690,30 @@
vars:
branch_override: stable/stein
# Job exercising the networking-sfc tempest tests migrated into this
# plugin (everything under neutron_tempest_plugin.sfc).
- job:
    name: neutron-tempest-plugin-sfc
    parent: neutron-tempest-plugin
    # SFC scenario tests are slow; allow up to three hours.
    timeout: 10800
    required-projects:
      - openstack/devstack-gate
      - openstack/networking-sfc
      - openstack/neutron
      - openstack/neutron-tempest-plugin
      - openstack/tempest
    vars:
      tempest_test_regex: ^neutron_tempest_plugin\.sfc
      tox_envlist: all-plugin
      devstack_plugins:
        networking-sfc: https://opendev.org/openstack/networking-sfc
        neutron-tempest-plugin: https://opendev.org/openstack/neutron-tempest-plugin
      # Extensions added by networking-sfc; merged with the common list below.
      network_api_extensions_sfc:
        - flow_classifier
        - sfc
      devstack_localrc:
        NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_sfc) | join(',') }}"
    # Only trigger this job when SFC plugin files change.
    files:
      - ^neutron_tempest_plugin/sfc/.*$
- project-template:
name: neutron-tempest-plugin-jobs
check:
@@ -750,3 +774,6 @@
- check-requirements
- tempest-plugin-jobs
- release-notes-jobs-python3
+ check:
+ jobs:
+ - neutron-tempest-plugin-sfc
diff --git a/neutron_tempest_plugin/sfc/__init__.py b/neutron_tempest_plugin/sfc/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/neutron_tempest_plugin/sfc/__init__.py
diff --git a/neutron_tempest_plugin/sfc/services/__init__.py b/neutron_tempest_plugin/sfc/services/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/neutron_tempest_plugin/sfc/services/__init__.py
diff --git a/neutron_tempest_plugin/sfc/services/flowclassifier_client.py b/neutron_tempest_plugin/sfc/services/flowclassifier_client.py
new file mode 100644
index 0000000..44f3a88
--- /dev/null
+++ b/neutron_tempest_plugin/sfc/services/flowclassifier_client.py
@@ -0,0 +1,53 @@
+# Copyright 2016 Futurewei. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib import exceptions as lib_exc
+from tempest.lib.services.network import base
+
+
class FlowClassifierClient(base.BaseNetworkClient):
    """REST client for the networking-sfc flow classifier API."""

    # Collection endpoint shared by every flow classifier operation.
    _base_uri = '/sfc/flow_classifiers'

    def _resource_uri(self, flowclassifier_id):
        # Path of a single flow classifier.
        return '%s/%s' % (self._base_uri, flowclassifier_id)

    def create_flowclassifier(self, **kwargs):
        """POST a new flow classifier built from **kwargs."""
        return self.create_resource(
            self._base_uri, {'flow_classifier': kwargs})

    def update_flowclassifier(self, flowclassifier_id, **kwargs):
        """PUT updated attributes onto an existing flow classifier."""
        return self.update_resource(
            self._resource_uri(flowclassifier_id),
            {'flow_classifier': kwargs})

    def show_flowclassifier(self, flowclassifier_id, **fields):
        """GET a single flow classifier, optionally restricted to fields."""
        return self.show_resource(
            self._resource_uri(flowclassifier_id), **fields)

    def delete_flowclassifier(self, flowclassifier_id):
        """DELETE the given flow classifier."""
        return self.delete_resource(self._resource_uri(flowclassifier_id))

    def list_flowclassifiers(self, **filters):
        """GET all flow classifiers matching the given filters."""
        return self.list_resources(self._base_uri, **filters)

    def is_resource_deleted(self, id):
        """Report whether the flow classifier is gone (used by waiters)."""
        try:
            self.show_flowclassifier(id)
        except lib_exc.NotFound:
            return True
        else:
            return False

    @property
    def resource_type(self):
        """Returns the primary type of resource this client works with."""
        return 'flow_classifier'
diff --git a/neutron_tempest_plugin/sfc/services/sfc_client.py b/neutron_tempest_plugin/sfc/services/sfc_client.py
new file mode 100644
index 0000000..5188800
--- /dev/null
+++ b/neutron_tempest_plugin/sfc/services/sfc_client.py
@@ -0,0 +1,165 @@
+# Copyright 2016 Futurewei. All rights reserved.
+# Copyright 2017 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib import exceptions as lib_exc
+from tempest.lib.services.network import base
+
+
class PortChainClient(base.BaseNetworkClient):
    """REST client for the networking-sfc port chain API."""

    # Collection endpoint shared by every port chain operation.
    _base_uri = '/sfc/port_chains'

    def _resource_uri(self, pc_id):
        # Path of a single port chain.
        return '%s/%s' % (self._base_uri, pc_id)

    def create_port_chain(self, **kwargs):
        """POST a new port chain built from **kwargs."""
        return self.create_resource(self._base_uri, {'port_chain': kwargs})

    def update_port_chain(self, pc_id, **kwargs):
        """PUT updated attributes onto an existing port chain."""
        return self.update_resource(
            self._resource_uri(pc_id), {'port_chain': kwargs})

    def show_port_chain(self, pc_id, **fields):
        """GET a single port chain, optionally restricted to fields."""
        return self.show_resource(self._resource_uri(pc_id), **fields)

    def delete_port_chain(self, pc_id):
        """DELETE the given port chain."""
        return self.delete_resource(self._resource_uri(pc_id))

    def list_port_chains(self, **filters):
        """GET all port chains matching the given filters."""
        return self.list_resources(self._base_uri, **filters)

    def is_resource_deleted(self, id):
        """Report whether the port chain is gone (used by waiters)."""
        try:
            self.show_port_chain(id)
        except lib_exc.NotFound:
            return True
        else:
            return False

    @property
    def resource_type(self):
        """Returns the primary type of resource this client works with."""
        return 'sfc'
+
+
class PortPairGroupClient(base.BaseNetworkClient):
    """REST client for the networking-sfc port pair group API."""

    # Collection endpoint shared by every port pair group operation.
    _base_uri = '/sfc/port_pair_groups'

    def _resource_uri(self, pg_id):
        # Path of a single port pair group.
        return '%s/%s' % (self._base_uri, pg_id)

    def create_port_pair_group(self, **kwargs):
        """POST a new port pair group built from **kwargs."""
        return self.create_resource(
            self._base_uri, {'port_pair_group': kwargs})

    def update_port_pair_group(self, pg_id, **kwargs):
        """PUT updated attributes onto an existing port pair group."""
        return self.update_resource(
            self._resource_uri(pg_id), {'port_pair_group': kwargs})

    def show_port_pair_group(self, pg_id, **fields):
        """GET a single port pair group, optionally restricted to fields."""
        return self.show_resource(self._resource_uri(pg_id), **fields)

    def delete_port_pair_group(self, pg_id):
        """DELETE the given port pair group."""
        return self.delete_resource(self._resource_uri(pg_id))

    def list_port_pair_groups(self, **filters):
        """GET all port pair groups matching the given filters."""
        return self.list_resources(self._base_uri, **filters)

    def is_resource_deleted(self, id):
        """Report whether the port pair group is gone (used by waiters)."""
        try:
            self.show_port_pair_group(id)
        except lib_exc.NotFound:
            return True
        else:
            return False

    @property
    def resource_type(self):
        """Returns the primary type of resource this client works with."""
        return 'sfc'
+
+
class PortPairClient(base.BaseNetworkClient):
    """REST client for the networking-sfc port pair API."""

    # Collection endpoint shared by every port pair operation.
    _base_uri = '/sfc/port_pairs'

    def _resource_uri(self, pp_id):
        # Path of a single port pair.
        return '%s/%s' % (self._base_uri, pp_id)

    def create_port_pair(self, **kwargs):
        """POST a new port pair built from **kwargs."""
        return self.create_resource(self._base_uri, {'port_pair': kwargs})

    def update_port_pair(self, pp_id, **kwargs):
        """PUT updated attributes onto an existing port pair."""
        return self.update_resource(
            self._resource_uri(pp_id), {'port_pair': kwargs})

    def show_port_pair(self, pp_id, **fields):
        """GET a single port pair, optionally restricted to fields."""
        return self.show_resource(self._resource_uri(pp_id), **fields)

    def delete_port_pair(self, pp_id):
        """DELETE the given port pair."""
        return self.delete_resource(self._resource_uri(pp_id))

    def list_port_pairs(self, **filters):
        """GET all port pairs matching the given filters."""
        return self.list_resources(self._base_uri, **filters)

    def is_resource_deleted(self, id):
        """Report whether the port pair is gone (used by waiters)."""
        try:
            self.show_port_pair(id)
        except lib_exc.NotFound:
            return True
        else:
            return False

    @property
    def resource_type(self):
        """Returns the primary type of resource this client works with."""
        return 'sfc'
+
+
class ServiceGraphClient(base.BaseNetworkClient):
    """REST client for the networking-sfc service graph API."""

    # Collection endpoint shared by every service graph operation.
    _base_uri = '/sfc/service_graphs'

    def _resource_uri(self, pp_id):
        # Path of a single service graph.
        return '%s/%s' % (self._base_uri, pp_id)

    def create_service_graph(self, **kwargs):
        """POST a new service graph built from **kwargs."""
        return self.create_resource(
            self._base_uri, {'service_graph': kwargs})

    def update_service_graph(self, pp_id, **kwargs):
        """PUT updated attributes onto an existing service graph."""
        return self.update_resource(
            self._resource_uri(pp_id), {'service_graph': kwargs})

    def show_service_graph(self, pp_id, **fields):
        """GET a single service graph, optionally restricted to fields."""
        return self.show_resource(self._resource_uri(pp_id), **fields)

    def delete_service_graph(self, pp_id):
        """DELETE the given service graph."""
        return self.delete_resource(self._resource_uri(pp_id))

    def list_service_graphs(self, **filters):
        """GET all service graphs matching the given filters."""
        return self.list_resources(self._base_uri, **filters)

    def is_resource_deleted(self, id):
        """Report whether the service graph is gone (used by waiters)."""
        try:
            self.show_service_graph(id)
        except lib_exc.NotFound:
            return True
        else:
            return False

    @property
    def resource_type(self):
        """Returns the primary type of resource this client works with."""
        return 'sfc'
diff --git a/neutron_tempest_plugin/sfc/tests/__init__.py b/neutron_tempest_plugin/sfc/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/neutron_tempest_plugin/sfc/tests/__init__.py
diff --git a/neutron_tempest_plugin/sfc/tests/api/__init__.py b/neutron_tempest_plugin/sfc/tests/api/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/neutron_tempest_plugin/sfc/tests/api/__init__.py
diff --git a/neutron_tempest_plugin/sfc/tests/api/base.py b/neutron_tempest_plugin/sfc/tests/api/base.py
new file mode 100644
index 0000000..732e2dc
--- /dev/null
+++ b/neutron_tempest_plugin/sfc/tests/api/base.py
@@ -0,0 +1,194 @@
+# Copyright 2016 Futurewei. All rights reserved.
+# Copyright 2017 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import socket
+
+import netaddr
+from tempest.api.network import base
+from tempest.common import utils
+from tempest.lib.common.utils import data_utils
+from tempest.lib import exceptions as lib_exc
+
+from neutron_tempest_plugin.sfc.tests import flowclassifier_client
+from neutron_tempest_plugin.sfc.tests import sfc_client
+
+
class BaseFlowClassifierTest(
    flowclassifier_client.FlowClassifierClientMixin,
    base.BaseAdminNetworkTest
):
    """Base class for flow classifier API tests.

    Skips unless the ``flow_classifier`` network extension is enabled and
    sets up one shared network/subnet for the whole test class.
    """

    @classmethod
    def resource_setup(cls):
        super(BaseFlowClassifierTest, cls).resource_setup()
        if not utils.is_extension_enabled('flow_classifier', 'network'):
            msg = "FlowClassifier Extension not enabled."
            raise cls.skipException(msg)
        cls.network = cls.create_network()
        cls.subnet = cls.create_subnet(cls.network)
        # Ports created by the tests are bound to this host via
        # binding:host_id.
        cls.host_id = socket.gethostname()

    @classmethod
    def resource_cleanup(cls):
        if not utils.is_extension_enabled('flow_classifier', 'network'):
            msg = "FlowClassifier Extension not enabled."
            raise cls.skipException(msg)
        super(BaseFlowClassifierTest, cls).resource_cleanup()

    @classmethod
    def _create_port(cls, network, **kwargs):
        """Create and return a port on *network* using admin credentials."""
        body = cls.admin_ports_client.create_port(
            network_id=network['id'],
            **kwargs)
        port = body['port']
        return port

    def _try_create_flowclassifier(self, **kwargs):
        """Create a flow classifier and register its cleanup.

        When the caller does not supply ``logical_source_port``, a port
        bound to this host is created and used; its fixed IP also becomes
        the default ``source_ip_prefix``.
        """
        if 'logical_source_port' not in kwargs:
            port_kwargs = {"binding:host_id": self.host_id}
            port = self._create_port(network=self.network, **port_kwargs)
            self.addCleanup(self._try_delete_port, port['id'])
            kwargs['logical_source_port'] = port['id']
            # Derive source_ip_prefix only from a port we created here.
            # The previous code evaluated this branch unconditionally and
            # referenced ``port`` even when the caller had supplied
            # logical_source_port, raising NameError in that case.
            if 'source_ip_prefix' not in kwargs:
                port_ip_prefix = str(netaddr.IPNetwork(
                    port['fixed_ips'][0]['ip_address']))
                kwargs['source_ip_prefix'] = port_ip_prefix
        fc = self.create_flowclassifier(**kwargs)
        self.addCleanup(self._try_delete_flowclassifier, fc['id'])
        return fc

    def _try_delete_port(self, port_id):
        """Delete a port if it still exists and assert it is gone."""
        try:
            self.admin_ports_client.delete_port(port_id)
        except lib_exc.NotFound:
            pass
        body = self.admin_ports_client.list_ports()
        ports_list = body['ports']
        self.assertNotIn(port_id, [n['id'] for n in ports_list])

    def _try_delete_flowclassifier(self, fc_id):
        """Delete a flow classifier if it still exists and assert removal."""
        # delete flowclassifier, if it exists
        try:
            self.flowclassifier_client.delete_flowclassifier(fc_id)
        # if flowclassifier is not found, this means it was deleted
        except lib_exc.NotFound:
            pass
        body = self.flowclassifier_client.list_flowclassifiers()
        fc_list = body['flow_classifiers']
        self.assertNotIn(fc_id, [n['id'] for n in fc_list])
+
+
class BaseSfcTest(
    sfc_client.SfcClientMixin, BaseFlowClassifierTest
):
    """Base class for SFC API tests: port pairs, groups, chains, graphs."""

    @classmethod
    def resource_setup(cls):
        super(BaseSfcTest, cls).resource_setup()
        if not utils.is_extension_enabled('sfc', 'network'):
            msg = "Sfc Extension not enabled."
            raise cls.skipException(msg)

    @classmethod
    def resource_cleanup(cls):
        if not utils.is_extension_enabled('sfc', 'network'):
            msg = "Sfc Extension not enabled."
            raise cls.skipException(msg)
        super(BaseSfcTest, cls).resource_cleanup()

    def _try_create_port_pair(self, **kwargs):
        """Create a port pair, defaulting missing endpoints to a new port.

        When either ``ingress`` or ``egress`` is absent, a port bound to
        this host is created, attached to a fresh router, and used for the
        missing endpoint(s).
        """
        # BUG FIX: the original condition was "'egress' not in 'kwargs'"
        # (membership in the *string* 'kwargs'), which is always True, so
        # the router/port scaffolding was built even when the caller
        # supplied both endpoints explicitly.
        if 'ingress' not in kwargs or 'egress' not in kwargs:
            router = self.admin_routers_client.create_router(
                name=data_utils.rand_name('router-'))['router']
            self.addCleanup(
                self.admin_routers_client.delete_router, router['id'])
            port_kwargs = {"binding:host_id": self.host_id}
            port = self._create_port(
                network=self.network, **port_kwargs)
            self.addCleanup(self._try_delete_port, port['id'])
            self.admin_routers_client.add_router_interface(
                router['id'], port_id=port['id'])
            self.addCleanup(self.admin_routers_client.remove_router_interface,
                            router['id'],
                            port_id=port['id'])
            if 'ingress' not in kwargs:
                kwargs['ingress'] = port['id']
            if 'egress' not in kwargs:
                kwargs['egress'] = port['id']
        pp = self.create_port_pair(**kwargs)
        self.addCleanup(self._try_delete_port_pair, pp['id'])
        return pp

    def _try_delete_port_pair(self, pp_id):
        """Delete a port pair if it still exists and assert removal."""
        # delete port pair, if it exists
        try:
            self.portpair_client.delete_port_pair(pp_id)
        # if port pair is not found, this means it was deleted
        except lib_exc.NotFound:
            pass
        body = self.portpair_client.list_port_pairs()
        pp_list = body['port_pairs']
        self.assertNotIn(pp_id, [n['id'] for n in pp_list])

    def _try_create_port_pair_group(self, **kwargs):
        """Create a port pair group and register its cleanup."""
        pg = self.create_port_pair_group(
            **kwargs)
        self.addCleanup(self._try_delete_port_pair_group, pg['id'])
        return pg

    def _try_delete_port_pair_group(self, pg_id):
        """Delete a port pair group if it still exists and assert removal."""
        # delete port pair group, if it exists
        try:
            self.portpairgroup_client.delete_port_pair_group(pg_id)
        # if port pair group is not found, this means it was deleted
        except lib_exc.NotFound:
            pass
        body = self.portpairgroup_client.list_port_pair_groups()
        pg_list = body['port_pair_groups']
        self.assertNotIn(pg_id, [n['id'] for n in pg_list])

    def _try_create_port_chain(self, **kwargs):
        """Create a port chain and register its cleanup."""
        pc = self.create_port_chain(
            **kwargs)
        self.addCleanup(self._try_delete_port_chain, pc['id'])
        return pc

    def _try_delete_port_chain(self, pc_id):
        """Delete a port chain if it still exists and assert removal."""
        # delete port chain, if it exists
        try:
            self.portchain_client.delete_port_chain(pc_id)
        # if port chain is not found, this means it was deleted
        except lib_exc.NotFound:
            pass
        body = self.portchain_client.list_port_chains()
        pc_list = body['port_chains']
        self.assertNotIn(pc_id, [n['id'] for n in pc_list])

    def _try_create_service_graph(self, **kwargs):
        """Create a service graph and register its cleanup."""
        graph = self.create_service_graph(
            **kwargs)
        self.addCleanup(self._try_delete_service_graph, graph['id'])
        return graph

    def _try_delete_service_graph(self, graph_id):
        """Delete a service graph if it still exists and assert removal."""
        # delete Service Graph, if it exists
        try:
            self.sfcgraph_client.delete_service_graph(graph_id)
        # if Service Graph is not found, this means it was deleted
        except lib_exc.NotFound:
            pass
        body = self.sfcgraph_client.list_service_graphs()
        graph_list = body['service_graphs']
        self.assertNotIn(graph_id, [n['id'] for n in graph_list])
diff --git a/neutron_tempest_plugin/sfc/tests/api/test_flowclassifier_extensions.py b/neutron_tempest_plugin/sfc/tests/api/test_flowclassifier_extensions.py
new file mode 100644
index 0000000..1b44383
--- /dev/null
+++ b/neutron_tempest_plugin/sfc/tests/api/test_flowclassifier_extensions.py
@@ -0,0 +1,91 @@
+# Copyright 2016 Futurewei. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+
+from neutron_tempest_plugin.sfc.tests.api import base
+
+
class FlowClassifierExtensionTestJSON(base.BaseFlowClassifierTest):
    """Tests the following operations in the Neutron API:

        List flowclassifiers
        Create flowclassifier
        Update flowclassifier
        Delete flowclassifier
        Show flowclassifier
    """

    @decorators.idempotent_id('1b84cf01-9c09-4ce7-bc72-b15e39076468')
    def test_list_flowclassifier(self):
        # List flow classifiers
        # Verify the created classifier appears in the list with matching
        # id/name/source fields.
        fc = self._try_create_flowclassifier()
        fcs = self.flowclassifier_client.list_flowclassifiers()
        self.assertIn((
            fc['id'],
            fc['name'],
            fc['source_ip_prefix'],
            fc['logical_source_port']
        ), [(
            m['id'],
            m['name'],
            m['source_ip_prefix'],
            m['logical_source_port'],
        ) for m in fcs['flow_classifiers']])

    @decorators.idempotent_id('b2ed2a37-fc64-4be5-819b-9cf2a13db70b')
    def test_list_flowclassifier_with_logical_destination_port(self):
        # List flow classifiers with logical_destination_port
        # NOTE(review): no logical_destination_port is actually passed to
        # _try_create_flowclassifier here, so the destination fields come
        # from API defaults -- confirm this matches the test's intent.
        fc = self._try_create_flowclassifier()
        fcs = self.flowclassifier_client.list_flowclassifiers()
        self.assertIn((
            fc['id'],
            fc['name'],
            fc['source_ip_prefix'],
            fc['destination_ip_prefix'],
            fc['logical_source_port'],
            fc['logical_destination_port']
        ), [(
            m['id'],
            m['name'],
            m['source_ip_prefix'],
            m['destination_ip_prefix'],
            m['logical_source_port'],
            m['logical_destination_port']
        ) for m in fcs['flow_classifiers']])

    @decorators.idempotent_id('563564f7-7077-4f5e-8cdc-51f37ae5a2b9')
    def test_update_flowclassifier(self):
        # Create flow classifier
        name1 = data_utils.rand_name('test')
        fc = self._try_create_flowclassifier(
            name=name1
        )
        fc_id = fc['id']

        # Update flow classifier
        name2 = data_utils.rand_name('test')
        body = self.flowclassifier_client.update_flowclassifier(
            fc_id, name=name2)
        self.assertEqual(body['flow_classifier']['name'], name2)

    @decorators.idempotent_id('3ff8c08e-26ff-4034-ae48-810ed213a998')
    def test_show_flowclassifier(self):
        # show a created flow classifier
        # Every field returned by show must match the creation response.
        created = self._try_create_flowclassifier()
        fc = self.flowclassifier_client.show_flowclassifier(
            created['id'])
        for key, value in fc['flow_classifier'].items():
            self.assertEqual(created[key], value)
diff --git a/neutron_tempest_plugin/sfc/tests/api/test_sfc_extensions.py b/neutron_tempest_plugin/sfc/tests/api/test_sfc_extensions.py
new file mode 100644
index 0000000..7ccd98d
--- /dev/null
+++ b/neutron_tempest_plugin/sfc/tests/api/test_sfc_extensions.py
@@ -0,0 +1,413 @@
+# Copyright 2016 Futurewei. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import netaddr
+
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+
+from neutron_tempest_plugin.sfc.tests.api import base
+
+
class SfcExtensionTestJSON(base.BaseSfcTest):
    """Tests the following operations in the Neutron API:

        List port chains
        Create port chain
        Update port chain
        Delete port chain
        Show port chain
        List port pair groups
        Create port pair group
        Update port pair group
        Delete port pair group
        Show port pair groups
        List port pairs
        Create port pair
        Update port pair
        Delete port pair
        Show port pair
        List Service Graphs
        Create Service Graph
        Update Service Graph
        Delete Service Graph
        Show Service Graphs
    """
    @decorators.idempotent_id('1a6067bf-b967-42a7-8b62-158a9ec185b4')
    def test_create_port_pair_different_ingress_egress(self):
        # Build two disjoint networks/subnets so ingress and egress live on
        # different ports, each plugged into the same router.
        ingress_network = self.create_network()
        self.addCleanup(self.networks_client.delete_network,
                        ingress_network['id'])
        cidr = netaddr.IPNetwork('192.168.1.0/24')
        allocation_pools = {'allocation_pools': [{'start': str(cidr[2]),
                                                  'end': str(cidr[-2])}]}
        ingress_subnet = self.create_subnet(ingress_network, cidr=cidr,
                                            mask_bits=cidr.prefixlen,
                                            **allocation_pools)
        self.addCleanup(self.subnets_client.delete_subnet,
                        ingress_subnet['id'])
        egress_network = self.create_network()
        self.addCleanup(self.networks_client.delete_network,
                        egress_network['id'])
        cidr = netaddr.IPNetwork('192.168.2.0/24')
        allocation_pools = {'allocation_pools': [{'start': str(cidr[2]),
                                                  'end': str(cidr[-2])}]}
        egress_subnet = self.create_subnet(egress_network, cidr=cidr,
                                           mask_bits=cidr.prefixlen,
                                           **allocation_pools)
        self.addCleanup(self.subnets_client.delete_subnet,
                        egress_subnet['id'])
        router = self.admin_routers_client.create_router(
            name=data_utils.rand_name('router-'))['router']
        self.addCleanup(self.admin_routers_client.delete_router, router['id'])
        port_kwargs = {"binding:host_id": self.host_id}
        ingress = self._create_port(
            network=ingress_network, **port_kwargs)
        self.addCleanup(self._try_delete_port, ingress['id'])
        self.admin_routers_client.add_router_interface(
            router['id'], port_id=ingress['id'])
        self.addCleanup(self.admin_routers_client.remove_router_interface,
                        router['id'],
                        port_id=ingress['id'])
        egress = self._create_port(
            network=egress_network, **port_kwargs)
        self.addCleanup(self._try_delete_port, egress['id'])
        self.admin_routers_client.add_router_interface(
            router['id'], port_id=egress['id'])
        self.addCleanup(self.admin_routers_client.remove_router_interface,
                        router['id'],
                        port_id=egress['id'])
        pp = self._try_create_port_pair(
            ingress=ingress['id'],
            egress=egress['id'])
        pps = self.portpair_client.list_port_pairs()
        self.assertIn((
            pp['id'],
            pp['name'],
            pp['ingress'],
            pp['egress']
        ), [(
            m['id'],
            m['name'],
            m['ingress'],
            m['egress'],
        ) for m in pps['port_pairs']])

    @decorators.idempotent_id('264cc4b8-aa17-4cea-88bf-26400e9751d9')
    def test_list_port_pair(self):
        # List port pairs
        pp = self._try_create_port_pair()
        pps = self.portpair_client.list_port_pairs()
        self.assertIn((
            pp['id'],
            pp['name'],
            pp['ingress'],
            pp['egress']
        ), [(
            m['id'],
            m['name'],
            m['ingress'],
            m['egress'],
        ) for m in pps['port_pairs']])

    @decorators.idempotent_id('83018ad7-3666-4396-bf3a-288a2b6a0e7c')
    def test_show_port_pair(self):
        # show a created port pair
        # Every field returned by show must match the creation response.
        created = self._try_create_port_pair()
        pp = self.portpair_client.show_port_pair(
            created['id'])
        for key, value in pp['port_pair'].items():
            self.assertEqual(created[key], value)

    @decorators.idempotent_id('69d21fa4-bdd5-4142-b1cc-6578037f605a')
    def test_update_port_pair(self):
        # Create port pair
        name1 = data_utils.rand_name('test')
        pp = self._try_create_port_pair(
            name=name1
        )
        pp_id = pp['id']

        # Update port pair
        name2 = data_utils.rand_name('test')
        body = self.portpair_client.update_port_pair(
            pp_id, name=name2)
        self.assertEqual(body['port_pair']['name'], name2)

    @decorators.idempotent_id('4fff9a4a-a98a-42bd-b3f4-483b93e6f297')
    def test_create_port_pair_group_empty_port_pairs(self):
        # A group with an explicitly empty port_pairs list is valid.
        pg = self._try_create_port_pair_group(
            port_pairs=[])
        pgs = self.portpairgroup_client.list_port_pair_groups()
        self.assertIn((
            pg['id'],
            pg['name'],
            set(pg['port_pairs']),
        ), [(
            m['id'],
            m['name'],
            set(m['port_pairs'])
        ) for m in pgs['port_pair_groups']])

    @decorators.idempotent_id('1a1c98a0-ff54-4647-a798-011e902825fa')
    def test_create_port_pair_group_multi_port_pairs(self):
        # sets are compared because port pair ordering is not guaranteed.
        pp1 = self._try_create_port_pair()
        pp2 = self._try_create_port_pair()
        pg = self._try_create_port_pair_group(
            port_pairs=[pp1['id'], pp2['id']])
        pgs = self.portpairgroup_client.list_port_pair_groups()
        self.assertIn((
            pg['id'],
            pg['name'],
            set(pg['port_pairs']),
        ), [(
            m['id'],
            m['name'],
            set(m['port_pairs'])
        ) for m in pgs['port_pair_groups']])

    @decorators.idempotent_id('e7d432c4-a7b4-444b-88cc-f420c5c1c29e')
    def test_list_port_pair_group(self):
        # List port pair groups
        pp = self._try_create_port_pair()
        pg = self._try_create_port_pair_group(port_pairs=[pp['id']])
        pgs = self.portpairgroup_client.list_port_pair_groups()
        self.assertIn((
            pg['id'],
            pg['name'],
            pg['port_pairs'],
        ), [(
            m['id'],
            m['name'],
            m['port_pairs']
        ) for m in pgs['port_pair_groups']])

    @decorators.idempotent_id('f12faa84-8dcb-4fbb-b03a-9ab05040a350')
    def test_show_port_pair_group(self):
        # show a created port pair group
        pp = self._try_create_port_pair()
        created = self._try_create_port_pair_group(port_pairs=[pp['id']])
        pg = self.portpairgroup_client.show_port_pair_group(
            created['id'])
        for key, value in pg['port_pair_group'].items():
            self.assertEqual(created[key], value)

    @decorators.idempotent_id('8991c2ef-71ba-4033-9037-5c8bf52a0c88')
    def test_update_port_pair_group(self):
        # Create port pair group
        pp = self._try_create_port_pair()
        name1 = data_utils.rand_name('test')
        pg = self._try_create_port_pair_group(
            name=name1, port_pairs=[pp['id']]
        )
        pg_id = pg['id']

        # Update port pair group
        name2 = data_utils.rand_name('test')
        body = self.portpairgroup_client.update_port_pair_group(
            pg_id, name=name2)
        self.assertEqual(body['port_pair_group']['name'], name2)

    @decorators.idempotent_id('d93d7ec3-f12e-4fad-b82b-759d358ff044')
    def test_create_port_chain_empty_flow_classifiers(self):
        # Create port chains
        # A chain with an empty flow_classifiers list is valid.
        pp = self._try_create_port_pair()
        pg = self._try_create_port_pair_group(port_pairs=[pp['id']])
        pc = self._try_create_port_chain(
            port_pair_groups=[pg['id']],
            flow_classifiers=[])
        pcs = self.portchain_client.list_port_chains()
        self.assertIn((
            pc['id'],
            pc['name'],
            pc['port_pair_groups'],
            pc['flow_classifiers']
        ), [(
            m['id'],
            m['name'],
            m['port_pair_groups'],
            m['flow_classifiers']
        ) for m in pcs['port_chains']])

    @decorators.idempotent_id('0c5ac396-6027-4bd1-af21-79fda6df9b77')
    def test_create_port_chain_multi_flowclassifiers(self):
        # Create port chains
        # sets are compared because classifier ordering is not guaranteed.
        pp = self._try_create_port_pair()
        pg = self._try_create_port_pair_group(port_pairs=[pp['id']])
        fc1 = self._try_create_flowclassifier()
        fc2 = self._try_create_flowclassifier()
        pc = self._try_create_port_chain(
            port_pair_groups=[pg['id']],
            flow_classifiers=[fc1['id'], fc2['id']])
        pcs = self.portchain_client.list_port_chains()
        self.assertIn((
            pc['id'],
            pc['name'],
            set(pc['flow_classifiers'])
        ), [(
            m['id'],
            m['name'],
            set(m['flow_classifiers'])
        ) for m in pcs['port_chains']])

    @decorators.idempotent_id('81f0faba-49ae-435a-8454-566c1e0a929e')
    def test_create_port_chain_flowclassifiers_symmetric(self):
        # Create symmetric port chain
        # A symmetric chain needs a logical destination port attached to a
        # router, so build that scaffolding first.
        router = self.admin_routers_client.create_router(
            name=data_utils.rand_name('router-'))['router']
        self.addCleanup(
            self.admin_routers_client.delete_router, router['id'])
        port_kwargs = {"binding:host_id": self.host_id}
        dst_port = self._create_port(
            network=self.network, **port_kwargs)
        self.addCleanup(self._try_delete_port, dst_port['id'])
        self.admin_routers_client.add_router_interface(
            router['id'], port_id=dst_port['id'])
        self.addCleanup(self.admin_routers_client.remove_router_interface,
                        router['id'],
                        port_id=dst_port['id'])
        pp = self._try_create_port_pair()
        pg = self._try_create_port_pair_group(port_pairs=[pp['id']])
        fc = self._try_create_flowclassifier(
            logical_destination_port=dst_port['id'])
        pc = self._try_create_port_chain(
            port_pair_groups=[pg['id']],
            flow_classifiers=[fc['id']],
            chain_parameters={'symmetric': True})
        pcs = self.portchain_client.list_port_chains()
        self.assertIn((
            pc['id'],
            pc['name'],
            pc['chain_parameters'],
            set(pc['flow_classifiers'])
        ), [(
            m['id'],
            m['name'],
            m['chain_parameters'],
            set(m['flow_classifiers'])
        ) for m in pcs['port_chains']])

    @decorators.idempotent_id('3f82c78f-e119-449f-bf6c-a964db45be3a')
    def test_create_port_chain_multi_port_pair_groups(self):
        # Create port chain
        pp1 = self._try_create_port_pair()
        pg1 = self._try_create_port_pair_group(port_pairs=[pp1['id']])
        pp2 = self._try_create_port_pair()
        pg2 = self._try_create_port_pair_group(port_pairs=[pp2['id']])
        fc = self._try_create_flowclassifier()
        pc = self._try_create_port_chain(
            port_pair_groups=[pg1['id'], pg2['id']],
            flow_classifiers=[fc['id']])
        pcs = self.portchain_client.list_port_chains()
        self.assertIn((
            pc['id'],
            pc['name'],
            pc['port_pair_groups'],
        ), [(
            m['id'],
            m['name'],
            m['port_pair_groups']
        ) for m in pcs['port_chains']])

    @decorators.idempotent_id('144629ec-7538-4595-93ea-89e28ba50724')
    def test_create_port_chain_port_pair_group_symmetric(self):
        # Create symmetric port chain with port_pair_group
        router = self.admin_routers_client.create_router(
            name=data_utils.rand_name('router-'))['router']
        self.addCleanup(
            self.admin_routers_client.delete_router, router['id'])
        port_kwargs = {"binding:host_id": self.host_id}
        dst_port = self._create_port(
            network=self.network, **port_kwargs)
        self.addCleanup(self._try_delete_port, dst_port['id'])
        self.admin_routers_client.add_router_interface(
            router['id'], port_id=dst_port['id'])
        self.addCleanup(self.admin_routers_client.remove_router_interface,
                        router['id'],
                        port_id=dst_port['id'])
        pp = self._try_create_port_pair()
        pg = self._try_create_port_pair_group(port_pairs=[pp['id']])
        fc = self._try_create_flowclassifier(
            logical_destination_port=dst_port['id'])
        pc = self._try_create_port_chain(
            port_pair_groups=[pg['id']],
            flow_classifiers=[fc['id']],
            chain_parameters={'symmetric': True})
        pcs = self.portchain_client.list_port_chains()
        self.assertIn((
            pc['id'],
            pc['name'],
            pc['port_pair_groups'],
            pc['chain_parameters']
        ), [(
            m['id'],
            m['name'],
            m['port_pair_groups'],
            m['chain_parameters']
        ) for m in pcs['port_chains']])

    @decorators.idempotent_id('83cfceba-f9d9-41e2-b27f-f919d8ff83a9')
    def test_list_port_chain(self):
        # List port chains
        pp = self._try_create_port_pair()
        pg = self._try_create_port_pair_group(port_pairs=[pp['id']])
        fc = self._try_create_flowclassifier()
        pc = self._try_create_port_chain(
            port_pair_groups=[pg['id']],
            flow_classifiers=[fc['id']])
        pcs = self.portchain_client.list_port_chains()
        self.assertIn((
            pc['id'],
            pc['name'],
            pc['port_pair_groups'],
            set(pc['flow_classifiers'])
        ), [(
            m['id'],
            m['name'],
            m['port_pair_groups'],
            set(m['flow_classifiers'])
        ) for m in pcs['port_chains']])

    @decorators.idempotent_id('0433ca11-dbc9-448d-8433-0df252e3d0cd')
    def test_show_port_chain(self):
        # show a created port chain
        pp = self._try_create_port_pair()
        pg = self._try_create_port_pair_group(port_pairs=[pp['id']])
        fc = self._try_create_flowclassifier()
        created = self._try_create_port_chain(
            port_pair_groups=[pg['id']],
            flow_classifiers=[fc['id']])
        pc = self.portchain_client.show_port_chain(
            created['id'])
        for key, value in pc['port_chain'].items():
            self.assertEqual(created[key], value)

    @decorators.idempotent_id('4ad641d3-823f-4b25-9438-68970593253d')
    def test_update_port_chain(self):
        # Create port chain
        pp = self._try_create_port_pair()
        pg = self._try_create_port_pair_group(port_pairs=[pp['id']])
        fc = self._try_create_flowclassifier()
        name1 = data_utils.rand_name('test')
        pc = self._try_create_port_chain(
            name=name1, port_pair_groups=[pg['id']],
            flow_classifiers=[fc['id']]
        )
        pc_id = pc['id']

        # Update port chain
        name2 = data_utils.rand_name('test')
        body = self.portchain_client.update_port_chain(
            pc_id, name=name2)
        self.assertEqual(body['port_chain']['name'], name2)
diff --git a/neutron_tempest_plugin/sfc/tests/flowclassifier_client.py b/neutron_tempest_plugin/sfc/tests/flowclassifier_client.py
new file mode 100644
index 0000000..8370ef8
--- /dev/null
+++ b/neutron_tempest_plugin/sfc/tests/flowclassifier_client.py
@@ -0,0 +1,45 @@
+# Copyright 2016 Futurewei. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest import config
+
+from neutron_tempest_plugin.sfc.services import flowclassifier_client
+
+CONF = config.CONF
+
+
class FlowClassifierClientMixin(object):
    """Mixin that equips a test class with a flow classifier client.

    During resource setup an admin-credentialed ``flowclassifier_client``
    is attached to the class, together with a convenience wrapper for
    creating flow classifiers.
    """

    @classmethod
    def resource_setup(cls):
        super(FlowClassifierClientMixin, cls).resource_setup()
        manager = cls.os_admin
        # Build the client against the same network endpoint/timeouts
        # tempest uses for its own neutron clients.
        client = flowclassifier_client.FlowClassifierClient(
            manager.auth_provider,
            CONF.network.catalog_type,
            CONF.network.region or CONF.identity.region,
            endpoint_type=CONF.network.endpoint_type,
            build_interval=CONF.network.build_interval,
            build_timeout=CONF.network.build_timeout,
            **manager.default_params)
        cls.flowclassifier_client = client

    @classmethod
    def create_flowclassifier(cls, **kwargs):
        """Create a flow classifier and return its attribute dict."""
        body = cls.flowclassifier_client.create_flowclassifier(**kwargs)
        return body['flow_classifier']
diff --git a/neutron_tempest_plugin/sfc/tests/scenario/__init__.py b/neutron_tempest_plugin/sfc/tests/scenario/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/neutron_tempest_plugin/sfc/tests/scenario/__init__.py
diff --git a/neutron_tempest_plugin/sfc/tests/scenario/base.py b/neutron_tempest_plugin/sfc/tests/scenario/base.py
new file mode 100644
index 0000000..d4cff18
--- /dev/null
+++ b/neutron_tempest_plugin/sfc/tests/scenario/base.py
@@ -0,0 +1,66 @@
+# Copyright 2016 Futurewei. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log as logging
+
+from tempest import config
+from tempest.lib import exceptions as lib_exc
+
+from neutron_tempest_plugin.sfc.tests import flowclassifier_client
+from neutron_tempest_plugin.sfc.tests.scenario import manager
+from neutron_tempest_plugin.sfc.tests import sfc_client
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
class SfcScenarioTest(
    flowclassifier_client.FlowClassifierClientMixin,
    sfc_client.SfcClientMixin,
    manager.NetworkScenarioTest
):
    """Base scenario test providing SFC traffic-path verification."""

    def _check_connectivity(
        self, source_ip, destination_ip, routes=None,
        username=None, private_key=None
    ):
        """Check that traffic to *destination_ip* traverses *routes*.

        :param source_ip: IP to ping and then ssh into.
        :param destination_ip: IP the traceroute is run against.
        :param routes: expected intermediate hops; one entry per hop,
            each entry being a list of acceptable addresses for that
            hop. ``None`` means no intermediate hops are expected.
        :param username: ssh login user on the source server.
        :param private_key: ssh private key for the source server.
        :raises AssertionError: if the source is unreachable or the
            observed hops do not match *routes*.
        """
        # Bug fix: the declared default ``routes=None`` used to crash
        # with a TypeError at ``len(routes)``; treat it as "no hops".
        if routes is None:
            routes = []
        msg = "ip address %r is reachable" % source_ip
        ok = self.ping_ip_address(source_ip, should_succeed=True)
        self.assertTrue(ok, msg=msg)
        client = self.get_remote_client(
            source_ip, username=username, private_key=private_key)
        # ICMP traceroute exposes every hop the packet traverses, which
        # is how the service chain path is validated.
        cmd = 'traceroute -n -I %s' % destination_ip
        LOG.debug('exec command on %s: %s', source_ip, cmd)
        try:
            result = client.exec_command(cmd)
            LOG.debug(
                'traceroute from %s to %s:\n%s',
                source_ip, destination_ip, result)
            # Drop the header line, empty lines and the final
            # (destination) line; what remains are the intermediate hops.
            lines = result.split('\n')
            lines = [line for line in lines if line]
            lines = lines[1:-1]
            if len(lines) != len(routes):
                LOG.error('length mismatch:\n%s\nvs\n%s', lines, routes)
            self.assertEqual(len(lines), len(routes))
            for line, route_list in zip(lines, routes):
                # Each hop line must contain at least one acceptable IP.
                found = any([route in line for route in route_list])
                if not found:
                    LOG.error('did not found any route %s in %s',
                              route_list, line)
                self.assertTrue(found)
        except lib_exc.SSHExecCommandFailed as e:
            LOG.exception(e)
            raise
        except lib_exc.SSHTimeout as e:
            LOG.exception(e)
            raise
diff --git a/neutron_tempest_plugin/sfc/tests/scenario/manager.py b/neutron_tempest_plugin/sfc/tests/scenario/manager.py
new file mode 100644
index 0000000..e2571ab
--- /dev/null
+++ b/neutron_tempest_plugin/sfc/tests/scenario/manager.py
@@ -0,0 +1,875 @@
+# Copyright 2012 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import subprocess
+
+import netaddr
+from oslo_log import log
+from oslo_utils import netutils
+
+from tempest.common import compute
+from tempest.common.utils.linux import remote_client
+from tempest.common.utils import net_utils
+from tempest.common import waiters
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib import exceptions as lib_exc
+import tempest.test
+
+CONF = config.CONF
+
+LOG = log.getLogger(__name__)
+
+
class ScenarioTest(tempest.test.BaseTestCase):
    """Base class for scenario tests. Uses tempest own clients. """

    # Only primary (non-admin) credentials are needed at this level;
    # subclasses extend this list.
    credentials = ['primary']

    @classmethod
    def setup_clients(cls):
        """Attach the compute and network service clients used by tests."""
        super(ScenarioTest, cls).setup_clients()
        # Clients (in alphabetical order)
        cls.keypairs_client = cls.os_primary.keypairs_client
        cls.servers_client = cls.os_primary.servers_client
        # Neutron network client
        cls.networks_client = cls.os_primary.networks_client
        cls.ports_client = cls.os_primary.ports_client
        cls.routers_client = cls.os_primary.routers_client
        cls.subnets_client = cls.os_primary.subnets_client
        cls.floating_ips_client = cls.os_primary.floating_ips_client
        cls.security_groups_client = cls.os_primary.security_groups_client
        cls.security_group_rules_client = (
            cls.os_primary.security_group_rules_client)
+
+ # ## Test functions library
+ #
+ # The create_[resource] functions only return body and discard the
+ # resp part which is not used in scenario tests
+
def _create_port(self, network_id, client=None, namestart='port-quotatest',
                 **kwargs):
    """Create a randomly named port on *network_id*; deleted on cleanup."""
    client = client or self.ports_client
    port_name = data_utils.rand_name(namestart)
    result = client.create_port(
        name=port_name,
        network_id=network_id,
        **kwargs)
    self.assertIsNotNone(result, 'Unable to allocate port')
    port = result['port']
    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    client.delete_port, port['id'])
    return port
+
def create_keypair(self, client=None):
    """Create a nova keypair and register its deletion as cleanup."""
    if not client:
        client = self.keypairs_client
    keypair_name = data_utils.rand_name(self.__class__.__name__)
    # We don't need to create a keypair by pubkey in scenario
    body = client.create_keypair(name=keypair_name)
    self.addCleanup(client.delete_keypair, keypair_name)
    return body['keypair']
+
def create_server(self, name=None, image_id=None, flavor=None,
                  validatable=False, wait_until='ACTIVE',
                  clients=None, **kwargs):
    """Wrapper utility that returns a test server.

    This wrapper utility calls the common create test server and
    returns a test server. The purpose of this wrapper is to minimize
    the impact on the code of the tests already using this
    function.
    """
    # NOTE(review): ``validatable`` is accepted for signature
    # compatibility but is not consulted below — ssh checks are always
    # performed, as explained in the NOTE that follows.

    # NOTE(jlanoux): As a first step, ssh checks in the scenario
    # tests need to be run regardless of the run_validation and
    # validatable parameters and thus until the ssh validation job
    # becomes voting in CI. The test resources management and IP
    # association are taken care of in the scenario tests.
    # Therefore, the validatable parameter is set to false in all
    # those tests. In this way create_server just return a standard
    # server and the scenario tests always perform ssh checks.

    # Needed for the cross_tenant_traffic test:
    if clients is None:
        clients = self.os_primary

    if name is None:
        name = data_utils.rand_name(self.__class__.__name__ + "-server")

    vnic_type = CONF.network.port_vnic_type

    # If vnic_type is configured create port for
    # every network
    if vnic_type:
        ports = []

        create_port_body = {'binding:vnic_type': vnic_type,
                            'namestart': 'port-smoke'}
        if kwargs:
            # Convert security group names to security group ids
            # to pass to create_port
            if 'security_groups' in kwargs:
                security_groups = \
                    clients.security_groups_client.list_security_groups(
                    ).get('security_groups')
                sec_dict = dict([(s['name'], s['id'])
                                 for s in security_groups])

                sec_groups_names = [s['name'] for s in kwargs.pop(
                    'security_groups')]
                security_groups_ids = [sec_dict[s]
                                       for s in sec_groups_names]

                if security_groups_ids:
                    create_port_body[
                        'security_groups'] = security_groups_ids
            networks = kwargs.pop('networks', [])
        else:
            networks = []

        # If there are no networks passed to us we look up
        # for the project's private networks and create a port.
        # The same behaviour as we would expect when passing
        # the call to the clients with no networks
        if not networks:
            networks = clients.networks_client.list_networks(
                **{'router:external': False, 'fields': 'id'})['networks']

        # It's net['uuid'] if networks come from kwargs
        # and net['id'] if they come from
        # clients.networks_client.list_networks
        for net in networks:
            net_id = net.get('uuid', net.get('id'))
            if 'port' not in net:
                port = self._create_port(network_id=net_id,
                                         client=clients.ports_client,
                                         **create_port_body)
                ports.append({'port': port['id']})
            else:
                ports.append({'port': net['port']})
        if ports:
            kwargs['networks'] = ports
        self.ports = ports

    tenant_network = self.get_tenant_network()

    body, _ = compute.create_test_server(
        clients,
        tenant_network=tenant_network,
        wait_until=wait_until,
        name=name, flavor=flavor,
        image_id=image_id, **kwargs)

    # Cleanups run LIFO: the delete registered second runs first, then
    # the wait-for-termination confirms the server is actually gone.
    self.addCleanup(waiters.wait_for_server_termination,
                    clients.servers_client, body['id'])
    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    clients.servers_client.delete_server, body['id'])
    server = clients.servers_client.show_server(body['id'])['server']
    return server
+
def get_remote_client(self, ip_address, username=None, private_key=None,
                      server=None):
    """Get a SSH client to a remote server

    @param ip_address the server floating or fixed IP address to use
                      for ssh validation
    @param username name of the Linux account on the remote server
    @param private_key the SSH private key to use
    @param server: server dict, used for debugging purposes
    @return a RemoteClient object
    """
    if username is None:
        username = CONF.validation.image_ssh_user
    # 'keypair' auth logs in with an SSH key; any other configured
    # method falls back to username/password from the validation config.
    use_keypair = CONF.validation.auth_method == 'keypair'
    if use_keypair:
        password = None
        if private_key is None:
            private_key = self.keypair['private_key']
    else:
        password = CONF.validation.image_ssh_password
        private_key = None
    ssh_client = remote_client.RemoteClient(
        ip_address, username, pkey=private_key, password=password,
        server=server, servers_client=self.servers_client)
    ssh_client.validate_authentication()
    return ssh_client
+
def _log_console_output(self, servers=None, client=None):
    """Best-effort dump of each server's console log at debug level."""
    if not CONF.compute_feature_enabled.console_output:
        LOG.debug('Console output not supported, cannot log')
        return
    client = client or self.servers_client
    if not servers:
        servers = client.list_servers()['servers']
    for server in servers:
        try:
            output = client.get_console_output(server['id'])['output']
            LOG.debug('Console output for %s\nbody=\n%s',
                      server['id'], output)
        except lib_exc.NotFound:
            # The server may have been cleaned up concurrently.
            LOG.debug("Server %s disappeared(deleted) while looking "
                      "for the console log", server['id'])
+
def _log_net_info(self, exc):
    # network debug is called as part of ssh init
    if isinstance(exc, lib_exc.SSHTimeout):
        return
    LOG.debug('Network information on a devstack host')
+
def ping_ip_address(self, ip_address, should_succeed=True,
                    ping_timeout=None, mtu=None):
    """Ping *ip_address* from the local host until the timeout expires.

    :param ip_address: address to ping.
    :param should_succeed: whether the ping is expected to succeed.
    :param ping_timeout: seconds to keep retrying; defaults to
        CONF.validation.ping_timeout.
    :param mtu: when set, send a don't-fragment ping with a payload
        sized for this MTU.
    :returns: True if the observed reachability matched
        *should_succeed* before the timeout, else False.
    """
    timeout = ping_timeout or CONF.validation.ping_timeout
    cmd = ['ping', '-c1', '-w1']

    if mtu:
        cmd += [
            # don't fragment
            '-M', 'do',
            # ping receives just the size of ICMP payload
            '-s', str(net_utils.get_ping_payload_size(mtu, 4))
        ]
    cmd.append(ip_address)

    def ping():
        # A single probe; retried by call_until_true below.
        proc = subprocess.Popen(cmd,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        proc.communicate()

        return (proc.returncode == 0) == should_succeed

    caller = test_utils.find_test_caller()
    LOG.debug('%(caller)s begins to ping %(ip)s in %(timeout)s sec and the'
              ' expected result is %(should_succeed)s', {
                  'caller': caller, 'ip': ip_address, 'timeout': timeout,
                  'should_succeed':
                  'reachable' if should_succeed else 'unreachable'
              })
    result = test_utils.call_until_true(ping, timeout, 1)
    LOG.debug('%(caller)s finishes ping %(ip)s in %(timeout)s sec and the '
              'ping result is %(result)s', {
                  'caller': caller, 'ip': ip_address, 'timeout': timeout,
                  'result': 'expected' if result else 'unexpected'
              })
    return result
+
def check_vm_connectivity(self, ip_address,
                          username=None,
                          private_key=None,
                          should_connect=True,
                          mtu=None):
    """Check server connectivity

    :param ip_address: server to test against
    :param username: server's ssh username
    :param private_key: server's ssh private key to be used
    :param should_connect: True/False indicates positive/negative test
        positive - attempt ping and ssh
        negative - attempt ping and fail if succeed
    :param mtu: network MTU to use for connectivity validation

    :raises: AssertError if the result of the connectivity check does
        not match the value of the should_connect param
    """
    msg = ("Timed out waiting for %s to become reachable" % ip_address
           if should_connect
           else "ip address %s is reachable" % ip_address)
    reachable = self.ping_ip_address(ip_address,
                                     should_succeed=should_connect,
                                     mtu=mtu)
    self.assertTrue(reachable, msg=msg)
    if should_connect:
        # no need to check ssh for negative connectivity
        self.get_remote_client(ip_address, username, private_key)
+
def check_public_network_connectivity(self, ip_address, username,
                                      private_key, should_connect=True,
                                      msg=None, servers=None, mtu=None):
    """Check connectivity to a server over its public (floating) IP.

    On failure the console logs of *servers* are dumped for debugging
    before the exception is re-raised.
    """
    # The target login is assumed to have been configured for
    # key-based authentication by cloud-init.
    LOG.debug('checking network connections to IP %s with user: %s',
              ip_address, username)
    try:
        self.check_vm_connectivity(ip_address,
                                   username,
                                   private_key,
                                   should_connect=should_connect,
                                   mtu=mtu)
    except Exception:
        ex_msg = 'Public network connectivity check failed'
        if msg:
            ex_msg += ": " + msg
        LOG.exception(ex_msg)
        self._log_console_output(servers)
        raise
+
+
class NetworkScenarioTest(ScenarioTest):
    """Base class for network scenario tests.

    This class provide helpers for network scenario tests, using the neutron
    API. Helpers from ancestor which use the nova network API are overridden
    with the neutron API.

    This Class also enforces using Neutron instead of novanetwork.
    Subclassed tests will be skipped if Neutron is not enabled

    """

    # Admin credentials are needed for cross-tenant port/subnet lookups
    # (see _get_server_port_id_and_ip4 and _create_subnet).
    credentials = ['primary', 'admin']

    @classmethod
    def skip_checks(cls):
        """Skip the whole class when the Neutron service is unavailable."""
        super(NetworkScenarioTest, cls).skip_checks()
        if not CONF.service_available.neutron:
            raise cls.skipException('Neutron not available')
+
def _create_network(self, networks_client=None,
                    tenant_id=None,
                    namestart='network-smoke-',
                    port_security_enabled=True):
    """Create a tenant network (deleted on cleanup) and return it."""
    networks_client = networks_client or self.networks_client
    tenant_id = tenant_id or networks_client.tenant_id
    net_name = data_utils.rand_name(namestart)
    result = networks_client.create_network(
        name=net_name,
        tenant_id=tenant_id,
        port_security_enabled=port_security_enabled)
    network = result['network']

    self.assertEqual(network['name'], net_name)
    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    networks_client.delete_network,
                    network['id'])
    return network
+
def _create_subnet(self, network, subnets_client=None,
                   routers_client=None, namestart='subnet-smoke',
                   **kwargs):
    """Create a subnet for the given network

    within the cidr block configured for tenant networks.

    :param network: network dict the subnet is created on.
    :param kwargs: extra attributes passed to create_subnet; may
        include ``ip_version`` (defaults to 4).
    :returns: the created subnet dict (deleted on cleanup).
    """
    if not subnets_client:
        subnets_client = self.subnets_client
    if not routers_client:
        routers_client = self.routers_client

    def cidr_in_use(cidr, tenant_id):
        """Check cidr existence

        :returns: True if subnet with cidr already exist in tenant
            False else
        """
        # Admin credentials are used so subnets of all tenants are seen.
        cidr_in_use = self.os_admin.subnets_client.list_subnets(
            tenant_id=tenant_id, cidr=cidr)['subnets']
        return len(cidr_in_use) != 0

    ip_version = kwargs.pop('ip_version', 4)

    if ip_version == 6:
        tenant_cidr = netaddr.IPNetwork(
            CONF.network.project_network_v6_cidr)
        num_bits = CONF.network.project_network_v6_mask_bits
    else:
        tenant_cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
        num_bits = CONF.network.project_network_mask_bits

    result = None
    str_cidr = None
    # Repeatedly attempt subnet creation with sequential cidr
    # blocks until an unallocated block is found.
    for subnet_cidr in tenant_cidr.subnet(num_bits):
        str_cidr = str(subnet_cidr)
        if cidr_in_use(str_cidr, tenant_id=network['tenant_id']):
            continue

        subnet = dict(
            name=data_utils.rand_name(namestart),
            network_id=network['id'],
            tenant_id=network['tenant_id'],
            cidr=str_cidr,
            ip_version=ip_version,
            **kwargs
        )
        try:
            result = subnets_client.create_subnet(**subnet)
            break
        except lib_exc.Conflict as e:
            # A concurrent test may have grabbed this cidr between the
            # in-use check above and the create; try the next block.
            is_overlapping_cidr = 'overlaps with another subnet' in str(e)
            if not is_overlapping_cidr:
                raise
    self.assertIsNotNone(result, 'Unable to allocate tenant network')

    subnet = result['subnet']
    self.assertEqual(subnet['cidr'], str_cidr)

    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    subnets_client.delete_subnet, subnet['id'])

    return subnet
+
def _get_server_port_id_and_ip4(self, server, ip_addr=None):
    """Return (port_id, ipv4_address) of the server's single IPv4 port.

    Fails the test if the server has zero or more than one candidate
    IPv4 address among its active ports.
    """
    ports = self.os_admin.ports_client.list_ports(
        device_id=server['id'], fixed_ip=ip_addr)['ports']
    # A port can have more than one IP address in some cases.
    # If the network is dual-stack (IPv4 + IPv6), this port is associated
    # with 2 subnets
    p_status = ['ACTIVE']
    # NOTE(vsaienko) With Ironic, instances live on separate hardware
    # servers. Neutron does not bind ports for Ironic instances, as a
    # result the port remains in the DOWN state.
    # TODO(vsaienko) remove once bug: #1599836 is resolved.
    if getattr(CONF.service_available, 'ironic', False):
        p_status.append('DOWN')
    port_map = [(p["id"], fxip["ip_address"])
                for p in ports
                for fxip in p["fixed_ips"]
                if netutils.is_valid_ipv4(fxip["ip_address"]) and
                p['status'] in p_status]
    inactive = [p for p in ports if p['status'] != 'ACTIVE']
    if inactive:
        LOG.warning("Instance has ports that are not ACTIVE: %s", inactive)

    self.assertNotEmpty(port_map,
                        "No IPv4 addresses found in: %s" % ports)
    self.assertEqual(len(port_map), 1,
                     "Found multiple IPv4 addresses: %s. "
                     "Unable to determine which port to target."
                     % port_map)
    return port_map[0]
+
def _get_network_by_name(self, network_name):
    """Return the first network named *network_name* (admin view)."""
    matches = self.os_admin.networks_client.list_networks(
        name=network_name)['networks']
    self.assertNotEmpty(matches,
                        "Unable to get network by name: %s" % network_name)
    return matches[0]
+
def create_floating_ip(self, thing, external_network_id=None,
                       port_id=None, client=None):
    """Create a floating IP and associates to a resource/port on Neutron"""
    external_network_id = (external_network_id or
                           CONF.network.public_network_id)
    client = client or self.floating_ips_client
    if port_id:
        # Caller chose the port; no fixed address constraint.
        ip4 = None
    else:
        port_id, ip4 = self._get_server_port_id_and_ip4(thing)
    result = client.create_floatingip(
        floating_network_id=external_network_id,
        port_id=port_id,
        tenant_id=thing['tenant_id'],
        fixed_ip_address=ip4
    )
    floating_ip = result['floatingip']
    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    client.delete_floatingip,
                    floating_ip['id'])
    return floating_ip
+
def _associate_floating_ip(self, floating_ip, server):
    """Attach *floating_ip* to the server's single IPv4 port."""
    port_id, _ = self._get_server_port_id_and_ip4(server)
    updated = self.floating_ips_client.update_floatingip(
        floating_ip['id'], port_id=port_id)['floatingip']
    self.assertEqual(port_id, updated['port_id'])
    return updated
+
def _disassociate_floating_ip(self, floating_ip):
    """:param floating_ip: floating_ips_client.create_floatingip"""
    updated = self.floating_ips_client.update_floatingip(
        floating_ip['id'], port_id=None)['floatingip']
    self.assertIsNone(updated['port_id'])
    return updated
+
def check_floating_ip_status(self, floating_ip, status):
    """Verifies floatingip reaches the given status

    :param dict floating_ip: floating IP dict to check status
    :param status: target status
    :raises: AssertionError if status doesn't match
    """
    floatingip_id = floating_ip['id']

    def refresh():
        # One poll iteration: re-read the floating IP and compare status.
        result = (self.floating_ips_client.
                  show_floatingip(floatingip_id)['floatingip'])
        return status == result['status']

    if not test_utils.call_until_true(refresh,
                                      CONF.network.build_timeout,
                                      CONF.network.build_interval):
        # Timed out: fetch the final state to build a helpful message.
        floating_ip = self.floating_ips_client.show_floatingip(
            floatingip_id)['floatingip']
        self.assertEqual(status, floating_ip['status'],
                         message="FloatingIP: {fp} is at status: {cst}. "
                                 "failed to reach status: {st}"
                         .format(fp=floating_ip, cst=floating_ip['status'],
                                 st=status))
    LOG.info("FloatingIP: {fp} is at status: {st}"
             .format(fp=floating_ip, st=status))
+
def _check_tenant_network_connectivity(self, server,
                                       username,
                                       private_key,
                                       should_connect=True,
                                       servers_for_debug=None):
    """Check connectivity to every address of *server* on tenant networks.

    No-op when project networks are not configured to be reachable.
    On failure, console logs of *servers_for_debug* are dumped before
    the exception is re-raised.
    """
    if not CONF.network.project_networks_reachable:
        msg = 'Tenant networks not configured to be reachable.'
        LOG.info(msg)
        return
    # The target login is assumed to have been configured for
    # key-based authentication by cloud-init.
    try:
        for ip_addresses in server['addresses'].values():
            for ip_address in ip_addresses:
                self.check_vm_connectivity(ip_address['addr'],
                                           username,
                                           private_key,
                                           should_connect=should_connect)
    except Exception as e:
        LOG.exception('Tenant network connectivity check failed')
        self._log_console_output(servers_for_debug)
        self._log_net_info(e)
        raise
+
def _check_remote_connectivity(self, source, dest, should_succeed=True,
                               nic=None):
    """assert ping server via source ssh connection

    Note: This is an internal method. Use check_remote_connectivity
    instead.

    :param source: RemoteClient: an ssh connection from which to ping
    :param dest: and IP to ping against
    :param should_succeed: boolean should ping succeed or not
    :param nic: specific network interface to ping from
    :returns: True if the expected outcome was observed within
        CONF.validation.ping_timeout, else False.
    """
    def ping_remote():
        try:
            source.ping_host(dest, nic=nic)
        except lib_exc.SSHExecCommandFailed:
            LOG.warning('Failed to ping IP: %s via a ssh connection '
                        'from: %s.', dest, source.ssh_client.host)
            # A failed ping is the expected outcome in a negative test.
            return not should_succeed
        return should_succeed

    return test_utils.call_until_true(ping_remote,
                                      CONF.validation.ping_timeout,
                                      1)
+
def check_remote_connectivity(self, source, dest, should_succeed=True,
                              nic=None):
    """assert ping server via source ssh connection

    :param source: RemoteClient: an ssh connection from which to ping
    :param dest: and IP to ping against
    :param should_succeed: boolean should ping succeed or not
    :param nic: specific network interface to ping from
    """
    reachable = self._check_remote_connectivity(
        source, dest, should_succeed, nic)
    host = source.ssh_client.host
    if should_succeed:
        msg = ("Timed out waiting for %s to become reachable from %s"
               % (dest, host))
    else:
        msg = "%s is reachable from %s" % (dest, host)
    self.assertTrue(reachable, msg)
+
def _create_security_group(self, security_group_rules_client=None,
                           tenant_id=None,
                           namestart='secgroup-smoke',
                           security_groups_client=None):
    """Create a security group with ssh/ping rules and return it.

    Combines _create_empty_security_group and
    _create_loginable_secgroup_rule, then sanity-checks that every rule
    landed in the right tenant and group.
    """
    if security_group_rules_client is None:
        security_group_rules_client = self.security_group_rules_client
    if security_groups_client is None:
        security_groups_client = self.security_groups_client
    if tenant_id is None:
        tenant_id = security_groups_client.tenant_id
    secgroup = self._create_empty_security_group(
        namestart=namestart, client=security_groups_client,
        tenant_id=tenant_id)

    # Add rules to the security group
    rules = self._create_loginable_secgroup_rule(
        security_group_rules_client=security_group_rules_client,
        secgroup=secgroup,
        security_groups_client=security_groups_client)
    for rule in rules:
        self.assertEqual(tenant_id, rule['tenant_id'])
        self.assertEqual(secgroup['id'], rule['security_group_id'])
    return secgroup
+
def _create_empty_security_group(self, client=None, tenant_id=None,
                                 namestart='secgroup-smoke'):
    """Create a security group without rules.

    Default rules will be created:
    - IPv4 egress to any
    - IPv6 egress to any

    :param tenant_id: secgroup will be created in this tenant
    :returns: the created security group
    """
    if client is None:
        client = self.security_groups_client
    if not tenant_id:
        tenant_id = client.tenant_id
    sg_name = data_utils.rand_name(namestart)
    sg_desc = sg_name + " description"
    result = client.create_security_group(name=sg_name,
                                          description=sg_desc,
                                          tenant_id=tenant_id)

    secgroup = result['security_group']
    self.assertEqual(secgroup['name'], sg_name)
    self.assertEqual(tenant_id, secgroup['tenant_id'])
    self.assertEqual(secgroup['description'], sg_desc)

    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    client.delete_security_group, secgroup['id'])
    return secgroup
+
def _default_security_group(self, client=None, tenant_id=None):
    """Get default secgroup for given tenant_id.

    :returns: default secgroup for given tenant
    """
    if client is None:
        client = self.security_groups_client
    if not tenant_id:
        tenant_id = client.tenant_id
    # NOTE(review): this grabs the first value of the list_security_groups
    # response rather than the 'security_groups' key — it works only while
    # the response body has a single key; confirm against the client API.
    sgs = [
        sg for sg in list(client.list_security_groups().values())[0]
        if sg['tenant_id'] == tenant_id and sg['name'] == 'default'
    ]
    msg = "No default security group for tenant %s." % (tenant_id)
    self.assertNotEmpty(sgs, msg)
    return sgs[0]
+
def _create_security_group_rule(self, secgroup=None,
                                sec_group_rules_client=None,
                                tenant_id=None,
                                security_groups_client=None, **kwargs):
    """Create a rule from a dictionary of rule parameters.

    Create a rule in a secgroup. if secgroup not defined will search for
    default secgroup in tenant_id.

    :param secgroup: the security group.
    :param tenant_id: if secgroup not passed -- the tenant in which to
        search for default secgroup
    :param kwargs: a dictionary containing rule parameters:
        for example, to allow incoming ssh:
        rule = {
        direction: 'ingress'
        protocol:'tcp',
        port_range_min: 22,
        port_range_max: 22
        }
    :returns: the created security group rule dict.
    """
    if sec_group_rules_client is None:
        sec_group_rules_client = self.security_group_rules_client
    if security_groups_client is None:
        security_groups_client = self.security_groups_client
    if not tenant_id:
        tenant_id = security_groups_client.tenant_id
    if secgroup is None:
        secgroup = self._default_security_group(
            client=security_groups_client, tenant_id=tenant_id)

    # The rule is created in the same tenant as the group it belongs to.
    ruleset = dict(security_group_id=secgroup['id'],
                   tenant_id=secgroup['tenant_id'])
    ruleset.update(kwargs)

    sg_rule = sec_group_rules_client.create_security_group_rule(**ruleset)
    sg_rule = sg_rule['security_group_rule']

    self.assertEqual(secgroup['tenant_id'], sg_rule['tenant_id'])
    self.assertEqual(secgroup['id'], sg_rule['security_group_id'])

    return sg_rule
+
def _create_loginable_secgroup_rule(self, security_group_rules_client=None,
                                    secgroup=None,
                                    security_groups_client=None):
    """Create loginable security group rule

    This function will create:
    1. egress and ingress tcp port 22 allow rule in order to allow ssh
    access for ipv4.
    2. egress and ingress ipv6 icmp allow rule, in order to allow icmpv6.
    3. egress and ingress ipv4 icmp allow rule, in order to allow icmpv4.

    :returns: the list of rules that were actually created.
    """

    if security_group_rules_client is None:
        security_group_rules_client = self.security_group_rules_client
    if security_groups_client is None:
        security_groups_client = self.security_groups_client
    rules = []
    rulesets = [
        dict(
            # ssh
            protocol='tcp',
            port_range_min=22,
            port_range_max=22,
        ),
        dict(
            # ping
            protocol='icmp',
        ),
        dict(
            # ipv6-icmp for ping6
            protocol='icmp',
            ethertype='IPv6',
        )
    ]
    sec_group_rules_client = security_group_rules_client
    for ruleset in rulesets:
        # Each ruleset is reused (mutated) once per direction.
        for r_direction in ['ingress', 'egress']:
            ruleset['direction'] = r_direction
            try:
                sg_rule = self._create_security_group_rule(
                    sec_group_rules_client=sec_group_rules_client,
                    secgroup=secgroup,
                    security_groups_client=security_groups_client,
                    **ruleset)
            except lib_exc.Conflict as ex:
                # if rule already exist - skip rule and continue
                # NOTE(review): relies on the private ``_error_string``
                # attribute of tempest's Conflict exception — confirm it
                # still exists when bumping the tempest requirement.
                msg = 'Security group rule already exists'
                if msg not in ex._error_string:
                    raise ex
            else:
                self.assertEqual(r_direction, sg_rule['direction'])
                rules.append(sg_rule)

    return rules
+
def _get_router(self, client=None, tenant_id=None):
    """Retrieve a router for the given tenant id.

    If a public router has been configured, it will be returned.

    If a public router has not been configured, but a public
    network has, a tenant router will be created and returned that
    routes traffic to the public network.

    :raises Exception: when neither public_router_id nor
        public_network_id is configured.
    """
    if not client:
        client = self.routers_client
    if not tenant_id:
        tenant_id = client.tenant_id
    router_id = CONF.network.public_router_id
    network_id = CONF.network.public_network_id
    if router_id:
        body = client.show_router(router_id)
        return body['router']
    elif network_id:
        # Create a tenant router and point its gateway at the public net.
        router = self._create_router(client, tenant_id)
        kwargs = {'external_gateway_info': dict(network_id=network_id)}
        router = client.update_router(router['id'], **kwargs)['router']
        return router
    else:
        raise Exception("Neither of 'public_router_id' or "
                        "'public_network_id' has been defined.")
+
def _create_router(self, client=None, tenant_id=None,
                   namestart='router-smoke'):
    """Create a randomly named router for *tenant_id*; deleted on cleanup."""
    client = client or self.routers_client
    tenant_id = tenant_id or client.tenant_id
    router_name = data_utils.rand_name(namestart)
    router = client.create_router(name=router_name,
                                  admin_state_up=True,
                                  tenant_id=tenant_id)['router']
    self.assertEqual(router['name'], router_name)
    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    client.delete_router,
                    router['id'])
    return router
+
def _update_router_admin_state(self, router, admin_state_up):
    """Set the router's admin_state_up flag and verify it stuck."""
    updated = self.routers_client.update_router(
        router['id'], admin_state_up=admin_state_up)['router']
    self.assertEqual(admin_state_up, updated['admin_state_up'])
+
def create_networks(self, networks_client=None,
                    routers_client=None, subnets_client=None,
                    tenant_id=None, dns_nameservers=None,
                    port_security_enabled=True):
    """Create a network with a subnet connected to a router.

    The baremetal driver is a special case since all nodes are
    on the same shared network.

    :param tenant_id: id of tenant to create resources in.
    :param dns_nameservers: list of dns servers to send to subnet.
    :returns: network, subnet, router; subnet and router are None on
        a shared physical (baremetal) network.
    """
    if CONF.network.shared_physical_network:
        # NOTE(Shrews): This exception is for environments where tenant
        # credential isolation is available, but network separation is
        # not (the current baremetal case). Likely can be removed when
        # test account mgmt is reworked:
        # https://blueprints.launchpad.net/tempest/+spec/test-accounts
        if not CONF.compute.fixed_network_name:
            m = 'fixed_network_name must be specified in config'
            raise lib_exc.InvalidConfiguration(m)
        network = self._get_network_by_name(
            CONF.compute.fixed_network_name)
        router = None
        subnet = None
    else:
        network = self._create_network(
            networks_client=networks_client,
            tenant_id=tenant_id,
            port_security_enabled=port_security_enabled)
        router = self._get_router(client=routers_client,
                                  tenant_id=tenant_id)
        subnet_kwargs = dict(network=network,
                             subnets_client=subnets_client,
                             routers_client=routers_client)
        # use explicit check because empty list is a valid option
        if dns_nameservers is not None:
            subnet_kwargs['dns_nameservers'] = dns_nameservers
        subnet = self._create_subnet(**subnet_kwargs)
        if not routers_client:
            routers_client = self.routers_client
        router_id = router['id']
        routers_client.add_router_interface(router_id,
                                            subnet_id=subnet['id'])

        # save a cleanup job to remove this association between
        # router and subnet
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        routers_client.remove_router_interface, router_id,
                        subnet_id=subnet['id'])
    return network, subnet, router
diff --git a/neutron_tempest_plugin/sfc/tests/scenario/test_sfc.py b/neutron_tempest_plugin/sfc/tests/scenario/test_sfc.py
new file mode 100644
index 0000000..995868a
--- /dev/null
+++ b/neutron_tempest_plugin/sfc/tests/scenario/test_sfc.py
@@ -0,0 +1,1251 @@
+# Copyright 2016 Futurewei. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import time
+
+from oslo_log import log
+from tempest.common import utils
+from tempest import config
+from tempest.lib.common.utils import test_utils
+from tempest.lib import decorators
+
+from neutron_tempest_plugin.sfc.tests.scenario import base
+
+CONF = config.CONF
+LOG = log.getLogger(__name__)
+
+
+class TestSfc(base.SfcScenarioTest):
+ @classmethod
+ def skip_checks(cls):
+ super(TestSfc, cls).skip_checks()
+ if not (CONF.network.project_networks_reachable or
+ CONF.network.public_network_id):
+ msg = ('Either project_networks_reachable must be "true", or '
+ 'public_network_id must be defined.')
+ raise cls.skipException(msg)
+ required_exts = ['sfc', 'flow_classifier']
+ for ext in required_exts:
+ if not utils.is_extension_enabled(ext, 'network'):
+ msg = "%s Extension not enabled." % ext
+ raise cls.skipException(msg)
+
    @classmethod
    def setup_credentials(cls):
        """Set up credentials without auto-provisioned network resources.

        The tests build their own networks/routers in setUp(), so ask the
        credential provider for no network resources before delegating to
        the base class.
        """
        # Create no network resources for these tests.
        cls.set_network_resources()
        super(TestSfc, cls).setup_credentials()
+
    def setUp(self):
        """Create the topology shared by every test in this class.

        Builds one tenant network (net1, port security disabled) with its
        subnet and router, plus two extra routers (router2/router3) each
        attached to net1 through a dedicated port.  Those ports are later
        used as the ingress/egress of port pairs, and their fixed IPs are
        recorded so connectivity checks can assert the traffic path.
        """
        super(TestSfc, self).setUp()

        # min_compute_nodes > 1 means a multi-node deployment; only logged,
        # not otherwise acted on in the visible tests.
        self.multi_node = CONF.compute.min_compute_nodes > 1
        if self.multi_node:
            LOG.info("Running test on multi node")
        else:
            LOG.info("Running test on single node")
        # Save servers UUIDs
        self.servers = []

        self.ssh_user = CONF.validation.image_ssh_user
        self.keypair = self.create_keypair()
        # Port security disabled on net1 -- presumably so the service
        # function ports can forward transit traffic (TODO confirm).
        self.net1, self.subnet1, self.router1 = self.create_networks(
            port_security_enabled=False)
        self.router2 = self._create_router()
        self.router3 = self._create_router()
        # One extra port on net1 per router; these act as the "service
        # function" endpoints referenced by the port-pair tests.
        self.router2_net1 = self._create_port(self.net1['id'])
        self._add_router_interface(
            self.router2['id'], self.router2_net1['id'])
        self.router3_net1 = self._create_port(self.net1['id'])
        self._add_router_interface(
            self.router3['id'], self.router3_net1['id'])
        # Fixed IPs of the SF ports, asserted against traceroute-style
        # path checks in the tests below.
        self.router2_net1_fixed_ip = self.router2_net1[
            'fixed_ips'][0]['ip_address']
        self.router3_net1_fixed_ip = self.router3_net1[
            'fixed_ips'][0]['ip_address']
+
+ def _setup_server(self, network):
+ server = self._create_server(network=self.net1)
+ floating_ip = self._create_floating_ip(
+ server)
+ port_id, fixed_ip = (
+ self._get_server_port_id_and_ip4(server))
+ return floating_ip, port_id, fixed_ip
+
+ def _create_floating_ip(self, server, client=None):
+ floating_ip = self.create_floating_ip(
+ server, client=client)
+ self.check_floating_ip_status(floating_ip, 'ACTIVE')
+ floating_ip_addr = floating_ip['floating_ip_address']
+ self.check_public_network_connectivity(
+ floating_ip_addr, self.ssh_user,
+ self.keypair['private_key'], True,
+ servers=[server])
+ return floating_ip_addr
+
    def _wait_for_port_chain_status(self, port_chain, status):
        # NOTE(review): placeholder waiter -- both arguments are ignored and
        # a fixed delay is used instead of polling *port_chain* for *status*
        # (presumably because no pollable status field is exposed; confirm
        # before replacing this with a real wait loop).
        time.sleep(10)
+
+ def _create_port_chain_helper(self, symmetric):
+ (
+ server1_floating_ip, server1_port_id, server1_fixed_ip
+ ) = self._setup_server(self.net1)
+ (
+ server2_floating_ip, server2_port_id, server2_fixed_ip
+ ) = self._setup_server(self.net1)
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ if symmetric:
+ fc = self._create_flowclassifier(
+ logical_source_port=server1_port_id,
+ source_ip_prefix='%s/32' % server1_fixed_ip,
+ logical_destination_port=server2_port_id,
+ destination_ip_prefix='%s/32' % server2_fixed_ip
+ )
+ else:
+ fc = self._create_flowclassifier(
+ logical_source_port=server1_port_id,
+ source_ip_prefix='%s/32' % server1_fixed_ip,
+ destination_ip_prefix='%s/32' % server2_fixed_ip
+ )
+ port_pair = self._create_port_pair(
+ ingress=self.router2_net1['id'],
+ egress=self.router2_net1['id']
+ )
+ port_pair_group = self._create_port_pair_group(
+ port_pairs=[port_pair['id']]
+ )
+ port_chain = self._create_port_chain(
+ port_pair_groups=[port_pair_group['id']],
+ flow_classifiers=[fc['id']],
+ chain_parameters={'symmetric': symmetric}
+ )
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [[self.router2_net1_fixed_ip]],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self.portchain_client.delete_port_chain(port_chain['id'])
+ self._wait_for_port_chain_status(port_chain, 'DELETED')
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+
    @decorators.idempotent_id('f970f6b3-6541-47ac-a9ea-f769be1e21a8')
    @utils.services('compute', 'network')
    def test_create_port_chain(self):
        # Asymmetric variant: only server1 -> server2 traffic is steered.
        self._create_port_chain_helper(False)
+
    @decorators.idempotent_id('35927961-1904-4a6b-9d08-ad819f1cf812')
    @utils.services('compute', 'network')
    def test_create_port_chain_symmetric(self):
        # Symmetric variant: return traffic is steered as well.
        self._create_port_chain_helper(True)
+
+ def _create_port_chain_multi_fc_helper(self, symmetric):
+ (
+ server1_floating_ip, server1_port_id, server1_fixed_ip
+ ) = self._setup_server(self.net1)
+ (
+ server2_floating_ip, server2_port_id, server2_fixed_ip
+ ) = self._setup_server(self.net1)
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self._check_connectivity(
+ server2_floating_ip, server1_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ if symmetric:
+ fc1 = self._create_flowclassifier(
+ logical_source_port=server1_port_id,
+ source_ip_prefix='%s/32' % server1_fixed_ip,
+ logical_destination_port=server2_port_id,
+ destination_ip_prefix='%s/32' % server2_fixed_ip
+ )
+ fc2 = self._create_flowclassifier(
+ logical_source_port=server2_port_id,
+ source_ip_prefix='%s/32' % server2_fixed_ip,
+ logical_destination_port=server1_port_id,
+ destination_ip_prefix='%s/32' % server1_fixed_ip
+ )
+ else:
+ fc1 = self._create_flowclassifier(
+ logical_source_port=server1_port_id,
+ source_ip_prefix='%s/32' % server1_fixed_ip,
+ destination_ip_prefix='%s/32' % server2_fixed_ip
+ )
+ fc2 = self._create_flowclassifier(
+ logical_source_port=server2_port_id,
+ source_ip_prefix='%s/32' % server2_fixed_ip,
+ destination_ip_prefix='%s/32' % server1_fixed_ip
+ )
+ port_pair = self._create_port_pair(
+ ingress=self.router2_net1['id'],
+ egress=self.router2_net1['id']
+ )
+ port_pair_group = self._create_port_pair_group(
+ port_pairs=[port_pair['id']]
+ )
+ port_chain = self._create_port_chain(
+ port_pair_groups=[port_pair_group['id']],
+ flow_classifiers=[fc1['id'], fc2['id']],
+ chain_parameters={'symmetric': symmetric}
+ )
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [[self.router2_net1_fixed_ip]],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self._check_connectivity(
+ server2_floating_ip, server1_fixed_ip,
+ [[self.router2_net1_fixed_ip]],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self.portchain_client.delete_port_chain(port_chain['id'])
+ self._wait_for_port_chain_status(port_chain, 'DELETED')
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self._check_connectivity(
+ server2_floating_ip, server1_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+
    @decorators.idempotent_id('f970f6b3-6541-47ac-a9ea-f769be1e21a9')
    @utils.services('compute', 'network')
    def test_create_port_chain_multi_flow_classifiers(self):
        # Two classifiers (one per direction), asymmetric chain.
        self._create_port_chain_multi_fc_helper(False)
+
    @decorators.idempotent_id('f970f6b3-6541-47ac-a9ea-f769be1e21b1')
    @utils.services('compute', 'network')
    def test_create_port_chain_multi_flow_classifiers_symmetric(self):
        # Two classifiers (one per direction), symmetric chain.
        self._create_port_chain_multi_fc_helper(True)
+
+ def _create_port_chain_multi_port_pairs_helper(self, symmetric):
+ (
+ server1_floating_ip, server1_port_id, server1_fixed_ip
+ ) = self._setup_server(self.net1)
+ (
+ server2_floating_ip, server2_port_id, server2_fixed_ip
+ ) = self._setup_server(self.net1)
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ if symmetric:
+ fc = self._create_flowclassifier(
+ logical_source_port=server1_port_id,
+ source_ip_prefix='%s/32' % server1_fixed_ip,
+ logical_destination_port=server2_port_id,
+ destination_ip_prefix='%s/32' % server2_fixed_ip
+ )
+ else:
+ fc = self._create_flowclassifier(
+ logical_source_port=server1_port_id,
+ source_ip_prefix='%s/32' % server1_fixed_ip,
+ destination_ip_prefix='%s/32' % server2_fixed_ip
+ )
+ port_pair1 = self._create_port_pair(
+ ingress=self.router2_net1['id'],
+ egress=self.router2_net1['id']
+ )
+ port_pair2 = self._create_port_pair(
+ ingress=self.router3_net1['id'],
+ egress=self.router3_net1['id']
+ )
+ port_pair_group = self._create_port_pair_group(
+ port_pairs=[port_pair1['id'], port_pair2['id']]
+ )
+ port_chain = self._create_port_chain(
+ port_pair_groups=[port_pair_group['id']],
+ flow_classifiers=[fc['id']],
+ chain_parameters={'symmetric': symmetric}
+ )
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [[self.router2_net1_fixed_ip, self.router3_net1_fixed_ip]],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self.portchain_client.delete_port_chain(port_chain['id'])
+ self._wait_for_port_chain_status(port_chain, 'DELETED')
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+
    @decorators.idempotent_id('f970f6b3-6541-47ac-a9ea-f769be1e21aa')
    @utils.services('compute', 'network')
    def test_create_port_chain_multi_port_pairs(self):
        # One PPG with two port pairs, asymmetric chain.
        self._create_port_chain_multi_port_pairs_helper(False)
+
    @decorators.idempotent_id('f970f6b3-6541-47ac-a9ea-f869be1e21ad')
    @utils.services('compute', 'network')
    def test_create_port_chain_multi_port_pairs_symmetric(self):
        # One PPG with two port pairs, symmetric chain.
        self._create_port_chain_multi_port_pairs_helper(True)
+
+ def _create_port_chain_multi_ppg_helper(self, symmetric):
+ (
+ server1_floating_ip, server1_port_id, server1_fixed_ip
+ ) = self._setup_server(self.net1)
+ (
+ server2_floating_ip, server2_port_id, server2_fixed_ip
+ ) = self._setup_server(self.net1)
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ if symmetric:
+ fc = self._create_flowclassifier(
+ logical_source_port=server1_port_id,
+ source_ip_prefix="%s/32" % server1_fixed_ip,
+ logical_destination_port=server2_port_id,
+ destination_ip_prefix="%s/32" % server2_fixed_ip
+ )
+ else:
+ fc = self._create_flowclassifier(
+ logical_source_port=server1_port_id,
+ source_ip_prefix="%s/32" % server1_fixed_ip,
+ destination_ip_prefix="%s/32" % server2_fixed_ip
+ )
+ port_pair1 = self._create_port_pair(
+ ingress=self.router2_net1['id'],
+ egress=self.router2_net1['id']
+ )
+ port_pair2 = self._create_port_pair(
+ ingress=self.router3_net1['id'],
+ egress=self.router3_net1['id']
+ )
+ port_pair_group1 = self._create_port_pair_group(
+ port_pairs=[port_pair1['id']]
+ )
+ port_pair_group2 = self._create_port_pair_group(
+ port_pairs=[port_pair2['id']]
+ )
+ port_chain = self._create_port_chain(
+ port_pair_groups=[port_pair_group1['id'], port_pair_group2['id']],
+ flow_classifiers=[fc['id']],
+ chain_parameters={'symmetric': symmetric}
+ )
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [[self.router2_net1_fixed_ip], [self.router3_net1_fixed_ip]],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self.portchain_client.delete_port_chain(port_chain['id'])
+ self._wait_for_port_chain_status(port_chain, 'DELETED')
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+
    @decorators.idempotent_id('f970f6b3-6541-47ac-a9ea-f769be1e21ab')
    @utils.services('compute', 'network')
    def test_create_port_chain_multi_port_pair_groups(self):
        # Two PPGs chained in sequence, asymmetric chain.
        self._create_port_chain_multi_ppg_helper(False)
+
    @decorators.idempotent_id('f970f6b3-6541-47ac-a9ea-f769be1e21b0')
    @utils.services('compute', 'network')
    def test_create_port_chain_multi_port_pair_groups_symmetric(self):
        # Two PPGs chained in sequence, symmetric chain.
        self._create_port_chain_multi_ppg_helper(True)
+
+ @decorators.idempotent_id('f970f6b3-6541-47ac-a9ea-f769be1e22ab')
+ @utils.services('compute', 'network')
+ def test_create_multi_port_chain(self):
+ (
+ server1_floating_ip, server1_port_id, server1_fixed_ip
+ ) = self._setup_server(self.net1)
+ (
+ server2_floating_ip, server2_port_id, server2_fixed_ip
+ ) = self._setup_server(self.net1)
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ fc1 = self._create_flowclassifier(
+ logical_source_port=server1_port_id,
+ source_ip_prefix='%s/32' % server1_fixed_ip,
+ destination_ip_prefix='%s/32' % server2_fixed_ip
+ )
+ fc2 = self._create_flowclassifier(
+ logical_source_port=server2_port_id,
+ source_ip_prefix='%s/32' % server2_fixed_ip,
+ destination_ip_prefix='%s/32' % server1_fixed_ip
+ )
+ port_pair1 = self._create_port_pair(
+ ingress=self.router2_net1['id'],
+ egress=self.router2_net1['id']
+ )
+ port_pair2 = self._create_port_pair(
+ ingress=self.router3_net1['id'],
+ egress=self.router3_net1['id']
+ )
+ port_pair_group1 = self._create_port_pair_group(
+ port_pairs=[port_pair1['id']]
+ )
+ port_pair_group2 = self._create_port_pair_group(
+ port_pairs=[port_pair2['id']]
+ )
+ port_chain1 = self._create_port_chain(
+ port_pair_groups=[port_pair_group1['id'], port_pair_group2['id']],
+ flow_classifiers=[fc1['id']]
+ )
+ port_chain2 = self._create_port_chain(
+ port_pair_groups=[port_pair_group2['id'], port_pair_group1['id']],
+ flow_classifiers=[fc2['id']]
+ )
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [[self.router2_net1_fixed_ip], [self.router3_net1_fixed_ip]],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self._check_connectivity(
+ server2_floating_ip, server1_fixed_ip,
+ [[self.router3_net1_fixed_ip], [self.router2_net1_fixed_ip]],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self.portchain_client.delete_port_chain(port_chain1['id'])
+ self.portchain_client.delete_port_chain(port_chain2['id'])
+ self._wait_for_port_chain_status(port_chain1, 'DELETED')
+ self._wait_for_port_chain_status(port_chain2, 'DELETED')
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self._check_connectivity(
+ server2_floating_ip, server1_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+
+ @decorators.idempotent_id('f970f6b3-6541-47ac-a9ea-f769be1e21ac')
+ @utils.services('compute', 'network')
+ def test_update_port_chain_add_flow_classifiers(self):
+ (
+ server1_floating_ip, server1_port_id, server1_fixed_ip
+ ) = self._setup_server(self.net1)
+ (
+ server2_floating_ip, server2_port_id, server2_fixed_ip
+ ) = self._setup_server(self.net1)
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self._check_connectivity(
+ server2_floating_ip, server1_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ fc1 = self._create_flowclassifier(
+ logical_source_port=server1_port_id,
+ source_ip_prefix='%s/32' % server1_fixed_ip,
+ destination_ip_prefix='%s/32' % server2_fixed_ip
+ )
+ fc2 = self._create_flowclassifier(
+ logical_source_port=server2_port_id,
+ source_ip_prefix='%s/32' % server2_fixed_ip,
+ destination_ip_prefix='%s/32' % server1_fixed_ip
+ )
+ port_pair = self._create_port_pair(
+ ingress=self.router2_net1['id'],
+ egress=self.router2_net1['id']
+ )
+ port_pair_group = self._create_port_pair_group(
+ port_pairs=[port_pair['id']]
+ )
+ port_chain = self._create_port_chain(
+ port_pair_groups=[port_pair_group['id']],
+ flow_classifiers=[fc1['id']]
+ )
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [[self.router2_net1_fixed_ip]],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self._check_connectivity(
+ server2_floating_ip, server1_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self.portchain_client.update_port_chain(
+ port_chain['id'], flow_classifiers=[fc1['id'], fc2['id']])
+ self._wait_for_port_chain_status(port_chain, 'ACTIVE')
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [[self.router2_net1_fixed_ip]],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self._check_connectivity(
+ server2_floating_ip, server1_fixed_ip,
+ [[self.router2_net1_fixed_ip]],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+
+ @decorators.idempotent_id('f970f6b3-6541-47ac-a9ea-f769be1e21ad')
+ @utils.services('compute', 'network')
+ def test_update_port_chain_remove_flow_classifiers(self):
+ (
+ server1_floating_ip, server1_port_id, server1_fixed_ip
+ ) = self._setup_server(self.net1)
+ (
+ server2_floating_ip, server2_port_id, server2_fixed_ip
+ ) = self._setup_server(self.net1)
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self._check_connectivity(
+ server2_floating_ip, server1_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ fc1 = self._create_flowclassifier(
+ logical_source_port=server1_port_id,
+ source_ip_prefix='%s/32' % server1_fixed_ip,
+ destination_ip_prefix='%s/32' % server2_fixed_ip
+ )
+ fc2 = self._create_flowclassifier(
+ logical_source_port=server2_port_id,
+ source_ip_prefix='%s/32' % server2_fixed_ip,
+ destination_ip_prefix='%s/32' % server1_fixed_ip
+ )
+ port_pair = self._create_port_pair(
+ ingress=self.router2_net1['id'],
+ egress=self.router2_net1['id']
+ )
+ port_pair_group = self._create_port_pair_group(
+ port_pairs=[port_pair['id']]
+ )
+ port_chain = self._create_port_chain(
+ port_pair_groups=[port_pair_group['id']],
+ flow_classifiers=[fc1['id'], fc2['id']]
+ )
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [[self.router2_net1_fixed_ip]],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self._check_connectivity(
+ server2_floating_ip, server1_fixed_ip,
+ [[self.router2_net1_fixed_ip]],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self.portchain_client.update_port_chain(
+ port_chain['id'], flow_classifiers=[fc1['id']])
+ self._wait_for_port_chain_status(port_chain, 'ACTIVE')
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [[self.router2_net1_fixed_ip]],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self._check_connectivity(
+ server2_floating_ip, server1_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+
+ @decorators.idempotent_id('f970f6b3-6541-47ac-a9ea-f769be1e21ae')
+ @utils.services('compute', 'network')
+ def test_update_port_chain_replace_flow_classifiers(self):
+ (
+ server1_floating_ip, server1_port_id, server1_fixed_ip
+ ) = self._setup_server(self.net1)
+ (
+ server2_floating_ip, server2_port_id, server2_fixed_ip
+ ) = self._setup_server(self.net1)
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self._check_connectivity(
+ server2_floating_ip, server1_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ fc1 = self._create_flowclassifier(
+ logical_source_port=server1_port_id,
+ source_ip_prefix='%s/32' % server1_fixed_ip,
+ destination_ip_prefix='%s/32' % server2_fixed_ip
+ )
+ fc2 = self._create_flowclassifier(
+ logical_source_port=server2_port_id,
+ source_ip_prefix='%s/32' % server2_fixed_ip,
+ destination_ip_prefix='%s/32' % server1_fixed_ip
+ )
+ port_pair = self._create_port_pair(
+ ingress=self.router2_net1['id'],
+ egress=self.router2_net1['id']
+ )
+ port_pair_group = self._create_port_pair_group(
+ port_pairs=[port_pair['id']]
+ )
+ port_chain = self._create_port_chain(
+ port_pair_groups=[port_pair_group['id']],
+ flow_classifiers=[fc1['id']]
+ )
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [[self.router2_net1_fixed_ip]],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self._check_connectivity(
+ server2_floating_ip, server1_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self.portchain_client.update_port_chain(
+ port_chain['id'], flow_classifiers=[fc2['id']])
+ self._wait_for_port_chain_status(port_chain, 'DELETED')
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self._check_connectivity(
+ server2_floating_ip, server1_fixed_ip,
+ [[self.router2_net1_fixed_ip]],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+
+ @decorators.idempotent_id('f970f6b3-6541-47ac-a9ea-f769be1e21af')
+ @utils.services('compute', 'network')
+ def test_update_port_chain_add_port_pair_groups(self):
+ (
+ server1_floating_ip, server1_port_id, server1_fixed_ip
+ ) = self._setup_server(self.net1)
+ (
+ server2_floating_ip, server2_port_id, server2_fixed_ip
+ ) = self._setup_server(self.net1)
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self._check_connectivity(
+ server2_floating_ip, server1_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ fc = self._create_flowclassifier(
+ logical_source_port=server1_port_id,
+ source_ip_prefix='%s/32' % server1_fixed_ip,
+ destination_ip_prefix='%s/32' % server2_fixed_ip
+ )
+ port_pair1 = self._create_port_pair(
+ ingress=self.router2_net1['id'],
+ egress=self.router2_net1['id']
+ )
+ port_pair2 = self._create_port_pair(
+ ingress=self.router3_net1['id'],
+ egress=self.router3_net1['id']
+ )
+ port_pair_group1 = self._create_port_pair_group(
+ port_pairs=[port_pair1['id']]
+ )
+ port_pair_group2 = self._create_port_pair_group(
+ port_pairs=[port_pair2['id']]
+ )
+ port_chain = self._create_port_chain(
+ port_pair_groups=[port_pair_group1['id']],
+ flow_classifiers=[fc['id']]
+ )
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [[self.router2_net1_fixed_ip]],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self.portchain_client.update_port_chain(
+ port_chain['id'],
+ port_pair_groups=[
+ port_pair_group1['id'], port_pair_group2['id']
+ ]
+ )
+ self._wait_for_port_chain_status(port_chain, 'ACTIVE')
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [[self.router2_net1_fixed_ip], [self.router3_net1_fixed_ip]],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+
+ @decorators.idempotent_id('f970f6b3-6541-47ac-a9ea-f769be1e21bf')
+ @utils.services('compute', 'network')
+ def test_update_port_chain_remove_port_pair_groups(self):
+ (
+ server1_floating_ip, server1_port_id, server1_fixed_ip
+ ) = self._setup_server(self.net1)
+ (
+ server2_floating_ip, server2_port_id, server2_fixed_ip
+ ) = self._setup_server(self.net1)
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self._check_connectivity(
+ server2_floating_ip, server1_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ fc = self._create_flowclassifier(
+ logical_source_port=server1_port_id,
+ source_ip_prefix='%s/32' % server1_fixed_ip,
+ destination_ip_prefix='%s/32' % server2_fixed_ip
+ )
+ port_pair1 = self._create_port_pair(
+ ingress=self.router2_net1['id'],
+ egress=self.router2_net1['id']
+ )
+ port_pair2 = self._create_port_pair(
+ ingress=self.router3_net1['id'],
+ egress=self.router3_net1['id']
+ )
+ port_pair_group1 = self._create_port_pair_group(
+ port_pairs=[port_pair1['id']]
+ )
+ port_pair_group2 = self._create_port_pair_group(
+ port_pairs=[port_pair2['id']]
+ )
+ port_chain = self._create_port_chain(
+ port_pair_groups=[
+ port_pair_group1['id'], port_pair_group2['id']
+ ],
+ flow_classifiers=[fc['id']]
+ )
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [[self.router2_net1_fixed_ip], [self.router3_net1_fixed_ip]],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self.portchain_client.update_port_chain(
+ port_chain['id'],
+ port_pair_groups=[
+ port_pair_group1['id']
+ ]
+ )
+ self._wait_for_port_chain_status(port_chain, 'ACTIVE')
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [[self.router2_net1_fixed_ip]],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+
+ @decorators.idempotent_id('f970f6b3-6541-47ac-a9ea-f769be1e21be')
+ @utils.services('compute', 'network')
+ def test_update_port_chain_replace_port_pair_groups(self):
+ (
+ server1_floating_ip, server1_port_id, server1_fixed_ip
+ ) = self._setup_server(self.net1)
+ (
+ server2_floating_ip, server2_port_id, server2_fixed_ip
+ ) = self._setup_server(self.net1)
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self._check_connectivity(
+ server2_floating_ip, server1_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ fc = self._create_flowclassifier(
+ logical_source_port=server1_port_id,
+ source_ip_prefix='%s/32' % server1_fixed_ip,
+ destination_ip_prefix='%s/32' % server2_fixed_ip
+ )
+ port_pair1 = self._create_port_pair(
+ ingress=self.router2_net1['id'],
+ egress=self.router2_net1['id']
+ )
+ port_pair2 = self._create_port_pair(
+ ingress=self.router3_net1['id'],
+ egress=self.router3_net1['id']
+ )
+ port_pair_group1 = self._create_port_pair_group(
+ port_pairs=[port_pair1['id']]
+ )
+ port_pair_group2 = self._create_port_pair_group(
+ port_pairs=[port_pair2['id']]
+ )
+ port_chain = self._create_port_chain(
+ port_pair_groups=[
+ port_pair_group1['id']
+ ],
+ flow_classifiers=[fc['id']]
+ )
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [[self.router2_net1_fixed_ip]],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self.portchain_client.update_port_chain(
+ port_chain['id'],
+ port_pair_groups=[
+ port_pair_group2['id']
+ ]
+ )
+ self._wait_for_port_chain_status(port_chain, 'ACTIVE')
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [[self.router3_net1_fixed_ip]],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+
+ @decorators.idempotent_id('f970f6b3-6541-47ac-a9ea-f769be1e21bc')
+ @utils.services('compute', 'network')
+ def test_update_port_chain_replace_port_pair_groups_flow_classifiers(self):
+ (
+ server1_floating_ip, server1_port_id, server1_fixed_ip
+ ) = self._setup_server(self.net1)
+ (
+ server2_floating_ip, server2_port_id, server2_fixed_ip
+ ) = self._setup_server(self.net1)
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self._check_connectivity(
+ server2_floating_ip, server1_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ fc1 = self._create_flowclassifier(
+ logical_source_port=server1_port_id,
+ source_ip_prefix='%s/32' % server1_fixed_ip,
+ destination_ip_prefix='%s/32' % server2_fixed_ip
+ )
+ fc2 = self._create_flowclassifier(
+ logical_source_port=server2_port_id,
+ source_ip_prefix='%s/32' % server2_fixed_ip,
+ destination_ip_prefix='%s/32' % server1_fixed_ip
+ )
+ port_pair1 = self._create_port_pair(
+ ingress=self.router2_net1['id'],
+ egress=self.router2_net1['id']
+ )
+ port_pair2 = self._create_port_pair(
+ ingress=self.router3_net1['id'],
+ egress=self.router3_net1['id']
+ )
+ port_pair_group1 = self._create_port_pair_group(
+ port_pairs=[port_pair1['id']]
+ )
+ port_pair_group2 = self._create_port_pair_group(
+ port_pairs=[port_pair2['id']]
+ )
+ port_chain = self._create_port_chain(
+ port_pair_groups=[
+ port_pair_group1['id']
+ ],
+ flow_classifiers=[fc1['id']]
+ )
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [[self.router2_net1_fixed_ip]],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self._check_connectivity(
+ server2_floating_ip, server1_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self.portchain_client.update_port_chain(
+ port_chain['id'],
+ port_pair_groups=[port_pair_group2['id']],
+ flow_classifiers=[fc2['id']])
+ self._wait_for_port_chain_status(port_chain, 'ACTIVE')
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self._check_connectivity(
+ server2_floating_ip, server1_fixed_ip,
+ [[self.router3_net1_fixed_ip]],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+
    def _wait_for_port_pair_group_status(self, port_pair_group, status):
        # NOTE(review): placeholder waiter -- both arguments are ignored and
        # a fixed delay is used instead of polling *port_pair_group* for
        # *status* (presumably because no pollable status field is exposed;
        # confirm before replacing this with a real wait loop).
        time.sleep(10)
+
+ @decorators.idempotent_id('f970f6b3-6541-47ac-a9ea-f769be1e21bb')
+ @utils.services('compute', 'network')
+ def test_update_port_pair_group_add_port_pairs(self):
+ (
+ server1_floating_ip, server1_port_id, server1_fixed_ip
+ ) = self._setup_server(self.net1)
+ (
+ server2_floating_ip, server2_port_id, server2_fixed_ip
+ ) = self._setup_server(self.net1)
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ fc = self._create_flowclassifier(
+ logical_source_port=server1_port_id,
+ source_ip_prefix='%s/32' % server1_fixed_ip,
+ destination_ip_prefix='%s/32' % server2_fixed_ip
+ )
+ port_pair1 = self._create_port_pair(
+ ingress=self.router2_net1['id'],
+ egress=self.router2_net1['id']
+ )
+ port_pair2 = self._create_port_pair(
+ ingress=self.router3_net1['id'],
+ egress=self.router3_net1['id']
+ )
+ port_pair_group = self._create_port_pair_group(
+ port_pairs=[port_pair1['id']]
+ )
+ self._create_port_chain(
+ port_pair_groups=[
+ port_pair_group['id']
+ ],
+ flow_classifiers=[fc['id']]
+ )
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [[self.router2_net1_fixed_ip]],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+ self.portpairgroup_client.update_port_pair_group(
+ port_pair_group['id'],
+ port_pairs=[
+ port_pair1['id'], port_pair2['id']
+ ]
+ )
+ self._wait_for_port_pair_group_status(port_pair_group, 'ACTIVE')
+ self._check_connectivity(
+ server1_floating_ip, server2_fixed_ip,
+ [[self.router2_net1_fixed_ip, self.router3_net1_fixed_ip]],
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+
+    @decorators.idempotent_id('f970f6b3-6541-47ac-a9ea-f769be1e21ba')
+    @utils.services('compute', 'network')
+    def test_update_port_pair_group_remove_port_pairs(self):  # PPG update: shrink group from two port pairs to one
+        (
+            server1_floating_ip, server1_port_id, server1_fixed_ip
+        ) = self._setup_server(self.net1)
+        (
+            server2_floating_ip, server2_port_id, server2_fixed_ip
+        ) = self._setup_server(self.net1)
+        self._check_connectivity(
+            server1_floating_ip, server2_fixed_ip,
+            [],  # presumably the expected SF hop paths; none before the chain exists -- confirm in base class
+            username=self.ssh_user,
+            private_key=self.keypair['private_key'])
+        fc = self._create_flowclassifier(
+            logical_source_port=server1_port_id,
+            source_ip_prefix='%s/32' % server1_fixed_ip,
+            destination_ip_prefix='%s/32' % server2_fixed_ip
+        )
+        port_pair1 = self._create_port_pair(
+            ingress=self.router2_net1['id'],
+            egress=self.router2_net1['id']
+        )
+        port_pair2 = self._create_port_pair(
+            ingress=self.router3_net1['id'],
+            egress=self.router3_net1['id']
+        )
+        port_pair_group = self._create_port_pair_group(
+            port_pairs=[port_pair1['id'], port_pair2['id']]  # group starts with both port pairs
+        )
+        self._create_port_chain(
+            port_pair_groups=[
+                port_pair_group['id']
+            ],
+            flow_classifiers=[fc['id']]
+        )
+        self._check_connectivity(
+            server1_floating_ip, server2_fixed_ip,
+            [[self.router2_net1_fixed_ip, self.router3_net1_fixed_ip]],
+            username=self.ssh_user,
+            private_key=self.keypair['private_key'])
+        self.portpairgroup_client.update_port_pair_group(
+            port_pair_group['id'],
+            port_pairs=[port_pair1['id']])  # drop port_pair2 from the group
+        self._wait_for_port_pair_group_status(port_pair_group, 'ACTIVE')
+        self._check_connectivity(
+            server1_floating_ip, server2_fixed_ip,
+            [[self.router2_net1_fixed_ip]],  # only the router2 SF hop should remain
+            username=self.ssh_user,
+            private_key=self.keypair['private_key'])
+
+    @decorators.idempotent_id('f970f6b3-6541-47ac-a9ea-f769be1e21b9')
+    @utils.services('compute', 'network')
+    def test_update_port_pair_group_replace_port_pairs(self):  # PPG update: swap the group's only port pair for another
+        (
+            server1_floating_ip, server1_port_id, server1_fixed_ip
+        ) = self._setup_server(self.net1)
+        (
+            server2_floating_ip, server2_port_id, server2_fixed_ip
+        ) = self._setup_server(self.net1)
+        self._check_connectivity(
+            server1_floating_ip, server2_fixed_ip,
+            [],  # presumably the expected SF hop paths; none before the chain exists -- confirm in base class
+            username=self.ssh_user,
+            private_key=self.keypair['private_key'])
+        fc = self._create_flowclassifier(
+            logical_source_port=server1_port_id,
+            source_ip_prefix='%s/32' % server1_fixed_ip,
+            destination_ip_prefix='%s/32' % server2_fixed_ip
+        )
+        port_pair1 = self._create_port_pair(
+            ingress=self.router2_net1['id'],
+            egress=self.router2_net1['id']
+        )
+        port_pair2 = self._create_port_pair(
+            ingress=self.router3_net1['id'],
+            egress=self.router3_net1['id']
+        )
+        port_pair_group = self._create_port_pair_group(
+            port_pairs=[port_pair1['id']]  # group starts with port_pair1 only
+        )
+        self._create_port_chain(
+            port_pair_groups=[
+                port_pair_group['id']
+            ],
+            flow_classifiers=[fc['id']]
+        )
+        self._check_connectivity(
+            server1_floating_ip, server2_fixed_ip,
+            [[self.router2_net1_fixed_ip]],  # traffic traverses the router2 SF
+            username=self.ssh_user,
+            private_key=self.keypair['private_key'])
+        self.portpairgroup_client.update_port_pair_group(
+            port_pair_group['id'],
+            port_pairs=[port_pair2['id']])  # replace port_pair1 with port_pair2
+        self._wait_for_port_pair_group_status(port_pair_group, 'ACTIVE')
+        self._check_connectivity(
+            server1_floating_ip, server2_fixed_ip,
+            [[self.router3_net1_fixed_ip]],  # traffic should now traverse the router3 SF instead
+            username=self.ssh_user,
+            private_key=self.keypair['private_key'])
+
+    @decorators.idempotent_id('69f8f427-a3a1-4f8f-814f-87e6faea9d54')
+    @utils.services('compute', 'network')
+    def test_multi_port_chains_update_port_pair_group_replace_port_pairs(
+        self
+    ):  # two chains sharing two PPGs; replace each PPG's port pair in turn
+        self.router4 = self._create_router()
+        self.router4_net1 = self._create_port(self.net1['id'])
+        self._add_router_interface(
+            self.router4['id'], self.router4_net1['id'])
+        self.router4_net1_fixed_ip = self.router4_net1[
+            'fixed_ips'][0]['ip_address']
+        self.router5 = self._create_router()
+        self.router5_net1 = self._create_port(self.net1['id'])
+        self._add_router_interface(
+            self.router5['id'], self.router5_net1['id'])
+        self.router5_net1_fixed_ip = self.router5_net1[
+            'fixed_ips'][0]['ip_address']
+        (
+            server1_floating_ip, server1_port_id, server1_fixed_ip
+        ) = self._setup_server(self.net1)
+        (
+            server2_floating_ip, server2_port_id, server2_fixed_ip
+        ) = self._setup_server(self.net1)
+        self._check_connectivity(
+            server1_floating_ip, server2_fixed_ip,
+            [],  # presumably the expected SF hop paths; none before the chains exist -- confirm in base class
+            username=self.ssh_user,
+            private_key=self.keypair['private_key'])
+        fc1 = self._create_flowclassifier(
+            logical_source_port=server1_port_id,
+            source_ip_prefix='%s/32' % server1_fixed_ip,
+            destination_ip_prefix='%s/32' % server2_fixed_ip
+        )
+        fc2 = self._create_flowclassifier(
+            logical_source_port=server2_port_id,
+            source_ip_prefix='%s/32' % server2_fixed_ip,
+            destination_ip_prefix='%s/32' % server1_fixed_ip
+        )
+        port_pair1 = self._create_port_pair(
+            ingress=self.router2_net1['id'],
+            egress=self.router2_net1['id']
+        )
+        port_pair2 = self._create_port_pair(
+            ingress=self.router3_net1['id'],
+            egress=self.router3_net1['id']
+        )
+        port_pair3 = self._create_port_pair(
+            ingress=self.router4_net1['id'],
+            egress=self.router4_net1['id']
+        )
+        port_pair4 = self._create_port_pair(
+            ingress=self.router5_net1['id'],
+            egress=self.router5_net1['id']
+        )
+        port_pair_group1 = self._create_port_pair_group(
+            port_pairs=[port_pair1['id']]
+        )
+        port_pair_group2 = self._create_port_pair_group(
+            port_pairs=[port_pair2['id']]
+        )
+        self._create_port_chain(
+            port_pair_groups=[
+                port_pair_group1['id'], port_pair_group2['id']
+            ],
+            flow_classifiers=[fc1['id']]
+        )
+        self._create_port_chain(
+            port_pair_groups=[
+                port_pair_group2['id'], port_pair_group1['id']
+            ],
+            flow_classifiers=[fc2['id']]
+        )
+        self._check_connectivity(
+            server1_floating_ip, server2_fixed_ip,
+            [[self.router2_net1_fixed_ip], [self.router3_net1_fixed_ip]],
+            username=self.ssh_user,
+            private_key=self.keypair['private_key'])
+        self._check_connectivity(
+            server2_floating_ip, server1_fixed_ip,
+            [[self.router3_net1_fixed_ip], [self.router2_net1_fixed_ip]],
+            username=self.ssh_user,
+            private_key=self.keypair['private_key'])
+        self.portpairgroup_client.update_port_pair_group(
+            port_pair_group1['id'],
+            port_pairs=[port_pair3['id']])
+        self._wait_for_port_pair_group_status(port_pair_group1, 'ACTIVE')
+        self._check_connectivity(
+            server1_floating_ip, server2_fixed_ip,
+            [[self.router4_net1_fixed_ip], [self.router3_net1_fixed_ip]],
+            username=self.ssh_user,
+            private_key=self.keypair['private_key'])
+        self._check_connectivity(
+            server2_floating_ip, server1_fixed_ip,
+            [[self.router3_net1_fixed_ip], [self.router4_net1_fixed_ip]],
+            username=self.ssh_user,
+            private_key=self.keypair['private_key'])
+        self.portpairgroup_client.update_port_pair_group(
+            port_pair_group2['id'],
+            port_pairs=[port_pair4['id']])
+        self._wait_for_port_pair_group_status(port_pair_group2, 'ACTIVE')  # fix: wait on the group just updated (was port_pair_group1)
+        self._check_connectivity(
+            server1_floating_ip, server2_fixed_ip,
+            [[self.router4_net1_fixed_ip], [self.router5_net1_fixed_ip]],
+            username=self.ssh_user,
+            private_key=self.keypair['private_key'])
+        self._check_connectivity(
+            server2_floating_ip, server1_fixed_ip,
+            [[self.router5_net1_fixed_ip], [self.router4_net1_fixed_ip]],
+            username=self.ssh_user,
+            private_key=self.keypair['private_key'])
+
+    def _create_server(self, network):  # boot a guest on *network*, verify connectivity, record its id
+        kwargs = {}
+        if self.multi_node:
+            kwargs["scheduler_hints"] = {'different_host': self.servers}  # anti-affinity vs. servers created so far
+
+        inst = self.create_server(
+            networks=[{'uuid': network['id']}],
+            key_name=self.keypair['name'],
+            wait_until='ACTIVE',
+            **kwargs)
+
+        adm_get_server = self.os_admin.servers_client.show_server  # admin view exposes OS-EXT-SRV-ATTR:host
+        server = adm_get_server(inst['id'])['server']
+
+        self._check_tenant_network_connectivity(
+            server, self.ssh_user, self.keypair['private_key'])
+
+        # Check server is on different node
+        if self.multi_node:
+            new_host = server["OS-EXT-SRV-ATTR:host"]
+            host_list = [adm_get_server(s)["server"]["OS-EXT-SRV-ATTR:host"]
+                         for s in self.servers]
+            self.assertNotIn(new_host, host_list,
+                             message="Failed to create servers on different "
+                                     "Compute nodes.")
+
+        self.servers.append(server["id"])  # feeds the different_host hint for the next server
+
+        return server
+
+    def _add_router_interface(self, router_id, port_id):  # attach *port_id* to the router; detach registered on cleanup
+        interface = self.routers_client.add_router_interface(
+            router_id, port_id=port_id)
+        self.addCleanup(self._remove_router_interface, router_id, port_id)
+        return interface
+
+    def _remove_router_interface(self, router_id, port_id):  # cleanup counterpart of _add_router_interface
+        self.routers_client.remove_router_interface(
+            router_id, port_id=port_id)
+
+    def _create_flowclassifier(
+        self, flowclassifier_client=None,
+        **kwargs
+    ):  # create a flow classifier and schedule its deletion
+        if not flowclassifier_client:
+            flowclassifier_client = self.flowclassifier_client  # default to the class-level client
+        result = flowclassifier_client.create_flowclassifier(**kwargs)
+        fc = result['flow_classifier']
+        self.addCleanup(
+            test_utils.call_and_ignore_notfound_exc,
+            flowclassifier_client.delete_flowclassifier, fc['id'])  # best-effort delete; NotFound ignored
+        return fc
+
+    def _create_port_pair(self, portpair_client=None, **kwargs):  # create a port pair and schedule its deletion
+        if not portpair_client:
+            portpair_client = self.portpair_client  # default to the class-level client
+        result = portpair_client.create_port_pair(**kwargs)
+        pp = result['port_pair']
+        self.addCleanup(
+            test_utils.call_and_ignore_notfound_exc,
+            portpair_client.delete_port_pair, pp['id'])  # best-effort delete; NotFound ignored
+        return pp
+
+    def _create_port_pair_group(self, portpairgroup_client=None, **kwargs):  # create a port pair group and schedule its deletion
+        if not portpairgroup_client:
+            portpairgroup_client = self.portpairgroup_client  # default to the class-level client
+        result = portpairgroup_client.create_port_pair_group(**kwargs)
+        pg = result['port_pair_group']
+        self.addCleanup(
+            test_utils.call_and_ignore_notfound_exc,
+            portpairgroup_client.delete_port_pair_group, pg['id'])  # best-effort delete; NotFound ignored
+        return pg
+
+    def _create_port_chain(self, portchain_client=None, **kwargs):  # create a port chain, schedule deletion, wait for ACTIVE
+        if not portchain_client:
+            portchain_client = self.portchain_client  # default to the class-level client
+        result = portchain_client.create_port_chain(**kwargs)
+        pc = result['port_chain']
+        self.addCleanup(
+            test_utils.call_and_ignore_notfound_exc,
+            portchain_client.delete_port_chain, pc['id'])  # best-effort delete; NotFound ignored
+        self._wait_for_port_chain_status(pc, 'ACTIVE')  # unlike the other _create_* helpers, blocks until ACTIVE
+        return pc
diff --git a/neutron_tempest_plugin/sfc/tests/sfc_client.py b/neutron_tempest_plugin/sfc/tests/sfc_client.py
new file mode 100644
index 0000000..a44efaa
--- /dev/null
+++ b/neutron_tempest_plugin/sfc/tests/sfc_client.py
@@ -0,0 +1,100 @@
+# Copyright 2016 Futurewei. All rights reserved.
+# Copyright 2017 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest import config
+
+from neutron_tempest_plugin.sfc.services import sfc_client
+
+CONF = config.CONF
+
+
+class SfcClientMixin(object):  # wires the four SFC API clients onto test classes at resource_setup time
+
+    @classmethod
+    def resource_setup(cls):
+        super(SfcClientMixin, cls).resource_setup()
+        manager = cls.os_admin  # all SFC clients are built with admin credentials
+        cls.portchain_client = (
+            sfc_client.PortChainClient(
+                manager.auth_provider,
+                CONF.network.catalog_type,
+                CONF.network.region or CONF.identity.region,
+                endpoint_type=CONF.network.endpoint_type,
+                build_interval=CONF.network.build_interval,
+                build_timeout=CONF.network.build_timeout,
+                **manager.default_params
+            )
+        )
+        cls.portpairgroup_client = (
+            sfc_client.PortPairGroupClient(
+                manager.auth_provider,
+                CONF.network.catalog_type,
+                CONF.network.region or CONF.identity.region,
+                endpoint_type=CONF.network.endpoint_type,
+                build_interval=CONF.network.build_interval,
+                build_timeout=CONF.network.build_timeout,
+                **manager.default_params
+            )
+        )
+        cls.portpair_client = (
+            sfc_client.PortPairClient(
+                manager.auth_provider,
+                CONF.network.catalog_type,
+                CONF.network.region or CONF.identity.region,
+                endpoint_type=CONF.network.endpoint_type,
+                build_interval=CONF.network.build_interval,
+                build_timeout=CONF.network.build_timeout,
+                **manager.default_params
+            )
+        )
+        cls.sfcgraph_client = (
+            sfc_client.ServiceGraphClient(
+                manager.auth_provider,
+                CONF.network.catalog_type,
+                CONF.network.region or CONF.identity.region,
+                endpoint_type=CONF.network.endpoint_type,
+                build_interval=CONF.network.build_interval,
+                build_timeout=CONF.network.build_timeout,
+                **manager.default_params
+            )
+        )
+
+    @classmethod
+    def create_port_chain(cls, **kwargs):  # bare create; no cleanup registered -- callers own the resource
+        body = cls.portchain_client.create_port_chain(
+            **kwargs)
+        pc = body['port_chain']
+        return pc
+
+    @classmethod
+    def create_port_pair_group(cls, **kwargs):  # bare create; no cleanup registered -- callers own the resource
+        body = cls.portpairgroup_client.create_port_pair_group(
+            **kwargs)
+        pg = body['port_pair_group']
+        return pg
+
+    @classmethod
+    def create_port_pair(cls, **kwargs):  # bare create; no cleanup registered -- callers own the resource
+        body = cls.portpair_client.create_port_pair(
+            **kwargs)
+        pp = body['port_pair']
+        return pp
+
+    @classmethod
+    def create_service_graph(cls, **kwargs):  # bare create; no cleanup registered -- callers own the resource
+        body = cls.sfcgraph_client.create_service_graph(
+            **kwargs)
+        pc = body['service_graph']
+        return pc