Merge "Test updating FIP with a different port"
diff --git a/.zuul.yaml b/.zuul.yaml
index 7839417..17c9e95 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -63,6 +63,7 @@
         - rbac-policies
         - rbac-security-groups
         - router
+        - router-admin-state-down-before-update
         - router_availability_zone
         - security-group
         - segment
@@ -74,6 +75,7 @@
         - standard-attr-tag
         - standard-attr-timestamp
         - subnet_allocation
+        - subnetpool-prefix-ops
         - trunk
         - trunk-details
         - uplink-status-propagation
diff --git a/doc/source/conf.py b/doc/source/conf.py
index c3cdb16..3a4cff9 100755
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -23,7 +23,6 @@
 extensions = [
     'sphinx.ext.autodoc',
     'openstackdocstheme',
-    #'sphinx.ext.intersphinx',
 ]
 
 # autodoc generation is a bit aggressive and a nuisance when doing heavy
@@ -37,7 +36,6 @@
 master_doc = 'index'
 
 # General information about the project.
-project = u'openstack'
 copyright = u'2017, OpenStack Developers'
 
 # openstackdocstheme options
@@ -65,15 +63,15 @@
 html_theme = 'openstackdocs'
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = '%sdoc' % project
+htmlhelp_basename = 'openstackdoc'
 
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title, author, documentclass
 # [howto/manual]).
 latex_documents = [
     ('index',
-     '%s.tex' % project,
-     u'%s Documentation' % project,
+     'openstack.tex',
+     u'openstack Documentation',
      u'OpenStack Developers', 'manual'),
 ]
 
diff --git a/neutron_tempest_plugin/api/test_routers.py b/neutron_tempest_plugin/api/test_routers.py
index 5c98c8b..ab6b0f6 100644
--- a/neutron_tempest_plugin/api/test_routers.py
+++ b/neutron_tempest_plugin/api/test_routers.py
@@ -280,6 +280,62 @@
         self.assertNotIn('ha', show_body['router'])
 
 
+class DvrRoutersTestUpdateDistributedExtended(base_routers.BaseRouterTest):
+
+    required_extensions = ['dvr', 'l3-ha',
+                           'router-admin-state-down-before-update']
+
+    @decorators.idempotent_id('0ffb9973-0c1a-4b76-a1f2-060178057661')
+    def test_convert_centralized_router_to_distributed_extended(self):
+        router_args = {'tenant_id': self.client.tenant_id,
+                       'distributed': False, 'ha': False}
+        router = self.admin_client.create_router(
+            data_utils.rand_name('router'), admin_state_up=True,
+            **router_args)['router']
+        self.addCleanup(self.admin_client.delete_router,
+                        router['id'])
+        self.assertTrue(router['admin_state_up'])
+        self.assertFalse(router['distributed'])
+        # take router down to allow setting the router to distributed
+        update_body = self.admin_client.update_router(router['id'],
+                                                      admin_state_up=False)
+        self.assertFalse(update_body['router']['admin_state_up'])
+        # set the router to distributed
+        update_body = self.admin_client.update_router(router['id'],
+                                                      distributed=True)
+        self.assertTrue(update_body['router']['distributed'])
+        # bring the router back up
+        update_body = self.admin_client.update_router(router['id'],
+                                                      admin_state_up=True)
+        self.assertTrue(update_body['router']['admin_state_up'])
+        self.assertTrue(update_body['router']['distributed'])
+
+    @decorators.idempotent_id('e9a8f55b-c535-44b7-8b0a-20af6a7c2921')
+    def test_convert_distributed_router_to_centralized_extended(self):
+        router_args = {'tenant_id': self.client.tenant_id,
+                       'distributed': True, 'ha': False}
+        router = self.admin_client.create_router(
+            data_utils.rand_name('router'), admin_state_up=True,
+            **router_args)['router']
+        self.addCleanup(self.admin_client.delete_router,
+                        router['id'])
+        self.assertTrue(router['admin_state_up'])
+        self.assertTrue(router['distributed'])
+        # take router down to allow setting the router to centralized
+        update_body = self.admin_client.update_router(router['id'],
+                                                      admin_state_up=False)
+        self.assertFalse(update_body['router']['admin_state_up'])
+        # set router to centralized
+        update_body = self.admin_client.update_router(router['id'],
+                                                      distributed=False)
+        self.assertFalse(update_body['router']['distributed'])
+        # bring router back up
+        update_body = self.admin_client.update_router(router['id'],
+                                                      admin_state_up=True)
+        self.assertTrue(update_body['router']['admin_state_up'])
+        self.assertFalse(update_body['router']['distributed'])
+
+
 class HaRoutersTest(base_routers.BaseRouterTest):
 
     required_extensions = ['l3-ha']
diff --git a/neutron_tempest_plugin/api/test_routers_negative.py b/neutron_tempest_plugin/api/test_routers_negative.py
index bbd6c5d..f085fc9 100644
--- a/neutron_tempest_plugin/api/test_routers_negative.py
+++ b/neutron_tempest_plugin/api/test_routers_negative.py
@@ -80,6 +80,56 @@
                 data_utils.rand_name('router'), distributed=True)
 
 
+class DvrRoutersNegativeTestExtended(RoutersNegativeTestBase):
+
+    required_extensions = ['dvr', 'router-admin-state-down-before-update']
+
+    @decorators.attr(type='negative')
+    @decorators.idempotent_id('5379fe06-e45e-4a4f-8b4a-9e28a924b451')
+    def test_router_update_distributed_returns_exception(self):
+        # create a centralized router
+        router_args = {'tenant_id': self.client.tenant_id,
+                       'distributed': False}
+        router = self.admin_client.create_router(
+            data_utils.rand_name('router'), admin_state_up=True,
+            **router_args)['router']
+        self.assertTrue(router['admin_state_up'])
+        self.assertFalse(router['distributed'])
+        # attempt to set the router to distributed, catch BadRequest exception
+        self.assertRaises(lib_exc.BadRequest,
+                          self.admin_client.update_router,
+                          router['id'],
+                          distributed=True)
+
+    @decorators.attr(type='negative')
+    @decorators.idempotent_id('c277e945-3b39-442d-b149-e2e8cc6a2b40')
+    def test_router_update_centralized_returns_exception(self):
+        # create a centralized router
+        router_args = {'tenant_id': self.client.tenant_id,
+                       'distributed': False}
+        router = self.admin_client.create_router(
+            data_utils.rand_name('router'), admin_state_up=True,
+            **router_args)['router']
+        self.assertTrue(router['admin_state_up'])
+        self.assertFalse(router['distributed'])
+        # take the router down so that distributed can be set to True
+        update_body = self.admin_client.update_router(router['id'],
+                                                      admin_state_up=False)
+        self.assertFalse(update_body['router']['admin_state_up'])
+        update_body = self.admin_client.update_router(router['id'],
+                                                      distributed=True)
+        self.assertTrue(update_body['router']['distributed'])
+        # set admin_state_up=True
+        update_body = self.admin_client.update_router(router['id'],
+                                                      admin_state_up=True)
+        self.assertTrue(update_body['router']['admin_state_up'])
+        # attempt to set the router to centralized, catch BadRequest exception
+        self.assertRaises(lib_exc.BadRequest,
+                          self.admin_client.update_router,
+                          router['id'],
+                          distributed=False)
+
+
 class HaRoutersNegativeTest(RoutersNegativeTestBase):
 
     required_extensions = ['l3-ha']
diff --git a/neutron_tempest_plugin/api/test_subnetpool_prefix_ops.py b/neutron_tempest_plugin/api/test_subnetpool_prefix_ops.py
new file mode 100644
index 0000000..49cce5b
--- /dev/null
+++ b/neutron_tempest_plugin/api/test_subnetpool_prefix_ops.py
@@ -0,0 +1,97 @@
+# Copyright 2019 SUSE LLC
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import netaddr
+from tempest.common import utils
+from tempest.lib import decorators
+
+from neutron_tempest_plugin.api import test_subnetpools
+
+SUBNETPOOL_NAME = 'smoke-subnetpool'
+SUBNET_NAME = 'smoke-subnet'
+
+
+class SubnetPoolPrefixOpsTestMixin(object):
+
+    def _compare_prefix_lists(self, list_expected, list_observed):
+        expected_set = netaddr.IPSet(iterable=list_expected)
+        observed_set = netaddr.IPSet(iterable=list_observed)
+
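+        # Comparing compacted IPSets makes the check independent of how
+        # the backend aggregates or splits the returned prefixes.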
+        # compact the IPSets before comparing them
+        expected_set.compact()
+        observed_set.compact()
+
+        self.assertEqual(expected_set, observed_set)
+
+    @decorators.idempotent_id('b1d56d1f-2818-44ee-b6a3-3c1327c25318')
+    @utils.requires_ext(extension='subnetpool-prefix-ops', service='network')
+    def test_add_remove_prefix(self):
+        created_subnetpool = self._create_subnetpool()
+        req_body = {'prefixes': self.prefixes_to_add}
+
+        # Add a prefix to the subnet pool
+        resp = self.client.add_subnetpool_prefix(created_subnetpool['id'],
+                                                 **req_body)
+        self._compare_prefix_lists(self.prefixes + self.prefixes_to_add,
+                                   resp['prefixes'])
+
+        # Remove the prefix from the subnet pool
+        resp = self.client.remove_subnetpool_prefix(created_subnetpool['id'],
+                                                    **req_body)
+        self._compare_prefix_lists(self.prefixes, resp['prefixes'])
+
+    @decorators.idempotent_id('a36c18fc-10b5-4ebc-ab79-914f826c5bf5')
+    @utils.requires_ext(extension='subnetpool-prefix-ops', service='network')
+    def test_add_overlapping_prefix(self):
+        created_subnetpool = self._create_subnetpool()
+        req_body = {'prefixes': self.overlapping_prefixes}
+
+        # Add an overlapping prefix to the subnet pool
+        resp = self.client.add_subnetpool_prefix(created_subnetpool['id'],
+                                                 **req_body)
+        self._compare_prefix_lists(self.prefixes + self.overlapping_prefixes,
+                                   resp['prefixes'])
+
+
+class SubnetPoolPrefixOpsIpv4Test(test_subnetpools.SubnetPoolsTestBase,
+                                  SubnetPoolPrefixOpsTestMixin):
+
+    prefixes = ['192.168.1.0/24', '10.10.10.0/24']
+    prefixes_to_add = ['192.168.2.0/24']
+    overlapping_prefixes = ['10.10.0.0/16']
+    min_prefixlen = 16
+    ip_version = 4
+
+    @classmethod
+    def resource_setup(cls):
+        super(SubnetPoolPrefixOpsIpv4Test, cls).resource_setup()
+        cls._subnetpool_data = {'prefixes': cls.prefixes,
+                                'min_prefixlen': cls.min_prefixlen}
+
+
+class SubnetPoolPrefixOpsIpv6Test(test_subnetpools.SubnetPoolsTestBase,
+                                  SubnetPoolPrefixOpsTestMixin):
+
+    prefixes = ['2001:db8:1234::/48', '2001:db8:1235::/48']
+    prefixes_to_add = ['2001:db8:4321::/48']
+    overlapping_prefixes = ['2001:db8:1234:1111::/64']
+    min_prefixlen = 48
+    ip_version = 6
+
+    @classmethod
+    def resource_setup(cls):
+        super(SubnetPoolPrefixOpsIpv6Test, cls).resource_setup()
+        cls._subnetpool_data = {'prefixes': cls.prefixes,
+                                'min_prefixlen': cls.min_prefixlen}
diff --git a/neutron_tempest_plugin/bgpvpn/base.py b/neutron_tempest_plugin/bgpvpn/base.py
index aeecbfc..b436a5d 100644
--- a/neutron_tempest_plugin/bgpvpn/base.py
+++ b/neutron_tempest_plugin/bgpvpn/base.py
@@ -72,8 +72,13 @@
     @classmethod
     def skip_checks(cls):
         super(BaseBgpvpnTest, cls).skip_checks()
+        msg = None
         if not utils.is_extension_enabled('bgpvpn', 'network'):
             msg = "Bgpvpn extension not enabled."
+        elif not CONF.bgpvpn.run_bgpvpn_tests:
+            msg = ("Running of bgpvpn related tests is disabled in "
+                   "plugin configuration.")
+        if msg:
             raise cls.skipException(msg)
 
     def create_bgpvpn(self, client, **kwargs):
diff --git a/neutron_tempest_plugin/common/socat.py b/neutron_tempest_plugin/common/socat.py
deleted file mode 100644
index 6bd1fdc..0000000
--- a/neutron_tempest_plugin/common/socat.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright 2018 Red Hat, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-
-COMMAND = 'socat'
-
-
-class SocatAddress(object):
-
-    def __init__(self, address, args=None, options=None):
-        self.address = address
-        self.args = args
-        self.options = options
-
-    @classmethod
-    def udp_datagram(cls, host, port, options=None, ip_version=None):
-        address = 'UDP{}-DATAGRAM'.format(ip_version or '')
-        return cls(address, (host, int(port)), options)
-
-    @classmethod
-    def udp_recvfrom(cls, port, options=None, ip_version=None):
-        address = 'UDP{}-RECVFROM'.format(ip_version or '')
-        return cls(address, (int(port),), options)
-
-    @classmethod
-    def stdio(cls):
-        return cls('STDIO')
-
-    def __str__(self):
-        address = self.address
-        if self.args:
-            address += ':' + ':'.join(str(a) for a in self.args)
-        if self.options:
-            address += ',' + ','.join(str(o) for o in self.options)
-        return address
-
-    def format(self, *args, **kwargs):
-        return str(self).format(*args, **kwargs)
-
-
-STDIO = SocatAddress.stdio()
-
-
-class SocatOption(object):
-
-    def __init__(self, name, *args):
-        self.name = name
-        self.args = args
-
-    @classmethod
-    def bind(cls, host):
-        return cls('bind', host)
-
-    @classmethod
-    def fork(cls):
-        return cls('fork')
-
-    @classmethod
-    def ip_multicast_ttl(cls, ttl):
-        return cls('ip-multicast-ttl', int(ttl))
-
-    @classmethod
-    def ip_multicast_if(cls, interface_address):
-        return cls('ip-multicast-if', interface_address)
-
-    @classmethod
-    def ip_add_membership(cls, multicast_address, interface_address):
-        return cls('ip-add-membership', multicast_address, interface_address)
-
-    def __str__(self):
-        result = self.name
-        args = self.args
-        if args:
-            result += '=' + ':'.join(str(a) for a in args)
-        return result
-
-
-class SocatCommand(object):
-
-    def __init__(self, source=STDIO, destination=STDIO, command=COMMAND):
-        self.source = source
-        self.destination = destination
-        self.command = command
-
-    def __str__(self):
-        words = [self.command, self.source, self.destination]
-        return ' '.join(str(obj) for obj in words)
-
-
-def socat_command(source=STDIO, destination=STDIO, command=COMMAND):
-    command = SocatCommand(source=source, destination=destination,
-                           command=command)
-    return str(command)
diff --git a/neutron_tempest_plugin/config.py b/neutron_tempest_plugin/config.py
index e07c92a..54dc16e 100644
--- a/neutron_tempest_plugin/config.py
+++ b/neutron_tempest_plugin/config.py
@@ -65,6 +65,12 @@
                choices=['None', 'linuxbridge', 'ovs', 'sriov'],
                help='Agent used for devstack@q-agt.service'),
 
+    # Multicast tests settings
+    cfg.StrOpt('multicast_group_range',
+               default='224.0.0.120-224.0.0.250',
+               help='Unallocated multicast IPv4 range that will be used to '
+                    'test multicast support.'),
+
     # Option for feature to connect via SSH to VMs using an intermediate SSH
     # server
     cfg.StrOpt('ssh_proxy_jump_host',
@@ -116,7 +122,16 @@
 for opt in NeutronPluginOptions:
     CONF.register_opt(opt, 'neutron_plugin_options')
 
+# TODO(slaweq): This config option is added to avoid running bgpvpn tests
+# twice on stable branches until stable/stein. We need to remove this config
+# option once stable/stein is EOL. The bgpvpn tempest plugin has been merged
+# into neutron-tempest-plugin in Train; from Train onwards bgpvpn tests will
+# run from neutron-tempest-plugin.
 BgpvpnGroup = [
+    cfg.BoolOpt('run_bgpvpn_tests',
+                default=True,
+                help=("If it is set to False bgpvpn api and scenario tests "
+                      "will be skipped")),
     cfg.IntOpt('min_asn',
                default=100,
                help=("Minimum number for the range of "
@@ -140,6 +155,39 @@
 CONF.register_group(bgpvpn_group)
 CONF.register_opts(BgpvpnGroup, group="bgpvpn")
 
+# TODO(slaweq): This config option is added to avoid running fwaas tests
+# twice on stable branches until stable/stein. We need to remove this config
+# option once stable/stein is EOL. The fwaas tempest plugin has been merged
+# into neutron-tempest-plugin in Train; from Train onwards fwaas tests will
+# run from neutron-tempest-plugin.
+FwaasGroup = [
+    cfg.BoolOpt('run_fwaas_tests',
+                default=True,
+                help=("If it is set to False fwaas api and scenario tests "
+                      "will be skipped")),
+]
+
+fwaas_group = cfg.OptGroup(
+    name="fwaas", title=("Neutron-fwaas Service Options"))
+CONF.register_group(fwaas_group)
+CONF.register_opts(FwaasGroup, group="fwaas")
+
+# TODO(slaweq): This config option is added to avoid running SFC tests twice
+# on stable branches until stable/stein. We need to remove this config option
+# once stable/stein is EOL. The SFC tempest plugin has been merged into
+# neutron-tempest-plugin in Train; from Train onwards SFC tests will run
+# from neutron-tempest-plugin.
+SfcGroup = [
+    cfg.BoolOpt('run_sfc_tests',
+                default=True,
+                help=("If it is set to False SFC api and scenario tests "
+                      "will be skipped")),
+]
+
+sfc_group = cfg.OptGroup(name="sfc", title=("Networking-sfc Service Options"))
+CONF.register_group(sfc_group)
+CONF.register_opts(SfcGroup, group="sfc")
+
 config_opts_translator = {
     'project_network_cidr': 'tenant_network_cidr',
     'project_network_v6_cidr': 'tenant_network_v6_cidr',
diff --git a/neutron_tempest_plugin/fwaas/api/fwaas_v2_base.py b/neutron_tempest_plugin/fwaas/api/fwaas_v2_base.py
index 7a39978..f4f63ec 100644
--- a/neutron_tempest_plugin/fwaas/api/fwaas_v2_base.py
+++ b/neutron_tempest_plugin/fwaas/api/fwaas_v2_base.py
@@ -13,9 +13,21 @@
 #    under the License.
 
 from tempest.api.network import base
+from tempest import config
 
 from neutron_tempest_plugin.fwaas.common import fwaas_v2_client
 
+CONF = config.CONF
+
 
 class BaseFWaaSTest(fwaas_v2_client.FWaaSClientMixin, base.BaseNetworkTest):
-    pass
+
+    @classmethod
+    def skip_checks(cls):
+        super(BaseFWaaSTest, cls).skip_checks()
+        msg = None
+        if not CONF.fwaas.run_fwaas_tests:
+            msg = ("Running of fwaas related tests is disabled in "
+                   "plugin configuration.")
+        if msg:
+            raise cls.skipException(msg)
diff --git a/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_manager.py b/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_manager.py
index 01ca8c5..5ead2a7 100644
--- a/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_manager.py
+++ b/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_manager.py
@@ -41,6 +41,16 @@
     credentials = ['primary']
 
     @classmethod
+    def skip_checks(cls):
+        super(ScenarioTest, cls).skip_checks()
+        msg = None
+        if not CONF.fwaas.run_fwaas_tests:
+            msg = ("Running of fwaas related tests is disabled in "
+                   "plugin configuration.")
+        if msg:
+            raise cls.skipException(msg)
+
+    @classmethod
     def setup_clients(cls):
         super(ScenarioTest, cls).setup_clients()
         # Clients (in alphabetical order)
diff --git a/neutron_tempest_plugin/scenario/test_multicast.py b/neutron_tempest_plugin/scenario/test_multicast.py
new file mode 100644
index 0000000..cfaa73f
--- /dev/null
+++ b/neutron_tempest_plugin/scenario/test_multicast.py
@@ -0,0 +1,297 @@
+# Copyright 2018 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import netaddr
+from neutron_lib import constants
+from oslo_log import log
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+
+from neutron_tempest_plugin.common import ssh
+from neutron_tempest_plugin.common import utils
+from neutron_tempest_plugin import config
+from neutron_tempest_plugin.scenario import base
+
+
+CONF = config.CONF
+LOG = log.getLogger(__name__)
+
+
+def get_receiver_script(group, port, hello_message, ack_message, result_file):
+
+    return """
+import socket
+import struct
+import sys
+
+multicast_group = '%(group)s'
+server_address = ('', %(port)s)
+
+# Create the socket
+sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
+
+# Bind to the server address
+sock.bind(server_address)
+
+# Tell the operating system to add the socket to the multicast group
+# on all interfaces.
+group = socket.inet_aton(multicast_group)
+mreq = struct.pack('4sL', group, socket.INADDR_ANY)
+sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
+
+# Receive/respond loop
+with open('%(result_file)s', 'w') as f:
+    f.write('%(hello_message)s')
+    f.flush()
+    data, address = sock.recvfrom(1024)
+    f.write('received ' + str(len(data)) + ' bytes from ' + str(address))
+    f.write(str(data))
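+# Send the acknowledgement (this receiver's server ID) back to the sender's
+# address so the sender can record which receivers answered.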
+sock.sendto(b'%(ack_message)s', address)
+    """ % {'group': group,
+           'port': port,
+           'hello_message': hello_message,
+           'ack_message': ack_message,
+           'result_file': result_file}
+
+
+def get_sender_script(group, port, message, result_file):
+
+    return """
+import socket
+import sys
+
+message = b'%(message)s'
+multicast_group = ('%(group)s', %(port)s)
+
+# Create the datagram socket
+sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
+# Set the time-to-live for messages to 1 so they do not go past the
+# local network segment.
+sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 1)
+
+# Set a timeout so the socket does not block indefinitely when trying
+# to receive data.
+sock.settimeout(1)
+
+with open('%(result_file)s', 'w') as f:
+    try:
+        # Send data to the multicast group
+        sent = sock.sendto(message, multicast_group)
+
+        # Look for responses from all recipients
+        while True:
+            try:
+                data, server = sock.recvfrom(1024)
+            except socket.timeout:
+                f.write('timed out, no more responses')
+                break
+            else:
+                f.write('received reply ' + str(data) + ' from ' + str(server))
+    finally:
+        sys.stdout.write('closing socket')
+        sock.close()
+    """ % {'group': group,
+           'port': port,
+           'message': message,
+           'result_file': result_file}
+
+
+class BaseMulticastTest(object):
+
+    credentials = ['primary']
+    force_tenant_isolation = False
+
+    # Import configuration options
+    available_type_drivers = (
+        CONF.neutron_plugin_options.available_type_drivers)
+
+    hello_message = "I am waiting..."
+    multicast_port = 5007
+    multicast_message = "Big Bang"
+    receiver_output_file = "/tmp/receiver_mcast_out"
+    sender_output_file = "/tmp/sender_mcast_out"
+
+    @classmethod
+    def skip_checks(cls):
+        super(BaseMulticastTest, cls).skip_checks()
+        advanced_image_available = (
+            CONF.neutron_plugin_options.advanced_image_ref or
+            CONF.neutron_plugin_options.default_image_is_advanced)
+        if not advanced_image_available:
+            skip_reason = "This test require advanced tools for this test"
+            raise cls.skipException(skip_reason)
+
+    @classmethod
+    def resource_setup(cls):
+        super(BaseMulticastTest, cls).resource_setup()
+
+        if CONF.neutron_plugin_options.default_image_is_advanced:
+            cls.flavor_ref = CONF.compute.flavor_ref
+            cls.image_ref = CONF.compute.image_ref
+            cls.username = CONF.validation.image_ssh_user
+        else:
+            cls.flavor_ref = (
+                CONF.neutron_plugin_options.advanced_image_flavor_ref)
+            cls.image_ref = CONF.neutron_plugin_options.advanced_image_ref
+            cls.username = CONF.neutron_plugin_options.advanced_image_ssh_user
+
+        # set up a basic topology so that we can log into the servers
+        cls.network = cls.create_network()
+        cls.subnet = cls.create_subnet(cls.network)
+        cls.router = cls.create_router_by_client()
+        cls.create_router_interface(cls.router['id'], cls.subnet['id'])
+
+        cls.keypair = cls.create_keypair()
+
+        cls.secgroup = cls.os_primary.network_client.create_security_group(
+            name='secgroup_mtu')
+        cls.security_groups.append(cls.secgroup['security_group'])
+        cls.create_loginable_secgroup_rule(
+            secgroup_id=cls.secgroup['security_group']['id'])
+        cls.create_pingable_secgroup_rule(
+            secgroup_id=cls.secgroup['security_group']['id'])
+        # Create security group rule for UDP (multicast traffic)
+        cls.create_secgroup_rules(
+            rule_list=[dict(protocol=constants.PROTO_NAME_UDP,
+                            direction=constants.INGRESS_DIRECTION,
+                            remote_ip_prefix=cls.any_addresses,
+                            ethertype=cls.ethertype)],
+            secgroup_id=cls.secgroup['security_group']['id'])
+
+        # Multicast IP range to be used for multicast group IP assignment
+        if '-' in cls.multicast_group_range:
+            multicast_group_range = netaddr.IPRange(
+                *cls.multicast_group_range.split('-'))
+        else:
+            multicast_group_range = netaddr.IPNetwork(
+                cls.multicast_group_range)
+        cls.multicast_group_iter = iter(multicast_group_range)
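+        # Each test case takes a fresh group address from this iterator
+        # (via next()), so each test uses a distinct multicast group.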
+
+    def _create_server(self):
+        name = data_utils.rand_name("multicast-server")
+        server = self.create_server(
+            flavor_ref=self.flavor_ref,
+            image_ref=self.image_ref,
+            key_name=self.keypair['name'], name=name,
+            networks=[{'uuid': self.network['id']}],
+            security_groups=[{'name': self.secgroup['security_group']['name']}]
+        )['server']
+        self.wait_for_server_active(server)
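+        # Look up the server's port and attach a floating IP to it; the
+        # floating IP is what the tests SSH to when running the scripts.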
+        port = self.client.list_ports(
+            network_id=self.network['id'], device_id=server['id'])['ports'][0]
+        server['fip'] = self.create_floatingip(port=port)
+        return server
+
+    def _prepare_sender(self, server, mcast_address):
+        check_script = get_sender_script(
+            group=mcast_address, port=self.multicast_port,
+            message=self.multicast_message,
+            result_file=self.sender_output_file)
+        ssh_client = ssh.Client(server['fip']['floating_ip_address'],
+                                self.username,
+                                pkey=self.keypair['private_key'])
+
+        ssh_client.execute_script(
+            'echo "%s" > ~/multicast_traffic_sender.py' % check_script)
+        return ssh_client
+
+    def _prepare_receiver(self, server, mcast_address):
+        check_script = get_receiver_script(
+            group=mcast_address, port=self.multicast_port,
+            hello_message=self.hello_message, ack_message=server['id'],
+            result_file=self.receiver_output_file)
+        ssh_client = ssh.Client(
+            server['fip']['floating_ip_address'],
+            self.username,
+            pkey=self.keypair['private_key'])
+        ssh_client.execute_script(
+            'echo "%s" > ~/multicast_traffic_receiver.py' % check_script)
+        return ssh_client
+
+    @decorators.idempotent_id('113486fc-24c9-4be4-8361-03b1c9892867')
+    def test_multicast_between_vms_on_same_network(self):
+        """Test multicast messaging between two servers on the same network
+
+        [Sender server] -> (Multicast network) -> [Receiver server]
+        """
+        sender = self._create_server()
+        receivers = [self._create_server() for _ in range(1)]
+        # The sender can also be a receiver of the multicast traffic
+        receivers.append(sender)
+        self._check_multicast_connectivity(sender=sender, receivers=receivers)
+
+    def _check_multicast_connectivity(self, sender, receivers):
+        """Test multi-cast messaging between two servers
+
+        [Sender server] -> ... some network topology ... -> [Receiver server]
+        """
+        mcast_address = next(self.multicast_group_iter)
+        LOG.debug("Multicast group address: %s", mcast_address)
+
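+        # Helper polled by wait_until_true: read the output file on the
+        # guest over SSH and check whether the expected message is there yet.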
+        def _message_received(client, msg, file_path):
+            result = client.execute_script(
+                "cat {path} || echo '{path} not exists yet'".format(
+                    path=file_path))
+            return msg in result
+
+        sender_ssh_client = self._prepare_sender(sender, mcast_address)
+        receiver_ssh_clients = []
+        receiver_ids = []
+        for receiver in receivers:
+            receiver_ssh_client = self._prepare_receiver(
+                receiver, mcast_address)
+            receiver_ssh_client.execute_script(
+                "python3 ~/multicast_traffic_receiver.py &", shell="bash")
+            utils.wait_until_true(
+                lambda: _message_received(
+                    receiver_ssh_client, self.hello_message,
+                    self.receiver_output_file),
+                exception=RuntimeError(
+                    "Receiver script didn't start properly on server "
+                    "{!r}.".format(receiver['id'])))
+
+            receiver_ssh_clients.append(receiver_ssh_client)
+            receiver_ids.append(receiver['id'])
+
+        # Now let's run the script on the sender
+        sender_ssh_client.execute_script(
+            "python3 ~/multicast_traffic_sender.py")
+
+        # And check if message was received
+        for idx, receiver_ssh_client in enumerate(receiver_ssh_clients):
+            utils.wait_until_true(
+                lambda: _message_received(
+                    receiver_ssh_client, self.multicast_message,
+                    self.receiver_output_file),
+                exception=RuntimeError(
+                    "Receiver {!r} didn't get multicast message".format(
+                        receiver_ids[idx])))
+
+        # TODO(slaweq): add validation of answers on the sender server
+        replies_result = sender_ssh_client.execute_script(
+            "cat {path} || echo '{path} not exists yet'".format(
+                path=self.sender_output_file))
+        for receiver_id in receiver_ids:
+            self.assertIn(receiver_id, replies_result)
+
+
+class MulticastTestIPv4(BaseMulticastTest, base.BaseTempestTestCase):
+
+    # Import configuration options
+    multicast_group_range = CONF.neutron_plugin_options.multicast_group_range
+
+    # IP version specific parameters
+    _ip_version = constants.IP_VERSION_4
+    any_addresses = constants.IPv4_ANY
diff --git a/neutron_tempest_plugin/services/network/json/network_client.py b/neutron_tempest_plugin/services/network/json/network_client.py
index 422b071..11ba8ef 100644
--- a/neutron_tempest_plugin/services/network/json/network_client.py
+++ b/neutron_tempest_plugin/services/network/json/network_client.py
@@ -227,6 +227,23 @@
         self.expected_success(200, resp.status)
         return service_client.ResponseBody(resp, body)
 
+    def add_subnetpool_prefix(self, id, **kwargs):
+        return self._subnetpool_prefix_operation(id, 'add_prefixes', kwargs)
+
+    def remove_subnetpool_prefix(self, id, **kwargs):
+        return self._subnetpool_prefix_operation(id,
+                                                 'remove_prefixes',
+                                                 kwargs)
+
+    def _subnetpool_prefix_operation(self, id, operation, op_body):
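+        # Both prefix operations map to a PUT on the subnet pool's
+        # 'add_prefixes' / 'remove_prefixes' sub-resource; the response
+        # body contains the subnet pool's updated prefix list.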
+        uri = self.get_uri("subnetpools")
+        op_prefix_uri = '%s/%s/%s' % (uri, id, operation)
+        body = jsonutils.dumps(op_body)
+        resp, body = self.put(op_prefix_uri, body)
+        body = jsonutils.loads(body)
+        self.expected_success(200, resp.status)
+        return service_client.ResponseBody(resp, body)
+
     # Common methods that are hard to automate
     def create_bulk_network(self, names, shared=False):
         network_list = [{'name': name, 'shared': shared} for name in names]
diff --git a/neutron_tempest_plugin/sfc/tests/api/base.py b/neutron_tempest_plugin/sfc/tests/api/base.py
index 732e2dc..606aed6 100644
--- a/neutron_tempest_plugin/sfc/tests/api/base.py
+++ b/neutron_tempest_plugin/sfc/tests/api/base.py
@@ -18,17 +18,31 @@
 import netaddr
 from tempest.api.network import base
 from tempest.common import utils
+from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib import exceptions as lib_exc
 
 from neutron_tempest_plugin.sfc.tests import flowclassifier_client
 from neutron_tempest_plugin.sfc.tests import sfc_client
 
+CONF = config.CONF
+
 
 class BaseFlowClassifierTest(
     flowclassifier_client.FlowClassifierClientMixin,
     base.BaseAdminNetworkTest
 ):
+
+    @classmethod
+    def skip_checks(cls):
+        super(BaseFlowClassifierTest, cls).skip_checks()
+        msg = None
+        if not CONF.sfc.run_sfc_tests:
+            msg = ("Running of SFC related tests is disabled in "
+                   "plugin configuration.")
+        if msg:
+            raise cls.skipException(msg)
+
     @classmethod
     def resource_setup(cls):
         super(BaseFlowClassifierTest, cls).resource_setup()
diff --git a/neutron_tempest_plugin/sfc/tests/scenario/base.py b/neutron_tempest_plugin/sfc/tests/scenario/base.py
index d4cff18..44b5cd2 100644
--- a/neutron_tempest_plugin/sfc/tests/scenario/base.py
+++ b/neutron_tempest_plugin/sfc/tests/scenario/base.py
@@ -30,6 +30,17 @@
     sfc_client.SfcClientMixin,
     manager.NetworkScenarioTest
 ):
+
+    @classmethod
+    def skip_checks(cls):
+        super(SfcScenarioTest, cls).skip_checks()
+        msg = None
+        if not CONF.sfc.run_sfc_tests:
+            msg = ("Running of SFC related tests is disabled in "
+                   "plugin configuration.")
+        if msg:
+            raise cls.skipException(msg)
+
     def _check_connectivity(
         self, source_ip, destination_ip, routes=None,
         username=None, private_key=None
diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py
index 66d6a9e..770396a 100644
--- a/releasenotes/source/conf.py
+++ b/releasenotes/source/conf.py
@@ -52,7 +52,6 @@
 master_doc = 'index'
 
 # General information about the project.
-project = u'Neutron Tempest Plugin Release Notes'
 copyright = u'2017, Neutron Tempest Plugin Developers'
 
 # openstackdocstheme options
@@ -60,16 +59,6 @@
 bug_project = 'neutron'
 bug_tag = ''
 
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-# The full version, including alpha/beta/rc tags.
-release = ''
-# The short X.Y version.
-version = ''
-
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
 # language = None
@@ -196,17 +185,6 @@
 
 # -- Options for LaTeX output ---------------------------------------------
 
-latex_elements = {
-    # The paper size ('letterpaper' or 'a4paper').
-    # 'papersize': 'letterpaper',
-
-    # The font size ('10pt', '11pt' or '12pt').
-    # 'pointsize': '10pt',
-
-    # Additional stuff for the LaTeX preamble.
-    # 'preamble': '',
-}
-
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title,
 #  author, documentclass [howto, manual, or own class]).
diff --git a/test-requirements.txt b/test-requirements.txt
index 20b29f4..8b251f6 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -8,10 +8,10 @@
 flake8-import-order==0.12 # LGPLv3
 python-subunit>=1.0.0 # Apache-2.0/BSD
 sphinx!=1.6.6,!=1.6.7,>=1.6.2,<2.0.0;python_version=='2.7'  # BSD
-sphinx!=1.6.6,!=1.6.7,>=1.6.2;python_version>='3.4'  # BSD
+sphinx!=1.6.6,!=1.6.7,!=2.1.0,>=1.6.2;python_version>='3.4'  # BSD
 oslotest>=3.2.0 # Apache-2.0
 stestr>=1.0.0 # Apache-2.0
 testtools>=2.2.0 # MIT
-openstackdocstheme>=1.18.1 # Apache-2.0
+openstackdocstheme>=1.20.0 # Apache-2.0
 # releasenotes
 reno>=2.5.0 # Apache-2.0