Merge "QoS - Change the way we measure bw limits"
diff --git a/.zuul.yaml b/.zuul.yaml
index a2c317f..2e99198 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -80,6 +80,7 @@
         - subnet_allocation
         - subnet-dns-publish-fixed-ip
         - subnetpool-prefix-ops
+        - tag-ports-during-bulk-creation
         - trunk
         - trunk-details
         - uplink-status-propagation
@@ -104,6 +105,7 @@
         neutron-network-segment-range: true
         neutron-port-forwarding: true
         neutron-conntrack-helper: true
+        neutron-tag-ports-during-bulk-creation: true
       devstack_local_conf:
         post-config:
           $NEUTRON_CONF:
@@ -251,6 +253,12 @@
     description: |
      This job runs on py2 for the stable/rocky gate.
     override-checkout: stable/rocky
+    required-projects: &required-projects-rocky
+      - openstack/devstack-gate
+      - openstack/neutron
+      - name: openstack/neutron-tempest-plugin
+        override-checkout: 0.9.0
+      - openstack/tempest
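+      # NOTE: this anchor is reused by all rocky jobs below, so each of
+      # them pins neutron-tempest-plugin to the 0.9.0 tag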
     vars: &api_vars_rocky
       branch_override: stable/rocky
       # TODO(slaweq): find a way to put this list of extensions in
@@ -331,6 +339,7 @@
      This job runs on py3 for gates other than stable/rocky,
      which is nothing but the neutron-tempest-plugin master gate.
     override-checkout: stable/rocky
+    required-projects: *required-projects-rocky
     vars:
       <<: *api_vars_rocky
       devstack_localrc:
@@ -575,6 +584,7 @@
      This job runs on py2 for the stable/rocky gate.
     nodeset: openstack-single-node-xenial
     override-checkout: stable/rocky
+    required-projects: *required-projects-rocky
     vars: &scenario_vars_rocky
       branch_override: stable/rocky
       network_api_extensions: *api_extensions_rocky
@@ -582,6 +592,10 @@
         USE_PYTHON3: false
         NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
         TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
+      # NOTE(bcafarel): newer tests, unstable on rocky branch
+      tempest_black_regex: "\
+          (^neutron_tempest_plugin.scenario.test_port_forwardings.PortForwardingTestJSON.test_port_forwarding_to_2_servers)|\
+          (^neutron_tempest_plugin.scenario.test_security_groups.NetworkSecGroupTest.test_multiple_ports_portrange_remote)"
     branches:
       - stable/rocky
 
@@ -593,6 +607,7 @@
      This job runs on py3 for gates other than stable/rocky,
      which is nothing but the neutron-tempest-plugin master gate.
     override-checkout: stable/rocky
+    required-projects: *required-projects-rocky
     vars:
       <<: *scenario_vars_rocky
       devstack_localrc:
@@ -659,6 +674,7 @@
     description: |
      This job runs on py2 for the stable/rocky gate.
     override-checkout: stable/rocky
+    required-projects: *required-projects-rocky
     vars: &openvswitch_vars_rocky
       branch_override: stable/rocky
       network_api_extensions: *api_extensions_rocky
@@ -666,6 +682,13 @@
         USE_PYTHON3: false
         NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
         TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
+      # TODO(bcafarel): remove the trunk subport_connectivity test from the
+      # blacklist when bug https://bugs.launchpad.net/neutron/+bug/1838760
+      # is fixed
+      # NOTE(bcafarel): the others are newer tests, unstable on the rocky
+      # branch
+      tempest_black_regex: "\
+          (^neutron_tempest_plugin.scenario.test_trunk.TrunkTest.test_subport_connectivity)|\
+          (^neutron_tempest_plugin.scenario.test_port_forwardings.PortForwardingTestJSON.test_port_forwarding_to_2_servers)|\
+          (^neutron_tempest_plugin.scenario.test_security_groups.NetworkSecGroupTest.test_multiple_ports_portrange_remote)"
     branches:
       - stable/rocky
 
@@ -677,6 +700,7 @@
      This job runs on py3 for gates other than stable/rocky,
      which is nothing but the neutron-tempest-plugin master gate.
     override-checkout: stable/rocky
+    required-projects: *required-projects-rocky
     vars:
       <<: *openvswitch_vars_rocky
       devstack_localrc:
@@ -769,6 +793,7 @@
      This job runs on py2 for the stable/rocky gate.
     nodeset: openstack-single-node-xenial
     override-checkout: stable/rocky
+    required-projects: *required-projects-rocky
     vars: &linuxbridge_vars_rocky
       branch_override: stable/rocky
       network_api_extensions: *api_extensions_rocky
@@ -784,6 +809,10 @@
           $TEMPEST_CONFIG:
             neutron_plugin_options:
               q_agent: None
+      # NOTE(bcafarel): newer tests, unstable on rocky branch
+      tempest_black_regex: "\
+          (^neutron_tempest_plugin.scenario.test_port_forwardings.PortForwardingTestJSON.test_port_forwarding_to_2_servers)|\
+          (^neutron_tempest_plugin.scenario.test_security_groups.NetworkSecGroupTest.test_multiple_ports_portrange_remote)"
     branches:
       - stable/rocky
 
@@ -795,6 +824,7 @@
      This job runs on py3 for gates other than stable/rocky,
      which is nothing but the neutron-tempest-plugin master gate.
     override-checkout: stable/rocky
+    required-projects: *required-projects-rocky
     vars:
       <<: *linuxbridge_vars_rocky
       devstack_localrc:
@@ -985,12 +1015,17 @@
      This job runs on py2 for the stable/rocky gate.
     nodeset: openstack-two-node-xenial
     override-checkout: stable/rocky
+    required-projects: *required-projects-rocky
     vars: &multinode_scenario_vars_rocky
       branch_override: stable/rocky
       network_api_extensions_common: *api_extensions_rocky
       devstack_localrc:
         USE_PYTHON3: false
         TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
+      # NOTE(bcafarel): newer tests, unstable on rocky branch
+      tempest_black_regex: "\
+          (^neutron_tempest_plugin.scenario.test_port_forwardings.PortForwardingTestJSON.test_port_forwarding_to_2_servers)|\
+          (^neutron_tempest_plugin.scenario.test_security_groups.NetworkSecGroupTest.test_multiple_ports_portrange_remote)"
     branches:
       - stable/rocky
 
@@ -1006,6 +1041,7 @@
       <<: *multinode_scenario_vars_rocky
       devstack_localrc:
         USE_PYTHON3: True
+    required-projects: *required-projects-rocky
     group-vars:
       subnode:
         devstack_localrc:
@@ -1103,7 +1139,8 @@
     required-projects:
       - openstack/devstack-gate
       - openstack/neutron
-      - openstack/neutron-tempest-plugin
+      - name: openstack/neutron-tempest-plugin
+        override-checkout: 0.9.0
       - name: openstack/designate-tempest-plugin
         override-checkout: 0.7.0
       - openstack/tempest
@@ -1124,6 +1161,7 @@
      This job runs on py3 for gates other than stable/rocky,
      which is nothing but the neutron-tempest-plugin master gate.
     override-checkout: stable/rocky
+    required-projects: *required-projects-rocky
     vars:
       <<: *designate_scenario_vars_rocky
       devstack_localrc:
@@ -1379,9 +1417,6 @@
     templates:
       - build-openstack-docs-pti
       - neutron-tempest-plugin-jobs
-      # TODO(slaweq): bring rocky jobs back when dropping py27
-      # drama will be finally over
-      # - neutron-tempest-plugin-jobs-rocky
       - neutron-tempest-plugin-jobs-stein
       - neutron-tempest-plugin-jobs-train
       - check-requirements
diff --git a/neutron_tempest_plugin/api/test_ports.py b/neutron_tempest_plugin/api/test_ports.py
index 52783b9..8867eee 100644
--- a/neutron_tempest_plugin/api/test_ports.py
+++ b/neutron_tempest_plugin/api/test_ports.py
@@ -13,6 +13,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import copy
+
 from tempest.common import utils
 from tempest.lib import decorators
 
@@ -203,3 +205,62 @@
     @decorators.idempotent_id('74293e59-d794-4a93-be09-38667199ef68')
     def test_list_pagination_page_reverse_with_href_links(self):
         self._test_list_pagination_page_reverse_with_href_links()
+
+
+class PortsTaggingOnCreationTestJSON(base.BaseNetworkTest):
+
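+    # Tag sets applied to the bulk-created ports; the empty list checks
+    # that a port created with no tags stays untagged.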
+    _tags = [
+        ['tag-1', 'tag-2', 'tag-3'],
+        ['tag-1', 'tag-2'],
+        ['tag-1', 'tag-3'],
+        []
+    ]
+
+    @classmethod
+    def resource_setup(cls):
+        super(PortsTaggingOnCreationTestJSON, cls).resource_setup()
+        cls.network = cls.create_network()
+
+    def _create_ports_in_bulk(self, ports):
+        body = self.client.create_bulk_port(ports)
+        for port in body['ports']:
+            self.ports.append(port)
+        return body
+
+    def _create_ports_list(self):
+        num_ports = len(self._tags)
+        net_id = self.network['id']
+        port = {'port': {'network_id': net_id,
+                         'admin_state_up': True}}
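+        # copy.deepcopy gives every bulk entry its own nested 'port' dict,
+        # so tags set on one entry do not leak into the others.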
+        return [copy.deepcopy(port) for _ in range(num_ports)]
+
+    @decorators.idempotent_id('5cf26014-fdd3-4a6d-b94d-a05f0c55da89')
+    @utils.requires_ext(extension="tag-ports-during-bulk-creation",
+                        service="network")
+    def test_tagging_ports_during_bulk_creation(self):
+        ports = self._create_ports_list()
+        ports_tags_map = {}
+        for port, tags in zip(ports, self._tags):
+            port['port']['tags'] = tags
+            port['port']['name'] = '-'.join(tags)
+            ports_tags_map[port['port']['name']] = tags
+        body = self._create_ports_in_bulk(ports)
+        for port in body['ports']:
+            self.assertEqual(ports_tags_map[port['name']], port['tags'])
+
+    @decorators.idempotent_id('33eda785-a08a-44a0-1bbb-fb50a2f1cd78')
+    @utils.requires_ext(extension="tag-ports-during-bulk-creation",
+                        service="network")
+    def test_tagging_ports_during_bulk_creation_no_tags(self):
+        ports = self._create_ports_list()
+        body = self._create_ports_in_bulk(ports)
+        for port in body['ports']:
+            self.assertFalse(port['tags'])
+
+    @decorators.idempotent_id('6baa43bf-88fb-8bca-6051-97ea1a5e8f4f')
+    @utils.requires_ext(extension="tag-ports-during-bulk-creation",
+                        service="network")
+    def test_tagging_ports_during_creation(self):
+        port = {'name': 'port', 'tags': self._tags[0]}
+        body = self.create_port(self.network, **port)
+        self.assertEqual(self._tags[0], body['tags'])
diff --git a/neutron_tempest_plugin/api/test_security_groups.py b/neutron_tempest_plugin/api/test_security_groups.py
index 67925f7..7ea9f82 100644
--- a/neutron_tempest_plugin/api/test_security_groups.py
+++ b/neutron_tempest_plugin/api/test_security_groups.py
@@ -17,12 +17,16 @@
 
 from neutron_lib import constants
 from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
 from tempest.lib import decorators
 from tempest.lib import exceptions
 import testtools
 
 from neutron_tempest_plugin.api import base
 from neutron_tempest_plugin.api import base_security_groups
+from oslo_log import log
+
+LOG = log.getLogger(__name__)
 
 
 class SecGroupTest(base.BaseAdminNetworkTest):
@@ -154,8 +158,7 @@
         project_id = self.client.tenant_id
         self.admin_client.update_quotas(project_id, **{'security_group': val})
         self.addCleanup(self.admin_client.update_quotas,
-                project_id,
-                **{'security_group': sg_quota})
+                        project_id, **{'security_group': sg_quota})
 
     def _get_sg_quota(self):
         project_id = self.client.tenant_id
@@ -193,9 +196,9 @@
         self._create_max_allowed_sg_amount()
         quota_set = self._get_sg_quota()
         self.assertEqual(quota_set, new_quota,
-                "Security group quota was not changed correctly")
+                         "Security group quota was not changed correctly")
         self.assertEqual(quota_set, self._get_sg_amount(),
-                "Amount of security groups doesn't match quota")
+                         "Amount of security groups doesn't match quota")
 
     @decorators.idempotent_id('ba95676c-8d9a-4482-b4ec-74d51a4602a6')
     def test_sg_quota_decrease_less_than_created(self):
@@ -212,6 +215,85 @@
         self.assertGreater(new_sg_amount, sg_amount)
 
 
+class BaseSecGroupRulesQuota(base.BaseAdminNetworkTest):
+
+    def _create_max_allowed_sg_rules_amount(self, port_index=1):
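+        # Create as many rules as the quota still allows; each rule opens
+        # a distinct TCP port so the rules stay unique.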
+        sg_rules_amount = self._get_sg_rules_amount()
+        sg_rules_quota = self._get_sg_rules_quota()
+        sg_rules_to_create = sg_rules_quota - sg_rules_amount
+        port_index += sg_rules_to_create
+        self._create_security_group_rules(sg_rules_to_create,
+                                          port_index=port_index)
+
+    def _create_security_group_rules(self, amount, port_index=1):
+        for i in range(amount):
+            self.create_security_group_rule(**{
+                'project_id': self.client.tenant_id,
+                'direction': 'ingress',
+                'port_range_max': port_index + i,
+                'port_range_min': port_index + i,
+                'protocol': 'tcp'})
+
+    def _increase_sg_rules_quota(self):
+        sg_rules_quota = self._get_sg_rules_quota()
+        new_sg_rules_quota = 2 * sg_rules_quota
+        self._set_sg_rules_quota(new_sg_rules_quota)
+        self.assertGreater(self._get_sg_rules_quota(), sg_rules_quota,
+                           "Security group rules quota wasn't changed "
+                           "correctly")
+        return new_sg_rules_quota
+
+    def _decrease_sg_rules_quota(self):
+        sg_rules_quota = self._get_sg_rules_quota()
+        new_sg_rules_quota = sg_rules_quota // 2
+        self._set_sg_rules_quota(new_sg_rules_quota)
+        return new_sg_rules_quota
+
+    def _set_sg_rules_quota(self, val):
+        project_id = self.client.tenant_id
+        self.admin_client.update_quotas(project_id,
+                                        **{'security_group_rule': val})
+        LOG.info('Updated security group rule quota to %s', val)
+
+    def _get_sg_rules_quota(self):
+        project_id = self.client.tenant_id
+        quotas = self.admin_client.show_quotas(project_id)
+        return quotas['quota']['security_group_rule']
+
+    def _get_sg_rules_amount(self):
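+        # Count every rule owned by the project, including the rules that
+        # come with the project's default security group.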
+        project_id = self.client.tenant_id
+        filter_query = {'project_id': project_id}
+        security_group_rules = self.client.list_security_group_rules(
+                **filter_query)
+        return len(security_group_rules['security_group_rules'])
+
+
+class SecGroupRulesQuotaTest(BaseSecGroupRulesQuota):
+
+    credentials = ['primary', 'admin']
+    required_extensions = ['security-group', 'quotas']
+
+    def setUp(self):
+        super(SecGroupRulesQuotaTest, self).setUp()
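+        # Restore the project's default quotas on cleanup, then start each
+        # test from a small, known rule quota.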
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.admin_client.reset_quotas, self.client.tenant_id)
+        self._set_sg_rules_quota(10)
+
+    @decorators.idempotent_id('77ec038c-5638-11ea-8e2d-0242ac130003')
+    def test_sg_rules_quota_increased(self):
+        """Test security group rules quota increased.
+
+        This test checks if it is possible to increase the SG rules Quota
+        value and creates security group rules according to new quota value.
+        """
+        self._create_max_allowed_sg_rules_amount()
+        new_quota = self._increase_sg_rules_quota()
+        port_index = new_quota
+        self._create_max_allowed_sg_rules_amount(port_index)
+        quota_set = self._get_sg_rules_quota()
+        self.assertEqual(quota_set, self._get_sg_rules_amount(),
+                         "Amount of security group rules doesn't match quota")
+
+
 class SecGroupProtocolTest(base.BaseNetworkTest):
 
     protocol_names = base_security_groups.V4_PROTOCOL_NAMES
@@ -365,7 +447,7 @@
         # ensure that 'client2' can't see the rbac-policy sharing the
         # sg to it because the rbac-policy belongs to 'client'
         self.assertNotIn(rbac_policy['id'], [p['id'] for p in
-                          self.client2.list_rbac_policies()['rbac_policies']])
+                         self.client2.list_rbac_policies()['rbac_policies']])
 
     @decorators.idempotent_id('2a9fd480-2a35-11e9-9cb6-acde48001122')
     def test_filter_fields(self):
diff --git a/neutron_tempest_plugin/scenario/base.py b/neutron_tempest_plugin/scenario/base.py
index 7b66494..fa91b31 100644
--- a/neutron_tempest_plugin/scenario/base.py
+++ b/neutron_tempest_plugin/scenario/base.py
@@ -273,6 +273,11 @@
             servers = self.os_primary.servers_client.list_servers()
             servers = servers['servers']
         for server in servers:
+            # NOTE(slaweq): servers are sometimes passed as dicts with
+            # "server" as the top-level key, and sometimes as just the
+            # "inner" dict without the "server" key. Handle both cases.
+            server = server.get("server") or server
             try:
                 console_output = (
                     self.os_primary.servers_client.get_console_output(
diff --git a/neutron_tempest_plugin/scenario/test_qos.py b/neutron_tempest_plugin/scenario/test_qos.py
index c847077..bc94cbf 100644
--- a/neutron_tempest_plugin/scenario/test_qos.py
+++ b/neutron_tempest_plugin/scenario/test_qos.py
@@ -19,6 +19,7 @@
 from neutron_lib.services.qos import constants as qos_consts
 from oslo_log import log as logging
 from tempest.common import utils as tutils
+from tempest.common import waiters
 from tempest.lib import decorators
 
 from neutron_tempest_plugin.api import base as base_api
@@ -139,6 +140,47 @@
                                         shared=True)
         return policy['policy']['id']
 
+    def _create_server_by_port(self, port=None):
+        """Launch an instance using a port interface;
+
+        In case that the given port is None, a new port is created,
+        activated and configured with inbound SSH and TCP connection.
+        """
+        # Create and set up the port that will be assigned to the instance.
+        if port is None:
+            secgroup = self.create_security_group()
+            self.create_loginable_secgroup_rule(
+                secgroup_id=secgroup['id'])
+
+            secgroup_rules = [{'protocol': 'tcp',
+                               'direction': 'ingress',
+                               'port_range_min': self.NC_PORT,
+                               'port_range_max': self.NC_PORT,
+                               'remote_ip_prefix': '0.0.0.0/0'}]
+
+            self.create_secgroup_rules(secgroup_rules,
+                                       secgroup['id'])
+
+            port = self.create_port(self.network,
+                                    security_groups=[secgroup['id']])
+            self.fip = self.create_floatingip(port=port)
+
+        keypair = self.create_keypair()
+
+        server_kwargs = {
+            'flavor_ref': CONF.compute.flavor_ref,
+            'image_ref': CONF.compute.image_ref,
+            'key_name': keypair['name'],
+            'networks': [{'port': port['id']}],
+        }
+
+        server = self.create_server(**server_kwargs)
+        self.wait_for_server_active(server['server'])
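+        # Make sure the instance is reachable over SSH through the
+        # floating IP before handing it back to the caller.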
+        self.check_connectivity(self.fip['floating_ip_address'],
+                                CONF.validation.image_ssh_user,
+                                keypair['private_key'])
+        return server, port
+
 
 class QoSTest(QoSTestMixin, base.BaseTempestTestCase):
     @classmethod
@@ -260,3 +302,58 @@
             expected_bw=QoSTest.LIMIT_BYTES_SEC * 3),
             timeout=self.CHECK_TIMEOUT,
             sleep=1)
+
+    @decorators.idempotent_id('66e5673e-0522-11ea-8d71-362b9e155667')
+    def test_attach_previously_used_port_to_new_instance(self):
+        """The test spawns new instance using port with QoS policy.
+
+        Ports with attached QoS policy could be used multiple times.
+        The policy rules have to be enforced on the new machines.
+        """
+        self.network = self.create_network()
+        self.subnet = self.create_subnet(self.network)
+        self.router = self.create_router_by_client()
+        self.create_router_interface(self.router['id'], self.subnet['id'])
+
+        vm, vm_port = self._create_server_by_port()
+
+        port_policy = self.os_admin.network_client.create_qos_policy(
+            name='port-policy',
+            description='policy for attach',
+            shared=False)['policy']
+
+        rule = self.os_admin.network_client.create_bandwidth_limit_rule(
+            policy_id=port_policy['id'],
+            max_kbps=constants.LIMIT_KILO_BITS_PER_SECOND,
+            max_burst_kbps=constants.LIMIT_KILO_BITS_PER_SECOND)[
+                    'bandwidth_limit_rule']
+
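+        # Attach the QoS policy to the port while it is still bound to
+        # the first server.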
+        self.os_admin.network_client.update_port(
+            vm_port['id'], qos_policy_id=port_policy['id'])
+
+        self.os_primary.servers_client.delete_server(vm['server']['id'])
+        waiters.wait_for_server_termination(
+            self.os_primary.servers_client,
+            vm['server']['id'])
+
+        # Launch a new server using the same port with attached policy
+        self._create_server_by_port(port=vm_port)
+
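+        # The QoS policy and its bandwidth limit rule must still be
+        # attached to the reused port.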
+        retrieved_port = self.os_admin.network_client.show_port(
+            vm_port['id'])
+        self.assertEqual(port_policy['id'],
+                         retrieved_port['port']['qos_policy_id'],
+                         'The expected policy ID is {0}, the actual value '
+                         'is {1}'.format(
+                             port_policy['id'],
+                             retrieved_port['port']['qos_policy_id']))
+
+        retrieved_policy = self.os_admin.network_client.show_qos_policy(
+            retrieved_port['port']['qos_policy_id'])
+
+        retrieved_rule_id = retrieved_policy['policy']['rules'][0]['id']
+        self.assertEqual(rule['id'],
+                         retrieved_rule_id,
+                         'The expected rule ID is {0}, the actual value '
+                         'is {1}'.format(rule['id'], retrieved_rule_id))