Merge "Move test cases from networking-midonet repository"
diff --git a/.zuul.yaml b/.zuul.yaml
index 8c6072a..65e00b7 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -14,9 +14,71 @@
vars:
tempest_concurrency: 4
tox_envlist: all
+ network_api_extensions_common: &api_extensions_master
+ - address-scope
+ - agent
+ - allowed-address-pairs
+ - auto-allocated-topology
+ - availability_zone
+ - binding
+ - default-subnetpools
+ - dhcp_agent_scheduler
+ - dns-domain-ports
+ - dns-integration
+ - empty-string-filtering
+ - ext-gw-mode
+ - external-net
+ - extra_dhcp_opt
+ - extraroute
+ - filter-validation
+ - fip-port-details
+ - flavors
+ - floatingip-pools
+ - ip-substring-filtering
+ - l3-flavors
+ - l3-ha
+ - l3_agent_scheduler
+ - logging
+ - metering
+ - multi-provider
+ - net-mtu
+ - net-mtu-writable
+ - network-ip-availability
+ - network_availability_zone
+ - pagination
+ - port-resource-request
+ - port-mac-address-regenerate
+ - port-security
+ - port-security-groups-filtering
+ - project-id
+ - provider
+ - qos
+ - qos-bw-minimum-ingress
+ - qos-fip
+ - quotas
+ - quota_details
+ - rbac-policies
+ - router
+ - router_availability_zone
+ - security-group
+ - segment
+ - service-type
+ - sorting
+ - standard-attr-description
+ - standard-attr-revisions
+ - standard-attr-segment
+ - standard-attr-tag
+ - standard-attr-timestamp
+ - subnet_allocation
+ - trunk
+ - trunk-details
+ - uplink-status-propagation
+ network_api_extensions_tempest:
+ - dvr
devstack_localrc:
+ USE_PYTHON3: true
TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
- NETWORK_API_EXTENSIONS: address-scope,agent,allowed-address-pairs,auto-allocated-topology,availability_zone,binding,default-subnetpools,dhcp_agent_scheduler,dns-domain-ports,dns-integration,dvr,empty-string-filtering,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,filter-validation,fip-port-details,flavors,ip-substring-filtering,l3-flavors,l3-ha,l3_agent_scheduler,logging,metering,multi-provider,net-mtu,net-mtu-writable,network-ip-availability,network_availability_zone,pagination,port-security,project-id,provider,qos,qos-fip,quotas,quota_details,rbac-policies,router,router_availability_zone,security-group,port-mac-address-regenerate,port-security-groups-filtering,segment,service-type,sorting,standard-attr-description,standard-attr-revisions,standard-attr-segment,standard-attr-timestamp,standard-attr-tag,subnet_allocation,trunk,trunk-details
+ NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_tempest) | join(',') }}"
devstack_plugins:
neutron: git://git.openstack.org/openstack/neutron.git
neutron-tempest-plugin: git://git.openstack.org/openstack/neutron-tempest-plugin.git
@@ -27,6 +89,7 @@
neutron-qos: true
neutron-segments: true
neutron-trunk: true
+ neutron-uplink-status-propagation: true
devstack_local_conf:
post-config:
$NEUTRON_CONF:
@@ -65,6 +128,7 @@
agent_availability_zone: nova
image_is_advanced: true
available_type_drivers: flat,geneve,vlan,gre,local,vxlan
+ provider_net_base_segm_id: 1
irrelevant-files: &tempest-irrelevant-files
- ^(test-|)requirements.txt$
- ^releasenotes/.*$
@@ -96,27 +160,138 @@
- job:
name: neutron-tempest-plugin-api-queens
+ nodeset: openstack-single-node-xenial
parent: neutron-tempest-plugin-api
override-checkout: stable/queens
vars:
branch_override: stable/queens
+ # TODO(slaweq): find a way to put this list of extensions in
+ # neutron repository and keep it different per branch,
+ # then it could be removed from here
+ network_api_extensions_common: &api_extensions_queens
+ - address-scope
+ - agent
+ - allowed-address-pairs
+ - auto-allocated-topology
+ - availability_zone
+ - binding
+ - default-subnetpools
+ - dhcp_agent_scheduler
+ - dns-domain-ports
+ - dns-integration
+ - ext-gw-mode
+ - external-net
+ - extra_dhcp_opt
+ - extraroute
+ - flavors
+ - ip-substring-filtering
+ - l3-flavors
+ - l3-ha
+ - l3_agent_scheduler
+ - logging
+ - metering
+ - multi-provider
+ - net-mtu
+ - net-mtu-writable
+ - network-ip-availability
+ - network_availability_zone
+ - pagination
+ - port-security
+ - project-id
+ - provider
+ - qos
+ - qos-fip
+ - quotas
+ - quota_details
+ - rbac-policies
+ - router
+ - router_availability_zone
+ - security-group
+ - segment
+ - service-type
+ - sorting
+ - standard-attr-description
+ - standard-attr-revisions
+ - standard-attr-timestamp
+ - standard-attr-tag
+ - subnet_allocation
+ - trunk
+ - trunk-details
+ network_api_extensions_tempest:
+ - dvr
devstack_localrc:
- # TODO(slaweq): find a way to put this list of extensions in
- # neutron repository and keep it different per branch,
- # then it could be removed from here
- NETWORK_API_EXTENSIONS: address-scope,agent,allowed-address-pairs,auto-allocated-topology,availability_zone,binding,default-subnetpools,dhcp_agent_scheduler,dns-domain-ports,dns-integration,dvr,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,flavors,ip-substring-filtering,l3-flavors,l3-ha,l3_agent_scheduler,logging,metering,multi-provider,net-mtu,net-mtu-writable,network-ip-availability,network_availability_zone,pagination,port-security,project-id,provider,qos,quotas,quota_details,rbac-policies,router,router_availability_zone,security-group,segment,service-type,sorting,standard-attr-description,standard-attr-revisions,standard-attr-timestamp,standard-attr-tag,subnet_allocation,tag,tag-ext,trunk,trunk-details
+ USE_PYTHON3: false
+ NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_tempest) | join(',') }}"
- job:
name: neutron-tempest-plugin-api-rocky
+ nodeset: openstack-single-node-xenial
parent: neutron-tempest-plugin-api
override-checkout: stable/rocky
vars:
branch_override: stable/rocky
+ # TODO(slaweq): find a way to put this list of extensions in
+ # neutron repository and keep it different per branch,
+ # then it could be removed from here
+ network_api_extensions_common: &api_extensions_rocky
+ - address-scope
+ - agent
+ - allowed-address-pairs
+ - auto-allocated-topology
+ - availability_zone
+ - binding
+ - default-subnetpools
+ - dhcp_agent_scheduler
+ - dns-domain-ports
+ - dns-integration
+ - empty-string-filtering
+ - ext-gw-mode
+ - external-net
+ - extra_dhcp_opt
+ - extraroute
+ - fip-port-details
+ - flavors
+ - ip-substring-filtering
+ - l3-flavors
+ - l3-ha
+ - l3_agent_scheduler
+ - logging
+ - metering
+ - multi-provider
+ - net-mtu
+ - net-mtu-writable
+ - network-ip-availability
+ - network_availability_zone
+ - pagination
+ - port-mac-address-regenerate
+ - port-security
+ - port-security-groups-filtering
+ - project-id
+ - provider
+ - qos
+ - qos-fip
+ - quotas
+ - quota_details
+ - rbac-policies
+ - router
+ - router_availability_zone
+ - security-group
+ - segment
+ - service-type
+ - sorting
+ - standard-attr-description
+ - standard-attr-revisions
+ - standard-attr-segment
+ - standard-attr-timestamp
+ - standard-attr-tag
+ - subnet_allocation
+ - trunk
+ - trunk-details
+ network_api_extensions_tempest:
+ - dvr
devstack_localrc:
- # TODO(slaweq): find a way to put this list of extensions in
- # neutron repository and keep it different per branch,
- # then it could be removed from here
- NETWORK_API_EXTENSIONS: address-scope,agent,allowed-address-pairs,auto-allocated-topology,availability_zone,binding,default-subnetpools,dhcp_agent_scheduler,dns-domain-ports,dns-integration,dvr,empty-string-filtering,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,fip-port-details,flavors,ip-substring-filtering,l3-flavors,l3-ha,l3_agent_scheduler,logging,metering,multi-provider,net-mtu,net-mtu-writable,network-ip-availability,network_availability_zone,pagination,port-security,project-id,provider,qos,qos-fip,quotas,quota_details,rbac-policies,router,router_availability_zone,security-group,port-mac-address-regenerate,port-security-groups-filtering,segment,service-type,sorting,standard-attr-description,standard-attr-revisions,standard-attr-segment,standard-attr-timestamp,standard-attr-tag,subnet_allocation,trunk,trunk-details
+ USE_PYTHON3: false
+ NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_tempest) | join(',') }}"
- job:
name: neutron-tempest-plugin-scenario
@@ -143,9 +318,10 @@
parent: neutron-tempest-plugin-scenario
timeout: 10000
vars:
+ network_api_extensions: *api_extensions_master
devstack_localrc:
Q_AGENT: linuxbridge
- NETWORK_API_EXTENSIONS: address-scope,agent,allowed-address-pairs,auto-allocated-topology,availability_zone,binding,default-subnetpools,dhcp_agent_scheduler,dns-domain-ports,dns-integration,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,filter-validation,fip-port-details,flavors,ip-substring-filtering,l3-flavors,l3-ha,l3_agent_scheduler,logging,metering,multi-provider,net-mtu,net-mtu-writable,network-ip-availability,network_availability_zone,pagination,port-security,project-id,provider,qos,qos-fip,quotas,quota_details,rbac-policies,router,router_availability_zone,security-group,port-security-groups-filtering,segment,service-type,sorting,standard-attr-description,standard-attr-revisions,standard-attr-timestamp,standard-attr-tag,subnet_allocation,tag,tag-ext,trunk,trunk-details
+ NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
devstack_local_conf:
post-config:
$NEUTRON_CONF:
@@ -163,24 +339,47 @@
$TEMPEST_CONFIG:
neutron_plugin_options:
available_type_drivers: flat,vlan,local,vxlan
+ q_agent: linuxbridge
- job:
name: neutron-tempest-plugin-scenario-linuxbridge-queens
parent: neutron-tempest-plugin-scenario-linuxbridge
+ nodeset: openstack-single-node-xenial
override-checkout: stable/queens
vars:
branch_override: stable/queens
+ network_api_extensions: *api_extensions_queens
devstack_localrc:
- NETWORK_API_EXTENSIONS: address-scope,agent,allowed-address-pairs,auto-allocated-topology,availability_zone,binding,default-subnetpools,dhcp_agent_scheduler,dns-integration,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,flavors,ip-substring-filtering,l3-flavors,l3-ha,l3_agent_scheduler,logging,metering,multi-provider,net-mtu,net-mtu-writable,network-ip-availability,network_availability_zone,pagination,port-security,project-id,provider,qos,qos-fip,quotas,quota_details,rbac-policies,router,router_availability_zone,security-group,port-security-groups-filtering,segment,service-type,sorting,standard-attr-description,standard-attr-revisions,standard-attr-timestamp,standard-attr-tag,subnet_allocation,tag,tag-ext,trunk,trunk-details
+ USE_PYTHON3: false
+ NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
+ devstack_local_conf:
+ test-config:
+          # NOTE: skip the linux bridge trunk delete on bound port test
+          # for the queens branch (the https://review.openstack.org/#/c/605589/
+          # fix will not apply to the queens branch)
+ $TEMPEST_CONFIG:
+ neutron_plugin_options:
+ q_agent: None
- job:
name: neutron-tempest-plugin-scenario-linuxbridge-rocky
parent: neutron-tempest-plugin-scenario-linuxbridge
+ nodeset: openstack-single-node-xenial
override-checkout: stable/rocky
vars:
branch_override: stable/rocky
+ network_api_extensions: *api_extensions_rocky
devstack_localrc:
- NETWORK_API_EXTENSIONS: address-scope,agent,allowed-address-pairs,auto-allocated-topology,availability_zone,binding,default-subnetpools,dhcp_agent_scheduler,dns-domain-ports,dns-integration,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,fip-port-details,flavors,ip-substring-filtering,l3-flavors,l3-ha,l3_agent_scheduler,logging,metering,multi-provider,net-mtu,net-mtu-writable,network-ip-availability,network_availability_zone,pagination,port-security,project-id,provider,qos,qos-fip,quotas,quota_details,rbac-policies,router,router_availability_zone,security-group,port-security-groups-filtering,segment,service-type,sorting,standard-attr-description,standard-attr-revisions,standard-attr-timestamp,standard-attr-tag,subnet_allocation,tag,tag-ext,trunk,trunk-details
+ USE_PYTHON3: false
+ NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
+ devstack_local_conf:
+ test-config:
+          # NOTE: skip the linux bridge trunk delete on bound port test
+          # for the rocky branch (the https://review.openstack.org/#/c/605589/
+          # fix will not apply to the rocky branch)
+ $TEMPEST_CONFIG:
+ neutron_plugin_options:
+ q_agent: None
- job:
name: neutron-tempest-plugin-dvr-multinode-scenario
@@ -200,9 +399,13 @@
tempest_concurrency: 4
tox_envlist: all
tempest_test_regex: ^neutron_tempest_plugin\.scenario
+ network_api_extensions_common: *api_extensions_master
+ network_api_extensions_dvr:
+ - dvr
devstack_localrc:
+ USE_PYTHON3: true
TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
- NETWORK_API_EXTENSIONS: "address-scope,agent,allowed-address-pairs,auto-allocated-topology,availability_zone,binding,default-subnetpools,dhcp_agent_scheduler,dns-integration,dvr,empty-string-filtering,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,fip-port-details,flavors,ip-substring-filtering,l3-flavors,l3-ha,l3_agent_scheduler,logging,metering,multi-provider,net-mtu,net-mtu-writable,network-ip-availability,network_availability_zone,pagination,port-security,project-id,provider,qos,qos-fip,quotas,quota_details,rbac-policies,router,router_availability_zone,security-group,port-security-groups-filtering,segment,service-type,sorting,standard-attr-description,standard-attr-revisions,standard-attr-segment,standard-attr-timestamp,standard-attr-tag,subnet_allocation,trunk,trunk-details"
+ NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_dvr) | join(',') }}"
PHYSICAL_NETWORK: default
DOWNLOAD_DEFAULT_IMAGES: false
IMAGE_URLS: "http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img,http://cloud-images.ubuntu.com/releases/16.04/release-20180622/ubuntu-16.04-server-cloudimg-amd64-disk1.img"
@@ -310,16 +513,22 @@
- job:
name: neutron-tempest-plugin-dvr-multinode-scenario-queens
parent: neutron-tempest-plugin-dvr-multinode-scenario
+ nodeset: openstack-two-node-xenial
override-checkout: stable/queens
vars:
branch_override: stable/queens
+ devstack_localrc:
+ USE_PYTHON3: false
- job:
name: neutron-tempest-plugin-dvr-multinode-scenario-rocky
parent: neutron-tempest-plugin-dvr-multinode-scenario
+ nodeset: openstack-two-node-xenial
override-checkout: stable/rocky
vars:
branch_override: stable/rocky
+ devstack_localrc:
+ USE_PYTHON3: false
- job:
name: neutron-tempest-plugin-designate-scenario
@@ -350,16 +559,22 @@
- job:
name: neutron-tempest-plugin-designate-scenario-queens
parent: neutron-tempest-plugin-designate-scenario
+ nodeset: openstack-single-node-xenial
override-checkout: stable/queens
vars:
branch_override: stable/queens
+ devstack_localrc:
+ USE_PYTHON3: false
- job:
name: neutron-tempest-plugin-designate-scenario-rocky
parent: neutron-tempest-plugin-designate-scenario
+ nodeset: openstack-single-node-xenial
override-checkout: stable/rocky
vars:
branch_override: stable/rocky
+ devstack_localrc:
+ USE_PYTHON3: false
- project-template:
name: neutron-tempest-plugin-jobs
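
The .zuul.yaml changes above replace each job's long comma-separated
NETWORK_API_EXTENSIONS string with per-branch YAML lists (anchored as
*api_extensions_master, *api_extensions_queens and *api_extensions_rocky)
that are joined at run time. A rough Python sketch of how the Jinja
expression "{{ (network_api_extensions_common + network_api_extensions_tempest) | join(',') }}"
resolves, with the lists truncated for brevity (the real rendering happens in
Zuul/Ansible when devstack_localrc is expanded):

    # Stand-ins for the Zuul job variables; truncated for brevity.
    network_api_extensions_common = ['address-scope', 'agent', 'security-group']
    network_api_extensions_tempest = ['dvr']

    # Equivalent of the "| join(',')" Jinja filter used in devstack_localrc.
    NETWORK_API_EXTENSIONS = ','.join(
        network_api_extensions_common + network_api_extensions_tempest)
    print(NETWORK_API_EXTENSIONS)  # address-scope,agent,security-group,dvr
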
diff --git a/neutron_tempest_plugin/api/admin/test_extension_driver_port_security_admin.py b/neutron_tempest_plugin/api/admin/test_extension_driver_port_security_admin.py
index d449ead..048a1e5 100644
--- a/neutron_tempest_plugin/api/admin/test_extension_driver_port_security_admin.py
+++ b/neutron_tempest_plugin/api/admin/test_extension_driver_port_security_admin.py
@@ -17,11 +17,9 @@
from tempest.lib import exceptions as lib_exc
from neutron_tempest_plugin.api import base
-from neutron_tempest_plugin.api import base_security_groups as base_security
-class PortSecurityAdminTests(base_security.BaseSecGroupTest,
- base.BaseAdminNetworkTest):
+class PortSecurityAdminTests(base.BaseAdminNetworkTest):
required_extensions = ['port-security']
diff --git a/neutron_tempest_plugin/api/admin/test_ports.py b/neutron_tempest_plugin/api/admin/test_ports.py
index cbcd933..642e910 100644
--- a/neutron_tempest_plugin/api/admin/test_ports.py
+++ b/neutron_tempest_plugin/api/admin/test_ports.py
@@ -14,11 +14,17 @@
# under the License.
import netaddr
+import six
+from neutron_lib import constants as const
from tempest.common import utils
+from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from neutron_tempest_plugin.api import base
+from neutron_tempest_plugin import config
+
+CONF = config.CONF
class PortTestCasesAdmin(base.BaseAdminNetworkTest):
@@ -58,3 +64,126 @@
new_mac = body['port']['mac_address']
self.assertNotEqual(current_mac, new_mac)
self.assertTrue(netaddr.valid_mac(new_mac))
+
+
+class PortTestCasesResourceRequest(base.BaseAdminNetworkTest):
+
+ required_extensions = ['port-resource-request',
+ 'qos',
+ 'qos-bw-minimum-ingress']
+
+ EGRESS_KBPS = 1000
+ INGRESS_KBPS = 2000
+
+ @classmethod
+ def skip_checks(cls):
+ super(PortTestCasesResourceRequest, cls).skip_checks()
+ if not config.CONF.neutron_plugin_options.provider_vlans:
+ msg = "Skipped as provider VLANs are not available in config"
+ raise cls.skipException(msg)
+
+ @classmethod
+ def resource_setup(cls):
+ super(PortTestCasesResourceRequest, cls).resource_setup()
+
+ cls.vnic_type = 'normal'
+
+        # Note(lajoskatona): use vxlan as the network type here so that the
+        # network is not created as a provider VLAN network:
+ cls.network = cls.create_network(provider_network_type='vxlan')
+ cls.physnet_name = CONF.neutron_plugin_options.provider_vlans[0]
+ base_segm = CONF.neutron_plugin_options.provider_net_base_segm_id
+ cls.prov_network = cls.create_provider_network(
+ physnet_name=cls.physnet_name, start_segmentation_id=base_segm)
+
+ def _create_qos_policy_and_port(self, network, vnic_type,
+ network_policy=False):
+ qos_policy = self.create_qos_policy(
+ name=data_utils.rand_name('test_policy'), shared=True)
+ self.create_qos_minimum_bandwidth_rule(qos_policy['id'],
+ self.EGRESS_KBPS,
+ const.EGRESS_DIRECTION)
+ self.create_qos_minimum_bandwidth_rule(qos_policy['id'],
+ self.INGRESS_KBPS,
+ const.INGRESS_DIRECTION)
+
+ port_policy_id = qos_policy['id'] if not network_policy else None
+ port_kwargs = {
+ 'qos_policy_id': port_policy_id,
+ 'binding:vnic_type': vnic_type
+ }
+
+ if network_policy:
+ self.admin_client.update_network(network['id'],
+ qos_policy_id=qos_policy['id'])
+
+ port_id = self.create_port(network, **port_kwargs)['id']
+ return self.admin_client.show_port(port_id)['port']
+
+ def _assert_resource_request(self, port, vnic_type):
+ self.assertIn('resource_request', port)
+ vnic_trait = 'CUSTOM_VNIC_TYPE_%s' % vnic_type.upper()
+ physnet_trait = 'CUSTOM_PHYSNET_%s' % self.physnet_name.upper()
+ six.assertCountEqual(self, [physnet_trait, vnic_trait],
+ port['resource_request']['required'])
+
+ self.assertEqual(
+ {'NET_BW_EGR_KILOBIT_PER_SEC': self.EGRESS_KBPS,
+ 'NET_BW_IGR_KILOBIT_PER_SEC': self.INGRESS_KBPS},
+ port['resource_request']['resources']
+ )
+
+ @decorators.idempotent_id('ebb86dc4-716c-4558-8516-6dfc4a67601f')
+ def test_port_resource_request(self):
+ port = self._create_qos_policy_and_port(
+ network=self.prov_network, vnic_type=self.vnic_type)
+ port_id = port['id']
+
+ self._assert_resource_request(port, self.vnic_type)
+
+        # Note(lajoskatona): port-resource-request is an admin-only feature,
+        # so verify that a non-admin user cannot see the new field.
+ port = self.client.show_port(port_id)['port']
+ self.assertNotIn('resource_request', port)
+
+ self.update_port(port, **{'qos_policy_id': None})
+ port = self.admin_client.show_port(port_id)['port']
+ self.assertIsNone(port['resource_request'])
+
+ @decorators.idempotent_id('10b3308b-d8a2-459b-9b89-a146863c357f')
+ def test_port_resource_request_no_provider_net(self):
+ port = self._create_qos_policy_and_port(
+ network=self.network, vnic_type=self.vnic_type)
+
+ self.assertIn('resource_request', port)
+ self.assertIsNone(port['resource_request'])
+
+ @decorators.idempotent_id('0eeb6ffa-9a7a-40b5-83dd-dbdcd67e2e64')
+ def test_port_resource_request_empty(self):
+ qos_policy = self.create_qos_policy(
+ name=data_utils.rand_name('test_policy'), shared=True)
+
+        # Note(lajoskatona): add a rule other than minimum-bandwidth to the
+        # policy to make sure it does not populate the port's resource_request.
+ self.create_qos_bandwidth_limit_rule(qos_policy['id'],
+ self.EGRESS_KBPS, 800,
+ const.EGRESS_DIRECTION)
+
+ port_kwargs = {
+ 'qos_policy_id': qos_policy['id'],
+ 'binding:vnic_type': self.vnic_type
+ }
+
+ port_id = self.create_port(self.prov_network, **port_kwargs)['id']
+ port = self.admin_client.show_port(port_id)['port']
+
+ self.assertIn('resource_request', port)
+ self.assertIsNone(port['resource_request'])
+
+ @decorators.idempotent_id('b6c34ae4-44c8-47f0-86de-7ef9866fa000')
+ def test_port_resource_request_inherited_policy(self):
+ port = self._create_qos_policy_and_port(
+ network=self.prov_network, vnic_type=self.vnic_type,
+ network_policy=True)
+
+ self._assert_resource_request(port, self.vnic_type)
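
The PortTestCasesResourceRequest class added above checks the shape of the
admin-only resource_request attribute of a port backed by a provider VLAN.
For reference, a sketch of the payload that _assert_resource_request expects
for a 'normal' vnic_type port; 'physnet1' is a placeholder for whatever
CONF.neutron_plugin_options.provider_vlans[0] resolves to in a given
deployment:

    # Illustrative only: values follow EGRESS_KBPS / INGRESS_KBPS above.
    expected_resource_request = {
        'required': ['CUSTOM_PHYSNET_PHYSNET1', 'CUSTOM_VNIC_TYPE_NORMAL'],
        'resources': {
            'NET_BW_EGR_KILOBIT_PER_SEC': 1000,
            'NET_BW_IGR_KILOBIT_PER_SEC': 2000,
        },
    }
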
diff --git a/neutron_tempest_plugin/api/admin/test_quotas_negative.py b/neutron_tempest_plugin/api/admin/test_quotas_negative.py
index cd64e5c..9c37d92 100644
--- a/neutron_tempest_plugin/api/admin/test_quotas_negative.py
+++ b/neutron_tempest_plugin/api/admin/test_quotas_negative.py
@@ -10,6 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from neutron_lib import constants
from tempest.common import utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
@@ -110,54 +111,38 @@
@decorators.idempotent_id('5c924ff7-b7a9-474f-92a3-dbe0f976ec13')
@utils.requires_ext(extension="security-group", service="network")
def test_create_security_group_when_quotas_is_full(self):
- tenant_id = self.create_project()['id']
- sg_args = {'tenant_id': tenant_id}
- # avoid a number that is made by default
- sg_list = self.admin_client.list_security_groups(
- tenant_id=tenant_id)['security_groups']
- num = len(sg_list) + 1
+ project = self.create_project()
- new_quotas = {'security_group': num}
- self._setup_quotas(tenant_id, **new_quotas)
+ # Set quotas to allow to create only one more security group
+ security_groups = self.admin_client.list_security_groups(
+ tenant_id=project['id'])['security_groups']
+ self._setup_quotas(project['id'],
+ security_group=len(security_groups) + 1)
- sg = self.admin_client.create_security_group(
- **sg_args)['security_group']
- self.addCleanup(self.admin_client.delete_security_group, sg['id'])
-
- self.assertRaises(lib_exc.Conflict,
- self.admin_client.create_security_group, **sg_args)
+ self.create_security_group(project=project)
+ self.assertRaises(lib_exc.Conflict, self.create_security_group,
+ project=project)
@decorators.attr(type='negative')
@decorators.idempotent_id('b7143480-6118-4ed4-be38-1b6f15f30d05')
@utils.requires_ext(extension="security-group", service="network")
def test_create_security_group_rule_when_quotas_is_full(self):
- tenant_id = self.create_project()['id']
- sg_args = {'tenant_id': tenant_id}
+ project = self.create_project()
+ security_group = self.create_security_group(project=project)
- sg = self.admin_client.create_security_group(
- **sg_args)['security_group']
- self.addCleanup(self.admin_client.delete_security_group, sg['id'])
+ # Set quotas to allow to create only one more security group rule
+ security_group_rules = self.admin_client.list_security_group_rules(
+ tenant_id=project['id'])['security_group_rules']
+ self._setup_quotas(project['id'],
+ security_group_rule=len(security_group_rules) + 1)
- # avoid a number that is made by default
- sg_rule_list = self.admin_client.list_security_group_rules(
- tenant_id=tenant_id)['security_group_rules']
- num = len(sg_rule_list) + 1
-
- new_quotas = {'security_group_rule': num}
- self._setup_quotas(tenant_id, **new_quotas)
-
- sg_rule_args = {'tenant_id': tenant_id,
- 'security_group_id': sg['id'],
- 'direction': 'ingress'}
- sg_rule = self.admin_client.create_security_group_rule(
- **sg_rule_args)['security_group_rule']
- self.addCleanup(
- self.admin_client.delete_security_group_rule, sg_rule['id'])
-
- sg_rule_args['direction'] = 'egress'
+ self.create_security_group_rule(
+ project=project, security_group=security_group,
+ direction=constants.INGRESS_DIRECTION)
self.assertRaises(lib_exc.Conflict,
- self.admin_client.create_security_group_rule,
- **sg_rule_args)
+ self.create_security_group_rule,
+ project=project, security_group=security_group,
+ direction=constants.EGRESS_DIRECTION)
@decorators.attr(type='negative')
@decorators.idempotent_id('d00fe5bb-9db8-4e1a-9c31-490f52897e6f')
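
The rewritten quota-negative tests above share one pattern: count the existing
resources in a fresh project, set the quota to that count plus one, consume the
remaining headroom through the new base-class helpers, and expect Conflict on
the next create. A condensed sketch of that pattern, assuming a
BaseAdminNetworkTest instance with the helpers introduced in this change
(create_project, _setup_quotas, create_security_group):

    from tempest.lib import exceptions as lib_exc

    def fill_security_group_quota(test):
        project = test.create_project()
        existing = test.admin_client.list_security_groups(
            tenant_id=project['id'])['security_groups']
        # Allow exactly one more security group for this project ...
        test._setup_quotas(project['id'], security_group=len(existing) + 1)
        test.create_security_group(project=project)   # ... and consume it.
        # Any further create must now fail with 409 Conflict.
        test.assertRaises(lib_exc.Conflict,
                          test.create_security_group, project=project)
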
diff --git a/neutron_tempest_plugin/api/admin/test_security_groups.py b/neutron_tempest_plugin/api/admin/test_security_groups.py
index de7e7d2..d79b0ee 100644
--- a/neutron_tempest_plugin/api/admin/test_security_groups.py
+++ b/neutron_tempest_plugin/api/admin/test_security_groups.py
@@ -14,10 +14,10 @@
from tempest.lib import decorators
-from neutron_tempest_plugin.api import base_security_groups as base
+from neutron_tempest_plugin.api import base
-class SecGroupAdminTest(base.BaseSecGroupTest):
+class SecGroupAdminTest(base.BaseNetworkTest):
required_extensions = ['security-group']
credentials = ['primary', 'admin']
diff --git a/neutron_tempest_plugin/api/admin/test_shared_network_extension.py b/neutron_tempest_plugin/api/admin/test_shared_network_extension.py
index cef0ffc..eb902b9 100644
--- a/neutron_tempest_plugin/api/admin/test_shared_network_extension.py
+++ b/neutron_tempest_plugin/api/admin/test_shared_network_extension.py
@@ -101,8 +101,7 @@
@decorators.idempotent_id('9c31fabb-0181-464f-9ace-95144fe9ca77')
def test_create_port_shared_network_as_non_admin_tenant(self):
# create a port as non admin
- body = self.client.create_port(network_id=self.shared_network['id'])
- port = body['port']
+ port = self.create_port(self.shared_network)
self.addCleanup(self.admin_client.delete_port, port['id'])
# verify the tenant id of admin network and non admin port
self.assertNotEqual(self.shared_network['tenant_id'],
@@ -257,7 +256,7 @@
def test_port_presence_prevents_network_rbac_policy_deletion(self):
res = self._make_admin_net_and_subnet_shared_to_tenant_id(
self.client.tenant_id)
- port = self.client.create_port(network_id=res['network']['id'])['port']
+ port = self.create_port(res['network'])
# a port on the network should prevent the deletion of a policy
# required for it to exist
with testtools.ExpectedException(lib_exc.Conflict):
@@ -274,7 +273,7 @@
object_type='network', object_id=net['id'],
action='access_as_shared',
target_tenant=net['tenant_id'])['rbac_policy']
- port = self.client.create_port(network_id=net['id'])['port']
+ port = self.create_port(net)
self.client.delete_rbac_policy(self_share['id'])
self.client.delete_port(port['id'])
@@ -290,8 +289,7 @@
@decorators.idempotent_id('f7539232-389a-4e9c-9e37-e42a129eb541')
def test_tenant_cant_delete_other_tenants_ports(self):
net = self.create_network()
- port = self.client.create_port(network_id=net['id'])['port']
- self.addCleanup(self.client.delete_port, port['id'])
+ port = self.create_port(net)
with testtools.ExpectedException(lib_exc.NotFound):
self.client2.delete_port(port['id'])
@@ -405,7 +403,7 @@
object_type='network', object_id=net['id'],
action='access_as_shared',
target_tenant=net['tenant_id'])['rbac_policy']
- port = self.client.create_port(network_id=net['id'])['port']
+ port = self.create_port(net)
self.client.update_rbac_policy(self_share['id'],
target_tenant=self.client2.tenant_id)
self.client.delete_port(port['id'])
diff --git a/neutron_tempest_plugin/api/admin/test_tag.py b/neutron_tempest_plugin/api/admin/test_tag.py
index fdcb6a1..7879b4c 100644
--- a/neutron_tempest_plugin/api/admin/test_tag.py
+++ b/neutron_tempest_plugin/api/admin/test_tag.py
@@ -19,7 +19,7 @@
class TagTestJSON(base.BaseAdminNetworkTest):
- required_extensions = ['tag']
+ required_extensions = ['standard-attr-tag']
@classmethod
def resource_setup(cls):
@@ -100,7 +100,7 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('2805aabf-a94c-4e70-a0b2-9814f06beb03')
- @utils.requires_ext(extension="tag-ext", service="network")
+ @utils.requires_ext(extension="standard-attr-tag", service="network")
def test_subnet_tags(self):
self._test_tag_operations()
@@ -116,7 +116,7 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('c7c44f2c-edb0-4ebd-a386-d37cec155c34')
- @utils.requires_ext(extension="tag-ext", service="network")
+ @utils.requires_ext(extension="standard-attr-tag", service="network")
def test_port_tags(self):
self._test_tag_operations()
@@ -133,7 +133,7 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('bdc1c24b-c0b5-4835-953c-8f67dc11edfe')
- @utils.requires_ext(extension="tag-ext", service="network")
+ @utils.requires_ext(extension="standard-attr-tag", service="network")
def test_subnetpool_tags(self):
self._test_tag_operations()
@@ -149,7 +149,7 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('b898ff92-dc33-4232-8ab9-2c6158c80d28')
- @utils.requires_ext(extension="tag-ext", service="network")
+ @utils.requires_ext(extension="standard-attr-tag", service="network")
def test_router_tags(self):
self._test_tag_operations()
@@ -220,7 +220,7 @@
class TagFilterTestJSON(base.BaseAdminNetworkTest):
credentials = ['primary', 'alt', 'admin']
- required_extensions = ['tag']
+ required_extensions = ['standard-attr-tag']
@classmethod
def resource_setup(cls):
@@ -322,7 +322,7 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('dd8f9ba7-bcf6-496f-bead-714bd3daac10')
- @utils.requires_ext(extension="tag-ext", service="network")
+ @utils.requires_ext(extension="standard-attr-tag", service="network")
def test_filter_subnet_tags(self):
self._test_filter_tags()
@@ -342,7 +342,7 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('09c036b8-c8d0-4bee-b776-7f4601512898')
- @utils.requires_ext(extension="tag-ext", service="network")
+ @utils.requires_ext(extension="standard-attr-tag", service="network")
def test_filter_port_tags(self):
self._test_filter_tags()
@@ -363,7 +363,7 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('16ae7ad2-55c2-4821-9195-bfd04ab245b7')
- @utils.requires_ext(extension="tag-ext", service="network")
+ @utils.requires_ext(extension="standard-attr-tag", service="network")
def test_filter_subnetpool_tags(self):
self._test_filter_tags()
@@ -383,7 +383,7 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('cdd3f3ea-073d-4435-a6cb-826a4064193d')
- @utils.requires_ext(extension="tag-ext", service="network")
+ @utils.requires_ext(extension="standard-attr-tag", service="network")
def test_filter_router_tags(self):
self._test_filter_tags()
@@ -471,7 +471,7 @@
class UpdateTagsTest(base.BaseAdminNetworkTest):
- required_extensions = ['tag']
+ required_extensions = ['standard-attr-tag']
def _get_and_compare_tags(self, tags, res_id):
# nothing specific about networks here, just a resource that is
diff --git a/neutron_tempest_plugin/api/base.py b/neutron_tempest_plugin/api/base.py
index c4bc71d..2943198 100644
--- a/neutron_tempest_plugin/api/base.py
+++ b/neutron_tempest_plugin/api/base.py
@@ -15,9 +15,11 @@
import functools
import math
+import time
import netaddr
from neutron_lib import constants as const
+from oslo_log import log
from tempest.common import utils as tutils
from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
@@ -31,6 +33,8 @@
CONF = config.CONF
+LOG = log.getLogger(__name__)
+
class BaseNetworkTest(test.BaseTestCase):
@@ -189,15 +193,15 @@
network['id'])
# Clean up security groups
- for secgroup in cls.security_groups:
- cls._try_delete_resource(cls.client.delete_security_group,
- secgroup['id'])
+ for security_group in cls.security_groups:
+ cls._try_delete_resource(cls.delete_security_group,
+ security_group)
# Clean up admin security groups
- for secgroup in cls.admin_security_groups:
- cls._try_delete_resource(
- cls.admin_client.delete_security_group,
- secgroup['id'])
+ for security_group in cls.admin_security_groups:
+ cls._try_delete_resource(cls.delete_security_group,
+ security_group,
+ client=cls.admin_client)
for subnetpool in cls.subnetpools:
cls._try_delete_resource(cls.client.delete_subnetpool,
@@ -431,6 +435,8 @@
ip_version=ip_version, cidr=cidr, mask_bits=mask_bits):
if gateway is not None:
kwargs['gateway_ip'] = str(gateway or (subnet_cidr.ip + 1))
+ else:
+ kwargs['gateway_ip'] = None
try:
body = client.create_subnet(
network_id=network['id'],
@@ -611,7 +617,11 @@
cls.external_network_id)
if port:
- kwargs['port_id'] = port['id']
+ port_id = kwargs.setdefault('port_id', port['id'])
+ if port_id != port['id']:
+ message = "Port ID specified twice: {!s} != {!s}".format(
+ port_id, port['id'])
+ raise ValueError(message)
fip = client.create_floatingip(external_network_id,
**kwargs)['floatingip']
@@ -668,8 +678,20 @@
return qos_rule
@classmethod
+ def create_qos_minimum_bandwidth_rule(cls, policy_id, min_kbps,
+ direction=const.EGRESS_DIRECTION):
+ """Wrapper utility that creates and returns a QoS min bw rule."""
+ body = cls.admin_client.create_minimum_bandwidth_rule(
+ policy_id, direction, min_kbps)
+ qos_rule = body['minimum_bandwidth_rule']
+ cls.qos_rules.append(qos_rule)
+ return qos_rule
+
+ @classmethod
def delete_router(cls, router, client=None):
client = client or cls.client
+ if 'routes' in router:
+ client.remove_router_extra_routes(router['id'])
body = client.list_router_interfaces(router['id'])
interfaces = [port for port in body['ports']
if port['device_owner'] in const.ROUTER_INTERFACE_OWNERS]
@@ -710,18 +732,78 @@
description=test_description)['project']
cls.projects.append(project)
        # Creating a project also creates a default security group.
- # We make these security groups into admin_security_groups.
sgs_list = cls.admin_client.list_security_groups(
tenant_id=project['id'])['security_groups']
- for sg in sgs_list:
- cls.admin_security_groups.append(sg)
+ for security_group in sgs_list:
+ # Make sure delete_security_group method will use
+ # the admin client for this group
+ security_group['client'] = cls.admin_client
+ cls.security_groups.append(security_group)
return project
@classmethod
- def create_security_group(cls, name, **kwargs):
- body = cls.client.create_security_group(name=name, **kwargs)
- cls.security_groups.append(body['security_group'])
- return body['security_group']
+ def create_security_group(cls, name=None, project=None, client=None,
+ **kwargs):
+ if project:
+ client = client or cls.admin_client
+ project_id = kwargs.setdefault('project_id', project['id'])
+ tenant_id = kwargs.setdefault('tenant_id', project['id'])
+ if project_id != project['id'] or tenant_id != project['id']:
+ raise ValueError('Project ID specified multiple times')
+ else:
+ client = client or cls.client
+
+ name = name or data_utils.rand_name(cls.__name__)
+ security_group = client.create_security_group(name=name, **kwargs)[
+ 'security_group']
+ security_group['client'] = client
+ cls.security_groups.append(security_group)
+ return security_group
+
+ @classmethod
+ def delete_security_group(cls, security_group, client=None):
+ client = client or security_group.get('client') or cls.client
+ client.delete_security_group(security_group['id'])
+
+ @classmethod
+ def create_security_group_rule(cls, security_group=None, project=None,
+ client=None, ip_version=None, **kwargs):
+ if project:
+ client = client or cls.admin_client
+ project_id = kwargs.setdefault('project_id', project['id'])
+ tenant_id = kwargs.setdefault('tenant_id', project['id'])
+ if project_id != project['id'] or tenant_id != project['id']:
+ raise ValueError('Project ID specified multiple times')
+
+ if 'security_group_id' not in kwargs:
+ security_group = (security_group or
+ cls.get_security_group(client=client))
+
+ if security_group:
+ client = client or security_group.get('client')
+ security_group_id = kwargs.setdefault('security_group_id',
+ security_group['id'])
+ if security_group_id != security_group['id']:
+ raise ValueError('Security group ID specified multiple times.')
+
+ ip_version = ip_version or cls._ip_version
+ default_params = (
+ constants.DEFAULT_SECURITY_GROUP_RULE_PARAMS[ip_version])
+ for key, value in default_params.items():
+ kwargs.setdefault(key, value)
+
+ client = client or cls.client
+ return client.create_security_group_rule(**kwargs)[
+ 'security_group_rule']
+
+ @classmethod
+ def get_security_group(cls, name='default', client=None):
+ client = client or cls.client
+ security_groups = client.list_security_groups()['security_groups']
+ for security_group in security_groups:
+ if security_group['name'] == name:
+ return security_group
+ raise ValueError("No such security group named {!r}".format(name))
@classmethod
def create_keypair(cls, client=None, name=None, **kwargs):
@@ -762,7 +844,7 @@
return trunk
@classmethod
- def delete_trunk(cls, trunk, client=None):
+ def delete_trunk(cls, trunk, client=None, detach_parent_port=True):
"""Delete network trunk
:param trunk: dictionary containing trunk ID (trunk['id'])
@@ -788,7 +870,7 @@
parent_port.update(client.show_port(parent_port['id'])['port'])
return not parent_port['device_id']
- if not is_parent_port_detached():
+ if detach_parent_port and not is_parent_port_detached():
# this could probably happen when trunk is deleted and parent port
# has been assigned to a VM that is still running. Here we are
# assuming that device_id points to such VM.
@@ -906,6 +988,29 @@
"net(%s) has no usable IP address in allocation pools" % net_id)
raise exceptions.InvalidConfiguration(message)
+ @classmethod
+ def create_provider_network(cls, physnet_name, start_segmentation_id,
+ max_attempts=30):
+ segmentation_id = start_segmentation_id
+ for attempts in range(max_attempts):
+ try:
+ return cls.create_network(
+ name=data_utils.rand_name('test_net'),
+ shared=True,
+ provider_network_type='vlan',
+ provider_physical_network=physnet_name,
+ provider_segmentation_id=segmentation_id)
+ except lib_exc.Conflict:
+ segmentation_id += 1
+ if segmentation_id > 4095:
+ raise lib_exc.TempestException(
+ "No free segmentation id was found for provider "
+ "network creation!")
+ time.sleep(CONF.network.build_interval)
+ LOG.exception("Failed to create provider network after "
+ "%d attempts", max_attempts)
+ raise lib_exc.TimeoutException
+
def require_qos_rule_type(rule_type):
def decorator(f):
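
The base.py changes above make BaseNetworkTest track each security group
together with the client that created it and add project-aware helpers, which
is what lets the tests drop BaseSecGroupTest. A minimal usage sketch, assuming
a test class with admin credentials and the helpers exactly as added in this
change:

    from neutron_lib import constants as const

    from neutron_tempest_plugin.api import base


    class ExampleSecGroupUsage(base.BaseAdminNetworkTest):
        """Sketch only: exercises the new security group helpers."""

        def example(self):
            project = self.create_project()
            # Passing a project makes the helper fall back to the admin
            # client; the created group remembers that client for cleanup.
            security_group = self.create_security_group(project=project)
            self.create_security_group_rule(
                project=project, security_group=security_group,
                direction=const.INGRESS_DIRECTION)
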
diff --git a/neutron_tempest_plugin/api/base_security_groups.py b/neutron_tempest_plugin/api/base_security_groups.py
index 127bbd9..ca2c17a 100644
--- a/neutron_tempest_plugin/api/base_security_groups.py
+++ b/neutron_tempest_plugin/api/base_security_groups.py
@@ -14,9 +14,6 @@
# under the License.
from neutron_lib import constants
-from tempest.lib.common.utils import data_utils
-
-from neutron_tempest_plugin.api import base
# NOTE(yamamoto): The list of protocols here is what we had in Ocata.
@@ -45,9 +42,13 @@
'udplite',
'vrrp',
}
-V4_PROTOCOL_INTS = set(v for k, v in constants.IP_PROTOCOL_MAP.items() if
- k in V4_PROTOCOL_NAMES)
-V6_PROTOCOL_LEGACY = set([constants.PROTO_NAME_IPV6_ICMP_LEGACY])
+
+V4_PROTOCOL_INTS = {v
+ for k, v in constants.IP_PROTOCOL_MAP.items()
+ if k in V4_PROTOCOL_NAMES}
+
+V6_PROTOCOL_LEGACY = {constants.PROTO_NAME_IPV6_ICMP_LEGACY}
+
V6_PROTOCOL_NAMES = {
'ipv6-encap',
'ipv6-frag',
@@ -56,66 +57,7 @@
'ipv6-opts',
'ipv6-route',
}
-V6_PROTOCOL_INTS = set(v for k, v in constants.IP_PROTOCOL_MAP.items() if
- k in (V6_PROTOCOL_NAMES | V6_PROTOCOL_LEGACY))
-
-class BaseSecGroupTest(base.BaseNetworkTest):
-
- def _create_security_group(self, **kwargs):
- # Create a security group
- name = data_utils.rand_name('secgroup-')
- group_create_body = self.client.create_security_group(name=name,
- **kwargs)
- self.addCleanup(self._delete_security_group,
- group_create_body['security_group']['id'])
- self.assertEqual(group_create_body['security_group']['name'], name)
- return group_create_body, name
-
- def _delete_security_group(self, secgroup_id):
- self.client.delete_security_group(secgroup_id)
- # Asserting that the security group is not found in the list
- # after deletion
- list_body = self.client.list_security_groups()
- secgroup_list = list()
- for secgroup in list_body['security_groups']:
- secgroup_list.append(secgroup['id'])
- self.assertNotIn(secgroup_id, secgroup_list)
-
- def _create_security_group_rule(self, **kwargs):
- rule_create_body = self.client.create_security_group_rule(**kwargs)
- # List rules and verify created rule is in response
- rule_list_body = (
- self.client.list_security_group_rules())
- rule_list = [rule['id']
- for rule in rule_list_body['security_group_rules']]
- self.assertIn(rule_create_body['security_group_rule']['id'],
- rule_list)
- self.addCleanup(self._delete_security_group_rule,
- rule_create_body['security_group_rule']['id'])
- return rule_create_body
-
- def _show_security_group_rule(self, **kwargs):
- show_rule_body = self.client.show_security_group_rule(kwargs['id'])
- for key, value in kwargs.items():
- self.assertEqual(value,
- show_rule_body['security_group_rule'][key],
- "%s does not match." % key)
-
- def _delete_security_group_rule(self, secgroup_rule_id):
- self.client.delete_security_group_rule(secgroup_rule_id)
- rule_list_body = self.client.list_security_group_rules()
- rule_list = [rule['id']
- for rule in rule_list_body['security_group_rules']]
- self.assertNotIn(secgroup_rule_id, rule_list)
-
- def _test_create_show_delete_security_group_rule(self, **kwargs):
- # The security group rule is deleted by the cleanup call in
- # _create_security_group_rule.
- rule_create_body = (
- self._create_security_group_rule(**kwargs)['security_group_rule'])
- self._show_security_group_rule(
- id=rule_create_body['id'],
- protocol=rule_create_body['protocol'],
- direction=rule_create_body['direction'],
- ethertype=rule_create_body['ethertype'])
+V6_PROTOCOL_INTS = {v
+ for k, v in constants.IP_PROTOCOL_MAP.items()
+ if k in (V6_PROTOCOL_NAMES | V6_PROTOCOL_LEGACY)}
diff --git a/neutron_tempest_plugin/api/test_allowed_address_pair.py b/neutron_tempest_plugin/api/test_allowed_address_pair.py
index 0137ff2..dd48382 100644
--- a/neutron_tempest_plugin/api/test_allowed_address_pair.py
+++ b/neutron_tempest_plugin/api/test_allowed_address_pair.py
@@ -53,11 +53,10 @@
# Create port with allowed address pair attribute
allowed_address_pairs = [{'ip_address': self.ip_address,
'mac_address': self.mac_address}]
- body = self.client.create_port(
- network_id=self.network['id'],
+ body = self.create_port(
+ self.network,
allowed_address_pairs=allowed_address_pairs)
- port_id = body['port']['id']
- self.addCleanup(self.client.delete_port, port_id)
+ port_id = body['id']
# Confirm port was created with allowed address pair attribute
body = self.client.list_ports()
@@ -69,9 +68,8 @@
def _update_port_with_address(self, address, mac_address=None, **kwargs):
# Create a port without allowed address pair
- body = self.client.create_port(network_id=self.network['id'])
- port_id = body['port']['id']
- self.addCleanup(self.client.delete_port, port_id)
+ body = self.create_port(self.network)
+ port_id = body['id']
if mac_address is None:
mac_address = self.mac_address
@@ -99,11 +97,9 @@
@decorators.idempotent_id('b3f20091-6cd5-472b-8487-3516137df933')
def test_update_port_with_multiple_ip_mac_address_pair(self):
# Create an ip _address and mac_address through port create
- resp = self.client.create_port(network_id=self.network['id'])
- newportid = resp['port']['id']
- self.addCleanup(self.client.delete_port, newportid)
- ipaddress = resp['port']['fixed_ips'][0]['ip_address']
- macaddress = resp['port']['mac_address']
+ resp = self.create_port(self.network)
+ ipaddress = resp['fixed_ips'][0]['ip_address']
+ macaddress = resp['mac_address']
# Update allowed address pair port with multiple ip and mac
allowed_address_pairs = {'ip_address': ipaddress,
diff --git a/neutron_tempest_plugin/api/test_availability_zones.py b/neutron_tempest_plugin/api/test_availability_zones.py
new file mode 100644
index 0000000..9d75c28
--- /dev/null
+++ b/neutron_tempest_plugin/api/test_availability_zones.py
@@ -0,0 +1,30 @@
+# Copyright 2018 AT&T Corporation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.common import utils
+from tempest.lib import decorators
+
+from neutron_tempest_plugin.api import base
+
+
+class ListAvailableZonesTest(base.BaseNetworkTest):
+
+ @decorators.idempotent_id('5a8a8a1a-c265-11e8-a611-080027758b73')
+ @utils.requires_ext(extension="availability_zone",
+ service="network")
+ def test_list_available_zones(self):
+ body = self.client.list_availability_zones()
+ self.assertIsNotNone(body)
+ self.assertIsInstance(body['availability_zones'], list)
diff --git a/neutron_tempest_plugin/api/test_extension_driver_port_security.py b/neutron_tempest_plugin/api/test_extension_driver_port_security.py
index 8a8c4f2..6b05557 100644
--- a/neutron_tempest_plugin/api/test_extension_driver_port_security.py
+++ b/neutron_tempest_plugin/api/test_extension_driver_port_security.py
@@ -19,15 +19,13 @@
from tempest.lib import exceptions as lib_exc
from neutron_tempest_plugin.api import base
-from neutron_tempest_plugin.api import base_security_groups as base_security
FAKE_IP = '10.0.0.1'
FAKE_MAC = '00:25:64:e8:19:dd'
@ddt.ddt
-class PortSecTest(base_security.BaseSecGroupTest,
- base.BaseNetworkTest):
+class PortSecTest(base.BaseNetworkTest):
@decorators.idempotent_id('7c338ddf-e64e-4118-bd33-e49a1f2f1495')
@utils.requires_ext(extension='port-security', service='network')
@@ -76,7 +74,7 @@
network = self.create_network()
self.create_subnet(network)
- sec_group_body, _ = self._create_security_group()
+ security_group = self.create_security_group()
port = self.create_port(network)
# Exception when set port-sec to False with sec-group defined
@@ -88,7 +86,7 @@
self.assertEmpty(port['security_groups'])
self.assertFalse(port['port_security_enabled'])
port = self.update_port(
- port, security_groups=[sec_group_body['security_group']['id']],
+ port, security_groups=[security_group['id']],
port_security_enabled=True)
self.assertNotEmpty(port['security_groups'])
@@ -102,11 +100,11 @@
def test_port_sec_update_pass(self):
network = self.create_network()
self.create_subnet(network)
- sec_group, _ = self._create_security_group()
- sec_group_id = sec_group['security_group']['id']
- port = self.create_port(network, security_groups=[sec_group_id],
- port_security_enabled=True)
+ security_group = self.create_security_group()
+ port = self.create_port(network,
+ security_groups=[security_group['id']],
+ port_security_enabled=True)
self.assertNotEmpty(port['security_groups'])
self.assertTrue(port['port_security_enabled'])
@@ -114,7 +112,7 @@
self.assertEmpty(port['security_groups'])
self.assertTrue(port['port_security_enabled'])
- port = self.update_port(port, security_groups=[sec_group_id])
+ port = self.update_port(port, security_groups=[security_group['id']])
self.assertNotEmpty(port['security_groups'])
port = self.update_port(port, security_groups=[],
port_security_enabled=False)
diff --git a/neutron_tempest_plugin/api/test_extra_dhcp_options.py b/neutron_tempest_plugin/api/test_extra_dhcp_options.py
index cb4dba8..844666a 100644
--- a/neutron_tempest_plugin/api/test_extra_dhcp_options.py
+++ b/neutron_tempest_plugin/api/test_extra_dhcp_options.py
@@ -56,11 +56,10 @@
@decorators.idempotent_id('d2c17063-3767-4a24-be4f-a23dbfa133c9')
def test_create_list_port_with_extra_dhcp_options(self):
# Create a port with Extra DHCP Options
- body = self.client.create_port(
- network_id=self.network['id'],
+ body = self.create_port(
+ self.network,
extra_dhcp_opts=self.extra_dhcp_opts)
- port_id = body['port']['id']
- self.addCleanup(self.client.delete_port, port_id)
+ port_id = body['id']
# Confirm port created has Extra DHCP Options
body = self.client.list_ports()
diff --git a/neutron_tempest_plugin/api/test_floating_ips.py b/neutron_tempest_plugin/api/test_floating_ips.py
index ea3d22e..9c1af14 100644
--- a/neutron_tempest_plugin/api/test_floating_ips.py
+++ b/neutron_tempest_plugin/api/test_floating_ips.py
@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import netaddr
from tempest.common import utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
@@ -121,3 +122,39 @@
self.assertEqual(port['status'], port_details['status'])
self.assertEqual(port['device_id'], port_details['device_id'])
self.assertEqual(port['device_owner'], port_details['device_owner'])
+
+
+class FloatingIPPoolTestJSON(base.BaseAdminNetworkTest):
+
+ required_extensions = ['router']
+
+ @decorators.idempotent_id('6c438332-4554-461c-9668-512ae09bf952')
+ @utils.requires_ext(extension="floatingip-pools", service="network")
+ def test_create_floatingip_from_specific_pool(self):
+ network = self.create_network(client=self.admin_client, external=True)
+ subnet1 = self.create_subnet(network, client=self.admin_client)
+ subnet2 = self.create_subnet(network, client=self.admin_client)
+ pools = self.client.list_floatingip_pools()["floatingip_pools"]
+
+ def test_create_floatingip_from_subnet(pools, subnet):
+ pool = None
+ for p in pools:
+                if (p['network_id'] == subnet['network_id'] and
+                        p['subnet_id'] == subnet['id']):
+ pool = p
+ break
+
+ self.assertTrue(pool)
+ new_floatingip = self.create_floatingip(
+ pool['network_id'], subnet_id=pool['subnet_id'])
+ cidr = netaddr.IPNetwork(pool['cidr'])
+ ip_address = netaddr.IPAddress(
+ new_floatingip['floating_ip_address'])
+ self.assertIn(ip_address, cidr)
+ fip_id = new_floatingip['id']
+ floatingip = self.client.get_floatingip(fip_id)['floatingip']
+ self.assertEqual(new_floatingip['floating_ip_address'],
+ floatingip['floating_ip_address'])
+
+ test_create_floatingip_from_subnet(pools, subnet1)
+ test_create_floatingip_from_subnet(pools, subnet2)
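
The FloatingIPPoolTestJSON test above selects the pool entry whose network_id
and subnet_id match the target subnet before creating a floating IP pinned to
that subnet. The matching step on its own, as a small sketch assuming `pools`
is the list returned by list_floatingip_pools (each entry carrying network_id,
subnet_id and cidr, as used in the test):

    import netaddr

    def find_pool_for_subnet(pools, subnet):
        """Return the floating IP pool entry matching a subnet, or None."""
        for pool in pools:
            if (pool['network_id'] == subnet['network_id'] and
                    pool['subnet_id'] == subnet['id']):
                return pool
        return None

    def fip_in_pool(floating_ip, pool):
        # The created floating IP is expected to fall inside the pool's CIDR.
        return (netaddr.IPAddress(floating_ip['floating_ip_address']) in
                netaddr.IPNetwork(pool['cidr']))
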
diff --git a/neutron_tempest_plugin/api/test_network_ip_availability.py b/neutron_tempest_plugin/api/test_network_ip_availability.py
index 1cdfc7e..e798680 100644
--- a/neutron_tempest_plugin/api/test_network_ip_availability.py
+++ b/neutron_tempest_plugin/api/test_network_ip_availability.py
@@ -19,7 +19,7 @@
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
-from tempest.lib import exceptions as lib_exc
+from tempest.lib import exceptions
from neutron_tempest_plugin.api import base
@@ -53,27 +53,26 @@
def skip_checks(cls):
super(NetworksIpAvailabilityTest, cls).skip_checks()
- def _get_used_ips(self, network, net_availability):
- if network:
+ @staticmethod
+ def _get_availability(network, net_availability):
+ if 'network_ip_availabilities' in net_availability:
for availability in net_availability['network_ip_availabilities']:
if availability['network_id'] == network['id']:
- return availability['used_ips']
+ return availability
+ raise exceptions.TempestException('Network IP Availability not '
+ 'found')
+ else:
+ return net_availability['network_ip_availability']
- def _cleanUp_port(self, port_id):
- # delete port, any way to avoid race
- try:
- self.client.delete_port(port_id)
- # if port is not found, this means it was deleted in the test
- except lib_exc.NotFound:
- pass
+ def _get_used_ips(self, network, net_availability):
+ availability = self._get_availability(network, net_availability)
+ return availability and availability['used_ips']
def _assert_total_and_used_ips(self, expected_used, expected_total,
network, net_availability):
- if network:
- for availability in net_availability['network_ip_availabilities']:
- if availability['network_id'] == network['id']:
- self.assertEqual(expected_total, availability['total_ips'])
- self.assertEqual(expected_used, availability['used_ips'])
+ availability = self._get_availability(network, net_availability)
+ self.assertEqual(expected_total, availability['total_ips'])
+ self.assertEqual(expected_used, availability['used_ips'])
def calc_total_ips(prefix, ip_version):
@@ -89,56 +88,87 @@
class NetworksIpAvailabilityIPv4Test(NetworksIpAvailabilityTest):
- @decorators.idempotent_id('0f33cc8c-1bf6-47d1-9ce1-010618240599')
- def test_admin_network_availability_before_subnet(self):
+ def setUp(self):
+ super(NetworksIpAvailabilityIPv4Test, self).setUp()
net_name = data_utils.rand_name('network')
- network = self.create_network(network_name=net_name)
- self.addCleanup(self.client.delete_network, network['id'])
+ self.network = self.create_network(network_name=net_name)
+
+ @decorators.idempotent_id('0f33cc8c-1bf6-47d1-9ce1-010618240599')
+ def test_list_ip_availability_before_subnet(self):
net_availability = self.admin_client.list_network_ip_availabilities()
- self._assert_total_and_used_ips(0, 0, network, net_availability)
+ self._assert_total_and_used_ips(0, 0, self.network, net_availability)
@decorators.idempotent_id('3aecd3b2-16ed-4b87-a54a-91d7b3c2986b')
- def test_net_ip_availability_after_subnet_and_ports(self):
- net_name = data_utils.rand_name('network')
- network = self.create_network(network_name=net_name)
- self.addCleanup(self.client.delete_network, network['id'])
- subnet = self.create_subnet(network, enable_dhcp=False)
+ def test_list_ip_availability_after_subnet_and_ports(self):
+ subnet = self.create_subnet(self.network, enable_dhcp=False)
prefix = netaddr.IPNetwork(subnet['cidr']).prefixlen
- self.addCleanup(self.client.delete_subnet, subnet['id'])
body = self.admin_client.list_network_ip_availabilities()
- used_ip = self._get_used_ips(network, body)
- port1 = self.client.create_port(network_id=network['id'])
- self.addCleanup(self.client.delete_port, port1['port']['id'])
- port2 = self.client.create_port(network_id=network['id'])
- self.addCleanup(self.client.delete_port, port2['port']['id'])
+ used_ips_before_port_create = self._get_used_ips(self.network, body)
+ self.create_port(self.network)
net_availability = self.admin_client.list_network_ip_availabilities()
self._assert_total_and_used_ips(
- used_ip + 2,
+ used_ips_before_port_create + 1,
calc_total_ips(prefix, self._ip_version),
- network, net_availability)
+ self.network, net_availability)
@decorators.idempotent_id('9f11254d-757b-492e-b14b-f52144e4ee7b')
- def test_net_ip_availability_after_port_delete(self):
- net_name = data_utils.rand_name('network')
- network = self.create_network(network_name=net_name)
- self.addCleanup(self.client.delete_network, network['id'])
- subnet = self.create_subnet(network, enable_dhcp=False)
- self.addCleanup(self.client.delete_subnet, subnet['id'])
- port = self.client.create_port(network_id=network['id'])
- self.addCleanup(self._cleanUp_port, port['port']['id'])
+ def test_list_ip_availability_after_port_delete(self):
+ self.create_subnet(self.network, enable_dhcp=False)
+ port = self.create_port(self.network)
net_availability = self.admin_client.list_network_ip_availabilities()
- used_ip = self._get_used_ips(network, net_availability)
- self.client.delete_port(port['port']['id'])
+ used_ips = self._get_used_ips(self.network, net_availability)
+ self.client.delete_port(port['id'])
- def get_net_availability():
+ def is_count_ip_availability_valid():
availabilities = self.admin_client.list_network_ip_availabilities()
- used_ip_after_port_delete = self._get_used_ips(network,
- availabilities)
- return used_ip - 1 == used_ip_after_port_delete
+ used_ips_after_port_delete = self._get_used_ips(self.network,
+ availabilities)
+ return used_ips - 1 == used_ips_after_port_delete
self.assertTrue(
test_utils.call_until_true(
- get_net_availability, DELETE_TIMEOUT, DELETE_SLEEP),
+ is_count_ip_availability_valid, DELETE_TIMEOUT, DELETE_SLEEP),
+ msg="IP address did not become available after port delete")
+
+ @decorators.idempotent_id('da1fbed5-b4a9-45b3-bdcb-b1660710d565')
+ def test_show_ip_availability_after_subnet_and_ports_create(self):
+ net_availability = self.admin_client.show_network_ip_availability(
+ self.network['id'])
+ self._assert_total_and_used_ips(0, 0, self.network, net_availability)
+ subnet = self.create_subnet(self.network, enable_dhcp=False)
+ prefix = netaddr.IPNetwork(subnet['cidr']).prefixlen
+ net_availability = self.admin_client.show_network_ip_availability(
+ self.network['id'])
+ used_ips_before_port_create = self._get_used_ips(self.network,
+ net_availability)
+ self.create_port(self.network)
+ net_availability = self.admin_client.show_network_ip_availability(
+ self.network['id'])
+ self._assert_total_and_used_ips(
+ used_ips_before_port_create + 1,
+ calc_total_ips(prefix, self._ip_version),
+ self.network,
+ net_availability)
+
+ @decorators.idempotent_id('a4d1e291-c152-4d62-9316-8c9bf1c6aee2')
+ def test_show_ip_availability_after_port_delete(self):
+ self.create_subnet(self.network, enable_dhcp=False)
+ port = self.create_port(self.network)
+ net_availability = self.admin_client.show_network_ip_availability(
+ self.network['id'])
+ used_ips = self._get_used_ips(self.network, net_availability)
+ self.client.delete_port(port['id'])
+
+ def is_count_ip_availability_valid():
+ availabilities = self.admin_client.show_network_ip_availability(
+ self.network['id'])
+ used_ips_after_port_delete = self._get_used_ips(self.network,
+ availabilities)
+ return used_ips - 1 == used_ips_after_port_delete
+
+ self.assertTrue(
+ test_utils.call_until_true(
+ is_count_ip_availability_valid, DELETE_TIMEOUT, DELETE_SLEEP),
msg="IP address did not become available after port delete")
diff --git a/neutron_tempest_plugin/api/test_networks_negative.py b/neutron_tempest_plugin/api/test_networks_negative.py
index 93f32f7..1cc8b93 100644
--- a/neutron_tempest_plugin/api/test_networks_negative.py
+++ b/neutron_tempest_plugin/api/test_networks_negative.py
@@ -28,8 +28,7 @@
@decorators.attr(type='negative')
@decorators.idempotent_id('9f80f25b-5d1b-4f26-9f6b-774b9b270819')
def test_delete_network_in_use(self):
- port = self.client.create_port(network_id=self.network['id'])
- self.addCleanup(self.client.delete_port, port['port']['id'])
+ self.create_port(self.network)
with testtools.ExpectedException(lib_exc.Conflict):
self.client.delete_subnet(self.subnet['id'])
with testtools.ExpectedException(lib_exc.Conflict):
diff --git a/neutron_tempest_plugin/api/test_ports.py b/neutron_tempest_plugin/api/test_ports.py
index 3b877c2..52783b9 100644
--- a/neutron_tempest_plugin/api/test_ports.py
+++ b/neutron_tempest_plugin/api/test_ports.py
@@ -134,6 +134,28 @@
expected = [s['id'], s['id']]
self.assertEqual(expected, subnets)
+ @decorators.idempotent_id('9700828d-86eb-4f21-9fa3-da487a2d77f2')
+ @utils.requires_ext(extension="uplink-status-propagation",
+ service="network")
+ def test_create_port_with_propagate_uplink_status(self):
+ body = self.create_port(self.network, propagate_uplink_status=True)
+ self.assertTrue(body['propagate_uplink_status'])
+ body = self.client.list_ports(id=body['id'])['ports'][0]
+ self.assertTrue(body['propagate_uplink_status'])
+ body = self.client.show_port(body['id'])['port']
+ self.assertTrue(body['propagate_uplink_status'])
+
+ @decorators.idempotent_id('c396a880-0c7b-409d-a80b-800a3d09bdc4')
+ @utils.requires_ext(extension="uplink-status-propagation",
+ service="network")
+ def test_create_port_without_propagate_uplink_status(self):
+ body = self.create_port(self.network)
+ self.assertFalse(body['propagate_uplink_status'])
+ body = self.client.list_ports(id=body['id'])['ports'][0]
+ self.assertFalse(body['propagate_uplink_status'])
+ body = self.client.show_port(body['id'])['port']
+ self.assertFalse(body['propagate_uplink_status'])
+
class PortsSearchCriteriaTest(base.BaseSearchCriteriaTest):
diff --git a/neutron_tempest_plugin/api/test_qos.py b/neutron_tempest_plugin/api/test_qos.py
index 1b88a7a..4f93577 100644
--- a/neutron_tempest_plugin/api/test_qos.py
+++ b/neutron_tempest_plugin/api/test_qos.py
@@ -1053,6 +1053,25 @@
policy_id=policy['id'],
direction=self.DIRECTION_EGRESS, min_kbps=201)
+ @decorators.idempotent_id('35baf998-ae65-495c-9902-35a0d11e8936')
+ @utils.requires_ext(extension="qos-bw-minimum-ingress",
+ service="network")
+ def test_rule_create_pass_for_direction_ingress(self):
+ policy = self.create_qos_policy(name='test-policy',
+ description='test policy',
+ shared=False)
+ self.admin_client.create_minimum_bandwidth_rule(
+ policy_id=policy['id'],
+ direction=self.DIRECTION_INGRESS,
+ min_kbps=201)
+
+ retrieved_policy = self.admin_client.show_qos_policy(policy['id'])
+ policy_rules = retrieved_policy['policy']['rules']
+ self.assertEqual(1, len(policy_rules))
+ self.assertEqual(qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH,
+ policy_rules[0]['type'])
+ self.assertEqual(self.DIRECTION_INGRESS, policy_rules[0]['direction'])
+
@decorators.idempotent_id('a49a6988-2568-47d2-931e-2dbc858943b3')
def test_rule_update(self):
policy = self.create_qos_policy(name='test-policy',
diff --git a/neutron_tempest_plugin/api/test_revisions.py b/neutron_tempest_plugin/api/test_revisions.py
index b03285d..0d590f6 100644
--- a/neutron_tempest_plugin/api/test_revisions.py
+++ b/neutron_tempest_plugin/api/test_revisions.py
@@ -12,16 +12,16 @@
import netaddr
+from neutron_lib import constants
from tempest.common import utils
from tempest.lib import decorators
from tempest.lib import exceptions
from neutron_tempest_plugin.api import base
-from neutron_tempest_plugin.api import base_security_groups as bsg
from neutron_tempest_plugin import config
-class TestRevisions(base.BaseAdminNetworkTest, bsg.BaseSecGroupTest):
+class TestRevisions(base.BaseAdminNetworkTest):
required_extensions = ['standard-attr-revisions']
@@ -111,46 +111,51 @@
@decorators.idempotent_id('6c256f71-c929-4200-b3dc-4e1843506be5')
@utils.requires_ext(extension="security-group", service="network")
def test_update_sg_group_bumps_revision(self):
- sg, name = self._create_security_group()
- self.assertIn('revision_number', sg['security_group'])
- update_body = self.client.update_security_group(
- sg['security_group']['id'], name='new_sg_name')
- self.assertGreater(update_body['security_group']['revision_number'],
- sg['security_group']['revision_number'])
+ security_group = self.create_security_group()
+ self.assertIn('revision_number', security_group)
+ updated_security_group = self.client.update_security_group(
+ security_group['id'], name='new_sg_name')['security_group']
+ self.assertGreater(updated_security_group['revision_number'],
+ security_group['revision_number'])
@decorators.idempotent_id('6489632f-8550-4453-a674-c98849742967')
@utils.requires_ext(extension="security-group", service="network")
def test_update_port_sg_binding_bumps_revision(self):
- net = self.create_network()
- self.addCleanup(self.client.delete_network, net['id'])
- port = self.create_port(net)
- self.addCleanup(self.client.delete_port, port['id'])
- sg = self._create_security_group()[0]
- self.client.update_port(
- port['id'], security_groups=[sg['security_group']['id']])
- updated = self.client.show_port(port['id'])
- updated2 = self.client.update_port(port['id'], security_groups=[])
- self.assertGreater(updated['port']['revision_number'],
+ network = self.create_network()
+ port = self.create_port(network)
+
+ security_group = self.create_security_group()
+ updated_port = self.client.update_port(
+ port['id'], security_groups=[security_group['id']])['port']
+ self.assertGreater(updated_port['revision_number'],
port['revision_number'])
- self.assertGreater(updated2['port']['revision_number'],
- updated['port']['revision_number'])
+
+ updated_port2 = self.client.update_port(
+ port['id'], security_groups=[])['port']
+ self.assertGreater(updated_port2['revision_number'],
+ updated_port['revision_number'])
@decorators.idempotent_id('29c7ab2b-d1d8-425d-8cec-fcf632960f22')
@utils.requires_ext(extension="security-group", service="network")
def test_update_sg_rule_bumps_sg_revision(self):
- sg, name = self._create_security_group()
- rule = self.client.create_security_group_rule(
- security_group_id=sg['security_group']['id'],
- protocol='tcp', direction='ingress', ethertype=self.ethertype,
- port_range_min=60, port_range_max=70)
- updated = self.client.show_security_group(sg['security_group']['id'])
- self.assertGreater(updated['security_group']['revision_number'],
- sg['security_group']['revision_number'])
- self.client.delete_security_group_rule(
- rule['security_group_rule']['id'])
- updated2 = self.client.show_security_group(sg['security_group']['id'])
- self.assertGreater(updated2['security_group']['revision_number'],
- updated['security_group']['revision_number'])
+ security_group = self.create_security_group()
+
+ security_group_rule = self.create_security_group_rule(
+ security_group=security_group,
+ protocol=constants.PROTO_NAME_TCP,
+ direction=constants.INGRESS_DIRECTION,
+ port_range_min=60,
+ port_range_max=70)
+ updated_security_group = self.client.show_security_group(
+ security_group['id'])['security_group']
+ self.assertGreater(updated_security_group['revision_number'],
+ security_group['revision_number'])
+
+ self.client.delete_security_group_rule(security_group_rule['id'])
+ updated_security_group2 = self.client.show_security_group(
+ security_group['id'])['security_group']
+ self.assertGreater(updated_security_group2['revision_number'],
+ updated_security_group['revision_number'])
@decorators.idempotent_id('db70c285-0365-4fac-9f55-2a0ad8cf55a8')
@utils.requires_ext(extension="allowed-address-pairs", service="network")
diff --git a/neutron_tempest_plugin/api/test_routers_negative.py b/neutron_tempest_plugin/api/test_routers_negative.py
index 2f4ad44..bbd6c5d 100644
--- a/neutron_tempest_plugin/api/test_routers_negative.py
+++ b/neutron_tempest_plugin/api/test_routers_negative.py
@@ -39,9 +39,9 @@
@decorators.idempotent_id('e3e751af-15a2-49cc-b214-a7154579e94f')
def test_delete_router_in_use(self):
# This port is deleted after a test by remove_router_interface.
- port = self.client.create_port(network_id=self.network['id'])
+ port = self.create_port(self.network)
self.client.add_router_interface_with_port_id(
- self.router['id'], port['port']['id'])
+ self.router['id'], port['id'])
with testtools.ExpectedException(lib_exc.Conflict):
self.client.delete_router(self.router['id'])
diff --git a/neutron_tempest_plugin/api/test_security_groups.py b/neutron_tempest_plugin/api/test_security_groups.py
index 299a62e..b6d344d 100644
--- a/neutron_tempest_plugin/api/test_security_groups.py
+++ b/neutron_tempest_plugin/api/test_security_groups.py
@@ -17,39 +17,40 @@
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
-from neutron_tempest_plugin.api import base_security_groups as base
+from neutron_tempest_plugin.api import base
+from neutron_tempest_plugin.api import base_security_groups
-class SecGroupTest(base.BaseSecGroupTest):
+class SecGroupTest(base.BaseNetworkTest):
required_extensions = ['security-group']
@decorators.idempotent_id('bfd128e5-3c92-44b6-9d66-7fe29d22c802')
def test_create_list_update_show_delete_security_group(self):
- group_create_body, name = self._create_security_group()
+ security_group = self.create_security_group()
# List security groups and verify if created group is there in response
- list_body = self.client.list_security_groups()
- secgroup_list = list()
- for secgroup in list_body['security_groups']:
- secgroup_list.append(secgroup['id'])
- self.assertIn(group_create_body['security_group']['id'], secgroup_list)
+ security_groups = self.client.list_security_groups()['security_groups']
+ self.assertIn(security_group['id'],
+ {sg['id'] for sg in security_groups})
+
# Update the security group
new_name = data_utils.rand_name('security')
new_description = data_utils.rand_name('security-description')
- update_body = self.client.update_security_group(
- group_create_body['security_group']['id'],
- name=new_name,
- description=new_description)
+ updated_security_group = self.client.update_security_group(
+ security_group['id'], name=new_name,
+ description=new_description)['security_group']
+
# Verify if security group is updated
- self.assertEqual(update_body['security_group']['name'], new_name)
- self.assertEqual(update_body['security_group']['description'],
+ self.assertEqual(updated_security_group['name'], new_name)
+ self.assertEqual(updated_security_group['description'],
new_description)
+
# Show details of the updated security group
- show_body = self.client.show_security_group(
- group_create_body['security_group']['id'])
- self.assertEqual(show_body['security_group']['name'], new_name)
- self.assertEqual(show_body['security_group']['description'],
+ observed_security_group = self.client.show_security_group(
+ security_group['id'])['security_group']
+ self.assertEqual(observed_security_group['name'], new_name)
+ self.assertEqual(observed_security_group['description'],
new_description)
@decorators.idempotent_id('7c0ecb10-b2db-11e6-9b14-000c29248b0d')
@@ -67,58 +68,48 @@
self.assertIsNotNone(secgrp['id'])
-class SecGroupProtocolTest(base.BaseSecGroupTest):
+class SecGroupProtocolTest(base.BaseNetworkTest):
+
+ protocol_names = base_security_groups.V4_PROTOCOL_NAMES
+ protocol_ints = base_security_groups.V4_PROTOCOL_INTS
@decorators.idempotent_id('282e3681-aa6e-42a7-b05c-c341aa1e3cdf')
- def test_create_show_delete_security_group_rule_names(self):
- group_create_body, _ = self._create_security_group()
- for protocol in base.V4_PROTOCOL_NAMES:
- self._test_create_show_delete_security_group_rule(
- security_group_id=group_create_body['security_group']['id'],
- protocol=protocol,
+ def test_security_group_rule_protocol_names(self):
+ self._test_security_group_rule_protocols(protocols=self.protocol_names)
+
+ @decorators.idempotent_id('66e47f1f-20b6-4417-8839-3cc671c7afa3')
+ def test_security_group_rule_protocol_ints(self):
+ self._test_security_group_rule_protocols(protocols=self.protocol_ints)
+
+ def _test_security_group_rule_protocols(self, protocols):
+ security_group = self.create_security_group()
+ for protocol in protocols:
+ self._test_security_group_rule(
+ security_group=security_group,
+ protocol=str(protocol),
direction=constants.INGRESS_DIRECTION,
ethertype=self.ethertype)
- @decorators.idempotent_id('66e47f1f-20b6-4417-8839-3cc671c7afa3')
- def test_create_show_delete_security_group_rule_integers(self):
- group_create_body, _ = self._create_security_group()
- for protocol in base.V4_PROTOCOL_INTS:
- self._test_create_show_delete_security_group_rule(
- security_group_id=group_create_body['security_group']['id'],
- protocol=protocol,
- direction=constants.INGRESS_DIRECTION,
- ethertype=self.ethertype)
+ def _test_security_group_rule(self, security_group, **kwargs):
+ security_group_rule = self.create_security_group_rule(
+ security_group=security_group, **kwargs)
+ observed_security_group_rule = self.client.show_security_group_rule(
+ security_group_rule['id'])['security_group_rule']
+ for key, value in kwargs.items():
+ self.assertEqual(value, security_group_rule[key],
+ "{!r} does not match.".format(key))
+ self.assertEqual(value, observed_security_group_rule[key],
+ "{!r} does not match.".format(key))
class SecGroupProtocolIPv6Test(SecGroupProtocolTest):
- _ip_version = constants.IP_VERSION_6
- @decorators.idempotent_id('1f7cc9f5-e0d5-487c-8384-3d74060ab530')
- def test_create_security_group_rule_with_ipv6_protocol_names(self):
- group_create_body, _ = self._create_security_group()
- for protocol in base.V6_PROTOCOL_NAMES:
- self._test_create_show_delete_security_group_rule(
- security_group_id=group_create_body['security_group']['id'],
- protocol=protocol,
- direction=constants.INGRESS_DIRECTION,
- ethertype=self.ethertype)
+ _ip_version = constants.IP_VERSION_6
+ protocol_names = base_security_groups.V6_PROTOCOL_NAMES
+ protocol_ints = base_security_groups.V6_PROTOCOL_INTS
+ protocol_legacy_names = base_security_groups.V6_PROTOCOL_LEGACY
@decorators.idempotent_id('c7d17b41-3b4e-4add-bb3b-6af59baaaffa')
- def test_create_security_group_rule_with_ipv6_protocol_legacy_names(self):
- group_create_body, _ = self._create_security_group()
- for protocol in base.V6_PROTOCOL_LEGACY:
- self._test_create_show_delete_security_group_rule(
- security_group_id=group_create_body['security_group']['id'],
- protocol=protocol,
- direction=constants.INGRESS_DIRECTION,
- ethertype=self.ethertype)
-
- @decorators.idempotent_id('bcfce0b7-bc96-40ae-9b08-3f6774ee0260')
- def test_create_security_group_rule_with_ipv6_protocol_integers(self):
- group_create_body, _ = self._create_security_group()
- for protocol in base.V6_PROTOCOL_INTS:
- self._test_create_show_delete_security_group_rule(
- security_group_id=group_create_body['security_group']['id'],
- protocol=protocol,
- direction=constants.INGRESS_DIRECTION,
- ethertype=self.ethertype)
+ def test_security_group_rule_protocol_legacy_names(self):
+ self._test_security_group_rule_protocols(
+ protocols=self.protocol_legacy_names)
diff --git a/neutron_tempest_plugin/api/test_security_groups_negative.py b/neutron_tempest_plugin/api/test_security_groups_negative.py
index c427691..1fcbd18 100644
--- a/neutron_tempest_plugin/api/test_security_groups_negative.py
+++ b/neutron_tempest_plugin/api/test_security_groups_negative.py
@@ -18,12 +18,14 @@
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-from neutron_tempest_plugin.api import base_security_groups as base
+from neutron_tempest_plugin.api import base
+from neutron_tempest_plugin.api import base_security_groups
+
LONG_NAME_NG = 'x' * (db_const.NAME_FIELD_SIZE + 1)
-class NegativeSecGroupTest(base.BaseSecGroupTest):
+class NegativeSecGroupTest(base.BaseNetworkTest):
required_extensions = ['security-group']
@@ -36,72 +38,68 @@
@decorators.idempotent_id('594edfa8-9a5b-438e-9344-49aece337d49')
def test_create_security_group_with_too_long_name(self):
self.assertRaises(lib_exc.BadRequest,
- self.client.create_security_group,
+ self.create_security_group,
name=LONG_NAME_NG)
@decorators.attr(type='negative')
@decorators.idempotent_id('b6b79838-7430-4d3f-8e07-51dfb61802c2')
def test_create_security_group_with_boolean_type_name(self):
self.assertRaises(lib_exc.BadRequest,
- self.client.create_security_group,
+ self.create_security_group,
name=True)
@decorators.attr(type='negative')
@decorators.idempotent_id('55100aa8-b24f-333c-0bef-64eefd85f15c')
def test_update_default_security_group_name(self):
- sg_list = self.client.list_security_groups(name='default')
- sg = sg_list['security_groups'][0]
+ security_group = self.client.list_security_groups(name='default')[
+ 'security_groups'][0]
self.assertRaises(lib_exc.Conflict, self.client.update_security_group,
- sg['id'], name='test')
+ security_group['id'], name='test')
@decorators.attr(type='negative')
@decorators.idempotent_id('c8510dd8-c3a8-4df9-ae44-24354db50960')
def test_update_security_group_with_too_long_name(self):
- sg_list = self.client.list_security_groups(name='default')
- sg = sg_list['security_groups'][0]
+ security_group = self.client.list_security_groups(name='default')[
+ 'security_groups'][0]
self.assertRaises(lib_exc.BadRequest,
self.client.update_security_group,
- sg['id'], name=LONG_NAME_NG)
+ security_group['id'], name=LONG_NAME_NG)
@decorators.attr(type='negative')
@decorators.idempotent_id('d9a14917-f66f-4eca-ab72-018563917f1b')
def test_update_security_group_with_boolean_type_name(self):
- sg_list = self.client.list_security_groups(name='default')
- sg = sg_list['security_groups'][0]
+ security_group = self.client.list_security_groups(name='default')[
+ 'security_groups'][0]
self.assertRaises(lib_exc.BadRequest,
self.client.update_security_group,
- sg['id'], name=True)
+ security_group['id'], name=True)
@decorators.attr(type='negative')
@decorators.idempotent_id('3200b1a8-d73b-48e9-b03f-e891a4abe2d3')
def test_delete_in_use_sec_group(self):
- sgroup = self.os_primary.network_client.create_security_group(
- name='sgroup')
- self.security_groups.append(sgroup['security_group'])
- port = self.client.create_port(
- network_id=self.network['id'],
- security_groups=[sgroup['security_group']['id']])
- self.ports.append(port['port'])
+ security_group = self.create_security_group()
+ self.create_port(network=self.network,
+ security_groups=[security_group['id']])
self.assertRaises(lib_exc.Conflict,
self.os_primary.network_client.delete_security_group,
- security_group_id=sgroup['security_group']['id'])
+ security_group_id=security_group['id'])
class NegativeSecGroupIPv6Test(NegativeSecGroupTest):
_ip_version = constants.IP_VERSION_6
-class NegativeSecGroupProtocolTest(base.BaseSecGroupTest):
+class NegativeSecGroupProtocolTest(base.BaseNetworkTest):
def _test_create_security_group_rule_with_bad_protocols(self, protocols):
- group_create_body, _ = self._create_security_group()
+ security_group = self.create_security_group()
# bad protocols can include v6 protocols because self.ethertype is v4
for protocol in protocols:
self.assertRaises(
lib_exc.BadRequest,
self.client.create_security_group_rule,
- security_group_id=group_create_body['security_group']['id'],
+ security_group_id=security_group['id'],
protocol=protocol, direction=constants.INGRESS_DIRECTION,
ethertype=self.ethertype)
@@ -109,10 +107,10 @@
@decorators.idempotent_id('cccbb0f3-c273-43ed-b3fc-1efc48833810')
def test_create_security_group_rule_with_ipv6_protocol_names(self):
self._test_create_security_group_rule_with_bad_protocols(
- base.V6_PROTOCOL_NAMES)
+ base_security_groups.V6_PROTOCOL_NAMES)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('8aa636bd-7060-4fdf-b722-cdae28e2f1ef')
def test_create_security_group_rule_with_ipv6_protocol_integers(self):
self._test_create_security_group_rule_with_bad_protocols(
- base.V6_PROTOCOL_INTS)
+ base_security_groups.V6_PROTOCOL_INTS)
diff --git a/neutron_tempest_plugin/api/test_timestamp.py b/neutron_tempest_plugin/api/test_timestamp.py
index f5888f9..9ec982d 100644
--- a/neutron_tempest_plugin/api/test_timestamp.py
+++ b/neutron_tempest_plugin/api/test_timestamp.py
@@ -11,14 +11,15 @@
# under the License.
import copy
+import time
+from neutron_lib import constants
from tempest.common import utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from neutron_tempest_plugin.api import base
from neutron_tempest_plugin.api import base_routers
-from neutron_tempest_plugin.api import base_security_groups
from neutron_tempest_plugin import config
CONF = config.CONF
@@ -276,7 +277,7 @@
show_fip['updated_at'])
-class TestTimeStampWithSecurityGroup(base_security_groups.BaseSecGroupTest):
+class TestTimeStampWithSecurityGroup(base.BaseNetworkTest):
required_extensions = ['standard-attr-timestamp']
@@ -287,66 +288,66 @@
@decorators.idempotent_id('a3150a7b-d31a-423a-abf3-45e71c97cbac')
def test_create_sg_with_timestamp(self):
- sg, _ = self._create_security_group()
+ security_group = self.create_security_group()
# Verifies body contains timestamp fields
- self.assertIsNotNone(sg['security_group']['created_at'])
- self.assertIsNotNone(sg['security_group']['updated_at'])
+ self.assertIsNotNone(security_group['created_at'])
+ self.assertIsNotNone(security_group['updated_at'])
@decorators.idempotent_id('432ae0d3-32b4-413e-a9b3-091ac76da31b')
def test_update_sg_with_timestamp(self):
- sgc, _ = self._create_security_group()
- sg = sgc['security_group']
- origin_updated_at = sg['updated_at']
- update_body = {'name': sg['name'] + 'new'}
- body = self.client.update_security_group(sg['id'], **update_body)
- updated_sg = body['security_group']
- new_updated_at = updated_sg['updated_at']
- self.assertEqual(sg['created_at'], updated_sg['created_at'])
- # Verify that origin_updated_at is not same with new_updated_at
- self.assertIsNot(origin_updated_at, new_updated_at)
+ security_group = self.create_security_group()
+
+ # Make sure update time will be different
+ time.sleep(2.)
+ updated_security_group = self.client.update_security_group(
+ security_group['id'], name=security_group['name'] + 'new')[
+ 'security_group']
+
+ # Verify that created_at hasn't changed
+ self.assertEqual(security_group['created_at'],
+ updated_security_group['created_at'])
+ # Verify that updated_at has changed
+ self.assertNotEqual(security_group['updated_at'],
+ updated_security_group['updated_at'])
@decorators.idempotent_id('521e6723-43d6-12a6-8c3d-f5042ad9fc32')
def test_show_sg_attribute_with_timestamp(self):
- sg, _ = self._create_security_group()
- body = self.client.show_security_group(sg['security_group']['id'])
- show_sg = body['security_group']
- # verify the timestamp from creation and showed is same
- self.assertEqual(sg['security_group']['created_at'],
- show_sg['created_at'])
- self.assertEqual(sg['security_group']['updated_at'],
- show_sg['updated_at'])
+ security_group = self.create_security_group()
+ observed_security_group = self.client.show_security_group(
+ security_group['id'])['security_group']
- def _prepare_sgrule_test(self):
- sg, _ = self._create_security_group()
- sg_id = sg['security_group']['id']
- direction = 'ingress'
- protocol = 'tcp'
- port_range_min = 77
- port_range_max = 77
- rule_create_body = self.client.create_security_group_rule(
- security_group_id=sg_id,
- direction=direction,
- ethertype=self.ethertype,
- protocol=protocol,
- port_range_min=port_range_min,
- port_range_max=port_range_max,
- remote_group_id=None,
- remote_ip_prefix=None
- )
- return rule_create_body['security_group_rule']
+ # Verify that created_at hasn't changed
+ self.assertEqual(security_group['created_at'],
+ observed_security_group['created_at'])
+ # Verify that updated_at hasn't changed
+ self.assertEqual(security_group['updated_at'],
+ observed_security_group['updated_at'])
+
+ def _create_security_group_rule(self):
+ security_group = self.create_security_group()
+ return self.create_security_group_rule(
+ security_group=security_group,
+ direction=constants.INGRESS_DIRECTION,
+ protocol=constants.PROTO_NAME_TCP,
+ port_range_min=77,
+ port_range_max=77)
@decorators.idempotent_id('83e8bd32-43e0-a3f0-1af3-12a5733c653e')
def test_create_sgrule_with_timestamp(self):
- sgrule = self._prepare_sgrule_test()
+ security_group_rule = self._create_security_group_rule()
# Verifies body contains timestamp fields
- self.assertIsNotNone(sgrule['created_at'])
- self.assertIsNotNone(sgrule['updated_at'])
+ self.assertIn('created_at', security_group_rule)
+ self.assertIn('updated_at', security_group_rule)
@decorators.idempotent_id('143da0e6-ba17-43ad-b3d7-03aa759c3cb4')
def test_show_sgrule_attribute_with_timestamp(self):
- sgrule = self._prepare_sgrule_test()
- body = self.client.show_security_group_rule(sgrule['id'])
- show_sgrule = body['security_group_rule']
- # verify the timestamp from creation and showed is same
- self.assertEqual(sgrule['created_at'], show_sgrule['created_at'])
- self.assertEqual(sgrule['updated_at'], show_sgrule['updated_at'])
+ security_group_rule = self._create_security_group_rule()
+
+ observed_security_group_rule = self.client.show_security_group_rule(
+ security_group_rule['id'])['security_group_rule']
+
+            # Verify that the timestamps from create and show match
+ self.assertEqual(security_group_rule['created_at'],
+ observed_security_group_rule['created_at'])
+ self.assertEqual(security_group_rule['updated_at'],
+ observed_security_group_rule['updated_at'])
diff --git a/neutron_tempest_plugin/common/constants.py b/neutron_tempest_plugin/common/constants.py
index 4dc7844..f695f6c 100644
--- a/neutron_tempest_plugin/common/constants.py
+++ b/neutron_tempest_plugin/common/constants.py
@@ -171,3 +171,11 @@
# Possible types of values (e.g. in QoS rule types)
VALUES_TYPE_CHOICES = "choices"
VALUES_TYPE_RANGE = "range"
+
+# Security group parameter values mapped by IP version
+DEFAULT_SECURITY_GROUP_RULE_PARAMS = {
+ lib_constants.IP_VERSION_4: {'ethertype': lib_constants.IPv4,
+ 'remote_ip_prefix': lib_constants.IPv4_ANY},
+ lib_constants.IP_VERSION_6: {'ethertype': lib_constants.IPv6,
+ 'remote_ip_prefix': lib_constants.IPv6_ANY},
+}
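+
+# For example (a quick sketch relying on well-known neutron_lib values):
+# DEFAULT_SECURITY_GROUP_RULE_PARAMS[lib_constants.IP_VERSION_4] maps to
+# {'ethertype': 'IPv4', 'remote_ip_prefix': '0.0.0.0/0'}.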
diff --git a/neutron_tempest_plugin/common/ip.py b/neutron_tempest_plugin/common/ip.py
new file mode 100644
index 0000000..1702bd3
--- /dev/null
+++ b/neutron_tempest_plugin/common/ip.py
@@ -0,0 +1,316 @@
+# Copyright (c) 2018 Red Hat, Inc.
+#
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import collections
+import subprocess
+
+import netaddr
+from neutron_lib import constants
+from oslo_log import log
+from oslo_utils import excutils
+
+from neutron_tempest_plugin.common import shell
+
+
+LOG = log.getLogger(__name__)
+
+
+class IPCommand(object):
+
+ sudo = 'sudo'
+ ip_path = '/sbin/ip'
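+
+    # A minimal usage sketch (hypothetical; it assumes ssh_client is an
+    # already connected neutron_tempest_plugin.common.ssh.Client):
+    #
+    #   ip = IPCommand(ssh_client=ssh_client, timeout=30)
+    #   addresses = ip.list_addresses(device='eth0')
+    #   routes = ip.list_routes()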
+
+ def __init__(self, ssh_client=None, timeout=None):
+ self.ssh_client = ssh_client
+ self.timeout = timeout
+
+ def get_command(self, obj, *command):
+ command_line = '{sudo!s} {ip_path!r} {object!s} {command!s}'.format(
+ sudo=self.sudo, ip_path=self.ip_path, object=obj,
+ command=subprocess.list2cmdline([str(c) for c in command]))
+ return command_line
+
+ def execute(self, obj, *command):
+ command_line = self.get_command(obj, *command)
+ return shell.execute(command_line, ssh_client=self.ssh_client,
+ timeout=self.timeout).stdout
+
+ def configure_vlan_subport(self, port, subport, vlan_tag, subnets):
+ addresses = self.list_addresses()
+ try:
+ subport_device = get_port_device_name(addresses=addresses,
+ port=subport)
+ except ValueError:
+ pass
+ else:
+ LOG.debug('Interface %r already configured.', subport_device)
+ return subport_device
+
+ subport_ips = [
+ "{!s}/{!s}".format(ip, prefix_len)
+ for ip, prefix_len in _get_ip_address_prefix_len_pairs(
+ port=subport, subnets=subnets)]
+ if not subport_ips:
+ raise ValueError(
+ "Unable to get IP address and subnet prefix lengths for "
+ "subport")
+
+ port_device = get_port_device_name(addresses=addresses, port=port)
+ subport_device = '{!s}.{!s}'.format(port_device, vlan_tag)
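+        # e.g. port_device 'eth1' and vlan_tag 10 yield the subport device
+        # name 'eth1.10' (illustrative values)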
+ LOG.debug('Configuring VLAN subport interface %r on top of interface '
+ '%r with IPs: %s', subport_device, port_device,
+ ', '.join(subport_ips))
+
+ self.add_link(link=port_device, name=subport_device, link_type='vlan',
+ segmentation_id=vlan_tag)
+ self.set_link(device=subport_device, state='up')
+ for subport_ip in subport_ips:
+ self.add_address(address=subport_ip, device=subport_device)
+ return subport_device
+
+ def list_addresses(self, device=None, ip_addresses=None, port=None,
+ subnets=None):
+ command = ['list']
+ if device:
+ command += ['dev', device]
+ output = self.execute('address', *command)
+ addresses = list(parse_addresses(output))
+
+ return list_ip_addresses(addresses=addresses,
+ ip_addresses=ip_addresses, port=port,
+ subnets=subnets)
+
+ def add_link(self, name, link_type, link=None, segmentation_id=None):
+ command = ['add']
+ if link:
+ command += ['link', link]
+ command += ['name', name, 'type', link_type]
+        if segmentation_id:
+ command += ['id', segmentation_id]
+ return self.execute('link', *command)
+
+ def set_link(self, device, state=None):
+ command = ['set', 'dev', device]
+ if state:
+ command.append(state)
+ return self.execute('link', *command)
+
+ def add_address(self, address, device):
+ # ip addr add 192.168.1.1/24 dev em1
+ return self.execute('address', 'add', address, 'dev', device)
+
+ def list_routes(self, *args):
+ output = self.execute('route', 'show', *args)
+ return list(parse_routes(output))
+
+
+def parse_addresses(command_output):
+ address = device = None
+ addresses = []
+ for i, line in enumerate(command_output.split('\n')):
+ try:
+ line_number = i + 1
+ fields = line.strip().split()
+ if not fields:
+ continue
+ indent = line.index(fields[0] + ' ')
+ if indent == 0:
+ # example of line
+ # 2: enp0s25: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000 # noqa
+ address = None
+ name = fields[1]
+ if name.endswith(':'):
+ name = name[:-1]
+ if '@' in name:
+ name, parent = name.split('@', 1)
+ else:
+ parent = None
+
+ if len(fields) > 2:
+ # flags example: <LOOPBACK,UP,LOWER_UP>
+ flags = fields[2]
+ if flags.startswith('<'):
+ flags = flags[1:]
+                        if flags.endswith('>'):
+ flags = flags[:-1]
+ flags = flags.split(',')
+
+ device = Device(name=name, parent=parent, flags=flags,
+ properties=dict(parse_properties(fields[3:])))
+ LOG.debug("Device parsed: %r", device)
+
+ elif indent == 4:
+ address = Address.create(
+ family=fields[0], address=fields[1], device=device,
+ properties=dict(parse_properties(fields[2:])))
+ addresses.append(address)
+ LOG.debug("Address parsed: %r", address)
+
+ elif indent == 7:
+ address.properties.update(parse_properties(fields))
+ LOG.debug("Address properties parsed: %r", address.properties)
+
+ else:
+ assert False, "Invalid line indentation: {!r}".format(indent)
+
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception("Error parsing ip command output at line %d:\n"
+ "%r\n",
+ line_number, line)
+ raise
+
+ return addresses
+
+
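+# Pairs up consecutive fields, e.g. (illustrative input):
+#   list(parse_properties(['mtu', '1500', 'qdisc', 'noqueue']))
+#   == [('mtu', '1500'), ('qdisc', 'noqueue')]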
+def parse_properties(fields):
+ for i, field in enumerate(fields):
+ if i % 2 == 0:
+ key = field
+ else:
+ yield key, field
+
+
+class HasProperties(object):
+
+ def __getattr__(self, name):
+ try:
+ return self.properties[name]
+ except KeyError:
+ pass
+ # This should raise AttributeError
+ return getattr(super(HasProperties, self), name)
+
+
+class Address(HasProperties,
+ collections.namedtuple('Address',
+ ['family', 'address', 'device',
+ 'properties'])):
+
+ _subclasses = {}
+
+ @classmethod
+ def create(cls, family, address, device, properties):
+ cls = cls._subclasses.get(family, cls)
+ return cls(family=family, address=address, device=device,
+ properties=properties)
+
+ @classmethod
+ def register_subclass(cls, family, subclass=None):
+ if not issubclass(subclass, cls):
+ msg = "{!r} is not sub-class of {!r}".format(cls, Address)
+ raise TypeError(msg)
+ cls._subclasses[family] = subclass
+
+
+class Device(HasProperties,
+ collections.namedtuple('Device',
+ ['name', 'parent', 'flags',
+ 'properties'])):
+ pass
+
+
+def register_address_subclass(families):
+
+ def decorator(subclass):
+ for family in families:
+ Address.register_subclass(family=family, subclass=subclass)
+ return subclass
+
+ return decorator
+
+
+@register_address_subclass(['inet', 'inet6'])
+class InetAddress(Address):
+
+ @property
+ def ip(self):
+ return self.network.ip
+
+ @property
+ def network(self):
+ return netaddr.IPNetwork(self.address)
+
+
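+# Example (illustrative): the line "default via 10.0.0.1 dev eth0" yields
+# Route(dest='0.0.0.0/0', properties={'via': '10.0.0.1', 'dev': 'eth0'}).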
+def parse_routes(command_output):
+ for line in command_output.split('\n'):
+ fields = line.strip().split()
+ if fields:
+ dest = fields[0]
+ properties = dict(parse_properties(fields[1:]))
+ if dest == 'default':
+ dest = constants.IPv4_ANY
+ via = properties.get('via')
+ if via:
+ dest = constants.IP_ANY[netaddr.IPAddress(via).version]
+ yield Route(dest=dest, properties=properties)
+
+
+def list_ip_addresses(addresses, ip_addresses=None, port=None,
+ subnets=None):
+ if port:
+ # filter addresses by port IP addresses
+ ip_addresses = set(ip_addresses) if ip_addresses else set()
+ ip_addresses.update(list_port_ip_addresses(port=port,
+ subnets=subnets))
+ if ip_addresses:
+ addresses = [a for a in addresses if (hasattr(a, 'ip') and
+ str(a.ip) in ip_addresses)]
+ return addresses
+
+
+def list_port_ip_addresses(port, subnets=None):
+ fixed_ips = port['fixed_ips']
+ if subnets:
+ subnets = {subnet['id']: subnet for subnet in subnets}
+ fixed_ips = [fixed_ip
+ for fixed_ip in fixed_ips
+ if fixed_ip['subnet_id'] in subnets]
+    return [fixed_ip['ip_address'] for fixed_ip in fixed_ips]
+
+
+def get_port_device_name(addresses, port):
+ for address in list_ip_addresses(addresses=addresses, port=port):
+ return address.device.name
+
+ msg = "Port %r fixed IPs not found on server.".format(port['id'])
+ raise ValueError(msg)
+
+
+def _get_ip_address_prefix_len_pairs(port, subnets):
+ subnets = {subnet['id']: subnet for subnet in subnets}
+ for fixed_ip in port['fixed_ips']:
+ subnet = subnets.get(fixed_ip['subnet_id'])
+ if subnet:
+ yield (fixed_ip['ip_address'],
+ netaddr.IPNetwork(subnet['cidr']).prefixlen)
+
+
+class Route(HasProperties,
+ collections.namedtuple('Route',
+ ['dest', 'properties'])):
+
+ @property
+ def dest_ip(self):
+ return netaddr.IPNetwork(self.dest)
+
+ @property
+ def via_ip(self):
+ return netaddr.IPAddress(self.via)
+
+ @property
+ def src_ip(self):
+ return netaddr.IPAddress(self.src)
diff --git a/neutron_tempest_plugin/common/shell.py b/neutron_tempest_plugin/common/shell.py
new file mode 100644
index 0000000..bd4a7a3
--- /dev/null
+++ b/neutron_tempest_plugin/common/shell.py
@@ -0,0 +1,180 @@
+# Copyright (c) 2018 Red Hat, Inc.
+#
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import collections
+import subprocess
+import sys
+
+from oslo_log import log
+from tempest.lib import exceptions as lib_exc
+
+from neutron_tempest_plugin.common import ssh
+from neutron_tempest_plugin import config
+from neutron_tempest_plugin import exceptions
+
+
+LOG = log.getLogger(__name__)
+
+CONF = config.CONF
+
+if ssh.Client.proxy_jump_host:
+ # Perform all SSH connections passing through configured SSH server
+ SSH_PROXY_CLIENT = ssh.Client.create_proxy_client()
+else:
+ SSH_PROXY_CLIENT = None
+
+
+def execute(command, ssh_client=None, timeout=None, check=True):
+ """Execute command inside a remote or local shell
+
+ :param command: command string to be executed
+
+ :param ssh_client: SSH client instance used for remote shell execution
+
+ :param timeout: command execution timeout in seconds
+
+    :param check: when False it doesn't raise ShellCommandFailed when the
+    exit status is not zero. True by default
+
+    :returns: a ShellExecuteResult named tuple with the command, timeout,
+    exit status, STDOUT and STDERR of the execution
+
+    :raises ShellTimeoutExpired: when the timeout expires before command
+    execution terminates. In such a case it kills the process, then it
+    eventually tries to read the STDOUT and STDERR buffers (not fully
+    implemented) before raising the exception.
+
+    :raises ShellCommandFailed: when command execution terminates with a
+    non-zero exit status.
+    """
+ ssh_client = ssh_client or SSH_PROXY_CLIENT
+ if timeout:
+ timeout = float(timeout)
+
+ if ssh_client:
+ result = execute_remote_command(command=command, timeout=timeout,
+ ssh_client=ssh_client)
+ else:
+ result = execute_local_command(command=command, timeout=timeout)
+
+ if result.exit_status == 0:
+ LOG.debug("Command %r succeeded:\n"
+ "stderr:\n%s\n"
+ "stdout:\n%s\n",
+ command, result.stderr, result.stdout)
+ elif result.exit_status is None:
+ LOG.debug("Command %r timeout expired (timeout=%s):\n"
+ "stderr:\n%s\n"
+ "stdout:\n%s\n",
+ command, timeout, result.stderr, result.stdout)
+ else:
+ LOG.debug("Command %r failed (exit_status=%s):\n"
+ "stderr:\n%s\n"
+ "stdout:\n%s\n",
+ command, result.exit_status, result.stderr, result.stdout)
+ if check:
+ result.check()
+
+ return result
+
+
+def execute_remote_command(command, ssh_client, timeout=None):
+ """Execute command on a remote host using SSH client"""
+ LOG.debug("Executing command %r on remote host %r (timeout=%r)...",
+ command, ssh_client.host, timeout)
+
+ stdout = stderr = exit_status = None
+
+ try:
+ # TODO(fressi): re-implement to capture stderr
+ stdout = ssh_client.exec_command(command, timeout=timeout)
+ exit_status = 0
+
+ except lib_exc.TimeoutException:
+ # TODO(fressi): re-implement to capture STDOUT and STDERR and make
+ # sure process is killed
+ pass
+
+ except lib_exc.SSHExecCommandFailed as ex:
+ # Please note class SSHExecCommandFailed has been re-based on
+        # top of ShellCommandFailed
+ stdout = ex.stdout
+ stderr = ex.stderr
+ exit_status = ex.exit_status
+
+ return ShellExecuteResult(command=command, timeout=timeout,
+ exit_status=exit_status,
+ stdout=stdout, stderr=stderr)
+
+
+def execute_local_command(command, timeout=None):
+ """Execute command on local host using local shell"""
+
+ LOG.debug("Executing command %r on local host (timeout=%r)...",
+ command, timeout)
+
+ process = subprocess.Popen(command, shell=True,
+ universal_newlines=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+
+ if timeout and sys.version_info < (3, 3):
+        # TODO(fressi): re-implement timeout support on older Pythons
+        LOG.warning("Popen.communicate method doesn't support timeout "
+                    "on Python %r", sys.version)
+ timeout = None
+
+ # Wait for process execution while reading STDERR and STDOUT streams
+ if timeout:
+ try:
+ stdout, stderr = process.communicate(timeout=timeout)
+ except subprocess.TimeoutExpired:
+            # At this point the process is expected to be still running,
+            # therefore it has to be killed later after calling poll()
+ LOG.exception("Command %r timeout expired.", command)
+ stdout = stderr = None
+ else:
+ stdout, stderr = process.communicate()
+
+ # Check process termination status
+ exit_status = process.poll()
+ if exit_status is None:
+ # The process is still running after calling communicate():
+        # kill it and then read the buffers again
+ process.kill()
+ stdout, stderr = process.communicate()
+
+ return ShellExecuteResult(command=command, timeout=timeout,
+ stdout=stdout, stderr=stderr,
+ exit_status=exit_status)
+
+
+class ShellExecuteResult(collections.namedtuple(
+ 'ShellExecuteResult', ['command', 'timeout', 'exit_status', 'stdout',
+ 'stderr'])):
+
+ def check(self):
+ if self.exit_status is None:
+ raise exceptions.ShellTimeoutExpired(command=self.command,
+ timeout=self.timeout,
+ stderr=self.stderr,
+ stdout=self.stdout)
+
+ elif self.exit_status != 0:
+            raise exceptions.ShellCommandFailed(command=self.command,
+                                                exit_status=self.exit_status,
+                                                stderr=self.stderr,
+                                                stdout=self.stdout)
diff --git a/neutron_tempest_plugin/common/ssh.py b/neutron_tempest_plugin/common/ssh.py
index 4829db2..ea30a28 100644
--- a/neutron_tempest_plugin/common/ssh.py
+++ b/neutron_tempest_plugin/common/ssh.py
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import locale
import os
import time
@@ -21,6 +22,7 @@
from tempest.lib import exceptions
from neutron_tempest_plugin import config
+from neutron_tempest_plugin import exceptions as exc
CONF = config.CONF
@@ -41,13 +43,13 @@
def __init__(self, host, username, password=None, timeout=None, pkey=None,
channel_timeout=10, look_for_keys=False, key_filename=None,
- port=22, proxy_client=None):
+ port=22, proxy_client=None, create_proxy_client=True):
timeout = timeout or self.timeout
- if self.proxy_jump_host:
+ if not proxy_client and create_proxy_client and self.proxy_jump_host:
# Perform all SSH connections passing through configured SSH server
- proxy_client = proxy_client or self.create_proxy_client(
+ proxy_client = self.create_proxy_client(
timeout=timeout, channel_timeout=channel_timeout)
super(Client, self).__init__(
@@ -113,10 +115,10 @@
"set 'proxy_jump_keyfile' to provide a valid SSH key "
"file.", login)
- return ssh.Client(
+ return Client(
host=host, username=username, password=password,
look_for_keys=look_for_keys, key_filename=key_file,
- port=port, proxy_client=None, **kwargs)
+ port=port, create_proxy_client=False, **kwargs)
# attribute used to keep reference to opened client connection
_client = None
@@ -145,6 +147,10 @@
# more times
_get_ssh_connection = connect
+    # This overrides the superclass test_connection_auth method so that it
+    # doesn't close the connection
+ test_connection_auth = connect
+
def close(self):
"""Closes connection to SSH server and cleanup resources."""
client = self._client
@@ -152,6 +158,9 @@
client.close()
self._client = None
+ def __exit__(self, _exception_type, _exception_value, _traceback):
+ self.close()
+
def open_session(self):
"""Gets connection to SSH server and open a new paramiko.Channel
@@ -170,8 +179,18 @@
user=self.username,
password=self.password)
- def execute_script(self, script, become_root=False,
- combine_stderr=True, shell='sh -eux'):
+ def exec_command(self, cmd, encoding="utf-8", timeout=None):
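+        # Temporarily applies the given timeout to just this command, e.g.
+        # (hypothetical values): client.exec_command('uname -a', timeout=60)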
+ if timeout:
+ original_timeout = self.timeout
+ self.timeout = timeout
+ try:
+ return super(Client, self).exec_command(cmd=cmd, encoding=encoding)
+ finally:
+ if timeout:
+ self.timeout = original_timeout
+
+ def execute_script(self, script, become_root=False, combine_stderr=False,
+ shell='sh -eux', timeout=None, **params):
"""Connect to remote machine and executes script.
Implementation note: it passes script lines to shell interpreter via
@@ -191,67 +210,99 @@
variable would interrupt script execution with an error and every
command executed by the script is going to be traced to STDERR.
+ :param timeout: time in seconds to wait before brutally aborting
+ script execution.
+
+ :param **params: script parameter values to be assigned at the
+ beginning of the script.
+
:returns output written by script to STDOUT.
:raises tempest.lib.exceptions.SSHTimeout: in case it fails to connect
to remote server or it fails to open a channel.
:raises tempest.lib.exceptions.SSHExecCommandFailed: in case command
- script exits with non zero exit status.
+ script exits with non zero exit status or times out.
"""
+ if params:
+ # Append script parameters at the beginning of the script
+ header = ''.join(sorted(["{!s}={!s}\n".format(k, v)
+ for k, v in params.items()]))
+ script = header + '\n' + script
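+            # e.g. params={'DEV': 'eth0', 'VLAN': 10} (made-up values) would
+            # prepend the lines "DEV=eth0" and "VLAN=10" to the script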
+
+ timeout = timeout or self.timeout
+ end_of_time = time.time() + timeout
+ output_data = b''
+ error_data = b''
+ exit_status = None
+
channel = self.open_session()
with channel:
            # Combine STDOUT and STDERR into a single stream when requested
channel.set_combine_stderr(combine_stderr)
- # Set default environment
- channel.update_environment({
- # Language and encoding
- 'LC_ALL': os.environ.get('LC_ALL') or self.default_ssh_lang,
- 'LANG': os.environ.get('LANG') or self.default_ssh_lang
- })
+ # Update local environment
+ lang, encoding = locale.getlocale()
+ if not lang:
+ lang, encoding = locale.getdefaultlocale()
+ _locale = '.'.join([lang, encoding])
+ channel.update_environment({'LC_ALL': _locale,
+ 'LANG': _locale})
if become_root:
shell = 'sudo ' + shell
# Spawn a Bash
channel.exec_command(shell)
+ end_of_script = False
lines_iterator = iter(script.splitlines())
- output_data = b''
- error_data = b''
-
- while not channel.exit_status_ready():
+ while (not channel.exit_status_ready() and
+ time.time() < end_of_time):
# Drain incoming data buffers
while channel.recv_ready():
output_data += channel.recv(self.buf_size)
while channel.recv_stderr_ready():
error_data += channel.recv_stderr(self.buf_size)
- if channel.send_ready():
+ if not end_of_script and channel.send_ready():
try:
line = next(lines_iterator)
except StopIteration:
# Finalize Bash script execution
channel.shutdown_write()
+ end_of_script = True
else:
# Send script to Bash STDIN line by line
- channel.send((line + '\n').encode('utf-8'))
- else:
- time.sleep(.1)
+ channel.send((line + '\n').encode(encoding))
+ continue
+
+ time.sleep(.1)
# Get exit status and drain incoming data buffers
- exit_status = channel.recv_exit_status()
+ if channel.exit_status_ready():
+ exit_status = channel.recv_exit_status()
while channel.recv_ready():
output_data += channel.recv(self.buf_size)
while channel.recv_stderr_ready():
error_data += channel.recv_stderr(self.buf_size)
- if exit_status != 0:
- raise exceptions.SSHExecCommandFailed(
- command='bash', exit_status=exit_status,
- stderr=error_data.decode('utf-8'),
- stdout=output_data.decode('utf-8'))
+ stdout = _buffer_to_string(output_data, encoding)
+ if exit_status == 0:
+ return stdout
- return output_data.decode('utf-8')
+ stderr = _buffer_to_string(error_data, encoding)
+ if exit_status is None:
+ raise exc.SSHScriptTimeoutExpired(
+ command=shell, host=self.host, script=script, stderr=stderr,
+ stdout=stdout, timeout=timeout)
+ else:
+ raise exc.SSHScriptFailed(
+ command=shell, host=self.host, script=script, stderr=stderr,
+ stdout=stdout, exit_status=exit_status)
+
+
+def _buffer_to_string(data_buffer, encoding):
+ return data_buffer.decode(encoding).replace("\r\n", "\n").replace(
+ "\r", "\n")
diff --git a/neutron_tempest_plugin/common/utils.py b/neutron_tempest_plugin/common/utils.py
index fa7bb8b..3649cb6 100644
--- a/neutron_tempest_plugin/common/utils.py
+++ b/neutron_tempest_plugin/common/utils.py
@@ -88,3 +88,17 @@
raise self.skipTest(msg)
return inner
return decor
+
+
+def override_class(overriden_class, overrider_class):
+ """Override class definition with a MixIn class
+
+ If overriden_class is not a subclass of overrider_class then it creates
+ a new class that has as bases overrider_class and overriden_class.
+ """
+
+ if not issubclass(overriden_class, overrider_class):
+ name = overriden_class.__name__
+ bases = (overrider_class, overriden_class)
+ overriden_class = type(name, bases, {})
+ return overriden_class
diff --git a/neutron_tempest_plugin/config.py b/neutron_tempest_plugin/config.py
index 030a126..6217456 100644
--- a/neutron_tempest_plugin/config.py
+++ b/neutron_tempest_plugin/config.py
@@ -21,6 +21,10 @@
cfg.ListOpt('provider_vlans',
default=[],
help='List of provider networks available in the deployment.'),
+ cfg.IntOpt('provider_net_base_segm_id',
+ default=3000,
+ help='Base segmentation ID to create provider networks. '
+ 'This value will be increased in case of conflict.'),
cfg.BoolOpt('specify_floating_ip_address_available',
default=True,
help='Allow passing an IP Address of the floating ip when '
@@ -53,6 +57,10 @@
'"mtu":<MTU> - integer '
'"cidr"<SUBNET/MASK> - string '
'"provider:segmentation_id":<VLAN_ID> - integer'),
+ cfg.StrOpt('q_agent',
+ default=None,
+ choices=['None', 'linuxbridge', 'ovs', 'sriov'],
+ help='Agent used for devstack@q-agt.service'),
# Option for feature to connect via SSH to VMs using an intermediate SSH
# server
diff --git a/neutron_tempest_plugin/exceptions.py b/neutron_tempest_plugin/exceptions.py
index c9264ca..895cb40 100644
--- a/neutron_tempest_plugin/exceptions.py
+++ b/neutron_tempest_plugin/exceptions.py
@@ -15,16 +15,86 @@
from tempest.lib import exceptions
-TempestException = exceptions.TempestException
+from neutron_tempest_plugin.common import utils
-class InvalidConfiguration(TempestException):
+class NeutronTempestPluginException(exceptions.TempestException):
+
+ def __init__(self, **kwargs):
+ super(NeutronTempestPluginException, self).__init__(**kwargs)
+ self._properties = kwargs
+
+ def __getattr__(self, name):
+ try:
+ return self._properties[name]
+ except KeyError:
+ pass
+
+ msg = ("AttributeError: {!r} object has no attribute {!r}").format(
+ self, name)
+ raise AttributeError(msg)
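+
+    # Keyword arguments given to the constructor become readable as
+    # attributes, e.g. (hypothetical values):
+    #   error = ShellCommandFailed(command='false', exit_status=1,
+    #                              stderr='', stdout='')
+    #   error.exit_status == 1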
+
+
+class InvalidConfiguration(NeutronTempestPluginException):
message = "Invalid Configuration"
-class InvalidCredentials(TempestException):
+class InvalidCredentials(NeutronTempestPluginException):
message = "Invalid Credentials"
-class InvalidServiceTag(TempestException):
+class InvalidServiceTag(NeutronTempestPluginException):
message = "Invalid service tag"
+
+
+class SSHScriptException(exceptions.TempestException):
+ """Base class for SSH client execute_script() exceptions"""
+
+
+class ShellError(NeutronTempestPluginException):
+ pass
+
+
+class ShellCommandFailed(ShellError):
+ """Raised when shell command exited with non-zero status
+
+ """
+ message = ("Command %(command)r failed, exit status: %(exit_status)d, "
+ "stderr:\n%(stderr)s\n"
+ "stdout:\n%(stdout)s")
+
+
+class SSHScriptFailed(ShellCommandFailed):
+ message = ("Command %(command)r failed, exit status: %(exit_status)d, "
+ "host: %(host)r\n"
+ "script:\n%(script)s\n"
+ "stderr:\n%(stderr)s\n"
+ "stdout:\n%(stdout)s")
+
+
+class ShellTimeoutExpired(ShellError):
+ """Raised when shell command timeouts and has been killed before exiting
+
+ """
+ message = ("Command '%(command)s' timed out: %(timeout)d, "
+ "stderr:\n%(stderr)s\n"
+ "stdout:\n%(stdout)s")
+
+
+class SSHScriptTimeoutExpired(ShellTimeoutExpired):
+ message = ("Command '%(command)s', timed out: %(timeout)d "
+ "host: %(host)r\n"
+ "script:\n%(script)s\n"
+ "stderr:\n%(stderr)s\n"
+ "stdout:\n%(stdout)s")
+
+
+# Patch SSHExecCommandFailed exception to make sure we can access to fields
+# command, exit_status, STDOUT and STDERR when SSH client reports command
+# failure
+exceptions.SSHExecCommandFailed = utils.override_class(
+ exceptions.SSHExecCommandFailed, ShellCommandFailed)
+
+# The above code created a new SSHExecCommandFailed class based on top
+# of ShellCommandFailed
+assert issubclass(exceptions.SSHExecCommandFailed, ShellCommandFailed)
diff --git a/neutron_tempest_plugin/scenario/admin/test_floatingip.py b/neutron_tempest_plugin/scenario/admin/test_floatingip.py
index 1af5502..511452c 100644
--- a/neutron_tempest_plugin/scenario/admin/test_floatingip.py
+++ b/neutron_tempest_plugin/scenario/admin/test_floatingip.py
@@ -14,7 +14,6 @@
# under the License.
from tempest.common import utils
from tempest.common import waiters
-from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from neutron_tempest_plugin.common import ssh
@@ -38,22 +37,17 @@
cls.create_router_interface(router['id'], cls.subnets[0]['id'])
# Create keypair with admin privileges
cls.keypair = cls.create_keypair(client=cls.os_admin.keypairs_client)
- # Create security group with admin privileges
- cls.secgroup = cls.os_admin.network_client.create_security_group(
- name=data_utils.rand_name('secgroup'))['security_group']
- # Execute funcs to achieve ssh and ICMP capabilities
- funcs = [cls.create_loginable_secgroup_rule,
- cls.create_pingable_secgroup_rule]
- for func in funcs:
- func(secgroup_id=cls.secgroup['id'],
- client=cls.os_admin.network_client)
- @classmethod
- def resource_cleanup(cls):
- # Cleanup for security group
- cls.os_admin.network_client.delete_security_group(
- security_group_id=cls.secgroup['id'])
- super(FloatingIpTestCasesAdmin, cls).resource_cleanup()
+ # Create security group with admin privileges
+ network_client = cls.os_admin.network_client
+ cls.secgroup = cls.create_security_group(
+ client=cls.os_admin.network_client)
+ cls.create_loginable_secgroup_rule(
+ secgroup_id=cls.secgroup['id'],
+ client=network_client)
+ cls.create_pingable_secgroup_rule(
+ secgroup_id=cls.secgroup['id'],
+            client=network_client)
def _list_hypervisors(self):
# List of hypervisors
diff --git a/neutron_tempest_plugin/scenario/base.py b/neutron_tempest_plugin/scenario/base.py
index a2c5c72..cc1ca4c 100644
--- a/neutron_tempest_plugin/scenario/base.py
+++ b/neutron_tempest_plugin/scenario/base.py
@@ -122,29 +122,24 @@
Setting a group_id would only permit traffic from ports
belonging to the same security group.
"""
-
- rule_list = [{'protocol': 'tcp',
- 'direction': 'ingress',
- 'port_range_min': 22,
- 'port_range_max': 22,
- 'remote_ip_prefix': '0.0.0.0/0'}]
- client = client or cls.os_primary.network_client
- cls.create_secgroup_rules(rule_list, client=client,
- secgroup_id=secgroup_id)
+ return cls.create_security_group_rule(
+ security_group_id=secgroup_id,
+ client=client,
+ protocol=neutron_lib_constants.PROTO_NAME_TCP,
+ direction=neutron_lib_constants.INGRESS_DIRECTION,
+ port_range_min=22,
+ port_range_max=22)
@classmethod
def create_pingable_secgroup_rule(cls, secgroup_id=None,
client=None):
- """This rule is intended to permit inbound ping"""
+ """This rule is intended to permit inbound ping
- rule_list = [{'protocol': 'icmp',
- 'direction': 'ingress',
- 'port_range_min': 8, # type
- 'port_range_max': 0, # code
- 'remote_ip_prefix': '0.0.0.0/0'}]
- client = client or cls.os_primary.network_client
- cls.create_secgroup_rules(rule_list, client=client,
- secgroup_id=secgroup_id)
+ """
+ return cls.create_security_group_rule(
+ security_group_id=secgroup_id, client=client,
+ protocol=neutron_lib_constants.PROTO_NAME_ICMP,
+ direction=neutron_lib_constants.INGRESS_DIRECTION)
@classmethod
def create_router_by_client(cls, is_admin=False, **kwargs):
@@ -176,13 +171,13 @@
client.delete_interface(server_id, port_id=port_id)
def setup_network_and_server(
- self, router=None, server_name=None, **kwargs):
+ self, router=None, server_name=None, network=None, **kwargs):
"""Create network resources and a server.
Creating a network, subnet, router, keypair, security group
and a server.
"""
- self.network = self.create_network()
+ self.network = network or self.create_network()
LOG.debug("Created network %s", self.network['name'])
self.subnet = self.create_subnet(self.network)
LOG.debug("Created subnet %s", self.subnet['id'])
diff --git a/neutron_tempest_plugin/scenario/test_connectivity.py b/neutron_tempest_plugin/scenario/test_connectivity.py
new file mode 100644
index 0000000..3385a04
--- /dev/null
+++ b/neutron_tempest_plugin/scenario/test_connectivity.py
@@ -0,0 +1,111 @@
+# Copyright 2018 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.common import compute
+from tempest.common import utils
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+
+from neutron_tempest_plugin.common import ssh
+from neutron_tempest_plugin import config
+from neutron_tempest_plugin.scenario import base
+
+CONF = config.CONF
+
+
+class NetworkConnectivityTest(base.BaseTempestTestCase):
+ credentials = ['primary', 'admin']
+
+ @classmethod
+ @utils.requires_ext(extension="router", service="network")
+ def resource_setup(cls):
+ super(NetworkConnectivityTest, cls).resource_setup()
+ # Keypair used to SSH into the test servers
+ cls.keypair = cls.create_keypair()
+ # Security group shared by the test servers
+ cls.secgroup = cls.create_security_group(
+ name=data_utils.rand_name('secgroup'))
+ # Add rules allowing SSH and ICMP traffic
+ cls.create_loginable_secgroup_rule(secgroup_id=cls.secgroup['id'])
+ cls.create_pingable_secgroup_rule(secgroup_id=cls.secgroup['id'])
+
+ def _create_servers(self, port_1, port_2):
+ params = {
+ 'flavor_ref': CONF.compute.flavor_ref,
+ 'image_ref': CONF.compute.image_ref,
+ 'key_name': self.keypair['name']
+ }
+ vm1 = self.create_server(networks=[{'port': port_1['id']}], **params)
+
+ if (CONF.compute.min_compute_nodes > 1 and
+ compute.is_scheduler_filter_enabled("DifferentHostFilter")):
+ params['scheduler_hints'] = {
+ 'different_host': [vm1['server']['id']]}
+
+ self.create_server(networks=[{'port': port_2['id']}], **params)
+
+ @decorators.idempotent_id('8944b90d-1766-4669-bd8a-672b5d106bb7')
+ def test_connectivity_through_2_routers(self):
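+ # Topology under test: two tenant networks (ap1_net, ap2_net), each
+ # behind its own router, with both routers also attached to a shared
+ # "wan" network. Static routes added further down make each router
+ # forward traffic for the peer subnet via the other router's WAN
+ # port, so a VM on ap1_net can reach a VM on ap2_net.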
+ ap1_net = self.create_network()
+ ap2_net = self.create_network()
+ wan_net = self.create_network()
+ ap1_subnet = self.create_subnet(
+ ap1_net, cidr="10.10.210.0/24", gateway="10.10.210.254")
+ ap2_subnet = self.create_subnet(
+ ap2_net, cidr="10.10.220.0/24", gateway="10.10.220.254")
+ self.create_subnet(
+ wan_net, cidr="10.10.200.0/24", gateway="10.10.200.254")
+
+ ap1_rt = self.create_router(
+ router_name=data_utils.rand_name("ap1_rt"),
+ admin_state_up=True,
+ external_network_id=CONF.network.public_network_id)
+ ap2_rt = self.create_router(
+ router_name=data_utils.rand_name("ap2_rt"),
+ admin_state_up=True)
+
+ ap1_internal_port = self.create_port(
+ ap1_net, security_groups=[self.secgroup['id']])
+ ap2_internal_port = self.create_port(
+ ap2_net, security_groups=[self.secgroup['id']])
+ ap1_wan_port = self.create_port(wan_net)
+ ap2_wan_port = self.create_port(wan_net)
+
+ self._create_servers(ap1_internal_port, ap2_internal_port)
+
+ self.client.add_router_interface_with_port_id(
+ ap1_rt['id'], ap1_wan_port['id'])
+ self.client.add_router_interface_with_port_id(
+ ap2_rt['id'], ap2_wan_port['id'])
+ self.create_router_interface(ap1_rt['id'], ap1_subnet['id'])
+ self.create_router_interface(ap2_rt['id'], ap2_subnet['id'])
+
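+ # Point each router at the peer router's WAN port address for the
+ # other tenant subnet.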
+ self.client.update_router(
+ ap1_rt['id'],
+ routes=[{"destination": ap2_subnet['cidr'],
+ "nexthop": ap2_wan_port['fixed_ips'][0]['ip_address']}])
+ self.client.update_router(
+ ap2_rt['id'],
+ routes=[{"destination": ap1_subnet['cidr'],
+ "nexthop": ap1_wan_port['fixed_ips'][0]['ip_address']}])
+
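+ # SSH into the first VM over a floating IP and ping the second VM's
+ # fixed address, which has to traverse both routers via the WAN
+ # network.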
+ ap1_fip = self.create_and_associate_floatingip(
+ ap1_internal_port['id'])
+ ap1_sshclient = ssh.Client(
+ ap1_fip['floating_ip_address'], CONF.validation.image_ssh_user,
+ pkey=self.keypair['private_key'])
+
+ self.check_remote_connectivity(
+ ap1_sshclient, ap2_internal_port['fixed_ips'][0]['ip_address'])
diff --git a/neutron_tempest_plugin/scenario/test_internal_dns.py b/neutron_tempest_plugin/scenario/test_internal_dns.py
index dd89727..fadabb0 100644
--- a/neutron_tempest_plugin/scenario/test_internal_dns.py
+++ b/neutron_tempest_plugin/scenario/test_internal_dns.py
@@ -27,16 +27,17 @@
@utils.requires_ext(extension="dns-integration", service="network")
@decorators.idempotent_id('988347de-07af-471a-abfa-65aea9f452a6')
- def test_dns_name(self):
+ def test_dns_domain_and_name(self):
"""Test the ability to ping a VM's hostname from another VM.
1) Create two VMs on the same network, giving each a name
2) SSH in to the first VM:
2.1) ping the other VM's internal IP
- 2.2) ping the otheR VM's hostname
+ 2.2) ping the other VM's hostname
"""
- self.setup_network_and_server(server_name='luke')
+ network = self.create_network(dns_domain='starwars.')
+ self.setup_network_and_server(network=network, server_name='luke')
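+ # The dns_domain set on the network is expected to show up inside
+ # the guest (checked below via resolv.conf) and to make the second
+ # server resolvable as 'leia.starwars'.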
self.create_pingable_secgroup_rule(
secgroup_id=self.security_groups[-1]['id'])
self.check_connectivity(self.fip['floating_ip_address'],
@@ -70,4 +71,8 @@
self.check_remote_connectivity(
ssh_client, leia_port['fixed_ips'][0]['ip_address'],
timeout=CONF.validation.ping_timeout * 10)
+ self.assertIn(
+ 'starwars', ssh_client.exec_command('cat /etc/resolv.conf'))
+
self.check_remote_connectivity(ssh_client, 'leia')
+ self.check_remote_connectivity(ssh_client, 'leia.starwars')
diff --git a/neutron_tempest_plugin/scenario/test_security_groups.py b/neutron_tempest_plugin/scenario/test_security_groups.py
index ebdcf93..7b43a7e 100644
--- a/neutron_tempest_plugin/scenario/test_security_groups.py
+++ b/neutron_tempest_plugin/scenario/test_security_groups.py
@@ -137,16 +137,18 @@
@decorators.idempotent_id('3d73ec1a-2ec6-45a9-b0f8-04a283d9d864')
def test_protocol_number_rule(self):
# protocol number is added instead of str in security rule creation
- server_ssh_clients, fips, _ = self.create_vm_testing_sec_grp(
- num_servers=1)
+ name = data_utils.rand_name("test_protocol_number_rule")
+ security_group = self.create_security_group(name=name)
+ port = self.create_port(network=self.network, name=name,
+ security_groups=[security_group['id']])
+ _, fips, _ = self.create_vm_testing_sec_grp(num_servers=1,
+ ports=[port])
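+ # The server uses only the dedicated security group created above,
+ # so ping fails until the ICMP rule (given by protocol number) is
+ # added to that group.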
self.ping_ip_address(fips[0]['floating_ip_address'],
should_succeed=False)
rule_list = [{'protocol': constants.PROTO_NUM_ICMP,
'direction': constants.INGRESS_DIRECTION,
'remote_ip_prefix': '0.0.0.0/0'}]
- secgroup_id = self.os_primary.network_client.list_security_groups()[
- 'security_groups'][0]['id']
- self.create_secgroup_rules(rule_list, secgroup_id=secgroup_id)
+ self.create_secgroup_rules(rule_list, secgroup_id=security_group['id'])
self.ping_ip_address(fips[0]['floating_ip_address'])
@decorators.idempotent_id('3d73ec1a-2ec6-45a9-b0f8-04a283d9d964')
diff --git a/neutron_tempest_plugin/scenario/test_trunk.py b/neutron_tempest_plugin/scenario/test_trunk.py
index 1903180..7ca5d29 100644
--- a/neutron_tempest_plugin/scenario/test_trunk.py
+++ b/neutron_tempest_plugin/scenario/test_trunk.py
@@ -12,28 +12,30 @@
# License for the specific language governing permissions and limitations
# under the License.
+import collections
+
+from neutron_lib import constants
from oslo_log import log as logging
from tempest.common import utils as tutils
-from tempest.common import waiters
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
import testtools
+from neutron_tempest_plugin.common import ip
from neutron_tempest_plugin.common import ssh
from neutron_tempest_plugin.common import utils
from neutron_tempest_plugin import config
from neutron_tempest_plugin.scenario import base
-from neutron_tempest_plugin.scenario import constants
+
LOG = logging.getLogger(__name__)
CONF = config.CONF
-CONFIGURE_VLAN_INTERFACE_COMMANDS = (
- 'IFACE=$(PATH=$PATH:/usr/sbin ip l | grep "^[0-9]*: e"|cut -d \: -f 2) &&'
- 'sudo ip l a link $IFACE name $IFACE.%(tag)d type vlan id %(tag)d &&'
- 'sudo ip l s up dev $IFACE.%(tag)d && '
- 'ps -ef | grep -q "[d]hclient .*$IFACE.%(tag)d" || '
- 'sudo dhclient $IFACE.%(tag)d;')
+
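+# Bundles everything created for a single test server: its parent port,
+# the optional VLAN subport and trunk, the floating IP, the server
+# itself and a ready-to-use SSH client.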
+ServerWithTrunkPort = collections.namedtuple(
+ 'ServerWithTrunkPort',
+ ['port', 'subport', 'trunk', 'floating_ip', 'server',
+ 'ssh_client'])
class TrunkTest(base.BaseTempestTestCase):
@@ -45,99 +47,140 @@
def resource_setup(cls):
super(TrunkTest, cls).resource_setup()
# setup basic topology for servers we can log into
- cls.network = cls.create_network()
- cls.subnet = cls.create_subnet(cls.network)
- router = cls.create_router_by_client()
- cls.create_router_interface(router['id'], cls.subnet['id'])
- cls.keypair = cls.create_keypair()
- cls.secgroup = cls.os_primary.network_client.create_security_group(
- name=data_utils.rand_name('secgroup'))
- cls.security_groups.append(cls.secgroup['security_group'])
- cls.create_loginable_secgroup_rule(
- secgroup_id=cls.secgroup['security_group']['id'])
+ cls.rand_name = data_utils.rand_name(
+ cls.__name__.rsplit('.', 1)[-1])
+ cls.network = cls.create_network(name=cls.rand_name)
+ cls.subnet = cls.create_subnet(network=cls.network,
+ name=cls.rand_name)
+ cls.router = cls.create_router_by_client()
+ cls.create_router_interface(cls.router['id'], cls.subnet['id'])
+ cls.keypair = cls.create_keypair(name=cls.rand_name)
- def _create_server_with_trunk_port(self):
- port = self.create_port(self.network, security_groups=[
- self.secgroup['security_group']['id']])
- trunk = self.create_trunk(port)
- server, fip = self._create_server_with_fip(port['id'])
- return {'port': port, 'trunk': trunk, 'fip': fip,
- 'server': server}
+ def setUp(self):
+ super(TrunkTest, self).setUp()
+ self.security_group = self.create_security_group(name=self.rand_name)
+ self.create_loginable_secgroup_rule(self.security_group['id'])
- def _create_server_with_fip(self, port_id, use_advanced_image=False,
- **server_kwargs):
- fip = self.create_floatingip(port_id=port_id)
- flavor_ref = CONF.compute.flavor_ref
- image_ref = CONF.compute.image_ref
+ def _create_server_with_network(self, network, use_advanced_image=False):
+ port = self._create_server_port(network=network)
+ floating_ip = self.create_floatingip(port=port)
+ ssh_client = self._create_ssh_client(
+ floating_ip=floating_ip, use_advanced_image=use_advanced_image)
+ server = self._create_server(port=port,
+ use_advanced_image=use_advanced_image)
+ return ServerWithTrunkPort(port=port, subport=None, trunk=None,
+ floating_ip=floating_ip, server=server,
+ ssh_client=ssh_client)
+
+ def _create_server_with_trunk_port(self, subport_network=None,
+ segmentation_id=None,
+ use_advanced_image=False):
+ port = self._create_server_port()
+ floating_ip = self.create_floatingip(port=port)
+ ssh_client = self._create_ssh_client(
+ floating_ip=floating_ip, use_advanced_image=use_advanced_image)
+
+ subport = None
+ subports = None
+ if subport_network:
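+ # The subport reuses the parent port's MAC address because the
+ # VLAN sub-interface configured inside the guest inherits the
+ # parent NIC's MAC.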
+ subport = self._create_server_port(
+ network=subport_network, mac_address=port['mac_address'])
+ subports = [{'port_id': subport['id'],
+ 'segmentation_type': 'vlan',
+ 'segmentation_id': segmentation_id}]
+ trunk = self.create_trunk(port=port, subports=subports)
+
+ server = self._create_server(port=port,
+ use_advanced_image=use_advanced_image)
+ return ServerWithTrunkPort(port=port, subport=subport, trunk=trunk,
+ floating_ip=floating_ip, server=server,
+ ssh_client=ssh_client)
+
+ def _create_server_port(self, network=None, **params):
+ network = network or self.network
+ return self.create_port(network=network, name=self.rand_name,
+ security_groups=[self.security_group['id']],
+ **params)
+
+ def _create_server(self, port, use_advanced_image=False, **params):
if use_advanced_image:
flavor_ref = CONF.neutron_plugin_options.advanced_image_flavor_ref
image_ref = CONF.neutron_plugin_options.advanced_image_ref
- return (
- self.create_server(
- flavor_ref=flavor_ref,
- image_ref=image_ref,
- key_name=self.keypair['name'],
- networks=[{'port': port_id}],
- security_groups=[{'name': self.secgroup[
- 'security_group']['name']}],
- **server_kwargs)['server'],
- fip)
+ else:
+ flavor_ref = CONF.compute.flavor_ref
+ image_ref = CONF.compute.image_ref
+ return self.create_server(flavor_ref=flavor_ref,
+ image_ref=image_ref,
+ key_name=self.keypair['name'],
+ networks=[{'port': port['id']}],
+ **params)['server']
- def _is_port_down(self, port_id):
- p = self.client.show_port(port_id)['port']
- return p['status'] == 'DOWN'
+ def _show_port(self, port, update=False):
+ observed = self.client.show_port(port['id'])['port']
+ if update:
+ port.update(observed)
+ return observed
- def _is_port_active(self, port_id):
- p = self.client.show_port(port_id)['port']
- return p['status'] == 'ACTIVE'
+ def _show_trunk(self, trunk, update=False):
+ observed = self.client.show_trunk(trunk['id'])['trunk']
+ if update:
+ trunk.update(observed)
+ return observed
- def _is_trunk_active(self, trunk_id):
- t = self.client.show_trunk(trunk_id)['trunk']
- return t['status'] == 'ACTIVE'
+ def _is_trunk_status(self, trunk, status, update=False):
+ return self._show_trunk(trunk, update)['status'] == status
- def _create_server_with_port_and_subport(self, vlan_network, vlan_tag,
- use_advanced_image=False):
- parent_port = self.create_port(self.network, security_groups=[
- self.secgroup['security_group']['id']])
- port_for_subport = self.create_port(
- vlan_network,
- security_groups=[self.secgroup['security_group']['id']],
- mac_address=parent_port['mac_address'])
- subport = {
- 'port_id': port_for_subport['id'],
- 'segmentation_type': 'vlan',
- 'segmentation_id': vlan_tag}
- self.create_trunk(parent_port, [subport])
+ def _is_port_status(self, port, status, update=False):
+ return self._show_port(port, update)['status'] == status
- server, fip = self._create_server_with_fip(
- parent_port['id'], use_advanced_image=use_advanced_image)
+ def _wait_for_port(self, port, status=constants.ACTIVE):
+ utils.wait_until_true(
+ lambda: self._is_port_status(port, status),
+ exception=RuntimeError(
+ "Timed out waiting for port {!r} to transition to "
+ "status {!r}.".format(port['id'], status)))
- ssh_user = CONF.validation.image_ssh_user
+ def _wait_for_trunk(self, trunk, status=constants.ACTIVE):
+ utils.wait_until_true(
+ lambda: self._is_trunk_status(trunk, status),
+ exception=RuntimeError(
+ "Timed out waiting for trunk {!r} to transition to "
+ "status {!r}.".format(trunk['id'], status)))
+
+ def _create_ssh_client(self, floating_ip, use_advanced_image=False):
if use_advanced_image:
- ssh_user = CONF.neutron_plugin_options.advanced_image_ssh_user
+ username = CONF.neutron_plugin_options.advanced_image_ssh_user
+ else:
+ username = CONF.validation.image_ssh_user
+ return ssh.Client(host=floating_ip['floating_ip_address'],
+ username=username,
+ pkey=self.keypair['private_key'])
- server_ssh_client = ssh.Client(
- fip['floating_ip_address'],
- ssh_user,
- pkey=self.keypair['private_key'])
+ def _assert_has_ssh_connectivity(self, ssh_client):
+ ssh_client.exec_command("true")
- return {
- 'server': server,
- 'fip': fip,
- 'ssh_client': server_ssh_client,
- 'subport': port_for_subport,
- }
+ def _configure_vlan_subport(self, vm, vlan_tag, vlan_subnet):
+ self.wait_for_server_active(server=vm.server)
+ self._wait_for_trunk(trunk=vm.trunk)
+ self._wait_for_port(port=vm.port)
+ self._wait_for_port(port=vm.subport)
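+ # Find which guest interface carries the parent port's address,
+ # create a VLAN sub-interface for the subport on top of it, and
+ # verify the subport's address ends up on that VLAN interface.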
- def _wait_for_server(self, server, advanced_image=False):
- ssh_user = CONF.validation.image_ssh_user
- if advanced_image:
- ssh_user = CONF.neutron_plugin_options.advanced_image_ssh_user
- waiters.wait_for_server_status(self.os_primary.servers_client,
- server['server']['id'],
- constants.SERVER_STATUS_ACTIVE)
- self.check_connectivity(server['fip']['floating_ip_address'],
- ssh_user,
- self.keypair['private_key'])
+ ip_command = ip.IPCommand(ssh_client=vm.ssh_client)
+ for address in ip_command.list_addresses(port=vm.port):
+ port_iface = address.device.name
+ break
+ else:
+ self.fail("Parent port fixed IP not found on server.")
+
+ subport_iface = ip_command.configure_vlan_subport(
+ port=vm.port, subport=vm.subport, vlan_tag=vlan_tag,
+ subnets=[vlan_subnet])
+ for address in ip_command.list_addresses(port=vm.subport):
+ self.assertEqual(subport_iface, address.device.name)
+ self.assertEqual(port_iface, address.device.parent)
+ break
+ else:
+ self.fail("Sub-port fixed IP not found on server.")
@decorators.idempotent_id('bb13fe28-f152-4000-8131-37890a40c79e')
def test_trunk_subport_lifecycle(self):
@@ -152,111 +195,149 @@
wired the port correctly and that the trunk port itself maintains
connectivity.
"""
- server1 = self._create_server_with_trunk_port()
- server2 = self._create_server_with_trunk_port()
- for server in (server1, server2):
- self._wait_for_server(server)
- trunk1_id, trunk2_id = server1['trunk']['id'], server2['trunk']['id']
- # trunks should transition to ACTIVE without any subports
- utils.wait_until_true(
- lambda: self._is_trunk_active(trunk1_id),
- exception=RuntimeError("Timed out waiting for trunk %s to "
- "transition to ACTIVE." % trunk1_id))
- utils.wait_until_true(
- lambda: self._is_trunk_active(trunk2_id),
- exception=RuntimeError("Timed out waiting for trunk %s to "
- "transition to ACTIVE." % trunk2_id))
+ vm1 = self._create_server_with_trunk_port()
+ vm2 = self._create_server_with_trunk_port()
+ for vm in (vm1, vm2):
+ self.wait_for_server_active(server=vm.server)
+ self._wait_for_trunk(vm.trunk)
+ self._assert_has_ssh_connectivity(vm.ssh_client)
+
# create a few more networks and ports for subports
# check limit of networks per project
- max_vlan = 3 + CONF.neutron_plugin_options.max_networks_per_project
- allowed_vlans = range(3, max_vlan)
- subports = [{'port_id': self.create_port(self.create_network())['id'],
- 'segmentation_type': 'vlan', 'segmentation_id': seg_id}
- for seg_id in allowed_vlans]
- # add all subports to server1
- self.client.add_subports(trunk1_id, subports)
- # ensure trunk transitions to ACTIVE
- utils.wait_until_true(
- lambda: self._is_trunk_active(trunk1_id),
- exception=RuntimeError("Timed out waiting for trunk %s to "
- "transition to ACTIVE." % trunk1_id))
- # ensure all underlying subports transitioned to ACTIVE
- for s in subports:
- utils.wait_until_true(lambda: self._is_port_active(s['port_id']))
- # ensure main dataplane wasn't interrupted
- self.check_connectivity(server1['fip']['floating_ip_address'],
- CONF.validation.image_ssh_user,
- self.keypair['private_key'])
- # move subports over to other server
- self.client.remove_subports(trunk1_id, subports)
- # ensure all subports go down
- for s in subports:
- utils.wait_until_true(
- lambda: self._is_port_down(s['port_id']),
- exception=RuntimeError("Timed out waiting for subport %s to "
- "transition to DOWN." % s['port_id']))
- self.client.add_subports(trunk2_id, subports)
- # wait for both trunks to go back to ACTIVE
- utils.wait_until_true(
- lambda: self._is_trunk_active(trunk1_id),
- exception=RuntimeError("Timed out waiting for trunk %s to "
- "transition to ACTIVE." % trunk1_id))
- utils.wait_until_true(
- lambda: self._is_trunk_active(trunk2_id),
- exception=RuntimeError("Timed out waiting for trunk %s to "
- "transition to ACTIVE." % trunk2_id))
- # ensure subports come up on other trunk
- for s in subports:
- utils.wait_until_true(
- lambda: self._is_port_active(s['port_id']),
- exception=RuntimeError("Timed out waiting for subport %s to "
- "transition to ACTIVE." % s['port_id']))
- # final connectivity check
- self.check_connectivity(server1['fip']['floating_ip_address'],
- CONF.validation.image_ssh_user,
- self.keypair['private_key'])
- self.check_connectivity(server2['fip']['floating_ip_address'],
- CONF.validation.image_ssh_user,
- self.keypair['private_key'])
+ segment_ids = range(
+ 3, 3 + CONF.neutron_plugin_options.max_networks_per_project)
+ tagged_networks = [self.create_network() for _ in segment_ids]
+ tagged_ports = [self.create_port(network=network)
+ for network in tagged_networks]
+ subports = [{'port_id': tagged_ports[i]['id'],
+ 'segmentation_type': 'vlan',
+ 'segmentation_id': segment_id}
+ for i, segment_id in enumerate(segment_ids)]
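+ # Each subport pairs one tagged network's port with a distinct VLAN
+ # segmentation ID, starting at 3.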
- @testtools.skipUnless(
- CONF.neutron_plugin_options.advanced_image_ref,
- "Advanced image is required to run this test.")
+ # add all subports to server1
+ self.client.add_subports(vm1.trunk['id'], subports)
+ self._wait_for_trunk(vm1.trunk)
+ for port in tagged_ports:
+ self._wait_for_port(port)
+
+ # ensure main data-plane wasn't interrupted
+ self._assert_has_ssh_connectivity(vm1.ssh_client)
+
+ # move subports over to other server
+ self.client.remove_subports(vm1.trunk['id'], subports)
+ # ensure all subports go down
+ for port in tagged_ports:
+ self._wait_for_port(port, status=constants.DOWN)
+
+ self.client.add_subports(vm2.trunk['id'], subports)
+
+ # wait for both trunks to go back to ACTIVE
+ for vm in [vm1, vm2]:
+ self._wait_for_trunk(vm.trunk)
+
+ # ensure subports come up on other trunk
+ for port in tagged_ports:
+ self._wait_for_port(port)
+
+ # final connectivity check
+ for vm in [vm1, vm2]:
+ self._wait_for_trunk(vm.trunk)
+ self._assert_has_ssh_connectivity(vm.ssh_client)
+
+ @testtools.skipUnless(CONF.neutron_plugin_options.advanced_image_ref,
+ "Advanced image is required to run this test.")
@decorators.idempotent_id('a8a02c9b-b453-49b5-89a2-cce7da66aafb')
def test_subport_connectivity(self):
vlan_tag = 10
-
vlan_network = self.create_network()
- self.create_subnet(vlan_network, gateway=None)
+ vlan_subnet = self.create_subnet(network=vlan_network, gateway=None)
- servers = [
- self._create_server_with_port_and_subport(
- vlan_network, vlan_tag, use_advanced_image=True)
- for i in range(2)]
+ vm1 = self._create_server_with_trunk_port(subport_network=vlan_network,
+ segmentation_id=vlan_tag,
+ use_advanced_image=True)
+ vm2 = self._create_server_with_trunk_port(subport_network=vlan_network,
+ segmentation_id=vlan_tag,
+ use_advanced_image=True)
- for server in servers:
- self._wait_for_server(server, advanced_image=True)
- # Configure VLAN interfaces on server
- command = CONFIGURE_VLAN_INTERFACE_COMMANDS % {'tag': vlan_tag}
- server['ssh_client'].exec_command(command)
- out = server['ssh_client'].exec_command(
- 'PATH=$PATH:/usr/sbin;ip addr list')
- LOG.debug("Interfaces on server %s: %s", server, out)
+ for vm in [vm1, vm2]:
+ self._configure_vlan_subport(vm=vm,
+ vlan_tag=vlan_tag,
+ vlan_subnet=vlan_subnet)
# Ping from server1 to server2 via VLAN interface should fail because
# we haven't allowed ICMP
self.check_remote_connectivity(
- servers[0]['ssh_client'],
- servers[1]['subport']['fixed_ips'][0]['ip_address'],
- should_succeed=False
- )
- # allow intra-securitygroup traffic
- self.client.create_security_group_rule(
- security_group_id=self.secgroup['security_group']['id'],
- direction='ingress', ethertype='IPv4', protocol='icmp',
- remote_group_id=self.secgroup['security_group']['id'])
+ vm1.ssh_client,
+ vm2.subport['fixed_ips'][0]['ip_address'],
+ should_succeed=False)
+
+ # allow intra-security-group traffic
+ self.create_pingable_secgroup_rule(self.security_group['id'])
self.check_remote_connectivity(
- servers[0]['ssh_client'],
- servers[1]['subport']['fixed_ips'][0]['ip_address'],
- should_succeed=True
- )
+ vm1.ssh_client,
+ vm2.subport['fixed_ips'][0]['ip_address'])
+
+ @testtools.skipUnless(
+ CONF.neutron_plugin_options.advanced_image_ref,
+ "Advanced image is required to run this test.")
+ @testtools.skipUnless(
+ CONF.neutron_plugin_options.q_agent == "linuxbridge",
+ "Linux bridge agent is required to run this test.")
+ @decorators.idempotent_id('d61cbdf6-1896-491c-b4b4-871caf7fbffe')
+ def test_parent_port_connectivity_after_trunk_deleted_lb(self):
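+ # With the Linux bridge agent, deleting the trunk while leaving the
+ # parent port attached should keep connectivity via the parent port
+ # but drop connectivity via the VLAN subport.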
+ vlan_tag = 10
+ vlan_network = self.create_network()
+ vlan_subnet = self.create_subnet(vlan_network)
+ self.create_router_interface(self.router['id'], vlan_subnet['id'])
+
+ # Create servers
+ trunk_network_server = self._create_server_with_trunk_port(
+ subport_network=vlan_network,
+ segmentation_id=vlan_tag,
+ use_advanced_image=True)
+ normal_network_server = self._create_server_with_network(self.network)
+ vlan_network_server = self._create_server_with_network(vlan_network)
+
+ self._configure_vlan_subport(vm=trunk_network_server,
+ vlan_tag=vlan_tag,
+ vlan_subnet=vlan_subnet)
+ for vm in [normal_network_server, vlan_network_server]:
+ self.wait_for_server_active(vm.server)
+
+ # allow ICMP traffic
+ self.create_pingable_secgroup_rule(self.security_group['id'])
+
+ # Ping from trunk_network_server to normal_network_server
+ # via parent port
+ self.check_remote_connectivity(
+ trunk_network_server.ssh_client,
+ normal_network_server.port['fixed_ips'][0]['ip_address'],
+ should_succeed=True)
+
+ # Ping from trunk_network_server to vlan_network_server via VLAN
+ # interface should succeed
+ self.check_remote_connectivity(
+ trunk_network_server.ssh_client,
+ vlan_network_server.port['fixed_ips'][0]['ip_address'],
+ should_succeed=True)
+
+ # Delete the trunk
+ self.delete_trunk(
+ trunk_network_server.trunk,
+ detach_parent_port=False)
+ LOG.debug("Trunk %s is deleted.",
+ trunk_network_server.trunk['id'])
+
+ # Ping from trunk_network_server to normal_network_server
+ # via parent port should still succeed after the trunk is deleted
+ self.check_remote_connectivity(
+ trunk_network_server.ssh_client,
+ normal_network_server.port['fixed_ips'][0]['ip_address'],
+ should_succeed=True)
+
+ # Ping from trunk_network_server to vlan_network_server via VLAN
+ # interface should fail after the trunk is deleted
+ self.check_remote_connectivity(
+ trunk_network_server.ssh_client,
+ vlan_network_server.port['fixed_ips'][0]['ip_address'],
+ should_succeed=False)
diff --git a/neutron_tempest_plugin/services/network/json/network_client.py b/neutron_tempest_plugin/services/network/json/network_client.py
index 58dfbf4..25fc8c1 100644
--- a/neutron_tempest_plugin/services/network/json/network_client.py
+++ b/neutron_tempest_plugin/services/network/json/network_client.py
@@ -41,21 +41,18 @@
# The following list represents resource names that do not require
# changing underscore to a hyphen
- hyphen_exceptions = ["service_profiles"]
- # the following map is used to construct proper URI
- # for the given neutron resource
+ hyphen_exceptions = ["service_profiles", "availability_zones"]
+ # The following map is used to construct the proper URI
+ # for a given neutron resource.
+ # Resources without a URI prefix do not need an entry here.
service_resource_prefix_map = {
- 'networks': '',
- 'subnets': '',
- 'subnetpools': '',
- 'ports': '',
'metering_labels': 'metering',
'metering_label_rules': 'metering',
'policies': 'qos',
'bandwidth_limit_rules': 'qos',
'minimum_bandwidth_rules': 'qos',
'rule_types': 'qos',
- 'rbac-policies': '',
'logs': 'log',
'loggable_resources': 'log',
}
@@ -375,6 +372,8 @@
update_body['distributed'] = kwargs['distributed']
if 'ha' in kwargs:
update_body['ha'] = kwargs['ha']
+ if 'routes' in kwargs:
+ update_body['routes'] = kwargs['routes']
update_body = dict(router=update_body)
update_body = jsonutils.dumps(update_body)
resp, body = self.put(uri, update_body)
@@ -446,6 +445,9 @@
body = jsonutils.loads(body)
return service_client.ResponseBody(resp, body)
+ def remove_router_extra_routes(self, router_id):
+ self.update_router(router_id, routes=None)
+
def update_agent(self, agent_id, agent_info):
"""Update an agent
@@ -871,6 +873,13 @@
body = jsonutils.loads(body)
return service_client.ResponseBody(resp, body)
+ def delete_security_group_rule(self, security_group_rule_id):
+ uri = '%s/security-group-rules/%s' % (self.uri_prefix,
+ security_group_rule_id)
+ resp, body = self.delete(uri)
+ self.expected_success(204, resp.status)
+ return service_client.ResponseBody(resp, body)
+
def list_security_groups(self, **kwargs):
post_body = {'security_groups': kwargs}
body = jsonutils.dumps(post_body)
diff --git a/setup.cfg b/setup.cfg
index c6a1fad..d7790d6 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -4,7 +4,7 @@
description-file =
README.rst
author = OpenStack
-author-email = openstack-dev@lists.openstack.org
+author-email = openstack-discuss@lists.openstack.org
home-page = https://git.openstack.org/cgit/openstack/neutron-tempest-plugin
classifier =
Environment :: OpenStack