# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging
from tempest.common import waiters
from tempest import test

from neutron.common import utils
from neutron.tests.tempest import config
from neutron.tests.tempest.scenario import base
from neutron.tests.tempest.scenario import constants

CONF = config.CONF
LOG = logging.getLogger(__name__)


class TrunkTest(base.BaseTempestTestCase):
    credentials = ['primary']
    force_tenant_isolation = False

    @classmethod
    @test.requires_ext(extension="trunk", service="network")
    def resource_setup(cls):
        super(TrunkTest, cls).resource_setup()
        # setup basic topology for servers we can log into
        cls.network = cls.create_network()
        cls.subnet = cls.create_subnet(cls.network)
        cls.create_router_and_interface(cls.subnet['id'])
        cls.keypair = cls.create_keypair()
        cls.create_loginable_secgroup_rule()

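    # The helper below creates a parent port, wraps it in a trunk with no
    # subports, associates a floating IP with the port and boots a server
    # on it; cleanup detaches the interface and deletes the trunk.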
    def _create_server_with_trunk_port(self):
        port = self.create_port(self.network)
        trunk = self.client.create_trunk(port['id'], subports=[])['trunk']
        fip = self.create_and_associate_floatingip(port['id'])
        server = self.create_server(
            flavor_ref=CONF.compute.flavor_ref,
            image_ref=CONF.compute.image_ref,
            key_name=self.keypair['name'],
            networks=[{'port': port['id']}])['server']
        self.addCleanup(self._detach_and_delete_trunk, server, trunk)
        return {'port': port, 'trunk': trunk, 'fip': fip,
                'server': server}

    def _detach_and_delete_trunk(self, server, trunk):
        # we have to detach the interface from the server before
        # the trunk can be deleted.
        self.manager.interfaces_client.delete_interface(
            server['id'], trunk['port_id'])

        def is_port_detached():
            p = self.client.show_port(trunk['port_id'])['port']
            return p['device_id'] == ''
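        # the interface detach is asynchronous on the Nova side, so wait
        # until Neutron reports the parent port as unbound before trying
        # to delete the trunk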
        utils.wait_until_true(is_port_detached)
        self.client.delete_trunk(trunk['id'])

    def _is_port_down(self, port_id):
        p = self.client.show_port(port_id)['port']
        return p['status'] == 'DOWN'

    def _is_port_active(self, port_id):
        p = self.client.show_port(port_id)['port']
        return p['status'] == 'ACTIVE'

    def _is_trunk_active(self, trunk_id):
        t = self.client.show_trunk(trunk_id)['trunk']
        return t['status'] == 'ACTIVE'

    @test.idempotent_id('bb13fe28-f152-4000-8131-37890a40c79e')
    def test_trunk_subport_lifecycle(self):
        """Test trunk creation and subport transition to ACTIVE status.

        This is a basic test for the trunk extension to ensure that we
        can create a trunk, attach it to a server, and add/remove subports,
        while ensuring the status transitions appropriately.

        This test does not assert any dataplane behavior for the subports.
        It's just a high-level check to ensure the agents claim to have
        wired the port correctly and that the trunk port itself maintains
        connectivity.
        """
        server1 = self._create_server_with_trunk_port()
        server2 = self._create_server_with_trunk_port()
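        # both servers must be ACTIVE and reachable over their floating IPs
        # before the trunks are exercised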
        for server in (server1, server2):
            waiters.wait_for_server_status(self.manager.servers_client,
                                           server['server']['id'],
                                           constants.SERVER_STATUS_ACTIVE)
            self.check_connectivity(server['fip']['floating_ip_address'],
                                    CONF.validation.image_ssh_user,
                                    self.keypair['private_key'])
        trunk1_id, trunk2_id = server1['trunk']['id'], server2['trunk']['id']
        # trunks should transition to ACTIVE without any subports
        utils.wait_until_true(
            lambda: self._is_trunk_active(trunk1_id),
            exception=RuntimeError("Timed out waiting for trunk %s to "
                                   "transition to ACTIVE." % trunk1_id))
        utils.wait_until_true(
            lambda: self._is_trunk_active(trunk2_id),
            exception=RuntimeError("Timed out waiting for trunk %s to "
                                   "transition to ACTIVE." % trunk2_id))
        # create a few more networks and ports for subports
        subports = [{'port_id': self.create_port(self.create_network())['id'],
                     'segmentation_type': 'vlan', 'segmentation_id': seg_id}
                    for seg_id in range(3, 7)]
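        # NOTE: each subport is backed by its own network and carries a
        # distinct VLAN segmentation id (3 through 6)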
        # add all subports to server1
        self.client.add_subports(trunk1_id, subports)
        # ensure trunk transitions to ACTIVE
        utils.wait_until_true(
            lambda: self._is_trunk_active(trunk1_id),
            exception=RuntimeError("Timed out waiting for trunk %s to "
                                   "transition to ACTIVE." % trunk1_id))
        # ensure all underlying subports transitioned to ACTIVE
        for s in subports:
            utils.wait_until_true(lambda: self._is_port_active(s['port_id']))
        # ensure main dataplane wasn't interrupted
        self.check_connectivity(server1['fip']['floating_ip_address'],
                                CONF.validation.image_ssh_user,
                                self.keypair['private_key'])
        # move subports over to the other server
        self.client.remove_subports(trunk1_id, subports)
        # ensure all subports go down
        for s in subports:
            utils.wait_until_true(
                lambda: self._is_port_down(s['port_id']),
                exception=RuntimeError("Timed out waiting for subport %s to "
                                       "transition to DOWN." % s['port_id']))
        self.client.add_subports(trunk2_id, subports)
        # wait for both trunks to go back to ACTIVE
        utils.wait_until_true(
            lambda: self._is_trunk_active(trunk1_id),
            exception=RuntimeError("Timed out waiting for trunk %s to "
                                   "transition to ACTIVE." % trunk1_id))
        utils.wait_until_true(
            lambda: self._is_trunk_active(trunk2_id),
            exception=RuntimeError("Timed out waiting for trunk %s to "
                                   "transition to ACTIVE." % trunk2_id))
        # ensure subports come up on the other trunk
        for s in subports:
            utils.wait_until_true(
                lambda: self._is_port_active(s['port_id']),
                exception=RuntimeError("Timed out waiting for subport %s to "
                                       "transition to ACTIVE." % s['port_id']))
        # final connectivity check
        self.check_connectivity(server1['fip']['floating_ip_address'],
                                CONF.validation.image_ssh_user,
                                self.keypair['private_key'])
        self.check_connectivity(server2['fip']['floating_ip_address'],
                                CONF.validation.image_ssh_user,
                                self.keypair['private_key'])