Victor Ryzhenkin | 14354ac | 2017-09-27 17:42:30 +0400 | [diff] [blame] | 1 | # Copyright 2017 Mirantis, Inc. |
| 2 | # |
| 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may |
| 4 | # not use this file except in compliance with the License. You may obtain |
| 5 | # a copy of the License at |
| 6 | # |
| 7 | # http://www.apache.org/licenses/LICENSE-2.0 |
| 8 | # |
| 9 | # Unless required by applicable law or agreed to in writing, software |
| 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
| 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
| 12 | # License for the specific language governing permissions and limitations |
| 13 | # under the License. |
| 14 | |
| 15 | import pytest |
Vladimir Jigulin | 34dfa94 | 2018-07-23 21:05:48 +0400 | [diff] [blame] | 16 | import netaddr |
| 17 | import os |
Victor Ryzhenkin | 14354ac | 2017-09-27 17:42:30 +0400 | [diff] [blame] | 18 | |
| 19 | from tcp_tests import logger |
| 20 | from tcp_tests import settings |
| 21 | |
| 22 | LOG = logger.logger |
| 23 | |
| 24 | |
class TestMCPK8sActions(object):
    """Test class for different k8s actions.

    Every test here operates on a cluster that is already deployed via the
    ``k8s_deployed`` fixture; ``show_step(n)`` marks progress through the
    numbered "Scenario" steps listed in each test docstring.
    """

    @pytest.mark.grab_versions
    @pytest.mark.fail_snapshot
    @pytest.mark.cz8116
    @pytest.mark.k8s_calico
    def test_k8s_externaldns_coredns(self, show_step, config, k8s_deployed):
        """Test externaldns integration with coredns

        Scenario:
            1. Install k8s with externaldns addon enabled(including etcd, coredns)
            2. Start simple service
            3. Expose deployment
            4. Annotate service with domain name
            5. Try to get service using nslookup
        """

        # Both addons must be enabled in the deploy config for this
        # scenario to be meaningful; skip instead of failing otherwise.
        if not (config.k8s_deploy.kubernetes_externaldns_enabled and
                config.k8s_deploy.kubernetes_coredns_enabled):
            pytest.skip("Test requires Externaldns and coredns addons enabled")

        show_step(1)
        # Sanity check that the API is reachable before exercising it.
        k8sclient = k8s_deployed.api
        assert k8sclient.nodes.list() is not None, "Can not get nodes list"

        show_step(2)
        name = 'test-nginx'
        k8s_deployed.kubectl_run(name, 'nginx', '80')

        show_step(3)
        k8s_deployed.kubectl_expose('deployment', name, '80', 'ClusterIP')

        # DNS name the service should receive, plus the external-dns
        # annotation that requests it for the exposed service.
        hostname = "test.{0}.local.".format(settings.LAB_CONFIG_NAME)
        annotation = "\"external-dns.alpha.kubernetes.io/" \
                     "hostname={0}\"".format(hostname)
        show_step(4)
        k8s_deployed.kubectl_annotate('service', name, annotation)

        show_step(5)
        # Resolve the annotated name directly against the coredns
        # service IP to confirm the record was published.
        dns_host = k8s_deployed.get_svc_ip('coredns')
        k8s_deployed.nslookup(hostname, dns_host)

    @pytest.mark.grab_versions
    @pytest.mark.cncf_publisher(name=['e2e.log', 'junit_01.xml', 'version.txt',
                                      'cncf_results.tar.gz'])
    @pytest.mark.fail_snapshot
    def test_k8s_cncf_certification(self, show_step, config, k8s_deployed,
                                    cncf_log_helper):
        """Run cncf e2e suite and provide files needed for pull request
        to the CNCF repo

        Scenario:
            1. Run cncf from https://github.com/cncf/k8s-conformance
        """

        show_step(1)
        # NOTE(review): the cncf_log_helper fixture presumably collects the
        # artifact files listed in the cncf_publisher marker — verify.
        k8s_deployed.start_k8s_cncf_verification()

    # NOTE(review): marker was misspelled "grap_versions"; the other tests
    # in this class use "grab_versions", and with the typo this test is
    # silently excluded when selecting by the grab_versions mark.
    @pytest.mark.grab_versions
    @pytest.mark.fail_snapshot
    def test_k8s_chain_update(self, show_step, underlay, config, k8s_deployed,
                              k8s_chain_update_log_helper):
        """Test for chain-upgrading k8s hypercube pool and checking it

        Scenario:
            1. Prepare salt on hosts
            2. Setup controller nodes
            3. Setup compute nodes
            4. Setup Kubernetes cluster
            5. Run and expose sample test service
            6. Run conformance to check consistency
            7. For every version in update chain:
               Update cluster to new version, check test sample service
               availability, run conformance
        """

        # Steps 1-4 are performed by the deployment fixtures; the test
        # body therefore starts at step 5.
        show_step(5)
        sample = k8s_deployed.get_sample_deployment('test-dep-chain-upgrade')
        sample.run()
        sample.expose()
        sample.wait_for_ready()

        assert sample.is_service_available()

        show_step(6)
        k8s_deployed.run_conformance(log_out="k8s_conformance.log")

        show_step(7)
        # k8s_update_chain is a space-separated, ordered list of versions
        # to step through one by one.
        chain_versions = config.k8s.k8s_update_chain.split(" ")
        for version in chain_versions:
            LOG.info("Chain update to '{}' version".format(version))
            k8s_deployed.update_k8s_images(version)

            LOG.info("Checking test service availability")
            assert sample.is_service_available()

            LOG.info("Running conformance on {} version".format(version))
            log_name = "k8s_conformance_{}.log".format(version)
            # Don't abort the chain on conformance failures — the
            # remaining versions should still be exercised.
            k8s_deployed.run_conformance(log_out=log_name, raise_on_err=False)

    # NOTE(review): fixed "grap_versions" typo — see note on
    # test_k8s_chain_update above.
    @pytest.mark.grab_versions
    @pytest.mark.fail_snapshot
    def test_k8s_metallb(self, show_step, config, k8s_deployed):
        """Enable metallb in cluster and do basic tests

        Scenario:
            1. Setup Kubernetes cluster with enabled metallb
            2. Check that metallb pods created in metallb-system namespace
            3. Run 5 sample deployments
            4. Expose deployments with type=LoadBalancer
            5. Check services availability from outside of cluster
            6. Run conformance
            7. Check services availability from outside of cluster
        """
        show_step(1)
        if not config.k8s_deploy.kubernetes_metallb_enabled:
            pytest.skip("Test requires metallb addon enabled")

        show_step(2)
        # metallb runs a "controller" and per-node "speaker" pods in its
        # own namespace; both must be present.
        ns = "metallb-system"
        assert k8s_deployed.is_pod_exists_with_prefix("controller", ns)
        assert k8s_deployed.is_pod_exists_with_prefix("speaker", ns)

        show_step(3)
        samples = []
        for i in range(5):
            name = 'test-dep-metallb-{}'.format(i)
            sample = k8s_deployed.get_sample_deployment(name)
            sample.run()
            samples.append(sample)

        show_step(4)
        # Expose all deployments first, then wait, so the services come
        # up concurrently rather than strictly one after another.
        for sample in samples:
            sample.expose('LoadBalancer')
        for sample in samples:
            sample.wait_for_ready()

        show_step(5)
        for sample in samples:
            assert sample.is_service_available(external=True)

        show_step(6)
        k8s_deployed.run_conformance()

        show_step(7)
        # Re-check external availability to make sure conformance did not
        # disturb the metallb-assigned LoadBalancer addresses.
        for sample in samples:
            assert sample.is_service_available(external=True)

    # NOTE(review): fixed "grap_versions" typo — see note on
    # test_k8s_chain_update above.
    @pytest.mark.grab_versions
    @pytest.mark.fail_snapshot
    def test_k8s_genie_flannel(self, show_step, salt_deployed, k8s_deployed):
        """Test genie-cni+flannel cni setup

        Scenario:
            1. Setup Kubernetes cluster with genie cni and flannel
            2. Check that flannel pods created in kube-system namespace
            3. Create sample deployment with flannel cni annotation
            4. Check that the deployment have 1 ip addresses from cni provider
            5. Create sample deployment with calico cni annotation
            6. Check that the deployment have 1 ip addresses from cni provider
            7. Create sample deployment with multi-cni annotation
            8. Check that the deployment have 2 ip addresses from different
               cni providers
            9. Create sample deployment without cni annotation
            10. Check that the deployment have 1 ip address
            11. Check pods availability
            12. Run conformance
            13. Check pods availability
        """
        show_step(1)

        # Find out calico and flannel networks
        tgt_k8s_control = "I@kubernetes:control:enabled:True"

        flannel_pillar = salt_deployed.get_pillar(
            tgt=tgt_k8s_control,
            pillar="kubernetes:master:network:flannel:private_ip_range")[0]
        # list(...) around dict.values() keeps this working on Python 3,
        # where dict views are not subscriptable.
        flannel_network = netaddr.IPNetwork(list(flannel_pillar.values())[0])
        LOG.info("Flannel network: {}".format(flannel_network))

        calico_network_pillar = salt_deployed.get_pillar(
            tgt=tgt_k8s_control, pillar="_param:calico_private_network")[0]
        calico_netmask_pillar = salt_deployed.get_pillar(
            tgt=tgt_k8s_control, pillar="_param:calico_private_netmask")[0]
        calico_network = netaddr.IPNetwork(
            "{0}/{1}".format(list(calico_network_pillar.values())[0],
                             list(calico_netmask_pillar.values())[0]))
        LOG.info("Calico network: {}".format(calico_network))

        show_step(2)
        assert k8s_deployed.is_pod_exists_with_prefix("kube-flannel-",
                                                      "kube-system")

        # Pod manifests for each CNI-annotation flavour live next to the
        # tests in testdata/k8s.
        data_dir = os.path.join(os.path.dirname(__file__), 'testdata/k8s')
        show_step(3)
        flannel_pod = k8s_deployed.create_pod_from_file(
            os.path.join(data_dir, 'pod-sample-flannel.yaml'))

        show_step(4)
        flannel_ips = k8s_deployed.get_pod_ips_from_container(flannel_pod.name)
        assert len(flannel_ips) == 1
        assert netaddr.IPAddress(flannel_ips[0]) in flannel_network

        show_step(5)
        calico_pod = k8s_deployed.create_pod_from_file(
            os.path.join(data_dir, 'pod-sample-calico.yaml'))

        show_step(6)
        calico_ips = k8s_deployed.get_pod_ips_from_container(calico_pod.name)
        assert len(calico_ips) == 1
        assert netaddr.IPAddress(calico_ips[0]) in calico_network

        show_step(7)
        multicni_pod = k8s_deployed.create_pod_from_file(
            os.path.join(data_dir, 'pod-sample-multicni.yaml'))

        show_step(8)
        multicni_ips = \
            k8s_deployed.get_pod_ips_from_container(multicni_pod.name)
        assert len(multicni_ips) == 2
        # Each of the two networks must contain at least one of the pod's
        # two IPs, i.e. the addresses come from different providers.
        for net in [calico_network, flannel_network]:
            assert netaddr.IPAddress(multicni_ips[0]) in net or \
                netaddr.IPAddress(multicni_ips[1]) in net

        show_step(9)
        nocni_pod = k8s_deployed.create_pod_from_file(
            os.path.join(data_dir, 'pod-sample.yaml'))

        show_step(10)
        nocni_ips = k8s_deployed.get_pod_ips_from_container(nocni_pod.name)
        assert len(nocni_ips) == 1
        # Without an annotation the pod lands on whichever CNI is the
        # cluster default, so accept an address from either network.
        assert (netaddr.IPAddress(nocni_ips[0]) in calico_network or
                netaddr.IPAddress(nocni_ips[0]) in flannel_network)

        show_step(11)

        def check_pod_availability(ip):
            # Sample pods serve a fixed greeting on port 8080.
            assert "Hello Kubernetes!" in k8s_deployed.curl(
                "http://{}:8080".format(ip))

        def check_pods_availability():
            check_pod_availability(flannel_ips[0])
            check_pod_availability(calico_ips[0])
            check_pod_availability(multicni_ips[0])
            check_pod_availability(multicni_ips[1])
            check_pod_availability(nocni_ips[0])

        check_pods_availability()

        show_step(12)
        k8s_deployed.run_conformance()

        show_step(13)
        check_pods_availability()