Merge "Run 3rd-party test suites without excess dependencies"
diff --git a/tcp_tests/fixtures/runtest_fixtures.py b/tcp_tests/fixtures/runtest_fixtures.py
index 5a5c09e..5a43b42 100644
--- a/tcp_tests/fixtures/runtest_fixtures.py
+++ b/tcp_tests/fixtures/runtest_fixtures.py
@@ -23,7 +23,6 @@
Run tempest tests
"""
tempest_threads = settings.TEMPEST_THREADS
- tempest_exclude_test_args = settings.TEMPEST_EXCLUDE_TEST_ARGS
tempest_pattern = settings.TEMPEST_PATTERN
cluster_name = settings.LAB_CONFIG_NAME
domain_name = settings.DOMAIN_NAME
@@ -33,7 +32,6 @@
cluster_name=cluster_name,
domain_name=domain_name,
tempest_threads=tempest_threads,
- tempest_exclude_test_args=tempest_exclude_test_args,
tempest_pattern=tempest_pattern,
target=target)
return runtest
diff --git a/tcp_tests/managers/k8s/cluster.py b/tcp_tests/managers/k8s/cluster.py
index 8ffb4d1..db7bb18 100644
--- a/tcp_tests/managers/k8s/cluster.py
+++ b/tcp_tests/managers/k8s/cluster.py
@@ -42,6 +42,8 @@
from tcp_tests.managers.k8s.services import K8sServiceManager
from tcp_tests.managers.k8s.replicasets import K8sReplicaSetManager
from tcp_tests.managers.k8s.networkpolicies import K8sNetworkPolicyManager
+from tcp_tests.managers.k8s.clusterrolebindings import \
+ K8sClusterRoleBindingManager
class K8sCluster(object):
@@ -89,6 +91,7 @@
self.api_extensions = client.ExtensionsV1beta1Api(api_client)
self.api_autoscaling = client.AutoscalingV1Api(api_client)
self.api_batch = client.BatchV1Api(api_client)
+ self.api_rbac_auth = client.RbacAuthorizationV1Api(api_client)
self.nodes = K8sNodeManager(self)
self.pods = K8sPodManager(self)
@@ -111,3 +114,4 @@
self.pvolumes = K8sPersistentVolumeManager(self)
self.replicasets = K8sReplicaSetManager(self)
self.networkpolicies = K8sNetworkPolicyManager(self)
+ self.clusterrolebindings = K8sClusterRoleBindingManager(self)
diff --git a/tcp_tests/managers/k8s/clusterrolebindings.py b/tcp_tests/managers/k8s/clusterrolebindings.py
new file mode 100644
index 0000000..d958817
--- /dev/null
+++ b/tcp_tests/managers/k8s/clusterrolebindings.py
@@ -0,0 +1,54 @@
+# Copyright 2017 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from kubernetes import client
+
+from tcp_tests.managers.k8s.base import K8sBaseResource
+from tcp_tests.managers.k8s.base import K8sBaseManager
+
+
+class K8sClusterRoleBinding(K8sBaseResource):
+ resource_type = 'clusterrolebindings'
+
+ def _read(self, **kwargs):
+ return self._manager.api.read_cluster_role_binding(self.name, **kwargs)
+
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_cluster_role_binding(body, **kwargs)
+
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_cluster_role_binding(
+ self.name, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_cluster_role_binding(
+ self.name, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_cluster_role_binding(
+ self.name, client.V1DeleteOptions(), **kwargs)
+
+
+class K8sClusterRoleBindingManager(K8sBaseManager):
+ resource_class = K8sClusterRoleBinding
+
+ @property
+ def api(self):
+ return self._cluster.api_rbac_auth
+
+ def _list(self, namespace, **kwargs):
+ return self.api.list_cluster_role_binding(**kwargs)
+
+ def _list_all(self, **kwargs):
+ return self._list(None, **kwargs)
diff --git a/tcp_tests/managers/k8s/serviceaccounts.py b/tcp_tests/managers/k8s/serviceaccounts.py
index 3b779eb..bc0db30 100644
--- a/tcp_tests/managers/k8s/serviceaccounts.py
+++ b/tcp_tests/managers/k8s/serviceaccounts.py
@@ -14,6 +14,8 @@
from kubernetes import client
+from devops.helpers import helpers
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
@@ -41,6 +43,13 @@
self._manager.api.delete_namespaced_service_account(
self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
+ def wait_secret_generation(self, timeout=90, interval=2):
+ def is_secret_generated():
+ secrets = self.read().secrets
+ return secrets is not None and len(secrets) > 0
+ helpers.wait(lambda: is_secret_generated(),
+ timeout=timeout, interval=interval)
+
class K8sServiceAccountManager(K8sBaseManager):
resource_class = K8sServiceAccount
diff --git a/tcp_tests/managers/k8smanager.py b/tcp_tests/managers/k8smanager.py
index a4c22ca..96d60a0 100644
--- a/tcp_tests/managers/k8smanager.py
+++ b/tcp_tests/managers/k8smanager.py
@@ -364,15 +364,19 @@
return self.controller_check_call("nslookup {0} {1}".format(host, src))
@retry(300, exception=DevopsCalledProcessError)
- def curl(self, url):
+ def curl(self, url, *args):
"""
Run curl on controller and return stdout
:param url: url to curl
- :return: response string
+ :return: list of strings (with \n at end of every line)
"""
- result = self.controller_check_call("curl -s -S \"{}\"".format(url))
- LOG.debug("curl \"{0}\" result: {1}".format(url, result['stdout']))
+ args = list(args)
+ args.append(url)
+ cmd = "curl -s -S {}".format(
+ " ".join(["'{}'".format(a.replace("'", "\\'")) for a in args]))
+ result = self.controller_check_call(cmd)
+ LOG.debug("{0}\nresult:\n{1}".format(cmd, result['stdout']))
return result['stdout']
diff --git a/tcp_tests/managers/runtestmanager.py b/tcp_tests/managers/runtestmanager.py
index b24cba2..c400556 100644
--- a/tcp_tests/managers/runtestmanager.py
+++ b/tcp_tests/managers/runtestmanager.py
@@ -102,8 +102,8 @@
class_name = "runtest"
run_cmd = '/bin/bash -c "run-tempest"'
- def __init__(self, underlay, salt_api, cluster_name, domain_name,
- tempest_threads, tempest_exclude_test_args,
+ def __init__(self, underlay, salt_api, cluster_name,
+ domain_name, tempest_threads,
tempest_pattern=settings.TEMPEST_PATTERN,
run_cmd=None, target='gtw01'):
self.underlay = underlay
@@ -112,7 +112,6 @@
self.cluster_name = cluster_name
self.domain_name = domain_name
self.tempest_threads = tempest_threads
- self.tempest_exclude_test_args = tempest_exclude_test_args
self.tempest_pattern = tempest_pattern
self.run_cmd = run_cmd or self.run_cmd
@@ -244,11 +243,9 @@
"image": "{}:{}".format(self.image_name, self.image_version),
"environment": {
"ARGS": "-r {tempest_pattern} -w "
- "{tempest_threads} "
- "{tempest_exclude_test_args}".format(
+ "{tempest_threads} ".format(
tempest_pattern=self.tempest_pattern,
- tempest_threads=self.tempest_threads,
- tempest_exclude_test_args=self.tempest_exclude_test_args) # noqa
+ tempest_threads=self.tempest_threads) # noqa
},
"binds": [
"{cfg_dir}/tempest.conf:/etc/tempest/tempest.conf".format(cfg_dir=TEMPEST_CFG_DIR), # noqa
diff --git a/tcp_tests/settings.py b/tcp_tests/settings.py
index 9ddabd7..0d79cc4 100644
--- a/tcp_tests/settings.py
+++ b/tcp_tests/settings.py
@@ -79,9 +79,6 @@
TEMPEST_PATTERN = os.environ.get('TEMPEST_PATTERN', 'tempest')
TEMPEST_TIMEOUT = int(os.environ.get('TEMPEST_TIMEOUT', 60 * 60 * 5))
TEMPEST_THREADS = int(os.environ.get('TEMPEST_THREADS', 2))
-TEMPEST_EXCLUDE_TEST_ARGS = os.environ.get(
- 'TEMPEST_EXCLUDE_TEST_ARGS',
- '--blacklist-file mcp_pike_lvm_skip.list')
TEMPEST_TARGET = os.environ.get('TEMPEST_TARGET', 'gtw01')
SALT_VERSION = os.environ.get('SALT_VERSION', '2017.7')
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index 7613a2c..037dbd8 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -336,6 +336,8 @@
help="", default=False),
ct.Cfg('kubernetes_virtlet_image', ct.String(),
help="", default='mirantis/virtlet:v1.1.0'),
+ ct.Cfg('kubernetes_dns', ct.Boolean(),
+ help="", default=True),
ct.Cfg('kubernetes_externaldns_enabled', ct.Boolean(),
help="", default=False),
ct.Cfg('kubernetes_externaldns_image', ct.String(),
diff --git a/tcp_tests/templates/shared-openstack.yaml b/tcp_tests/templates/shared-openstack.yaml
index 9e0a1ff..c10ad43 100644
--- a/tcp_tests/templates/shared-openstack.yaml
+++ b/tcp_tests/templates/shared-openstack.yaml
@@ -28,14 +28,14 @@
- description: Restart apache due to PROD-10477
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*'
- cmd.run "systemctl restart apache2"
+ cmd.run "service apache2 restart"
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 15}
skip_fail: false
- description: Check apache status to PROD-10477
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*'
- cmd.run "systemctl status apache2"
+ cmd.run "service apache2 status"
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 15}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-trusty/core.yaml b/tcp_tests/templates/virtual-mcp-trusty/core.yaml
index 74d77b2..938b11f 100644
--- a/tcp_tests/templates/virtual-mcp-trusty/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-trusty/core.yaml
@@ -1,5 +1,7 @@
{% from 'virtual-mcp-trusty/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
+
# Install support services
- description: Install keepalived on ctl01
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
@@ -15,26 +17,7 @@
retry: {count: 1, delay: 10}
skip_fail: true
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
- description: Install RabbitMQ on ctl01
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
diff --git a/tcp_tests/templates/virtual-mcp-trusty/openstack.yaml b/tcp_tests/templates/virtual-mcp-trusty/openstack.yaml
index ea53986..3833315 100644
--- a/tcp_tests/templates/virtual-mcp-trusty/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-trusty/openstack.yaml
@@ -13,171 +13,9 @@
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Install glance on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server:role:primary' state.sls glance -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
-- description: Install glance on other controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server:role:secondary' state.sls glance -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install keystone service (note that different fernet keys are created on different nodes)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 15}
- skip_fail: false
-
-- description: Restart apache due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "service apache2 restart"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-- description: Check apache status to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "service apache2 status"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 15}
- skip_fail: false
-
-- description: Mount glusterfs.client volumes (resuires created 'keystone' and 'glusterfs' system users)
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glance:server' state.sls glusterfs.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Update fernet keys for keystone server on the mounted glusterfs volume
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' state.sls keystone.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Populate keystone services/tenants/admins
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:client' state.sls keystone.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check keystone service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check glance image-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install nova on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nova:controller' state.sls nova -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: Check nova service-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
- skip_fail: false
-
-
-- description: Install cinder
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@cinder:controller' state.sls cinder -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check cinder list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install neutron service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:server' state.sls neutron -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Workaround for PROD-21105
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:gateway' grains.set noservices True
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install neutron on gtw node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@neutron:gateway' state.sls neutron
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# install designate
-- description: Install bind
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@bind:server' state.sls bind
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Install designate
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@designate:server' state.sls designate -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Check neutron agent-list
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-- description: Install heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@heat:server' state.sls heat -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check heat service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 10}
- skip_fail: false
-
-
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
+# Deploy nginx before openstack services (PROD-22740)
- description: Deploy nginx proxy
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@nginx:server' state.sls nginx
@@ -185,6 +23,19 @@
retry: {count: 1, delay: 5}
skip_fail: true
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
# Install compute node
@@ -200,6 +51,12 @@
retry: {count: 1, delay: 5}
skip_fail: true
+- description: Restart libvirtd on compute nodes (PROD-23034)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' service.restart libvirtd
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
- description: Check IP on computes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
'ip a'
@@ -207,8 +64,7 @@
retry: {count: 10, delay: 30}
skip_fail: false
-
- # Upload cirros image
+# Upload cirros image
- description: Upload cirros image on ctl01
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
diff --git a/tcp_tests/templates/virtual-mcp-trusty/salt.yaml b/tcp_tests/templates/virtual-mcp-trusty/salt.yaml
index e625ef2..67695d9 100644
--- a/tcp_tests/templates/virtual-mcp-trusty/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-trusty/salt.yaml
@@ -57,3 +57,19 @@
{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
+
+# WORKAROUND PROD-21071
+- description: Set correct pin for openstack repository
+ cmd: |
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run "sed -i -e 's/Pin: release l=mitaka/Pin: release l=trusty\/openstack\/mitaka testing/g' /etc/apt/preferences.d/mirantis_openstack"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+# WORKAROUND PROD-22827
+- description: Set Pin-Priority up to 1200 for openstack repository
+ cmd: |
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run "sed -i -e 's/Pin-Priority: 1100/Pin-Priority: 1200/g' /etc/apt/preferences.d/mirantis_openstack"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
diff --git a/tcp_tests/tests/system/test_k8s_actions.py b/tcp_tests/tests/system/test_k8s_actions.py
index 11cb56a..7f01cf5 100644
--- a/tcp_tests/tests/system/test_k8s_actions.py
+++ b/tcp_tests/tests/system/test_k8s_actions.py
@@ -15,6 +15,7 @@
import pytest
import netaddr
import os
+import json
from tcp_tests import logger
from tcp_tests import settings
@@ -27,6 +28,10 @@
class TestMCPK8sActions(object):
"""Test class for different k8s actions"""
+ def __read_testdata_yaml(self, name):
+ dir = os.path.join(os.path.dirname(__file__), 'testdata/k8s')
+ return read_yaml_file(dir, name)
+
@pytest.mark.grab_versions
@pytest.mark.fail_snapshot
@pytest.mark.cz8116
@@ -236,9 +241,8 @@
namespace="kube-system", name_prefix="kube-flannel-") > 0
show_step(3)
- data_dir = os.path.join(os.path.dirname(__file__), 'testdata/k8s')
flannel_pod = k8s_deployed.api.pods.create(
- body=read_yaml_file(data_dir, 'pod-sample-flannel.yaml'))
+ body=self.__read_testdata_yaml('pod-sample-flannel.yaml'))
flannel_pod.wait_running()
show_step(4)
@@ -248,7 +252,7 @@
show_step(5)
calico_pod = k8s_deployed.api.pods.create(
- body=read_yaml_file(data_dir, 'pod-sample-calico.yaml'))
+ body=self.__read_testdata_yaml('pod-sample-calico.yaml'))
calico_pod.wait_running()
show_step(6)
@@ -258,7 +262,7 @@
show_step(7)
multicni_pod = k8s_deployed.api.pods.create(
- body=read_yaml_file(data_dir, 'pod-sample-multicni.yaml'))
+ body=self.__read_testdata_yaml('pod-sample-multicni.yaml'))
multicni_pod.wait_running()
show_step(8)
@@ -271,7 +275,7 @@
show_step(9)
nocni_pod = k8s_deployed.api.pods.create(
- body=read_yaml_file(data_dir, 'pod-sample.yaml'))
+ body=self.__read_testdata_yaml('pod-sample.yaml'))
nocni_pod.wait_running()
show_step(10)
@@ -306,3 +310,78 @@
calico_pod.delete()
multicni_pod.delete()
nocni_pod.delete()
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_k8s_dashboard(self, show_step, config,
+ salt_deployed, k8s_deployed):
+ """Test dashboard setup
+
+ Scenario:
+ 1. Setup Kubernetes cluster
+ 2. Try to curl login status api
+ 3. Create a test-admin-user account
+ 4. Try to login in dashboard using test-admin-user account
+ 5. Get and check list of namespaces using dashboard api
+ """
+ show_step(1)
+
+ show_step(2)
+ system_ns = 'kube-system'
+ dashboard_service = \
+ k8s_deployed.api.services.get('kubernetes-dashboard', system_ns)
+ dashboard_url = 'https://{}'.format(dashboard_service.get_ip())
+
+ def dashboard_curl(url, data=None, headers=None):
+ """ Using curl command on controller node. Alternatives:
+ - connect_{get,post}_namespaced_service_proxy_with_path -
+ k8s lib does not provide way to pass headers or POST data
+ - raw rest k8s api - need to auth somehow
+ - new load-balancer svc for dashboard + requests python lib -
+ requires working metallb or other load-balancer
+ """
+ args = ['--insecure']
+ for name in headers or {}:
+ args.append('--header')
+ args.append("{0}: {1}".format(name, headers[name]))
+ if data is not None:
+ args.append('--data')
+ args.append(data)
+ return ''.join(k8s_deployed.curl(dashboard_url + url, *args))
+
+ assert 'tokenPresent' in \
+ json.loads(dashboard_curl('/api/v1/login/status'))
+
+ show_step(3)
+ account = k8s_deployed.api.serviceaccounts.create(
+ namespace=system_ns,
+ body=self.__read_testdata_yaml('test-admin-user-account.yaml'))
+ account.wait_secret_generation()
+
+ k8s_deployed.api.clusterrolebindings.create(
+ body=self.__read_testdata_yaml(
+ 'test-admin-user-cluster-role-bind.yaml'))
+
+ account_secret = account.read().secrets[0]
+ account_token = k8s_deployed.api.secrets.get(
+ namespace=system_ns, name=account_secret.name).read().data['token']
+
+ show_step(4)
+ csrf_token = \
+ json.loads(dashboard_curl('/api/v1/csrftoken/login'))['token']
+ login_headers = {'X-CSRF-TOKEN': csrf_token,
+ 'Content-Type': 'application/json'}
+ jwe_token = json.loads(dashboard_curl(
+ '/api/v1/login', headers=login_headers,
+ data=json.dumps({'token': account_token.decode('base64')})
+ ))['jweToken']
+ headers = {'jweToken': jwe_token}
+
+ show_step(5)
+ dashboard_namespaces = json.loads(
+ dashboard_curl('/api/v1/namespace', headers=headers))['namespaces']
+
+ namespaces_names_list = \
+ [ns.name for ns in k8s_deployed.api.namespaces.list()]
+ for namespace in dashboard_namespaces:
+ assert namespace['objectMeta']['name'] in namespaces_names_list
diff --git a/tcp_tests/tests/system/testdata/k8s/test-admin-user-account.yaml b/tcp_tests/tests/system/testdata/k8s/test-admin-user-account.yaml
new file mode 100644
index 0000000..889fa48
--- /dev/null
+++ b/tcp_tests/tests/system/testdata/k8s/test-admin-user-account.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: test-admin-user
+ namespace: kube-system
diff --git a/tcp_tests/tests/system/testdata/k8s/test-admin-user-cluster-role-bind.yaml b/tcp_tests/tests/system/testdata/k8s/test-admin-user-cluster-role-bind.yaml
new file mode 100644
index 0000000..cad78ce
--- /dev/null
+++ b/tcp_tests/tests/system/testdata/k8s/test-admin-user-cluster-role-bind.yaml
@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: test-admin-user
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cluster-admin
+subjects:
+- kind: ServiceAccount
+ name: test-admin-user
+ namespace: kube-system