Add 'k8s chain update' test
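
Run the conformance suite on the deployed cluster, then walk it through
every version in the new k8s_update_chain option: update the hyperkube,
pause and conformance image tags in the cluster model, apply the
lab-specific k8s-update.yaml steps, re-check a sample service and
re-run conformance for each version.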
Change-Id: Ica0cdf83e3a3b3a74e1eef8557d7064e97db1967
Related-PROD: PROD-17808
diff --git a/tcp_tests/fixtures/k8s_fixtures.py b/tcp_tests/fixtures/k8s_fixtures.py
index fbfe644..7ec1a19 100644
--- a/tcp_tests/fixtures/k8s_fixtures.py
+++ b/tcp_tests/fixtures/k8s_fixtures.py
@@ -167,3 +167,28 @@
k8s_deployed.download_k8s_logs(files)
request.addfinalizer(test_fin)
+
+
+@pytest.fixture(scope='function')
+def k8s_chain_update_log_helper(request, config, k8s_deployed):
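+ """Collect conformance logs and xunit reports for every version in the update chain after the test finishes."""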
+ def test_fin():
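+ # rep_call is attached by a pytest_runtest_makereport hookwrapper (assumed to be defined in conftest, as for the log fixture above)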
+ if hasattr(request.node, 'rep_call') and \
+ (request.node.rep_call.passed or request.node.rep_call.failed):
+
+ chain_versions = config.k8s.k8s_update_chain.split(" ")
+ for version in chain_versions:
+ container_name = "k8s-conformance:{}".format(version)
+ tmp_report_dir = "/root/report_{}".format(version)
+ report_path = "/root/report_{}.xml".format(version)
+ conformance_log_path = "k8s_conformance_{}.log".format(version)
+
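+ # Copy the report dir out of this version's conformance container and merge its xunit files into a single report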
+ k8s_deployed.extract_file_to_node(
+ system='docker', container=container_name,
+ out_dir=tmp_report_dir, file_path='report'
+ )
+ k8s_deployed.combine_xunit(tmp_report_dir, report_path)
+
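+ # Fetch the merged report and conformance log through the salt master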
+ k8s_deployed.download_k8s_logs(
+ [report_path, conformance_log_path])
+
+ request.addfinalizer(test_fin)
diff --git a/tcp_tests/managers/k8smanager.py b/tcp_tests/managers/k8smanager.py
index 7353d6b..1a6144f 100644
--- a/tcp_tests/managers/k8smanager.py
+++ b/tcp_tests/managers/k8smanager.py
@@ -342,14 +342,15 @@
for pod in self.get_running_pods(pod_name, namespace)]
return sum(pods)
- def run_conformance(self, timeout=60 * 60):
+ def run_conformance(self, timeout=60 * 60, log_out='k8s_conformance.log',
+ raise_on_err=True):
with self.__underlay.remote(
node_name=self.ctl_host) as remote:
result = remote.check_call(
"set -o pipefail; docker run --net=host -e API_SERVER="
- "'http://127.0.0.1:8080' {} | tee k8s_conformance.log".format(
- self.__config.k8s.k8s_conformance_image),
- timeout=timeout)['stdout']
+ "'http://127.0.0.1:8080' {0} | tee {1}".format(
+ self.__config.k8s.k8s_conformance_image, log_out),
+ timeout=timeout, raise_on_err=raise_on_err)['stdout']
return result
def get_k8s_masters(self):
@@ -393,7 +394,7 @@
node_name=self.ctl_host) as remote:
result = remote.check_call(
"kubectl get svc {0} -n {1} | "
- "awk '{{print $2}}' | tail -1".format(name, namespace)
+ "awk '{{print $3}}' | tail -1".format(name, namespace)
)
return result['stdout'][0].strip()
@@ -403,6 +404,17 @@
node_name=self.ctl_host) as remote:
remote.check_call("nslookup {0} {1}".format(host, src))
+ @retry(300, exception=DevopsCalledProcessError)
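+ # Retried on DevopsCalledProcessError since the service may be briefly unreachable while cluster components restart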
+ def curl(self, url):
+ """
+ Run curl on controller and return stdout
+
+ :param url: url to curl
+ :return: response string
+ """
+ with self.__underlay.remote(node_name=self.ctl_host) as r:
+ return r.check_call("curl -s -S \"{}\"".format(url))['stdout']
+
# ---------------------------- Virtlet methods -------------------------------
def install_jq(self):
"""Install JQuery on node. Required for changing yamls on the fly.
@@ -553,7 +565,9 @@
def extract_file_to_node(self, system='docker',
container='virtlet',
- file_path='report.xml', **kwargs):
+ file_path='report.xml',
+ out_dir='.',
+ **kwargs):
"""
Download file from docker or k8s container to node
@@ -561,12 +575,13 @@
:param container: Full container name or a part of the name
:param file_path: File path in container
:param kwargs: Used to control pod and namespace
+ :param out_dir: Output directory
:return:
"""
with self.__underlay.remote(
node_name=self.ctl_host) as remote:
if system == 'docker':
- cmd = ("docker ps --all | grep {0} |"
+ cmd = ("docker ps --all | grep \"{0}\" |"
" awk '{{print $1}}'".format(container))
result = remote.check_call(cmd, raise_on_err=False)
if result['stdout']:
@@ -576,14 +591,15 @@
return
cmd = "docker start {}".format(container_id)
remote.check_call(cmd, raise_on_err=False)
- cmd = "docker cp {0}:/{1} .".format(container_id, file_path)
+ cmd = "docker cp \"{0}:/{1}\" \"{2}\"".format(
+ container_id, file_path, out_dir)
remote.check_call(cmd, raise_on_err=False)
else:
# system is k8s
pod_name = kwargs.get('pod_name')
pod_namespace = kwargs.get('pod_namespace')
- cmd = 'kubectl cp {0}/{1}:/{2} .'.format(
- pod_namespace, pod_name, file_path)
+ cmd = 'kubectl cp \"{0}/{1}:/{2}\" \"{3}\"'.format(
+ pod_namespace, pod_name, file_path, out_dir)
remote.check_call(cmd, raise_on_err=False)
def download_k8s_logs(self, files):
@@ -595,8 +611,8 @@
master_host = self.__config.salt.salt_master_host
with self.__underlay.remote(host=master_host) as r:
for log_file in files:
- cmd = "rsync -r {0}:/root/{1} /root/".format(self.ctl_host,
- log_file)
+ cmd = "rsync -r \"{0}:/root/{1}\" /root/".format(
+ self.ctl_host, log_file)
r.check_call(cmd, raise_on_err=False)
LOG.info("Downloading the artifact {0}".format(log_file))
r.download(destination=log_file, target=os.getcwd())
@@ -653,3 +669,38 @@
# how to apply the fixture arg dynamically from the test.
rename_tar = "mv {0} cncf_results.tar.gz".format(tar_name)
remote.check_call(rename_tar, raise_on_err=False)
+
+ def update_k8s_images(self, tag):
+ """
+ Update the k8s image tags in the cluster metadata and apply the
+ states required for the update
+
+ :param tag: New version tag of k8s images
+ :return:
+ """
+ master_host = self.__config.salt.salt_master_host
+
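+ # Helper: rewrite a single image tag in the cluster model via salt-call reclass.cluster_meta_set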
+ def update_image_tag_meta(config, image_name):
+ image_old = config.get(image_name)
+ image_base = image_old.split(':')[0]
+ image_new = "{}:{}".format(image_base, tag)
+ LOG.info("Changing k8s '{0}' image cluster meta to '{1}'".format(
+ image_name, image_new))
+
+ with self.__underlay.remote(host=master_host) as r:
+ cmd = "salt-call reclass.cluster_meta_set" \
+ " name={0} value={1}".format(image_name, image_new)
+ r.check_call(cmd)
+ return image_new
+
+ cfg = self.__config
+
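+ # Update the hyperkube and pause images used by the pool, and point the conformance image at the matching tag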
+ update_image_tag_meta(cfg.k8s_deploy, "kubernetes_hyperkube_image")
+ update_image_tag_meta(cfg.k8s_deploy, "kubernetes_pause_image")
+ cfg.k8s.k8s_conformance_image = update_image_tag_meta(
+ cfg.k8s, "k8s_conformance_image")
+
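+ # Apply the lab-specific update steps (templates/<LAB_CONFIG_NAME>/k8s-update.yaml)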
+ steps_path = cfg.k8s_deploy.k8s_update_steps_path
+ update_commands = self.__underlay.read_template(steps_path)
+ self.execute_commands(
+ update_commands, label="Updating kubernetes to '{}'".format(tag))
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index 9082301..7abbe2a 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -66,6 +66,9 @@
_default_k8s_steps = pkg_resources.resource_filename(
__name__, 'templates/{0}/k8s.yaml'.format(
settings.LAB_CONFIG_NAME))
+_default_k8s_update_steps = pkg_resources.resource_filename(
+ __name__, 'templates/{0}/k8s-update.yaml'.format(
+ settings.LAB_CONFIG_NAME))
_default_net_mgm = os.environ.get("NET_MGMT", "admin-pool01")
@@ -285,6 +288,9 @@
ct.Cfg('k8s_steps_path', ct.String(),
help="Path to YAML with steps to deploy Kubernetes",
default=_default_k8s_steps),
+ ct.Cfg('k8s_update_steps_path', ct.String(),
+ help="Path to YAML with steps to update Kubernetes",
+ default=_default_k8s_update_steps),
ct.Cfg('kubernetes_admin_user', ct.String(), default='admin'),
ct.Cfg('kubernetes_admin_password', ct.String(),
default='sbPfel23ZigJF3Bm'),
@@ -346,7 +352,9 @@
default=False),
ct.Cfg('k8s_conformance_image', ct.String(),
default='docker-prod-virtual.docker.mirantis.net/mirantis/'
- 'kubernetes/k8s-conformance:v1.8.13-11')
+ 'kubernetes/k8s-conformance:v1.8.13-11'),
+ ct.Cfg('k8s_update_chain', ct.String(),
+ help="Space-separated chain of k8s image tags to update through",
+ default='v1.9.8-4 v1.10.3-3')
]
_group_opts = [
diff --git a/tcp_tests/templates/k8s-ha-calico/k8s-update.yaml b/tcp_tests/templates/k8s-ha-calico/k8s-update.yaml
new file mode 100644
index 0000000..1f27c73
--- /dev/null
+++ b/tcp_tests/templates/k8s-ha-calico/k8s-update.yaml
@@ -0,0 +1,30 @@
+{% from 'k8s-ha-calico/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'k8s-ha-calico/underlay.yaml' import HOSTNAME_CTL01 with context %}
+
+# Kubernetes
+- description: Update hyperkube image
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@kubernetes:pool' state.sls kubernetes.pool
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+- description: Run the whole kubernetes state on masters to check consistency
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@kubernetes:master' state.sls kubernetes exclude=kubernetes.master.setup
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+# TODO: sync with PROD-20441
+- description: Restart kube-apiserver
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kubernetes:control' service.restart kube-apiserver
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+- description: Print kubernetes version
+ cmd: kubectl version
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: true
diff --git a/tcp_tests/tests/system/test_k8s_actions.py b/tcp_tests/tests/system/test_k8s_actions.py
index 30dcba1..a8dbe72 100644
--- a/tcp_tests/tests/system/test_k8s_actions.py
+++ b/tcp_tests/tests/system/test_k8s_actions.py
@@ -77,3 +77,53 @@
show_step(1)
k8s_deployed.start_k8s_cncf_verification()
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_k8s_chain_update(self, show_step, underlay, config, k8s_deployed,
+ k8s_chain_update_log_helper):
+ """Test for chain-upgrading k8s hypercube pool and checking it
+
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Setup Kubernetes cluster
+ 5. Run and expose sample test service
+ 6. Run conformance to check consistency
+ 7. For every version in update chain:
+ Update cluster to new version, check test sample service
+ availability, run conformance
+ """
+
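+ # Steps 1-4 are performed by the k8s_deployed fixture chain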
+ deployment_name = 'test-dep-chain-upgrade'
+
+ show_step(5)
+ k8s_deployed.kubectl_run(
+ deployment_name, 'gcr.io/google-samples/node-hello:1.0', '8080')
+ k8s_deployed.kubectl_expose(
+ 'deployment', deployment_name, '8080', 'ClusterIP')
+ sample_service_ip = k8s_deployed.get_svc_ip(deployment_name, 'default')
+ k8s_deployed.wait_deploy_ready(deployment_name)
+
+ def check_is_test_service_available():
+ assert "Hello Kubernetes!" in k8s_deployed.curl(
+ "http://{}:{}".format(sample_service_ip, 8080))
+
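+ # Baseline check before updating anything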
+ check_is_test_service_available()
+
+ show_step(6)
+ k8s_deployed.run_conformance(log_out="k8s_conformance.log")
+
+ show_step(7)
+ chain_versions = config.k8s.k8s_update_chain.split(" ")
+ for version in chain_versions:
+ LOG.info("Chain update to '{}' version".format(version))
+ k8s_deployed.update_k8s_images(version)
+
+ LOG.info("Checking test service availability")
+ check_is_test_service_available()
+
+ LOG.info("Running conformance on {} version".format(version))
+ log_name = "k8s_conformance_{}.log".format(version)
+ k8s_deployed.run_conformance(log_out=log_name, raise_on_err=False)