Merge "Implement automated tests for MAAS backup/restore"
diff --git a/.gitignore b/.gitignore
index 52f9578..775d920 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,11 +1,13 @@
*.py[cod]
-
+.git
# C extensions
*.so
# Packages
*.egg
*.egg-info
+__pycache__
+.pytest_cache/
dist
build
include
@@ -20,6 +22,7 @@
local
lib64
venv
+venv27
MANIFEST
TAGS
diff --git a/tcp_tests/tests/system/test_update.py b/tcp_tests/tests/system/test_mcp_update.py
similarity index 64%
rename from tcp_tests/tests/system/test_update.py
rename to tcp_tests/tests/system/test_mcp_update.py
index 1493fd8..67499b8 100644
--- a/tcp_tests/tests/system/test_update.py
+++ b/tcp_tests/tests/system/test_mcp_update.py
@@ -1,11 +1,64 @@
import pytest
+import sys
+import os
from tcp_tests import logger
from tcp_tests import settings
+sys.path.append(os.getcwd())
+try:
+ from tcp_tests.fixtures import config_fixtures
+ from tcp_tests.managers import underlay_ssh_manager
+ from tcp_tests.managers import saltmanager as salt_manager
+except ImportError:
+ print("ImportError: Run the application from the tcp-qa directory or "
+ "set the PYTHONPATH environment variable to directory which contains"
+ " ./tcp_tests")
+ sys.exit(1)
LOG = logger.logger
+def has_only_similar(values_by_nodes):
+ """
+ :param values_by_nodes: dict
+ :return: bool, True if all items in the dict have similar values
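+ A hypothetical example:
+ has_only_similar({'ctl01': '14.2.1', 'ctl02': '14.2.1'}) -> True
+ has_only_similar({'ctl01': '14.2.1', 'ctl02': '14.2.2'}) -> False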
+ """
+ values = list(values_by_nodes.values())
+ return all(value == values[0] for value in values)
+
+
+def get_control_plane_targets():
+ config = config_fixtures.config()
+ underlay = underlay_ssh_manager.UnderlaySSHManager(config)
+ saltmanager = salt_manager.SaltManager(config, underlay)
+
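+ # 'I@<pillar:key>' is Salt compound matching by pillar data; the
+ # queries below collect the minion ids of keystone and proxy (nginx)
+ # nodes, excluding the Salt master itself.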
+ # list() so the result can be extended and appended under Python 3
+ targets = list(saltmanager.run_state(
+ "I@keystone:server", 'test.ping')[0]['return'][0].keys())
+ targets += saltmanager.run_state(
+ "I@nginx:server and not I@salt:master",
+ "test.ping")[0]['return'][0].keys()
+
+ # TODO: add check for Manila existence
+ # # Commented to avoid fails during OpenStack updates.
+ # # Anyway we don't have deployments with Manila yet
+ # targets.append('share*')
+ # TODO: add check for Tenant Telemetry existence
+ targets.append('mdb*')
+ # TODO: add check for Barbican existence
+ targets.append('kmn*')
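+ # The resulting list looks like (hostnames are illustrative):
+ # ['ctl01.cluster.local', 'prx01.cluster.local', 'mdb*', 'kmn*']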
+ return targets
+
+
+@pytest.fixture
+def switch_to_proposed_pipelines(reclass_actions, salt_actions):
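+ """Set the jenkins_pipelines_branch parameter to the proposed release
+ branch and re-apply jenkins.client so Jenkins picks up the change"""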
+ reclass_actions.add_key(
+ "parameters._param.jenkins_pipelines_branch",
+ "release/proposed/2019.2.0",
+ "cluster/*/infra/init.yml"
+ )
+ salt_actions.enforce_state("I@jenkins:client", "jenkins.client")
+
+
class TestUpdateMcpCluster(object):
"""
Following the steps in
@@ -16,7 +69,7 @@
@pytest.mark.parametrize("_", [settings.ENV_NAME])
@pytest.mark.run_mcp_update
def test_update_drivetrain(self, salt_actions, drivetrain_actions,
- show_step, _):
+ show_step, _, switch_to_proposed_pipelines):
"""Updating DriveTrain component to release/proposed/2019.2.0 version
Scenario:
@@ -25,7 +78,7 @@
3. Run job git-mirror-downstream-pipeline-library
4. If jobs are passed then start 'Deploy - upgrade MCP Drivetrain'
- Duration: ~35 min
+ Duration: ~70 min
"""
salt = salt_actions
dt = drivetrain_actions
@@ -66,13 +119,14 @@
job_name = 'upgrade-mcp-release'
job_parameters = {
+ 'GIT_REFSPEC': 'release/proposed/2019.2.0',
'MK_PIPELINES_REFSPEC': 'release/proposed/2019.2.0',
'TARGET_MCP_VERSION': '2019.2.0'
}
update_drivetrain = dt.start_job_on_cid_jenkins(
job_name=job_name,
job_parameters=job_parameters,
- build_timeout=3600)
+ build_timeout=90*60)
assert update_drivetrain == 'SUCCESS'
@@ -94,16 +148,6 @@
reclass = reclass_actions
dt = drivetrain_actions
- def has_only_similar(param_by_nodes):
- """
- :param param_by_nodes: dict
- :return: bool, True if all items in the dict have similar keys
- """
- params = list(param_by_nodes.values())
-
- def are_similar(x): return x == params[0]
-
- return all(map(are_similar, params)),
# ############## Change reclass ######################################
show_step(1)
reclass.add_key(
@@ -204,32 +248,32 @@
On each OpenStack controller node, modify the neutron.conf file
Restart the neutron-server service
"""
- def comment_line(node, file, word):
+ def comment_line(node, file_name, word):
"""
Adds '#' at the beginning of a specific line in a specific file
:param node: string, salt target of node where the file locates
- :param file: string, full path to the file
+ :param file_name: string, full path to the file
:param word: string, the beginning of the line to be commented
:return: None
"""
salt_actions.cmd_run(node,
"sed -i 's/^{word}/#{word}/' {file}".
format(word=word,
- file=file))
+ file=file_name))
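+ # E.g. with word='agent_down_time' (an illustrative option name),
+ # the command above renders as:
+ # sed -i 's/^agent_down_time/#agent_down_time/' /etc/neutron/neutron.conf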
- def add_line(node, file, line):
+ def add_line(node, file_name, line):
"""
Appends line to the end of file
:param node: string, salt target of node where the file locates
- :param file: string, full path to the file
+ :param file_name: string, full path to the file
:param line: string, line that should be added
:return: None
"""
salt_actions.cmd_run(node, "echo {line} >> {file}".format(
line=line,
- file=file))
+ file=file_name))
neutron_conf = '/etc/neutron/neutron.conf'
neutron_server = "I@neutron:server"
@@ -249,10 +293,10 @@
"allow_automatic_l3agent_failover = false")
# ## Apply changed config to the neutron-server service
- salt_actions.cmd_run(neutron_server,
- "service neutron-server restart")
+ result = salt_actions.cmd_run(neutron_server,
+ "service neutron-server restart")
# TODO: add check that neutron-server is up and running
- yield True
+ yield result
# ## Revert file changes
salt_actions.cmd_run(
neutron_server,
@@ -263,16 +307,17 @@
@pytest.fixture
def disable_neutron_agents_for_test(self, salt_actions):
"""
- Restart the neutron-server service
+ Disable the neutron agent services before the test and
+ enable them back after the test
"""
- salt_actions.cmd_run("I@neutron:server", """
+ result = salt_actions.cmd_run("I@neutron:server", """
service neutron-dhcp-agent stop && \
service neutron-l3-agent stop && \
service neutron-metadata-agent stop && \
service neutron-openvswitch-agent stop
""")
- yield True
- # Revert file changes
+ yield result
+ # ## Revert: start the neutron agent services back
salt_actions.cmd_run("I@neutron:server", """
service neutron-dhcp-agent start && \
service neutron-l3-agent start && \
@@ -329,3 +374,104 @@
build_timeout=40 * 60
)
assert update_rabbit == 'SUCCESS'
+
+ @pytest.mark.grab_versions
+ @pytest.mark.parametrize("_", [settings.ENV_NAME])
+ @pytest.mark.run_mcp_update
+ def test_update_ceph(self, salt_actions, drivetrain_actions,
+ show_step, _):
+ """ Updates Ceph to the latest minor version
+
+ Scenario:
+ 1. Add workaround for unhealthy Ceph
+ 2. Start ceph-upgrade job with default parameters
+ 3. Check Ceph version for all nodes
+
+ https://docs.mirantis.com/mcp/master/mcp-operations-guide/update-upgrade/minor-update/ceph-update.html
+ """
+ salt = salt_actions
+ dt = drivetrain_actions
+
+ # ###################### Add workaround for unhealthy Ceph ###########
+ show_step(1)
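+ # 'mon pg warn max object skew' tunes the threshold of the "more
+ # objects per pg than average" health warning; raising it keeps a
+ # benign HEALTH_WARN from blocking the upgrade (assumed intent of
+ # the workaround)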
+ salt.cmd_run("I@ceph:radosgw",
+ "ceph config set 'mon pg warn max object skew' 20")
+ # ###################### Start ceph-upgrade pipeline #################
+ show_step(2)
+ job_parameters = {}
+
+ update_ceph = dt.start_job_on_cid_jenkins(
+ job_name='ceph-update',
+ job_parameters=job_parameters)
+
+ assert update_ceph == 'SUCCESS'
+
+ # ########## Verify Ceph version #####################################
+ show_step(3)
+
+ ceph_version_by_nodes = salt.cmd_run(
+ "I@ceph:* and not I@ceph:monitoring and not I@ceph:backup:server",
+ "ceph version")[0]
+
+ assert has_only_similar(ceph_version_by_nodes), ceph_version_by_nodes
+
+
+class TestOpenstackUpdate(object):
+
+ @pytest.mark.grab_versions
+ @pytest.mark.run_mcp_update
+ def test__pre_update__enable_pipeline_job(self,
+ reclass_actions, salt_actions,
+ show_step):
+ """ Enable pipeline in the Drivetrain
+
+ Scenario:
+ 1. Add deploy.update.* classes to the reclass
+ 2. Start jenkins.client salt state
+
+ """
+ salt = salt_actions
+ reclass = reclass_actions
+ show_step(1)
+ reclass.add_class("system.jenkins.client.job.deploy.update.upgrade",
+ "cluster/*/cicd/control/leader.yml")
+
+ reclass.add_class(
+ "system.jenkins.client.job.deploy.update.upgrade_ovs_gateway",
+ "cluster/*/cicd/control/leader.yml")
+
+ reclass.add_class(
+ "system.jenkins.client.job.deploy.update.upgrade_compute",
+ "cluster/*/cicd/control/leader.yml")
+
+ show_step(2)
+ r, errors = salt.enforce_state("I@jenkins:client", "jenkins.client")
+ assert errors is None
+
+ @pytest.mark.grab_versions
+ @pytest.mark.parametrize('target', get_control_plane_targets())
+ @pytest.mark.run_mcp_update
+ def test__update__control_plane(self, drivetrain_actions,
+ switch_to_proposed_pipelines, target):
+ """Start 'Deploy - upgrade control VMs' for specific node
+ """
+ job_parameters = {
+ "TARGET_SERVERS": target,
+ "INTERACTIVE": False}
+ upgrade_control_pipeline = drivetrain_actions.start_job_on_cid_jenkins(
+ job_name="deploy-upgrade-control",
+ job_parameters=job_parameters)
+
+ assert upgrade_control_pipeline == 'SUCCESS'
+
+ @pytest.mark.grab_versions
+ @pytest.mark.run_mcp_update
+ def test__update__data_plane(self, drivetrain_actions):
+ """Start 'Deploy - upgrade OVS gateway'
+ """
+ job_parameters = {
+ "INTERACTIVE": False}
+ upgrade_data_pipeline = drivetrain_actions.start_job_on_cid_jenkins(
+ job_name="deploy-upgrade-ovs-gateway",
+ job_parameters=job_parameters)
+
+ assert upgrade_data_pipeline == 'SUCCESS'
diff --git a/tcp_tests/tests/system/test_security_updates.py b/tcp_tests/tests/system/test_security_updates.py
new file mode 100644
index 0000000..db1d7a7
--- /dev/null
+++ b/tcp_tests/tests/system/test_security_updates.py
@@ -0,0 +1,316 @@
+# Copyright 2019 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import json
+import pytest
+
+from devops.helpers import helpers
+
+from tcp_tests import logger
+from tcp_tests import settings
+
+LOG = logger.logger
+
+
+class TestUbuntuSecurityUpdates(object):
+ """Test class for verification of obtaining Ubuntu security updates"""
+
+ ENV_NAME = settings.ENV_NAME
+ UPGRADE_CMD = (
+ 'export DEBIAN_FRONTEND=noninteractive && '
+ 'apt-get update && '
+ 'apt-get -y upgrade && '
+ 'apt-get -y -o Dpkg::Options::="--force-confdef" '
+ ' -o Dpkg::Options::="--force-confnew" dist-upgrade'
+ )
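+ # The Dpkg::Options above keep the upgrade non-interactive:
+ # --force-confdef takes the default action for changed config files,
+ # and --force-confnew installs the new version where no default applies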
+ INST_LINUX_HEADERS_CMD = (
+ "export DEBIAN_FRONTEND=noninteractive && "
+ "apt-get -y install linux-headers-generic"
+ )
+
+ UPDATE_JOB_NAME = "deploy-update-package"
+ UPDATE_JOB_PARAMETERS = {
+ "ASK_CONFIRMATION": False,
+ "TARGET_SERVERS": ''
+ }
+
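+ # cvp-sanity is started with a pytest '-k' expression that skips the
+ # Ceph health test (presumably noisy right after package updates)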
+ SANITY_JOB_NAME = 'cvp-sanity'
+ SANITY_JOB_PARAMETERS = {
+ 'EXTRA_PARAMS': {
+ 'envs': ["tests_set=-k 'not test_ceph_health'"]
+ }
+ }
+
+ JENKINS_START_TIMEOUT = 60
+
+ def get_available_pkg_updates(self, nodes, salt):
+ """Collect available package updates for given nodes
+
+ :param nodes: list, nodes to collect available updates for
+ :param salt: SaltManager, tcp-qa Salt manager instance
+ :return: dict, update candidates for nodes
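+ e.g. {'mon01.cluster.local': {'openssl': '1.1.1-1ubuntu2'}}
+ (node and package names are illustrative)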
+ """
+ updates = {}
+ for node in nodes:
+ updates[node] = salt.local(
+ node, "pkg.list_upgrades")['return'][0][node]
+ return updates
+
+ def run_cvp_sanity(self, dt):
+ """A wrapper for executing cvp-sanity pipeline
+
+ :param dt: DrivetrainManager, tcp-qa Drivetrain manager instance
+ :return: str, build execution status of cvp-sanity pipeline
+ """
+ return dt.start_job_on_cid_jenkins(
+ job_name=self.SANITY_JOB_NAME,
+ job_parameters=self.SANITY_JOB_PARAMETERS,
+ start_timeout=self.JENKINS_START_TIMEOUT,
+ build_timeout=60 * 15
+ )
+
+ def reboot_hw_node(self, ssh, salt, node):
+ """Reboot the given node and wait for it to start back
+
+ :param ssh: UnderlaySSHManager, tcp-qa SSH manager instance
+ :param salt: SaltManager, tcp-qa Salt manager instance
+ :param node: str, name of the node to reboot
+ """
+ LOG.info("Sending reboot command to '{}' node.".format(node))
+ remote = ssh.remote(node_name=node)
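+ # Fire-and-forget: the reboot drops the SSH session, so do not wait
+ # for the command to return an exit status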
+ remote.execute_async("/sbin/shutdown -r now")
+
+ # Wait for the restarted node to boot and become accessible
+ helpers.wait_pass(
+ lambda: salt.local(node, "test.ping", timeout=5),
+ timeout=60 * 10, interval=5)
+
+ # TODO: finish the test once ASK_CONFIRMATION option is added to
+ # 'deploy-update-package' pipeline
+ @pytest.mark.grab_versions
+ @pytest.mark.ubuntu_security_updates_pipeline
+ def _test_obtaining_ubuntu_security_updates_via_pipeline(
+ self, salt_actions, drivetrain_actions, show_step):
+ """Test obtaining Ubuntu security updates using Jenkins
+
+ Scenario:
+ 1. Collect available package upgrades for nodes of the given server
+ role
+ 2. Execute deploy-update-package pipeline for the given server role
+ 3. Collect available package upgrades for server role nodes again
+ 4. Check that there are no candidates for upgrade
+ 5. Run cvp-sanity tests
+
+ Duration: ~ min
+ """
+ salt = salt_actions
+ dt = drivetrain_actions
+
+ role = "mon*"
+ nodes = salt.local(role, "test.ping")['return'][0].keys()
+
+ # Collect available package upgrades for nodes
+ show_step(1)
+ updates = self.get_available_pkg_updates(nodes, salt)
+ LOG.info("Packages to be updated on nodes:\n{}".format(
+ json.dumps(updates, indent=4)))
+
+ # Execute 'deploy-update-package' pipeline to upgrade packages on nodes
+ show_step(2)
+ self.UPDATE_JOB_PARAMETERS["TARGET_SERVERS"] = role
+ status = dt.start_job_on_cid_jenkins(
+ job_name=self.UPDATE_JOB_NAME,
+ job_parameters=self.UPDATE_JOB_PARAMETERS,
+ start_timeout=self.JENKINS_START_TIMEOUT,
+ build_timeout=60 * 15
+ )
+ assert status == 'SUCCESS', (
+ "'{}' job run status is {} after upgrading packages on {} nodes. "
+ "Please check the build and executed stages.".format(
+ self.UPDATE_JOB_NAME, status, role)
+ )
+
+ # Collect available package upgrades for nodes again
+ show_step(3)
+ post_upgrade = self.get_available_pkg_updates(nodes, salt)
+
+ # Check that there are no available package upgrades
+ show_step(4)
+ for node in nodes:
+ assert not post_upgrade[node], (
+ "{} node still has upgrade candidates. Please check the "
+ "following packages and the reason why they are not "
+ "updated:\n{}".format(node, post_upgrade[node])
+ )
+
+ # Execute cvp-sanity tests
+ show_step(5)
+ status = self.run_cvp_sanity(dt)
+ assert status == 'SUCCESS', (
+ "'{0}' job run status is {1} after executing CVP-Sanity "
+ "tests".format(
+ self.SANITY_JOB_NAME, status)
+ )
+
+ @pytest.mark.grab_versions
+ @pytest.mark.ubuntu_security_updates_manual_infra_vms
+ def test_obtaining_ubuntu_security_updates_manual_infra_vms(
+ self, salt_actions, drivetrain_actions, show_step):
+ """Test obtaining Ubuntu security updates on virtual infra nodes.
+ Repeat the scenario for 01, 02 and 03 indexes of nodes.
+
+ Scenario:
+ 1. Select a set of virtual nodes for upgrade
+ 2. Collect available package upgrades for the nodes
+ 3. Upgrade the nodes
+ 4. Collect available package upgrades for the nodes again
+ 5. Check that there are no candidates for upgrade on the nodes
+ 6. Run cvp-sanity tests
+
+ Duration: ~ 100 min
+ """
+ salt = salt_actions
+ dt = drivetrain_actions
+
+ for index in ('01', '02', '03'):
+ msg = ("# Executing scenario for '{i}' index of nodes #".format(
+ i=index))
+ LOG.info(
+ "\n\n{pad}\n{msg}\n{pad}".format(pad="#" * len(msg), msg=msg))
+
+ # Select set of nodes for current iteration of updates
+ show_step(1)
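+ # Glob on the node index plus a PCRE negative lookahead: match e.g.
+ # 'ctl01' or 'mon01', but skip kvm/cfg/cmp/osd nodes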
+ tgt = "*{}* and E@^(?!kvm|cfg|cmp|osd).*$".format(index)
+ nodes = list(salt.local(tgt, "test.ping")['return'][0].keys())
+ LOG.info("Nodes to be upgraded:\n{}".format(
+ json.dumps(nodes, indent=4)))
+
+ # Collect available package upgrades for the nodes
+ show_step(2)
+ updates = self.get_available_pkg_updates(nodes, salt)
+
+ # Upgrade the selected nodes
+ show_step(3)
+ for node in nodes:
+ LOG.info(
+ "Starting upgrade of '{}' node.\nThe following packages "
+ "will be updated:\n{}".format(
+ node, json.dumps(updates[node], indent=4))
+ )
+ salt.cmd_run(node, self.UPGRADE_CMD)
+
+ # Collect available package upgrades for the nodes again
+ show_step(4)
+ post_upgrade = self.get_available_pkg_updates(nodes, salt)
+
+ # Check that there are no package upgrade candidates on the nodes
+ show_step(5)
+ missed_upd = {
+ node: pkgs for (node, pkgs) in post_upgrade.items() if pkgs}
+ assert not missed_upd, (
+ "{} nodes still have upgrade candidates. Please check the "
+ "nodes and reason why the listed packages are not "
+ "updated:\n{}".format(
+ missed_upd.keys(), json.dumps(missed_upd, indent=4))
+ )
+
+ # Execute cvp-sanity tests
+ show_step(6)
+ status = self.run_cvp_sanity(dt)
+ assert status == 'SUCCESS', (
+ "'{0}' job run status is {1} after executing CVP-Sanity smoke "
+ "tests".format(self.SANITY_JOB_NAME, status))
+
+ @pytest.mark.grab_versions
+ @pytest.mark.ubuntu_security_updates_manual_hw_nodes
+ def test_obtaining_ubuntu_security_updates_manual_hw_nodes(
+ self,
+ salt_actions,
+ underlay_actions,
+ drivetrain_actions,
+ show_step):
+ """Test obtaining Ubuntu security updates on HW nodes.
+ Repeat the scenario for 01, 02 and 03 indexes of nodes.
+
+ Scenario:
+ 1. Select a set of HW nodes for upgrade
+ 2. Collect available package upgrades for the nodes
+ 3. Upgrade the nodes
+ 4. Collect available package upgrades for the nodes again
+ 5. Check that there are no candidates for upgrade on the nodes
+ 6. Run cvp-sanity tests
+
+ Duration: ~ 70 min
+ """
+ salt = salt_actions
+ ssh = underlay_actions
+ dt = drivetrain_actions
+
+ for index in ('01', '02', '03'):
+ msg = ("# Executing scenario for '{i}' index of nodes #".format(
+ i=index))
+ LOG.info(
+ "\n\n{pad}\n{msg}\n{pad}".format(pad="#" * len(msg), msg=msg))
+
+ # Select set of nodes for current iteration of updates
+ show_step(1)
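+ # PCRE target: kvm or cmp hostnames carrying the current index,
+ # e.g. 'kvm01' or 'cmp001' (illustrative names)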
+ tgt = "E@^(kvm|cmp).?{}.*$".format(index)
+ nodes = list(salt.local(tgt, "test.ping")['return'][0].keys())
+ LOG.info("Nodes to be upgraded:\n{}".format(
+ json.dumps(nodes, indent=4)))
+
+ # Collect available package upgrades for the nodes
+ show_step(2)
+ updates = self.get_available_pkg_updates(nodes, salt)
+
+ # Upgrade the selected nodes
+ show_step(3)
+ for node in nodes:
+ LOG.info(
+ "Starting upgrade of '{}' node.\nThe following packages "
+ "will be updated:\n{}".format(
+ node, json.dumps(updates[node], indent=4))
+ )
+ salt.cmd_run(node, self.UPGRADE_CMD)
+ # Update Linux headers on compute nodes
+ if "cmp" in node:
+ LOG.info(
+ "Updating linux headers on '{}' node.".format(node))
+ salt.cmd_run(node, self.INST_LINUX_HEADERS_CMD)
+
+ # Reboot the node after upgrade
+ LOG.info("Starting reboot of '{}' node.".format(node))
+ self.reboot_hw_node(ssh, salt, node)
+ LOG.info("'{}' node is back after reboot.".format(node))
+
+ # Collect available package upgrades for the nodes again
+ show_step(4)
+ post_upgrade = self.get_available_pkg_updates(nodes, salt)
+
+ # Check that there are no package upgrade candidates on the nodes
+ show_step(5)
+ missed_upd = {
+ node: pkgs for (node, pkgs) in post_upgrade.items() if pkgs}
+ assert not missed_upd, (
+ "{} nodes still have upgrade candidates. Please check the "
+ "nodes and reason why the listed packages are not "
+ "updated:\n{}".format(
+ missed_upd.keys(), json.dumps(missed_upd, indent=4))
+ )
+
+ # Execute cvp-sanity tests
+ show_step(6)
+ status = self.run_cvp_sanity(dt)
+ assert status == 'SUCCESS', (
+ "'{0}' job run status is {1} after executing CVP-Sanity "
+ "tests".format(self.SANITY_JOB_NAME, status))