Merge "Add zookeeper automation test"
diff --git a/.gitignore b/.gitignore
index 52f9578..775d920 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,11 +1,13 @@
*.py[cod]
-
+.git
# C extensions
*.so
# Packages
*.egg
*.egg-info
+__pycache__
+.pytest_cache/
dist
build
include
@@ -20,6 +22,7 @@
local
lib64
venv
+venv27
MANIFEST
TAGS
diff --git a/src/com/mirantis/system_qa/SharedPipeline.groovy b/src/com/mirantis/system_qa/SharedPipeline.groovy
index 0e0fdda..ddb82bb 100644
--- a/src/com/mirantis/system_qa/SharedPipeline.groovy
+++ b/src/com/mirantis/system_qa/SharedPipeline.groovy
@@ -215,6 +215,7 @@
def deploy_network_mask = env.DEPLOY_NETWORK_NETMASK ?: ''
def env_ipmi_user = env.IPMI_USER ?: ''
def env_ipmi_pass = env.IPMI_PASS ?: ''
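+    // Password for the Cisco switch used by the Ironic baremetal nodes; passed through to the child deployment job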
+ def env_cisco_pass = env.CISCO_PASS ?: ''
def env_lab_mgm_iface = env.LAB_MANAGEMENT_IFACE ?: ''
def env_lab_ctl_iface = env.LAB_CONTROL_IFACE ?: ''
def update_repo_custom_tag = env.UPDATE_REPO_CUSTOM_TAG ?: ''
@@ -240,6 +241,7 @@
string(name: 'DEPLOY_NETWORK_NETMASK', value: "${deploy_network_mask}"),
string(name: 'IPMI_USER', value: env_ipmi_user),
string(name: 'IPMI_PASS', value: env_ipmi_pass),
+ string(name: 'CISCO_PASS', value: env_cisco_pass),
string(name: 'LAB_MANAGEMENT_IFACE', value: env_lab_mgm_iface),
string(name: 'LAB_CONTROL_IFACE', value: env_lab_ctl_iface),
string(name: 'UPDATE_REPO_CUSTOM_TAG', value: "${update_repo_custom_tag}"),
@@ -268,6 +270,7 @@
def mcp_salt_repo_key = env.MCP_SALT_REPO_KEY ?: ''
def env_ipmi_user = env.IPMI_USER ?: ''
def env_ipmi_pass = env.IPMI_PASS ?: ''
+ def env_cisco_pass = env.CISCO_PASS ?: ''
def env_lab_mgm_iface = env.LAB_MANAGEMENT_IFACE ?: ''
def env_lab_ctl_iface = env.LAB_CONTROL_IFACE ?: ''
def update_repo_custom_tag = env.UPDATE_REPO_CUSTOM_TAG ?: ''
@@ -295,6 +298,7 @@
string(name: 'UPDATE_VERSION', value: "${update_version}"),
string(name: 'IPMI_USER', value: env_ipmi_user),
string(name: 'IPMI_PASS', value: env_ipmi_pass),
+ string(name: 'CISCO_PASS', value: env_cisco_pass),
string(name: 'LAB_MANAGEMENT_IFACE', value: env_lab_mgm_iface),
string(name: 'LAB_CONTROL_IFACE', value: env_lab_ctl_iface),
string(name: 'UPDATE_REPO_CUSTOM_TAG', value: "${update_repo_custom_tag}"),
@@ -447,6 +451,7 @@
string(name: 'IPV4_NET_EXTERNAL', value: IPV4_NET_EXTERNAL),
string(name: 'IPMI_USER', value: env.IPMI_USER),
string(name: 'IPMI_PASS', value: env.IPMI_PASS),
+ string(name: 'CISCO_PASS', value: env.CISCO_PASS),
string(name: 'UPDATE_REPO_CUSTOM_TAG', value: "${update_repo_custom_tag}"),
string(name: 'JENKINS_PIPELINE_BRANCH', value: "${jenkins_pipelines_branch}"),
string(name: 'IMAGE_PATH_CFG01_DAY01', value: env.IMAGE_PATH_CFG01_DAY01),
diff --git a/tcp_tests/managers/reclass_manager.py b/tcp_tests/managers/reclass_manager.py
index dc45be1..8448169 100644
--- a/tcp_tests/managers/reclass_manager.py
+++ b/tcp_tests/managers/reclass_manager.py
@@ -69,6 +69,19 @@
path=short_path
))
+ def get_key(self, key, short_path):
+ """Find a key in a YAML file
+
+ :param key: string, key to look up
+ :param short_path: path to reclass yaml file.
+ It takes into account default path where the reclass is located.
+ May look like cluster/*/cicd/control/leader.yml
+ :return: str, value of the key if found
+ """
+ return self.ssh.check_call(
+ "{reclass_tools} get-key {key} /srv/salt/reclass/classes/{path}".format(
+ reclass_tools=self.reclass_tools_cmd,
+ key=key,
+ path=short_path))['stdout_str']
+
def add_bool_key(self, key, value, short_path):
"""
Shows alert if key exists
diff --git a/tcp_tests/managers/saltmanager.py b/tcp_tests/managers/saltmanager.py
index 2ca0126..0ecef46 100644
--- a/tcp_tests/managers/saltmanager.py
+++ b/tcp_tests/managers/saltmanager.py
@@ -271,6 +271,14 @@
result = self.local(tgt=tgt, fun='service.stop', args=service)
return result['return']
+ def pkg_install(self, tgt, pkg):
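+ """Install the given package(s) on the targeted minions via Salt 'pkg.install'"""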
+ result = self.local(tgt=tgt, fun='pkg.install', args=pkg)
+ return result['return']
+
+ def pkg_info_installed(self, tgt, pkg):
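+ """Return installed package information from the targeted minions via Salt 'pkg.info_installed'"""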
+ result = self.local(tgt=tgt, fun='pkg.info_installed', args=pkg)
+ return result['return']
+
def cmd_run(self, tgt, cmd):
result = self.local(tgt=tgt, fun='cmd.run', args=cmd)
return result['return']
@@ -428,11 +436,6 @@
password=jenkins_pass)
)
- def add_cluster_reclass(self, key, value, path):
- # TODO : add reclass tools as a library to tcp-qa
- self.cmd_run('I@salt:master',
- "reclass-tools add-key {key} {value} {path}")
-
def create_env_k8s(self):
"""Creates static utils/env_k8s file"""
diff --git a/tcp_tests/requirements.txt b/tcp_tests/requirements.txt
index 759a449..e9c2917 100644
--- a/tcp_tests/requirements.txt
+++ b/tcp_tests/requirements.txt
@@ -27,5 +27,5 @@
# For Queens: https://github.com/openstack/requirements/blob/stable/queens/global-requirements.txt
python-heatclient>=1.10.0
python-glanceclient>=2.8.0
-python-openstackclient>=3.12.0
+python-openstackclient==3.18.0
keystoneauth1>=3.3.0
diff --git a/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt.yaml b/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt.yaml
index 4e66daa..706e8ab 100644
--- a/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt.yaml
+++ b/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt.yaml
@@ -3,6 +3,7 @@
{% set DOMAIN_NAME='bm-cicd-pike-ovs-maas.local' %}
{% import 'shared-salt.yaml' as SHARED with context %}
+{% import 'shared-test-tools.yaml' as SHARED_TEST_TOOLS with context %}
- description: Wait for salt-master is ready after configdrive user-data
cmd: |
@@ -76,3 +77,5 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 6, delay: 5}
skip_fail: false
+
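+# Install reclass-tools on the Salt master so the system tests can read and modify the cluster model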
+{{SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS()}}
\ No newline at end of file
diff --git a/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-vcp-environment.yaml b/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-vcp-environment.yaml
index f73de65..1cefe2f 100644
--- a/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-vcp-environment.yaml
+++ b/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-vcp-environment.yaml
@@ -5,6 +5,7 @@
- openstack_control_leader
- linux_system_codename_xenial
- features_ironic_baremetal_nodes
+ - features_ironic_switch
interfaces:
ens2:
role: single_dhcp
@@ -16,6 +17,7 @@
roles:
- openstack_control
- linux_system_codename_xenial
+ - features_ironic_switch
interfaces:
ens2:
role: single_dhcp
@@ -27,6 +29,7 @@
roles:
- openstack_control
- linux_system_codename_xenial
+ - features_ironic_switch
interfaces:
ens2:
role: single_dhcp
diff --git a/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt.yaml b/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt.yaml
index 5f7d480..3d1dbfe 100644
--- a/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt.yaml
+++ b/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt.yaml
@@ -3,6 +3,7 @@
{% set DOMAIN_NAME='bm-cicd-queens-ovs-maas.local' %}
{% import 'shared-salt.yaml' as SHARED with context %}
+{% import 'shared-test-tools.yaml' as SHARED_TEST_TOOLS with context %}
- description: Wait for salt-master is ready after configdrive user-data
cmd: |
@@ -76,3 +77,5 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 6, delay: 5}
skip_fail: false
+
+{{SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS()}}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-model-generator/salt_bm-cicd-queens-ovs-maas.yaml b/tcp_tests/templates/cookied-model-generator/salt_bm-cicd-queens-ovs-maas.yaml
index 2d2db12..5aee981 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_bm-cicd-queens-ovs-maas.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_bm-cicd-queens-ovs-maas.yaml
@@ -13,6 +13,7 @@
{%- set IPMI_USER = os_env('IPMI_USER', 'mcp-qa') %}
{%- set IPMI_PASS = os_env('IPMI_PASS', 'password') %}
+{%- set CISCO_PASS = os_env('CISCO_PASS', 'cisco_pass') %}
{% import 'shared-salt.yaml' as SHARED with context %}
@@ -48,6 +49,8 @@
sed -i 's/==IPMI_PASS==/${_param:power_password}/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
sed -i 's/==IPMI_USER==/${_param:power_user}/g' /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/baremetal_nodes.yml;
sed -i 's/==IPMI_PASS==/${_param:power_password}/g' /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/baremetal_nodes.yml;
+ sed -i 's/==IPMI_USER==/${_param:power_user}/g' /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/switch.yml;
+ sed -i 's/==CISCO_PASS==/${_param:cisco_password}/g' /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/switch.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
@@ -61,6 +64,8 @@
reclass-tools add-key parameters._param.power_password {{ IPMI_PASS }} /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/maas_machines.yml;
reclass-tools add-key parameters._param.power_user {{ IPMI_USER }} /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/baremetal_nodes.yml;
reclass-tools add-key parameters._param.power_password {{ IPMI_PASS }} /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/baremetal_nodes.yml;
+ reclass-tools add-key parameters._param.power_user {{ IPMI_USER }} /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/switch.yml;
+ reclass-tools add-key parameters._param.cisco_password {{ CISCO_PASS }} /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/features/ironic/switch.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: false
\ No newline at end of file
+ skip_fail: false
diff --git a/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt.yaml b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt.yaml
index eb65ef0..f8368a3 100644
--- a/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt.yaml
+++ b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt.yaml
@@ -5,6 +5,7 @@
# Other salt model repository parameters see in shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
+{% import 'shared-test-tools.yaml' as SHARED_TEST_TOOLS with context %}
- description: Wait for salt-master is ready after configdrive user-data
cmd: |
@@ -73,3 +74,4 @@
retry: {count: 6, delay: 5}
skip_fail: false
+{{SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS()}}
diff --git a/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt.yaml b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt.yaml
index fa3aa30..0e1931f 100644
--- a/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt.yaml
+++ b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt.yaml
@@ -5,6 +5,7 @@
# Other salt model repository parameters see in shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
+{% import 'shared-test-tools.yaml' as SHARED_TEST_TOOLS with context %}
- description: Wait for salt-master is ready after configdrive user-data
cmd: |
@@ -73,3 +74,4 @@
retry: {count: 6, delay: 5}
skip_fail: false
+{{SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS()}}
\ No newline at end of file
diff --git a/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/salt.yaml b/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/salt.yaml
index 9bbc435..b22b9f3 100644
--- a/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/salt.yaml
+++ b/tcp_tests/templates/released-heat-cicd-pike-contrail41-sl/salt.yaml
@@ -5,6 +5,7 @@
# Other salt model repository parameters see in shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
+{% import 'shared-test-tools.yaml' as SHARED_TEST_TOOLS with context %}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
@@ -12,3 +13,4 @@
{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
+{{SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS()}}
diff --git a/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/salt.yaml b/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/salt.yaml
index 4bc1252..fad7ca6 100644
--- a/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/salt.yaml
+++ b/tcp_tests/templates/released-heat-cicd-pike-dvr-sl/salt.yaml
@@ -5,6 +5,7 @@
# Other salt model repository parameters see in shared-salt.yaml
{% import 'shared-salt.yaml' as SHARED with context %}
+{% import 'shared-test-tools.yaml' as SHARED_TEST_TOOLS with context %}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
@@ -13,3 +14,5 @@
{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
{{SHARED.MACRO_IPFLUSH_TENANTS_IFACES()}}
+
+{{SHARED_TEST_TOOLS.MACRO_INSTALL_RECLASS_TOOLS()}}
diff --git a/tcp_tests/tests/system/test_backup_restore.py b/tcp_tests/tests/system/test_backup_restore.py
index 4e9c46a..f351ba3 100644
--- a/tcp_tests/tests/system/test_backup_restore.py
+++ b/tcp_tests/tests/system/test_backup_restore.py
@@ -11,6 +11,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import itertools
import pytest
from devops.helpers import helpers
@@ -27,107 +28,123 @@
"""Test class for testing backup restore of master node"""
ENV_NAME = settings.ENV_NAME
-
BCKP_SERVER_DIR = "/srv/volumes/backup/backupninja"
- RECLASS_DIR = "/srv/salt/reclass"
- FILES_TO_DELETE = [
- "nodes/_generated/log02.{}.local.yml".format(ENV_NAME),
- "classes/cluster/{}/stacklight/telemetry.yml".format(ENV_NAME),
- "classes/service/barbican",
- "classes/system/prometheus/alertmanager/container.yml"
- ]
- FILES_TO_UPDATE = [
- "nodes/_generated/mtr02.{}.local.yml".format(ENV_NAME),
- "classes/cluster/{}/ceph/rgw.yml".format(ENV_NAME),
- "classes/system/grafana/client/single.yml"
- ]
- BACKUP_JOB_NAME = 'backupninja_backup'
+ # Salt master backup/restore related data
+ SM_DIRS = ["/srv/salt/reclass", "/etc/pki/ca", "/etc/salt/pki"]
+ SM_FILE_TO_DELETE = "sm_file_to_delete.txt"
+ SM_FILE_TO_UPDATE = "sm_file_to_update.txt"
+ SM_FLAG_FILES = ["/srv/salt/master-restored", "/srv/salt/minion-restored"]
+ SM_BACKUP_DIRS = ["etc/pki", "etc/salt", "srv/salt"]
+ SM_YAML = "cluster/*/infra/config/init.yml"
+
+ # MAAS backup/restore related data
+ MAAS_DIRS = ["/var/lib/maas", "/etc/maas"]
+ MAAS_FILE_TO_DELETE = "maas_file_to_delete.txt"
+ MAAS_FILE_TO_UPDATE = "maas_file_to_update.txt"
+ MAAS_FLAG_FILES = ["/root/maas/flags/*"]
+ MAAS_BACKUP_DIRS = ["etc/maas", "var/backups/postgresql", "var/lib/maas"]
+ MAAS_SERVICES = ["maas-dhcpd", "maas-proxy", "maas-rackd", "maas-regiond"]
+ MAAS_YAML = "cluster/*/infra/maas.yml"
+
+ # Jenkins pipeline data
+ BACKUP_JOB_NAME = "backupninja_backup"
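+ # Back up only the Salt master and MAAS data; Dogtag backup is not needed for these scenarios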
BACKUP_JOB_PARAMETERS = {
- "ASK_CONFIRMATION": False
+ "ASK_CONFIRMATION": False,
+ "BACKUP_DOGTAG": False,
+ "BACKUP_SALTMASTER_AND_MAAS": True
}
- RESTORE_JOB_NAME = 'backupninja_restore'
+ RESTORE_JOB_NAME = "backupninja_restore"
JENKINS_START_TIMEOUT = 60
JENKINS_BUILD_TIMEOUT = 60 * 30
@pytest.fixture
- def delete_backup(self, underlay_actions, salt_actions):
- """Remove Salt master backup and/or restore flag files
+ def cleanup_actions(self, underlay_actions, salt_actions, reclass_actions):
+ """Cleanup/restore actions for backup/restore scenarios
- If exists, remove existing backup(s) form backup server.
- If exists, remove '/srv/salt/master-restored' and
- '/srv/salt/minion-restored' flag files, which indicate that Salt master
+ - Remove flag files, if any, which indicate that the
backup restore procedure has already been executed.
-
- Execute cleanup before a test (to prepare clean environment) and after
- the test (to not affect any later activities on the environment).
+ - Reset the backup schedule to the default value (1:00 AM).
:param underlay_actions: UnderlaySSHManager, tcp-qa SSH manager
instance
:param salt_actions: SaltManager, tcp-qa Salt manager instance
+ :param reclass_actions: ReclassManager, tcp-qa Reclass-tools manager
"""
- client = salt_actions.local(
- "I@backupninja:client", "test.ping")['return'][0].keys()[0]
+ sm = salt_actions.local(
+ "I@salt:master", "test.ping")['return'][0].keys()[0]
server = salt_actions.local(
"I@backupninja:server", "test.ping")['return'][0].keys()[0]
+ flag_files = self.SM_FLAG_FILES + self.MAAS_FLAG_FILES
- def cleanup(underlay_actions, server, client):
- # Delete backups, if any, from backup server
- path = "{}/{}".format(self.BCKP_SERVER_DIR, client)
- underlay_actions.check_call(
- "rm -rf {}".format(path), node_name=server, raise_on_err=False)
-
- # Delete restore flag files from backup client, if exist
- for f in ("minion-restored", "master-restored"):
+ def cleanup(underlay_actions, server, sm, flag_files):
+ # Delete restore flag files from Salt master, if exist
+ for f in flag_files:
underlay_actions.check_call(
- "rm /srv/salt/{}".format(f),
- node_name=client,
+ "rm -rf {}".format(f),
+ node_name=sm,
raise_on_err=False)
- cleanup(underlay_actions, server, client)
+ cleanup(underlay_actions, server, sm, flag_files)
yield
- cleanup(underlay_actions, server, client)
+ cleanup(underlay_actions, server, sm, flag_files)
- def check_salt_master_backup(self, ssh, server, path, client):
+ # Change backup schedule to default values
+ for path in (self.MAAS_YAML, self.SM_YAML):
+ reclass_actions.add_key(
+ "parameters.backupninja.client.backup_times.hour",
+ "\"'1'\"",
+ path)
+ reclass_actions.add_key(
+ "parameters.backupninja.client.backup_times.minute",
+ "\"'0'\"",
+ path)
+
+ def check_backup(self, ssh, server, path, client_name, dirs):
"""Check that data directories exist in backup on backup server
:param ssh: UnderlaySSHManager, tcp-qa SSH manager instance
:param server: string, backup server node where backup is stored
:param path: string, path to backupninja inventory of backups on server
- :param client: string, backup client node name, which indicates the
- name of backup on backup server
+ :param client_name: string, backup client node name, which indicates
+ the name of backup on backup server
+ :param dirs: list, list of data directories of the backup on server
"""
- for subdir in ("etc", "srv", "var"):
- cmd = "test -d {}/{}/{}".format(path, client, subdir)
+ for d in dirs:
+ cmd = "test -d {}/{}/{}".format(path, client_name, d)
result = ssh.check_call(
cmd, node_name=server, raise_on_err=False)['exit_code']
assert result == ExitCodes.EX_OK, (
- "'{}' data from Salt master is not in backup.".format(subdir))
+ "'{}' data from {} is not in backup.".format(d, client_name))
- def delete_reclass_files(self, ssh, client):
- """Delete several reclass files
+ def delete_files(self, ssh, client, base_dirs, file_to_delete):
+ """Delete files from the given location
:param ssh: UnderlaySSHManager, tcp-qa SSH manager instance
:param client: string, backup client node where files are deleted
+ :param base_dirs: list, directories from which to delete the given file
+ :param file_to_delete: string, name of the file to be deleted
"""
- files_to_delete = " ".join(self.FILES_TO_DELETE)
- ssh.check_call(
- "cd {}; rm {}".format(self.RECLASS_DIR, files_to_delete),
- node_name=client,
- raise_on_err=False)
+ for base_dir in base_dirs:
+ ssh.check_call(
+ "rm {}/{}".format(base_dir, file_to_delete),
+ node_name=client,
+ raise_on_err=False)
- def update_reclass_files(self, ssh, client):
- """Update several reclass files
+ def update_files(self, ssh, client, base_dirs, file_to_update):
+ """Update given files
:param ssh: UnderlaySSHManager, tcp-qa SSH manager instance
:param client: string, backup client node where files are updated
+ :param base_dirs: list, directories in which to update the given file
+ :param file_to_update: string, name of the file to be updated
:return: dict, key-value pairs of files and their hashes before update
"""
hashes = {}
- for f in self.FILES_TO_UPDATE:
- path = "{}/{}".format(self.RECLASS_DIR, f)
+ for base_dir in base_dirs:
+ path = "{}/{}".format(base_dir, file_to_update)
# Calculate hash of a file
- hashes[f] = ssh.check_call(
+ hashes[path] = ssh.check_call(
"sha1sum {} | awk '{{print $1}}'".format(path),
node_name=client,
raise_on_err=False)['stdout']
@@ -138,49 +155,141 @@
raise_on_err=False)
return hashes
- def update_backup_schedule(self, reclass):
+ def update_backup_schedule(self, reclass, path):
"""Update backup schedule on backupninja client
:param reclass: ReclassManager, tcp-qa Reclass-tools manager
+ :param path: str, path to YAML file to update
"""
- path = "cluster/*/infra/config/init.yml"
reclass.add_bool_key("parameters.backupninja.enabled", "True", path)
reclass.add_key(
+ "parameters.backupninja.client.backup_times.hour",
+ "\"'*'\"",
+ path)
+ reclass.add_key(
"parameters.backupninja.client.backup_times.minute",
"\"'*/10'\"",
path)
- def verify_restored_data(self, ssh, client, hashes):
- """Verify restore of deleted and updated reclass files
+ def _precreate_test_files(self, salt, ssh, base_dirs, test_files):
+ """Prepare test files for scenarios
+
+ :param salt: SaltManager, tcp-qa Salt manager instance
+ :param ssh: UnderlaySSHManager, tcp-qa SSH manager instance
+ :param base_dirs: list, list of paths
+ :param test_files: list, list of file names - test files to be created
+ """
+ sm = salt.local("I@salt:master", "test.ping")['return'][0].keys()[0]
+ paths = list(itertools.product(base_dirs, list(test_files)))
+ for base_dir, filename in paths:
+ ssh.check_call(
+ "echo 'Test file' > {}/{}".format(base_dir, filename),
+ node_name=sm,
+ raise_on_err=False)
+
+ @pytest.fixture
+ def maas_test_setup(self, underlay_actions, salt_actions, reclass_actions):
+ """Setup for MAAS backup/restore tests
+
+ :param underlay_actions: UnderlaySSHManager, tcp-qa SSH manager
+ instance
+ :param salt_actions: SaltManager, tcp-qa Salt manager instance
+ :param reclass_actions: ReclassManager, tcp-qa Reclass-tools manager
+ """
+ # Check if 'postgresql-client-9.6' package is installed on backupninja
+ # client node. Install if necessary.
+ postgresql_pkg = "postgresql-client"
+ postgresql_pkg_ver = "9.6"
+ sm = salt_actions.local(
+ "I@salt:master", "test.ping")['return'][0].keys()[0]
+
+ result = salt_actions.local(
+ sm, "pkg.info_installed", postgresql_pkg)['return'][0]
+ installed_ver = result[sm][postgresql_pkg].get('version')
+ if not (installed_ver and postgresql_pkg_ver in installed_ver):
+ pkg = "{pkg},{pkg}-{ver}".format(
+ pkg=postgresql_pkg, ver=postgresql_pkg_ver)
+ salt_actions.local(sm, "pkg.install", pkg)
+
+ # Precreate test files for MAAS backup/restore test scenarios
+ self._precreate_test_files(
+ salt_actions,
+ underlay_actions,
+ self.MAAS_DIRS,
+ [self.MAAS_FILE_TO_DELETE, self.MAAS_FILE_TO_UPDATE])
+
+ # Enable MAAS restore in reclass
+ restore_class = "system.maas.region.restoredb"
+ if restore_class not in reclass_actions.get_key(
+ "classes", self.MAAS_YAML):
+ reclass_actions.add_class(restore_class, self.MAAS_YAML)
+
+ @pytest.fixture
+ def precreate_sm_test_files(self, underlay_actions, salt_actions):
+ """Create test files before executing Salt Master backup
+
+ :param underlay_actions: UnderlaySSHManager, tcp-qa SSH manager
+ instance
+ :param salt_actions: SaltManager, tcp-qa Salt manager instance
+ """
+ self._precreate_test_files(
+ salt_actions,
+ underlay_actions,
+ self.SM_DIRS,
+ [self.SM_FILE_TO_DELETE, self.SM_FILE_TO_UPDATE])
+
+ def verify_restored_data(
+ self, ssh, client, base_dirs, deleted_file, updated_file, hashes):
+ """Verify restore of deleted/updated files
:param ssh: UnderlaySSHManager, tcp-qa SSH manager instance
:param client: string, backup client node where files are updated
+ :param base_dirs: list, directories that contain the test files
+ :param deleted_file: string, name of the deleted file
+ :param updated_file: string, name of the updated file
:param hashes: dict, key-value pairs of files and their hashes
before update
"""
- # Verify that deleted files are restored
- for f in self.FILES_TO_DELETE:
- path = "{}/{}".format(self.RECLASS_DIR, f)
+ for base_dir in base_dirs:
+ # Verify that deleted files are restored
+ path = "{}/{}".format(base_dir, deleted_file)
result = ssh.check_call(
"test -f {}".format(path),
node_name=client,
raise_on_err=False)['exit_code']
assert result == ExitCodes.EX_OK, (
- "'{}' data is not in restored on Salt master.".format(path))
- # Verify that changed files are restored
- for f in self.FILES_TO_UPDATE:
- path = "{}/{}".format(self.RECLASS_DIR, f)
+ "'{}' data is not in restored on {}.".format(path, client))
+
+ # Verify that changed files are reverted
+ path = "{}/{}".format(base_dir, updated_file)
f_hash = ssh.check_call(
"sha1sum {} | awk '{{print $1}}'".format(path),
node_name=client,
raise_on_err=False)['stdout']
- assert hashes[f] == f_hash, (
- "'{}' data is not in restored on Salt master.".format(path))
+ assert hashes[path] == f_hash, (
+ "'{}' data is not in restored on {}.".format(path, client))
+
+ def get_maas_svc_status(self, salt, client):
+ """Get status of MAAS services
+
+ :param salt: SaltManager, tcp-qa Salt manager instance
+ :param client: string, backup client node where files are updated
+ :return: dict, statuses of MAAS services
+ """
+ statuses = {}
+ for svc in self.MAAS_SERVICES:
+ statuses[svc] = salt.service_status(
+ "I@maas:region", svc)[0][client]
+ return statuses
@pytest.mark.grab_versions
@pytest.mark.salt_master_manual_backup_restore
def test_salt_master_manual_backup_restore(
- self, underlay_actions, salt_actions, show_step, delete_backup):
+ self,
+ underlay_actions,
+ salt_actions,
+ show_step,
+ precreate_sm_test_files,
+ cleanup_actions):
"""Test manual backup restore of Salt master data
Scenario:
@@ -197,39 +306,45 @@
salt = salt_actions
ssh = underlay_actions
- backup_client = salt.local(
- "I@backupninja:client", "test.ping")['return'][0].keys()[0]
- backup_server = salt.local(
+ sm = salt.local(
+ "I@salt:master", "test.ping")['return'][0].keys()[0]
+ server = salt.local(
"I@backupninja:server", "test.ping")['return'][0].keys()[0]
# Create backup by moving local files to the 'backupninja' server
show_step(1)
cmd = "backupninja -n --run /etc/backup.d/200.backup.rsync"
- ssh.check_call(
- cmd, node_name=backup_client, raise_on_err=False, timeout=60 * 4)
+ ssh.check_call(cmd, node_name=sm, raise_on_err=False, timeout=60 * 4)
# Verify that backup is created and all pieces of data are rsynced
# to backupninja server
show_step(2)
- self.check_salt_master_backup(
- ssh, backup_server, self.BCKP_SERVER_DIR, backup_client)
+ self.check_backup(
+ ssh, server, self.BCKP_SERVER_DIR, sm, self.SM_BACKUP_DIRS)
# Simulate loss/change of some reclass data
show_step(3)
- self.delete_reclass_files(ssh, backup_client)
- hashes = self.update_reclass_files(ssh, backup_client)
+ self.delete_files(ssh, sm, self.SM_DIRS, self.SM_FILE_TO_DELETE)
+ hashes = self.update_files(
+ ssh, sm, self.SM_DIRS, self.SM_FILE_TO_UPDATE)
# Restore the backup
show_step(4)
ssh.check_call(
"salt-call state.sls salt.master.restore,salt.minion.restore",
- node_name=backup_client,
+ node_name=sm,
raise_on_err=False,
timeout=60 * 4)
# Verify that all pieces of lost/changed data are restored
show_step(5)
- self.verify_restored_data(ssh, backup_client, hashes)
+ self.verify_restored_data(
+ ssh,
+ sm,
+ self.SM_DIRS,
+ self.SM_FILE_TO_DELETE,
+ self.SM_FILE_TO_UPDATE,
+ hashes)
# Ping minions
show_step(6)
@@ -243,7 +358,8 @@
salt_actions,
drivetrain_actions,
show_step,
- delete_backup):
+ precreate_sm_test_files,
+ cleanup_actions):
"""Test manual backup restore of Salt master data using DT pipeline
Scenario:
@@ -262,9 +378,8 @@
ssh = underlay_actions
dt = drivetrain_actions
- backup_client = salt.local(
- "I@backupninja:client", "test.ping")['return'][0].keys()[0]
- backup_server = salt.local(
+ sm = salt.local("I@salt:master", "test.ping")['return'][0].keys()[0]
+ server = salt.local(
"I@backupninja:server", "test.ping")['return'][0].keys()[0]
# Execute 'backupninja_backup' pipeline to create a backup
@@ -284,13 +399,14 @@
# Verify that backup is created and all pieces of data are rsynced
# to backupninja server
show_step(2)
- self.check_salt_master_backup(
- ssh, backup_server, self.BCKP_SERVER_DIR, backup_client)
+ self.check_backup(
+ ssh, server, self.BCKP_SERVER_DIR, sm, self.SM_BACKUP_DIRS)
# Simulate loss/change of some reclass data
show_step(3)
- self.delete_reclass_files(ssh, backup_client)
- hashes = self.update_reclass_files(ssh, backup_client)
+ self.delete_files(ssh, sm, self.SM_DIRS, self.SM_FILE_TO_DELETE)
+ hashes = self.update_files(
+ ssh, sm, self.SM_DIRS, self.SM_FILE_TO_UPDATE)
# Restore the backup
show_step(4)
@@ -307,7 +423,13 @@
# Verify that all pieces of lost/changed data are restored
show_step(5)
- self.verify_restored_data(ssh, backup_client, hashes)
+ self.verify_restored_data(
+ ssh,
+ sm,
+ self.SM_DIRS,
+ self.SM_FILE_TO_DELETE,
+ self.SM_FILE_TO_UPDATE,
+ hashes)
# Ping minions
show_step(6)
@@ -321,7 +443,8 @@
salt_actions,
reclass_actions,
show_step,
- delete_backup):
+ precreate_sm_test_files,
+ cleanup_actions):
"""Test scheduled backup restore of Salt master data
Scenario:
@@ -342,14 +465,13 @@
ssh = underlay_actions
reclass = reclass_actions
- backup_client = salt.local(
- "I@backupninja:client", "test.ping")['return'][0].keys()[0]
- backup_server = salt.local(
+ sm = salt.local("I@salt:master", "test.ping")['return'][0].keys()[0]
+ server = salt.local(
"I@backupninja:server", "test.ping")['return'][0].keys()[0]
# Re-configure backup schedule
show_step(1)
- self.update_backup_schedule(reclass)
+ self.update_backup_schedule(reclass, self.SM_YAML)
# Apply 'backupninja' state on backupninja client node
show_step(2)
@@ -359,7 +481,7 @@
show_step(3)
helpers.wait_pass(
lambda: ssh.check_call(
- cmd="pgrep backupninja && echo OK", node_name=backup_client),
+ cmd="pgrep backupninja && echo OK", node_name=sm),
timeout=60 * 11,
interval=5)
@@ -367,31 +489,38 @@
show_step(4)
ssh.check_call(
cmd="while pgrep backupninja > /dev/null; do sleep 2; done",
- node_name=backup_client,
+ node_name=sm,
timeout=60 * 5)
# Verify that backup is created and all pieces of data are rsynced
# to backupninja server
show_step(5)
- self.check_salt_master_backup(
- ssh, backup_server, self.BCKP_SERVER_DIR, backup_client)
+ self.check_backup(
+ ssh, server, self.BCKP_SERVER_DIR, sm, self.SM_BACKUP_DIRS)
# Simulate loss/change of some reclass data
show_step(6)
- self.delete_reclass_files(ssh, backup_client)
- hashes = self.update_reclass_files(ssh, backup_client)
+ self.delete_files(ssh, sm, self.SM_DIRS, self.SM_FILE_TO_DELETE)
+ hashes = self.update_files(
+ ssh, sm, self.SM_DIRS, self.SM_FILE_TO_UPDATE)
# Restore the backup
show_step(7)
ssh.check_call(
"salt-call state.sls salt.master.restore,salt.minion.restore",
- node_name=backup_client,
+ node_name=sm,
raise_on_err=False,
timeout=60 * 4)
# Verify that all pieces of lost/changed data are restored
show_step(8)
- self.verify_restored_data(ssh, backup_client, hashes)
+ self.verify_restored_data(
+ ssh,
+ sm,
+ self.SM_DIRS,
+ self.SM_FILE_TO_DELETE,
+ self.SM_FILE_TO_UPDATE,
+ hashes)
# Ping minions
show_step(9)
@@ -399,6 +528,279 @@
@pytest.mark.grab_versions
@pytest.mark.fail_snapshot
+ @pytest.mark.maas_backup_restore_manual
+ def test_maas_backup_restore_manual(
+ self,
+ salt_actions,
+ underlay_actions,
+ show_step,
+ maas_test_setup,
+ cleanup_actions):
+ """Test backup and restore of MAAS PostgreSQL DB
+
+ Scenario:
+ 1. Make backup of file permissions for MAAS
+ 2. Compress all MAAS PostgreSQL databases and store locally
+ 3. Move local backup files to backupninja server
+ 4. Verify that MAAS backup is rsynced to backupninja server
+ 5. Delete/change some MAAS data
+ 6. Restore the backup
+ 7. Verify that MAAS data backup is restored
+ 8. Verify MAAS services after restore
+
+ Duration: ~
+ """
+ salt = salt_actions
+ ssh = underlay_actions
+
+ sm = salt.local("I@salt:master", "test.ping")['return'][0].keys()[0]
+ server = salt.local(
+ "I@backupninja:server", "test.ping")['return'][0].keys()[0]
+
+ # Make backup of file permissions for MAAS
+ show_step(1)
+ perm_file = "/var/lib/maas/file_permissions.txt"
+ ssh.check_call(
+ "which getfacl && getfacl -pR /var/lib/maas/ > {}".format(
+ perm_file),
+ node_name=sm,
+ raise_on_err=False)['stdout_str']
+ result = ssh.check_call(
+ "test -f {}".format(perm_file),
+ node_name=sm,
+ raise_on_err=False)['exit_code']
+ assert result == ExitCodes.EX_OK, (
+ "Local backup of MAAS files permissions is not created")
+
+ # Make local backup of MAAS PostgreSQL DBs
+ show_step(2)
+ cmd = "backupninja -n --run /etc/backup.d/102.pgsql"
+ ssh.check_call(cmd, node_name=sm, raise_on_err=False, timeout=60 * 5)
+ result = ssh.check_call(
+ "test -f {}".format("/var/backups/postgresql/maasdb.pg_dump.gz"),
+ node_name=sm,
+ raise_on_err=False)['exit_code']
+ assert result == ExitCodes.EX_OK, (
+ "Local backup of MAAS PostgreSQL DBs is not created")
+
+ # Rsync local backup to backupninja server
+ show_step(3)
+ cmd = "backupninja -n --run /etc/backup.d/200.backup.rsync"
+ ssh.check_call(cmd, node_name=sm, raise_on_err=False, timeout=60 * 5)
+
+ # Verify all pieces of backup data are rsynced to backupninja server
+ show_step(4)
+ self.check_backup(
+ ssh, server, self.BCKP_SERVER_DIR, sm, self.MAAS_BACKUP_DIRS)
+
+ # Simulate loss/change of some MAAS data
+ show_step(5)
+ self.delete_files(ssh, sm, self.MAAS_DIRS, self.MAAS_FILE_TO_DELETE)
+ hashes = self.update_files(
+ ssh, sm, self.MAAS_DIRS, self.MAAS_FILE_TO_UPDATE)
+
+ # Restore the backup
+ show_step(6)
+ salt.enforce_state("I@maas:region", "maas.region")
+
+ # Verify that all pieces of lost/changed data are restored
+ show_step(7)
+ self.verify_restored_data(
+ ssh,
+ sm,
+ self.MAAS_DIRS,
+ self.MAAS_FILE_TO_DELETE,
+ self.MAAS_FILE_TO_UPDATE,
+ hashes)
+
+ # Verify that MAAS services are up and running after restore
+ show_step(8)
+ statuses = self.get_maas_svc_status(salt, sm)
+ assert all(statuses.values()), (
+ "Not all MAAS services are active after restore. Please check the "
+ "affected services (marked as 'False' below):\n{}".format(statuses)
+ )
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ @pytest.mark.maas_manual_backup_restore_pipeline
+ def test_maas_manual_backup_restore_pipeline(
+ self,
+ underlay_actions,
+ salt_actions,
+ drivetrain_actions,
+ show_step,
+ maas_test_setup,
+ cleanup_actions):
+ """Test manual backup restore of MAAS data using DT pipeline
+
+ Scenario:
+ 1. Execute 'backupninja_backup' pipeline to backup MAAS data
+ 2. Verify that MAAS backup is created on backupninja server node
+ 3. Delete/change some MAAS data
+ 4. Restore the backup
+ 5. Verify that MAAS data backup is restored
+ 6. Verify MAAS services after restore
+
+ Duration: ~ 3 min
+ """
+ salt = salt_actions
+ ssh = underlay_actions
+ dt = drivetrain_actions
+
+ sm = salt.local("I@salt:master", "test.ping")['return'][0].keys()[0]
+ server = salt.local(
+ "I@backupninja:server", "test.ping")['return'][0].keys()[0]
+
+ # Execute 'backupninja_backup' pipeline to create a backup
+ show_step(1)
+ status = dt.start_job_on_cid_jenkins(
+ job_name=self.BACKUP_JOB_NAME,
+ job_parameters=self.BACKUP_JOB_PARAMETERS,
+ start_timeout=self.JENKINS_START_TIMEOUT,
+ build_timeout=self.JENKINS_BUILD_TIMEOUT
+ )
+ assert status == 'SUCCESS', (
+ "'{}' job run status is {} after creating MAAS data backup. "
+ "Please check the build and executed stages.".format(
+ self.BACKUP_JOB_NAME, status)
+ )
+
+ # Verify that backup is created and all pieces of data are rsynced
+ # to backupninja server
+ show_step(2)
+ self.check_backup(
+ ssh, server, self.BCKP_SERVER_DIR, sm, self.MAAS_BACKUP_DIRS)
+
+ # Simulate loss/change of some MAAS data
+ show_step(3)
+ self.delete_files(ssh, sm, self.MAAS_DIRS, self.MAAS_FILE_TO_DELETE)
+ hashes = self.update_files(
+ ssh, sm, self.MAAS_DIRS, self.MAAS_FILE_TO_UPDATE)
+
+ # Restore the backup
+ show_step(4)
+ status = dt.start_job_on_cid_jenkins(
+ job_name=self.RESTORE_JOB_NAME,
+ start_timeout=self.JENKINS_START_TIMEOUT,
+ build_timeout=self.JENKINS_BUILD_TIMEOUT
+ )
+ assert status == 'SUCCESS', (
+ "'{}' job run status is {} after restoring from MAAS "
+ "backup. Please check the build and executed stages.".format(
+ self.RESTORE_JOB_NAME, status)
+ )
+
+ # Verify that all pieces of lost/changed data are restored
+ show_step(5)
+ self.verify_restored_data(
+ ssh,
+ sm,
+ self.MAAS_DIRS,
+ self.MAAS_FILE_TO_DELETE,
+ self.MAAS_FILE_TO_UPDATE,
+ hashes)
+
+ # Verify that MAAS services are up and running after restore
+ show_step(6)
+ statuses = self.get_maas_svc_status(salt, sm)
+ assert all(statuses.values()), (
+ "Not all MAAS services are active after restore. Please check the "
+ "affected services (marked as 'False' below):\n{}".format(statuses)
+ )
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ @pytest.mark.maas_scheduled_backup_restore
+ def test_maas_scheduled_backup_restore(
+ self,
+ underlay_actions,
+ salt_actions,
+ reclass_actions,
+ show_step,
+ cleanup_actions):
+ """Test scheduled backup restore of MAAS data
+
+ Scenario:
+ 1. Update MAAS backup schedule to run every 5 minutes
+ 2. Apply 'backupninja' state on the backupninja client node
+ 3. Wait until backup creation is triggered by schedule
+ 4. Wait until backup creation is finished
+ 5. Verify that MAAS backup is created on backupninja server node
+ 6. Delete/change some MAAS data
+ 7. Restore the backup
+ 8. Verify that MAAS data backup is restored
+ 9. Verify MAAS services after restore
+
+ Duration: ~ 3 min
+ """
+ salt = salt_actions
+ ssh = underlay_actions
+ reclass = reclass_actions
+
+ sm = salt.local("I@salt:master", "test.ping")['return'][0].keys()[0]
+ server = salt.local(
+ "I@backupninja:server", "test.ping")['return'][0].keys()[0]
+
+ # Re-configure backup schedule
+ show_step(1)
+ self.update_backup_schedule(reclass, self.MAAS_YAML)
+
+ # Apply 'backupninja' state on backupninja client node
+ show_step(2)
+ salt.enforce_state("I@backupninja:client", "backupninja")
+
+ # Wait until backup is triggered by schedule
+ show_step(3)
+ helpers.wait_pass(
+ lambda: ssh.check_call(
+ cmd="pgrep backupninja && echo OK", node_name=sm),
+ timeout=60 * 11,
+ interval=5)
+
+ # Wait until backup is finished
+ show_step(4)
+ ssh.check_call(
+ cmd="while pgrep backupninja > /dev/null; do sleep 2; done",
+ node_name=sm,
+ timeout=60 * 5)
+
+ # Verify that backup is created and all pieces of data are rsynced
+ # to backupninja server
+ show_step(5)
+ self.check_backup(
+ ssh, server, self.BCKP_SERVER_DIR, sm, self.MAAS_BACKUP_DIRS)
+
+ # Simulate loss/change of some MAAS data
+ show_step(6)
+ self.delete_files(ssh, sm, self.MAAS_DIRS, self.MAAS_FILE_TO_DELETE)
+ hashes = self.update_files(
+ ssh, sm, self.MAAS_DIRS, self.MAAS_FILE_TO_UPDATE)
+
+ # Restore the backup
+ show_step(7)
+ salt.enforce_state("I@maas:region", "maas.region")
+
+ # Verify that all pieces of lost/changed data are restored
+ show_step(8)
+ self.verify_restored_data(
+ ssh,
+ sm,
+ self.MAAS_DIRS,
+ self.MAAS_FILE_TO_DELETE,
+ self.MAAS_FILE_TO_UPDATE,
+ hashes)
+
+ # Verify that MAAS services are up and running after restore
+ show_step(9)
+ statuses = self.get_maas_svc_status(salt, sm)
+ assert all(statuses.values()), (
+ "Not all MAAS services are active after restore. Please check the "
+ "affected services (marked as 'False' below):\n{}".format(statuses)
+ )
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
@pytest.mark.backup_all
def _test_backup_cfg_backupninja_rsync(
self, underlay, config, openstack_deployed,
diff --git a/tcp_tests/tests/system/test_backup_restore_galera.py b/tcp_tests/tests/system/test_backup_restore_galera.py
index 96ea98a..ee4043b 100644
--- a/tcp_tests/tests/system/test_backup_restore_galera.py
+++ b/tcp_tests/tests/system/test_backup_restore_galera.py
@@ -29,7 +29,7 @@
dt = drivetrain_actions
# ################## Run backup job #########################
- show_step(2)
+ show_step(1)
job_name = 'galera_backup_database'
job_parameters = {
'ASK_CONFIRMATION': False
@@ -41,7 +41,7 @@
assert backup_galera_pipeline == 'SUCCESS'
# ######################## Run CPV ###########################
- show_step(3)
+ show_step(2)
job_name = 'cvp-sanity'
job_cvp_sanity_parameters = {
'EXTRA_PARAMS': '''
@@ -65,7 +65,7 @@
assert run_cvp_sanity == 'SUCCESS'
# ######################## Run Tempest ###########################
- show_step(4)
+ show_step(3)
job_name = 'cvp-tempest'
job_parameters = {
'TEMPEST_ENDPOINT_TYPE': 'internalURL'
@@ -76,7 +76,7 @@
assert run_cvp_tempest == 'SUCCESS'
# ######################## Run Restore ###########################
- show_step(5)
+ show_step(4)
job_name = 'galera_verify_restore'
job_parameters = {
'RESTORE_TYPE': 'ONLY_RESTORE',
@@ -88,7 +88,7 @@
assert run_galera_verify_restore == 'SUCCESS'
# ######################## Run CPV ###########################
- show_step(6)
+ show_step(5)
job_name = 'cvp-sanity'
run_cvp_sanity = dt.start_job_on_cid_jenkins(
job_name=job_name,
@@ -96,7 +96,7 @@
assert run_cvp_sanity == 'SUCCESS'
# ######################## Run Tempest ###########################
- show_step(7)
+ show_step(6)
job_name = 'cvp-tempest'
job_parameters = {
'TEMPEST_ENDPOINT_TYPE': 'internalURL'
diff --git a/tcp_tests/tests/system/test_mcp_update.py b/tcp_tests/tests/system/test_mcp_update.py
new file mode 100644
index 0000000..f4688b7
--- /dev/null
+++ b/tcp_tests/tests/system/test_mcp_update.py
@@ -0,0 +1,482 @@
+import pytest
+import sys
+import os
+
+from tcp_tests import logger
+from tcp_tests import settings
+
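+# Make the tcp_tests package importable when this module is run from the repo root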
+sys.path.append(os.getcwd())
+try:
+ from tcp_tests.fixtures import config_fixtures
+ from tcp_tests.managers import underlay_ssh_manager
+ from tcp_tests.managers import saltmanager as salt_manager
+except ImportError:
+ print("ImportError: Run the application from the tcp-qa directory or "
+ "set the PYTHONPATH environment variable to directory which contains"
+ " ./tcp_tests")
+ sys.exit(1)
+LOG = logger.logger
+
+
+def has_only_similar(values_by_nodes):
+ """
+ :param values_by_nodes: dict
+ :return: bool, True if all items in the dict have the same value
+ """
+ values = list(values_by_nodes.values())
+ return all(value == values[0] for value in values)
+
+
+def get_control_plane_targets():
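+ """Collect Salt targets for the OpenStack control plane VMs to be updated"""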
+ config = config_fixtures.config()
+ underlay = underlay_ssh_manager.UnderlaySSHManager(config)
+ saltmanager = salt_manager.SaltManager(config, underlay)
+ targets = list()
+ try:
+ targets += saltmanager.run_state(
+ "I@keystone:server", 'test.ping')[0]['return'][0].keys()
+ targets += saltmanager.run_state(
+ "I@nginx:server and not I@salt:master",
+ "test.ping")[0]['return'][0].keys()
+ except BaseException as err:
+ LOG.warning("Can't retrieve data from Salt. \
+ Maybe cluster is not deployed completely.\
+ Err: {}".format(err))
+
+ # TODO: add check for Manila existence
+ # # Commented to avoid fails during OpenStack updates.
+ # # Anyway we don't have deployments with Manila yet
+ # targets.append('share*')
+ # TODO: add check for Tenant Telemetry existence
+ targets.append('mdb*')
+ # TODO: add check for Barbican existence
+ targets.append('kmn*')
+ return targets
+
+
+@pytest.fixture
+def switch_to_proposed_pipelines(reclass_actions, salt_actions):
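+ """Point the DriveTrain pipelines to release/proposed/2019.2.0 and re-apply the jenkins.client state"""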
+ reclass_actions.add_key(
+ "parameters._param.jenkins_pipelines_branch",
+ "release/proposed/2019.2.0",
+ "cluster/*/infra/init.yml"
+ )
+ salt_actions.enforce_state("I@jenkins:client", "jenkins.client")
+
+
+class TestUpdateMcpCluster(object):
+ """
+ Following the steps in
+ https://docs.mirantis.com/mcp/master/mcp-operations-guide/update-upgrade/minor-update.html#minor-update
+ """
+
+ @pytest.mark.grab_versions
+ @pytest.mark.parametrize("_", [settings.ENV_NAME])
+ @pytest.mark.run_mcp_update
+ def test_update_drivetrain(self, salt_actions, drivetrain_actions,
+ show_step, _, switch_to_proposed_pipelines):
+ """Updating DriveTrain component to release/proposed/2019.2.0 version
+
+ Scenario:
+ 1. Add workaround for PROD-32751
+ 2. Run job git-mirror-downstream-mk-pipelines
+ 3. Run job git-mirror-downstream-pipeline-library
+ 4. If the jobs pass, start 'Deploy - upgrade MCP Drivetrain'
+
+ Duration: ~70 min
+ """
+ salt = salt_actions
+ dt = drivetrain_actions
+
+ # #################### Add workaround for PROD-32751 #################
+ show_step(1)
+
+ # FIXME: workaround for PROD-32751
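+ # Commit any pending cluster model changes (an empty commit is allowed) before starting the upgrade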
+ salt.cmd_run("cfg01*", "cd /srv/salt/reclass; git add -u && \
+ git commit --allow-empty -m 'Cluster model update'")
+
+ # ################### Downstream mk-pipelines #########################
+ show_step(2)
+ job_name = 'git-mirror-downstream-mk-pipelines'
+ job_parameters = {
+ 'BRANCHES': 'release/proposed/2019.2.0'
+ }
+ update_pipelines = dt.start_job_on_cid_jenkins(
+ job_name=job_name,
+ job_parameters=job_parameters)
+
+ assert update_pipelines == 'SUCCESS'
+
+ # ################### Downstream pipeline-library ####################
+ show_step(3)
+ job_name = 'git-mirror-downstream-pipeline-library'
+ job_parameters = {
+ 'BRANCHES': 'release/proposed/2019.2.0'
+ }
+ update_pipeline_library = dt.start_job_on_cid_jenkins(
+ job_name=job_name,
+ job_parameters=job_parameters)
+
+ assert update_pipeline_library == 'SUCCESS'
+
+ # ################### Start 'Deploy - upgrade MCP Drivetrain' job #####
+ show_step(4)
+
+ job_name = 'upgrade-mcp-release'
+ job_parameters = {
+ 'GIT_REFSPEC': 'release/proposed/2019.2.0',
+ 'MK_PIPELINES_REFSPEC': 'release/proposed/2019.2.0',
+ 'TARGET_MCP_VERSION': '2019.2.0'
+ }
+ update_drivetrain = dt.start_job_on_cid_jenkins(
+ job_name=job_name,
+ job_parameters=job_parameters,
+ build_timeout=90*60)
+
+ assert update_drivetrain == 'SUCCESS'
+
+ @pytest.mark.grab_versions
+ @pytest.mark.parametrize("_", [settings.ENV_NAME])
+ @pytest.mark.run_mcp_update
+ def test_update_glusterfs(self, salt_actions, reclass_actions,
+ drivetrain_actions, show_step, _):
+ """ Upgrade GlusterFS
+ Scenario:
+ 1. In infra/init.yml in Reclass, add the glusterfs_version parameter
+ 2. Start linux.system.repo state
+ 3. Start "update-glusterfs" job
+ 4. Check version for GlusterFS servers
+ 5. Check version for GlusterFS clients
+
+ """
+ salt = salt_actions
+ reclass = reclass_actions
+ dt = drivetrain_actions
+
+ # ############## Change reclass ######################################
+ show_step(1)
+ reclass.add_key(
+ "parameters._param.linux_system_repo_mcp_glusterfs_version_number",
+ "5",
+ "cluster/*/infra/init.yml"
+ )
+ # ################# Run linux.system state ###########################
+ show_step(2)
+ salt.enforce_state("*", "linux.system.repo")
+
+ # ############## Start 'update-glusterfs' job ########################
+ show_step(3)
+ job_name = 'update-glusterfs'
+
+ update_glusterfs = dt.start_job_on_cid_jenkins(
+ job_name=job_name,
+ build_timeout=40 * 60)
+
+ assert update_glusterfs == 'SUCCESS'
+
+ # ################ Check GlusterFS version for servers ##############
+ show_step(4)
+ gluster_server_versions_by_nodes = salt.cmd_run(
+ "I@glusterfs:server",
+ "glusterd --version|head -n1")[0]
+
+ assert has_only_similar(gluster_server_versions_by_nodes),\
+ gluster_server_versions_by_nodes
+
+ # ################ Check GlusterFS version for clients ##############
+ show_step(5)
+ gluster_client_versions_by_nodes = salt.cmd_run(
+ "I@glusterfs:client",
+ "glusterfs --version|head -n1")[0]
+
+ assert has_only_similar(gluster_client_versions_by_nodes), \
+ gluster_client_versions_by_nodes
+
+ @pytest.mark.grab_versions
+ @pytest.mark.parametrize("_", [settings.ENV_NAME])
+ @pytest.mark.run_mcp_update
+ def test_update_galera(self, salt_actions, reclass_actions,
+ drivetrain_actions, show_step, _):
+ """ Upgrade Galera automatically
+
+ Scenario:
+ 1. Include the Galera upgrade pipeline job to DriveTrain
+ 2. Apply the jenkins.client state on the Jenkins nodes
+ 3. Set the openstack_upgrade_enabled parameter to true
+ 4. Refresh pillars
+ 5. Add repositories with new Galera packages
+ 6. Start job from Jenkins
+ """
+ salt = salt_actions
+ reclass = reclass_actions
+ dt = drivetrain_actions
+ # ################### Enable pipeline #################################
+ show_step(1)
+ reclass.add_class(
+ "system.jenkins.client.job.deploy.update.upgrade_galera",
+ "cluster/*/cicd/control/leader.yml")
+ show_step(2)
+ salt.enforce_state("I@jenkins:client", "jenkins.client")
+
+ # ############### Enable automatic upgrade ############################
+ show_step(3)
+ reclass.add_bool_key("parameters._param.openstack_upgrade_enabled",
+ "True",
+ "cluster/*/infra/init.yml")
+
+ show_step(4)
+ salt.enforce_state("dbs*", "saltutil.refresh_pillar")
+
+ # ############# Add repositories with new Galera packages #######
+ show_step(5)
+ salt.enforce_state("dbs*", "linux.system.repo")
+ salt.enforce_state("cfg*", "salt.master")
+
+ # ############ Start 'deploy-upgrade-galera' job in Jenkins ##########
+ show_step(6)
+
+ job_name = 'deploy-upgrade-galera'
+ job_parameters = {
+ 'INTERACTIVE': 'false'
+ }
+
+ update_galera = dt.start_job_on_cid_jenkins(
+ job_name=job_name,
+ job_parameters=job_parameters,
+ build_timeout=40 * 60)
+
+ assert update_galera == 'SUCCESS'
+
+ @pytest.fixture
+ def disable_automatic_failover_neutron_for_test(self, salt_actions):
+ """
+ On each OpenStack controller node, disable automatic L3/DHCP agent
+ failover in the neutron.conf file and restart the neutron-server
+ service; revert the changes after the test
+ """
+ def comment_line(node, file_name, word):
+ """
+ Adds '#' before the specific line in specific file
+
+ :param node: string, salt target of node where the file locates
+ :param file_name: string, full path to the file
+ :param word: string, the begin of line which should be commented
+ :return: None
+ """
+ salt_actions.cmd_run(node,
+ "sed -i 's/^{word}/#{word}/' {file}".
+ format(word=word,
+ file=file_name))
+
+ def add_line(node, file_name, line):
+ """
+ Appends line to the end of file
+
+ :param node: string, salt target of node where the file locates
+ :param file_name: string, full path to the file
+ :param line: string, line that should be added
+ :return: None
+ """
+ salt_actions.cmd_run(node, "echo {line} >> {file}".format(
+ line=line,
+ file=file_name))
+
+ neutron_conf = '/etc/neutron/neutron.conf'
+ neutron_server = "I@neutron:server"
+ # ######## Create backup for config file #######################
+ salt_actions.cmd_run(
+ neutron_server,
+ "cp -p {file} {file}.backup".format(file=neutron_conf))
+
+ # ## Change parameters in neutron.conf
+ comment_line(neutron_server, neutron_conf,
+ "allow_automatic_l3agent_failover",)
+ comment_line(neutron_server, neutron_conf,
+ "allow_automatic_dhcp_failover")
+ add_line(neutron_server, neutron_conf,
+ "allow_automatic_dhcp_failover = false")
+ add_line(neutron_server, neutron_conf,
+ "allow_automatic_l3agent_failover = false")
+
+ # ## Apply changed config to the neutron-server service
+ result = salt_actions.cmd_run(neutron_server,
+ "service neutron-server restart")
+ # TODO: add check that neutron-server is up and running
+ yield result
+ # ## Revert file changes
+ salt_actions.cmd_run(
+ neutron_server,
+ "cp -p {file}.backup {file}".format(file=neutron_conf))
+ salt_actions.cmd_run(neutron_server,
+ "service neutron-server restart")
+
+ @pytest.fixture
+ def disable_neutron_agents_for_test(self, salt_actions):
+ """
+ Disable the neutron agent services before the test and
+ enable them after the test
+ """
+ result = salt_actions.cmd_run("I@neutron:server", """
+ service neutron-dhcp-agent stop && \
+ service neutron-l3-agent stop && \
+ service neutron-metadata-agent stop && \
+ service neutron-openvswitch-agent stop
+ """)
+ yield result
+ # Re-enable the neutron agent services after the test
+ salt_actions.cmd_run("I@neutron:server", """
+ service neutron-dhcp-agent start && \
+ service neutron-l3-agent start && \
+ service neutron-metadata-agent start && \
+ service neutron-openvswitch-agent start
+ """)
+ # TODO: add check that all services are UP and running
+
+ @pytest.mark.grab_versions
+ @pytest.mark.parametrize("_", [settings.ENV_NAME])
+ @pytest.mark.run_mcp_update
+ def test_update_rabbit(self, salt_actions, reclass_actions,
+ drivetrain_actions, show_step, _,
+ disable_automatic_failover_neutron_for_test,
+ disable_neutron_agents_for_test):
+ """ Updates RabbitMQ
+ Scenario:
+ 1. Include the RabbitMQ upgrade pipeline job to DriveTrain
+ 2. Add repositories with new RabbitMQ packages
+ 3. Start Deploy - upgrade RabbitMQ pipeline
+
+ Updating RabbitMQ should be completed before the OpenStack update
+ process starts
+ """
+ salt = salt_actions
+ reclass = reclass_actions
+ dt = drivetrain_actions
+
+ # ####### Include the RabbitMQ upgrade pipeline job to DriveTrain ####
+ show_step(1)
+ reclass.add_class(
+ "system.jenkins.client.job.deploy.update.upgrade_rabbitmq",
+ "cluster/*/cicd/control/leader.yml")
+ salt.enforce_state("I@jenkins:client", "jenkins.client")
+
+ reclass.add_bool_key("parameters._param.openstack_upgrade_enabled",
+ "True",
+ "cluster/*/infra/init.yml")
+ salt.run_state("I@rabbitmq:server", "saltutil.refresh_pillar")
+
+ # ########### Add repositories with new RabbitMQ packages ############
+ show_step(2)
+ salt.enforce_state("I@rabbitmq:server", "linux.system.repo")
+
+ # ########### Start Deploy - upgrade RabbitMQ pipeline ############
+ show_step(3)
+ job_parameters = {
+ 'INTERACTIVE': 'false'
+ }
+
+ update_rabbit = dt.start_job_on_cid_jenkins(
+ job_name='deploy-upgrade-rabbitmq',
+ job_parameters=job_parameters,
+ build_timeout=40 * 60
+ )
+ assert update_rabbit == 'SUCCESS'
+
+ @pytest.mark.grab_versions
+ @pytest.mark.parametrize("_", [settings.ENV_NAME])
+ @pytest.mark.run_mcp_update
+ def test_update_ceph(self, salt_actions, drivetrain_actions, show_step, _):
+ """ Updates Ceph to the latest minor version
+
+ Scenario:
+ 1. Add workaround for unhealthy Ceph
+ 2. Start ceph-upgrade job with default parameters
+ 3. Check Ceph version for all nodes
+
+ https://docs.mirantis.com/mcp/master/mcp-operations-guide/update-upgrade/minor-update/ceph-update.html
+ """
+ salt = salt_actions
+ dt = drivetrain_actions
+
+ # ###################### Add workaround for unhealthy Ceph ###########
+ show_step(1)
+ salt.cmd_run("I@ceph:radosgw",
+ "ceph config set 'mon pg warn max object skew' 20")
+ # ###################### Start ceph-upgrade pipeline #################
+ show_step(2)
+ job_parameters = {}
+
+ update_ceph = dt.start_job_on_cid_jenkins(
+ job_name='ceph-update',
+ job_parameters=job_parameters)
+
+ assert update_ceph == 'SUCCESS'
+
+ # ########## Verify Ceph version #####################################
+ show_step(3)
+
+ ceph_version_by_nodes = salt.cmd_run(
+ "I@ceph:* and not I@ceph:monitoring and not I@ceph:backup:server",
+ "ceph version")[0]
+
+ assert has_only_similar(ceph_version_by_nodes), ceph_version_by_nodes
+
+
+class TestOpenstackUpdate(object):
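+ """Run the DriveTrain jobs that update the OpenStack control and data planes"""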
+
+ @pytest.mark.grab_versions
+ @pytest.mark.run_mcp_update
+ def test__pre_update__enable_pipeline_job(self,
+ reclass_actions, salt_actions,
+ show_step):
+ """ Enable pipeline in the Drivetrain
+
+ Scenario:
+ 1. Add deploy.update.* classes to the reclass
+ 2. Start jenkins.client salt state
+
+ """
+ salt = salt_actions
+ reclass = reclass_actions
+ show_step(1)
+ reclass.add_class("system.jenkins.client.job.deploy.update.upgrade",
+ "cluster/*/cicd/control/leader.yml")
+
+ reclass.add_class(
+ "system.jenkins.client.job.deploy.update.upgrade_ovs_gateway",
+ "cluster/*/cicd/control/leader.yml")
+
+ reclass.add_class(
+ "system.jenkins.client.job.deploy.update.upgrade_compute",
+ "cluster/*/cicd/control/leader.yml")
+
+ show_step(2)
+ r, errors = salt.enforce_state("I@jenkins:client", "jenkins.client")
+ assert errors is None
+
+ @pytest.mark.grab_versions
+ @pytest.mark.parametrize('target', get_control_plane_targets())
+ @pytest.mark.run_mcp_update
+ def test__update__control_plane(self, drivetrain_actions,
+ switch_to_proposed_pipelines, target):
+ """Start 'Deploy - upgrade control VMs' for specific node
+ """
+ job_parameters = {
+ "TARGET_SERVERS": target,
+ "INTERACTIVE": False}
+ upgrade_control_pipeline = drivetrain_actions.start_job_on_cid_jenkins(
+ job_name="deploy-upgrade-control",
+ job_parameters=job_parameters)
+
+ assert upgrade_control_pipeline == 'SUCCESS'
+
+ @pytest.mark.grab_versions
+ @pytest.mark.run_mcp_update
+ def test__update__data_plane(self, drivetrain_actions):
+ """Start 'Deploy - upgrade OVS gateway'
+ """
+ job_parameters = {
+ "INTERACTIVE": False}
+ upgrade_data_pipeline = drivetrain_actions.start_job_on_cid_jenkins(
+ job_name="deploy-upgrade-ovs-gateway",
+ job_parameters=job_parameters)
+
+ assert upgrade_data_pipeline == 'SUCCESS'
diff --git a/tcp_tests/tests/system/test_patches_mu6.py b/tcp_tests/tests/system/test_patches_mu6.py
new file mode 100644
index 0000000..5d874b1
--- /dev/null
+++ b/tcp_tests/tests/system/test_patches_mu6.py
@@ -0,0 +1,89 @@
+import pytest
+from tcp_tests import logger
+
+LOG = logger.logger
+
+
+class TestAddressedIssuesMu6(object):
+ """ Patches for MU6
+ https://docs.mirantis.com/mcp/master/mcp-release-notes/mu/mu-6.html
+
+ Issue resolutions that require manual application
+ """
+
+ def test_gnocchi_measurements(self, show_step,
+ salt_actions, reclass_actions):
+ """Apply fix for '[32645] Missing measurements in Gnocchi on
+ environments with Barbican'
+
+ Scenario:
+ 1. Update reclass with ks_notifications items
+ 2. Run keystone.server state for @keystone targets
+ 3. Run barbican.server state for @barbican targets
+
+ https://docs.mirantis.com/mcp/master/mcp-release-notes/mu/mu-6/mu-6-addressed/mu-6-os/mu6-os-manual.html#missing-measurements-in-gnocchi-on-environments-with-barbican
+
+ """
+ reclass = reclass_actions
+ salt = salt_actions
+ # ### Skip test if cluster without barbican ###########################
+ if not salt.get_single_pillar('I@salt:master',
+ '_param:barbican_enabled'):
+ pytest.skip("Test is skipped due to absent barbican component")
+
+ # ############# Update reclass with ks_notifications items ############
+ show_step(1)
+ reclass.add_key("parameters._param.keystone_notification_topics",
+ "${_param:openstack_notification_topics},barbican",
+ "classes/cluster/*/openstack/init.yml")
+
+ reclass.add_key("parameters.barbican.server.ks_notifications_topic",
+ "barbican",
+ "classes/cluster/*/openstack/barbican.yml")
+
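+ # Refresh pillars so the new reclass keys become visible to the minions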
+ salt.run_state("I@keystone:server", "saltutil.refresh_pillar")
+ salt.run_state("I@barbican:server", "saltutil.refresh_pillar")
+
+ # ############ Run keystone.server state for @keystone ################
+ show_step(2)
+ salt.enforce_state("I@keystone:server:role:primary",
+ "keystone.server")
+ salt.enforce_state("I@keystone:server",
+ "keystone.server")
+
+ # ############ Run barbican.server state for @barbican ################
+ show_step(3)
+ salt.enforce_state("I@barbican:server:role:primary",
+ "barbican.server")
+ salt.enforce_state("I@barbican:server",
+ "barbican.server")
+
+ def test_stacklight_sf_notifier_sfdc_sandbox_enabled(
+ self, show_step,
+ salt_actions, reclass_actions):
+ """ Apply fix for 'StackLight deployment fails with stack creation
+ failed error'
+
+ Scenario:
+ 1. Add sf_notifier_sfdc_sandbox_enabled to reclass
+ 2. Apply docker.client state for Prometheus
+
+ https://docs.mirantis.com/mcp/master/mcp-release-notes/mu/mu-6/mu-6-addressed/mu-6-sl/mu6-sl-manual.html#stacklight-deployment-fails-with-stack-creation-failed-error
+
+ """
+ reclass = reclass_actions
+ salt = salt_actions
+
+ # ##### Add sf_notifier_sfdc_sandbox_enabled to reclass ##############
+ show_step(1)
+ # Yes, here "True" should be as a string, it's not a mistake!
+ reclass.add_key("parameters._param.sf_notifier_sfdc_sandbox_enabled",
+ "True",
+ "classes/cluster/*/stacklight/server.yml")
+
+ # ############### Apply states for Prometheus ########################
+ show_step(2)
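+ # Target nodes that run both the Prometheus server and the Docker client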
+ tgt = "I@prometheus:server and I@docker:client"
+
+ salt.run_state(tgt, "saltutil.refresh_pillar")
+ salt.enforce_state(tgt, "docker.client")
diff --git a/tcp_tests/tests/system/test_security_updates.py b/tcp_tests/tests/system/test_security_updates.py
new file mode 100644
index 0000000..db1d7a7
--- /dev/null
+++ b/tcp_tests/tests/system/test_security_updates.py
@@ -0,0 +1,316 @@
+# Copyright 2019 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import json
+import pytest
+
+from devops.helpers import helpers
+
+from tcp_tests import logger
+from tcp_tests import settings
+
+LOG = logger.logger
+
+
+class TestUbuntuSecurityUpdates(object):
+ """Test class for verification of obtaining Ubuntu security updates"""
+
+ ENV_NAME = settings.ENV_NAME
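+ # Non-interactive full upgrade; the dpkg --force-confdef/--force-confnew
+ # options resolve configuration file prompts automatically during
+ # dist-upgrade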
+ UPGRADE_CMD = (
+ 'export DEBIAN_FRONTEND=noninteractive && '
+ 'apt-get update && '
+ 'apt-get -y upgrade && '
+ 'apt-get -y -o Dpkg::Options::="--force-confdef" '
+ ' -o Dpkg::Options::="--force-confnew" dist-upgrade'
+ )
+ INST_LINUX_HEADERS_CMD = (
+ "export DEBIAN_FRONTEND=noninteractive && "
+ "apt-get -y install linux-headers-generic"
+ )
+
+ UPDATE_JOB_NAME = "deploy-update-package"
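+ # TARGET_SERVERS is set at run time to the Salt target of the nodes
+ # that should be updated by the job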
+ UPDATE_JOB_PARAMETERS = {
+ "ASK_CONFIRMATION": False,
+ "TARGET_SERVERS": ''
+ }
+
+ SANITY_JOB_NAME = 'cvp-sanity'
+ SANITY_JOB_PARAMETERS = {
+ 'EXTRA_PARAMS': {
+ 'envs': ["tests_set=-k 'not test_ceph_health'"]
+ }
+ }
+
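+ # Timeout (in seconds) for a Jenkins job to start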
+ JENKINS_START_TIMEOUT = 60
+
+ def get_available_pkg_updates(self, nodes, salt):
+ """Collect available package updates for given nodes
+
+ :param nodes: list, nodes to collect available updates for
+ :param salt: SaltManager, tcp-qa Salt manager instance
+ :return: dict, update candidates for nodes
+ """
+ updates = {}
+ for node in nodes:
+ updates[node] = salt.local(
+ node, "pkg.list_upgrades")['return'][0][node]
+ return updates
+
+ def run_cvp_sanity(self, dt):
+ """A wrapper for executing cvp-sanity pipeline
+
+ :param dt: DrivetrainManager, tcp-qa Drivetrain manager instance
+ :return: str, build execution status of cvp-sanity pipeline
+ """
+ return dt.start_job_on_cid_jenkins(
+ job_name=self.SANITY_JOB_NAME,
+ job_parameters=self.SANITY_JOB_PARAMETERS,
+ start_timeout=self.JENKINS_START_TIMEOUT,
+ build_timeout=60 * 15
+ )
+
+ def reboot_hw_node(self, ssh, salt, node):
+ """Reboot the given node and wait for it to start back
+
+ :param ssh: UnderlaySSHManager, tcp-qa SSH manager instance
+ :param salt: SaltManager, tcp-qa Salt manager instance
+ :param node: str, name of the node to reboot
+ """
+ LOG.info("Sending reboot command to '{}' node.".format(node))
+ remote = ssh.remote(node_name=node)
+ remote.execute_async("/sbin/shutdown -r now")
+
+ # Wait for restarted node to boot and become accessible
+ helpers.wait_pass(
+ lambda: salt.local(node, "test.ping", timeout=5),
+ timeout=60 * 10, interval=5)
+
+ # TODO: finish the test once ASK_CONFIRMATION option is added to
+ # 'deploy-update-package' pipeline
+ @pytest.mark.grab_versions
+ @pytest.mark.ubuntu_security_updates_pipeline
+ def _test_obtaining_ubuntu_security_updates_via_pipeline(
+ self, salt_actions, drivetrain_actions, show_step):
+ """Test obtaining Ubuntu security updates using Jenkins
+
+ Scenario:
+ 1. Collect available package upgrades for nodes of the given server
+ role
+ 2. Execute deploy-update-package pipeline for the given server role
+ 3. Collect available package upgrades for server role nodes again
+ 4. Check that there are no candidates for upgrade
+ 5. Run cvp-sanity tests
+
+ Duration: ~ min
+ """
+ salt = salt_actions
+ dt = drivetrain_actions
+
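+ # Limit the run to the monitoring (mon*) nodes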
+ role = "mon*"
+ nodes = salt.local(role, "test.ping")['return'][0].keys()
+
+ # Collect available package upgrades for nodes
+ show_step(1)
+ updates = self.get_available_pkg_updates(nodes, salt)
+ LOG.info("Packages to be updated on nodes:\n{}".format(
+ json.dumps(updates, indent=4)))
+
+ # Execute 'deploy-update-package' pipeline to upgrade packages on nodes
+ show_step(2)
+ self.UPDATE_JOB_PARAMETERS["TARGET_SERVERS"] = role
+ status = dt.start_job_on_cid_jenkins(
+ job_name=self.UPDATE_JOB_NAME,
+ job_parameters=self.UPDATE_JOB_PARAMETERS,
+ start_timeout=self.JENKINS_START_TIMEOUT,
+ build_timeout=60 * 15
+ )
+ assert status == 'SUCCESS', (
+ "'{}' job run status is {} after upgrading packages on {} nodes. "
+ "Please check the build and executed stages.".format(
+ self.UPDATE_JOB_NAME, status, role)
+ )
+
+ # Collect available package upgrades for nodes again
+ show_step(3)
+ post_upgrade = self.get_available_pkg_updates(nodes, salt)
+
+ # Check that there are no available package upgrades
+ show_step(4)
+ for node in nodes:
+ assert not post_upgrade[node], (
+ "{} node still has upgrade candidates. Please check the "
+ "following packages and the reason why they are not "
+ "updated:\n{}".format(node, post_upgrade[node])
+ )
+
+ # Execute cvp-sanity tests
+ show_step(5)
+ status = self.run_cvp_sanity(dt)
+ assert status == 'SUCCESS', (
+ "'{0}' job run status is {1} after executing CVP-Sanity "
+ "tests".format(
+ self.SANITY_JOB_NAME, status)
+ )
+
+ @pytest.mark.grab_versions
+ @pytest.mark.ubuntu_security_updates_manual_infra_vms
+ def test_obtaining_ubuntu_security_updates_manual_infra_vms(
+ self, salt_actions, drivetrain_actions, show_step):
+ """Test obtaining Ubuntu security updates on virtual infra nodes.
+ Repeat the scenario for 01, 02 and 03 indexes of nodes.
+
+ Scenario:
+ 1. Select a set of virtual nodes for upgrade
+ 2. Collect available package upgrades for the nodes
+ 3. Upgrade the nodes
+ 4. Collect available package upgrades for the nodes again
+ 5. Check that there are no candidates for upgrade on the nodes
+ 6. Run cvp-sanity tests
+
+ Duration: ~ 100 min
+ """
+ salt = salt_actions
+ dt = drivetrain_actions
+
+ for index in ('01', '02', '03'):
+ msg = ("# Executing scenario for '{i}' index of nodes #".format(
+ i=index))
+ LOG.info(
+ "\n\n{pad}\n{msg}\n{pad}".format(pad="#" * len(msg), msg=msg))
+
+ # Select set of nodes for current iteration of updates
+ show_step(1)
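+ # Compound target: minions with the given index in their name, excluding
+ # names starting with kvm, cfg, cmp or osd (i.e. only virtual infra nodes)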
+ tgt = "*{}* and E@^(?!kvm|cfg|cmp|osd).*$".format(index)
+ nodes = salt.local(tgt, "test.ping")['return'][0].keys()
+ LOG.info("Nodes to be upgraded:\n{}".format(
+ json.dumps(nodes, indent=4)))
+
+ # Collect available package upgrades for the nodes
+ show_step(2)
+ updates = self.get_available_pkg_updates(nodes, salt)
+
+ # Upgrade the selected nodes
+ show_step(3)
+ for node in nodes:
+ LOG.info(
+ "Starting upgrade of '{}' node.\nThe following packages "
+ "will be updated:\n{}".format(
+ node, json.dumps(updates[node], indent=4))
+ )
+ salt.cmd_run(node, self.UPGRADE_CMD)
+
+ # Collect available package upgrades for the nodes again
+ show_step(4)
+ post_upgrade = self.get_available_pkg_updates(nodes, salt)
+
+ # Check that there are no package upgrade candidates on the nodes
+ show_step(5)
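+ # Nodes that still report packages available for upgrade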
+ missed_upd = {
+ node: pkgs for (node, pkgs) in post_upgrade.items() if pkgs}
+ assert not missed_upd, (
+ "{} nodes still have upgrade candidates. Please check the "
+ "nodes and reason why the listed packages are not "
+ "updated:\n{}".format(
+ missed_upd.keys(), json.dumps(missed_upd, indent=4))
+ )
+
+ # Execute cvp-sanity tests
+ show_step(6)
+ status = self.run_cvp_sanity(dt)
+ assert status == 'SUCCESS', (
+ "'{0}' job run status is {1} after executing CVP-Sanity smoke "
+ "tests".format(self.SANITY_JOB_NAME, status))
+
+ @pytest.mark.grab_versions
+ @pytest.mark.ubuntu_security_updates_manual_hw_nodes
+ def test_obtaining_ubuntu_security_updates_manual_hw_nodes(
+ self,
+ salt_actions,
+ underlay_actions,
+ drivetrain_actions,
+ show_step):
+ """Test obtaining Ubuntu security updates on HW nodes.
+ Repeat the scenario for 01, 02 and 03 indexes of nodes.
+
+ Scenario:
+ 1. Select a set of HW nodes for upgrade
+ 2. Collect available package upgrades for the nodes
+ 3. Upgrade the nodes
+ 4. Collect available package upgrades for the nodes again
+ 5. Check that there are no candidates for upgrade on the nodes
+ 6. Run cvp-sanity tests
+
+ Duration: ~ 70 min
+ """
+ salt = salt_actions
+ ssh = underlay_actions
+ dt = drivetrain_actions
+
+ for index in ('01', '02', '03'):
+ msg = ("# Executing scenario for '{i}' index of nodes #".format(
+ i=index))
+ LOG.info(
+ "\n\n{pad}\n{msg}\n{pad}".format(pad="#" * len(msg), msg=msg))
+
+ # Select set of nodes for current iteration of updates
+ show_step(1)
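+ # Target the kvm and cmp (compute) hardware nodes with the given index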
+ tgt = "E@^(kvm|cmp).?{}.*$".format(index)
+ nodes = salt.local(tgt, "test.ping")['return'][0].keys()
+ LOG.info("Nodes to be upgraded:\n{}".format(
+ json.dumps(nodes, indent=4)))
+
+ # Collect available package upgrades for the nodes
+ show_step(2)
+ updates = self.get_available_pkg_updates(nodes, salt)
+
+ # Upgrade the selected nodes
+ show_step(3)
+ for node in nodes:
+ LOG.info(
+ "Starting upgrade of '{}' node.\nThe following packages "
+ "will be updated:\n{}".format(
+ node, json.dumps(updates[node], indent=4))
+ )
+ salt.cmd_run(node, self.UPGRADE_CMD)
+ # Update Linux headers on compute nodes
+ if "cmp" in node:
+ LOG.info(
+ "Updating linux headers on '{}' node.".format(node))
+ salt.cmd_run(node, self.INST_LINUX_HEADERS_CMD)
+
+ # Reboot the node after upgrade
+ LOG.info("Starting reboot of '{}' node.".format(node))
+ self.reboot_hw_node(ssh, salt, node)
+ LOG.info("'{}' node is back after reboot.".format(node))
+
+ # Collect available package upgrades for the nodes again
+ show_step(4)
+ post_upgrade = self.get_available_pkg_updates(nodes, salt)
+
+ # Check that there are no package upgrade candidates on the nodes
+ show_step(5)
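+ # Nodes that still report packages available for upgrade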
+ missed_upd = {
+ node: pkgs for (node, pkgs) in post_upgrade.items() if pkgs}
+ assert not missed_upd, (
+ "{} nodes still have upgrade candidates. Please check the "
+ "nodes and reason why the listed packages are not "
+ "updated:\n{}".format(
+ missed_upd.keys(), json.dumps(missed_upd, indent=4))
+ )
+
+ # Execute cvp-sanity tests
+ show_step(6)
+ status = self.run_cvp_sanity(dt)
+ assert status == 'SUCCESS', (
+ "'{0}' job run status is {1} after executing CVP-Sanity "
+ "tests".format(self.SANITY_JOB_NAME, status))
diff --git a/tcp_tests/tests/system/test_update.py b/tcp_tests/tests/system/test_update.py
deleted file mode 100644
index f3834d3..0000000
--- a/tcp_tests/tests/system/test_update.py
+++ /dev/null
@@ -1,199 +0,0 @@
-import pytest
-
-from tcp_tests import logger
-from tcp_tests import settings
-
-LOG = logger.logger
-
-
-class TestUpdateMcpCluster(object):
- """
- Following the steps in
- https://docs.mirantis.com/mcp/master/mcp-operations-guide/update-upgrade/minor-update.html#minor-update
- """
-
- @pytest.mark.grab_versions
- @pytest.mark.parametrize("_", [settings.ENV_NAME])
- @pytest.mark.run_mcp_update
- def test_update_drivetrain(self, salt_actions, drivetrain_actions,
- show_step, _):
- """Updating DriveTrain component to release/proposed/2019.2.0 version
-
- Scenario:
- 1. Add workaround for PROD-32751
- 2. Run job git-mirror-downstream-mk-pipelines
- 3. Run job git-mirror-downstream-pipeline-library
- 4. If jobs are passed then start 'Deploy - upgrade MCP Drivetrain'
-
- Duration: ~35 min
- """
- salt = salt_actions
- dt = drivetrain_actions
-
- # #################### Add workaround for PROD-32751 #################
- show_step(1)
-
- # FIXME: workaround for PROD-32751
- salt.cmd_run("cfg01*", "cd /srv/salt/reclass; git add -u && \
- git commit --allow-empty -m 'Cluster model update'")
-
- # ################### Downstream mk-pipelines #########################
- show_step(2)
- job_name = 'git-mirror-downstream-mk-pipelines'
- job_parameters = {
- 'BRANCHES': 'release/proposed/2019.2.0'
- }
- update_pipelines = dt.start_job_on_cid_jenkins(
- job_name=job_name,
- job_parameters=job_parameters)
-
- assert update_pipelines == 'SUCCESS'
-
- # ################### Downstream pipeline-library ####################
- show_step(3)
- job_name = 'git-mirror-downstream-pipeline-library'
- job_parameters = {
- 'BRANCHES': 'release/proposed/2019.2.0'
- }
- update_pipeline_library = dt.start_job_on_cid_jenkins(
- job_name=job_name,
- job_parameters=job_parameters)
-
- assert update_pipeline_library == 'SUCCESS'
-
- # ################### Start 'Deploy - upgrade MCP Drivetrain' job #####
- show_step(4)
-
- job_name = 'upgrade-mcp-release'
- job_parameters = {
- 'MK_PIPELINES_REFSPEC': 'release/proposed/2019.2.0',
- 'TARGET_MCP_VERSION': '2019.2.0'
- }
- update_drivetrain = dt.start_job_on_cid_jenkins(
- job_name=job_name,
- job_parameters=job_parameters,
- build_timeout=3600)
-
- assert update_drivetrain == 'SUCCESS'
-
- @pytest.mark.grab_versions
- @pytest.mark.parametrize("_", [settings.ENV_NAME])
- @pytest.mark.run_mcp_update
- def test_update_glusterfs(self, salt_actions, reclass_actions,
- drivetrain_actions, show_step, _):
- """ Upgrade GlusterFS
- Scenario:
- 1. In infra/init.yml in Reclass, add the glusterfs_version parameter
- 2. Start linux.system.repo state
- 3. Start "update-glusterfs" job
- 4. Check version for GlusterFS servers
- 5. Check version for GlusterFS clients
-
- """
- salt = salt_actions
- reclass = reclass_actions
- dt = drivetrain_actions
-
- def has_only_similar(param_by_nodes):
- """
- :param param_by_nodes: dict
- :return: bool, True if all items in the dict have similar keys
- """
- params = list(param_by_nodes.values())
-
- def are_similar(x): return x == params[0]
-
- return all(map(are_similar, params)),
- # ############## Change reclass ######################################
- show_step(1)
- reclass.add_key(
- "parameters._param.linux_system_repo_mcp_glusterfs_version_number",
- "5",
- "cluster/*/infra/init.yml"
- )
- # ################# Run linux.system state ###########################
- show_step(2)
- salt.enforce_state("*", "linux.system.repo")
-
- # ############## Start deploy-upgrade-galera job #####################
- show_step(3)
- job_name = 'update-glusterfs'
-
- update_glusterfs = dt.start_job_on_cid_jenkins(
- job_name=job_name,
- build_timeout=40 * 60)
-
- assert update_glusterfs == 'SUCCESS'
-
- # ################ Check GlusterFS version for servers ##############
- show_step(4)
- gluster_server_versions_by_nodes = salt.cmd_run(
- "I@glusterfs:server",
- "glusterd --version|head -n1")[0]
-
- assert has_only_similar(gluster_server_versions_by_nodes),\
- gluster_server_versions_by_nodes
-
- # ################ Check GlusterFS version for clients ##############
- show_step(5)
- gluster_client_versions_by_nodes = salt.cmd_run(
- "I@glusterfs:client",
- "glusterfs --version|head -n1")[0]
-
- assert has_only_similar(gluster_client_versions_by_nodes), \
- gluster_client_versions_by_nodes
-
- @pytest.mark.grab_versions
- @pytest.mark.parametrize("_", [settings.ENV_NAME])
- @pytest.mark.run_mcp_update
- def test_update_galera(self, salt_actions, reclass_actions,
- drivetrain_actions, show_step, _):
- """ Upgrade Galera automatically
-
- Scenario:
- 1. Include the Galera upgrade pipeline job to DriveTrain
- 2. Apply the jenkins.client state on the Jenkins nodes
- 3. set the openstack_upgrade_enabled parameter to true
- 4. Refresh pillars
- 5. Add repositories with new Galera packages
- 6. Start job from Jenkins
- """
- salt = salt_actions
- reclass = reclass_actions
- dt = drivetrain_actions
- # ################### Enable pipeline #################################
- show_step(1)
- reclass.add_class(
- "system.jenkins.client.job.deploy.update.upgrade_galera",
- "cluster/*/cicd/control/leader.yml")
- show_step(2)
- salt.enforce_state("I@jenkins:client", "jenkins.client")
-
- # ############### Enable automatic upgrade ############################
- show_step(3)
- reclass.add_bool_key("parameters._param.openstack_upgrade_enabled",
- "True",
- "cluster/*/infra/init.yml")
-
- show_step(4)
- salt.enforce_state("dbs*", "saltutil.refresh_pillar")
-
- # ############# Add repositories with new Galera packages #######
- show_step(5)
- salt.enforce_state("dbs*", "linux.system.repo")
- salt.enforce_state("cfg*", "salt.master")
-
- # #################### Login Jenkins on cid01 node ###################
- show_step(6)
-
- job_name = 'deploy-upgrade-galera'
- job_parameters = {
- 'INTERACTIVE': 'false'
- }
-
- update_galera = dt.start_job_on_cid_jenkins(
- job_name=job_name,
- job_parameters=job_parameters,
- build_timeout=40 * 60)
-
- assert update_galera == 'SUCCESS'
diff --git a/tcp_tests/tests/system/test_upgrade_pike_queens.py b/tcp_tests/tests/system/test_upgrade_pike_queens.py
new file mode 100644
index 0000000..0ddf20b
--- /dev/null
+++ b/tcp_tests/tests/system/test_upgrade_pike_queens.py
@@ -0,0 +1,203 @@
+# Copyright 2016 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import pytest
+import json
+
+from tcp_tests import logger
+
+
+LOG = logger.logger
+
+
+class TestUpdatePikeToQueens(object):
+ """
+ Created for https://mirantis.jira.com/browse/PROD-32683
+ """
+ def execute_pre_post_steps(self, underlay_actions,
+ cfg_node, verbose, type):
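+ """Run <service>.upgrade.<type> states on the cluster nodes
+
+ The list of services is taken from orchestration:upgrade:applications
+ on the Salt master; each state is applied only on the non-cfg01 nodes
+ whose __reclass__:applications pillar contains that service.
+
+ :param type: str, upgrade stage, 'pre' or 'post'
+ """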
+ ret = underlay_actions.check_call(
+ node_name=cfg_node, verbose=verbose,
+ cmd="salt 'cfg01*' config.get"
+ " orchestration:upgrade:applications --out=json")
+ cfg_nodes_list = json.loads(ret['stdout_str'])
+ services_for_upgrade = []
+ for i in cfg_nodes_list:
+ for j in cfg_nodes_list[i]:
+ services_for_upgrade.append(j)
+ LOG.info(services_for_upgrade)
+ list_nodes = underlay_actions.check_call(
+ node_name=cfg_node, verbose=verbose,
+ cmd="salt-key -l accepted | grep -v cfg01 | "
+ "grep -v Accepted")['stdout_str'].splitlines()
+ LOG.info(list_nodes)
+ for node in list_nodes:
+ salt_pillars = underlay_actions.check_call(
+ node_name=cfg_node, verbose=verbose,
+ cmd="salt {} pillar.items __reclass__:applications"
+ " --out=json".format(node))
+ node_app_output = json.loads(salt_pillars['stdout_str'])
+ need_output = '__reclass__:applications'
+ LOG.info(node_app_output)
+ if need_output in node_app_output[node]:
+ node_applications = node_app_output[node][need_output]
+ LOG.info(node_applications)
+ for service in services_for_upgrade:
+ if service in node_applications:
+ underlay_actions.check_call(
+ node_name=cfg_node, verbose=verbose,
+ cmd="salt {} state.apply "
+ "{}.upgrade.{}".format(node, service, type))
+
+ @pytest.mark.day1_underlay
+ def test_upgrade_pike_queens(self,
+ show_step,
+ underlay_actions,
+ drivetrain_actions,
+ reclass_actions,
+ salt_actions):
+ """Execute upgrade from Pike to Queens
+
+ Scenario:
+ 1. Perform the pre-upgrade activities
+ 2. Upgrade control VMs
+ 3. Upgrade gateway nodes
+ 4. Upgrade compute nodes
+ 5. Perform the post-upgrade activities
+ 6. If the previous jobs passed, run tests with the cvp-sanity job
+ 7. Run tests with cvp-tempest job
+ """
+ cfg_node = underlay_actions.get_target_node_names(target='cfg')[0]
+ LOG.info('cfg node is {}'.format(cfg_node))
+ verbose = True
+ dt = drivetrain_actions
+ infra_init_yaml = "cluster/*/infra/init.yml"
+ # ########## Perform the pre-upgrade activities ##########
+ show_step(1)
+ LOG.info('Add parameters to {}'.format(infra_init_yaml))
+ reclass_actions.add_bool_key(
+ 'parameters._param.openstack_upgrade_enabled',
+ 'true',
+ infra_init_yaml)
+ LOG.info('Add openstack_version: queens')
+ reclass_actions.add_key(
+ 'parameters._param.openstack_version',
+ 'queens',
+ infra_init_yaml)
+ LOG.info('Add openstack_old_version: pike')
+ reclass_actions.add_key(
+ 'parameters._param.openstack_old_version',
+ 'pike',
+ infra_init_yaml)
+ reclass_actions.add_class(
+ 'system.keystone.client.v3',
+ 'cluster/*/openstack/control_init.yml'
+ )
+ underlay_actions.check_call(
+ node_name=cfg_node, verbose=verbose,
+ cmd="cd /srv/salt/reclass; git add -u && "
+ "git commit --allow-empty -m 'Cluster model update'")
+ LOG.info('Perform refresh_pillar')
+ salt_actions.run_state("*", "saltutil.refresh_pillar")
+ self.execute_pre_post_steps(underlay_actions, cfg_node,
+ verbose, 'pre')
+ LOG.info('Perform refresh_pillar')
+ salt_actions.run_state("*", "saltutil.refresh_pillar")
+ # ########## Upgrade control VMs #########
+ show_step(2)
+ LOG.info('Upgrade control VMs')
+ job_name = 'deploy-upgrade-control'
+ job_parameters = {
+ 'INTERACTIVE': False,
+ 'OS_DIST_UPGRADE': False,
+ 'OS_UPGRADE': False
+ }
+ update_control_vms = dt.start_job_on_cid_jenkins(
+ job_name=job_name,
+ job_parameters=job_parameters)
+ assert update_control_vms == 'SUCCESS'
+ # ########## Upgrade gateway nodes ###########
+ show_step(3)
+ LOG.info('Upgrade gateway')
+ job_name = 'deploy-upgrade-ovs-gateway'
+ job_parameters = {
+ 'INTERACTIVE': False,
+ 'OS_DIST_UPGRADE': False,
+ 'OS_UPGRADE': False
+ }
+ update_gateway = dt.start_job_on_cid_jenkins(
+ job_name=job_name,
+ job_parameters=job_parameters)
+ assert update_gateway == 'SUCCESS'
+ # ############ Upgrade compute nodes ############
+ show_step(4)
+ LOG.info('Upgrade compute nodes')
+ job_name = 'deploy-upgrade-compute'
+ job_parameters = {
+ 'INTERACTIVE': False,
+ 'OS_DIST_UPGRADE': False,
+ 'OS_UPGRADE': False
+ }
+ update_computes = dt.start_job_on_cid_jenkins(
+ job_name=job_name,
+ job_parameters=job_parameters)
+ assert update_computes == 'SUCCESS'
+ # ############ Perform the post-upgrade activities ##########
+ show_step(5)
+ LOG.info('Add parameters._param.openstack_upgrade_enabled false '
+ 'to {}'.format(infra_init_yaml))
+ reclass_actions.add_bool_key(
+ 'parameters._param.openstack_upgrade_enabled',
+ 'false',
+ infra_init_yaml)
+ underlay_actions.check_call(
+ node_name=cfg_node, verbose=verbose,
+ cmd="cd /srv/salt/reclass; git add -u && "
+ "git commit --allow-empty -m 'Cluster model update'")
+ LOG.info('Perform refresh_pillar')
+ salt_actions.run_state("*", "saltutil.refresh_pillar")
+ self.execute_pre_post_steps(underlay_actions, cfg_node,
+ verbose, 'post')
+ LOG.info('Perform refresh_pillar')
+ salt_actions.run_state("*", "saltutil.refresh_pillar")
+ # ######################## Run CPV ##########################
+ show_step(6)
+ job_name = 'cvp-sanity'
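+ # EXTRA_PARAMS is a YAML snippet consumed by the cvp-sanity job; the envs
+ # list below excludes packages, modules, services and individual tests
+ # from the sanity run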
+ job_parameters = {
+ 'EXTRA_PARAMS': '''
+ envs:
+ - skipped_packages='{},{},{},{},{},{}'
+ - skipped_modules='xunitmerge,setuptools'
+ - skipped_services='docker,containerd'
+ - ntp_skipped_nodes=''
+ - tests_set=-k "not {} and not {} and not {}"
+ '''.format('python-setuptools', 'python-pkg-resources',
+ 'xunitmerge', 'python-gnocchiclient',
+ 'python-ujson', 'python-octaviaclient',
+ 'test_ceph_status', 'test_prometheus_alert_count',
+ 'test_uncommited_changes')
+ }
+ run_cvp_sanity = dt.start_job_on_cid_jenkins(
+ job_name=job_name,
+ job_parameters=job_parameters)
+ assert run_cvp_sanity == 'SUCCESS'
+ # ######################## Run Tempest #######################
+ show_step(7)
+ job_name = 'cvp-tempest'
+ job_parameters = {
+ 'TEMPEST_ENDPOINT_TYPE': 'internalURL'
+ }
+ run_cvp_tempest = dt.start_job_on_cid_jenkins(
+ job_name=job_name,
+ job_parameters=job_parameters)
+ assert run_cvp_tempest == 'SUCCESS'