Merge "pin Openstackclient version"
diff --git a/tcp_tests/managers/drivetrain_manager.py b/tcp_tests/managers/drivetrain_manager.py
index 6e2fbda..8ef758d 100644
--- a/tcp_tests/managers/drivetrain_manager.py
+++ b/tcp_tests/managers/drivetrain_manager.py
@@ -13,6 +13,11 @@
# under the License.
from tcp_tests.managers.execute_commands import ExecuteCommandsMixin
+from tcp_tests.utils import run_jenkins_job
+from tcp_tests.utils import get_jenkins_job_stages
+from tcp_tests import logger
+
+LOG = logger.logger
class DrivetrainManager(ExecuteCommandsMixin):
@@ -32,3 +37,70 @@
self.execute_commands(commands,
label='Install Drivetrain Tools')
self.__config.drivetrain.drivetrain_installed = True
+
+ def start_job_on_cid_jenkins(self, job_name,
+ **kwargs):
+ """
+ Starts job with specific parameters on cluster Jenkins
+
+        Method accepts any of the following params:
+ job_parameters=None,
+ job_output_prefix='',
+ start_timeout=1800,
+ build_timeout=3600 * 4,
+ verbose=False
+
+ :param job_name: string
+        :return: string, result of passed job: "SUCCESS" | "FAILED" | "UNSTABLE"
+ """
+ jenkins_url, jenkins_user, jenkins_pass = self.get_jenkins_creds(
+ tgt='I@docker:client:stack:jenkins and cid01*')
+
+ job_result = run_jenkins_job.run_job(
+ host=jenkins_url,
+ username=jenkins_user,
+ password=jenkins_pass,
+ job_name=job_name,
+ **kwargs)
+
+ (description, stages) = get_jenkins_job_stages.get_deployment_result(
+ host=jenkins_url,
+ username=jenkins_user,
+ password=jenkins_pass,
+ job_name=job_name,
+ build_number='lastBuild')
+
+ LOG.info(description)
+ LOG.info('\n'.join(stages))
+
+ if job_result != 'SUCCESS':
+ LOG.warning("{0}\n{1}".format(description, '\n'.join(stages)))
+ return job_result
+
+ def start_job_on_cfg_jenkins(self):
+ pass
+
+ def get_jenkins_creds(self, tgt):
+ """
+        Requests Jenkins login parameters from pillars on the desired node
+
+        :return: tuple (jenkins_url, jenkins_user, jenkins_pass)
+ """
+ jenkins_host = self._salt.get_single_pillar(
+ tgt=tgt, pillar="jenkins:client:master:host")
+ if jenkins_host is None:
+ raise Exception(
+ "Can't find 'jenkins:client:master' pillar on {tgt} node."
+ .format(tgt=tgt))
+ jenkins_port = self._salt.get_single_pillar(
+ tgt=tgt, pillar="jenkins:client:master:port")
+ jenkins_protocol = self._salt.get_single_pillar(
+ tgt=tgt, pillar="jenkins:client:master:proto")
+ jenkins_url = '{0}://{1}:{2}'.format(jenkins_protocol,
+ jenkins_host,
+ jenkins_port)
+ jenkins_user = self._salt.get_single_pillar(
+ tgt=tgt, pillar="jenkins:client:master:username")
+ jenkins_pass = self._salt.get_single_pillar(
+ tgt=tgt, pillar="jenkins:client:master:password")
+ return jenkins_url, jenkins_user, jenkins_pass
diff --git a/tcp_tests/managers/saltmanager.py b/tcp_tests/managers/saltmanager.py
index 84e4827..226f408 100644
--- a/tcp_tests/managers/saltmanager.py
+++ b/tcp_tests/managers/saltmanager.py
@@ -374,29 +374,6 @@
password=jenkins_pass)
)
- def get_cluster_jenkins_creds(self):
- """
- Requests cluster Jenkins's login parameters from pillars
- :return: dict {'url': jenkins_url,
- 'user': jenkins_user,
- 'pass': jenkins_pass}
- """
- tgt = 'I@docker:client:stack:jenkins and cid01*'
- jenkins_host = self.get_single_pillar(
- tgt=tgt, pillar="jenkins:client:master:host")
- jenkins_port = self.get_single_pillar(
- tgt=tgt, pillar="jenkins:client:master:port")
- jenkins_protocol = self.get_single_pillar(
- tgt=tgt, pillar="jenkins:client:master:proto")
- jenkins_url = '{0}://{1}:{2}'.format(jenkins_protocol,
- jenkins_host,
- jenkins_port)
- jenkins_user = self.get_single_pillar(
- tgt=tgt, pillar="jenkins:client:master:username")
- jenkins_pass = self.get_single_pillar(
- tgt=tgt, pillar="jenkins:client:master:password")
- return {'url': jenkins_url, 'user': jenkins_user, 'pass': jenkins_pass}
-
def create_env_jenkins_cicd(self):
"""Creates static utils/env_jenkins_cicd file"""
@@ -451,11 +428,6 @@
password=jenkins_pass)
)
- def add_cluster_reclass(self, key, value, path):
- # TODO : add reclass tools as a library to tcp-qa
- self.cmd_run('I@salt:master',
- "reclass-tools add-key {key} {value} {path}")
-
def create_env_k8s(self):
"""Creates static utils/env_k8s file"""
diff --git a/tcp_tests/tests/system/test_backup_restore.py b/tcp_tests/tests/system/test_backup_restore.py
index b99f408..4e9c46a 100644
--- a/tcp_tests/tests/system/test_backup_restore.py
+++ b/tcp_tests/tests/system/test_backup_restore.py
@@ -13,8 +13,12 @@
# under the License.
import pytest
+from devops.helpers import helpers
+from devops.helpers.proc_enums import ExitCodes
+
from tcp_tests import logger
from tcp_tests.managers import backup_restore_manager
+from tcp_tests import settings
LOG = logger.logger
@@ -22,10 +26,381 @@
class TestBackupRestoreMaster(object):
"""Test class for testing backup restore of master node"""
+ ENV_NAME = settings.ENV_NAME
+
+ BCKP_SERVER_DIR = "/srv/volumes/backup/backupninja"
+ RECLASS_DIR = "/srv/salt/reclass"
+ FILES_TO_DELETE = [
+ "nodes/_generated/log02.{}.local.yml".format(ENV_NAME),
+ "classes/cluster/{}/stacklight/telemetry.yml".format(ENV_NAME),
+ "classes/service/barbican",
+ "classes/system/prometheus/alertmanager/container.yml"
+ ]
+ FILES_TO_UPDATE = [
+ "nodes/_generated/mtr02.{}.local.yml".format(ENV_NAME),
+ "classes/cluster/{}/ceph/rgw.yml".format(ENV_NAME),
+ "classes/system/grafana/client/single.yml"
+ ]
+
+ BACKUP_JOB_NAME = 'backupninja_backup'
+ BACKUP_JOB_PARAMETERS = {
+ "ASK_CONFIRMATION": False
+ }
+ RESTORE_JOB_NAME = 'backupninja_restore'
+ JENKINS_START_TIMEOUT = 60
+ JENKINS_BUILD_TIMEOUT = 60 * 30
+
+ @pytest.fixture
+ def delete_backup(self, underlay_actions, salt_actions):
+ """Remove Salt master backup and/or restore flag files
+
+        If they exist, remove existing backup(s) from the backup server.
+ If exists, remove '/srv/salt/master-restored' and
+ '/srv/salt/minion-restored' flag files, which indicate that Salt master
+ backup restore procedure has already been executed.
+
+ Execute cleanup before a test (to prepare clean environment) and after
+ the test (to not affect any later activities on the environment).
+
+ :param underlay_actions: UnderlaySSHManager, tcp-qa SSH manager
+ instance
+ :param salt_actions: SaltManager, tcp-qa Salt manager instance
+ """
+ client = salt_actions.local(
+ "I@backupninja:client", "test.ping")['return'][0].keys()[0]
+ server = salt_actions.local(
+ "I@backupninja:server", "test.ping")['return'][0].keys()[0]
+
+ def cleanup(underlay_actions, server, client):
+ # Delete backups, if any, from backup server
+ path = "{}/{}".format(self.BCKP_SERVER_DIR, client)
+ underlay_actions.check_call(
+ "rm -rf {}".format(path), node_name=server, raise_on_err=False)
+
+ # Delete restore flag files from backup client, if exist
+ for f in ("minion-restored", "master-restored"):
+ underlay_actions.check_call(
+ "rm /srv/salt/{}".format(f),
+ node_name=client,
+ raise_on_err=False)
+
+ cleanup(underlay_actions, server, client)
+ yield
+ cleanup(underlay_actions, server, client)
+
+ def check_salt_master_backup(self, ssh, server, path, client):
+ """Check that data directories exist in backup on backup server
+
+ :param ssh: UnderlaySSHManager, tcp-qa SSH manager instance
+ :param server: string, backup server node where backup is stored
+ :param path: string, path to backupninja inventory of backups on server
+ :param client: string, backup client node name, which indicates the
+ name of backup on backup server
+ """
+ for subdir in ("etc", "srv", "var"):
+ cmd = "test -d {}/{}/{}".format(path, client, subdir)
+ result = ssh.check_call(
+ cmd, node_name=server, raise_on_err=False)['exit_code']
+ assert result == ExitCodes.EX_OK, (
+ "'{}' data from Salt master is not in backup.".format(subdir))
+
+ def delete_reclass_files(self, ssh, client):
+ """Delete several reclass files
+
+ :param ssh: UnderlaySSHManager, tcp-qa SSH manager instance
+ :param client: string, backup client node where files are deleted
+ """
+ files_to_delete = " ".join(self.FILES_TO_DELETE)
+ ssh.check_call(
+ "cd {}; rm {}".format(self.RECLASS_DIR, files_to_delete),
+ node_name=client,
+ raise_on_err=False)
+
+ def update_reclass_files(self, ssh, client):
+ """Update several reclass files
+
+ :param ssh: UnderlaySSHManager, tcp-qa SSH manager instance
+ :param client: string, backup client node where files are updated
+ :return: dict, key-value pairs of files and their hashes before update
+ """
+ hashes = {}
+ for f in self.FILES_TO_UPDATE:
+ path = "{}/{}".format(self.RECLASS_DIR, f)
+ # Calculate hash of a file
+ hashes[f] = ssh.check_call(
+ "sha1sum {} | awk '{{print $1}}'".format(path),
+ node_name=client,
+ raise_on_err=False)['stdout']
+ # Update a file with a dummy string
+ ssh.check_call(
+ "echo '{}' >> {}".format("#" * 200, path),
+ node_name=client,
+ raise_on_err=False)
+ return hashes
+
+ def update_backup_schedule(self, reclass):
+ """Update backup schedule on backupninja client
+
+ :param reclass: ReclassManager, tcp-qa Reclass-tools manager
+ """
+ path = "cluster/*/infra/config/init.yml"
+ reclass.add_bool_key("parameters.backupninja.enabled", "True", path)
+ reclass.add_key(
+ "parameters.backupninja.client.backup_times.minute",
+ "\"'*/10'\"",
+ path)
+
+ def verify_restored_data(self, ssh, client, hashes):
+ """Verify restore of deleted and updated reclass files
+
+ :param ssh: UnderlaySSHManager, tcp-qa SSH manager instance
+ :param client: string, backup client node where files are updated
+ :param hashes: dict, key-value pairs of files and their hashes
+ before update
+ """
+ # Verify that deleted files are restored
+ for f in self.FILES_TO_DELETE:
+ path = "{}/{}".format(self.RECLASS_DIR, f)
+ result = ssh.check_call(
+ "test -f {}".format(path),
+ node_name=client,
+ raise_on_err=False)['exit_code']
+ assert result == ExitCodes.EX_OK, (
+ "'{}' data is not in restored on Salt master.".format(path))
+ # Verify that changed files are restored
+ for f in self.FILES_TO_UPDATE:
+ path = "{}/{}".format(self.RECLASS_DIR, f)
+ f_hash = ssh.check_call(
+ "sha1sum {} | awk '{{print $1}}'".format(path),
+ node_name=client,
+ raise_on_err=False)['stdout']
+ assert hashes[f] == f_hash, (
+ "'{}' data is not in restored on Salt master.".format(path))
+
+ @pytest.mark.grab_versions
+ @pytest.mark.salt_master_manual_backup_restore
+ def test_salt_master_manual_backup_restore(
+ self, underlay_actions, salt_actions, show_step, delete_backup):
+ """Test manual backup restore of Salt master data
+
+ Scenario:
+ 1. Backup Salt master node
+ 2. Verify that Salt master backup is created on backupninja server
+ node
+ 3. Delete/change some reclass data
+ 4. Restore the backup
+ 5. Verify that Salt master data backup is restored
+ 6. Verify that minions are responding
+
+ Duration: ~ 3 min
+ """
+ salt = salt_actions
+ ssh = underlay_actions
+
+ backup_client = salt.local(
+ "I@backupninja:client", "test.ping")['return'][0].keys()[0]
+ backup_server = salt.local(
+ "I@backupninja:server", "test.ping")['return'][0].keys()[0]
+
+ # Create backup by moving local files to the 'backupninja' server
+ show_step(1)
+ cmd = "backupninja -n --run /etc/backup.d/200.backup.rsync"
+ ssh.check_call(
+ cmd, node_name=backup_client, raise_on_err=False, timeout=60 * 4)
+
+ # Verify that backup is created and all pieces of data are rsynced
+ # to backupninja server
+ show_step(2)
+ self.check_salt_master_backup(
+ ssh, backup_server, self.BCKP_SERVER_DIR, backup_client)
+
+ # Simulate loss/change of some reclass data
+ show_step(3)
+ self.delete_reclass_files(ssh, backup_client)
+ hashes = self.update_reclass_files(ssh, backup_client)
+
+ # Restore the backup
+ show_step(4)
+ ssh.check_call(
+ "salt-call state.sls salt.master.restore,salt.minion.restore",
+ node_name=backup_client,
+ raise_on_err=False,
+ timeout=60 * 4)
+
+ # Verify that all pieces of lost/changed data are restored
+ show_step(5)
+ self.verify_restored_data(ssh, backup_client, hashes)
+
+ # Ping minions
+ show_step(6)
+ salt.local('*', "test.ping", timeout=30)
+
+ @pytest.mark.grab_versions
+ @pytest.mark.salt_master_manual_backup_restore_pipeline
+ def test_salt_master_manual_backup_restore_pipeline(
+ self,
+ underlay_actions,
+ salt_actions,
+ drivetrain_actions,
+ show_step,
+ delete_backup):
+ """Test manual backup restore of Salt master data using DT pipeline
+
+ Scenario:
+ 1. Execute 'backupninja_backup' pipeline to backup Salt
+ master node
+ 2. Verify that Salt master backup is created on backupninja server
+ node
+ 3. Delete/change some reclass data
+ 4. Restore the backup
+ 5. Verify that Salt master data backup is restored
+ 6. Verify that minions are responding
+
+ Duration: ~ 3 min
+ """
+ salt = salt_actions
+ ssh = underlay_actions
+ dt = drivetrain_actions
+
+ backup_client = salt.local(
+ "I@backupninja:client", "test.ping")['return'][0].keys()[0]
+ backup_server = salt.local(
+ "I@backupninja:server", "test.ping")['return'][0].keys()[0]
+
+ # Execute 'backupninja_backup' pipeline to create a backup
+ show_step(1)
+ status = dt.start_job_on_cid_jenkins(
+ job_name=self.BACKUP_JOB_NAME,
+ job_parameters=self.BACKUP_JOB_PARAMETERS,
+ start_timeout=self.JENKINS_START_TIMEOUT,
+ build_timeout=self.JENKINS_BUILD_TIMEOUT
+ )
+ assert status == 'SUCCESS', (
+ "'{}' job run status is {} after creating Salt master backup. "
+ "Please check the build and executed stages.".format(
+ self.BACKUP_JOB_NAME, status)
+ )
+
+ # Verify that backup is created and all pieces of data are rsynced
+ # to backupninja server
+ show_step(2)
+ self.check_salt_master_backup(
+ ssh, backup_server, self.BCKP_SERVER_DIR, backup_client)
+
+ # Simulate loss/change of some reclass data
+ show_step(3)
+ self.delete_reclass_files(ssh, backup_client)
+ hashes = self.update_reclass_files(ssh, backup_client)
+
+ # Restore the backup
+ show_step(4)
+ status = dt.start_job_on_cid_jenkins(
+ job_name=self.RESTORE_JOB_NAME,
+ start_timeout=self.JENKINS_START_TIMEOUT,
+ build_timeout=self.JENKINS_BUILD_TIMEOUT
+ )
+ assert status == 'SUCCESS', (
+ "'{}' job run status is {} after restoring from Salt master "
+ "backup. Please check the build and executed stages.".format(
+ self.RESTORE_JOB_NAME, status)
+ )
+
+ # Verify that all pieces of lost/changed data are restored
+ show_step(5)
+ self.verify_restored_data(ssh, backup_client, hashes)
+
+ # Ping minions
+ show_step(6)
+ salt.local('*', "test.ping", timeout=30)
+
+ @pytest.mark.grab_versions
+ @pytest.mark.salt_master_scheduled_backup_restore
+ def test_salt_master_scheduled_backup_restore(
+ self,
+ underlay_actions,
+ salt_actions,
+ reclass_actions,
+ show_step,
+ delete_backup):
+ """Test scheduled backup restore of Salt master data
+
+ Scenario:
+ 1. Update Salt master backup schedule to run every 5 minutes
+ 2. Apply 'backupninja' state on the backupninja client node
+ 3. Wait until backup creation is triggered by schedule
+ 4. Wait until backup creation is finished
+ 5. Verify that Salt master backup is created on backupninja server
+ node
+ 6. Delete/change some reclass data
+ 7. Restore the backup
+ 8. Verify that Salt master data backup is restored
+ 9. Verify that minions are responding
+
+ Duration: ~ 3 min
+ """
+ salt = salt_actions
+ ssh = underlay_actions
+ reclass = reclass_actions
+
+ backup_client = salt.local(
+ "I@backupninja:client", "test.ping")['return'][0].keys()[0]
+ backup_server = salt.local(
+ "I@backupninja:server", "test.ping")['return'][0].keys()[0]
+
+ # Re-configure backup schedule
+ show_step(1)
+ self.update_backup_schedule(reclass)
+
+ # Apply 'backupninja' state on backupninja client node
+ show_step(2)
+ salt.enforce_state("I@backupninja:client", "backupninja")
+
+ # Wait until backup is triggered by schedule
+ show_step(3)
+ helpers.wait_pass(
+ lambda: ssh.check_call(
+ cmd="pgrep backupninja && echo OK", node_name=backup_client),
+ timeout=60 * 11,
+ interval=5)
+
+ # Wait until backup is finished
+ show_step(4)
+ ssh.check_call(
+ cmd="while pgrep backupninja > /dev/null; do sleep 2; done",
+ node_name=backup_client,
+ timeout=60 * 5)
+
+ # Verify that backup is created and all pieces of data are rsynced
+ # to backupninja server
+ show_step(5)
+ self.check_salt_master_backup(
+ ssh, backup_server, self.BCKP_SERVER_DIR, backup_client)
+
+ # Simulate loss/change of some reclass data
+ show_step(6)
+ self.delete_reclass_files(ssh, backup_client)
+ hashes = self.update_reclass_files(ssh, backup_client)
+
+ # Restore the backup
+ show_step(7)
+ ssh.check_call(
+ "salt-call state.sls salt.master.restore,salt.minion.restore",
+ node_name=backup_client,
+ raise_on_err=False,
+ timeout=60 * 4)
+
+ # Verify that all pieces of lost/changed data are restored
+ show_step(8)
+ self.verify_restored_data(ssh, backup_client, hashes)
+
+ # Ping minions
+ show_step(9)
+ salt.local('*', "test.ping", timeout=30)
+
@pytest.mark.grab_versions
@pytest.mark.fail_snapshot
@pytest.mark.backup_all
- def test_backup_cfg_backupninja_rsync(
+ def _test_backup_cfg_backupninja_rsync(
self, underlay, config, openstack_deployed,
salt_actions, show_step):
"""Test backup restore master node
diff --git a/tcp_tests/tests/system/test_backup_restore_galera.py b/tcp_tests/tests/system/test_backup_restore_galera.py
index 9309466..96ea98a 100644
--- a/tcp_tests/tests/system/test_backup_restore_galera.py
+++ b/tcp_tests/tests/system/test_backup_restore_galera.py
@@ -2,8 +2,6 @@
from tcp_tests import logger
from tcp_tests import settings
-from tcp_tests.utils import run_jenkins_job
-from tcp_tests.utils import get_jenkins_job_stages
LOG = logger.logger
@@ -16,29 +14,19 @@
@pytest.mark.grab_versions
@pytest.mark.parametrize("_", [settings.ENV_NAME])
@pytest.mark.run_galera_backup_restore
- def test_backup_restore_galera(self, salt_actions, show_step, _):
+ def test_backup_restore_galera(self, drivetrain_actions,
+ show_step, _):
"""Execute backup/restore for galera
Scenario:
- 1. Get CICD Jenkins access credentials from salt
- 2. Run job galera_backup_database
- 3. Run tests with cvp-sanity job
- 4. Run tests with cvp-tempest job
- 5. Run job galera_verify_restore
- 6. If jobs are passed then start tests with cvp-sanity job
- 7. Run tests with cvp-tempest job
+ 1. Run job galera_backup_database
+ 2. Run tests with cvp-sanity job
+ 3. Run tests with cvp-tempest job
+ 4. Run job galera_verify_restore
+ 5. If jobs are passed then start tests with cvp-sanity job
+ 6. Run tests with cvp-tempest job
"""
- salt = salt_actions
- jenkins_creds = salt.get_cluster_jenkins_creds()
-
- # ################### Login Jenkins on cid01 node ###################
- show_step(1)
-
- jenkins_url = jenkins_creds.get('url')
- jenkins_user = jenkins_creds.get('user')
- jenkins_pass = jenkins_creds.get('pass')
- jenkins_start_timeout = 60
- jenkins_build_timeout = 1800
+ dt = drivetrain_actions
# ################## Run backup job #########################
show_step(2)
@@ -46,28 +34,12 @@
job_parameters = {
'ASK_CONFIRMATION': False
}
- backup_galera_pipeline = run_jenkins_job.run_job(
- host=jenkins_url,
- username=jenkins_user,
- password=jenkins_pass,
- start_timeout=jenkins_start_timeout,
- build_timeout=jenkins_build_timeout,
- verbose=False,
+ backup_galera_pipeline = dt.start_job_on_cid_jenkins(
job_name=job_name,
job_parameters=job_parameters)
- (description, stages) = get_jenkins_job_stages.get_deployment_result(
- host=jenkins_url,
- username=jenkins_user,
- password=jenkins_pass,
- job_name=job_name,
- build_number='lastBuild')
+ assert backup_galera_pipeline == 'SUCCESS'
- LOG.info(description)
- LOG.info('\n'.join(stages))
-
- assert backup_galera_pipeline == 'SUCCESS', "{0}\n{1}".format(
- description, '\n'.join(stages))
# ######################## Run CPV ###########################
show_step(3)
job_name = 'cvp-sanity'
@@ -86,57 +58,23 @@
'test_ceph_status', 'test_prometheus_alert_count',
'test_uncommited_changes')
}
- job_parameters = job_cvp_sanity_parameters
- run_cvp_sanity = run_jenkins_job.run_job(
- host=jenkins_url,
- username=jenkins_user,
- password=jenkins_pass,
- start_timeout=jenkins_start_timeout,
- build_timeout=jenkins_build_timeout,
- verbose=False,
+ run_cvp_sanity = dt.start_job_on_cid_jenkins(
job_name=job_name,
- job_parameters=job_parameters)
+ job_parameters=job_cvp_sanity_parameters)
- (description, stages) = get_jenkins_job_stages.get_deployment_result(
- host=jenkins_url,
- username=jenkins_user,
- password=jenkins_pass,
- job_name=job_name,
- build_number='lastBuild')
+ assert run_cvp_sanity == 'SUCCESS'
- LOG.info(description)
- LOG.info('\n'.join(stages))
-
- assert run_cvp_sanity == 'SUCCESS', "{0}\n{1}".format(
- description, '\n'.join(stages))
# ######################## Run Tempest ###########################
show_step(4)
job_name = 'cvp-tempest'
job_parameters = {
'TEMPEST_ENDPOINT_TYPE': 'internalURL'
}
- run_cvp_tempest = run_jenkins_job.run_job(
- host=jenkins_url,
- username=jenkins_user,
- password=jenkins_pass,
- start_timeout=jenkins_start_timeout,
- build_timeout=jenkins_build_timeout,
- verbose=False,
+ run_cvp_tempest = dt.start_job_on_cid_jenkins(
job_name=job_name,
job_parameters=job_parameters)
- (description, stages) = get_jenkins_job_stages.get_deployment_result(
- host=jenkins_url,
- username=jenkins_user,
- password=jenkins_pass,
- job_name=job_name,
- build_number='lastBuild')
-
- LOG.info(description)
- LOG.info('\n'.join(stages))
-
- assert run_cvp_tempest == 'SUCCESS', "{0}\n{1}".format(
- description, '\n'.join(stages))
+ assert run_cvp_tempest == 'SUCCESS'
# ######################## Run Restore ###########################
show_step(5)
job_name = 'galera_verify_restore'
@@ -144,79 +82,27 @@
'RESTORE_TYPE': 'ONLY_RESTORE',
'ASK_CONFIRMATION': False
}
- run_galera_verify_restore = run_jenkins_job.run_job(
- host=jenkins_url,
- username=jenkins_user,
- password=jenkins_pass,
- start_timeout=jenkins_start_timeout,
- build_timeout=jenkins_build_timeout,
- verbose=False,
+ run_galera_verify_restore = dt.start_job_on_cid_jenkins(
job_name=job_name,
job_parameters=job_parameters)
- (description, stages) = get_jenkins_job_stages.get_deployment_result(
- host=jenkins_url,
- username=jenkins_user,
- password=jenkins_pass,
- job_name=job_name,
- build_number='lastBuild')
-
- LOG.info(description)
- LOG.info('\n'.join(stages))
-
- assert run_galera_verify_restore == 'SUCCESS', "{0}\n{1}".format(
- description, '\n'.join(stages))
+ assert run_galera_verify_restore == 'SUCCESS'
# ######################## Run CPV ###########################
show_step(6)
job_name = 'cvp-sanity'
- job_parameters = job_cvp_sanity_parameters
- run_cvp_sanity = run_jenkins_job.run_job(
- host=jenkins_url,
- username=jenkins_user,
- password=jenkins_pass,
- start_timeout=jenkins_start_timeout,
- build_timeout=jenkins_build_timeout,
- verbose=False,
+ run_cvp_sanity = dt.start_job_on_cid_jenkins(
job_name=job_name,
- job_parameters=job_parameters)
+ job_parameters=job_cvp_sanity_parameters)
- (description, stages) = get_jenkins_job_stages.get_deployment_result(
- host=jenkins_url,
- username=jenkins_user,
- password=jenkins_pass,
- job_name=job_name,
- build_number='lastBuild')
-
- LOG.info(description)
- LOG.info('\n'.join(stages))
-
- assert run_cvp_sanity == 'SUCCESS', "{0}\n{1}".format(
- description, '\n'.join(stages))
+ assert run_cvp_sanity == 'SUCCESS'
# ######################## Run Tempest ###########################
show_step(7)
job_name = 'cvp-tempest'
job_parameters = {
'TEMPEST_ENDPOINT_TYPE': 'internalURL'
}
- run_cvp_tempest = run_jenkins_job.run_job(
- host=jenkins_url,
- username=jenkins_user,
- password=jenkins_pass,
- start_timeout=jenkins_start_timeout,
- build_timeout=jenkins_build_timeout,
- verbose=False,
+ run_cvp_tempest = dt.start_job_on_cid_jenkins(
job_name=job_name,
job_parameters=job_parameters)
- (description, stages) = get_jenkins_job_stages.get_deployment_result(
- host=jenkins_url,
- username=jenkins_user,
- password=jenkins_pass,
- job_name=job_name,
- build_number='lastBuild')
-
- LOG.info(description)
- LOG.info('\n'.join(stages))
-
- assert run_cvp_tempest == 'SUCCESS', "{0}\n{1}".format(
- description, '\n'.join(stages))
+ assert run_cvp_tempest == 'SUCCESS'
diff --git a/tcp_tests/tests/system/test_failover_ceph.py b/tcp_tests/tests/system/test_failover_ceph.py
index 2493083..6745f80 100644
--- a/tcp_tests/tests/system/test_failover_ceph.py
+++ b/tcp_tests/tests/system/test_failover_ceph.py
@@ -14,10 +14,7 @@
import pytest
from devops.helpers import helpers
-
from tcp_tests import logger
-from tcp_tests.utils import get_jenkins_job_stages
-from tcp_tests.utils import run_jenkins_job
LOG = logger.logger
@@ -60,45 +57,12 @@
for node_name in node_names
}
- def run_jenkins_job(
- self, creds, name, parameters, start_timeout, build_timeout):
- """Execute a Jenkins job with provided parameters
-
- :param creds: dict, Jenkins url and user credentials
- :param name: string, Jenkins job to execute
- :param parameters: dict, parameters for Jenkins job
- :parameter start_timeout: int, timeout to wait until build is started
- :parameter build_timeout: int, timeout to wait until build is finished
- :return: tuple, Jenkins job build execution status, high level
- description of the build and verbose decription of executed job
- stages
- """
- jenkins_url, jenkins_user, jenkins_pass = (
- creds['url'], creds['user'], creds['pass'])
- build_status = run_jenkins_job.run_job(
- host=jenkins_url,
- username=jenkins_user,
- password=jenkins_pass,
- start_timeout=start_timeout,
- build_timeout=build_timeout,
- verbose=False,
- job_name=name,
- job_parameters=parameters)
-
- description, stages = get_jenkins_job_stages.get_deployment_result(
- host=jenkins_url,
- username=jenkins_user,
- password=jenkins_pass,
- job_name=name,
- build_number='lastBuild')
-
- return build_status, description, stages
-
@pytest.mark.grab_versions
@pytest.mark.restart_osd_node
def test_restart_osd_node(
self,
salt_actions,
+ drivetrain_actions,
underlay_actions,
show_step):
"""Verify that Ceph OSD node is not affected by system restart
@@ -118,6 +82,7 @@
"""
salt = salt_actions
ssh = underlay_actions
+ dt = drivetrain_actions
# Find Ceph OSD nodes
show_step(1)
@@ -153,7 +118,7 @@
# Check Ceph cluster health after node restart
show_step(4)
- ceph_health = self.get_ceph_health(ssh, osd_hosts) # noqa
+ ceph_health = self.get_ceph_health(ssh, osd_hosts) # noqa
# FIXME: uncomment the check once PROD-31374 is fixed
# status = all(
# ["OK" in status for node, status in ceph_health.items()])
@@ -161,35 +126,31 @@
# Run Tempest smoke test suite
show_step(5)
- jenkins_creds = salt.get_cluster_jenkins_creds()
- status, description, stages = self.run_jenkins_job(
- jenkins_creds,
- self.TEMPEST_JOB_NAME,
- self.TEMPEST_JOB_PARAMETERS,
- self.JENKINS_START_TIMEOUT,
- self.JENKINS_BUILD_TIMEOUT
+ status = dt.start_job_on_cid_jenkins(
+ job_name=self.TEMPEST_JOB_NAME,
+ job_parameters=self.TEMPEST_JOB_PARAMETERS,
+ start_timeout=self.JENKINS_START_TIMEOUT,
+ build_timeout=self.JENKINS_BUILD_TIMEOUT
)
+
assert status == 'SUCCESS', (
"'{0}' job run status is {1} after executing Tempest smoke "
- "tests. Please check the build:\n{2}\n\nExecuted build "
- "stages:\n{3}".format(
- self.TEMPEST_JOB_NAME, status, description, '\n'.join(stages))
+ "tests".format(
+ self.TEMPEST_JOB_NAME, status)
)
# Run Sanity test
show_step(6)
- status, description, stages = self.run_jenkins_job(
- jenkins_creds,
- self.SANITY_JOB_NAME,
- self.SANITY_JOB_PARAMETERS,
- self.JENKINS_START_TIMEOUT,
- self.JENKINS_BUILD_TIMEOUT
+ status = dt.start_job_on_cid_jenkins(
+ job_name=self.SANITY_JOB_NAME,
+ job_parameters=self.SANITY_JOB_PARAMETERS,
+ start_timeout=self.JENKINS_START_TIMEOUT,
+ build_timeout=self.JENKINS_BUILD_TIMEOUT
)
assert status == 'SUCCESS', (
"'{0}' job run status is {1} after executing selected sanity "
- "tests. Please check the build:\n{2}\n\nExecuted build "
- "stages:\n{3}".format(
- self.SANITY_JOB_NAME, status, description, '\n'.join(stages))
+ "tests".format(
+ self.SANITY_JOB_NAME, status)
)
@pytest.mark.grab_versions
@@ -197,6 +158,7 @@
def test_restart_cmn_node(
self,
salt_actions,
+ drivetrain_actions,
underlay_actions,
show_step):
"""Verify that Ceph CMN node is not affected by system restart
@@ -216,6 +178,7 @@
"""
salt = salt_actions
ssh = underlay_actions
+ dt = drivetrain_actions
# Find Ceph CMN nodes
show_step(1)
@@ -259,35 +222,31 @@
# Run Tempest smoke test suite
show_step(5)
- jenkins_creds = salt.get_cluster_jenkins_creds()
- status, description, stages = self.run_jenkins_job(
- jenkins_creds,
- self.TEMPEST_JOB_NAME,
- self.TEMPEST_JOB_PARAMETERS,
- self.JENKINS_START_TIMEOUT,
- self.JENKINS_BUILD_TIMEOUT
+ status = dt.start_job_on_cid_jenkins(
+ job_name=self.TEMPEST_JOB_NAME,
+ job_parameters=self.TEMPEST_JOB_PARAMETERS,
+ start_timeout=self.JENKINS_START_TIMEOUT,
+ build_timeout=self.JENKINS_BUILD_TIMEOUT
)
+
assert status == 'SUCCESS', (
"'{0}' job run status is {1} after executing Tempest smoke "
- "tests. Please check the build:\n{2}\n\nExecuted build "
- "stages:\n{3}".format(
- self.TEMPEST_JOB_NAME, status, description, '\n'.join(stages))
+ "tests".format(
+ self.TEMPEST_JOB_NAME, status)
)
# Run Sanity test
show_step(6)
- status, description, stages = self.run_jenkins_job(
- jenkins_creds,
- self.SANITY_JOB_NAME,
- self.SANITY_JOB_PARAMETERS,
- self.JENKINS_START_TIMEOUT,
- self.JENKINS_BUILD_TIMEOUT
+ status = dt.start_job_on_cid_jenkins(
+ job_name=self.SANITY_JOB_NAME,
+ job_parameters=self.SANITY_JOB_PARAMETERS,
+ start_timeout=self.JENKINS_START_TIMEOUT,
+ build_timeout=self.JENKINS_BUILD_TIMEOUT
)
assert status == 'SUCCESS', (
"'{0}' job run status is {1} after executing selected sanity "
- "tests. Please check the build:\n{2}\n\nExecuted build "
- "stages:\n{3}".format(
- self.SANITY_JOB_NAME, status, description, '\n'.join(stages))
+ "tests".format(
+ self.SANITY_JOB_NAME, status)
)
@pytest.mark.grab_versions
@@ -295,6 +254,7 @@
def test_restart_rgw_node(
self,
salt_actions,
+ drivetrain_actions,
underlay_actions,
show_step):
"""Verify that Ceph RGW node is not affected by system restart
@@ -313,6 +273,7 @@
"""
salt = salt_actions
ssh = underlay_actions
+ dt = drivetrain_actions
# Find Ceph RGW nodes
show_step(1)
@@ -356,35 +317,31 @@
# Run Tempest smoke test suite
show_step(5)
- jenkins_creds = salt.get_cluster_jenkins_creds()
- status, description, stages = self.run_jenkins_job(
- jenkins_creds,
- self.TEMPEST_JOB_NAME,
- self.TEMPEST_JOB_PARAMETERS,
- self.JENKINS_START_TIMEOUT,
- self.JENKINS_BUILD_TIMEOUT
+ status = dt.start_job_on_cid_jenkins(
+ job_name=self.TEMPEST_JOB_NAME,
+ job_parameters=self.TEMPEST_JOB_PARAMETERS,
+ start_timeout=self.JENKINS_START_TIMEOUT,
+ build_timeout=self.JENKINS_BUILD_TIMEOUT
)
+
assert status == 'SUCCESS', (
"'{0}' job run status is {1} after executing Tempest smoke "
- "tests. Please check the build:\n{2}\n\nExecuted build "
- "stages:\n{3}".format(
- self.TEMPEST_JOB_NAME, status, description, '\n'.join(stages))
+ "tests".format(
+ self.TEMPEST_JOB_NAME, status)
)
# Run Sanity test
show_step(6)
- status, description, stages = self.run_jenkins_job(
- jenkins_creds,
- self.SANITY_JOB_NAME,
- self.SANITY_JOB_PARAMETERS,
- self.JENKINS_START_TIMEOUT,
- self.JENKINS_BUILD_TIMEOUT
+ status = dt.start_job_on_cid_jenkins(
+ job_name=self.SANITY_JOB_NAME,
+ job_parameters=self.SANITY_JOB_PARAMETERS,
+ start_timeout=self.JENKINS_START_TIMEOUT,
+ build_timeout=self.JENKINS_BUILD_TIMEOUT
)
assert status == 'SUCCESS', (
"'{0}' job run status is {1} after executing selected sanity "
- "tests. Please check the build:\n{2}\n\nExecuted build "
- "stages:\n{3}".format(
- self.SANITY_JOB_NAME, status, description, '\n'.join(stages))
+ "tests".format(
+ self.SANITY_JOB_NAME, status)
)
# #######################################################################
diff --git a/tcp_tests/tests/system/test_update.py b/tcp_tests/tests/system/test_update.py
index 9796ade..1493fd8 100644
--- a/tcp_tests/tests/system/test_update.py
+++ b/tcp_tests/tests/system/test_update.py
@@ -2,8 +2,6 @@
from tcp_tests import logger
from tcp_tests import settings
-from tcp_tests.utils import run_jenkins_job
-from tcp_tests.utils import get_jenkins_job_stages
LOG = logger.logger
@@ -17,11 +15,12 @@
@pytest.mark.grab_versions
@pytest.mark.parametrize("_", [settings.ENV_NAME])
@pytest.mark.run_mcp_update
- def test_update_drivetrain(self, salt_actions, show_step, _):
+ def test_update_drivetrain(self, salt_actions, drivetrain_actions,
+ show_step, _):
"""Updating DriveTrain component to release/proposed/2019.2.0 version
Scenario:
- 1. Get CICD Jenkins access credentials from salt
+ 1. Add workaround for PROD-32751
2. Run job git-mirror-downstream-mk-pipelines
3. Run job git-mirror-downstream-pipeline-library
4. If jobs are passed then start 'Deploy - upgrade MCP Drivetrain'
@@ -29,17 +28,11 @@
Duration: ~35 min
"""
salt = salt_actions
- jenkins_creds = salt.get_cluster_jenkins_creds()
+ dt = drivetrain_actions
- # #################### Login Jenkins on cid01 node ###################
+ # #################### Add workaround for PROD-32751 #################
show_step(1)
- jenkins_url = jenkins_creds.get('url')
- jenkins_user = jenkins_creds.get('user')
- jenkins_pass = jenkins_creds.get('pass')
- jenkins_start_timeout = 60
- jenkins_build_timeout = 1800
-
# FIXME: workaround for PROD-32751
salt.cmd_run("cfg01*", "cd /srv/salt/reclass; git add -u && \
git commit --allow-empty -m 'Cluster model update'")
@@ -50,28 +43,11 @@
job_parameters = {
'BRANCHES': 'release/proposed/2019.2.0'
}
- update_pipelines = run_jenkins_job.run_job(
- host=jenkins_url,
- username=jenkins_user,
- password=jenkins_pass,
- start_timeout=jenkins_start_timeout,
- build_timeout=jenkins_build_timeout,
- verbose=False,
+ update_pipelines = dt.start_job_on_cid_jenkins(
job_name=job_name,
job_parameters=job_parameters)
- (description, stages) = get_jenkins_job_stages.get_deployment_result(
- host=jenkins_url,
- username=jenkins_user,
- password=jenkins_pass,
- job_name=job_name,
- build_number='lastBuild')
-
- LOG.info(description)
- LOG.info('\n'.join(stages))
-
- assert update_pipelines == 'SUCCESS', "{0}\n{1}".format(
- description, '\n'.join(stages))
+ assert update_pipelines == 'SUCCESS'
# ################### Downstream pipeline-library ####################
show_step(3)
@@ -79,66 +55,32 @@
job_parameters = {
'BRANCHES': 'release/proposed/2019.2.0'
}
- update_pipeline_library = run_jenkins_job.run_job(
- host=jenkins_url,
- username=jenkins_user,
- password=jenkins_pass,
- start_timeout=jenkins_start_timeout,
- build_timeout=jenkins_build_timeout,
- verbose=False,
+ update_pipeline_library = dt.start_job_on_cid_jenkins(
job_name=job_name,
job_parameters=job_parameters)
- (description, stages) = get_jenkins_job_stages.get_deployment_result(
- host=jenkins_url,
- username=jenkins_user,
- password=jenkins_pass,
- job_name=job_name,
- build_number='lastBuild')
-
- LOG.info(description)
- LOG.info('\n'.join(stages))
-
- assert update_pipeline_library == 'SUCCESS', "{0}\n{1}".format(
- description, '\n'.join(stages))
+ assert update_pipeline_library == 'SUCCESS'
# ################### Start 'Deploy - upgrade MCP Drivetrain' job #####
show_step(4)
- jenkins_build_timeout = 3600
job_name = 'upgrade-mcp-release'
job_parameters = {
'MK_PIPELINES_REFSPEC': 'release/proposed/2019.2.0',
'TARGET_MCP_VERSION': '2019.2.0'
}
- update_drivetrain = run_jenkins_job.run_job(
- host=jenkins_url,
- username=jenkins_user,
- password=jenkins_pass,
- start_timeout=jenkins_start_timeout,
- build_timeout=jenkins_build_timeout,
- verbose=False,
+ update_drivetrain = dt.start_job_on_cid_jenkins(
job_name=job_name,
- job_parameters=job_parameters)
+ job_parameters=job_parameters,
+ build_timeout=3600)
- (description, stages) = get_jenkins_job_stages.get_deployment_result(
- host=jenkins_url,
- username=jenkins_user,
- password=jenkins_pass,
- job_name=job_name,
- build_number='lastBuild')
-
- LOG.info(description)
- LOG.info('\n'.join(stages))
-
- assert update_drivetrain == 'SUCCESS', "{0}\n{1}".format(
- description, '\n'.join(stages))
+ assert update_drivetrain == 'SUCCESS'
@pytest.mark.grab_versions
@pytest.mark.parametrize("_", [settings.ENV_NAME])
@pytest.mark.run_mcp_update
def test_update_glusterfs(self, salt_actions, reclass_actions,
- show_step, _):
+ drivetrain_actions, show_step, _):
""" Upgrade GlusterFS
Scenario:
1. In infra/init.yml in Reclass, add the glusterfs_version parameter
@@ -150,10 +92,7 @@
"""
salt = salt_actions
reclass = reclass_actions
- jenkins_creds = salt.get_cluster_jenkins_creds()
- jenkins_url = jenkins_creds.get('url')
- jenkins_user = jenkins_creds.get('user')
- jenkins_pass = jenkins_creds.get('pass')
+ dt = drivetrain_actions
def has_only_similar(param_by_nodes):
"""
@@ -178,29 +117,13 @@
# ############## Start deploy-upgrade-galera job #####################
show_step(3)
- jenkins_build_timeout = 40 * 60
job_name = 'update-glusterfs'
- update_glusterfs = run_jenkins_job.run_job(
- host=jenkins_url,
- username=jenkins_user,
- password=jenkins_pass,
- build_timeout=jenkins_build_timeout,
- verbose=False,
- job_name=job_name)
-
- (description, stages) = get_jenkins_job_stages.get_deployment_result(
- host=jenkins_url,
- username=jenkins_user,
- password=jenkins_pass,
+ update_glusterfs = dt.start_job_on_cid_jenkins(
job_name=job_name,
- build_number='lastBuild')
+ build_timeout=40 * 60)
- LOG.info(description)
- LOG.info('\n'.join(stages))
-
- assert update_glusterfs == 'SUCCESS', "{0}\n{1}".format(
- description, '\n'.join(stages))
+ assert update_glusterfs == 'SUCCESS'
# ################ Check GlusterFS version for servers ##############
show_step(4)
@@ -223,7 +146,8 @@
@pytest.mark.grab_versions
@pytest.mark.parametrize("_", [settings.ENV_NAME])
@pytest.mark.run_mcp_update
- def test_update_galera(self, salt_actions, reclass_actions, show_step, _):
+ def test_update_galera(self, salt_actions, reclass_actions,
+ drivetrain_actions, show_step, _):
""" Upgrade Galera automatically
Scenario:
@@ -236,7 +160,7 @@
"""
salt = salt_actions
reclass = reclass_actions
- jenkins_creds = salt.get_cluster_jenkins_creds()
+ dt = drivetrain_actions
# ################### Enable pipeline #################################
show_step(1)
reclass.add_class(
@@ -262,33 +186,146 @@
# #################### Login Jenkins on cid01 node ###################
show_step(6)
- jenkins_url = jenkins_creds.get('url')
- jenkins_user = jenkins_creds.get('user')
- jenkins_pass = jenkins_creds.get('pass')
- jenkins_build_timeout = 40 * 60
job_name = 'deploy-upgrade-galera'
job_parameters = {
'INTERACTIVE': 'false'
}
- update_galera = run_jenkins_job.run_job(
- host=jenkins_url,
- username=jenkins_user,
- password=jenkins_pass,
- build_timeout=jenkins_build_timeout,
- verbose=False,
+ update_galera = dt.start_job_on_cid_jenkins(
job_name=job_name,
- job_parameters=job_parameters)
+ job_parameters=job_parameters,
+ build_timeout=40 * 60)
- (description, stages) = get_jenkins_job_stages.get_deployment_result(
- host=jenkins_url,
- username=jenkins_user,
- password=jenkins_pass,
- job_name=job_name,
- build_number='lastBuild')
+ assert update_galera == 'SUCCESS'
- LOG.info(description)
- LOG.info('\n'.join(stages))
+ @pytest.fixture
+ def disable_automatic_failover_neutron_for_test(self, salt_actions):
+ """
+ On each OpenStack controller node, modify the neutron.conf file
+ Restart the neutron-server service
+ """
+ def comment_line(node, file, word):
+ """
+            Adds '#' before a specific line in a specific file
- assert update_galera == 'SUCCESS', "{0}\n{1}".format(
- description, '\n'.join(stages))
+ :param node: string, salt target of node where the file locates
+ :param file: string, full path to the file
+            :param word: string, the beginning of the line to be commented
+ :return: None
+ """
+ salt_actions.cmd_run(node,
+ "sed -i 's/^{word}/#{word}/' {file}".
+ format(word=word,
+ file=file))
+
+ def add_line(node, file, line):
+ """
+            Appends a line to the end of the file
+
+ :param node: string, salt target of node where the file locates
+ :param file: string, full path to the file
+ :param line: string, line that should be added
+ :return: None
+ """
+ salt_actions.cmd_run(node, "echo {line} >> {file}".format(
+ line=line,
+ file=file))
+
+ neutron_conf = '/etc/neutron/neutron.conf'
+ neutron_server = "I@neutron:server"
+ # ######## Create backup for config file #######################
+ salt_actions.cmd_run(
+ neutron_server,
+ "cp -p {file} {file}.backup".format(file=neutron_conf))
+
+ # ## Change parameters in neutron.conf'
+ comment_line(neutron_server, neutron_conf,
+ "allow_automatic_l3agent_failover",)
+ comment_line(neutron_server, neutron_conf,
+ "allow_automatic_dhcp_failover")
+ add_line(neutron_server, neutron_conf,
+ "allow_automatic_dhcp_failover = false")
+ add_line(neutron_server, neutron_conf,
+ "allow_automatic_l3agent_failover = false")
+
+ # ## Apply changed config to the neutron-server service
+ salt_actions.cmd_run(neutron_server,
+ "service neutron-server restart")
+ # TODO: add check that neutron-server is up and running
+ yield True
+ # ## Revert file changes
+ salt_actions.cmd_run(
+ neutron_server,
+ "cp -p {file}.backup {file}".format(file=neutron_conf))
+ salt_actions.cmd_run(neutron_server,
+ "service neutron-server restart")
+
+ @pytest.fixture
+ def disable_neutron_agents_for_test(self, salt_actions):
+ """
+        Stop the neutron agent services for the test; start them again on teardown
+ """
+ salt_actions.cmd_run("I@neutron:server", """
+ service neutron-dhcp-agent stop && \
+ service neutron-l3-agent stop && \
+ service neutron-metadata-agent stop && \
+ service neutron-openvswitch-agent stop
+ """)
+ yield True
+        # Start the neutron agent services again
+ salt_actions.cmd_run("I@neutron:server", """
+ service neutron-dhcp-agent start && \
+ service neutron-l3-agent start && \
+ service neutron-metadata-agent start && \
+ service neutron-openvswitch-agent start
+ """)
+ # TODO: add check that all services are UP and running
+
+ @pytest.mark.grab_versions
+ @pytest.mark.parametrize("_", [settings.ENV_NAME])
+ @pytest.mark.run_mcp_update
+ def test_update_rabbit(self, salt_actions, reclass_actions,
+ drivetrain_actions, show_step, _,
+ disable_automatic_failover_neutron_for_test,
+ disable_neutron_agents_for_test):
+ """ Updates RabbitMQ
+ Scenario:
+ 1. Include the RabbitMQ upgrade pipeline job to DriveTrain
+ 2. Add repositories with new RabbitMQ packages
+ 3. Start Deploy - upgrade RabbitMQ pipeline
+
+        Updating RabbitMQ should be completed before the OpenStack update
+        process starts
+ """
+ salt = salt_actions
+ reclass = reclass_actions
+ dt = drivetrain_actions
+
+ # ####### Include the RabbitMQ upgrade pipeline job to DriveTrain ####
+ show_step(1)
+ reclass.add_class(
+ "system.jenkins.client.job.deploy.update.upgrade_rabbitmq",
+ "cluster/*/cicd/control/leader.yml")
+ salt.enforce_state("I@jenkins:client", "jenkins.client")
+
+ reclass.add_bool_key("parameters._param.openstack_upgrade_enabled",
+ "True",
+ "cluster/*/infra/init.yml")
+ salt.run_state("I@rabbitmq:server", "saltutil.refresh_pillar")
+
+ # ########### Add repositories with new RabbitMQ packages ############
+ show_step(2)
+ salt.enforce_state("I@rabbitmq:server", "linux.system.repo")
+
+ # ########### Start Deploy - upgrade RabbitMQ pipeline ############
+ show_step(3)
+ job_parameters = {
+ 'INTERACTIVE': 'false'
+ }
+
+ update_rabbit = dt.start_job_on_cid_jenkins(
+ job_name='deploy-upgrade-rabbitmq',
+ job_parameters=job_parameters,
+ build_timeout=40 * 60
+ )
+ assert update_rabbit == 'SUCCESS'