import os
import sys

import pytest

sys.path.append(os.getcwd())
try:
    from tcp_tests import logger
    from tcp_tests import settings
    from tcp_tests.fixtures import config_fixtures
    from tcp_tests.managers import underlay_ssh_manager
    from tcp_tests.managers import saltmanager as salt_manager
except ImportError:
    print("ImportError: Run the application from the tcp-qa directory or "
          "set the PYTHONPATH environment variable to the directory that "
          "contains ./tcp_tests")
    sys.exit(1)
LOG = logger.logger


def has_only_similar(values_by_nodes):
    """Check that all nodes report the same value.

    :param values_by_nodes: dict, e.g. {'ctl01': '5.5', 'ctl02': '5.5'}
    :return: bool, True if all values in the dict are equal
    """
    values = list(values_by_nodes.values())
    return all(value == values[0] for value in values)


def get_control_plane_targets():
    config = config_fixtures.config()
    underlay = underlay_ssh_manager.UnderlaySSHManager(config)
    saltmanager = salt_manager.SaltManager(config, underlay)
    targets = list()
    telemetry_exists = False
    barbican_exists = False
    try:
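        # run_state() returns [{'return': [{minion_id: result, ...}]}], so
        # the minions that answered test.ping are the keys of that mapping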
        targets += saltmanager.run_state(
            "I@keystone:server", 'test.ping')[0]['return'][0].keys()
        targets += saltmanager.run_state(
            "I@nginx:server and not I@salt:master",
            "test.ping")[0]['return'][0].keys()
        telemetry_exists = saltmanager.get_single_pillar(
            "I@salt:master",
            "_param:openstack_telemetry_hostname")
        barbican_exists = saltmanager.get_single_pillar(
            "I@salt:master",
            "_param:barbican_enabled")
    except Exception as err:
        LOG.warning("Can't retrieve data from Salt. "
                    "Maybe the cluster is not deployed completely. "
                    "Err: {}".format(err))

    # check for Manila existence
    # if saltmanager.get_single_pillar("I@salt:master",
    #                                  "_param:manila_service_protocol"):
    #     targets.append('share*')

    # check for Tenant Telemetry existence
    if telemetry_exists:
        targets.append('mdb*')

    # check for Barbican existence
    if barbican_exists:
        targets.append('kmn*')
    return targets


@pytest.fixture(scope='class')
def update_to_tag(request):
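    """Return the value of the --update-to-tag CLI option."""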
    return request.config.getoption("--update-to-tag")


@pytest.fixture(scope='class')
def update_mysql_to_version(request):
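    """Return the value of the --update-mysql-version CLI option."""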
    return request.config.getoption("--update-mysql-version")


@pytest.fixture(scope='class')
def switch_to_proposed_pipelines(reclass_actions, salt_actions,
                                 update_to_tag):
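    """Point reclass to the release/proposed/2019.2.0 pipelines and repos."""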
    if update_to_tag:
        return True

    reclass = reclass_actions
    proposed_repo = "http://mirror.mirantis.com/update/proposed/"
    repo_param = "parameters._param.linux_system_repo_update_url"

    proposed_pipeline_branch = "release/proposed/2019.2.0"
    pipeline_branch_param = "parameters._param.jenkins_pipelines_branch"
    infra_yml = "cluster/*/infra/init.yml"

    LOG.info("Check reclass has release/proposed/2019.2.0 branches")
    if reclass.get_key(pipeline_branch_param,
                       infra_yml) == proposed_pipeline_branch \
            and reclass.get_key(repo_param, infra_yml) == proposed_repo:
        return True

    LOG.info("Switch to release/proposed/2019.2.0 branches")
    reclass.add_key(pipeline_branch_param, proposed_pipeline_branch, infra_yml)

    reclass.add_key(repo_param, proposed_repo, infra_yml)
    reclass.add_key(repo_param, proposed_repo, "cluster/*/openstack/init.yml")
    reclass.add_key(repo_param, proposed_repo, "cluster/*/stacklight/init.yml")
    reclass.add_key(repo_param, proposed_repo, "cluster/*/ceph/init.yml")

    salt_actions.run_state("*", "saltutil.refresh_pillar")
    salt_actions.enforce_state("*", "salt.minion")
    salt_actions.enforce_state("I@jenkins:client", "jenkins.client")


@pytest.fixture
def wa_for_galera_clustercheck_password_prod35705(reclass_actions,
                                                  salt_actions):
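    """Seed galera_clustercheck_password if it is not defined (PROD-35705)."""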
    tgt = "I@galera:master or I@galera:slave"
    if not salt_actions.get_pillar(tgt,
                                   "_param:galera_clustercheck_password")[0]:
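        # use a dummy 32-character string as the generated password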
        reclass_actions.add_key(
            "parameters._param.galera_clustercheck_password",
            "a"*32,
            "cluster/*/infra/secrets.yml")
        salt_actions.run_state(tgt, "saltutil.refresh_pillar")
        salt_actions.enforce_state(tgt, "galera")
        salt_actions.enforce_state(tgt, "haproxy")
        reclass_actions.commit(
            "[from TCP-QA] Add galera_clustercheck_password")
    else:
        LOG.info("Skipping WA for Galera Clustercheck Password")


def wa_cve_2021_20288_global_id_reclaim(reclass_actions,
                                        salt_actions):
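    """Disable insecure global_id reclaim on Ceph mons (CVE-2021-20288).

    Note: this is a plain helper rather than a pytest fixture because it
    is called directly from test_update_ceph.
    """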
    tgt = "I@ceph:common"
    context_file = "cluster/*/ceph/common.yml"
    if not reclass_actions.check_existence(
            'parameters.ceph.common.config.mon.' +
            'auth_allow_insecure_global_id_reclaim'):
        if salt_actions.get_single_pillar(
                "I@ceph:mgr and *01.*",
                "_param:ceph_version") == 'luminous':
            LOG.info('Skip reclaim set')
            return
        reclass_actions.add_bool_key(
            'parameters.ceph.common.config.mon.' +
            'auth_allow_insecure_global_id_reclaim', "False", context_file)
        salt_actions.run_state(tgt, "state.apply", "ceph.common")
        salt_actions.cmd_run(tgt, "systemctl restart ceph-mon.target")
    else:
        LOG.info("Skipping WA ceph set auth_allow_insecure_global_id_reclaim")


@pytest.fixture
def wa_for_alerta_password_prod35958(reclass_actions,
                                     salt_actions):
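    """Generate an Alerta admin API key if it is not defined (PROD-35958)."""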
    if not salt_actions.get_pillar("I@prometheus:alerta",
                                   "_param:alerta_admin_api_key_generated")[0]:
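        # "a"*32 acts as a dummy pre-generated admin API key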
        reclass_actions.add_key(
            "parameters._param.alerta_admin_api_key_generated",
            "a"*32,
            "cluster/*/infra/secrets.yml")
        reclass_actions.add_key(
            "parameters._param.alerta_admin_key",
            "${_param:alerta_admin_api_key_generated}",
            "cluster/*/stacklight/init.yml")
        reclass_actions.commit("[from TCP-QA] Add alerta_admin_key")
        salt_actions.run_state(
            "I@prometheus:alerta or I@prometheus:alertmanager",
            "saltutil.refresh_pillar")
        salt_actions.enforce_state(
            "I@prometheus:alerta", "prometheus.alerta")
        salt_actions.enforce_state(
            "I@prometheus:alertmanager", "prometheus.alertmanager")
        salt_actions.enforce_state(
            "I@prometheus:alerta or I@prometheus:alertmanager",
            "docker.client")
    else:
        LOG.info("Skipping WA for Alerta API key")


@pytest.fixture
def wa_redis_server_version_3_prod36960(reclass_actions, salt_actions):
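    """Pin redis-server to version 5.0 on the Telemetry nodes (PROD-36960)."""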
    version_pillar = "redis:server:version"
    tgt = "I@{}".format(version_pillar)
    pillar_file = "cluster/*/openstack/telemetry.yml"
    redis_target_version = "5.0"

    dbs_nodes = salt_actions.get_minions_by_target(tgt)
    if not dbs_nodes:
        LOG.info("dbs* nodes not found, skipping")
        return
    dbs_node = dbs_nodes[0]

    if str(salt_actions.get_single_pillar(
            dbs_node,
            version_pillar)) == redis_target_version:
        LOG.info("Redis-server is already '{}', skipping".format(
            redis_target_version))
        return

    LOG.info("Setting redis-server version to '{}'".format(
        redis_target_version))
    reclass_actions.add_key(
        "parameters.{}".format(version_pillar.replace(":", ".")),
        redis_target_version,
        pillar_file)
    salt_actions.run_state(" or ".join(dbs_nodes),
                           "saltutil.refresh_pillar")


@pytest.fixture(scope='class')
def enable_openstack_update(reclass_actions, salt_actions):
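    """Toggle openstack_upgrade_enabled around the tests of the class."""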
    param = "parameters._param.openstack_upgrade_enabled"
    context_file = "cluster/*/infra/init.yml"

    LOG.info("Enable openstack_upgrade_enabled in reclass")
    reclass_actions.add_bool_key(param, "True", context_file)
    salt_actions.run_state("*", "saltutil.refresh_pillar")
    yield True
    LOG.info("Disable openstack_upgrade_enabled in reclass")
    reclass_actions.add_bool_key(param, "False", context_file)
    salt_actions.run_state("*", "saltutil.refresh_pillar")


@pytest.fixture(scope='class')
def switch_to_staging_updates_mirantis_repo(update_to_tag,
                                            reclass_actions,
                                            salt_actions):
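    """Switch updates.mirantis.com to /staging, or pin it to update_to_tag."""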
    version_param = "parameters._param.updates_mirantis_version"
    infra_yml = "cluster/*/infra/init.yml"
    current_version = reclass_actions.get_key(version_param, infra_yml)

    if update_to_tag is not None:
        LOG.info("updates.mirantis.com is pinned to tag '{}', "
                 "skipping /staging repo pillar".format(update_to_tag))
        reclass_actions.add_key(version_param, update_to_tag, infra_yml)
        salt_actions.run_state("*", "saltutil.refresh_pillar")
        return
    if current_version == 'staging':
        LOG.info("updates.mirantis.com already set to 'staging', skipping")
        return
    if current_version is None:
        LOG.info("updates.mirantis.com is not enabled, "
                 "skipping /staging repo pillar")
        return

    LOG.info("Switch to updates.mirantis.com/staging repo")
    reclass_actions.add_key(version_param, 'staging', infra_yml)

    salt_actions.run_state("*", "saltutil.refresh_pillar")


@pytest.mark.usefixtures("switch_to_proposed_pipelines",
                         "update_to_tag",
                         "wa_for_galera_clustercheck_password_prod35705",
                         "wa_for_alerta_password_prod35958",
                         "wa_redis_server_version_3_prod36960",
                         "switch_to_staging_updates_mirantis_repo")
class TestUpdateMcpCluster(object):
    """
    Following the steps in
    https://docs.mirantis.com/mcp/master/mcp-operations-guide/update-upgrade/minor-update.html#minor-update
    """

    @pytest.mark.grab_versions
    @pytest.mark.parametrize("_", [settings.ENV_NAME])
    @pytest.mark.run_mcp_update
    def test_update_drivetrain(self, salt_actions, drivetrain_actions,
                               show_step, update_to_tag, _):
        """Updating DriveTrain component to release/proposed/2019.2.0 version

        Scenario:
            1. Add workaround for PROD-32751
            2. Run job git-mirror-downstream-mk-pipelines
            3. Run job git-mirror-downstream-pipeline-library
            4. Run job pre-upgrade-verify
            5. If the jobs pass, start 'Deploy - upgrade MCP Drivetrain'
            6. Wait till salt-master completes all async jobs

        Duration: ~70 min
        """
        salt = salt_actions
        dt = drivetrain_actions

        # #################### Add workaround for PROD-32751 #################
        show_step(1)

        # FIXME: workaround for PROD-32751
        salt.cmd_run("cfg01*", "cd /srv/salt/reclass; git add -u && \
                        git commit --allow-empty -m 'Cluster model update'")

        # ################### Downstream mk-pipelines ########################
        show_step(2)
        job_name = 'git-mirror-downstream-mk-pipelines'
        job_parameters = {
            # '*' mirrors all branches, including release/proposed/2019.2.0
            'BRANCHES': '*'
        }
        job_result, job_description = dt.start_job_on_jenkins(
            job_name=job_name,
            job_parameters=job_parameters,
            verbose=True)

        assert job_result == 'SUCCESS', job_description

        # ################### Downstream pipeline-library ####################
        show_step(3)
        job_name = 'git-mirror-downstream-pipeline-library'
        job_parameters = {
            # '*' mirrors all branches, including release/proposed/2019.2.0
            'BRANCHES': '*'
        }
        job_result, job_description = dt.start_job_on_jenkins(
            job_name=job_name,
            job_parameters=job_parameters,
            verbose=True)

        assert job_result == 'SUCCESS', job_description

        # ########## Start 'Pre-upgrade verify' job (since 2019.2.17) ########
        show_step(4)
        job_name = 'pre-upgrade-verify'
        job_parameters = {}

        if dt.check_job_exists_on_jenkins(job_name):
            LOG.info("Pre-upgrade verify job exists on Jenkins!")
            job_result, job_description = dt.start_job_on_jenkins(
                job_name=job_name,
                job_parameters=job_parameters,
                verbose=True,
                build_timeout=4 * 60 * 60)

            assert job_result == 'SUCCESS', job_description

        # ########### Start 'Deploy - upgrade MCP Drivetrain' job ############
        show_step(5)

        job_name = 'upgrade-mcp-release'
        job_parameters = {
            'GIT_REFSPEC': update_to_tag or 'release/proposed/2019.2.0',
            'MK_PIPELINES_REFSPEC':
                update_to_tag or 'release/proposed/2019.2.0',
            'TARGET_MCP_VERSION': update_to_tag or '2019.2.0',
            "DRIVE_TRAIN_PARAMS": {
                "OS_DIST_UPGRADE": True,
                "OS_UPGRADE": True,
                "BATCH_SIZE": 10
            }
        }
        job_result, job_description = dt.start_job_on_jenkins(
            job_name=job_name,
            job_parameters=job_parameters,
            verbose=True,
            build_timeout=4 * 60 * 60)

        assert job_result == 'SUCCESS', job_description
        # ########## Wait till salt-master completes all async jobs ##########
        show_step(6)
        salt.wait_jobs_completed(timeout=20*60)

    @pytest.mark.grab_versions
    @pytest.mark.parametrize("_", [settings.ENV_NAME])
    @pytest.mark.run_mcp_update
    def test_update_glusterfs(self, salt_actions, reclass_actions,
                              drivetrain_actions, show_step,
                              update_mysql_to_version, _):
        """ Upgrade GlusterFS
        Scenario:
        1. In infra/init.yml in Reclass, add the glusterfs_version parameter
        2. Start linux.system.repo state
        3. Start "update-glusterfs" job
        4. Check version for GlusterFS servers
        5. Check version for GlusterFS clients

        """

        salt = salt_actions
        reclass = reclass_actions
        dt = drivetrain_actions

        # ############## Change reclass ######################################
        show_step(1)
        reclass.add_key(
            "parameters._param.linux_system_repo_mcp_glusterfs_version_number",
            "5",
            "cluster/*/infra/init.yml"
        )
        # ################# Run linux.system state ###########################
        show_step(2)
        salt.enforce_state("*", "linux.system.repo")

        # ############## Start update-glusterfs job ##########################
        show_step(3)
        job_name = 'update-glusterfs'

        job_result, job_description = dt.start_job_on_jenkins(
            job_name=job_name,
            build_timeout=40 * 60)

        assert job_result == 'SUCCESS', job_description

        # ################ Check GlusterFS version for servers ##############
        show_step(4)
        gluster_server_versions_by_nodes = salt.cmd_run(
            "I@glusterfs:server",
            "glusterd --version|head -n1")[0]

        assert has_only_similar(gluster_server_versions_by_nodes), \
            gluster_server_versions_by_nodes

        # ################ Check GlusterFS version for clients ##############
        show_step(5)
        gluster_client_versions_by_nodes = salt.cmd_run(
            "I@glusterfs:client",
            "glusterfs --version|head -n1")[0]

        assert has_only_similar(gluster_client_versions_by_nodes), \
            gluster_client_versions_by_nodes

    @pytest.mark.grab_versions
    @pytest.mark.parametrize("_", [settings.ENV_NAME])
    @pytest.mark.run_mcp_update
    def test_update_galera(self, salt_actions, reclass_actions,
                           drivetrain_actions, show_step,
                           update_mysql_to_version, _):
        """ Upgrade Galera automatically

        Scenario:
            1. Include the Galera upgrade pipeline job to DriveTrain
            2. Add fix for mysql 5.7
            3. Apply the jenkins.client state on the Jenkins nodes
            4. Set the openstack_upgrade_enabled parameter to true
            5. Refresh pillars
            6. Add repositories with new Galera packages
            7. Start job from Jenkins
        """
        salt = salt_actions
        reclass = reclass_actions
        dt = drivetrain_actions
        # ################### Enable pipeline ################################
        show_step(1)
        reclass.add_class(
            "system.jenkins.client.job.deploy.update.upgrade_galera",
            "cluster/*/cicd/control/leader.yml")
        # ################### Fix for Mysql 5.7 ##############################
        show_step(2)
        if update_mysql_to_version == '5.7':
            mysql_class = "cluster.{env_name}.openstack." \
                          "database.mysql_version"\
                .format(env_name=settings.ENV_NAME)
            reclass.add_class(
                mysql_class,
                "cluster/*/openstack/database/master.yml")
            reclass.add_class(
                mysql_class,
                "cluster/*/openstack/database/slave.yml")
            mysql_path = "cluster/{env_name}" \
                         "/openstack/database/mysql_version.yml"\
                .format(env_name=settings.ENV_NAME)
            galera_mysql_version = update_mysql_to_version  # == '5.7' here
            mysql_version = "parameters:\n" \
                            "  _param:\n" \
                            "    galera_mysql_version: {}"\
                .format(galera_mysql_version)
            if not os.path.isfile("/srv/salt/reclass/classes/{path}".format(
                                      path=mysql_path)):
                reclass.create_yaml_with_context(mysql_version,
                                                 mysql_path)
            else:
                reclass.merge_context(mysql_version,
                                      mysql_path)
            reclass_actions.commit(
                "[from TCP-QA] Change for mysql new version")

            show_step(3)
            salt.run_state("I@jenkins:client and not I@salt:master",
                           "saltutil.refresh_pillar")
            salt.enforce_state("I@jenkins:client and not I@salt:master",
                               "jenkins.client.job")
        else:
            LOG.info("Skip mysql upgrade fix")
        # ############### Enable automatic upgrade ###########################
        show_step(4)
        reclass.add_bool_key("parameters._param.openstack_upgrade_enabled",
                             "True",
                             "cluster/*/infra/init.yml")

        show_step(5)
        salt.run_state("dbs*", "saltutil.refresh_pillar")

        # ############# Add repositories with new Galera packages ###########
        show_step(6)
        salt.enforce_state("dbs*", "linux.system.repo")
        salt.enforce_state("cfg*", "salt.master")

        # #################### Start 'deploy-upgrade-galera' job #############
        show_step(7)

        job_name = 'deploy-upgrade-galera'
        job_parameters = {
            'INTERACTIVE': 'false',
            'OS_DIST_UPGRADE': 'true',
            'OS_UPGRADE': 'true',
        }
        if update_mysql_to_version == '5.7':
            job_parameters.update({'UPDATE_TO_MYSQL57': 'true'})

        job_result, job_description = dt.start_job_on_jenkins(
            job_name=job_name,
            job_parameters=job_parameters,
            build_timeout=60 * 60)

        assert job_result == 'SUCCESS', job_description

    @pytest.fixture
    def disable_automatic_failover_neutron_for_test(self, salt_actions):
        """
        Disable automatic L3/DHCP agent failover in neutron.conf on each
        OpenStack controller node and restart the neutron-server service;
        revert the config after the test
        """

        def comment_line(node, file_name, word):
            """
            Prepend '#' to the lines that start with the given word

            :param node: string, salt target of the node with the file
            :param file_name: string, full path to the file
            :param word: string, beginning of the line to be commented out
            :return: None
            """
            salt_actions.cmd_run(node,
                                 "sed -i 's/^{word}/#{word}/' {file}".
                                 format(word=word,
                                        file=file_name))

        def add_line(node, file_name, line):
            """
            Append a line to the end of the file

            :param node: string, salt target of the node with the file
            :param file_name: string, full path to the file
            :param line: string, line that should be added
            :return: None
            """
            salt_actions.cmd_run(node, "echo {line} >> {file}".format(
                line=line,
                file=file_name))

        neutron_conf = '/etc/neutron/neutron.conf'
        neutron_server = "I@neutron:server"
        # ######## Create backup for config file ########################
        salt_actions.cmd_run(
            neutron_server,
            "cp -p {file} {file}.backup".format(file=neutron_conf))

        # ## Change parameters in neutron.conf
        comment_line(neutron_server, neutron_conf,
                     "allow_automatic_l3agent_failover")
        comment_line(neutron_server, neutron_conf,
                     "allow_automatic_dhcp_failover")
        add_line(neutron_server, neutron_conf,
                 "allow_automatic_dhcp_failover = false")
        add_line(neutron_server, neutron_conf,
                 "allow_automatic_l3agent_failover = false")

        # ## Apply changed config to the neutron-server service
        result = salt_actions.cmd_run(neutron_server,
                                      "service neutron-server restart")
        # TODO: add check that neutron-server is up and running
        yield result
        # ## Revert file changes
        salt_actions.cmd_run(
            neutron_server,
            "cp -p {file}.backup {file}".format(file=neutron_conf))
        salt_actions.cmd_run(neutron_server,
                             "service neutron-server restart")

    @pytest.fixture
    def disable_neutron_agents_for_test(self, salt_actions):
        """
        Stop the neutron agents before the test and
        start them again after the test
        """
        result = salt_actions.cmd_run("I@neutron:server", """
                service neutron-dhcp-agent stop && \
                service neutron-l3-agent stop && \
                service neutron-metadata-agent stop && \
                service neutron-openvswitch-agent stop
                """)
        yield result
        # ## Start the neutron agents back after the test
        salt_actions.cmd_run("I@neutron:server", """
                service neutron-dhcp-agent start && \
                service neutron-l3-agent start && \
                service neutron-metadata-agent start && \
                service neutron-openvswitch-agent start
                """)
        # TODO: add check that all services are UP and running

    @pytest.mark.grab_versions
    @pytest.mark.parametrize("_", [settings.ENV_NAME])
    @pytest.mark.run_mcp_update
    def test_update_rabbit(self, salt_actions, reclass_actions,
                           drivetrain_actions, show_step, _,
                           disable_automatic_failover_neutron_for_test,
                           disable_neutron_agents_for_test):
        """ Updates RabbitMQ
        Scenario:
            1. Include the RabbitMQ upgrade pipeline job to DriveTrain
            2. Add repositories with new RabbitMQ packages
            3. Start Deploy - upgrade RabbitMQ pipeline

        Updating RabbitMQ should be completed before the OpenStack update
        process starts
        """
        salt = salt_actions
        reclass = reclass_actions
        dt = drivetrain_actions

        # ####### Include the RabbitMQ upgrade pipeline job to DriveTrain ####
        show_step(1)
        reclass.add_class(
            "system.jenkins.client.job.deploy.update.upgrade_rabbitmq",
            "cluster/*/cicd/control/leader.yml")
        salt.enforce_state("I@jenkins:client", "jenkins.client")

        reclass.add_bool_key("parameters._param.openstack_upgrade_enabled",
                             "True",
                             "cluster/*/infra/init.yml")
        salt.run_state("I@rabbitmq:server", "saltutil.refresh_pillar")

        # ########### Add repositories with new RabbitMQ packages ############
        show_step(2)
        salt.enforce_state("I@rabbitmq:server", "linux.system.repo")

        # ########### Start Deploy - upgrade RabbitMQ pipeline ###############
        show_step(3)
        job_parameters = {
            'INTERACTIVE': 'false',
            'OS_DIST_UPGRADE': 'true',
            'OS_UPGRADE': 'true'
        }

        job_result, job_description = dt.start_job_on_jenkins(
            job_name='deploy-upgrade-rabbitmq',
            job_parameters=job_parameters,
            build_timeout=50 * 60
        )
        assert job_result == 'SUCCESS', job_description

    @pytest.mark.grab_versions
    @pytest.mark.parametrize("_", [settings.ENV_NAME])
    @pytest.mark.run_mcp_update
    def test_update_ceph(self, salt_actions, drivetrain_actions,
                         reclass_actions, show_step, _):
        """ Updates Ceph to the latest minor version

        Scenario:
            1. Add workaround for unhealthy Ceph
            2. Start ceph-upgrade job with default parameters
            3. Check Ceph version for all nodes
            4. Apply the id_reclaim WA (nautilus only)

        https://docs.mirantis.com/mcp/master/mcp-operations-guide/update-upgrade/minor-update/ceph-update.html
        """
        salt = salt_actions
        reclass = reclass_actions
        dt = drivetrain_actions

        # ###################### Add workaround for unhealthy Ceph ###########
        show_step(1)
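        # Raise the 'pg warn max object skew' threshold so that the object
        # count imbalance does not drive Ceph health to WARN during the update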
        salt.cmd_run("I@ceph:radosgw",
                     "ceph config set 'mon pg warn max object skew' 20")
        # ###################### Start ceph-upgrade pipeline #################
        show_step(2)
        job_parameters = {}

        job_result, job_description = dt.start_job_on_jenkins(
            job_name='ceph-update',
            job_parameters=job_parameters)

        assert job_result == 'SUCCESS', job_description

        # ########## Verify Ceph version #####################################
        show_step(3)

        ceph_version_by_nodes = salt.cmd_run(
            "I@ceph:* and not I@ceph:monitoring and not I@ceph:backup:server",
            "ceph version")[0]

        assert has_only_similar(ceph_version_by_nodes), ceph_version_by_nodes

        # ########## WA id_reclaim only for nautilus #########################
        show_step(4)

        ceph_version = salt_actions.get_single_pillar(
            "I@salt:master",
            "_param:ceph_version")
        if ceph_version == "nautilus":
            wa_cve_2021_20288_global_id_reclaim(reclass, salt)
        else:
            LOG.info("Skip WA for luminous")

    @pytest.mark.grab_versions
    @pytest.mark.parametrize("_", [settings.ENV_NAME])
    @pytest.mark.run_mcp_update
    def test_update_stacklight(self, _, drivetrain_actions):
        """ Update packages for Stacklight
        Scenario:
        1. Start Deploy - upgrade Stacklight job
        """
        drivetrain = drivetrain_actions

        job_parameters = {
            "STAGE_UPGRADE_DOCKER_COMPONENTS": True,
            "STAGE_UPGRADE_ES_KIBANA": True,
            "STAGE_UPGRADE_SYSTEM_PART": True,
            'OS_DIST_UPGRADE': 'true',
            'OS_UPGRADE': 'true'
        }
        job_result, job_description = drivetrain.start_job_on_jenkins(
            job_name="stacklight-upgrade",
            job_parameters=job_parameters)

        assert job_result == 'SUCCESS', job_description


@pytest.mark.usefixtures("switch_to_proposed_pipelines",
                         "enable_openstack_update")
class TestOpenstackUpdate(object):

    @pytest.mark.grab_versions
    @pytest.mark.run_mcp_update
    def test__pre_update__enable_pipeline_job(self,
                                              reclass_actions, salt_actions,
                                              show_step):
        """ Enable pipeline in the Drivetrain

        Scenario:
        1. Add deploy.update.* classes to the reclass
        2. Start jenkins.client salt state

        """
        salt = salt_actions
        reclass = reclass_actions
        show_step(1)
        reclass.add_class("system.jenkins.client.job.deploy.update.upgrade",
                          "cluster/*/cicd/control/leader.yml")

        reclass.add_class(
            "system.jenkins.client.job.deploy.update.upgrade_ovs_gateway",
            "cluster/*/cicd/control/leader.yml")

        reclass.add_class(
            "system.jenkins.client.job.deploy.update.upgrade_compute",
            "cluster/*/cicd/control/leader.yml")

        show_step(2)
        r, errors = salt.enforce_state("I@jenkins:client", "jenkins.client")
        assert errors is None

    @pytest.mark.grab_versions
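    # NOTE: get_control_plane_targets() runs at test collection time, so a
    # reachable salt master is required even to list these tests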
    @pytest.mark.parametrize('target', get_control_plane_targets())
    @pytest.mark.run_mcp_update
    def test__update__control_plane(self, drivetrain_actions, target):
        """Start 'Deploy - upgrade control VMs' for specific node
        """
        job_parameters = {
            "TARGET_SERVERS": target,
            "OS_DIST_UPGRADE": True,
            "UPGRADE_SALTSTACK": True,
            "OS_UPGRADE": True,
            "INTERACTIVE": False}
        job_result, job_description = drivetrain_actions.start_job_on_jenkins(
            job_name="deploy-upgrade-control",
            job_parameters=job_parameters)

        assert job_result == 'SUCCESS', job_description

    @pytest.mark.grab_versions
    @pytest.mark.run_mcp_update
    def test__update__data_plane(self, drivetrain_actions, salt_actions):
        """Start 'Deploy - upgrade OVS gateway'
        """
        if not salt_actions.cmd_run("gtw*", "test.ping")[0].keys():
            pytest.skip("This deployment doesn't have gtw* nodes")
        job_parameters = {
            "OS_DIST_UPGRADE": True,
            "OS_UPGRADE": True,
            "INTERACTIVE": False}
        job_result, job_description = drivetrain_actions.start_job_on_jenkins(
            job_name="deploy-upgrade-ovs-gateway",
            job_parameters=job_parameters)

        assert job_result == 'SUCCESS', job_description

    @pytest.mark.grab_versions
    @pytest.mark.run_mcp_update
    def test__update__computes(self, drivetrain_actions):
        """Start 'Deploy - upgrade computes'
        """
        job_parameters = {
            "OS_DIST_UPGRADE": True,
            "OS_UPGRADE": True,
            "INTERACTIVE": False}
        job_result, job_description = drivetrain_actions.start_job_on_jenkins(
            job_name="deploy-upgrade-compute",
            job_parameters=job_parameters)

        assert job_result == 'SUCCESS', job_description