Merge "[CVP,master] Fixing path to report and xpath for cvp-spt"
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 63d7b52..29f03fe 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -347,7 +347,7 @@
                 }
 
                 for (i in common.entries(smc)) {
-                    sh "sed -i 's,export ${i[0]}=.*,export ${i[0]}=\"${i[1]}\",' user_data"
+                    sh "sed -i 's,export ${i[0]}=.*,export ${i[0]}=\${${i[0]}:-\"${i[1]}\"},' user_data"
                 }
 
                 // calculate netmask
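
A note on the sed change above: the rewritten template emits shell parameter expansion into user_data, so a variable that is already exported in the environment wins over the generated default (VAR=${VAR:-"default"} keeps $VAR when it is set and non-empty, and falls back to "default" otherwise). A minimal Groovy sketch of the line the new template renders (the key/value pair below is hypothetical):

    // Hypothetical [key, value] pair; common.entries(smc) in the hunk yields such pairs.
    def key = 'SALT_MASTER_DEPLOY_IP'
    def value = '192.168.10.90'
    // old rendering: export SALT_MASTER_DEPLOY_IP="192.168.10.90"
    // new rendering: export SALT_MASTER_DEPLOY_IP=${SALT_MASTER_DEPLOY_IP:-"192.168.10.90"}
    println "export ${key}=\${${key}:-\"${value}\"}"
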
@@ -416,10 +416,31 @@
                 archiveArtifacts artifacts: "${context['cluster_name']}.tar.gz"
 
                 if (RequesterEmail != '' && !RequesterEmail.contains('example')) {
-                    emailext(to: RequesterEmail,
-                        attachmentsPattern: "output-${context['cluster_name']}/*",
-                        body: "Mirantis Jenkins\n\nRequested reclass model ${context['cluster_name']} has been created and attached to this email.\nEnjoy!\n\nMirantis",
-                        subject: "Your Salt model ${context['cluster_name']}")
+                    def mailSubject = "Your Salt model ${context['cluster_name']}"
+                    if (context.get('send_method') == 'gcs') {
+                        def gcs = new com.mirantis.mk.GoogleCloudStorage()
+                        def uploadIsos = [ "output-${context['cluster_name']}/${context['salt_master_hostname']}.${context['cluster_domain']}-config.iso" ]
+                        if (context['local_repositories'] == 'True') {
+                            uploadIsos << "output-${context['cluster_name']}/${aptlyServerHostname}.${context['cluster_domain']}-config.iso"
+                        }
+                        // generate a random hash so the download link is unique and unguessable
+                        def randHash = common.generateRandomHashString(64)
+                        def config = [
+                            'creds': context['gcs_creds'],
+                            'project': context['gcs_project'],
+                            'dest': "gs://${context['gcs_bucket']}/${randHash}",
+                            'sources': uploadIsos
+                        ]
+                        def fileURLs = gcs.uploadArtifactToGoogleStorageBucket(config).join(' ').replace('gs://', 'https://storage.googleapis.com/')
+                        emailext(to: RequesterEmail,
+                            body: "Mirantis Jenkins\n\nRequested reclass model ${context['cluster_name']} has been created and available to download via next URL: ${fileURLs} within 7 days.\nEnjoy!\n\nMirantis",
+                            subject: mailSubject)
+                    } else {
+                        emailext(to: RequesterEmail,
+                            attachmentsPattern: "output-${context['cluster_name']}/*",
+                            body: "Mirantis Jenkins\n\nRequested reclass model ${context['cluster_name']} has been created and attached to this email.\nEnjoy!\n\nMirantis",
+                            subject: mailSubject)
+                    }
                 }
                 dir("output-${context['cluster_name']}") {
                     deleteDir()
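
For the send_method == 'gcs' branch above: instead of attaching the config ISO(s) to the email, the pipeline uploads them under a random 64-character prefix and mails direct links; the random prefix is what makes the object URLs practically unguessable. A minimal sketch of the gs:// to HTTPS rewrite, assuming uploadArtifactToGoogleStorageBucket() returns the list of uploaded gs:// URIs (bucket, hash and file name below are made up):

    // Hypothetical return value of gcs.uploadArtifactToGoogleStorageBucket(config):
    def uploaded = ['gs://model-bucket/abc123hash/cfg01.example.local-config.iso']
    def fileURLs = uploaded.join(' ').replace('gs://', 'https://storage.googleapis.com/')
    assert fileURLs == 'https://storage.googleapis.com/model-bucket/abc123hash/cfg01.example.local-config.iso'
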
diff --git a/git-mirror-pipeline.groovy b/git-mirror-pipeline.groovy
index fa49bbc..6f14866 100644
--- a/git-mirror-pipeline.groovy
+++ b/git-mirror-pipeline.groovy
@@ -5,6 +5,22 @@
     timeout(time: 12, unit: 'HOURS') {
         node() {
             try {
+                def sourceCreds = env.SOURCE_CREDENTIALS
+                if (sourceCreds && common.getCredentialsById(sourceCreds, 'password')) {
+                    withCredentials([
+                            [$class          : 'UsernamePasswordMultiBinding',
+                             credentialsId   : sourceCreds,
+                             passwordVariable: 'GIT_PASS',
+                             usernameVariable: 'GIT_USER']
+                    ]) {
+                        sh """
+                            set +x
+                            git config --global credential.${SOURCE_URL}.username \${GIT_USER}
+                            echo "echo \${GIT_PASS}" > askpass.sh && chmod +x askpass.sh
+                        """
+                        env.GIT_ASKPASS = "${env.WORKSPACE}/askpass.sh"
+                    }
+                }
                 if (BRANCHES == '*' || BRANCHES.contains('*')) {
                     branches = git.getBranchesForGitRepo(SOURCE_URL, BRANCHES)
                 } else {
@@ -18,7 +34,8 @@
                 dir('source') {
                     checkout changelog: true, poll: true,
                         scm: [$class    : 'GitSCM', branches: pollBranches, doGenerateSubmoduleConfigurations: false,
-                              extensions: [[$class: 'CleanCheckout']], submoduleCfg: [], userRemoteConfigs: [[credentialsId: CREDENTIALS_ID, url: SOURCE_URL]]]
+                              extensions: [[$class: 'CleanCheckout']], submoduleCfg: [],
+                              userRemoteConfigs: [[credentialsId: sourceCreds, url: SOURCE_URL]]]
                     git.mirrorGit(SOURCE_URL, TARGET_URL, CREDENTIALS_ID, branches, true)
                 }
             } catch (Throwable e) {
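
How the credential wiring above works: git consults the GIT_ASKPASS environment variable whenever it needs a password, runs the script it points to, and reads the password from the script's stdout, while credential.<url>.username supplies the user name; the secret therefore never appears in the remote URL or on a command line. The getCredentialsById(..., 'password') guard means this path is only taken for username/password credentials; SSH-key credentials keep using the plain credentialsId checkout, and the finally block below unsets the global username and wipes the workspace so the askpass script does not outlive the build. A standalone sketch of the mechanism (URL and password are illustrative):

    // Build an askpass script and hand it to git via the environment.
    def askpass = new File('/tmp/askpass.sh')
    askpass.text = '#!/bin/sh\necho "${GIT_PASS}"\n'
    askpass.setExecutable(true)
    def pb = new ProcessBuilder('git', 'ls-remote', 'https://source.example.org/repo.git')
    pb.environment().put('GIT_ASKPASS', askpass.absolutePath)
    pb.environment().put('GIT_PASS', 'secret-from-jenkins')  // from withCredentials in the pipeline
    pb.start().waitFor()
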
@@ -26,6 +43,9 @@
                 currentBuild.result = 'FAILURE'
                 currentBuild.description = currentBuild.description ? e.message + '' + currentBuild.description : e.message
                 throw e
+            } finally {
+                sh "git config --global --unset credential.${SOURCE_URL}.username || true"
+                deleteDir()
             }
         }
     }
diff --git a/openstack-control-upgrade.groovy b/openstack-control-upgrade.groovy
index f2dd78c..4ec98da 100644
--- a/openstack-control-upgrade.groovy
+++ b/openstack-control-upgrade.groovy
@@ -173,6 +173,9 @@
         if (OS_DIST_UPGRADE.toBoolean() == true || OS_UPGRADE.toBoolean() == true) {
           debian.osUpgradeNode(env, target, upgrade_mode, false)
         }
+        // Workaround for PROD-31413: install python-tornado from the latest release, if available,
+        // and restart the Salt minion so the new code is applied.
+        salt.upgradePackageAndRestartSaltMinion(env, target, 'python-tornado')
       }
 
       common.stageWrapper(upgradeStageMap, "Upgrade OpenStack", target, interactive) {
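
On the workaround above (repeated in openstack-data-upgrade.groovy below): PROD-31413 refers to a salt-minion failure against an outdated python-tornado, so the pipeline installs the newest available package and restarts the minion so the freshly installed code is actually loaded. A rough sketch of what such a helper has to do, assuming the real salt.upgradePackageAndRestartSaltMinion() differs in detail:

    // Hypothetical sketch, not the library implementation.
    def upgradePackageAndRestartSaltMinionSketch(venv, target, packageName) {
        // install/upgrade the package on the targeted minions ...
        salt.runSaltProcessStep(venv, target, 'pkg.install', [packageName])
        // ... then restart salt-minion so the running process picks up the new code
        salt.runSaltProcessStep(venv, target, 'service.restart', ['salt-minion'])
    }
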
diff --git a/openstack-data-upgrade.groovy b/openstack-data-upgrade.groovy
index 7458a27..ef6a527 100644
--- a/openstack-data-upgrade.groovy
+++ b/openstack-data-upgrade.groovy
@@ -158,6 +158,9 @@
         if (OS_DIST_UPGRADE.toBoolean() == true || OS_UPGRADE.toBoolean() == true) {
           debian.osUpgradeNode(env, target, upgrade_mode, false)
         }
+        // Workaround for PROD-31413: install python-tornado from the latest release, if available,
+        // and restart the Salt minion so the new code is applied.
+        salt.upgradePackageAndRestartSaltMinion(env, target, 'python-tornado')
       }
 
       common.stageWrapper(upgradeStageMap, "Upgrade OpenStack", target, interactive) {
diff --git a/restore-cassandra.groovy b/restore-cassandra.groovy
index b585e7e..fb1259f 100644
--- a/restore-cassandra.groovy
+++ b/restore-cassandra.groovy
@@ -7,11 +7,20 @@
  *
 **/
 
-def common = new com.mirantis.mk.Common()
-def salt = new com.mirantis.mk.Salt()
-def python = new com.mirantis.mk.Python()
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+python = new com.mirantis.mk.Python()
 
 def pepperEnv = "pepperEnv"
+
+def getValueForPillarKey(pepperEnv, target, pillarKey) {
+    def out = salt.getReturnValues(salt.getPillar(pepperEnv, target, pillarKey))
+    if (out == '') {
+        throw new Exception("Cannot get value for ${pillarKey} key on ${target} target")
+    }
+    return out.toString()
+}
+
 timeout(time: 12, unit: 'HOURS') {
     node() {
 
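
Two details in the hunk above are easy to miss. First, common, salt and python deliberately lose their def: in a Groovy script, a method declared in the script body cannot see the script's local (def) variables, only undeclared ones that land in the script binding, so dropping def is what lets getValueForPillarKey() call salt.getPillar(). Second, the helper converts a silently empty pillar lookup into a hard failure. Example usage (target and key are taken from the hunks below; the returned version string is illustrative):

    def contrailVersion = getValueForPillarKey(pepperEnv,
        'I@opencontrail:control:role:primary', '_param:opencontrail_version')
    // -> e.g. '4.1' on success; on a missing or empty pillar it throws:
    //    Cannot get value for _param:opencontrail_version key on
    //    I@opencontrail:control:role:primary target
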
@@ -28,54 +37,71 @@
             }
         }
 
-        stage('Backup') {
-            salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', 'bash /usr/local/bin/cassandra-backup-runner-call.sh')
-        }
-
         stage('Restore') {
+            // stop neutron-server to prevent CRUD api calls to contrail-api service
+            try {
+                salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.stop', ['neutron-server'], null, true)
+            } catch (Exception er) {
+                common.warningMsg('neutron-server service already stopped')
+            }
             // get opencontrail version
-            def _pillar = salt.getPillar(pepperEnv, "I@opencontrail:control", '_param:opencontrail_version')
-            def contrailVersion = _pillar['return'][0].values()[0]
-            common.infoMsg("Contrail version is ${contrailVersion}")
-            if (contrailVersion >= 4) {
-                common.infoMsg("There will be steps for OC4.0 restore")
+            def contrailVersion = getValueForPillarKey(pepperEnv, "I@opencontrail:control:role:primary", "_param:opencontrail_version")
+            def configDbIp = getValueForPillarKey(pepperEnv, "I@opencontrail:control:role:primary", "opencontrail:database:bind:host")
+            def configDbPort = getValueForPillarKey(pepperEnv, "I@opencontrail:control:role:primary", "opencontrail:database:bind:port_configdb")
+            common.infoMsg("OpenContrail version is ${contrailVersion}")
+            if (contrailVersion.startsWith('4')) {
+                controllerImage = getValueForPillarKey(pepperEnv, "I@opencontrail:control:role:primary",
+                        "docker:client:compose:opencontrail:service:controller:container_name")
+                common.infoMsg("Applying db restore procedure for OpenContrail 4.X version")
                 try {
-                    salt.cmdRun(pepperEnv, 'I@opencontrail:control', 'doctrail controller systemctl stop contrail-database' )
+                    common.infoMsg("Stop contrail control plane containers")
+                    salt.cmdRun(pepperEnv, 'I@opencontrail:control or I@opencontrail:collector', 'cd /etc/docker/compose/opencontrail/; docker-compose down')
                 } catch (Exception err) {
-                    common.warningMsg('contrail-database already stopped? ' + err.getMessage())
+                    common.errorMsg('An error occurred while stopping the contrail containers: ' + err.getMessage())
+                    throw err
                 }
                 try {
-                    salt.cmdRun(pepperEnv, 'I@opencontrail:control', 'doctrail controller bash -c "for f in $(ls /var/lib/cassandra/); do rm -r /var/lib/cassandra/$f; done"')
+                    common.infoMsg("Cleanup cassandra data")
+                    salt.cmdRun(pepperEnv, 'I@opencontrail:control', 'for f in $(ls /var/lib/configdb/); do rm -r /var/lib/configdb/$f; done')
                 } catch (Exception err) {
-                    common.warningMsg('cassandra data already removed? ' + err.getMessage())
+                    common.errorMsg('Cannot clean up cassandra data on the control nodes: ' + err.getMessage())
+                    throw err
                 }
                 try {
-                    salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', 'doctrail controller systemctl start contrail-database' )
+                    common.infoMsg("Start cassandra db on I@cassandra:backup:client node")
+                    salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', 'cd /etc/docker/compose/opencontrail/; docker-compose up -d')
                 } catch (Exception err) {
-                    common.warningMsg('contrail-database already started? ' + err.getMessage())
+                    common.errorMsg('An error occurred during cassandra db startup on the I@cassandra:backup:client node: ' + err.getMessage())
+                    throw err
                 }
-                // remove restore-already-happenned file if any is present
+                // wait for cassandra to be online
+                common.retry(6, 20){
+                    common.infoMsg("Trying to connect to casandra db on I@cassandra:backup:client node ...")
+                    salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', "nc -v -z -w2 ${configDbIp} ${configDbPort}")
+                }
+                // remove restore-already-happened file if any is present
                 try {
-                    salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', 'rm  /var/backups/cassandra/dbrestored')
+                    salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', 'rm /var/backups/cassandra/dbrestored')
                 } catch (Exception err) {
                     common.warningMsg('/var/backups/cassandra/dbrestored not present? ' + err.getMessage())
                 }
-                // perform actual backup
                 salt.enforceState(pepperEnv, 'I@cassandra:backup:client', "cassandra")
-                salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'system.reboot', null, [], true, 5)
-                sleep(5)
-                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'system.reboot', null, [], true, 5)
-                // the lovely wait-60-seconds mantra before restarting supervisor-database service
-                sleep(60)
-                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "doctrail controller systemctl restart contrail-database")
+                try {
+                    salt.cmdRun(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'cd /etc/docker/compose/opencontrail/; docker-compose up -d')
+                } catch (Exception err) {
+                    common.errorMsg('An error occurred during cassandra db startup on the I@opencontrail:control and not I@cassandra:backup:client nodes: ' + err.getMessage())
+                    throw err
+                }
                 // another mantra, wait till all services are up
                 sleep(60)
-            } else {
                 try {
-                    salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.stop', ['neutron-server'], null, true)
-                } catch (Exception er) {
-                    common.warningMsg('neutron-server service already stopped')
+                    common.infoMsg("Start analytics containers node")
+                    salt.cmdRun(pepperEnv, 'I@opencontrail:collector', 'cd /etc/docker/compose/opencontrail/; docker-compose up -d')
+                } catch (Exception err) {
+                    common.errorMsg('An error occurred during analytics containers startup: ' + err.getMessage())
+                    throw err
                 }
+            } else {
                 try {
                     salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-config'], null, true)
                 } catch (Exception er) {
@@ -104,8 +130,7 @@
                     common.warningMsg('Directory already empty')
                 }
 
-                _pillar = salt.getPillar(pepperEnv, "I@cassandra:backup:client", 'cassandra:backup:backup_dir')
-                def backupDir = _pillar['return'][0].values()[0] ?: '/var/backups/cassandra'
+                def backupDir = getValueForPillarKey(pepperEnv, "I@cassandra:backup:client", "cassandra:backup:backup_dir")
                 common.infoMsg("Backup directory is ${backupDir}")
                 salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'file.remove', ["${backupDir}/dbrestored"], null, true)
 
@@ -127,7 +152,6 @@
                 sleep(5)
 
                 salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.restart', ['supervisor-database'], null, true)
-                salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.start', ['neutron-server'], null, true)
 
                 // wait until contrail-status is up
                 salt.commandStatus(pepperEnv, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
@@ -135,11 +159,12 @@
                 salt.cmdRun(pepperEnv, 'I@opencontrail:control', "nodetool status")
                 salt.cmdRun(pepperEnv, 'I@opencontrail:control', "contrail-status")
             }
+
+            salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.start', ['neutron-server'], null, true)
         }
 
         stage('Opencontrail controllers health check') {
-            common.retry(3, 20){
-                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "doctrail controller contrail-status")
+            common.retry(9, 20){
                 salt.enforceState(pepperEnv, 'I@opencontrail:control or I@opencontrail:collector', 'opencontrail.upgrade.verify', true, true)
             }
         }
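
Two mechanics in this file are worth spelling out. nc -v -z -w2 host port only checks that the TCP port accepts a connection (zero-I/O mode, 2-second timeout), and common.retry(6, 20) re-runs its closure up to 6 times with 20-second pauses, giving Cassandra roughly two minutes to start listening before the pipeline fails; the health check at the end uses the same primitive with a larger budget (9 x 20 s). A sketch of the retry semantics assumed here (the actual com.mirantis.mk.Common implementation may differ):

    // retry(times, delaySeconds): rethrow the last error once attempts run out.
    def retry(int times, int delaySeconds, Closure body) {
        for (int attempt = 1; ; attempt++) {
            try {
                return body()
            } catch (Exception e) {
                if (attempt >= times) {
                    throw e
                }
                sleep(delaySeconds * 1000)
            }
        }
    }
    retry(6, 20) {
        println 'probing configdb port ...'   // stands in for the nc probe above
    }
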
diff --git a/stacklight-upgrade.groovy b/stacklight-upgrade.groovy
index 9eef811..7554530 100644
--- a/stacklight-upgrade.groovy
+++ b/stacklight-upgrade.groovy
@@ -49,8 +49,8 @@
 def verify_es_is_green(master) {
     common.infoMsg('Verify that the Elasticsearch cluster status is green')
     try {
-        def retries_wait = 20
-        def retries = 15
+        def retries_wait = 120
+        def retries = 60
 
         def elasticsearch_vip
         def pillar = salt.getReturnValues(salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:host'))
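
The new values raise the total wait budget for the Elasticsearch cluster to turn green (assuming retries_wait is the per-attempt sleep in seconds):

    old budget: 15 attempts x 20 s  =  300 s  (5 minutes)
    new budget: 60 attempts x 120 s = 7200 s  (2 hours)
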
diff --git a/test-model-generator.groovy b/test-model-generator.groovy
index 144f760..8c08493 100644
--- a/test-model-generator.groovy
+++ b/test-model-generator.groovy
@@ -47,6 +47,15 @@
         sh "mkdir -p reports ${apiProject} ${uiProject}"
         def testImage = docker.image(cvpImageName)
         def testImageOptions = "-u root:root --network=host -v ${env.WORKSPACE}/reports:/var/lib/qa_reports --entrypoint=''"
+        withCredentials([
+          [$class          : 'UsernamePasswordMultiBinding',
+          credentialsId   : 'scale-ci',
+          passwordVariable: 'JENKINS_PASSWORD',
+          usernameVariable: 'JENKINS_USER']
+          ]) {
+            env.JENKINS_USER = JENKINS_USER
+            env.JENKINS_PASSWORD = JENKINS_PASSWORD
+        }
         try {
             stage("checkout") {
                 if (event) {
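
A note on the credentials block above: assigning to env.* promotes the user name and password from the scoped withCredentials binding to build-wide environment variables, which is what makes them visible to the test container later on, at the cost of keeping the password in the build environment (unmasked) after the block ends. The $class: 'UsernamePasswordMultiBinding' map is the long form of the usernamePassword binding symbol; a minimal equivalent:

    withCredentials([usernamePassword(credentialsId: 'scale-ci',
                                      usernameVariable: 'JENKINS_USER',
                                      passwordVariable: 'JENKINS_PASSWORD')]) {
        env.JENKINS_USER = JENKINS_USER          // visible to every later step
        env.JENKINS_PASSWORD = JENKINS_PASSWORD  // no longer masked outside this block
    }
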
@@ -140,7 +149,7 @@
 
                 dir(apiProject) {
                     python.runVirtualenvCommand("${env.WORKSPACE}/venv",
-                            "export IMAGE=${apiImage.id}; ./bootstrap_env.sh up")
+                            "export IMAGE=${apiImage.id}; export DOCKER_COMPOSE=docker-compose-test.yml; ./bootstrap_env.sh up")
                     common.retry(5, 20) {
                         sh 'curl -v http://127.0.0.1:8001/api/v1 > /dev/null'
                     }
@@ -161,7 +170,7 @@
                         export TEST_PASSWORD=default
                         export TEST_MODELD_URL=127.0.0.1
                         export TEST_MODELD_PORT=3000
-                        export TEST_TIMEOUT=30
+                        export TEST_TIMEOUT=15
                         cd /var/lib/trymcp-tests
                         pytest ${component}
                     """
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
index 161029e..3a55011 100644
--- a/upgrade-mcp-release.groovy
+++ b/upgrade-mcp-release.groovy
@@ -288,6 +288,10 @@
             }
             python.setupPepperVirtualenv(venvPepper, saltMastURL, saltMastCreds)
             def minions = salt.getMinions(venvPepper, '*')
+            def cluster_name = salt.getPillar(venvPepper, 'I@salt:master', "_param:cluster_name").get("return")[0].values()[0]
+            if (cluster_name == '' || cluster_name == 'null' || cluster_name == null) {
+                error('Pillar data is broken for the Salt master node! Please check it manually and re-run the pipeline.')
+            }
 
             stage('Update Reclass and Salt-Formulas') {
                 common.infoMsg('Perform: Full salt sync')
@@ -298,7 +302,6 @@
                 common.infoMsg('Perform: archiveReclassInventory before upgrade')
                 archiveReclassInventory(inventoryBeforeFilename)
 
-                def cluster_name = salt.getPillar(venvPepper, 'I@salt:master', '_param:cluster_name').get('return')[0].values()[0]
                 try {
                     salt.cmdRun(venvPepper, 'I@salt:master', 'cd /srv/salt/reclass/ && git status && git diff-index --quiet HEAD --')
                 }
@@ -327,6 +330,27 @@
                         "grep -r --exclude-dir=aptly -l 'system.linux.system.repo.mcp.updates' * | xargs --no-run-if-empty sed -i 's/system.linux.system.repo.mcp.updates/system.linux.system.repo.mcp.apt_mirantis.update/g'")
                     salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
                         "grep -r --exclude-dir=aptly -l 'system.linux.system.repo.mcp.extra' * | xargs --no-run-if-empty sed -i 's/system.linux.system.repo.mcp.extra/system.linux.system.repo.mcp.apt_mirantis.extra/g'")
+
+                    // Switch Jenkins/Gerrit to LDAP over SSL/TLS (ldaps://)
+                    def gerritldapURI = salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+                        "grep -r --exclude-dir=aptly 'gerrit_ldap_server: .*' * | grep -Po 'gerrit_ldap_server: \\K.*' | tr -d '\"'", true, null, false).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+                    if (gerritldapURI.startsWith('ldap://')) {
+                        salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+                            "grep -r --exclude-dir=aptly -l 'gerrit_ldap_server: .*' * | xargs --no-run-if-empty sed -i 's|ldap://|ldaps://|g'")
+                    } else if (! gerritldapURI.startsWith('ldaps://')) {
+                        salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+                            "grep -r --exclude-dir=aptly -l 'gerrit_ldap_server: .*' * | xargs --no-run-if-empty sed -i 's|gerrit_ldap_server: .*|gerrit_ldap_server: \"ldaps://${gerritldapURI}\"|g'")
+                    }
+                    def jenkinsldapURI = salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+                        "grep -r --exclude-dir=aptly 'jenkins_security_ldap_server: .*' * | grep -Po 'jenkins_security_ldap_server: \\K.*' | tr -d '\"'", true, null, false).get('return')[0].values()[0].replaceAll('Salt command execution success', '').trim()
+                    if (jenkinsldapURI.startsWith('ldap://')) {
+                        salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+                            "grep -r --exclude-dir=aptly -l 'jenkins_security_ldap_server: .*' * | xargs --no-run-if-empty sed -i 's|ldap://|ldaps://|g'")
+                    } else if (! jenkinsldapURI.startsWith('ldaps://')) {
+                        salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+                            "grep -r --exclude-dir=aptly -l 'jenkins_security_ldap_server: .*' * | xargs --no-run-if-empty sed -i 's|jenkins_security_ldap_server: .*|jenkins_security_ldap_server: \"ldaps://${jenkinsldapURI}\"|g'")
+                    }
+
                     salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/system && git checkout ${reclassSystemBranch}")
                     // Add kubernetes-extra repo
                     if (salt.testTarget(venvPepper, "I@kubernetes:master")) {
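
The grep/sed pairs above normalize the Gerrit and Jenkins LDAP server parameters to TLS, covering three cases: a plain ldap:// URI has its scheme swapped, an existing ldaps:// URI is left untouched, and a bare host name gets an ldaps:// prefix. The same decision table as a pure function (hypothetical helper, for illustration only):

    def toLdaps(String uri) {
        if (uri.startsWith('ldaps://')) {
            return uri                                 // already TLS: keep as is
        }
        if (uri.startsWith('ldap://')) {
            return uri.replace('ldap://', 'ldaps://')  // swap the scheme
        }
        return "ldaps://${uri}"                        // bare host: add the scheme
    }
    assert toLdaps('ldap://ldap.example.com')  == 'ldaps://ldap.example.com'
    assert toLdaps('ldaps://ldap.example.com') == 'ldaps://ldap.example.com'
    assert toLdaps('ldap.example.com')         == 'ldaps://ldap.example.com'
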
@@ -487,7 +511,9 @@
                 common.infoMsg('Perform: updating openssh')
                 salt.enforceState(venvPepper, "I@linux:system", 'openssh', true)
 
-                salt.enforceState(venvPepper, 'I@jenkins:client and not I@salt:master', 'jenkins.client', true)
+                // Apply HAProxy changes on the CI/CD nodes
+                salt.enforceState(venvPepper, 'I@keepalived:cluster:instance:cicd_control_vip and I@haproxy:proxy', 'haproxy.proxy', true)
+
                 salt.cmdRun(venvPepper, "I@salt:master", "salt -C 'I@jenkins:client and I@docker:client and not I@salt:master' state.sls docker.client --async")
 
                 sleep(180)
@@ -502,6 +528,8 @@
                 catch (Exception ex) {
                     error("Docker containers for CI/CD services are having troubles with starting.")
                 }
+
+                salt.enforceState(venvPepper, 'I@jenkins:client and not I@salt:master', 'jenkins.client', true)
             }
         }
         catch (Throwable e) {