Merge "Add update glusterfs pipelines"
diff --git a/ceph-remove-node.groovy b/ceph-remove-node.groovy
index e616a28..0fba6a0 100644
--- a/ceph-remove-node.groovy
+++ b/ceph-remove-node.groovy
@@ -90,6 +90,10 @@
             stage('Remove Ceph RGW') {
                 salt.enforceState(pepperEnv, 'I@ceph:radosgw', ['keepalived', 'haproxy'], true)
             }
+
+            stage('Purge Ceph RGW pkgs') {
+                salt.runSaltProcessStep(pepperEnv, HOST, 'pkg.purge', 'ceph-common,libcephfs2,python-cephfs,radosgw,python-rados,python-rbd,python-rgw')
+            }
         }
 
         if (HOST_TYPE.toLowerCase() != 'osd') {
@@ -222,7 +226,7 @@
 
             // purge Ceph pkgs
             stage('Purge Ceph OSD pkgs') {
-                runCephCommand(pepperEnv, HOST, 'apt purge ceph-base ceph-common ceph-fuse ceph-mds ceph-osd python-cephfs librados2 python-rados -y')
+                salt.runSaltProcessStep(pepperEnv, HOST, 'pkg.purge', 'ceph-base,ceph-common,ceph-fuse,ceph-mds,ceph-osd,python-cephfs,librados2,python-rados,python-rbd,python-rgw')
             }
 
             stage('Remove OSD host from crushmap') {
@@ -294,6 +298,10 @@
                     salt.enforceState(pepperEnv, tgt, 'ceph.common', true)
                 }
             }
+
+            stage('Purge Ceph MON pkgs') {
+                salt.runSaltProcessStep(pepperEnv, HOST, 'pkg.purge', 'ceph-base,ceph-common,ceph-mgr,ceph-mon,libcephfs2,python-cephfs,python-rbd,python-rgw')
+            }
         }
 
         if (HOST_TYPE.toLowerCase() == 'osd' && GENERATE_CRUSHMAP.toBoolean() == true) {
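The three new purge stages above all use the same shape: salt.runSaltProcessStep drives the Salt pkg.purge module with a comma-separated package list instead of shelling out to apt purge. A minimal sketch of that call, assuming pepperEnv and HOST are prepared as earlier in this pipeline and using an illustrative package list:

    def salt = new com.mirantis.mk.Salt()

    // Hypothetical stage mirroring the hunks above: purge a couple of Ceph
    // packages on HOST through the Salt pkg module rather than raw apt purge.
    stage('Purge example Ceph pkgs') {
        // pkg.purge takes the package names as a single comma-separated string here
        salt.runSaltProcessStep(pepperEnv, HOST, 'pkg.purge', 'ceph-common,python-rados')
    }
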
diff --git a/ceph-upgrade.groovy b/ceph-upgrade.groovy
index 86a1f0f..cc8a84d 100644
--- a/ceph-upgrade.groovy
+++ b/ceph-upgrade.groovy
@@ -16,6 +16,9 @@
  *  STAGE_UPGRADE_OSD               Set to True if Ceph osd nodes upgrade is desired
  *  STAGE_UPGRADE_RGW               Set to True if Ceph rgw nodes upgrade is desired
  *  STAGE_UPGRADE_CLIENT            Set to True if Ceph client nodes upgrade is desired (includes for example ctl/cmp nodes)
+ *  STAGE_FINALIZE                  Set to True if configs recommended for TARGET_RELEASE should be applied after the upgrade is done
+ *  BACKUP_ENABLED                  Set to True to copy the disks of Ceph VMs before the upgrade and to back up Ceph directories on OSD nodes
+ *  BACKUP_DIR                      Target directory for the backups when BACKUP_ENABLED is set
  *
  */
 
@@ -71,12 +74,12 @@
 
                 waitForHealthy(master)
                 try {
-                    salt.cmdRun(master, "${minionProvider}", "[ ! -f /root/${minion_name}.${domain}.qcow2.bak ] && virsh destroy ${minion_name}.${domain}")
+                    salt.cmdRun(master, "${minionProvider}", "[ ! -f ${BACKUP_DIR}/${minion_name}.${domain}.qcow2.bak ] && virsh destroy ${minion_name}.${domain}")
                 } catch (Exception e) {
                     common.warningMsg('Backup already exists')
                 }
                 try {
-                    salt.cmdRun(master, "${minionProvider}", "[ ! -f /root/${minion_name}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${minion_name}.${domain}/system.qcow2 /root/${minion_name}.${domain}.qcow2.bak")
+                    salt.cmdRun(master, "${minionProvider}", "[ ! -f ${BACKUP_DIR}/${minion_name}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${minion_name}.${domain}/system.qcow2 ${BACKUP_DIR}/${minion_name}.${domain}.qcow2.bak")
                 } catch (Exception e) {
                     common.warningMsg('Backup already exists')
                 }
@@ -129,10 +132,12 @@
 
             stage("Verify services for ${minion}") {
                 sleep(10)
-                runCephCommand(master, ADMIN_HOST, "ceph -s")
+                runCephCommand(master, "${minion}", "systemctl status ceph-${target}.target")
+                waitForHealthy(master)
             }
 
             stage('Ask for manual confirmation') {
+                runCephCommand(master, ADMIN_HOST, "ceph -s")
                 input message: "From the verification command above, please check Ceph ${target} joined the cluster correctly. If so, Do you want to continue to upgrade next node?"
             }
         }
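The verification stage now checks the upgraded daemon on the node itself via systemctl status ceph-${target}.target and then calls waitForHealthy(master) before the manual confirmation, where ceph -s is shown. waitForHealthy is defined elsewhere in ceph-upgrade.groovy and is not part of this hunk; the sketch below only illustrates the kind of polling such a helper performs, reusing runCephCommand and ADMIN_HOST the way this file does:

    // Illustration only: poll cluster health on the admin node until HEALTH_OK,
    // in the spirit of waitForHealthy(master); the real helper may differ.
    def waitForHealthySketch(master, int attempts = 30, int sleepSec = 10) {
        for (int i = 0; i < attempts; i++) {
            def health = runCephCommand(master, ADMIN_HOST, 'ceph health')['return'][0].values()[0]
            if (health.contains('HEALTH_OK')) {
                return
            }
            sleep(sleepSec)
        }
        error('Ceph cluster did not reach HEALTH_OK in time')
    }
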
diff --git a/cvp-func.groovy b/cvp-func.groovy
index 0c657a5..80160ab 100644
--- a/cvp-func.groovy
+++ b/cvp-func.groovy
@@ -31,7 +31,15 @@
     try{
         stage('Initialization') {
             sh "rm -rf ${artifacts_dir}"
+            if (!TARGET_NODE) {
+              // This pillar target resolves to cid01
+              TARGET_NODE = "I@gerrit:client"
+            }
             saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+            os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
+            if (!os_version) {
+                throw new Exception("OpenStack was not found in this environment. Exiting")
+            }
             salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
             salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_artifacts_dir}")
             keystone_creds = validate._get_keystone_creds_v3(saltMaster)
diff --git a/cvp-ha.groovy b/cvp-ha.groovy
index b33cda6..e933984 100644
--- a/cvp-ha.groovy
+++ b/cvp-ha.groovy
@@ -37,7 +37,15 @@
         try {
             stage('Initialization') {
                 sh "rm -rf ${artifacts_dir}"
+                if (!TEMPEST_TARGET_NODE) {
+                  // This pillar target resolves to cid01
+                  TEMPEST_TARGET_NODE = "I@gerrit:client"
+                }
                 saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+                os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
+                if (!os_version) {
+                    throw new Exception("OpenStack was not found in this environment. Exiting")
+                }
                 salt.cmdRun(saltMaster, TEMPEST_TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
                 salt.cmdRun(saltMaster, TEMPEST_TARGET_NODE, "mkdir -p ${remote_artifacts_dir}")
                 keystone_creds = validate._get_keystone_creds_v3(saltMaster)
diff --git a/cvp-perf.groovy b/cvp-perf.groovy
index 74c9a63..60a064c 100644
--- a/cvp-perf.groovy
+++ b/cvp-perf.groovy
@@ -27,7 +27,15 @@
     try{
         stage('Initialization') {
             sh "rm -rf ${artifacts_dir}"
+            if (!TARGET_NODE) {
+              // This pillar target resolves to cid01
+              TARGET_NODE = "I@gerrit:client"
+            }
             saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+            os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
+            if (!os_version) {
+                throw new Exception("OpenStack was not found in this environment. Exiting")
+            }
             salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
             salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_artifacts_dir}")
             keystone_creds = validate._get_keystone_creds_v3(saltMaster)
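cvp-func, cvp-ha and cvp-perf gain the same two guards: a default target (the I@gerrit:client compound match, which resolves to cid01) when no node is passed in, and an early abort when the _param:openstack_version pillar is empty. The ['return'][0].values()[0] chain unpacks the pillar value from the Salt API response; a minimal sketch of that unpacking, assuming a connected saltMaster as in these pipelines:

    def salt = new com.mirantis.mk.Salt()

    // getPillar answers with a map of the form ['return': [[<minion>: <value>]]];
    // take the first minion's value and treat an empty result as "not set".
    def pillarData = salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0]
    def os_version = pillarData ? pillarData.values()[0] : null
    if (!os_version) {
        throw new Exception("OpenStack was not found in this environment. Exiting")
    }
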
diff --git a/cvp-tempest.groovy b/cvp-tempest.groovy
index e8eb286..b0e12e9 100644
--- a/cvp-tempest.groovy
+++ b/cvp-tempest.groovy
@@ -45,6 +45,7 @@
         stage('Initialization') {
             deleteDir()
             saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+            container_name = "${env.JOB_NAME}"
             cluster_name=salt.getPillar(saltMaster, 'I@salt:master', '_param:cluster_name')['return'][0].values()[0]
             os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
             if (os_version == '') {
@@ -59,15 +60,8 @@
                 SERVICE_NODE = runtest_node.keySet()[0]
             }
             else {
-                common.infoMsg("Service node is not defined in reclass")
-                SERVICE_NODE = (env.SERVICE_NODE) ?: 'I@salt:master'
-                common.infoMsg("${SERVICE_NODE} will be used as Service node")
-                def classes_to_add = ["cluster.${cluster_name}.infra.runtest"]
-                fullnodename = salt.getMinions(saltMaster, SERVICE_NODE).get(0)
-                common.infoMsg("Full service node name ${fullnodename}")
-                result = salt.runSaltCommand(saltMaster, 'local', ['expression': SERVICE_NODE, 'type': 'compound'], 'reclass.node_update',
-                                             null, null, ['name': fullnodename, 'classes': classes_to_add])
-                salt.checkResult(result)
+                throw new Exception("Runtest config is not found in reclass. Please create runtest.yml and include it " +
+                                    "in reclass. Check the documentation for more details")
             }
             common.infoMsg('Refreshing pillars on service node')
             salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'saltutil.refresh_pillar', [], null, VERBOSE)
@@ -129,7 +123,7 @@
         stage('Run Tempest tests') {
             mounts = ['/root/test/tempest_generated.conf': '/etc/tempest/tempest.conf']
             validate.runContainer(master: saltMaster, target: TARGET_NODE, dockerImageLink: TEST_IMAGE,
-                                  mounts: mounts)
+                                  mounts: mounts, name: container_name)
             report_prefix += 'tempest_'
             if (env.concurrency) {
                 args += ' -w ' + env.concurrency
@@ -144,7 +138,7 @@
                     report_prefix += 'full'
                 }
             }
-            salt.cmdRun(saltMaster, TARGET_NODE, "docker exec -e ARGS=\'${args}\' cvp /bin/bash -c 'run-tempest'")
+            salt.cmdRun(saltMaster, TARGET_NODE, "docker exec -e ARGS=\'${args}\' ${container_name} /bin/bash -c 'run-tempest'")
         }
         stage('Collect results') {
             report_prefix += "_report_${env.BUILD_NUMBER}"
@@ -162,7 +156,7 @@
         throw e
     } finally {
         if (DEBUG_MODE == 'false') {
-            validate.runCleanup(saltMaster, TARGET_NODE)
+            validate.runCleanup(saltMaster, TARGET_NODE, container_name)
         }
     }
 }
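In cvp-tempest the container is now named after the Jenkins job instead of the hard-coded cvp, and the same name is passed to runContainer, the docker exec call and runCleanup so runs of different jobs on one target node do not collide. A sketch of that flow, assuming saltMaster, TARGET_NODE, TEST_IMAGE and the validate shared-library helper are initialized as in this pipeline and that runContainer/runCleanup accept the name/container argument exactly as used above (the args value is illustrative):

    def salt = new com.mirantis.mk.Salt()

    // Name the container after the Jenkins job so concurrent CVP runs on the
    // same target node each get their own container.
    def container_name = "${env.JOB_NAME}"
    def args = ''   // extra run-tempest arguments, built earlier in the pipeline

    validate.runContainer(master: saltMaster, target: TARGET_NODE,
                          dockerImageLink: TEST_IMAGE, name: container_name)
    salt.cmdRun(saltMaster, TARGET_NODE,
                "docker exec -e ARGS='${args}' ${container_name} /bin/bash -c 'run-tempest'")
    // ...collect results...
    validate.runCleanup(saltMaster, TARGET_NODE, container_name)
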
diff --git a/gating-pipeline.groovy b/gating-pipeline.groovy
index 7cbdfa0..3313d48 100644
--- a/gating-pipeline.groovy
+++ b/gating-pipeline.groovy
@@ -43,6 +43,8 @@
             def skipProjectsVerify = ['mk/docker-jnlp-slave']
 
             stage("test") {
+                // Notify Gerrit that the build has started
+                ssh.agentSh(String.format("ssh -p %s %s@%s gerrit review %s,%s -m \"'Build Started %s'\"", defGerritPort, GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER, BUILD_URL))
                 //check Code-Review
                 if (!gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Code-Review", "+")) {
                     throw new Exception('Change don\'t have a CodeReview+1, reject gate')
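The new notification posts a comment back to the triggering change through the Gerrit SSH command-line API (gerrit review <change>,<patchset> -m "..."), reusing the ssh helper and GERRIT_* trigger variables the pipeline already relies on. A sketch of the command being built, assuming defGerritPort and the GERRIT_* variables are set as in gating-pipeline.groovy:

    def ssh = new com.mirantis.mk.Ssh()

    // Post a "Build Started <url>" review comment on the triggering patchset
    // via: ssh -p <port> <user>@<host> gerrit review <change>,<patchset> -m "..."
    def reviewCmd = String.format("ssh -p %s %s@%s gerrit review %s,%s -m \"'Build Started %s'\"",
            defGerritPort, GERRIT_NAME, GERRIT_HOST,
            GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER, BUILD_URL)
    ssh.agentSh(reviewCmd)
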
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 3783331..d4be6cc 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -130,6 +130,13 @@
         updateSaltFormulasDuringTest = false
     }
 
+    if (gitGuessedVersion == 'release/proposed/2019.2.0') {
+        // The CFG node in the 2019.2.X update has to be bootstrapped with the update/proposed repository for salt formulas
+        context['cloudinit_master_config'] = context.get('cloudinit_master_config', false) ?: [:]
+        context['cloudinit_master_config']['MCP_SALT_REPO_UPDATES'] = context['cloudinit_master_config'].get('MCP_SALT_REPO_UPDATES', false) ?:
+                'deb [arch=amd64] http://mirror.mirantis.com/update/proposed/salt-formulas/xenial xenial main'
+    }
+
     common.infoMsg("Using context:\n" + context)
     print prettyPrint(toJson(context))
     return context
@@ -316,6 +323,11 @@
                 def smc = [:]
                 smc['SALT_MASTER_MINION_ID'] = "${context['salt_master_hostname']}.${context['cluster_domain']}"
                 smc['SALT_MASTER_DEPLOY_IP'] = context['salt_master_management_address']
+                if (context.get('cloudinit_master_config', false)) {
+                    context['cloudinit_master_config'].each { k, v ->
+                        smc[k] = v
+                    }
+                }
                 if (outdateGeneration) {
                     smc['DEPLOY_NETWORK_GW'] = context['deploy_network_gateway']
                     smc['DEPLOY_NETWORK_NETMASK'] = context['deploy_network_netmask']
@@ -350,7 +362,7 @@
                 }
 
                 for (i in common.entries(smc)) {
-                    sh "sed -i 's,${i[0]}=.*,${i[0]}=${i[1]},' user_data"
+                    sh "sed -i 's,export ${i[0]}=.*,export ${i[0]}=\"${i[1]}\",' user_data"
                 }
 
                 // calculate netmask
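The tightened sed expression anchors on export KEY= and wraps the value in double quotes, so values containing spaces, such as the apt source line injected for MCP_SALT_REPO_UPDATES above, remain a single shell assignment in user_data. One iteration of that loop spelled out for that single entry (key and value taken from this change):

    // Rewrites a line like   export MCP_SALT_REPO_UPDATES=""
    // into                   export MCP_SALT_REPO_UPDATES="deb [arch=amd64] http://mirror.mirantis.com/update/proposed/salt-formulas/xenial xenial main"
    def key = 'MCP_SALT_REPO_UPDATES'
    def value = 'deb [arch=amd64] http://mirror.mirantis.com/update/proposed/salt-formulas/xenial xenial main'
    sh "sed -i 's,export ${key}=.*,export ${key}=\"${value}\",' user_data"
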
diff --git a/stacklight-upgrade.groovy b/stacklight-upgrade.groovy
index 85b93e9..b81ef97 100644
--- a/stacklight-upgrade.groovy
+++ b/stacklight-upgrade.groovy
@@ -51,25 +51,38 @@
     try {
         def retries_wait = 20
         def retries = 15
+
         def elasticsearch_vip
-        def pillar = salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:host')
-        if(!pillar['return'].isEmpty()) {
-            elasticsearch_vip = pillar['return'][0].values()[0]
+        def pillar = salt.getReturnValues(salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:host'))
+        if(pillar) {
+            elasticsearch_vip = pillar
         } else {
             errorOccured = true
             common.errorMsg('[ERROR] Elasticsearch VIP address could not be retrieved')
         }
-        pillar = salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:port')
+
+        pillar = salt.getReturnValues(salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:port'))
         def elasticsearch_port
-        if(!pillar['return'].isEmpty()) {
-            elasticsearch_port = pillar['return'][0].values()[0]
+        if(pillar) {
+            elasticsearch_port = pillar
         } else {
             errorOccured = true
             common.errorMsg('[ERROR] Elasticsearch VIP port could not be retrieved')
         }
+
+        pillar = salt.getReturnValues(salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:scheme'))
+        def elasticsearch_scheme
+        if(pillar) {
+            elasticsearch_scheme = pillar
+            common.infoMsg("[INFO] Using elasticsearch scheme: ${elasticsearch_scheme}")
+        } else {
+            common.infoMsg('[INFO] No pillar with Elasticsearch server scheme, using scheme: http')
+            elasticsearch_scheme = "http"
+        }
+
         common.retry(retries,retries_wait) {
             common.infoMsg('Waiting for Elasticsearch to become green..')
-            salt.cmdRun(master, "I@elasticsearch:client", "curl -sf ${elasticsearch_vip}:${elasticsearch_port}/_cat/health | awk '{print \$4}' | grep green")
+            salt.cmdRun(master, "I@elasticsearch:client", "curl -sfk ${elasticsearch_scheme}://${elasticsearch_vip}:${elasticsearch_port}/_cat/health | awk '{print \$4}' | grep green")
         }
     } catch (Exception er) {
         errorOccured = true
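With the scheme pillar in place, the readiness check builds a full URL and passes -k so an HTTPS endpoint with a self-signed certificate does not fail the curl, while common.retry keeps polling until the cluster reports green. A sketch of the resulting check, assuming master, retries and retries_wait as defined above; the scheme, VIP and port values here are illustrative:

    def salt = new com.mirantis.mk.Salt()
    def common = new com.mirantis.mk.Common()

    def elasticsearch_scheme = 'https'        // illustrative values; normally taken from pillar
    def elasticsearch_vip = '172.16.10.254'
    def elasticsearch_port = '9200'

    // Poll _cat/health until the status column reads "green"; -k tolerates
    // self-signed certificates when the scheme is https.
    common.retry(retries, retries_wait) {
        common.infoMsg('Waiting for Elasticsearch to become green..')
        salt.cmdRun(master, "I@elasticsearch:client",
            "curl -sfk ${elasticsearch_scheme}://${elasticsearch_vip}:${elasticsearch_port}/_cat/health | awk '{print \$4}' | grep green")
    }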