Merge "[CVP,master] Use cid node by default to run rally-based pipelines"
diff --git a/ceph-remove-node.groovy b/ceph-remove-node.groovy
index e616a28..0fba6a0 100644
--- a/ceph-remove-node.groovy
+++ b/ceph-remove-node.groovy
@@ -90,6 +90,10 @@
             stage('Remove Ceph RGW') {
                 salt.enforceState(pepperEnv, 'I@ceph:radosgw', ['keepalived', 'haproxy'], true)
             }
+
+            stage('Purge Ceph RGW pkgs') {
+                salt.runSaltProcessStep(pepperEnv, HOST, 'pkg.purge', 'ceph-common,libcephfs2,python-cephfs,radosgw,python-rados,python-rbd,python-rgw')
+            }
         }
 
         if (HOST_TYPE.toLowerCase() != 'osd') {
@@ -222,7 +226,7 @@
 
             // purge Ceph pkgs
             stage('Purge Ceph OSD pkgs') {
-                runCephCommand(pepperEnv, HOST, 'apt purge ceph-base ceph-common ceph-fuse ceph-mds ceph-osd python-cephfs librados2 python-rados -y')
+                salt.runSaltProcessStep(pepperEnv, HOST, 'pkg.purge', 'ceph-base,ceph-common,ceph-fuse,ceph-mds,ceph-osd,python-cephfs,librados2,python-rados,python-rbd,python-rgw')
             }
 
             stage('Remove OSD host from crushmap') {
@@ -294,6 +298,10 @@
                     salt.enforceState(pepperEnv, tgt, 'ceph.common', true)
                 }
             }
+
+            stage('Purge Ceph MON pkgs') {
+                salt.runSaltProcessStep(pepperEnv, HOST, 'pkg.purge', 'ceph-base,ceph-common,ceph-mgr,ceph-mon,libcephfs2,python-cephfs,python-rbd,python-rgw')
+            }
         }
 
         if (HOST_TYPE.toLowerCase() == 'osd' && GENERATE_CRUSHMAP.toBoolean() == true) {
diff --git a/ceph-upgrade.groovy b/ceph-upgrade.groovy
index 86a1f0f..1695e5b 100644
--- a/ceph-upgrade.groovy
+++ b/ceph-upgrade.groovy
@@ -129,10 +129,12 @@
 
             stage("Verify services for ${minion}") {
                 sleep(10)
-                runCephCommand(master, ADMIN_HOST, "ceph -s")
+                runCephCommand(master, "${minion}", "systemctl status ceph-${target}.target")
+                waitForHealthy(master)
             }
 
             stage('Ask for manual confirmation') {
+                runCephCommand(master, ADMIN_HOST, "ceph -s")
                 input message: "From the verification command above, please check Ceph ${target} joined the cluster correctly. If so, Do you want to continue to upgrade next node?"
             }
         }
diff --git a/cvp-func.groovy b/cvp-func.groovy
index 4a231dc..80160ab 100644
--- a/cvp-func.groovy
+++ b/cvp-func.groovy
@@ -36,6 +36,10 @@
               TARGET_NODE = "I@gerrit:client"
             }
             saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+            os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
+            if (!os_version) {
+                throw new Exception("OpenStack was not found on this environment. Exiting")
+            }
             salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
             salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_artifacts_dir}")
             keystone_creds = validate._get_keystone_creds_v3(saltMaster)
diff --git a/cvp-ha.groovy b/cvp-ha.groovy
index e96a34c..e933984 100644
--- a/cvp-ha.groovy
+++ b/cvp-ha.groovy
@@ -42,6 +42,10 @@
                   TEMPEST_TARGET_NODE = "I@gerrit:client"
                 }
                 saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+                os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
+                if (!os_version) {
+                    throw new Exception("OpenStack was not found on this environment. Exiting")
+                }
                 salt.cmdRun(saltMaster, TEMPEST_TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
                 salt.cmdRun(saltMaster, TEMPEST_TARGET_NODE, "mkdir -p ${remote_artifacts_dir}")
                 keystone_creds = validate._get_keystone_creds_v3(saltMaster)
diff --git a/cvp-perf.groovy b/cvp-perf.groovy
index 62f5226..60a064c 100644
--- a/cvp-perf.groovy
+++ b/cvp-perf.groovy
@@ -32,6 +32,10 @@
               TARGET_NODE = "I@gerrit:client"
             }
             saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+            os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
+            if (!os_version) {
+                throw new Exception("OpenStack was not found on this environment. Exiting")
+            }
             salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
             salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_artifacts_dir}")
             keystone_creds = validate._get_keystone_creds_v3(saltMaster)
diff --git a/cvp-tempest.groovy b/cvp-tempest.groovy
index e8eb286..b0e12e9 100644
--- a/cvp-tempest.groovy
+++ b/cvp-tempest.groovy
@@ -45,6 +45,7 @@
         stage('Initialization') {
             deleteDir()
             saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+            container_name = "${env.JOB_NAME}"
             cluster_name=salt.getPillar(saltMaster, 'I@salt:master', '_param:cluster_name')['return'][0].values()[0]
             os_version=salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
             if (os_version == '') {
@@ -59,15 +60,8 @@
                 SERVICE_NODE = runtest_node.keySet()[0]
             }
             else {
-                common.infoMsg("Service node is not defined in reclass")
-                SERVICE_NODE = (env.SERVICE_NODE) ?: 'I@salt:master'
-                common.infoMsg("${SERVICE_NODE} will be used as Service node")
-                def classes_to_add = ["cluster.${cluster_name}.infra.runtest"]
-                fullnodename = salt.getMinions(saltMaster, SERVICE_NODE).get(0)
-                common.infoMsg("Full service node name ${fullnodename}")
-                result = salt.runSaltCommand(saltMaster, 'local', ['expression': SERVICE_NODE, 'type': 'compound'], 'reclass.node_update',
-                                             null, null, ['name': fullnodename, 'classes': classes_to_add])
-                salt.checkResult(result)
+                throw new Exception("Runtest config was not found in reclass. Please create runtest.yml and include it " +
+                                    "in reclass. Check the documentation for more details")
             }
             common.infoMsg('Refreshing pillars on service node')
             salt.runSaltProcessStep(saltMaster, SERVICE_NODE, 'saltutil.refresh_pillar', [], null, VERBOSE)
@@ -129,7 +123,7 @@
         stage('Run Tempest tests') {
             mounts = ['/root/test/tempest_generated.conf': '/etc/tempest/tempest.conf']
             validate.runContainer(master: saltMaster, target: TARGET_NODE, dockerImageLink: TEST_IMAGE,
-                                  mounts: mounts)
+                                  mounts: mounts, name: container_name)
             report_prefix += 'tempest_'
             if (env.concurrency) {
                 args += ' -w ' + env.concurrency
@@ -144,7 +138,7 @@
                     report_prefix += 'full'
                 }
             }
-            salt.cmdRun(saltMaster, TARGET_NODE, "docker exec -e ARGS=\'${args}\' cvp /bin/bash -c 'run-tempest'")
+            salt.cmdRun(saltMaster, TARGET_NODE, "docker exec -e ARGS=\'${args}\' ${container_name} /bin/bash -c 'run-tempest'")
         }
         stage('Collect results') {
             report_prefix += "_report_${env.BUILD_NUMBER}"
@@ -162,7 +156,7 @@
         throw e
     } finally {
         if (DEBUG_MODE == 'false') {
-            validate.runCleanup(saltMaster, TARGET_NODE)
+            validate.runCleanup(saltMaster, TARGET_NODE, container_name)
         }
     }
 }
diff --git a/gating-pipeline.groovy b/gating-pipeline.groovy
index 7cbdfa0..3313d48 100644
--- a/gating-pipeline.groovy
+++ b/gating-pipeline.groovy
@@ -43,6 +43,8 @@
             def skipProjectsVerify = ['mk/docker-jnlp-slave']
 
             stage("test") {
+                // Notify Gerrit that the build has started
+                ssh.agentSh(String.format("ssh -p %s %s@%s gerrit review %s,%s -m \"'Build Started %s'\"", defGerritPort, GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER, BUILD_URL))
                 //check Code-Review
                 if (!gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Code-Review", "+")) {
                     throw new Exception('Change don\'t have a CodeReview+1, reject gate')