Merge "Try to fix heat powered labs"
diff --git a/ceph-remove-osd.groovy b/ceph-remove-osd.groovy
new file mode 100644
index 0000000..b171855
--- /dev/null
+++ b/ceph-remove-osd.groovy
@@ -0,0 +1,107 @@
+/**
+ *
+ * Remove OSDs of a host from an existing Ceph cluster
+ *
+ * Required parameters:
+ *  SALT_MASTER_URL             URL of Salt master
+ *  SALT_MASTER_CREDENTIALS     Credentials to the Salt API
+ *
+ *  HOST                        Host (minion id) to be removed
+ *  ADMIN_HOST                  Host (minion id) with admin keyring
+ *  CLUSTER_FLAGS               Comma-separated list of flags to apply to the cluster
+ *  WAIT_FOR_HEALTHY            Wait for the cluster to rebalance before stopping daemons
+ *
+ */
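+
+// Hypothetical example values (adjust to your environment):
+//  HOST = 'osd2*'
+//  ADMIN_HOST = 'cmn01*'
+//  CLUSTER_FLAGS = 'noout,norebalance'
+//  WAIT_FOR_HEALTHY = 'true'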
+
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+
+// configure global variables
+def saltMaster
+def flags = CLUSTER_FLAGS.tokenize(',')
+
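+// run a Ceph CLI command on the node holding the admin keyring (ADMIN_HOST)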
+def runCephCommand(master, cmd) {
+    return salt.cmdRun(master, ADMIN_HOST, cmd)
+}
+
+node("python") {
+
+    // create connection to salt master
+    saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+
+    if (flags.size() > 0) {
+        stage('Set cluster flags') {
+            for (flag in flags) {
+                runCephCommand(saltMaster, 'ceph osd set ' + flag)
+            }
+        }
+    }
+
+    // get the list of disks on the OSD host
+    def pillar_disks = salt.getPillar(saltMaster, HOST, 'ceph:osd:disk')['return'][0].values()[0]
+    def hostname = salt.getPillar(saltMaster, HOST, 'linux:system:name')['return'][0].values()[0]
+    def hostname_id = hostname.replaceAll('osd', '')
+    def osd_ids = []
+
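+    // OSD ids are derived by concatenating the numeric suffix of the hostname with each
+    // pillar disk key, e.g. host "osd2" with disk key "1" yields osd.21 (illustrative example)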
+    for (i in pillar_disks.keySet()) {
+        osd_ids.add('osd.' + (hostname_id + i).toInteger())
+    }
+
+    // `ceph osd out <id> <id>`
+    stage('Set OSDs out') {
+        runCephCommand(saltMaster, 'ceph osd out ' + osd_ids.join(' '))
+    }
+
+    // wait for healthy cluster
+    if (common.validInputParam('WAIT_FOR_HEALTHY') && WAIT_FOR_HEALTHY.toBoolean()) {
+        stage('Waiting for healthy cluster') {
+            while (true) {
+                def health = runCephCommand(saltMaster, 'ceph health')['return'][0].values()[0]
+                if (health.contains('HEALTH_OK')) {
+                    common.infoMsg('Cluster is healthy')
+                    break;
+                }
+                sleep(60)
+            }
+        }
+    }
+
+    // stop osd daemons
+    stage('Stop OSD daemons') {
+        for (i in osd_ids) {
+            salt.runSaltProcessStep(saltMaster, HOST, 'service.stop', ['ceph-osd@' + i.replaceAll('osd.', '')],  null, true)
+        }
+    }
+
+    // `ceph osd crush remove osd.2`
+    stage('Remove OSDs from CRUSH') {
+        for (i in osd_ids) {
+            runCephCommand(saltMaster, 'ceph osd crush remove ' + i)
+        }
+    }
+
+    // remove keyring `ceph auth del osd.3`
+    stage('Remove OSD keyrings from auth') {
+        for (i in osd_ids) {
+            runCephCommand(saltMaster, 'ceph auth del ' + i)
+        }
+    }
+
+    // remove osd `ceph osd rm osd.3`
+    stage('Remove OSDs') {
+        for (i in osd_ids) {
+            runCephCommand(saltMaster, 'ceph osd rm ' + i)
+        }
+    }
+
+    // remove cluster flags
+    if (flags.size() > 0) {
+        stage('Unset cluster flags') {
+            for (flag in flags) {
+                common.infoMsg('Removing flag ' + flag)
+                runCephCommand(saltMaster, 'ceph osd unset ' + flag)
+            }
+        }
+    }
+
+}
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index 51dabd9..b8ac3b0 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -240,6 +240,12 @@
             }
         }
 
+        // install ceph
+        if (common.checkContains('STACK_INSTALL', 'ceph')) {
+            orchestrate.installCephMon(saltMaster)
+            orchestrate.installCephOsd(saltMaster)
+        }
+
         // install k8s
         if (common.checkContains('STACK_INSTALL', 'k8s')) {
 
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 0c57300..f6090c0 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -146,7 +146,6 @@
             sh "wget -O create-config-drive ${config_drive_script_url} && chmod +x create-config-drive"
             sh "wget -O user_data.sh ${user_data_script_url}"
 
-
             // load data from model
             def smc = [:]
             smc['SALT_MASTER_MINION_ID'] = "cfg.${clusterDomain}"
@@ -154,6 +153,8 @@
             smc['DEPLOY_NETWORK_GW'] = templateContext['default_context']['deploy_network_gateway']
             smc['DEPLOY_NETWORK_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
             smc['DNS_SERVERS'] = templateContext['default_context']['dns_server01']
+            smc['CICD_CONTROL_ADDRESS'] = templateContext['default_context']['cicd_control_address']
+            smc['INFRA_CONFIG_ADDRESS'] = templateContext['default_context']['infra_config_address']
 
             for (i in common.entries(smc)) {
                 sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" user_data.sh"
diff --git a/restore-cassandra.groovy b/restore-cassandra.groovy
index 89afc6c..4b554b0 100644
--- a/restore-cassandra.groovy
+++ b/restore-cassandra.groovy
@@ -58,7 +58,7 @@
 
         // wait until supervisor-database service is up
         salt.commandStatus(saltMaster, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
-
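+        // give supervisor-database a few seconds to settle before running the restore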
+        sleep(5)
         // performs restore
         salt.cmdRun(saltMaster, 'I@cassandra:backup:client', "su root -c 'salt-call state.sls cassandra'")
         salt.runSaltProcessStep(saltMaster, 'I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
@@ -73,7 +73,7 @@
 
         // wait until contrail-status is up
         salt.commandStatus(saltMaster, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
-
+
         salt.cmdRun(saltMaster, 'I@opencontrail:control', "nodetool status")
         salt.cmdRun(saltMaster, 'I@opencontrail:control', "contrail-status")
     }
diff --git a/test-service.groovy b/test-service.groovy
index cb9265c..cf1dcc6 100644
--- a/test-service.groovy
+++ b/test-service.groovy
@@ -80,13 +80,13 @@
             if (common.checkContains('TEST_DOCKER_INSTALL', 'true')) {
                 test.install_docker(saltMaster, TEST_TEMPEST_TARGET)
             }
+
             stage('Run OpenStack tests') {
                 test.runTempestTests(saltMaster, TEST_TEMPEST_IMAGE, TEST_TEMPEST_TARGET, TEST_TEMPEST_PATTERN)
             }
 
-            stage('Copy Tempest results to config node') {
-                test.copyTempestResults(saltMaster, TEST_TEMPEST_TARGET)
-            }
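+            // publish the Tempest XML report from the target node as JUnit results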
+            writeFile(file: 'report.xml', text: salt.getFileContent(saltMaster, TEST_TEMPEST_TARGET, '/root/report.xml'))
+            junit(keepLongStdio: true, testResults: 'report.xml', healthScaleFactor: Double.parseDouble(TEST_JUNIT_RATIO))
         }
 
     } catch (Throwable e) {
diff --git a/validate-cloud.groovy b/validate-cloud.groovy
new file mode 100644
index 0000000..6c25071
--- /dev/null
+++ b/validate-cloud.groovy
@@ -0,0 +1,61 @@
+/**
+ *
+ * Launch validation of the cloud
+ *
+ * Expected parameters:
+ *   SALT_MASTER_URL             URL of Salt master
+ *   SALT_MASTER_CREDENTIALS     Credentials to the Salt API
+ *
+ *   TEST_IMAGE                  Docker image link
+ *   TARGET_NODE                 Salt target for tempest node
+ *   TEMPEST_TEST_SET            Pattern of Tempest tests; if set, only matching tests are run
+ *   RUN_TEMPEST_TESTS           Set to true to run Tempest tests
+ *   RUN_RALLY_TESTS             Set to true to run Rally tests
+ *
+ */
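+
+// Hypothetical example values (adjust to your environment):
+//  TEST_IMAGE = 'registry.example.com/rally-tempest:latest'
+//  TARGET_NODE = 'cfg01*'
+//  TEMPEST_TEST_SET = 'smoke'
+//  RUN_TEMPEST_TESTS = 'true'
+//  RUN_RALLY_TESTS = 'false'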
+
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+validate = new com.mirantis.mcp.Validate()
+
+def saltMaster
+def artifacts_dir = 'validation_artifacts/'
+
+node() {
+    try {
+        stage('Initialization') {
+            saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        }
+
+        stage('Configure') {
+            validate.installDocker(saltMaster, TARGET_NODE)
+            sh "mkdir -p ${artifacts_dir}"
+            validate.runContainerConfiguration(saltMaster, TEST_IMAGE, TARGET_NODE, artifacts_dir)
+        }
+
+        stage('Run Tempest tests') {
+            if (RUN_TEMPEST_TESTS.toBoolean() == true) {
+                validate.runTempestTests(saltMaster, TARGET_NODE, artifacts_dir, TEMPEST_TEST_SET)
+            } else {
+                common.infoMsg("Skipping Tempest tests")
+            }
+        }
+
+        stage('Run Rally tests') {
+            if (RUN_RALLY_TESTS.toBoolean() == true) {
+                validate.runRallyTests(saltMaster, TARGET_NODE, artifacts_dir)
+            } else {
+                common.infoMsg("Skipping Rally tests")
+            }
+        }
+
+        stage('Collect results') {
+            archiveArtifacts artifacts: "${artifacts_dir}/*"
+        }
+    } catch (Throwable e) {
+        // If there was an error or exception thrown, the build failed
+        currentBuild.result = "FAILURE"
+        throw e
+    } finally {
+        validate.runCleanup(saltMaster, TARGET_NODE, artifacts_dir)
+    }
+}
diff --git a/xtrabackup-restore-mysql-db.groovy b/xtrabackup-restore-mysql-db.groovy
index 6401789..303c282 100644
--- a/xtrabackup-restore-mysql-db.groovy
+++ b/xtrabackup-restore-mysql-db.groovy
@@ -74,8 +74,12 @@
         salt.commandStatus(saltMaster, 'I@galera:master', 'service mysql status', 'running')
 
         salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.start', ['mysql'], null, true)
-        sleep(15)
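+        // wait until mysql reports running on the slaves; tolerate setups without slaves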
+        try {
+            salt.commandStatus(saltMaster, 'I@galera:slave', 'service mysql status', 'running')
+        } catch (Exception er) {
+            common.warningMsg('No Galera slaves are present, or starting mysql on the Galera slaves failed')
+        }
+        sleep(5)
         salt.cmdRun(saltMaster, 'I@galera:master', "su root -c 'salt-call mysql.status | grep -A1 wsrep_cluster_size'")
-
     }
 }