Merge "[CVP] Fix DEBUG_MODE for cvp-tempest" into release/proposed/2019.2.0
diff --git a/galera-cluster-verify-restore.groovy b/galera-cluster-verify-restore.groovy
index d75e3ea..e11a547 100644
--- a/galera-cluster-verify-restore.groovy
+++ b/galera-cluster-verify-restore.groovy
@@ -7,6 +7,7 @@
  *   ASK_CONFIRMATION           Ask confirmation for restore
  *   VERIFICATION_RETRIES       Number of retries to verify the restoration.
  *   CHECK_TIME_SYNC            Set to true to check time synchronization across selected nodes.
+ *   RESTORE_TYPE               Restoration method: ONLY_RESTORE or BACKUP_AND_RESTORE (run a backup before the restore)
  *
 **/
 
@@ -16,6 +17,9 @@
 def python = new com.mirantis.mk.Python()
 def pepperEnv = "pepperEnv"
 def resultCode = 99
+def restoreType = env.RESTORE_TYPE
+def runRestoreDb = false
+def runBackupDb = false
 
 askConfirmation = (env.getProperty('ASK_CONFIRMATION') ?: true).toBoolean()
 checkTimeSync = (env.getProperty('CHECK_TIME_SYNC') ?: true).toBoolean()
@@ -25,31 +29,42 @@
 } else {
     verificationRetries = 5
 }
+if ("BACKUP_AND_RESTORE".equals(restoreType) || "ONLY_RESTORE".equals(restoreType)) {
+    runRestoreDb = true
+}
+if ("BACKUP_AND_RESTORE".equals(restoreType)) {
+    runBackupDb = true
+}
 
 timeout(time: 12, unit: 'HOURS') {
     node() {
         stage('Setup virtualenv for Pepper') {
             python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
         }
-        stage('Verify status')
+        stage('Verify status') {
             resultCode = galera.verifyGaleraStatus(pepperEnv, false, checkTimeSync)
-        stage('Restore') {
             if (resultCode == 128) {
                 common.errorMsg("Unable to connect to Galera Master. Trying slaves...")
                 resultCode = galera.verifyGaleraStatus(pepperEnv, true, checkTimeSync)
                 if (resultCode == 129) {
-                    common.errorMsg("Unable to obtain Galera slave minions list". "Without fixing this issue, pipeline cannot continue in verification and restoration.")
+                    common.errorMsg("Unable to obtain Galera slave minions list. Without fixing this issue, the pipeline cannot continue with verification, backup and restoration.")
                     currentBuild.result = "FAILURE"
                     return
                 } else if (resultCode == 130) {
-                    common.errorMsg("Neither master or slaves are reachable. Without fixing this issue, pipeline cannot continue in verification and restoration.")
+                    common.errorMsg("Neither master nor slaves are reachable. Without fixing this issue, the pipeline cannot continue with verification, backup and restoration.")
                     currentBuild.result = "FAILURE"
                     return
                 }
             }
             if (resultCode == 131) {
-                common.errorMsg("Time desynced - Click proceed when the issue is fixed or abort.")
+                common.errorMsg("Time is out of sync across the selected nodes - Please fix this issue and rerun the pipeline.")
                 currentBuild.result = "FAILURE"
+                return
+            }
+            if (resultCode == 140 || resultCode == 141) {
+                common.errorMsg("Disk utilization check failed - Please fix this issue and rerun the pipeline.")
+                currentBuild.result = "FAILURE"
+                return
             }
             if (resultCode == 1) {
                 if(askConfirmation){
@@ -59,23 +74,37 @@
                 }
             } else if (resultCode > 1) {
                 if(askConfirmation){
-                    common.warningMsg("There's something wrong with the cluster, do you want to run a restore?")
+                    common.warningMsg("There's something wrong with the cluster, do you want to continue with backup and/or restore?")
                 } else {
-                    common.warningMsg("There's something wrong with the cluster, try to restore.")
+                    common.warningMsg("There's something wrong with the cluster, proceeding with backup and/or restore.")
                 }
             } else {
                 if(askConfirmation){
-                  common.warningMsg("There seems to be everything alright with the cluster, do you still want to run a restore?")
+                  common.warningMsg("Everything seems to be alright with the cluster, do you still want to continue with backup and/or restore?")
                 } else {
-                  common.warningMsg("There seems to be everything alright with the cluster, do nothing")
+                  common.warningMsg("Everything seems to be alright with the cluster, no backup or restoration will be done.")
+                  currentBuild.result = "SUCCESS"
+                  return
                 }
             }
+        }
+        if (runBackupDb) {
+            stage('Backup') {
+                deployBuild = build( job: "galera-database-backup-pipeline", parameters: [
+                    [$class: 'StringParameterValue', name: 'SALT_MASTER_URL', value: SALT_MASTER_URL],
+                    [$class: 'StringParameterValue', name: 'SALT_MASTER_CREDENTIALS', value: SALT_MASTER_CREDENTIALS],
+                    [$class: 'StringParameterValue', name: 'OVERRIDE_BACKUP_NODE', value: "none"],
+                    ]
+                )
+            }
+        }
+        stage('Restore') {
             if(askConfirmation){
               input message: "Are you sure you want to run a restore? Click to confirm"
             }
             try {
                 if((!askConfirmation && resultCode > 0) || askConfirmation){
-                  galera.restoreGaleraDb(pepperEnv)
+                  galera.restoreGaleraCluster(pepperEnv, runRestoreDb)
                 }
             } catch (Exception e) {
                 common.errorMsg("Restoration process has failed.")
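
The new RESTORE_TYPE parameter drives two flags: runBackupDb (trigger the galera-database-backup-pipeline job before restoring) and runRestoreDb (passed to galera.restoreGaleraCluster to request a database restore). A standalone Groovy sketch of that mapping follows; the resolveRestoreFlags helper and the ANYTHING_ELSE value are illustrative only, not part of the pipeline:

def resolveRestoreFlags(String restoreType) {
    // BACKUP_AND_RESTORE enables both the backup job and the database restore;
    // ONLY_RESTORE enables just the database restore; other values enable neither flag.
    def runBackupDb  = ('BACKUP_AND_RESTORE' == restoreType)
    def runRestoreDb = (restoreType in ['BACKUP_AND_RESTORE', 'ONLY_RESTORE'])
    return [backup: runBackupDb, restore: runRestoreDb]
}

assert resolveRestoreFlags('BACKUP_AND_RESTORE') == [backup: true, restore: true]
assert resolveRestoreFlags('ONLY_RESTORE') == [backup: false, restore: true]
assert resolveRestoreFlags('ANYTHING_ELSE') == [backup: false, restore: false]
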
diff --git a/openstack-control-upgrade.groovy b/openstack-control-upgrade.groovy
index 4ec98da..5929390 100644
--- a/openstack-control-upgrade.groovy
+++ b/openstack-control-upgrade.groovy
@@ -32,6 +32,8 @@
  * No service downtime
  * No workload downtime''',
     'Launched actions': '''
+ * Refresh pillars on the target nodes.
+ * Apply the 'linux.system.repo' state on the target nodes.
  * Verify API, perform basic CRUD operations for services.
  * Verify that compute/neutron agents on hosts are up.
 * Run built-in service checkers like keystone-manage doctor or nova-status upgrade.''',
@@ -153,6 +155,8 @@
     for (target in upgradeTargets){
       common.stageWrapper(upgradeStageMap, "Pre upgrade", target, interactive) {
         openstack.runOpenStackUpgradePhase(env, target, 'pre')
+        salt.runSaltProcessStep(env, target, 'saltutil.refresh_pillar', [], null, true)
+        salt.enforceState(env, target, 'linux.system.repo')
         openstack.runOpenStackUpgradePhase(env, target, 'verify')
       }
     }
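
The two calls added to the "Pre upgrade" stage above reappear in openstack-data-upgrade.groovy and openstack-rabbitmq-upgrade.groovy below. On their own they refresh pillar data on the target minions and then enforce the linux.system.repo state, so the package repositories are reconfigured before any upgrade phase runs; refreshing pillars first matters because the repo state is rendered from pillar data. A minimal sketch of the step, assuming (as in these pipelines) that salt is a com.mirantis.mk.Salt instance, env is the Pepper/Salt connection set up earlier, and target is the node selector handled by the surrounding loop:

// Pre-upgrade repository refresh shared by the three upgrade pipelines in this change.
salt.runSaltProcessStep(env, target, 'saltutil.refresh_pillar', [], null, true)  // re-render pillar data on the minions
salt.enforceState(env, target, 'linux.system.repo')                              // apply the repository configuration state before upgrading
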
diff --git a/openstack-data-upgrade.groovy b/openstack-data-upgrade.groovy
index ef6a527..e768564 100644
--- a/openstack-data-upgrade.groovy
+++ b/openstack-data-upgrade.groovy
@@ -31,6 +31,8 @@
  * No service downtime
  * No workload downtime''',
     'Launched actions': '''
+ * Refresh pillars on the target nodes.
+ * Apply the 'linux.system.repo' state on the target nodes.
  * Verify API, perform basic CRUD operations for services.
  * Verify that compute/neutron agents on hosts are up.
 * Run built-in service checkers like keystone-manage doctor or nova-status upgrade.''',
@@ -138,6 +140,8 @@
     for (target in targetNodes){
       common.stageWrapper(upgradeStageMap, "Pre upgrade", target, interactive) {
         openstack.runOpenStackUpgradePhase(env, target, 'pre')
+        salt.runSaltProcessStep(env, target, 'saltutil.refresh_pillar', [], null, true)
+        salt.enforceState(env, target, 'linux.system.repo')
         openstack.runOpenStackUpgradePhase(env, target, 'verify')
       }
 
diff --git a/openstack-rabbitmq-upgrade.groovy b/openstack-rabbitmq-upgrade.groovy
index aabdafc..bc252da 100644
--- a/openstack-rabbitmq-upgrade.groovy
+++ b/openstack-rabbitmq-upgrade.groovy
@@ -29,6 +29,8 @@
  * No service downtime
  * No workload downtime''',
     'Launched actions': '''
+ * Refresh pillars on the target nodes.
+ * Apply the 'linux.system.repo' state on the target nodes.
  * Verify API, perform basic CRUD operations for services.
  * Verify rabbitmq is running and operational.''',
     'State result': 'Basic checks around services API are passed.'
@@ -114,6 +116,8 @@
     for (target in upgradeTargets){
       common.stageWrapper(upgradeStageMap, "Pre upgrade", target, interactive) {
         openstack.runOpenStackUpgradePhase(env, target, 'pre')
+        salt.runSaltProcessStep(env, target, 'saltutil.refresh_pillar', [], null, true)
+        salt.enforceState(env, target, 'linux.system.repo')
         openstack.runOpenStackUpgradePhase(env, target, 'verify')
       }
     }