Merge "Fixed docker-cleanup-pipeline"
diff --git a/cloud-update.groovy b/cloud-update.groovy
index a8b2c71..66bad8a 100644
--- a/cloud-update.groovy
+++ b/cloud-update.groovy
@@ -11,8 +11,8 @@
  *   ROLLBACK_BY_REDEPLOY       Omit taking live snapshots. Rollback is planned to be done by redeployment (bool)
  *   STOP_SERVICES              Stop API services before update (bool)
  *   TARGET_UPDATES             Comma separated list of nodes to update (Valid values are cfg,ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cid,cmp,kvm,osd,gtw-physical)
- *   TARGET_ROLLBACKS           Comma separated list of nodes to update (Valid values are ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cmp,kvm,osd,gtw-physical)
- *   TARGET_MERGES              Comma separated list of nodes to update (Valid values are cfg,ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cid)
+ *   TARGET_ROLLBACKS           Comma separated list of nodes to roll back (Valid values are ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cmp,kvm,osd,gtw-physical)
+ *   TARGET_MERGES              Comma separated list of nodes to merge (Valid values are cfg,ctl,prx,msg,dbs,log,mon,mtr,ntw,nal,gtw-virtual,cmn,rgw,cid)
  *   CTL_TARGET                 Salt targeted CTL nodes (ex. ctl*)
  *   PRX_TARGET                 Salt targeted PRX nodes (ex. prx*)
  *   MSG_TARGET                 Salt targeted MSG nodes (ex. msg*)
@@ -28,11 +28,11 @@
  *   CMP_TARGET                 Salt targeted physical compute nodes (ex. cmp001*)
  *   KVM_TARGET                 Salt targeted physical KVM nodes (ex. kvm01*)
  *   CEPH_OSD_TARGET            Salt targeted physical Ceph OSD nodes (ex. osd001*)
- *   GTW_TARGET                 Salt targeted physical GTW nodes (ex. gtw01*)
+ *   GTW_TARGET                 Salt targeted physical or virtual GTW nodes (ex. gtw01*)
  *   REBOOT                     Reboot nodes after update (bool)
- *   ROLLBACK_PKG_VERSIONS      Space separated list of pkgs=versions to rollback to (ex. pkg_name1=pkg_version1 pkg_name2=pkg_version2)
- *   PURGE_PKGS                 Space separated list of pkgs=versions to be purged (ex. pkg_name1=pkg_version1 pkg_name2=pkg_version2)
- *   REMOVE_PKGS                Space separated list of pkgs=versions to be removed (ex. pkg_name1=pkg_version1 pkg_name2=pkg_version2)
+ *   ROLLBACK_PKG_VERSIONS      Space separated list of pkgs=versions to roll back to on targeted physical machines (ex. pkg_name1=pkg_version1 pkg_name2=pkg_version2)
+ *   PURGE_PKGS                 Space separated list of pkgs=versions to be purged on targeted physical machines (ex. pkg_name1=pkg_version1 pkg_name2=pkg_version2)
+ *   REMOVE_PKGS                Space separated list of pkgs=versions to be removed on targeted physical machines (ex. pkg_name1=pkg_version1 pkg_name2=pkg_version2)
  *   RESTORE_GALERA             Restore Galera DB (bool)
  *   RESTORE_CONTRAIL_DB        Restore Cassandra and Zookeeper DBs for OpenContrail (bool)
  *
@@ -460,7 +460,7 @@
     stage("Apply highstate on ${target} nodes") {
         try {
             common.retry(3){
-                //salt.enforceHighstate(pepperEnv, target)
+                salt.enforceHighstate(pepperEnv, target)
             }
         } catch (Exception e) {
             common.errorMsg(e)
@@ -539,6 +539,7 @@
         }
         nodeCount++
     }
+    salt.minionsReachable(pepperEnv, 'I@salt:master', tgt)
 }
 
 
@@ -601,6 +602,12 @@
     }
 }
 
+def saltMasterBackup(pepperEnv) {
+    def salt = new com.mirantis.mk.Salt()
+    salt.enforceState(pepperEnv, 'I@salt:master', 'backupninja')
+    salt.cmdRun(pepperEnv, 'I@salt:master', "su root -c 'backupninja -n --run /etc/backup.d/200.backup.rsync'")
+}
+
 def backupCeph(pepperEnv, tgt) {
     def salt = new com.mirantis.mk.Salt()
     salt.enforceState(pepperEnv, 'I@ceph:backup:server', 'ceph.backup')
@@ -702,24 +709,43 @@
     ]
 }
 
-def verifyAPIs(pepperEnv) {
+def verifyAPIs(pepperEnv, target) {
     def salt = new com.mirantis.mk.Salt()
     def common = new com.mirantis.mk.Common()
-    salt.cmdRun(pepperEnv, target, '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
-
+    def out = salt.cmdRun(pepperEnv, target, '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
+    if (out.toString().toLowerCase().contains('error')) {
+        common.errorMsg(out)
+        if (INTERACTIVE.toBoolean()) {
+            input message: "APIs are not working as expected. Please fix them manually before clicking on PROCEED."
+        } else {
+            throw new Exception("APIs are not working as expected")
+        }
+    }
 }
 
-def verifyGalera(pepperEnv) {
+def verifyGalera(pepperEnv, target, count=0, maxRetries=200) {
     def salt = new com.mirantis.mk.Salt()
     def common = new com.mirantis.mk.Common()
-    def out = salt.getReturnValues(salt.cmdRun(pepperEnv, 'I@galera:master', 'salt-call mysql.status | grep -A1 wsrep_cluster_size'))
-
-    if ((!out.toString().contains('wsrep_cluster_size')) || (out.toString().contains('0'))) {
-        if (INTERACTIVE.toBoolean()) {
-            input message: "Galera is not working as expected. Please check it and fix it first before clicking on PROCEED."
+    def out
+    while(count < maxRetries) {
+        try {
+            out = salt.getReturnValues(salt.cmdRun(pepperEnv, target, 'salt-call mysql.status | grep -A1 wsrep_cluster_size'))
+        } catch (Exception er) {
+            common.infoMsg(er)
+        }
+        if ((!out.toString().contains('wsrep_cluster_size')) || (out.toString().contains('0'))) {
+            count++
+            if (count == maxRetries) {
+                if (INTERACTIVE.toBoolean()) {
+                    input message: "Galera is not working as expected. Please check it and fix it first before clicking on PROCEED."
+                } else {
+                    common.errorMsg(out)
+                    throw new Exception("Galera is not working as expected")
+                }
+            }
+            sleep(time: 500, unit: 'MILLISECONDS')
         } else {
-            common.errorMsg(out)
-            throw new Exception("Galera is not working as expected")
+            break
         }
     }
 }
@@ -823,6 +849,8 @@
                     getCfgNodeProvider(pepperEnv, master)
                     if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
                         virsh.liveSnapshotPresent(pepperEnv, CFG_NODE_PROVIDER, master, SNAPSHOT_NAME)
+                    } else {
+                        saltMasterBackup(pepperEnv)
                     }
                     if (PER_NODE.toBoolean()) {
                         def targetHosts = salt.getMinionsSorted(pepperEnv, target)
@@ -904,24 +932,22 @@
                 def target = DBS_TARGET
                 if (salt.testTarget(pepperEnv, target)) {
                     backupGalera(pepperEnv)
-                    salt.runSaltProcessStep(pepperEnv, target, 'service.stop', ['keepalived'], null, true)
-                    salt.runSaltProcessStep(pepperEnv, target, 'service.stop', ['haproxy'], null, true)
                     if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
                         def generalTarget = 'dbs'
                         liveSnapshot(pepperEnv, target, generalTarget)
                     }
-                    if (REBOOT.toBoolean()) {
+                    if (REBOOT.toBoolean() || PER_NODE.toBoolean()) {
                         def targetHosts = salt.getMinionsSorted(pepperEnv, target)
-                        // one by one update
                         for (t in targetHosts) {
                             updatePkgs(pepperEnv, t)
                             highstate(pepperEnv, t)
+                            verifyGalera(pepperEnv, t)
                         }
                     } else {
                         updatePkgs(pepperEnv, target)
                         highstate(pepperEnv, target)
+                        verifyGalera(pepperEnv, target)
                     }
-                    verifyGalera(pepperEnv)
                 }
             }
 
@@ -997,7 +1023,7 @@
                         def generalTarget = 'cmn'
                         liveSnapshot(pepperEnv, target, generalTarget)
                     } else {
-                        backupCeph(pepperEnv)
+                        backupCeph(pepperEnv, target)
                     }
                     if (PER_NODE.toBoolean()) {
                         def targetHosts = salt.getMinionsSorted(pepperEnv, target)
@@ -1181,7 +1207,7 @@
             /*
                 * Rollback section
             */
-          /*  if (rollbacks.contains("ctl")) {
+          /*  if (rollbacks.contains("cfg")) {
                 if (salt.testTarget(pepperEnv, 'I@salt:master')) {
                     stage('ROLLBACK_CFG') {
                         input message: "To rollback CFG nodes run the following commands on kvm nodes hosting the CFG nodes: virsh destroy cfg0X.domain; virsh define /var/lib/libvirt/images/cfg0X.domain.xml; virsh start cfg0X.domain; virsh snapshot-delete cfg0X.domain --metadata ${SNAPSHOT_NAME}; rm /var/lib/libvirt/images/cfg0X.domain.${SNAPSHOT_NAME}.qcow2; rm /var/lib/libvirt/images/cfg0X.domain.xml; At the end restart 'docker' service on all cicd nodes and run 'linux.system.repo' Salt states on cicd nodes. After running the previous commands current pipeline job will be killed."
@@ -1234,7 +1260,7 @@
                     if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
                         rollback(pepperEnv, target, 'dbs')
                         clusterGalera(pepperEnv)
-                        verifyGalera(pepperEnv)
+                        verifyGalera(pepperEnv, target)
                     } else {
                         removeNode(pepperEnv, target, 'dbs')
                     }
@@ -1426,54 +1452,66 @@
             if (merges.contains("ctl")) {
                 if (salt.testTarget(pepperEnv, CTL_TARGET)) {
                     mergeSnapshot(pepperEnv, CTL_TARGET, 'ctl')
+                    verifyService(pepperEnv, CTL_TARGET, 'nova-api')
                 }
             }
 
             if (merges.contains("prx")) {
                 if (salt.testTarget(pepperEnv, PRX_TARGET)) {
                     mergeSnapshot(pepperEnv, PRX_TARGET, 'prx')
+                    verifyService(pepperEnv, PRX_TARGET, 'nginx')
                 }
             }
 
             if (merges.contains("msg")) {
                 if (salt.testTarget(pepperEnv, MSG_TARGET)) {
                     mergeSnapshot(pepperEnv, MSG_TARGET, 'msg')
+                    verifyService(pepperEnv, MSG_TARGET, 'rabbitmq-server')
                 }
             }
 
             if (merges.contains("dbs")) {
                 if (salt.testTarget(pepperEnv, DBS_TARGET)) {
                     mergeSnapshot(pepperEnv, DBS_TARGET, 'dbs')
+                    verifyGalera(pepperEnv, DBS_TARGET)
+                    backupGalera(pepperEnv)
                 }
             }
 
             if (merges.contains("ntw")) {
                 if (salt.testTarget(pepperEnv, NTW_TARGET)) {
                     mergeSnapshot(pepperEnv, NTW_TARGET, 'ntw')
+                    verifyContrail(pepperEnv, NTW_TARGET)
+                    backupContrail(pepperEnv)
                 }
             }
 
             if (merges.contains("nal")) {
                 if (salt.testTarget(pepperEnv, NAL_TARGET)) {
                     mergeSnapshot(pepperEnv, NAL_TARGET, 'nal')
+                    verifyContrail(pepperEnv, NAL_TARGET)
                 }
             }
 
             if (merges.contains("gtw-virtual")) {
                 if (salt.testTarget(pepperEnv, GTW_TARGET)) {
                     mergeSnapshot(pepperEnv, GTW_TARGET, 'gtw')
+                    verifyService(pepperEnv, GTW_TARGET, 'neutron-dhcp-agent')
                 }
             }
 
             if (merges.contains("cmn")) {
                 if (salt.testTarget(pepperEnv, CMN_TARGET)) {
                     mergeSnapshot(pepperEnv, CMN_TARGET, 'cmn')
+                    verifyCeph(pepperEnv, CMN_TARGET, 'mon@')
+                    backupCeph(pepperEnv, CMN_TARGET)
                 }
             }
 
             if (merges.contains("rgw")) {
                 if (salt.testTarget(pepperEnv, RGW_TARGET)) {
                     mergeSnapshot(pepperEnv, RGW_TARGET, 'rgw')
+                    verifyCeph(pepperEnv, RGW_TARGET, 'radosgw@rgw.')
                 }
             }
 
@@ -1498,15 +1536,18 @@
             if (merges.contains("cid")) {
                 if (salt.testTarget(pepperEnv, CID_TARGET)) {
                     mergeSnapshot(pepperEnv, CID_TARGET, 'cid')
+                    verifyService(pepperEnv, CID_TARGET, 'docker')
                 }
             }
 
             if (RESTORE_GALERA.toBoolean()) {
                 restoreGalera(pepperEnv)
+                verifyGalera(pepperEnv, DBS_TARGET)
             }
 
             if (RESTORE_CONTRAIL_DB.toBoolean()) {
                 restoreContrailDb(pepperEnv)
+                // verification is already present in restore pipelines
             }
 
         } catch (Throwable e) {