Merge "define api version during openstack-env-creation"
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..f8b92c3
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+.gradle
+build
diff --git a/README.rst b/README.rst
index 591ce2c..e60adf3 100644
--- a/README.rst
+++ b/README.rst
@@ -10,3 +10,13 @@
 
 Unless specifically noted, all parts of this project are licensed 
 under the Apache 2.0 `license <https://github.com/Mirantis/mk-pipelines/LICENSE>`_.
+
+
+Testing
+========
+
+Basic gradle test can be executed by the following command (where 172.18.176.4 is the address of a DNS server capable of resolving the artifacts server)
+
+.. code:: bash
+
+  docker run --rm --dns 172.18.176.4 -v $PWD:/usr/bin/app:z niaquinto/gradle check
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index 2d186ec..e35e815 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -120,8 +120,8 @@
                         stage('Launch new Heat stack') {
                             // create stack
                             envParams = [
-                                'instance_zone': HEAT_STACK_ZONE,
-                                'public_net': HEAT_STACK_PUBLIC_NET
+                                'cluster_zone': HEAT_STACK_ZONE,
+                                'cluster_public_net': HEAT_STACK_PUBLIC_NET
                             ]
                             openstack.createHeatStack(openstackCloud, STACK_NAME, STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, venv, false)
                         }
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 83b7a99..5ddea91 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -15,6 +15,7 @@
 git = new com.mirantis.mk.Git()
 python = new com.mirantis.mk.Python()
 saltModelTesting = new com.mirantis.mk.SaltModelTesting()
+ssh = new com.mirantis.mk.Ssh()
 
 timestamps {
     node("python&&docker") {
@@ -38,8 +39,6 @@
                 user = env.BUILD_USER_ID
             }
 
-
-
             currentBuild.description = clusterName
             print("Using context:\n" + COOKIECUTTER_TEMPLATE_CONTEXT)
 
@@ -53,7 +52,7 @@
                     sh "git init"
 
                     if (SHARED_RECLASS_URL != '') {
-                        sh "git submodule add ${SHARED_RECLASS_URL} '${modelEnv}/classes/system'"
+                        ssh.agentSh "git submodule add ${SHARED_RECLASS_URL} '${modelEnv}/classes/system'"
                         git.commitGitChanges(modelEnv, "Added new shared reclass submodule", "${user}@localhost", "${user}")
                     }
                 }
@@ -61,18 +60,34 @@
 
             def productList = ["infra", "cicd", "opencontrail", "kubernetes", "openstack", "stacklight"]
             for (product in productList) {
-                def stagename = (product == "infra") ? "Generate base infrastructure" : "Generate product ${product}"
-                stage(stagename) {
-                    if (product == "infra" || (templateContext.default_context["${product}_enabled"]
-                        && templateContext.default_context["${product}_enabled"].toBoolean())) {
-                        templateDir = "${templateEnv}/cluster_product/${product}"
-                        templateOutputDir = "${env.WORKSPACE}/template/output/${product}"
-                        sh "mkdir -p ${templateOutputDir}"
-                        sh "mkdir -p ${outputDestination}"
-                        python.setupCookiecutterVirtualenv(cutterEnv)
-                        python.buildCookiecutterTemplate(templateDir, COOKIECUTTER_TEMPLATE_CONTEXT, templateOutputDir, cutterEnv, templateBaseDir)
-                        sh "mv -v ${templateOutputDir}/${clusterName}/* ${outputDestination}"
+
+                // get templateOutputDir and productDir
+                if (product.startsWith("stacklight")) {
+                    templateOutputDir = "${env.WORKSPACE}/output/stacklight"
+                    try {
+                        productDir = "stacklight" + templateContext.default_context['stacklight_version']
+                    } catch (Throwable e) {
+                        productDir = "stacklight1"
                     }
+                } else {
+                    templateOutputDir = "${env.WORKSPACE}/output/${product}"
+                    productDir = product
+                }
+
+                if (product == "infra" || (templateContext.default_context["${product}_enabled"]
+                    && templateContext.default_context["${product}_enabled"].toBoolean())) {
+
+                    templateDir = "${templateEnv}/cluster_product/${productDir}"
+                    common.infoMsg("Generating product " + product + " from " + templateDir + " to " + templateOutputDir)
+
+                    sh "rm -rf ${templateOutputDir} || true"
+                    sh "mkdir -p ${templateOutputDir}"
+                    sh "mkdir -p ${outputDestination}"
+
+                    python.buildCookiecutterTemplate(templateDir, content, templateOutputDir, cutterEnv, templateBaseDir)
+                    sh "mv -v ${templateOutputDir}/${clusterName}/* ${outputDestination}"
+                } else {
+                    common.warningMsg("Product " + product + " is disabled")
                 }
             }
 
diff --git a/opencontrail-upgrade.groovy b/opencontrail-upgrade.groovy
index 02c2bc1..a01affe 100644
--- a/opencontrail-upgrade.groovy
+++ b/opencontrail-upgrade.groovy
@@ -34,15 +34,20 @@
 def CMP_PKGS = 'contrail-lib contrail-nodemgr contrail-utils contrail-vrouter-agent contrail-vrouter-utils python-contrail python-contrail-vrouter-api python-opencontrail-vrouter-netns contrail-vrouter-dkms'
 def KERNEL_MODULE_RELOAD = 'service supervisor-vrouter stop;ifdown vhost0;rmmod vrouter;modprobe vrouter;ifup vhost0;service supervisor-vrouter start;'
 
-
 def void runCommonCommands(target, command, args, check, salt, saltMaster, common) {
 
     out = salt.runSaltCommand(saltMaster, 'local', ['expression': target, 'type': 'compound'], command, null, args, null)
     salt.printSaltCommandResult(out)
-    sleep(60)
-    out = salt.runSaltCommand(saltMaster, 'local', ['expression': target, 'type': 'compound'], command, null, check, null)
-    salt.printSaltCommandResult(out)
-    input message: "Please check the output of \'${check}\' and continue if it is correct."
+    // wait until $check is in correct state
+    if ( check == "nodetool status" ) {
+        salt.commandStatus(saltMaster, target, check, 'Status=Up')  
+    } else if ( check == "contrail-status" ) {
+        salt.commandStatus(saltMaster, target, "${check} | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)  
+    }
+
+    //out = salt.runSaltCommand(saltMaster, 'local', ['expression': target, 'type': 'compound'], command, null, check, null)
+    //salt.printSaltCommandResult(out)
+    //input message: "Please check the output of \'${check}\' and continue if it is correct."
 }
 
 timestamps {
@@ -142,21 +147,21 @@
                 args = 'apt install contrail-database -y;'
                 check = 'nodetool status'
 
-                // ntw01
+                // nal01
                 runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, saltMaster, common)
-                // ntw02
+                // nal02
                 runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, saltMaster, common)
-                // ntw03
+                // nal03
                 runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, saltMaster, common)
 
                 args = "apt install -o Dpkg::Options::=\"--force-confold\" ${ANALYTIC_PKGS} -y --force-yes;"
                 check = 'contrail-status'
 
-                // ntw01
+                // nal01
                 runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, saltMaster, common)
-                // ntw02
+                // nal02
                 runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, saltMaster, common)
-                // ntw03
+                // nal03
                 runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, saltMaster, common)
 
                 try {
@@ -221,7 +226,9 @@
                     }
 
                     salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
-                    sleep(10)
+
+                    //sleep(10)
+                    salt.commandStatus(saltMaster, targetLiveSubset, "${check} | grep -v == | grep -v active", null, false)
 
                     out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, check, null)
                     salt.printSaltCommandResult(out)
@@ -257,7 +264,8 @@
                     }
 
                     salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
-                    sleep(10)
+                    //sleep(10)
+                    salt.commandStatus(saltMaster, targetLiveAll, "${check} | grep -v == | grep -v active", null, false)
 
                     out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, check, null)
                     salt.printSaltCommandResult(out)
@@ -349,21 +357,21 @@
                 args = 'apt install contrail-database -y --force-yes;'
                 check = 'nodetool status'
 
-                // ntw01
+                // nal01
                 runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, saltMaster, common)
-                // ntw02
+                // nal02
                 runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, saltMaster, common)
-                // ntw03
+                // nal03
                 runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, saltMaster, common)
 
                 args = "apt install -o Dpkg::Options::=\"--force-confold\" ${ANALYTIC_PKGS} -y --force-yes;"
                 check = 'contrail-status'
 
-                // ntw01
+                // nal01
                 runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, saltMaster, common)
-                // ntw02
+                // nal02
                 runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, saltMaster, common)
-                // ntw03
+                // nal03
                 runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, saltMaster, common)
 
                 try {
@@ -428,7 +436,8 @@
                     }
 
                     salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
-                    sleep(10)
+                    //sleep(10)
+                    salt.commandStatus(saltMaster, targetLiveSubset, "${check} | grep -v == | grep -v active", null, false)
 
                     out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, check, null)
                     salt.printSaltCommandResult(out)
@@ -465,7 +474,9 @@
                     }
 
                     salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
-                    sleep(10)
+
+                    //sleep(10)
+                    salt.commandStatus(saltMaster, targetLiveAll, "${check} | grep -v == | grep -v active", null, false)
 
                     out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, check, null)
                     salt.printSaltCommandResult(out)
diff --git a/openstack-control-upgrade.groovy b/openstack-control-upgrade.groovy
index 1dfc4cb..7bf646c 100644
--- a/openstack-control-upgrade.groovy
+++ b/openstack-control-upgrade.groovy
@@ -81,7 +81,8 @@
                 // salt 'kvm02*' state.sls salt.control
                 salt.enforceState(saltMaster, "${upgNodeProvider}", 'salt.control')
 
-                sleep(70)
+                // wait until upg node is registered in salt-key
+                salt.minionPresent(saltMaster, 'I@salt:master', 'upg01')
 
                 // salt '*' saltutil.refresh_pillar
                 salt.runSaltProcessStep(saltMaster, 'upg*', 'saltutil.refresh_pillar', [], null, true)
@@ -288,7 +289,13 @@
                 // salt 'kvm*' state.sls salt.control
                 salt.enforceState(saltMaster, 'I@salt:control', 'salt.control')
 
-                sleep(70)
+                // wait until ctl and prx nodes are registered in salt-key
+                salt.minionPresent(saltMaster, 'I@salt:master', 'ctl01')
+                salt.minionPresent(saltMaster, 'I@salt:master', 'ctl02')
+                salt.minionPresent(saltMaster, 'I@salt:master', 'ctl03')
+                salt.minionPresent(saltMaster, 'I@salt:master', 'prx01')
+                salt.minionPresent(saltMaster, 'I@salt:master', 'prx02')
+
 
                 // salt '*' saltutil.refresh_pillar
                 salt.runSaltProcessStep(saltMaster, '*', 'saltutil.refresh_pillar', [], null, true)
@@ -410,7 +417,10 @@
                     salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
                     salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
                     salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.start', ['mysql'], null, true)
-                    sleep(5)
+
+                    // wait until mysql service on galera master is up
+                    salt.commandStatus(saltMaster, 'I@galera:master', 'service mysql status', 'running')
+
                     salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.start', ['mysql'], null, true)
                     //
 
@@ -559,7 +569,10 @@
                 salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
                 salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
                 salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.start', ['mysql'], null, true)
-                sleep(5)
+
+                // wait until mysql service on galera master is up
+                salt.commandStatus(saltMaster, 'I@galera:master', 'service mysql status', 'running')
+
                 salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.start', ['mysql'], null, true)
                 //
 
@@ -572,7 +585,12 @@
                 // salt 'cmp*' cmd.run 'service nova-compute restart'
                 salt.runSaltProcessStep(saltMaster, 'cmp*', 'service.restart', ['nova-compute'], null, true)
 
-                sleep(70)
+                // wait until ctl and prx nodes are registered in salt-key
+                salt.minionPresent(saltMaster, 'I@salt:master', 'ctl01')
+                salt.minionPresent(saltMaster, 'I@salt:master', 'ctl02')
+                salt.minionPresent(saltMaster, 'I@salt:master', 'ctl03')
+                salt.minionPresent(saltMaster, 'I@salt:master', 'prx01')
+                salt.minionPresent(saltMaster, 'I@salt:master', 'prx02')
 
                 salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['nova-conductor'], null, true)
                 salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['nova-scheduler'], null, true)
diff --git a/restore-cassandra.groovy b/restore-cassandra.groovy
index af2016e..4a9c89e 100644
--- a/restore-cassandra.groovy
+++ b/restore-cassandra.groovy
@@ -57,23 +57,26 @@
 
             salt.runSaltProcessStep(saltMaster, 'I@cassandra:backup:client', 'service.start', ['supervisor-database'], null, true)
 
-            sleep(30)
+            // wait until supervisor-database service is up
+            salt.commandStatus(saltMaster, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
 
             // performs restore
             salt.cmdRun(saltMaster, 'I@cassandra:backup:client', "su root -c 'salt-call state.sls cassandra'")
             salt.runSaltProcessStep(saltMaster, 'I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
             salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control and not I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
-            salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control and not I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
 
-            sleep(60)
+            // wait until supervisor-database service is up
+            salt.commandStatus(saltMaster, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
+            salt.commandStatus(saltMaster, 'I@opencontrail:control and not I@cassandra:backup:client', 'service supervisor-database status', 'running')
+            sleep(5)
+
             salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.restart', ['supervisor-database'], null, true)
 
-            sleep(50)
+            // wait until contrail-status is up
+            salt.commandStatus(saltMaster, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
+            
             salt.cmdRun(saltMaster, 'I@opencontrail:control', "nodetool status")
             salt.cmdRun(saltMaster, 'I@opencontrail:control', "contrail-status")
         }
     }
 }
-
-
-
diff --git a/restore-zookeeper.groovy b/restore-zookeeper.groovy
index 6b4822a..3e5da6c 100644
--- a/restore-zookeeper.groovy
+++ b/restore-zookeeper.groovy
@@ -42,7 +42,10 @@
             } catch (Exception er) {
                 common.warningMsg('Zookeeper service already stopped')
             }
-            sleep(5)
+            //sleep(5)
+            // wait until zookeeper service is down
+            salt.commandStatus(saltMaster, 'I@opencontrail:control', 'service zookeeper status', 'stop')
+
             try {
                 salt.cmdRun(saltMaster, 'I@opencontrail:control', "mkdir -p /root/zookeeper/zookeeper.bak")
             } catch (Exception er) {
@@ -73,7 +76,9 @@
             salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.start', ['supervisor-config'], null, true)
             salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.start', ['supervisor-control'], null, true)
 
-            sleep(50)
+            // wait until contrail-status is up
+            salt.commandStatus(saltMaster, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
+            
             salt.cmdRun(saltMaster, 'I@opencontrail:control', "ls /var/lib/zookeeper/version-2")
             try {
                 salt.cmdRun(saltMaster, 'I@opencontrail:control', "echo stat | nc localhost 2181")
diff --git a/test-cookiecutter-reclass.groovy b/test-cookiecutter-reclass.groovy
index fa98b9a..cff994d 100644
--- a/test-cookiecutter-reclass.groovy
+++ b/test-cookiecutter-reclass.groovy
@@ -39,19 +39,38 @@
     def templateOutputDir = templateBaseDir
     sh "rm -rf ${generatedModel} || true"
 
-    println "Generating model from context ${modelFile}"
+    common.infoMsg("Generating model from context ${modelFile}")
 
     def productList = ["infra", "cicd", "opencontrail", "kubernetes", "openstack", "stacklight"]
     for (product in productList) {
+
+        // get templateOutputDir and productDir
+        if (product.startsWith("stacklight")) {
+            templateOutputDir = "${env.WORKSPACE}/output/stacklight"
+            try {
+                productDir = "stacklight" + templateContext.default_context['stacklight_version']
+            } catch (Throwable e) {
+                productDir = "stacklight1"
+            }
+        } else {
+            templateOutputDir = "${env.WORKSPACE}/output/${product}"
+            productDir = product
+        }
+
         if (product == "infra" || (templateContext.default_context["${product}_enabled"]
             && templateContext.default_context["${product}_enabled"].toBoolean())) {
-            templateDir = "${templateEnv}/cluster_product/${product}"
-            templateOutputDir = "${env.WORKSPACE}/output/${product}"
+
+            templateDir = "${templateEnv}/cluster_product/${productDir}"
+            common.infoMsg("Generating product " + product + " from " + templateDir + " to " + templateOutputDir)
+
             sh "rm -rf ${templateOutputDir} || true"
             sh "mkdir -p ${templateOutputDir}"
             sh "mkdir -p ${outputDestination}"
+
             python.buildCookiecutterTemplate(templateDir, content, templateOutputDir, cutterEnv, templateBaseDir)
             sh "mv -v ${templateOutputDir}/${clusterName}/* ${outputDestination}"
+        } else {
+            common.warningMsg("Product " + product + " is disabled")
         }
     }
     generateSaltMaster(generatedModel, clusterDomain, clusterName)
diff --git a/test-salt-formulas-pipeline.groovy b/test-salt-formulas-pipeline.groovy
index d63a78c..0a44247 100644
--- a/test-salt-formulas-pipeline.groovy
+++ b/test-salt-formulas-pipeline.groovy
@@ -29,15 +29,6 @@
 
 node("python") {
   try{
-    stage("stop old tests"){
-      if (gerritRef) {
-        def runningTestBuildNums = _getRunningTriggeredTestsBuildNumbers(env["JOB_NAME"], GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER)
-        for(int i=0; i<runningTestBuildNums.size(); i++){
-          common.infoMsg("Old test with run number ${runningTestBuildNums[i]} found, stopping")
-          Jenkins.instance.getItemByFullName(env["JOB_NAME"]).getBuildByNumber(runningTestBuildNums[i]).finish(hudson.model.Result.ABORTED, new java.io.IOException("Aborting build"));
-        }
-      }
-    }
     stage("checkout") {
       if (gerritRef) {
         // job is triggered by Gerrit
@@ -140,15 +131,3 @@
   }
 }
 
-@NonCPS
-def _getRunningTriggeredTestsBuildNumbers(jobName, gerritChangeNumber, excludePatchsetNumber){
-  def gerrit = new com.mirantis.mk.Gerrit()
-  def jenkinsUtils = new com.mirantis.mk.JenkinsUtils()
-  def triggeredBuilds= gerrit.getGerritTriggeredBuilds(jenkinsUtils.getJobRunningBuilds(jobName), gerritChangeNumber, excludePatchsetNumber)
-  def buildNums =[]
-  for(int i=0;i<triggeredBuilds.size();i++){
-      buildNums.add(triggeredBuilds[i].number)
-  }
-  return buildNums
-}
-
diff --git a/test-salt-models-pipeline.groovy b/test-salt-models-pipeline.groovy
index 79a6662..cf37b30 100644
--- a/test-salt-models-pipeline.groovy
+++ b/test-salt-models-pipeline.groovy
@@ -46,15 +46,6 @@
 def checkouted = false
 node("python") {
   try{
-    stage("stop old tests"){
-      if (gerritRef) {
-        def runningTestBuildNums = _getRunningTriggeredTestsBuildNumbers(env["JOB_NAME"], GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER)
-        for(int i=0; i<runningTestBuildNums.size(); i++){
-          common.infoMsg("Old test with run number ${runningTestBuildNums[i]} found, stopping")
-          Jenkins.instance.getItemByFullName(env["JOB_NAME"]).getBuildByNumber(runningTestBuildNums[i]).finish(hudson.model.Result.ABORTED, new java.io.IOException("Aborting build"));
-        }
-      }
-    }
     stage("checkout") {
       if (gerritRef) {
         // job is triggered by Gerrit
@@ -126,14 +117,3 @@
   }
 }
 
-@NonCPS
-def _getRunningTriggeredTestsBuildNumbers(jobName, gerritChangeNumber, excludePatchsetNumber){
-  def gerrit = new com.mirantis.mk.Gerrit()
-  def jenkinsUtils = new com.mirantis.mk.JenkinsUtils()
-  def triggeredBuilds= gerrit.getGerritTriggeredBuilds(jenkinsUtils.getJobRunningBuilds(jobName), gerritChangeNumber, excludePatchsetNumber)
-  def buildNums =[]
-  for (int i=0; i<triggeredBuilds.size(); i++) {
-      buildNums.add(triggeredBuilds[i].number)
-  }
-  return buildNums
-}
\ No newline at end of file
diff --git a/xtrabackup-restore-mysql-db.groovy b/xtrabackup-restore-mysql-db.groovy
index c363ce2..345f280 100644
--- a/xtrabackup-restore-mysql-db.groovy
+++ b/xtrabackup-restore-mysql-db.groovy
@@ -70,7 +70,10 @@
             salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
             salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
             salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.start', ['mysql'], null, true)
-            sleep(5)
+
+            // wait until mysql service on galera master is up
+            salt.commandStatus(saltMaster, 'I@galera:master', 'service mysql status', 'running')
+
             salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.start', ['mysql'], null, true)
             sleep(15)
             salt.cmdRun(saltMaster, 'I@galera:master', "su root -c 'salt-call mysql.status | grep -A1 wsrep_cluster_size'")