Move Galera methods to a new separate class
- add getWsrepParameters method
- fix variable naming
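
Example call site after the move (a minimal sketch; 'pepperEnv' and the flag values are
illustrative and assumed to be provided by the calling pipeline):

    def galera = new com.mirantis.mk.Galera()
    // verify cluster health via the Galera master, including the time sync check;
    // the non-zero return codes are described in Galera.verifyGaleraStatus()
    def resultCode = galera.verifyGaleraStatus(pepperEnv, false, true)

The old entry points in com.mirantis.mk.Openstack are kept as thin wrappers that only log a
deprecation warning and delegate to the new class.
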
Related: PROD-27561 (PROD:27561)
Change-Id: I5184f995f5cd979605c12d2c9794b6a610902e88
(cherry picked from commit 8f0f3ac38df0a089f8bd52bd68be78ce7594452b)
(cherry picked from commit 5df75785c01ad9965f9ef21461b8007a0538b922)
diff --git a/src/com/mirantis/mk/Galera.groovy b/src/com/mirantis/mk/Galera.groovy
new file mode 100644
index 0000000..96777a7
--- /dev/null
+++ b/src/com/mirantis/mk/Galera.groovy
@@ -0,0 +1,328 @@
+package com.mirantis.mk
+
+/**
+ *
+ * Galera functions
+ *
+ */
+
+
+/**
+ * Returns parameters from mysql.status output on given target node
+ *
+ * @param env Salt Connection object or pepperEnv
+ * @param target Targeted node
+ * @param parameters Parameters to be returned (String or list of Strings). If no parameters are provided (empty list), all of them are returned.
+ * @param print Boolean value to enable printing of the retrieved parameters
+ * @return result List of parameters with their values
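+ *
+ * Example (illustrative target and parameter names, assuming a pepperEnv connection object):
+ *   getWsrepParameters(pepperEnv, 'I@galera:master', ['wsrep_cluster_size', 'wsrep_cluster_status'], true)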
+ */
+
+def getWsrepParameters(env, target, parameters=[], print=false) {
+    def salt = new com.mirantis.mk.Salt()
+    def common = new com.mirantis.mk.Common()
+    result = []
+    out = salt.runSaltProcessStep(env, "${target}", "mysql.status", [], null, false)
+    outlist = out['return'][0]
+    resultYaml = outlist.get(outlist.keySet()[0]).sort()
+    if (print) {
+        common.prettyPrint(resultYaml)
+    }
+    if (parameters instanceof String) {
+        value = resultYaml[parameters]
+        if (value instanceof String && value.isBigDecimal()) {
+            value = value.toBigDecimal()
+        }
+        result = [(parameters): value]
+    } else {
+        if (parameters == []) {
+            result = resultYaml
+        } else {
+            for (key in parameters) {
+                value = resultYaml[key]
+                if (value instanceof String && value.isBigDecimal()) {
+                    value = value.toBigDecimal()
+                }
+                result << [(key): value]
+            }
+        }
+    }
+ return result
+}
+
+/**
+ * Verifies Galera database
+ *
+ * This function checks for the Galera master, tests the connection and, if it is reachable, obtains the
+ *   result of the Salt mysql.status function. The result is then parsed, validated and printed to the user.
+ *
+ * @param env Salt Connection object or pepperEnv
+ * @param slave Boolean value to enable slave checking (if the master is unreachable)
+ * @param checkTimeSync Boolean value to enable time sync check
+ * @return resultCode Integer value used to determine the exit status in the calling function
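+ *                     (0 - no issues found; 1 - status unknown or mysql.status output could not be parsed;
+ *                     2 - errors found; 128-131 - master/slaves unreachable or cluster time desynchronized;
+ *                     256 - mysql.status call failed; 1024 - empty or unrecognized mysql.status response)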
+ */
+def verifyGaleraStatus(env, slave=false, checkTimeSync=false) {
+ def salt = new com.mirantis.mk.Salt()
+ def common = new com.mirantis.mk.Common()
+ def out = ""
+ def status = "unknown"
+ def testNode = ""
+ if (!slave) {
+ try {
+ galeraMaster = salt.getMinions(env, "I@galera:master")
+ common.infoMsg("Current Galera master is: ${galeraMaster}")
+ salt.minionsReachable(env, "I@salt:master", "I@galera:master")
+ testNode = "I@galera:master"
+ } catch (Exception e) {
+ common.errorMsg('Galera master is not reachable.')
+ return 128
+ }
+ } else {
+ try {
+ galeraSlaves = salt.getMinions(env, "I@galera:slave")
+ common.infoMsg("Testing Galera slave minions: ${galeraSlaves}")
+ } catch (Exception e) {
+ common.errorMsg("Cannot obtain Galera slave minions list.")
+ return 129
+ }
+ for (minion in galeraSlaves) {
+ try {
+ salt.minionsReachable(env, "I@salt:master", minion)
+ testNode = minion
+ break
+ } catch (Exception e) {
+ common.warningMsg("Slave '${minion}' is not reachable.")
+ }
+ }
+ }
+ if (!testNode) {
+ common.errorMsg("No Galera slave was reachable.")
+ return 130
+ }
+ if (checkTimeSync && !salt.checkClusterTimeSync(env, "I@galera:master or I@galera:slave")) {
+        common.errorMsg("Time in cluster is desynchronized or it couldn't be determined. You should fix this issue manually before proceeding.")
+ return 131
+ }
+ try {
+ out = salt.runSaltProcessStep(env, "${testNode}", "mysql.status", [], null, false)
+ } catch (Exception e) {
+ common.errorMsg('Could not determine mysql status.')
+ return 256
+ }
+ if (out) {
+ try {
+ status = validateAndPrintGaleraStatusReport(env, out, testNode)
+ } catch (Exception e) {
+ common.errorMsg('Could not parse the mysql status output. Check it manually.')
+ return 1
+ }
+ } else {
+        common.errorMsg("MySQL status response is unrecognized or empty. Response: ${out}")
+ return 1024
+ }
+ if (status == "OK") {
+ common.infoMsg("No errors found - MySQL status is ${status}.")
+ return 0
+ } else if (status == "unknown") {
+        common.warningMsg('MySQL status cannot be determined.')
+ return 1
+ } else {
+ common.errorMsg("Errors found.")
+ return 2
+ }
+}
+
+/** Validates and prints result of verifyGaleraStatus function
+@param env Salt Connection object or pepperEnv
+@param out Output of the mysql.status Salt function
+@param minion Minion (or compound target) on which the status was obtained
+@return status "OK", "ERROR" or "unknown" depending on the result of validation
+*/
+
+def validateAndPrintGaleraStatusReport(env, out, minion) {
+ def salt = new com.mirantis.mk.Salt()
+ def common = new com.mirantis.mk.Common()
+ if (minion == "I@galera:master") {
+ role = "master"
+ } else {
+ role = "slave"
+ }
+ sizeOut = salt.getReturnValues(salt.getPillar(env, minion, "galera:${role}:members"))
+ expected_cluster_size = sizeOut.size()
+ outlist = out['return'][0]
+ resultYaml = outlist.get(outlist.keySet()[0]).sort()
+ common.prettyPrint(resultYaml)
+ parameters = [
+ wsrep_cluster_status: [title: 'Cluster status', expectedValues: ['Primary'], description: ''],
+ wsrep_cluster_size: [title: 'Current cluster size', expectedValues: [expected_cluster_size], description: ''],
+ wsrep_ready: [title: 'Node status', expectedValues: ['ON', true], description: ''],
+ wsrep_local_state_comment: [title: 'Node status comment', expectedValues: ['Joining', 'Waiting on SST', 'Joined', 'Synced', 'Donor'], description: ''],
+ wsrep_connected: [title: 'Node connectivity', expectedValues: ['ON', true], description: ''],
+        wsrep_local_recv_queue_avg: [title: 'Average size of local received queue', expectedThreshold: [warn: 0.5, error: 1.0], description: '(Value above 0 means that the node cannot apply write-sets as fast as it receives them, which can lead to replication throttling)'],
+        wsrep_local_send_queue_avg: [title: 'Average size of local send queue', expectedThreshold: [warn: 0.5, error: 1.0], description: '(Value above 0 indicates replication throttling or network throughput issues, such as a bottleneck on the network link.)']
+ ]
+ for (key in parameters.keySet()) {
+ value = resultYaml[key]
+ if (value instanceof String && value.isBigDecimal()) {
+ value = value.toBigDecimal()
+ }
+ parameters.get(key) << [actualValue: value]
+ }
+ for (key in parameters.keySet()) {
+ param = parameters.get(key)
+ if (key == 'wsrep_local_recv_queue_avg' || key == 'wsrep_local_send_queue_avg') {
+ if (param.get('actualValue') > param.get('expectedThreshold').get('error')) {
+ param << [match: 'error']
+ } else if (param.get('actualValue') > param.get('expectedThreshold').get('warn')) {
+ param << [match: 'warn']
+ } else {
+ param << [match: 'ok']
+ }
+ } else {
+ for (expValue in param.get('expectedValues')) {
+ if (expValue == param.get('actualValue')) {
+ param << [match: 'ok']
+ break
+ } else {
+ param << [match: 'error']
+ }
+ }
+ }
+ }
+ cluster_info_report = []
+ cluster_warning_report = []
+ cluster_error_report = []
+ for (key in parameters.keySet()) {
+ param = parameters.get(key)
+ if (param.containsKey('expectedThreshold')) {
+ expValues = "below ${param.get('expectedThreshold').get('warn')}"
+ } else {
+ if (param.get('expectedValues').size() > 1) {
+ expValues = param.get('expectedValues').join(' or ')
+ } else {
+ expValues = param.get('expectedValues')[0]
+ }
+ }
+ reportString = "${param.title}: ${param.actualValue} (Expected: ${expValues}) ${param.description}"
+ if (param.get('match').equals('ok')) {
+ cluster_info_report.add("[OK ] ${reportString}")
+ } else if (param.get('match').equals('warn')) {
+ cluster_warning_report.add("[WARNING] ${reportString}")
+ } else {
+            cluster_error_report.add("[ ERROR] ${reportString}")
+ }
+ }
+    common.infoMsg("CLUSTER STATUS REPORT: ${cluster_info_report.size()} expected values, ${cluster_warning_report.size()} warnings and ${cluster_error_report.size()} errors found:")
+ if (cluster_info_report.size() > 0) {
+ common.infoMsg(cluster_info_report.join('\n'))
+ }
+ if (cluster_warning_report.size() > 0) {
+ common.warningMsg(cluster_warning_report.join('\n'))
+ }
+ if (cluster_error_report.size() > 0) {
+ common.errorMsg(cluster_error_report.join('\n'))
+ return "ERROR"
+ } else {
+ return "OK"
+ }
+}
+
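+/**
+ * Determines which Galera node was shut down last
+ *
+ * Reads the 'seqno' value from /var/lib/mysql/grastate.dat on every member listed in the
+ * galera:master:members pillar and returns the Salt target of the member with the highest
+ * value, falling back to 'I@galera:master' when it cannot be determined.
+ *
+ * @param env Salt Connection object or pepperEnv
+ * @return Salt target (compound matcher) of the last shutdown node
+ */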
+def getGaleraLastShutdownNode(env) {
+ def salt = new com.mirantis.mk.Salt()
+ def common = new com.mirantis.mk.Common()
+ members = ''
+ lastNode = [ip: '', seqno: -2]
+ try {
+ members = salt.getReturnValues(salt.getPillar(env, "I@galera:master", "galera:master:members"))
+ } catch (Exception er) {
+ common.errorMsg('Could not retrieve members list')
+ return 'I@galera:master'
+ }
+ if (members) {
+ for (member in members) {
+ try {
+ salt.minionsReachable(env, 'I@salt:master', "S@${member.host}")
+ out = salt.getReturnValues(salt.cmdRun(env, "S@${member.host}", 'cat /var/lib/mysql/grastate.dat | grep "seqno" | cut -d ":" -f2', true, null, false))
+ seqno = out.tokenize('\n')[0].trim()
+ if (seqno.isNumber()) {
+ seqno = seqno.toInteger()
+ } else {
+ seqno = -2
+ }
+ highestSeqno = lastNode.get('seqno')
+ if (seqno > highestSeqno) {
+ lastNode << [ip: "${member.host}", seqno: seqno]
+ }
+ } catch (Exception er) {
+ common.warningMsg("Could not determine 'seqno' value for node ${member.host} ")
+ }
+ }
+ }
+ if (lastNode.get('ip') != '') {
+ return "S@${lastNode.ip}"
+ } else {
+ return "I@galera:master"
+ }
+}
+
+/**
+ * Restores Galera database
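+ *
+ * Stops the mysql service on all Galera nodes, removes grastate.dat and the InnoDB log files on the
+ * slaves, moves the data directory of the last shutdown node aside, re-runs the xtrabackup restore
+ * state and then starts the cluster again beginning with that node.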
+ * @param env Salt Connection object or pepperEnv
+ * @return output of salt commands
+ */
+def restoreGaleraDb(env) {
+ def salt = new com.mirantis.mk.Salt()
+ def common = new com.mirantis.mk.Common()
+ try {
+ salt.runSaltProcessStep(env, 'I@galera:slave', 'service.stop', ['mysql'])
+ } catch (Exception er) {
+ common.warningMsg('Mysql service already stopped')
+ }
+ try {
+ salt.runSaltProcessStep(env, 'I@galera:master', 'service.stop', ['mysql'])
+ } catch (Exception er) {
+ common.warningMsg('Mysql service already stopped')
+ }
+ lastNodeTarget = getGaleraLastShutdownNode(env)
+ try {
+ salt.cmdRun(env, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
+ } catch (Exception er) {
+ common.warningMsg('Files are not present')
+ }
+ try {
+ salt.cmdRun(env, 'I@galera:slave', "rm /var/lib/mysql/grastate.dat")
+ } catch (Exception er) {
+ common.warningMsg('Files are not present')
+ }
+ try {
+ salt.cmdRun(env, lastNodeTarget, "mkdir /root/mysql/mysql.bak")
+ } catch (Exception er) {
+ common.warningMsg('Directory already exists')
+ }
+ try {
+ salt.cmdRun(env, lastNodeTarget, "rm -rf /root/mysql/mysql.bak/*")
+ } catch (Exception er) {
+ common.warningMsg('Directory already empty')
+ }
+ try {
+ salt.cmdRun(env, lastNodeTarget, "mv /var/lib/mysql/* /root/mysql/mysql.bak")
+ } catch (Exception er) {
+ common.warningMsg('Files were already moved')
+ }
+ try {
+ salt.runSaltProcessStep(env, lastNodeTarget, 'file.remove', ["/var/lib/mysql/.galera_bootstrap"])
+ } catch (Exception er) {
+ common.warningMsg('File is not present')
+ }
+ salt.cmdRun(env, lastNodeTarget, "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
+ def backup_dir = salt.getReturnValues(salt.getPillar(env, lastNodeTarget, 'xtrabackup:client:backup_dir'))
+ if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/mysql/xtrabackup' }
+ salt.runSaltProcessStep(env, lastNodeTarget, 'file.remove', ["${backup_dir}/dbrestored"])
+ salt.cmdRun(env, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
+ salt.runSaltProcessStep(env, lastNodeTarget, 'service.start', ['mysql'])
+
+ // wait until mysql service on galera master is up
+ try {
+ salt.commandStatus(env, lastNodeTarget, 'service mysql status', 'running')
+ } catch (Exception er) {
+        input message: "Database is not running, please fix it first and only then click on PROCEED."
+ }
+
+ salt.runSaltProcessStep(env, "I@galera:master and not ${lastNodeTarget}", 'service.start', ['mysql'])
+ salt.runSaltProcessStep(env, "I@galera:slave and not ${lastNodeTarget}", 'service.start', ['mysql'])
+}
diff --git a/src/com/mirantis/mk/Openstack.groovy b/src/com/mirantis/mk/Openstack.groovy
index c9e74fd..b11f628 100644
--- a/src/com/mirantis/mk/Openstack.groovy
+++ b/src/com/mirantis/mk/Openstack.groovy
@@ -526,280 +526,30 @@
}
}
-/**
- * Verifies Galera database
- *
- * This function checks for Galera master, tests connection and if reachable, it obtains the result
- * of Salt mysql.status function. The result is then parsed, validated and outputed to the user.
- *
- * @param env Salt Connection object or pepperEnv
- * @param slave Boolean value to enable slave checking (if master in unreachable)
- * @param checkTimeSync Boolean value to enable time sync check
- * @return resultCode int values used to determine exit status in the calling function
- */
def verifyGaleraStatus(env, slave=false, checkTimeSync=false) {
- def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
- def out = ""
- def status = "unknown"
- def testNode = ""
- if (!slave) {
- try {
- galeraMaster = salt.getMinions(env, "I@galera:master")
- common.infoMsg("Current Galera master is: ${galeraMaster}")
- salt.minionsReachable(env, "I@salt:master", "I@galera:master")
- testNode = "I@galera:master"
- } catch (Exception e) {
- common.errorMsg('Galera master is not reachable.')
- return 128
- }
- } else {
- try {
- galeraMinions = salt.getMinions(env, "I@galera:slave")
- common.infoMsg("Testing Galera slave minions: ${galeraMinions}")
- } catch (Exception e) {
- common.errorMsg("Cannot obtain Galera slave minions list.")
- return 129
- }
- for (minion in galeraMinions) {
- try {
- salt.minionsReachable(env, "I@salt:master", minion)
- testNode = minion
- break
- } catch (Exception e) {
- common.warningMsg("Slave '${minion}' is not reachable.")
- }
- }
- }
- if (!testNode) {
- common.errorMsg("No Galera slave was reachable.")
- return 130
- }
- if (checkTimeSync && !salt.checkClusterTimeSync(env, "I@galera:master or I@galera:slave")) {
- common.errorMsg("Time in cluster is desynchronized or it couldn't be detemined. You should fix this issue manually before proceeding.")
- return 131
- }
- try {
- out = salt.cmdRun(env, "I@salt:master", "salt -C '${testNode}' mysql.status")
- } catch (Exception e) {
- common.errorMsg('Could not determine mysql status.')
- return 256
- }
- if (out) {
- try {
- status = validateAndPrintGaleraStatusReport(env, out, testNode)
- } catch (Exception e) {
- common.errorMsg('Could not parse the mysql status output. Check it manually.')
- return 1
- }
- } else {
- common.errorMsg("Mysql status response unrecognized or is empty. Response: ${out}")
- return 1024
- }
- if (status == "OK") {
- common.infoMsg("No errors found - MySQL status is ${status}.")
- return 0
- } else if (status == "unknown") {
- common.warningMsg('MySQL status cannot be detemined')
- return 1
- } else {
- common.errorMsg("Errors found.")
- return 2
- }
+ def galera = new com.mirantis.mk.Galera()
+ common.warningMsg("verifyGaleraStatus method was moved to Galera class. Please change your calls accordingly.")
+ return galera.verifyGaleraStatus(env, slave, checkTimeSync)
}
-/** Validates and prints result of verifyGaleraStatus function
-@param env Salt Connection object or pepperEnv
-@param out Output of the mysql.status Salt function
-@return status "OK", "ERROR" or "uknown" depending on result of validation
-*/
-
def validateAndPrintGaleraStatusReport(env, out, minion) {
- def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
- if (minion == "I@galera:master") {
- role = "master"
- } else {
- role = "slave"
- }
- sizeOut = salt.getReturnValues(salt.getPillar(env, minion, "galera:${role}:members"))
- expected_cluster_size = sizeOut.size()
- outlist = out['return'][0]
- resultString = outlist.get(outlist.keySet()[0]).replace("\n ", " ").replace(" ", "").replace("Salt command execution success", "").replace("----------", "").replace(": \n", ": no value\n")
- resultYaml = readYaml text: resultString
- parameters = [
- wsrep_cluster_status: [title: 'Cluster status', expectedValues: ['Primary'], description: ''],
- wsrep_cluster_size: [title: 'Current cluster size', expectedValues: [expected_cluster_size], description: ''],
- wsrep_ready: [title: 'Node status', expectedValues: ['ON', true], description: ''],
- wsrep_local_state_comment: [title: 'Node status comment', expectedValues: ['Joining', 'Waiting on SST', 'Joined', 'Synced', 'Donor'], description: ''],
- wsrep_connected: [title: 'Node connectivity', expectedValues: ['ON', true], description: ''],
- wsrep_local_recv_queue_avg: [title: 'Average size of local reveived queue', expectedThreshold: [warn: 0.5, error: 1.0], description: '(Value above 0 means that the node cannot apply write-sets as fast as it receives them, which can lead to replication throttling)'],
- wsrep_local_send_queue_avg: [title: 'Average size of local send queue', expectedThreshold: [warn: 0.5, error: 1.0], description: '(Value above 0 indicate replication throttling or network throughput issues, such as a bottleneck on the network link.)']
- ]
- for (key in parameters.keySet()) {
- value = resultYaml[key]
- parameters.get(key) << [actualValue: value]
- }
- for (key in parameters.keySet()) {
- param = parameters.get(key)
- if (key == 'wsrep_local_recv_queue_avg' || key == 'wsrep_local_send_queue_avg') {
- if (param.get('actualValue') > param.get('expectedThreshold').get('error')) {
- param << [match: 'error']
- } else if (param.get('actualValue') > param.get('expectedThreshold').get('warn')) {
- param << [match: 'warn']
- } else {
- param << [match: 'ok']
- }
- } else {
- for (expValue in param.get('expectedValues')) {
- if (expValue == param.get('actualValue')) {
- param << [match: 'ok']
- break
- } else {
- param << [match: 'error']
- }
- }
- }
- }
- cluster_info_report = []
- cluster_warning_report = []
- cluster_error_report = []
- for (key in parameters.keySet()) {
- param = parameters.get(key)
- if (param.containsKey('expectedThreshold')) {
- expValues = "below ${param.get('expectedThreshold').get('warn')}"
- } else {
- if (param.get('expectedValues').size() > 1) {
- expValues = param.get('expectedValues').join(' or ')
- } else {
- expValues = param.get('expectedValues')[0]
- }
- }
- reportString = "${param.title}: ${param.actualValue} (Expected: ${expValues}) ${param.description}"
- if (param.get('match').equals('ok')) {
- cluster_info_report.add("[OK ] ${reportString}")
- } else if (param.get('match').equals('warn')) {
- cluster_warning_report.add("[WARNING] ${reportString}")
- } else {
- cluster_error_report.add("[ ERROR] ${reportString})")
- }
- }
- common.infoMsg("CLUSTER STATUS REPORT: ${cluster_info_report.size()} expected values, ${cluster_warning_report.size()} warnings and ${cluster_error_report.size()} error found:")
- if (cluster_info_report.size() > 0) {
- common.infoMsg(cluster_info_report.join('\n'))
- }
- if (cluster_warning_report.size() > 0) {
- common.warningMsg(cluster_warning_report.join('\n'))
- }
- if (cluster_error_report.size() > 0) {
- common.errorMsg(cluster_error_report.join('\n'))
- return "ERROR"
- } else {
- return "OK"
- }
+ def galera = new com.mirantis.mk.Galera()
+ common.warningMsg("validateAndPrintGaleraStatusReport method was moved to Galera class. Please change your calls accordingly.")
+ return galera.validateAndPrintGaleraStatusReport(env, out, minion)
}
def getGaleraLastShutdownNode(env) {
- def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
- members = ''
- lastNode = [ip: '', seqno: -2]
- try {
- members = salt.getReturnValues(salt.getPillar(env, "I@galera:master", "galera:master:members"))
- } catch (Exception er) {
- common.errorMsg('Could not retrieve members list')
- return 'I@galera:master'
- }
- if (members) {
- for (member in members) {
- try {
- salt.minionsReachable(env, 'I@salt:master', "S@${member.host}")
- out = salt.getReturnValues(salt.cmdRun(env, "S@${member.host}", 'cat /var/lib/mysql/grastate.dat | grep "seqno" | cut -d ":" -f2', true, null, false))
- seqno = out.tokenize('\n')[0].trim()
- if (seqno.isNumber()) {
- seqno = seqno.toInteger()
- } else {
- seqno = -2
- }
- highestSeqno = lastNode.get('seqno')
- if (seqno > highestSeqno) {
- lastNode << [ip: "${member.host}", seqno: seqno]
- }
- } catch (Exception er) {
- common.warningMsg("Could not determine 'seqno' value for node ${member.host} ")
- }
- }
- }
- if (lastNode.get('ip') != '') {
- return "S@${lastNode.ip}"
- } else {
- return "I@galera:master"
- }
+ def galera = new com.mirantis.mk.Galera()
+ common.warningMsg("getGaleraLastShutdownNode method was moved to Galera class. Please change your calls accordingly.")
+ return galera.getGaleraLastShutdownNode(env)
}
-/**
- * Restores Galera database
- * @param env Salt Connection object or pepperEnv
- * @return output of salt commands
- */
def restoreGaleraDb(env) {
- def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
- try {
- salt.runSaltProcessStep(env, 'I@galera:slave', 'service.stop', ['mysql'])
- } catch (Exception er) {
- common.warningMsg('Mysql service already stopped')
- }
- try {
- salt.runSaltProcessStep(env, 'I@galera:master', 'service.stop', ['mysql'])
- } catch (Exception er) {
- common.warningMsg('Mysql service already stopped')
- }
- lastNodeTarget = getGaleraLastShutdownNode(env)
- try {
- salt.cmdRun(env, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
- } catch (Exception er) {
- common.warningMsg('Files are not present')
- }
- try {
- salt.cmdRun(env, 'I@galera:slave', "rm /var/lib/mysql/grastate.dat")
- } catch (Exception er) {
- common.warningMsg('Files are not present')
- }
- try {
- salt.cmdRun(env, lastNodeTarget, "mkdir /root/mysql/mysql.bak")
- } catch (Exception er) {
- common.warningMsg('Directory already exists')
- }
- try {
- salt.cmdRun(env, lastNodeTarget, "rm -rf /root/mysql/mysql.bak/*")
- } catch (Exception er) {
- common.warningMsg('Directory already empty')
- }
- try {
- salt.cmdRun(env, lastNodeTarget, "mv /var/lib/mysql/* /root/mysql/mysql.bak")
- } catch (Exception er) {
- common.warningMsg('Files were already moved')
- }
- try {
- salt.runSaltProcessStep(env, lastNodeTarget, 'file.remove', ["/var/lib/mysql/.galera_bootstrap"])
- } catch (Exception er) {
- common.warningMsg('File is not present')
- }
- salt.cmdRun(env, lastNodeTarget, "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
- def backup_dir = salt.getReturnValues(salt.getPillar(env, lastNodeTarget, 'xtrabackup:client:backup_dir'))
- if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/mysql/xtrabackup' }
- salt.runSaltProcessStep(env, lastNodeTarget, 'file.remove', ["${backup_dir}/dbrestored"])
- salt.cmdRun(env, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
- salt.runSaltProcessStep(env, lastNodeTarget, 'service.start', ['mysql'])
-
- // wait until mysql service on galera master is up
- try {
- salt.commandStatus(env, lastNodeTarget, 'service mysql status', 'running')
- } catch (Exception er) {
- input message: "Database is not running please fix it first and only then click on PROCEED."
- }
-
- salt.runSaltProcessStep(env, "I@galera:master and not ${lastNodeTarget}", 'service.start', ['mysql'])
- salt.runSaltProcessStep(env, "I@galera:slave and not ${lastNodeTarget}", 'service.start', ['mysql'])
+ def galera = new com.mirantis.mk.Galera()
+ common.warningMsg("restoreGaleraDb method was moved to Galera class. Please change your calls accordingly.")
+ return galera.restoreGaleraDb(env)
}
\ No newline at end of file