Merge "Remove stack limits for mceloud"
diff --git a/cicd-lab-pipeline.groovy b/cicd-lab-pipeline.groovy
index 7cb6f55..cb8e951 100644
--- a/cicd-lab-pipeline.groovy
+++ b/cicd-lab-pipeline.groovy
@@ -163,6 +163,9 @@
}
stage("Deploy Docker services") {
+ // We need /etc/aptly-publisher.yaml to be present before
+ // services are deployed
+ salt.enforceState(saltMaster, 'I@aptly:publisher', 'aptly.publisher', true)
retry(3) {
sleep(5)
salt.enforceState(saltMaster, 'I@docker:swarm:role:master', 'docker.client')
diff --git a/cleanup-pipeline.groovy b/cleanup-pipeline.groovy
index 4b67213..282b041 100644
--- a/cleanup-pipeline.groovy
+++ b/cleanup-pipeline.groovy
@@ -7,11 +7,14 @@
* STACK_TYPE Type of the stack (heat, aws)
*
* Heat parameters:
- * OPENSTACK_API_URL OpenStack API address
- * OPENSTACK_API_CREDENTIALS Credentials to the OpenStack API
- * OPENSTACK_API_PROJECT OpenStack project to connect to
- * OPENSTACK_API_CLIENT Versions of OpenStack python clients
- * OPENSTACK_API_VERSION Version of the OpenStack API (2/3)
+ * OPENSTACK_API_URL OpenStack API address
+ * OPENSTACK_API_CREDENTIALS Credentials to the OpenStack API
+ * OPENSTACK_API_PROJECT OpenStack project to connect to
+ * OPENSTACK_API_PROJECT_DOMAIN Domain for OpenStack project
+ * OPENSTACK_API_PROJECT_ID ID for OpenStack project
+ * OPENSTACK_API_USER_DOMAIN Domain for OpenStack user
+ * OPENSTACK_API_CLIENT Versions of OpenStack python clients
+ * OPENSTACK_API_VERSION Version of the OpenStack API (2/3)
*
* AWS parameters:
* AWS_API_CREDENTIALS Credentials id AWS EC2 API
@@ -42,7 +45,7 @@
} else if (STACK_TYPE == 'aws') {
- env_vars = aws.getEnvVars(AWS_API_CREDENTIALS, AWS_STACK_REGION)
+ env_vars = aws.getEnvVars(AWS_API_CREDENTIALS, AWS_DEFAULT_REGION)
aws.setupVirtualEnv(venv_path)
} else {
@@ -53,15 +56,18 @@
stage('Delete stack') {
if (STACK_TYPE == 'heat') {
- def openstackCloud = openstack.createOpenstackEnv(OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS, OPENSTACK_API_PROJECT)
+ def openstackCloud = openstack.createOpenstackEnv(
+ OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS,
+ OPENSTACK_API_PROJECT,OPENSTACK_API_PROJECT_DOMAIN,
+ OPENSTACK_API_PROJECT_ID, OPENSTACK_API_USER_DOMAIN,
+ OPENSTACK_API_VERSION)
openstack.getKeystoneToken(openstackCloud, venv_path)
common.infoMsg("Deleting Heat Stack " + STACK_NAME)
openstack.deleteHeatStack(openstackCloud, STACK_NAME, venv_path)
} else if (STACK_TYPE == 'aws') {
- aws.deteteStack(venv_path, env_vars, STACK_NAME)
- aws.waitForStatus(venv_path, evn_vars, STACK_NAME, 'DELETE_COMPLETE', ['DELETE_FAILED'])
+ aws.deleteStack(venv_path, env_vars, STACK_NAME)
} else {
throw new Exception('Stack type is not supported')
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index 44a536a..41e0cca 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -156,7 +156,8 @@
// start stack
def stack_params = ["ParameterKey=KeyName,ParameterValue=" + AWS_SSH_KEY]
- aws.createStack(venv_path, env_vars, STACK_TEMPLATE, STACK_NAME, stack_params)
+ def template_file = 'cfn/' + STACK_TEMPLATE + '.yml'
+ aws.createStack(venv_path, env_vars, template_file, STACK_NAME, stack_params)
}
// wait for stack to be ready
@@ -337,21 +338,24 @@
// Clean
//
- if (STACK_TYPE == 'heat') {
+ if (STACK_NAME && STACK_NAME != '') {
// send notification
common.sendNotification(currentBuild.result, STACK_NAME, ["slack"])
+ }
- if (STACK_DELETE.toBoolean() == true) {
- common.errorMsg('Heat job cleanup triggered')
- stage('Trigger cleanup job') {
- build job: 'deploy-stack-cleanup', parameters: [[$class: 'StringParameterValue', name: 'STACK_NAME', value: STACK_NAME]]
- }
- } else {
- if (currentBuild.result == 'FAILURE') {
- common.errorMsg("Deploy job FAILED and was not deleted. Please fix the problem and delete stack on you own.")
- if (SALT_MASTER_URL) {
- common.errorMsg("Salt master URL: ${SALT_MASTER_URL}")
- }
+ if (STACK_DELETE.toBoolean() == true) {
+ stage('Trigger cleanup job') {
+ common.errorMsg('Stack cleanup job triggered')
+ build(job: STACK_CLEANUP_JOB, parameters: [
+ [$class: 'StringParameterValue', name: 'STACK_NAME', value: STACK_NAME],
+ [$class: 'StringParameterValue', name: 'STACK_TYPE', value: STACK_TYPE]
+ ])
+ }
+ } else {
+ if (currentBuild.result == 'FAILURE') {
+                common.errorMsg("Deploy job FAILED and was not deleted. Please fix the problem and delete stack on your own.")
+ if (SALT_MASTER_URL) {
+ common.errorMsg("Salt master URL: ${SALT_MASTER_URL}")
}
}
}
diff --git a/lab-pipeline.groovy b/lab-pipeline.groovy
index 9dad0fe..3e251c4 100644
--- a/lab-pipeline.groovy
+++ b/lab-pipeline.groovy
@@ -17,17 +17,17 @@
*
* Expected parameters:
* required for STACK_TYPE=heat
- * HEAT_STACK_ENVIRONMENT Heat stack environmental parameters
- * HEAT_STACK_ZONE Heat stack availability zone
- * HEAT_STACK_PUBLIC_NET Heat stack floating IP pool
- * OPENSTACK_API_URL OpenStack API address
- * OPENSTACK_API_CREDENTIALS Credentials to the OpenStack API
- * OPENSTACK_API_PROJECT OpenStack project to connect to
- * OPENSTACK_PROJECT_DOMAIN Domain for OpenStack project
- * OPENSTACK_PROJECT_ID ID for OpenStack project
- * OPENSTACK_USER_DOMAIN Domain for OpenStack user
- * OPENSTACK_API_CLIENT Versions of OpenStack python clients
- * OPENSTACK_API_VERSION Version of the OpenStack API (2/3)
+ * HEAT_STACK_ENVIRONMENT Heat stack environmental parameters
+ * HEAT_STACK_ZONE Heat stack availability zone
+ * HEAT_STACK_PUBLIC_NET Heat stack floating IP pool
+ * OPENSTACK_API_URL OpenStack API address
+ * OPENSTACK_API_CREDENTIALS Credentials to the OpenStack API
+ * OPENSTACK_API_PROJECT OpenStack project to connect to
+ * OPENSTACK_API_PROJECT_DOMAIN Domain for OpenStack project
+ * OPENSTACK_API_PROJECT_ID ID for OpenStack project
+ * OPENSTACK_API_USER_DOMAIN Domain for OpenStack user
+ * OPENSTACK_API_CLIENT Versions of OpenStack python clients
+ * OPENSTACK_API_VERSION Version of the OpenStack API (2/3)
*
* SALT_MASTER_CREDENTIALS Credentials to the Salt API
*
@@ -56,11 +56,7 @@
salt = new com.mirantis.mk.Salt()
test = new com.mirantis.mk.Test()
-
-
-
_MAX_PERMITTED_STACKS = 2
-overwriteFile = "/srv/salt/reclass/classes/cluster/overwrite.yml"
timestamps {
node {
@@ -113,8 +109,8 @@
openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
openstackCloud = openstack.createOpenstackEnv(
OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS,
- OPENSTACK_API_PROJECT,OPENSTACK_PROJECT_DOMAIN,
- OPENSTACK_PROJECT_ID, OPENSTACK_USER_DOMAIN,
+ OPENSTACK_API_PROJECT, OPENSTACK_API_PROJECT_DOMAIN,
+ OPENSTACK_API_PROJECT_ID, OPENSTACK_API_USER_DOMAIN,
OPENSTACK_API_VERSION)
openstack.getKeystoneToken(openstackCloud, openstackEnv)
//
@@ -184,29 +180,28 @@
stage('Install Kubernetes control') {
// Overwrite Kubernetes vars if specified
- if (env.getEnvironment().containsKey("KUBERNETES_HYPERKUBE_IMAGE")) {
- salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'file.append', overwriteFile, " kubernetes_hyperkube_image: ${KUBERNETES_HYPERKUBE_IMAGE}")
+ if (env.getEnvironment().containsKey('KUBERNETES_HYPERKUBE_IMAGE')) {
+ salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', 'kubernetes_hyperkube_image', KUBERNETES_HYPERKUBE_IMAGE)
}
// Overwrite Calico vars if specified
- if (env.getEnvironment().containsKey("CALICO_CNI_IMAGE")) {
- salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'file.append', overwriteFile, " kubernetes_calico_cni_image: ${CALICO_CNI_IMAGE}")
+ if (env.getEnvironment().containsKey('CALICO_CNI_IMAGE')) {
+ salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', 'kubernetes_calico_cni_image', CALICO_CNI_IMAGE)
}
- if (env.getEnvironment().containsKey("CALICO_NODE_IMAGE")) {
- salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'file.append', overwriteFile, " kubernetes_calico_node_image: ${CALICO_NODE_IMAGE}")
+ if (env.getEnvironment().containsKey('CALICO_NODE_IMAGE')) {
+ salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', 'kubernetes_calico_image', CALICO_NODE_IMAGE)
}
- if (env.getEnvironment().containsKey("CALICOCTL_IMAGE")) {
- salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'file.append', overwriteFile, " kubernetes_calicoctl_image: ${CALICOCTL_IMAGE}")
+ if (env.getEnvironment().containsKey('CALICOCTL_IMAGE')) {
+ salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', 'kubernetes_calicoctl_image', CALICOCTL_IMAGE)
}
// Overwrite netchecker vars if specified
- if (env.getEnvironment().containsKey("NETCHECKER_AGENT_IMAGE")) {
- salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'file.append', overwriteFile, " kubernetes_netchecker_agent_image: ${NETCHECKER_AGENT_IMAGE}")
+ if (env.getEnvironment().containsKey('NETCHECKER_AGENT_IMAGE')) {
+ salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', 'kubernetes_netchecker_agent_image', NETCHECKER_AGENT_IMAGE)
}
- if (env.getEnvironment().containsKey("NETCHECKER_SERVER_IMAGE")) {
- salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'file.append', overwriteFile, " kubernetes_netchecker_server_image: ${NETCHECKER_SERVER_IMAGE}")
+ if (env.getEnvironment().containsKey('NETCHECKER_SERVER_IMAGE')) {
+ salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', 'kubernetes_netchecker_server_image', NETCHECKER_SERVER_IMAGE)
}
-
orchestrate.installKubernetesControl(saltMaster)
}
diff --git a/openstack-compute-upgrade.groovy b/openstack-compute-upgrade.groovy
new file mode 100644
index 0000000..cde839c
--- /dev/null
+++ b/openstack-compute-upgrade.groovy
@@ -0,0 +1,112 @@
+/**
+ * Update packages on given nodes
+ *
+ * Expected parameters:
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API.
+ * SALT_MASTER_URL Full Salt API address [https://10.10.10.1:8000].
+ * TARGET_SERVERS Salt compound target to match nodes to be updated [*, G@osfamily:debian].
+ *   TARGET_PACKAGES            Space delimited list of packages to be updated [package1=version package2=version], empty string means updating all packages to the latest version.
+ *   TARGET_SUBSET_TEST         Number of nodes to list package updates, empty string means all targeted nodes.
+ *   TARGET_SUBSET_LIVE         Number of selected nodes to live apply selected package update.
+ *   TARGET_BATCH_LIVE          Batch size for the complete live package update on all nodes, empty string means apply to all targeted nodes.
+ *
+**/
+
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+
+def saltMaster
+def targetAll = ['expression': TARGET_SERVERS, 'type': 'compound']
+def targetTestSubset
+def targetLiveSubset
+def targetLiveAll
+def minions
+def result
+def packages
+def command
+def commandKwargs
+
+node() {
+ try {
+
+ stage('Connect to Salt master') {
+ saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ }
+
+ stage('List target servers') {
+ minions = salt.getMinions(saltMaster, targetAll)
+
+ if (minions.isEmpty()) {
+ throw new Exception("No minion was targeted")
+ }
+
+ if (TARGET_SUBSET_TEST != "") {
+ targetTestSubset = minions.subList(0, Integer.valueOf(TARGET_SUBSET_TEST)).join(' or ')
+ } else {
+ targetTestSubset = minions.join(' or ')
+ }
+ targetLiveSubset = minions.subList(0, Integer.valueOf(TARGET_SUBSET_LIVE)).join(' or ')
+
+ targetLiveAll = minions.join(' or ')
+ common.infoMsg("Found nodes: ${targetLiveAll}")
+ common.infoMsg("Selected test nodes: ${targetTestSubset}")
+ common.infoMsg("Selected sample nodes: ${targetLiveSubset}")
+ }
+
+ stage("Add new repos on sample") {
+ salt.enforceState(saltMaster, targetTestSubset, 'linux.system')
+ }
+
+ stage("List package upgrades") {
+ salt.runSaltProcessStep(saltMaster, targetTestSubset, 'pkg.list_upgrades', [], null, true)
+ }
+
+ stage('Confirm live package upgrades on sample') {
+ if(TARGET_PACKAGES==""){
+ timeout(time: 2, unit: 'HOURS') {
+ def userInput = input(
+ id: 'userInput', message: 'Insert package names for update', parameters: [
+ [$class: 'TextParameterDefinition', defaultValue: '', description: 'Package names (or *)', name: 'packages']
+ ])
+ if(userInput!= "" && userInput!= "*"){
+ TARGET_PACKAGES = userInput
+ }
+ }
+ }else{
+ timeout(time: 2, unit: 'HOURS') {
+ input message: "Approve live package upgrades on ${targetLiveSubset} nodes?"
+ }
+ }
+ }
+
+ if (TARGET_PACKAGES != "") {
+ command = "pkg.install"
+ packages = TARGET_PACKAGES.tokenize(' ')
+ commandKwargs = ['only_upgrade': 'true']
+ }else {
+ command = "pkg.upgrade"
+ packages = null
+ }
+
+ stage('Apply package upgrades on sample') {
+ out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, packages, commandKwargs)
+ salt.printSaltCommandResult(out)
+ }
+
+ stage('Confirm package upgrades on all nodes') {
+ timeout(time: 2, unit: 'HOURS') {
+ input message: "Approve live package upgrades on ${targetLiveAll} nodes?"
+ }
+ }
+
+ stage('Apply package upgrades on all nodes') {
+ out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, packages, commandKwargs)
+ salt.printSaltCommandResult(out)
+ }
+
+ } catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ currentBuild.result = "FAILURE"
+ throw e
+ }
+}
diff --git a/openstack-control-upgrade.groovy b/openstack-control-upgrade.groovy
index 7dd2ea1..66cdebb 100644
--- a/openstack-control-upgrade.groovy
+++ b/openstack-control-upgrade.groovy
@@ -81,7 +81,7 @@
// salt 'kvm02*' state.sls salt.control
salt.enforceState(saltMaster, "${upgNodeProvider}", 'salt.control')
- sleep(60)
+ sleep(70)
// salt '*' saltutil.refresh_pillar
salt.runSaltProcessStep(saltMaster, 'upg*', 'saltutil.refresh_pillar', [], null, true)
@@ -120,11 +120,15 @@
salt.cmdRun(saltMaster, 'I@backupninja:client', 'backupninja -n --run /etc/backup.d/101.mysql')
salt.cmdRun(saltMaster, 'I@backupninja:client', 'backupninja -n --run /etc/backup.d/200.backup.rsync > /tmp/backupninjalog')
+ salt.enforceState(saltMaster, 'I@xtrabackup:server', 'xtrabackup')
+ salt.enforceState(saltMaster, 'I@xtrabackup:client', 'openssh.client')
+ salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
+ salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh'")
def databases = salt.cmdRun(saltMaster, 'I@mysql:client','salt-call mysql.db_list | grep upgrade | awk \'/-/ {print \$2}\'')
if(databases && databases != ""){
def databasesList = databases['return'][0].values()[0].trim().tokenize("\n")
- for( i = 0; i < databasesList.size(); i++){
+ for( i = 0; i < databasesList.size(); i++){
if(databasesList[i].toLowerCase().contains('upgrade')){
salt.runSaltProcessStep(saltMaster, 'I@mysql:client', 'mysql.db_remove', ["${databasesList[i]}"], null, true)
common.warningMsg("removing database ${databasesList[i]}")
@@ -268,9 +272,7 @@
salt.runSaltProcessStep(saltMaster, "${ctl02NodeProvider}", 'virt.undefine', ["ctl02.${domain}"], null, true)
salt.runSaltProcessStep(saltMaster, "${ctl03NodeProvider}", 'virt.undefine', ["ctl03.${domain}"], null, true)
-
- salt.cmdRun(saltMaster, 'I@backupninja:client', 'backupninja -n --run /etc/backup.d/101.mysql')
- salt.cmdRun(saltMaster, 'I@backupninja:client', 'backupninja -n --run /etc/backup.d/200.backup.rsync > /tmp/backupninjalog')
+ salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh'")
try {
salt.cmdRun(saltMaster, 'I@salt:master', "salt-key -d ctl01.${domain},ctl02.${domain},ctl03.${domain},prx01.${domain},prx02.${domain} -y")
@@ -281,7 +283,7 @@
// salt 'kvm*' state.sls salt.control
salt.enforceState(saltMaster, 'I@salt:control', 'salt.control')
- sleep(60)
+ sleep(70)
// salt '*' saltutil.refresh_pillar
salt.runSaltProcessStep(saltMaster, '*', 'saltutil.refresh_pillar', [], null, true)
@@ -357,27 +359,62 @@
} catch (Exception e) {
errorOccured = true
common.warningMsg('Some states that require syncdb failed. Restoring production databases')
- databases = salt.cmdRun(saltMaster, 'I@mysql:client','salt-call mysql.db_list | grep -v \'upgrade\' | grep -v \'schema\' | awk \'/-/ {print \$2}\'')
- if(databases && databases != ""){
- databasesList = databases['return'][0].values()[0].trim().tokenize("\n")
- for( i = 0; i < databasesList.size(); i++){
- if(!databasesList[i].toLowerCase().contains('upgrade') && !databasesList[i].toLowerCase().contains('command execution')){
- salt.runSaltProcessStep(saltMaster, 'I@mysql:client', 'mysql.db_remove', ["${databasesList[i]}"], null, true)
- common.warningMsg("removing database ${databasesList[i]}")
- salt.runSaltProcessStep(saltMaster, 'I@mysql:client', 'file.remove', ["/root/mysql/flags/${databasesList[i]}-installed"], null, true)
- }
- }
- salt.enforceState(saltMaster, 'I@mysql:client', 'mysql.client')
- }else{
- common.errorMsg("No none _upgrade databases were returned. You have to restore production databases before running the real control upgrade again. This is because database schema for some services already happened. To do that delete the production databases, remove none upgrade database files from /root/mysql/flags/ and run salt 'I@mysql:client' state.sls mysql.client on the salt-master node")
+
+ // database restore section
+ try {
+ salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.stop', ['mysql'], null, true)
+ } catch (Exception er) {
+ common.warningMsg('Mysql service already stopped')
}
+ try {
+ salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.stop', ['mysql'], null, true)
+ } catch (Exception er) {
+ common.warningMsg('Mysql service already stopped')
+ }
+ try {
+ salt.cmdRun(saltMaster, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
+ } catch (Exception er) {
+ common.warningMsg('Files are not present')
+ }
+ try {
+ salt.cmdRun(saltMaster, 'I@galera:master', "mkdir /root/mysql/mysql.bak")
+ } catch (Exception er) {
+ common.warningMsg('Directory already exists')
+ }
+ try {
+ salt.cmdRun(saltMaster, 'I@galera:master', "rm -rf /root/mysql/mysql.bak/*")
+ } catch (Exception er) {
+ common.warningMsg('Directory already empty')
+ }
+ try {
+ salt.cmdRun(saltMaster, 'I@galera:master', "mv /var/lib/mysql/* /root/mysql/mysql.bak")
+ } catch (Exception er) {
+ common.warningMsg('Files were already moved')
+ }
+ try {
+ salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["/var/lib/mysql/.galera_bootstrap"], null, true)
+ } catch (Exception er) {
+ common.warningMsg('File is not present')
+ }
+ salt.cmdRun(saltMaster, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
+ _pillar = salt.getPillar(saltMaster, "I@galera:master", 'xtrabackup:client:backup_dir')
+ backup_dir = _pillar['return'][0].values()[0]
+ if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/mysql/xtrabackup' }
+ print(backup_dir)
+ salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
+ salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
+ salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.start', ['mysql'], null, true)
+ sleep(5)
+ salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.start', ['mysql'], null, true)
+ //
+
common.errorMsg("Stage Real control upgrade failed")
}
if(!errorOccured){
// salt 'cmp*' cmd.run 'service nova-compute restart'
salt.runSaltProcessStep(saltMaster, 'cmp*', 'service.restart', ['nova-compute'], null, true)
- // salt 'prx*' state.sls linux,openssh,salt.minion,ntp,rsyslog - TODO: proč? už to jednou projelo
+ // salt 'prx*' state.sls linux,openssh,salt.minion,ntp,rsyslog
// salt 'ctl*' state.sls keepalived
// salt 'prx*' state.sls keepalived
salt.enforceState(saltMaster, 'prx*', 'keepalived')
@@ -447,20 +484,43 @@
common.warningMsg('does not match any accepted, unaccepted or rejected keys. They were probably already removed. We should continue to run')
}
- databases = salt.cmdRun(saltMaster, 'I@mysql:client','salt-call mysql.db_list | grep -v \'upgrade\' | grep -v \'schema\' | awk \'/-/ {print \$2}\'')
- if(databases && databases != ""){
- databasesList = databases['return'][0].values()[0].trim().tokenize("\n")
- for( i = 0; i < databasesList.size(); i++){
- if(!databasesList[i].toLowerCase().contains('upgrade') && !databasesList[i].toLowerCase().contains('command execution')){
- salt.runSaltProcessStep(saltMaster, 'I@mysql:client', 'mysql.db_remove', ["${databasesList[i]}"], null, true)
- common.warningMsg("removing database ${databasesList[i]}")
- salt.runSaltProcessStep(saltMaster, 'I@mysql:client', 'file.remove', ["/root/mysql/flags/${databasesList[i]}-installed"], null, true)
- }
- }
- salt.enforceState(saltMaster, 'I@mysql:client', 'mysql.client')
- }else{
- common.errorMsg("No none _upgrade databases were returned")
+ // database restore section
+ try {
+ salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.stop', ['mysql'], null, true)
+ } catch (Exception e) {
+ common.warningMsg('Mysql service already stopped')
}
+ try {
+ salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.stop', ['mysql'], null, true)
+ } catch (Exception e) {
+ common.warningMsg('Mysql service already stopped')
+ }
+ try {
+ salt.cmdRun(saltMaster, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
+ } catch (Exception e) {
+ common.warningMsg('Files are not present')
+ }
+ try {
+ salt.cmdRun(saltMaster, 'I@galera:master', "rm -rf /var/lib/mysql/*")
+ } catch (Exception e) {
+ common.warningMsg('Directory already empty')
+ }
+ try {
+ salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["/var/lib/mysql/.galera_bootstrap"], null, true)
+ } catch (Exception e) {
+ common.warningMsg('File is not present')
+ }
+ salt.cmdRun(saltMaster, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
+ _pillar = salt.getPillar(saltMaster, "I@galera:master", 'xtrabackup:client:backup_dir')
+ backup_dir = _pillar['return'][0].values()[0]
+ if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/mysql/xtrabackup' }
+ print(backup_dir)
+ salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
+ salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
+ salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.start', ['mysql'], null, true)
+ sleep(5)
+ salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.start', ['mysql'], null, true)
+ //
salt.runSaltProcessStep(saltMaster, "${prx01NodeProvider}", 'virt.start', ["prx01.${domain}"], null, true)
salt.runSaltProcessStep(saltMaster, "${prx02NodeProvider}", 'virt.start', ["prx02.${domain}"], null, true)
@@ -471,7 +531,7 @@
// salt 'cmp*' cmd.run 'service nova-compute restart'
salt.runSaltProcessStep(saltMaster, 'cmp*', 'service.restart', ['nova-compute'], null, true)
- sleep(60)
+ sleep(70)
salt.cmdRun(saltMaster, 'ctl01*', '. /root/keystonerc; nova service-list; glance image-list; nova flavor-list; nova hypervisor-list; nova list; neutron net-list; cinder list; heat service-list')
}
diff --git a/test-nodejs-pipeline.groovy b/test-nodejs-pipeline.groovy
index b57d351..b1024cc 100644
--- a/test-nodejs-pipeline.groovy
+++ b/test-nodejs-pipeline.groovy
@@ -9,12 +9,12 @@
gerrit = new com.mirantis.mk.Gerrit()
common = new com.mirantis.mk.Common()
-def executeCmd(containerId, cmd) {
+def executeCmd(containerName, cmd) {
stage(cmd) {
- assert containerId != null
+ assert containerName != null
common.infoMsg("Starting command: ${cmd}")
def output = sh(
- script: "docker exec ${containerId} ${cmd}",
+ script: "docker exec ${containerName} ${cmd}",
returnStdout: true,
)
common.infoMsg(output)
@@ -40,7 +40,7 @@
def checkouted = false
node("vm") {
- def containerId
+ def containerName
def uniqId
try {
stage('Checkout source code') {
@@ -60,7 +60,7 @@
stage('Generate config file for devops portal') {
writeFile (
file: "${workspace}/test_config.json",
- text: '${JSON_CONFIG}'
+ text: "${JSON_CONFIG}"
)
}
stage('Start container') {
@@ -72,14 +72,16 @@
uniqId = defaultGitRef.tokenize('/').takeRight(2).join('') + timeStamp
}
sh("docker-compose -f ${COMPOSE_PATH} -p ${uniqId} up -d")
- containerId = "${uniqId}_devopsportal_1"
- common.successMsg("Container with id ${containerId} started.")
- sh("docker cp ${workspace}/. ${containerId}:/opt/workspace/")
+ containerName = "${uniqId}_devopsportal_1"
+ common.successMsg("Container with id ${containerName} started.")
+ sh("docker cp ${workspace}/. ${containerName}:/opt/workspace/")
}
- executeCmd(containerId, "npm install")
+ executeCmd(containerName, "npm install")
def cmds = COMMANDS.tokenize('\n')
for (int i = 0; i < cmds.size(); i++) {
- executeCmd(containerId, cmds[i])
+ timeout(5) {
+ executeCmd(containerName, cmds[i])
+ }
}
} catch (err) {
currentBuild.result = 'FAILURE'
@@ -88,14 +90,14 @@
} finally {
common.sendNotification(currentBuild.result, "" ,["slack"])
stage('Cleanup') {
- if (containerId != null) {
+ if (containerName != null) {
dockerCleanupCommands = ['stop', 'rm -f']
for (int i = 0; i < dockerCleanupCommands.size(); i++) {
sh("docker-compose -f ${COMPOSE_PATH} -p ${uniqId} ${dockerCleanupCommands[i]} || true")
}
sh("docker network rm ${uniqId}_default || true")
sh("rm -f ${workspace}/test_config.json || true")
- common.infoMsg("Container with id ${containerId} was removed.")
+ common.infoMsg("Container with id ${containerName} was removed.")
}
}
}
diff --git a/update-package.groovy b/update-package.groovy
index 1dd6e02..c6d008d 100644
--- a/update-package.groovy
+++ b/update-package.groovy
@@ -6,9 +6,9 @@
* SALT_MASTER_URL Full Salt API address [https://10.10.10.1:8000].
* TARGET_SERVERS Salt compound target to match nodes to be updated [*, G@osfamily:debian].
* TARGET_PACKAGES Space delimited list of packages to be updates [package1=version package2=version], empty string means all updating all packages to the latest version.
- * TARGET_SIZE_TEST Number of nodes to list package updates, empty string means all targetted nodes.
- * TARGET_SIZE_SAMPLE Number of selected noded to live apply selected package update.
- * TARGET_SIZE_BATCH Batch size for the complete live package update on all nodes, empty string means apply to all targetted nodes.
+ *   TARGET_SUBSET_TEST         Number of nodes to list package updates, empty string means all targeted nodes.
+ *   TARGET_SUBSET_LIVE         Number of selected nodes to live apply selected package update.
+ *   TARGET_BATCH_LIVE          Batch size for the complete live package update on all nodes, empty string means apply to all targeted nodes.
*
**/
diff --git a/xtrabackup-restore-mysql-db.groovy b/xtrabackup-restore-mysql-db.groovy
new file mode 100644
index 0000000..6c8b379
--- /dev/null
+++ b/xtrabackup-restore-mysql-db.groovy
@@ -0,0 +1,83 @@
+/**
+ * Restore MySQL databases from an Xtrabackup backup on Galera nodes
+ *
+ * Expected parameters:
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API.
+ * SALT_MASTER_URL Full Salt API address [http://10.10.10.1:8000].
+ *
+**/
+
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+
+
+def saltMaster
+
+timestamps {
+ node() {
+
+ stage('Connect to Salt API') {
+ saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ }
+
+ stage('Start restore') {
+ // # actual upgrade
+
+ stage('Ask for manual confirmation') {
+ input message: "Are you sure you have the correct backups ready? Do you really want to continue to restore mysql db?"
+ }
+ // database restore section
+ try {
+ salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.stop', ['mysql'], null, true)
+ } catch (Exception er) {
+ common.warningMsg('Mysql service already stopped')
+ }
+ try {
+ salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.stop', ['mysql'], null, true)
+ } catch (Exception er) {
+ common.warningMsg('Mysql service already stopped')
+ }
+ try {
+ salt.cmdRun(saltMaster, 'I@galera:slave', "rm /var/lib/mysql/ib_logfile*")
+ } catch (Exception er) {
+ common.warningMsg('Files are not present')
+ }
+ try {
+ salt.cmdRun(saltMaster, 'I@galera:master', "mkdir /root/mysql/mysql.bak")
+ } catch (Exception er) {
+ common.warningMsg('Directory already exists')
+ }
+ try {
+ salt.cmdRun(saltMaster, 'I@galera:master', "rm -rf /root/mysql/mysql.bak/*")
+ } catch (Exception er) {
+ common.warningMsg('Directory already empty')
+ }
+ try {
+ salt.cmdRun(saltMaster, 'I@galera:master', "mv /var/lib/mysql/* /root/mysql/mysql.bak")
+ } catch (Exception er) {
+ common.warningMsg('Files were already moved')
+ }
+ try {
+ salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["/var/lib/mysql/.galera_bootstrap"], null, true)
+ } catch (Exception er) {
+ common.warningMsg('File is not present')
+ }
+ salt.cmdRun(saltMaster, 'I@galera:master', "sed -i '/gcomm/c\\wsrep_cluster_address=\"gcomm://\"' /etc/mysql/my.cnf")
+ _pillar = salt.getPillar(saltMaster, "I@galera:master", 'xtrabackup:client:backup_dir')
+ backup_dir = _pillar['return'][0].values()[0]
+ if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/mysql/xtrabackup' }
+ print(backup_dir)
+ salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
+ salt.cmdRun(saltMaster, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
+ salt.runSaltProcessStep(saltMaster, 'I@galera:master', 'service.start', ['mysql'], null, true)
+ sleep(5)
+ salt.runSaltProcessStep(saltMaster, 'I@galera:slave', 'service.start', ['mysql'], null, true)
+ sleep(15)
+ salt.cmdRun(saltMaster, 'I@galera:master', "su root -c 'salt-call mysql.status | grep -A1 wsrep_cluster_size'")
+
+ }
+ }
+}
+
+
+