Merge "modified update mirror image pipeline"
diff --git a/.gitignore b/.gitignore
index 3060674..054b8b3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,7 @@
.gradle
build
-.idea
\ No newline at end of file
+.idea
+.project
+.settings
+.classpath
+/bin/**
\ No newline at end of file
diff --git a/aptly-promote-pipeline.groovy b/aptly-promote-pipeline.groovy
index dfc764e..99a07e1 100644
--- a/aptly-promote-pipeline.groovy
+++ b/aptly-promote-pipeline.groovy
@@ -16,11 +16,25 @@
components = ""
}
+def storages
+try {
+ storages = STORAGES.tokenize(',')
+} catch (MissingPropertyException e) {
+ storages = ['local']
+}
+
node() {
try{
stage("promote") {
lock("aptly-api") {
- aptly.promotePublish(APTLY_URL, SOURCE, TARGET, RECREATE, components, packages, DIFF_ONLY, '-d --timeout 600', DUMP_PUBLISH.toBoolean())
+ for (storage in storages) {
+
+ if (storage == "local") {
+ storage = ""
+ }
+
+ aptly.promotePublish(APTLY_URL, SOURCE, TARGET, RECREATE, components, packages, DIFF_ONLY, '-d --timeout 600', DUMP_PUBLISH.toBoolean(), storage)
+ }
}
}
} catch (Throwable e) {
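
The STORAGES handling above relies on Groovy property lookup: reading an undeclared Jenkins job parameter throws MissingPropertyException, so the job falls back to local storage when the parameter is not defined on the job. A minimal standalone sketch of that pattern, assuming a comma-separated STORAGES value such as "local,s3" (the parameter value is illustrative):

// Sketch of the optional-parameter pattern used above; the value is an example.
def storages
try {
    storages = STORAGES.tokenize(',')      // e.g. "local,s3" -> ['local', 's3']
} catch (MissingPropertyException e) {
    storages = ['local']                   // parameter not defined on the job
}
// the promote call expects an empty string for local storage, as in the loop above
storages = storages.collect { it == 'local' ? '' : it }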
diff --git a/build-mirror-image.groovy b/build-mirror-image.groovy
index cfe87b7..8aca89c 100644
--- a/build-mirror-image.groovy
+++ b/build-mirror-image.groovy
@@ -24,6 +24,8 @@
def rcFile = ""
def openstackEnv = ""
def serverStatus = ""
+def uploadImageStatus = 0
+def uploadMd5Status = 0
def retry(int times = 5, int delay = 0, Closure body) {
int retries = 0
@@ -111,10 +113,11 @@
salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['aptly_mirror_update.sh -s -v', 'runas=aptly'], null, true)
salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['nohup aptly api serve --no-lock > /dev/null 2>&1 </dev/null &', 'runas=aptly'], null, true)
salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['aptly-publisher --timeout=1200 publish -v -c /etc/aptly-publisher.yaml --architectures amd64 --url http://127.0.0.1:8080 --recreate --force-overwrite', 'runas=aptly'], null, true)
- salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['wget https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/master/mirror-image/aptly/aptly-update.sh -O /srv/scripts/aptly-update.sh'], null, true)
+ salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['aptly db cleanup', 'runas=aptly'], null, true)
//NEW way
//salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.script', ['salt://aptly/files/aptly_mirror_update.sh', "args=-sv", 'runas=aptly'], null, true)
- //salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.script', ['salt://aptly/files/aptly_publish_update.sh', "args=-arf", 'runas=aptly'], null, true)
+ //salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.script', ['salt://aptly/files/aptly_publish_update.sh', "args=-acrfv", 'runas=aptly'], null, true)
+ salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['wget https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/master/mirror-image/aptly/aptly-update.sh -O /srv/scripts/aptly-update.sh'], null, true)
}
stage("Create Git mirror"){
@@ -153,11 +156,23 @@
}
stage("Publish image"){
+ common.infoMsg("Saving image ${IMAGE_NAME}-${dateTime}")
openstack.runOpenstackCommand("openstack image save --file ${IMAGE_NAME}-${dateTime} ${IMAGE_NAME}-${dateTime}", rcFile, openstackEnv)
- python.setupVirtualenv(venvS4cmd)
- python.runVirtualenvCommand(venvS4cmd, "pip install s4cmd")
- creds = common.getPasswordCredentials(AWS_CREDENTIALS_ID)
- python.runVirtualenvCommand(venvS4cmd, "python ./${venvS4cmd}/bin/s4cmd.py --access-key ${creds.username} --secret-key ${creds.password.toString()} --multipart-split-size=5368709120 put ${IMAGE_NAME}-${dateTime} s3://${AWS_S3_BUCKET_NAME}/${IMAGE_NAME}-${dateTime}")
+ sh "md5sum ${IMAGE_NAME}-${dateTime} > ${IMAGE_NAME}-${dateTime}.md5"
+
+ common.infoMsg("Uploading image ${IMAGE_NAME}-${dateTime}")
+ retry(3, 5){
+ uploadImageStatus = sh(script: "curl -f -T ${IMAGE_NAME}-${dateTime} ${UPLOAD_URL}", returnStatus: true)
+ if (uploadImageStatus != 0) {
+ throw new Exception("Image upload failed")
+ }
+ }
+ retry(3, 5){
+ uploadMd5Status = sh(script: "curl -f -T ${IMAGE_NAME}-${dateTime}.md5 ${UPLOAD_URL}", returnStatus: true)
+ if(uploadMd5Status != 0){
+ throw new Exception("MD5 sum upload failed")
+ }
+ }
}
} catch (Throwable e) {
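
The two upload steps added above wrap sh(returnStatus: true) in the script-local retry() helper, whose body lies outside this hunk; only its signature, retry(int times = 5, int delay = 0, Closure body), is visible. A rough sketch of a helper matching that signature and of the upload call it guards; the helper body here is an assumption, not copied from the file:

// Assumed shape of the retry helper: re-run the closure up to `times` times,
// waiting `delay` between attempts, and fail only after the last attempt.
def retry(int times = 5, int delay = 0, Closure body) {
    int retries = 0
    def lastError = null
    while (retries++ < times) {
        try {
            return body.call()
        } catch (e) {
            lastError = e
            sleep(delay)                   // Jenkins 'sleep' step: seconds between attempts
        }
    }
    throw new Exception("Failed after ${times} attempts: ${lastError}")
}

// curl -f turns HTTP errors into a non-zero exit code; returnStatus exposes it to Groovy
retry(3, 5) {
    def status = sh(script: "curl -f -T ${IMAGE_NAME}-${dateTime} ${UPLOAD_URL}", returnStatus: true)
    if (status != 0) {
        throw new Exception("Image upload failed")
    }
}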
diff --git a/ceph-upgrade.groovy b/ceph-upgrade.groovy
index 5844f77..8323e41 100644
--- a/ceph-upgrade.groovy
+++ b/ceph-upgrade.groovy
@@ -46,37 +46,48 @@
def backup(master, target) {
stage("backup ${target}") {
- def _pillar = salt.getGrain(master, 'I@salt:master', 'domain')
- def domain = _pillar['return'][0].values()[0].values()[0]
-
- def kvm_pillar = salt.getGrain(master, 'I@salt:control', 'id')
- def kvm01 = kvm_pillar['return'][0].values()[0].values()[0]
-
- def target_pillar = salt.getGrain(master, "I@ceph:${target}", 'host')
- def minions = target_pillar['return'][0].values()
- for (minion in minions) {
- def minion_name = minion.values()[0]
- def provider_pillar = salt.getPillar(master, "${kvm01}", "salt:control:cluster:internal:node:${minion_name}:provider")
- def minionProvider = provider_pillar['return'][0].values()[0]
-
- waitForHealthy(master)
+ if (target == 'osd') {
try {
- salt.cmdRun(master, "${minionProvider}", "[ ! -f /root/${minion_name}.${domain}.qcow2.bak ] && virsh destroy ${minion_name}.${domain}")
+ salt.enforceState(master, "I@ceph:${target}", "ceph.backup", true)
+ runCephCommand(master, "I@ceph:${target}", "su root -c '/usr/local/bin/ceph-backup-runner-call.sh'")
} catch (Exception e) {
- common.warningMsg('Backup already exists')
+ common.errorMsg(e)
+ common.errorMsg("Make sure Ceph backup on OSD nodes is enabled")
+ throw new InterruptedException()
}
- try {
- salt.cmdRun(master, "${minionProvider}", "[ ! -f /root/${minion_name}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${minion_name}.${domain}/system.qcow2 /root/${minion_name}.${domain}.qcow2.bak")
- } catch (Exception e) {
- common.warningMsg('Backup already exists')
+ } else {
+ def _pillar = salt.getGrain(master, 'I@salt:master', 'domain')
+ def domain = _pillar['return'][0].values()[0].values()[0]
+
+ def kvm_pillar = salt.getGrain(master, 'I@salt:control', 'id')
+ def kvm01 = kvm_pillar['return'][0].values()[0].values()[0]
+
+ def target_pillar = salt.getGrain(master, "I@ceph:${target}", 'host')
+ def minions = target_pillar['return'][0].values()
+ for (minion in minions) {
+ def minion_name = minion.values()[0]
+ def provider_pillar = salt.getPillar(master, "${kvm01}", "salt:control:cluster:internal:node:${minion_name}:provider")
+ def minionProvider = provider_pillar['return'][0].values()[0]
+
+ waitForHealthy(master)
+ try {
+ salt.cmdRun(master, "${minionProvider}", "[ ! -f /root/${minion_name}.${domain}.qcow2.bak ] && virsh destroy ${minion_name}.${domain}")
+ } catch (Exception e) {
+ common.warningMsg('Backup already exists')
+ }
+ try {
+ salt.cmdRun(master, "${minionProvider}", "[ ! -f /root/${minion_name}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${minion_name}.${domain}/system.qcow2 /root/${minion_name}.${domain}.qcow2.bak")
+ } catch (Exception e) {
+ common.warningMsg('Backup already exists')
+ }
+ try {
+ salt.cmdRun(master, "${minionProvider}", "virsh start ${minion_name}.${domain}")
+ } catch (Exception e) {
+ common.warningMsg(e)
+ }
+ salt.minionsReachable(master, 'I@salt:master', "${minion_name}*")
+ waitForHealthy(master)
}
- try {
- salt.cmdRun(master, "${minionProvider}", "virsh start ${minion_name}.${domain}")
- } catch (Exception e) {
- common.warningMsg(e)
- }
- salt.minionsReachable(master, 'I@salt:master', "${minion_name}*")
- waitForHealthy(master)
}
}
return
@@ -143,6 +154,9 @@
if (STAGE_UPGRADE_RGW.toBoolean() == true) {
backup(pepperEnv, 'radosgw')
}
+ if (STAGE_UPGRADE_OSD.toBoolean() == true) {
+ backup(pepperEnv, 'osd')
+ }
}
if (flags.size() > 0) {
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index cbb09b7..c0652e9 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -400,6 +400,12 @@
}
+ if (common.checkContains('STACK_INSTALL', 'oss')) {
+ stage('Install OSS infra') {
+ orchestrate.installOssInfra(venvPepper)
+ }
+ }
+
if (common.checkContains('STACK_INSTALL', 'cicd')) {
stage('Install Cicd') {
orchestrate.installDockerSwarm(venvPepper)
@@ -421,6 +427,17 @@
}
}
+ if (common.checkContains('STACK_INSTALL', 'oss')) {
+ stage('Install OSS') {
+ if (!common.checkContains('STACK_INSTALL', 'stacklight')) {
+ // If StackLight v2 is enabled, these containers are already started
+ orchestrate.installDockerSwarm(venvPepper)
+ salt.enforceState(venvPepper, 'I@docker:swarm:role:master and I@devops_portal:config', 'docker.client', true)
+ }
+ orchestrate.installOss(venvPepper)
+ }
+ }
+
//
// Test
//
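
On the STACK_INSTALL checks guarding the new OSS stages: common.checkContains() presumably tests whether the comma-separated STACK_INSTALL job parameter includes the named component; a simplified standalone equivalent, with an example parameter value, is below (both the value and the simplified check are assumptions, not part of this change):

// Illustrative STACK_INSTALL value and a simplified equivalent of the component check;
// checkContains may normalise case or whitespace differently.
def stackInstall = 'core,kvm,cicd,oss,stacklight'
def installOss = stackInstall.tokenize(',').contains('oss')                // true
def installStacklight = stackInstall.tokenize(',').contains('stacklight')  // true, so Swarm is already up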
diff --git a/docker-mirror-images.groovy b/docker-mirror-images.groovy
new file mode 100644
index 0000000..8867875
--- /dev/null
+++ b/docker-mirror-images.groovy
@@ -0,0 +1,54 @@
+/**
+ *
+ * Mirror Docker images
+ *
+ * Expected parameters:
+ * TARGET_REGISTRY_CREDENTIALS_ID Credentials for target Docker Registry
+ * TARGET_REGISTRY Target Docker Registry name
+ * REGISTRY_URL Target Docker Registry URL
+ * IMAGE_TAG Tag to use when pushing images
+ * IMAGE_LIST List of images to mirror, one entry per line: "<source image> <target registry>"
+ *
+ */
+import java.util.regex.Pattern;
+
+def common = new com.mirantis.mk.Common()
+
+@NonCPS
+def getImageName(String image) {
+ def regex = Pattern.compile('(?:.+/)?([^:]+)(?::.+)?')
+ def matcher = regex.matcher(image)
+ if(matcher.find()){
+ def imageName = matcher.group(1)
+ return imageName
+ }else{
+ throw new IllegalArgumentException("Wrong format of image name.")
+ }
+}
+
+node("docker") {
+ try {
+ stage("Mirror Docker Images"){
+ def creds = common.getPasswordCredentials(TARGET_REGISTRY_CREDENTIALS_ID)
+ sh "docker login --username=${creds.username} --password=${creds.password.toString()} ${REGISTRY_URL}"
+ def images = IMAGE_LIST.tokenize('\n')
+ def imageName, imagePath, targetRegistry, imageArray
+ for (image in images){
+ if(image.trim().indexOf(' ') == -1){
+ throw new IllegalArgumentException("Wrong format of image and target repository input")
+ }
+ imageArray = image.trim().tokenize(' ')
+ imagePath = imageArray[0]
+ targetRegistry = imageArray[1]
+ imageName = getImageName(imagePath)
+ sh """docker pull ${imagePath}
+ docker tag ${imagePath} ${targetRegistry}/${imageName}:${IMAGE_TAG}
+ docker push ${targetRegistry}/${imageName}:${IMAGE_TAG}"""
+ }
+ }
+ } catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ currentBuild.result = "FAILURE"
+ throw e
+ }
+}
\ No newline at end of file
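
To make the new job's input format explicit: IMAGE_LIST is split on newlines, each line is split on a space into the source image path and the target registry, and getImageName() keeps only the last path component without its tag. An illustrative input and the resulting push reference (all names below are examples, not defaults of this job):

// Illustrative IMAGE_LIST value; each line is "<source image> <target registry>".
def IMAGE_LIST = '''registry.example.com/mirantis/external/docker:17.03 target.example.com/mirantis
registry.example.com/mirantis/external/registry:2 target.example.com/mirantis'''

// getImageName('registry.example.com/mirantis/external/docker:17.03') returns 'docker',
// so with IMAGE_TAG = 'latest' the first image is pushed as:
//   target.example.com/mirantis/docker:latest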
diff --git a/opencontrail-upgrade.groovy b/opencontrail-upgrade.groovy
index 311cfef..83f17ee 100644
--- a/opencontrail-upgrade.groovy
+++ b/opencontrail-upgrade.groovy
@@ -32,18 +32,22 @@
def CONTROL_PKGS = 'contrail-config contrail-config-openstack contrail-control contrail-dns contrail-lib contrail-nodemgr contrail-utils contrail-web-controller contrail-web-core neutron-plugin-contrail python-contrail'
def ANALYTIC_PKGS = 'contrail-analytics contrail-lib contrail-nodemgr contrail-utils python-contrail'
-def CMP_PKGS = 'contrail-lib contrail-nodemgr contrail-utils contrail-vrouter-agent contrail-vrouter-utils python-contrail python-contrail-vrouter-api python-opencontrail-vrouter-netns contrail-vrouter-dkms'
+def CMP_PKGS = 'contrail-lib contrail-nodemgr contrail-utils contrail-vrouter-agent contrail-vrouter-utils python-contrail python-contrail-vrouter-api python-opencontrail-vrouter-netns contrail-vrouter-dkms contrail-nova-driver'
def KERNEL_MODULE_RELOAD = 'service supervisor-vrouter stop;ifdown vhost0;rmmod vrouter;modprobe vrouter;ifup vhost0;service supervisor-vrouter start;'
def void runCommonCommands(target, command, args, check, salt, pepperEnv, common) {
out = salt.runSaltCommand(pepperEnv, 'local', ['expression': target, 'type': 'compound'], command, null, args, null)
salt.printSaltCommandResult(out)
+ // if an error occurred, throw an exception
+ if (out.toString().contains('E: ')) {
+ throw new Exception("Command execution failed")
+ }
// wait until $check is in correct state
if ( check == "nodetool status" ) {
salt.commandStatus(pepperEnv, target, check, 'Status=Up')
} else if ( check == "contrail-status" ) {
- salt.commandStatus(pepperEnv, target, "${check} | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
+ salt.commandStatus(pepperEnv, target, "${check} | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup | grep -v -F /var/crashes/", null, false)
}
//out = salt.runSaltCommand(pepperEnv, 'local', ['expression': target, 'type': 'compound'], command, null, check, null)
@@ -75,18 +79,22 @@
return
}
+ salt.enforceState(pepperEnv, 'I@zookeeper:backup:server', 'zookeeper.backup')
+ salt.enforceState(pepperEnv, 'I@zookeeper:backup:client', 'zookeeper.backup')
+
try {
salt.cmdRun(pepperEnv, 'I@opencontrail:control', "su root -c '/usr/local/bin/zookeeper-backup-runner.sh'")
} catch (Exception er) {
- common.errorMsg('Zookeeper failed to backup. Please fix it before continuing.')
- return
+ throw new Exception('Zookeeper failed to back up. Please fix it before continuing.')
}
+ salt.enforceState(pepperEnv, 'I@cassandra:backup:server', 'cassandra.backup')
+ salt.enforceState(pepperEnv, 'I@cassandra:backup:client', 'cassandra.backup')
+
try {
salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', "su root -c '/usr/local/bin/cassandra-backup-runner-call.sh'")
} catch (Exception er) {
- common.errorMsg('Cassandra failed to backup. Please fix it before continuing.')
- return
+ throw new Exception('Cassandra failed to back up. Please fix it before continuing.')
}
args = 'apt install contrail-database -y;'
@@ -228,7 +236,7 @@
salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
//sleep(10)
- salt.commandStatus(pepperEnv, targetLiveSubset, "${check} | grep -v == | grep -v active", null, false)
+ salt.commandStatus(pepperEnv, targetLiveSubset, "${check} | grep -v == | grep -v active | grep -v -F /var/crashes/", null, false)
out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, check, null)
salt.printSaltCommandResult(out)
@@ -265,7 +273,7 @@
salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
//sleep(10)
- salt.commandStatus(pepperEnv, targetLiveAll, "${check} | grep -v == | grep -v active", null, false)
+ salt.commandStatus(pepperEnv, targetLiveAll, "${check} | grep -v == | grep -v active | grep -v -F /var/crashes/", null, false)
out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, check, null)
salt.printSaltCommandResult(out)
@@ -438,7 +446,7 @@
salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
//sleep(10)
- salt.commandStatus(pepperEnv, targetLiveSubset, "${check} | grep -v == | grep -v active", null, false)
+ salt.commandStatus(pepperEnv, targetLiveSubset, "${check} | grep -v == | grep -v active | grep -v -F /var/crashes/", null, false)
out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, check, null)
salt.printSaltCommandResult(out)
@@ -477,7 +485,7 @@
salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
//sleep(10)
- salt.commandStatus(pepperEnv, targetLiveAll, "${check} | grep -v == | grep -v active", null, false)
+ salt.commandStatus(pepperEnv, targetLiveAll, "${check} | grep -v == | grep -v active | grep -v -F /var/crashes/", null, false)
out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, check, null)
salt.printSaltCommandResult(out)
diff --git a/test-salt-formulas-env.groovy b/test-salt-formulas-env.groovy
index 4edcdb7..f763e5e 100644
--- a/test-salt-formulas-env.groovy
+++ b/test-salt-formulas-env.groovy
@@ -63,6 +63,8 @@
common.infoMsg("Running part of kitchen test")
if (KITCHEN_ENV != null && !KITCHEN_ENV.isEmpty() && KITCHEN_ENV != "") {
def cleanEnv = KITCHEN_ENV.replaceAll("\\s?SUITE=[^\\s]*", "")
+ sh("find . -type f -exec sed -i 's/apt.mirantis.com/apt.mirantis.net:8085/g' {} \\;")
+ sh("find . -type f -exec sed -i 's/apt-mk.mirantis.com/apt.mirantis.net:8085/g' {} \\;")
def suite = ruby.getSuiteName(KITCHEN_ENV)
if (suite && suite != "") {
common.infoMsg("Running kitchen test with environment:" + KITCHEN_ENV.trim())
diff --git a/validate-cloud.groovy b/validate-cloud.groovy
index 6159a96..f1a4ab1 100644
--- a/validate-cloud.groovy
+++ b/validate-cloud.groovy
@@ -27,6 +27,8 @@
* RALLY_FLAVOR The name of the flavor for Rally image
* RALLY_CONFIG_REPO Git repository with files for Rally
* RALLY_CONFIG_BRANCH Git branch which will be used during the checkout
+ * RALLY_SCENARIOS Path to file or directory with rally scenarios
+ * RALLY_TASK_ARGS_FILE Path to file with rally tests arguments
* TEST_K8S_API_SERVER Kubernetes API address
* TEST_K8S_CONFORMANCE_IMAGE Path to docker image with conformance e2e tests
* TEST_K8S_NODE Kubernetes node to run tests from
@@ -71,7 +73,7 @@
"rally_image=${RALLY_IMAGE}",
"rally_flavor=${RALLY_FLAVOR}",
"availability_zone=${AVAILABILITY_ZONE}"]
- validate.runRallyTests(pepperEnv, TARGET_NODE, TEST_IMAGE, artifacts_dir, RALLY_CONFIG_REPO, RALLY_CONFIG_BRANCH, rally_variables)
+ validate.runRallyTests(pepperEnv, TARGET_NODE, TEST_IMAGE, artifacts_dir, RALLY_CONFIG_REPO, RALLY_CONFIG_BRANCH, RALLY_SCENARIOS, RALLY_TASK_ARGS_FILE, rally_variables)
} else {
common.infoMsg("Skipping Rally tests")
}
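
The Rally call now forwards two additional job parameters straight to validate.runRallyTests(); they point at scenario definitions and task arguments inside RALLY_CONFIG_REPO. The values below show how the new parameters might be set; they are examples only, not defaults introduced by this change:

// Illustrative values for the two new parameters (examples only):
// RALLY_SCENARIOS       - scenario file or directory inside RALLY_CONFIG_REPO, e.g. 'rally-scenarios'
// RALLY_TASK_ARGS_FILE  - YAML file with task arguments for those scenarios, e.g. 'job-params.yaml'
validate.runRallyTests(pepperEnv, TARGET_NODE, TEST_IMAGE, artifacts_dir,
    RALLY_CONFIG_REPO, RALLY_CONFIG_BRANCH, RALLY_SCENARIOS, RALLY_TASK_ARGS_FILE, rally_variables)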