Merge "cloud deploy: remove ZWSP from STACK_NAME"
diff --git a/.gitreview b/.gitreview
index 9075ea3..ce0aa41 100644
--- a/.gitreview
+++ b/.gitreview
@@ -1,4 +1,4 @@
[gerrit]
-host=gerrit.mcp.mirantis.net
+host=gerrit.mcp.mirantis.com
port=29418
project=mk/mk-pipelines.git
diff --git a/branch-git-repos.groovy b/branch-git-repos.groovy
index 47c143a..0624c40 100644
--- a/branch-git-repos.groovy
+++ b/branch-git-repos.groovy
@@ -116,19 +116,9 @@
sh "git branch -d '${gitBranchNew}' && git push origin ':${gitBranchNew}' || :"
sh "git tag -d '${gitBranchNew}' && git push origin ':refs/tags/${gitBranchNew}' || :"
- // Check if gitSrcObj is a branch
- gitCommit = sh (script: "git ls-remote --heads --quiet origin '${gitSrcObj}' | awk '{print \$1}'",
- returnStdout: true).trim()
- if (gitCommit) {
- // Rename existing branch
- sh "git checkout -b '${gitSrcObj}' -t 'origin/${gitSrcObj}'" // Checkout old branch
- sh "git branch -m '${gitSrcObj}' '${gitBranchNew}'" // ... rename it
- sh "git push origin ':${gitSrcObj}'" // ... remove old remote branch
- } else {
// Create new branch
- sh "git checkout -b '${gitBranchNew}' '${gitSrcObj}'" // Create new local branch
- }
- sh "git push origin '${gitBranchNew}'" // ... push new branch
+ sh "git checkout -b '${gitBranchNew}' '${gitSrcObj}'" // Create new local branch
+ sh "git push origin '${gitBranchNew}'" // ... push new branch
}
}
}
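The removed block probed the remote with `git ls-remote --heads` to decide whether the source object was an existing branch (and then renamed it) before falling back to plain branch creation; the patch keeps only the fallback. A minimal Groovy sketch of that probe, assuming a checked-out workspace with an `origin` remote (values are illustrative):

    // Empty output from ls-remote --heads means gitSrcObj is a tag or
    // a commit hash rather than a branch on origin.
    def gitSrcObj = 'release/2019.2.0'  // hypothetical source object
    def gitCommit = sh(
        script: "git ls-remote --heads --quiet origin '${gitSrcObj}' | awk '{print \$1}'",
        returnStdout: true
    ).trim()
    if (gitCommit) {
        echo "'${gitSrcObj}' is a branch at commit ${gitCommit}"
    } else {
        echo "'${gitSrcObj}' is a tag or a commit hash"
    }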
diff --git a/build-debian-packages-prometheus-relay.groovy b/build-debian-packages-prometheus-relay.groovy
index f101f57..ea19c9d 100644
--- a/build-debian-packages-prometheus-relay.groovy
+++ b/build-debian-packages-prometheus-relay.groovy
@@ -13,7 +13,7 @@
sh("rm -rf * || true")
}
- def workingDir = "src/gerrit.mcp.mirantis.net/debian"
+ def workingDir = "src/gerrit.mcp.mirantis.com/debian"
stage("checkout") {
git.checkoutGitRepository(
"${workingDir}/prometheus-relay",
@@ -53,7 +53,7 @@
export GOROOT=\$PWD/go &&
export GOPATH=\$PWD &&
export PATH=\$PATH:\$GOPATH/bin:\$GOROOT/bin &&
- cd src/gerrit.mcp.mirantis.net/debian/prometheus-relay &&
+ cd src/gerrit.mcp.mirantis.com/debian/prometheus-relay &&
make""")
}
archiveArtifacts artifacts: "${workingDir}/prometheus-relay/build/*.deb"
diff --git a/cicd-lab-pipeline.groovy b/cicd-lab-pipeline.groovy
index da3e177..8a7a90d 100644
--- a/cicd-lab-pipeline.groovy
+++ b/cicd-lab-pipeline.groovy
@@ -194,7 +194,7 @@
// XXX: retry to workaround magical VALUE_TRIMMED
// response from salt master + to give slow cloud some
// more time to settle down
- salt.cmdRun(pepperEnv, 'I@aptly:server', 'while true; do curl -sf http://172.16.10.254:8084/api/version >/dev/null && break; done')
+ salt.cmdRun(pepperEnv, 'I@aptly:server', 'while true; do curl -sf http://apt.mcp.mirantis.net:8081/api/version >/dev/null && break; done')
}
}
salt.enforceState(pepperEnv, 'I@aptly:server', 'aptly', true)
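The new readiness check polls the aptly API via its service hostname instead of a hardcoded VIP, but still loops with `while true`, so it only terminates through the job-level timeout if the endpoint never answers. A bounded variant of the same probe might look like this (a sketch; the 60x5s retry budget is an assumption, not part of the change):

    // Poll the aptly API up to 60 times, 5 seconds apart, then give up.
    salt.cmdRun(pepperEnv, 'I@aptly:server',
        'for i in $(seq 1 60); do ' +
        'curl -sf http://apt.mcp.mirantis.net:8081/api/version >/dev/null && exit 0; ' +
        'sleep 5; done; exit 1')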
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index 254f39d..1f7dd1f 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -203,7 +203,7 @@
envParams.put('cfg_bootstrap_extra_repo_params', BOOTSTRAP_EXTRA_REPO_PARAMS)
}
- // put extra salt-formulas
+ // put extra salt-formulas # FIXME: looks like some outdated logic. See #PROD-23127
if (common.validInputParam('EXTRA_FORMULAS')) {
common.infoMsg("Setting extra salt-formulas to ${EXTRA_FORMULAS}")
envParams.put('cfg_extra_formulas', EXTRA_FORMULAS)
@@ -378,8 +378,8 @@
}
// ensure certificates are generated properly
- salt.runSaltProcessStep(venvPepper, "* ${extra_tgt}", 'saltutil.refresh_pillar', [], null, true)
- salt.enforceState(venvPepper, "* ${extra_tgt}", ['salt.minion.cert'], true)
+ salt.runSaltProcessStep(venvPepper, "I@kubernetes:* ${extra_tgt}", 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(venvPepper, "I@kubernetes:* ${extra_tgt}", ['salt.minion.cert'], true)
}
if (common.checkContains('STACK_INSTALL', 'contrail')) {
@@ -427,6 +427,23 @@
}
orchestrate.installKubernetesCompute(venvPepper, extra_tgt)
+ // Setup kubernetes addons for opencontrail. More info in the definition of the func.
+ orchestrate.setupKubeAddonForContrail(venvPepper, extra_tgt)
+ }
+ }
+
+ // install ceph
+ if (common.checkContains('STACK_INSTALL', 'ceph')) {
+ stage('Install Ceph MONs') {
+ orchestrate.installCephMon(venvPepper, "I@ceph:mon ${extra_tgt}", extra_tgt)
+ }
+
+ stage('Install Ceph OSDs') {
+ orchestrate.installCephOsd(venvPepper, "I@ceph:osd ${extra_tgt}", true, extra_tgt)
+ }
+
+ stage('Install Ceph clients') {
+ orchestrate.installCephClient(venvPepper, extra_tgt)
}
}
@@ -473,25 +490,14 @@
if (common.checkContains('STACK_INSTALL', 'contrail')) {
orchestrate.installContrailCompute(venvPepper, extra_tgt)
+ orchestrate.installBackup(venvPepper, 'contrail', extra_tgt)
}
}
}
- // install ceph
+ // connect ceph
if (common.checkContains('STACK_INSTALL', 'ceph')) {
- stage('Install Ceph MONs') {
- orchestrate.installCephMon(venvPepper, "I@ceph:mon ${extra_tgt}", extra_tgt)
- }
-
- stage('Install Ceph OSDs') {
- orchestrate.installCephOsd(venvPepper, "I@ceph:osd ${extra_tgt}", true, extra_tgt)
- }
-
-
- stage('Install Ceph clients') {
- orchestrate.installCephClient(venvPepper, extra_tgt)
- }
stage('Connect Ceph') {
orchestrate.connectCeph(venvPepper, extra_tgt)
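Taken together, the two cloud-deploy-pipeline hunks move the Ceph MON/OSD/client installation stages up, next to the Kubernetes section, and leave only 'Connect Ceph' at the old location. The resulting control flow, condensed into a sketch (not a verbatim excerpt):

    // Ceph cluster is installed early ...
    if (common.checkContains('STACK_INSTALL', 'ceph')) {
        stage('Install Ceph MONs') { orchestrate.installCephMon(venvPepper, "I@ceph:mon ${extra_tgt}", extra_tgt) }
        stage('Install Ceph OSDs') { orchestrate.installCephOsd(venvPepper, "I@ceph:osd ${extra_tgt}", true, extra_tgt) }
        stage('Install Ceph clients') { orchestrate.installCephClient(venvPepper, extra_tgt) }
    }
    // ... OpenStack/compute stages run in between ...
    if (common.checkContains('STACK_INSTALL', 'ceph')) {
        stage('Connect Ceph') { orchestrate.connectCeph(venvPepper, extra_tgt) }
    }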
diff --git a/cloud-update.groovy b/cloud-update.groovy
index 2729d98..ab72f76 100644
--- a/cloud-update.groovy
+++ b/cloud-update.groovy
@@ -58,7 +58,7 @@
def command
def commandKwargs
-def wait = 10
+wait = 10
if (common.validInputParam('MINIONS_TEST_TIMEOUT') && MINIONS_TEST_TIMEOUT.isInteger()) {
wait = "${MINIONS_TEST_TIMEOUT}".toInteger()
}
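Dropping `def` here is deliberate: `def wait` creates a variable local to the script body, invisible inside functions defined later in the same file, while the bare assignment puts `wait` into the script binding, where those functions can read it. A standalone Groovy illustration of the difference:

    // Binding variable: visible inside methods of the same script.
    wait = 10

    def printWait() {
        // Resolves through the script binding. With 'def wait = 10' at the
        // top level this would throw MissingPropertyException instead.
        println(wait)
    }

    printWait()  // prints 10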
@@ -388,7 +388,7 @@
} else {
def salt = new com.mirantis.mk.Salt()
for (s in services) {
- def outputServicesStr = salt.getReturnValues(salt.cmdRun(pepperEnv, "${probe}*", "service --status-all | grep ${s} | awk \'{print \$4}\'"))
+ def outputServicesStr = salt.getReturnValues(salt.cmdRun(pepperEnv, probe, "service --status-all | grep ${s} | awk \'{print \$4}\'"))
def servicesList = outputServicesStr.tokenize("\n").init() //init() returns the items from the Iterable excluding the last item
if (servicesList) {
for (name in servicesList) {
@@ -458,7 +458,8 @@
stage("Apply highstate on ${target} nodes") {
try {
common.retry(3){
- salt.enforceHighstate(pepperEnv, target)
+ out = salt.enforceHighstate(pepperEnv, target)
+ salt.printSaltCommandResult(out)
}
} catch (Exception e) {
common.errorMsg(e)
@@ -572,14 +573,12 @@
}
try {
salt.minionsReachable(pepperEnv, 'I@salt:master', tgt)
- // purge and setup previous repos
- salt.enforceState(pepperEnv, tgt, 'linux.system.repo')
} catch (Exception e) {
common.errorMsg(e)
if (INTERACTIVE.toBoolean()) {
- input message: "Salt state linux.system.repo on ${tgt} failed. Do you want to PROCEED?."
+ input message: "Not all minions ${tgt} returned after snapshot revert. Do you want to PROCEED?"
} else {
- throw new Exception("Salt state linux.system.repo on ${tgt} failed")
+ throw new Exception("Not all minions ${tgt} returned after snapshot revert")
}
}
}
@@ -847,7 +846,7 @@
timeout(time: 12, unit: 'HOURS') {
node() {
try {
- if(RUN_CVP_TESTS.toBoolean() == True){
+ if(RUN_CVP_TESTS.toBoolean() == true){
stage('Run CVP tests before upgrade.') {
build job: "cvp-sanity"
build job: "cvp-func"
@@ -1582,7 +1581,7 @@
// verification is already present in restore pipelines
}
- if(RUN_CVP_TESTS.toBoolean() == True){
+ if(RUN_CVP_TESTS.toBoolean() == true){
stage('Run CVP tests after upgrade.') {
build job: "cvp-sanity"
build job: "cvp-func"
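The `True` -> `true` fixes above are not cosmetic: Groovy has no `True` literal, so the old comparison resolved `True` as a property lookup and failed at runtime. A tiny sketch:

    assert 'true'.toBoolean() == true      // valid Groovy
    // RUN_CVP_TESTS.toBoolean() == True   // would throw
    // groovy.lang.MissingPropertyException: No such property: True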
diff --git a/cvp-func.groovy b/cvp-func.groovy
index d1fff1a..0c657a5 100644
--- a/cvp-func.groovy
+++ b/cvp-func.groovy
@@ -1,6 +1,6 @@
/**
*
- * Launch validation of the cloud
+ * Launch CVP Tempest verification of the cloud
*
* Expected parameters:
@@ -14,7 +14,7 @@
* SKIP_LIST_PATH Path to tempest skip list file in TOOLS_REPO
* TARGET_NODE Node to run container with Tempest/Rally
* TEMPEST_REPO Tempest repo to clone and use
- * TEMPEST_TEST_PATTERN Tests to run during HA scenarios
+ * TEMPEST_TEST_PATTERN Tests to run
* TEMPEST_ENDPOINT_TYPE Type of OS endpoint to use during test run
*
*/
@@ -30,11 +30,15 @@
node() {
try{
stage('Initialization') {
- saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- validate.runBasicContainer(saltMaster, TARGET_NODE, TEST_IMAGE)
sh "rm -rf ${artifacts_dir}"
+ saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_artifacts_dir}")
+ keystone_creds = validate._get_keystone_creds_v3(saltMaster)
+ if (!keystone_creds) {
+ keystone_creds = validate._get_keystone_creds_v2(saltMaster)
+ }
+ validate.runContainer(saltMaster, TARGET_NODE, TEST_IMAGE, 'cvp', keystone_creds)
validate.configureContainer(saltMaster, TARGET_NODE, PROXY, TOOLS_REPO, TEMPEST_REPO, TEMPEST_ENDPOINT_TYPE)
}
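cvp-func, cvp-ha and cvp-perf now share the same container bootstrap: clean the artifact directories first, fetch Keystone v3 credentials from the cloud, fall back to v2 when v3 yields nothing, and start the test container with those credentials injected. The pattern, isolated (the `_get_keystone_creds_*` and `runContainer` helpers are the pipeline-library `com.mirantis.mcp.Validate` methods used above):

    def keystone_creds = validate._get_keystone_creds_v3(saltMaster)
    if (!keystone_creds) {
        // No v3 credentials in the model -- fall back to legacy v2
        keystone_creds = validate._get_keystone_creds_v2(saltMaster)
    }
    validate.runContainer(saltMaster, TARGET_NODE, TEST_IMAGE, 'cvp', keystone_creds)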
diff --git a/cvp-ha.groovy b/cvp-ha.groovy
index ab5b5d4..b33cda6 100644
--- a/cvp-ha.groovy
+++ b/cvp-ha.groovy
@@ -1,6 +1,6 @@
/**
*
- * Launch HA test for the cloud
+ * Launch CVP HA testing for the cloud (virtualized control plane only)
*
* Expected parameters:
*
@@ -36,11 +36,15 @@
def num_retries = Integer.parseInt(RETRY_CHECK_STATUS)
try {
stage('Initialization') {
- saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- validate.runBasicContainer(saltMaster, TEMPEST_TARGET_NODE, TEST_IMAGE)
sh "rm -rf ${artifacts_dir}"
+ saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
salt.cmdRun(saltMaster, TEMPEST_TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
salt.cmdRun(saltMaster, TEMPEST_TARGET_NODE, "mkdir -p ${remote_artifacts_dir}")
+ keystone_creds = validate._get_keystone_creds_v3(saltMaster)
+ if (!keystone_creds) {
+ keystone_creds = validate._get_keystone_creds_v2(saltMaster)
+ }
+ validate.runContainer(saltMaster, TEMPEST_TARGET_NODE, TEST_IMAGE, 'cvp', keystone_creds)
validate.configureContainer(saltMaster, TEMPEST_TARGET_NODE, PROXY, TOOLS_REPO, TEMPEST_REPO)
}
diff --git a/cvp-perf.groovy b/cvp-perf.groovy
index fe86197..74c9a63 100644
--- a/cvp-perf.groovy
+++ b/cvp-perf.groovy
@@ -1,6 +1,6 @@
/**
*
- * Launch validation of the cloud
+ * Launch CVP Rally performance testing of the cloud
*
* Expected parameters:
* SALT_MASTER_URL URL of Salt master
@@ -26,11 +26,15 @@
node() {
try{
stage('Initialization') {
- saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
sh "rm -rf ${artifacts_dir}"
+ saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_artifacts_dir}")
- validate.runBasicContainer(saltMaster, TARGET_NODE, TEST_IMAGE)
+ keystone_creds = validate._get_keystone_creds_v3(saltMaster)
+ if (!keystone_creds) {
+ keystone_creds = validate._get_keystone_creds_v2(saltMaster)
+ }
+ validate.runContainer(saltMaster, TARGET_NODE, TEST_IMAGE, 'cvp', keystone_creds)
validate.configureContainer(saltMaster, TARGET_NODE, PROXY, TOOLS_REPO, "")
}
diff --git a/cvp-runner.groovy b/cvp-runner.groovy
index dd58da5..7cf8e28 100644
--- a/cvp-runner.groovy
+++ b/cvp-runner.groovy
@@ -10,30 +10,107 @@
* TESTS_REPO Repo to clone
 * TESTS_SETTINGS Additional environment variables to apply
* PROXY Proxy to use for cloning repo or for pip
+ * IMAGE Docker image link or name to use for running container with test framework.
+ * DEBUG_MODE If you need to debug (keep container after test), please enable this
*
*/
+common = new com.mirantis.mk.Common()
validate = new com.mirantis.mcp.Validate()
-
+salt = new com.mirantis.mk.Salt()
def artifacts_dir = 'validation_artifacts/'
+def remote_dir = '/root/qa_results/'
+def container_workdir = '/var/lib'
+def TARGET_NODE = "I@gerrit:client"
+def reinstall_env = false
+def container_name = "${env.JOB_NAME}"
+def saltMaster
+def settings
node() {
try{
stage('Initialization') {
- validate.prepareVenv(TESTS_REPO, PROXY)
+ sh "rm -rf ${artifacts_dir}"
+ if ( TESTS_SETTINGS != "" ) {
+ for (var in TESTS_SETTINGS.tokenize(";")) {
+ key = var.tokenize("=")[0].trim()
+ value = var.tokenize("=")[1].trim()
+ if (key == 'TARGET_NODE') {
+ TARGET_NODE = value
+ common.infoMsg("Node for container is set to ${TARGET_NODE}")
+ }
+ if (key == 'REINSTALL_ENV') {
+ reinstall_env = value.toBoolean()
+ }
+ }
+ }
+ if ( IMAGE == "" ) {
+ common.infoMsg("Env for tests will be built on Jenkins slave")
+ TARGET_NODE = ""
+ validate.prepareVenv(TESTS_REPO, PROXY)
+ } else {
+ saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_dir}")
+ salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_dir}")
+ validate.runContainer(saltMaster, TARGET_NODE, IMAGE, container_name)
+ if ( TESTS_REPO != "") {
+ salt.cmdRun(saltMaster, TARGET_NODE, "docker exec ${container_name} rm -rf ${container_workdir}/cvp*")
+ salt.cmdRun(saltMaster, TARGET_NODE, "docker exec ${container_name} git clone ${TESTS_REPO} ${container_workdir}/${container_name}")
+ TESTS_SET = container_workdir + '/' + container_name + '/' + TESTS_SET
+ if ( reinstall_env ) {
+ common.infoMsg("Pip packages in container will be reinstalled based on requirements.txt from ${TESTS_REPO}")
+ salt.cmdRun(saltMaster, TARGET_NODE, "docker exec ${container_name} pip install --force-reinstall -r ${container_workdir}/${container_name}/requirements.txt")
+ }
+ }
+ }
}
stage('Run Tests') {
sh "mkdir -p ${artifacts_dir}"
- validate.runTests(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS, TESTS_SET, artifacts_dir, TESTS_SETTINGS)
+ validate.runPyTests(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS, TESTS_SET, TESTS_SETTINGS.tokenize(";"), container_name, TARGET_NODE, remote_dir, artifacts_dir)
}
+
stage ('Publish results') {
archiveArtifacts artifacts: "${artifacts_dir}/*"
junit "${artifacts_dir}/*.xml"
+ if (env.JOB_NAME.contains("cvp-spt")) {
+ plot csvFileName: 'plot-8634d2fe-dc48-4713-99f9-b69a381483aa.csv',
+ group: 'SPT',
+ style: 'line',
+ title: 'SPT Glance results',
+ xmlSeries: [[
+ file: "${env.JOB_NAME}_report.xml",
+ nodeType: 'NODESET',
+ url: '',
+ xpath: '/testsuite/testcase[@name="test_speed_glance"]/properties/property']]
+ plot csvFileName: 'plot-8634d2fe-dc48-4713-99f9-b69a381483bb.csv',
+ group: 'SPT',
+ style: 'line',
+ title: 'SPT HW2HW results',
+ xmlSeries: [[
+ file: "${env.JOB_NAME}_report.xml",
+ nodeType: 'NODESET',
+ url: '',
+ xpath: '/testsuite/testcase[@classname="cvp_spt.tests.test_hw2hw"]/properties/property']]
+ plot csvFileName: 'plot-8634d2fe-dc48-4713-99f9-b69a381483bc.csv',
+ group: 'SPT',
+ style: 'line',
+ title: 'SPT VM2VM results',
+ xmlSeries: [[
+ file: "${env.JOB_NAME}_report.xml",
+ nodeType: 'NODESET',
+ url: '',
+ xpath: '/testsuite/testcase[@classname="cvp_spt.tests.test_vm2vm"]/properties/property']]
+ }
}
} catch (Throwable e) {
// If there was an error or exception thrown, the build failed
currentBuild.result = "FAILURE"
throw e
+ } finally {
+ if (DEBUG_MODE == 'false') {
+ validate.runCleanup(saltMaster, TARGET_NODE, container_name)
+ salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_dir}")
+ }
}
}
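TESTS_SETTINGS is interpreted as a `;`-separated list of `KEY=VALUE` pairs, with `TARGET_NODE` and `REINSTALL_ENV` intercepted by the pipeline itself. A self-contained sketch of that parsing; note that `tokenize('=')[1]` keeps only the first `=`-delimited token, so values containing `=` would be truncated:

    def settingsStr = 'TARGET_NODE=I@salt:master;REINSTALL_ENV=true'  // example input
    def targetNode = 'I@gerrit:client'  // pipeline default
    def reinstallEnv = false
    for (setting in settingsStr.tokenize(';')) {
        def key = setting.tokenize('=')[0].trim()
        def value = setting.tokenize('=')[1].trim()
        if (key == 'TARGET_NODE') { targetNode = value }
        if (key == 'REINSTALL_ENV') { reinstallEnv = value.toBoolean() }
    }
    assert targetNode == 'I@salt:master' && reinstallEnv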
diff --git a/cvp-stacklight.groovy b/cvp-stacklight.groovy
new file mode 100644
index 0000000..e7ce974
--- /dev/null
+++ b/cvp-stacklight.groovy
@@ -0,0 +1,33 @@
+/**
+ *
+ * Temporary pipeline for running cvp-stacklight job
+ *
+ * Expected parameters:
+ * SALT_MASTER_URL URL of Salt master
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API
+ *
+ * TESTS_SET Leave empty for full run or choose a file (test)
+ * TESTS_REPO Repo to clone
+ * TESTS_SETTINGS Additional environment variables to apply
+ * PROXY Proxy to use for cloning repo or for pip
+ *
+ */
+
+validate = new com.mirantis.mcp.Validate()
+
+def artifacts_dir = 'validation_artifacts/'
+
+node() {
+ stage('Initialization') {
+ validate.prepareVenv(TESTS_REPO, PROXY)
+ }
+
+ stage('Run Tests') {
+ sh "mkdir -p ${artifacts_dir}"
+ validate.runTests(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS, TESTS_SET, artifacts_dir, TESTS_SETTINGS)
+ }
+ stage ('Publish results') {
+ archiveArtifacts artifacts: "${artifacts_dir}/*"
+ junit "${artifacts_dir}/*.xml"
+ }
+}
diff --git a/deploy-aws-k8s-kqueen-pipeline.groovy b/deploy-aws-k8s-kqueen-pipeline.groovy
index 0a5903e..8fd92bf 100644
--- a/deploy-aws-k8s-kqueen-pipeline.groovy
+++ b/deploy-aws-k8s-kqueen-pipeline.groovy
@@ -124,8 +124,8 @@
}
// ensure certificates are generated properly
- salt.runSaltProcessStep(venvPepper, '*', 'saltutil.refresh_pillar', [], null, true)
- salt.enforceState(venvPepper, '*', ['salt.minion.cert'], true)
+ salt.runSaltProcessStep(venvPepper, 'I@kubernetes:*', 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(venvPepper, 'I@kubernetes:*', ['salt.minion.cert'], true)
orchestrate.installKubernetesInfra(venvPepper)
}
diff --git a/deploy-heat-k8s-kqueen-pipeline.groovy b/deploy-heat-k8s-kqueen-pipeline.groovy
index 7071b96..6e5705e 100644
--- a/deploy-heat-k8s-kqueen-pipeline.groovy
+++ b/deploy-heat-k8s-kqueen-pipeline.groovy
@@ -122,8 +122,8 @@
}
// ensure certificates are generated properly
- salt.runSaltProcessStep(venvPepper, '*', 'saltutil.refresh_pillar', [], null, true)
- salt.enforceState(venvPepper, '*', ['salt.minion.cert'], true)
+ salt.runSaltProcessStep(venvPepper, 'I@kubernetes:*', 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(venvPepper, 'I@kubernetes:*', ['salt.minion.cert'], true)
orchestrate.installKubernetesInfra(venvPepper)
}
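The same targeting fix as in cloud-deploy-pipeline: `I@kubernetes:*` is a Salt pillar matcher, so the pillar refresh and certificate state now touch only minions that actually carry a `kubernetes` pillar key instead of every minion in the deployment. Annotated (same calls as above, comments added):

    // 'I@kubernetes:*' selects minions whose pillar has any key under
    // 'kubernetes'; the old '*' target hit unrelated nodes as well.
    salt.runSaltProcessStep(venvPepper, 'I@kubernetes:*', 'saltutil.refresh_pillar', [], null, true)
    salt.enforceState(venvPepper, 'I@kubernetes:*', ['salt.minion.cert'], true)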
diff --git a/docker-build-image-pipeline.groovy b/docker-build-image-pipeline.groovy
index b94928e..a39051f 100644
--- a/docker-build-image-pipeline.groovy
+++ b/docker-build-image-pipeline.groovy
@@ -9,93 +9,101 @@
* REGISTRY_URL - Docker registry URL (can be empty)
* ARTIFACTORY_URL - URL to artifactory
* ARTIFACTORY_NAMESPACE - Artifactory namespace (oss, cicd,...)
+ * UPLOAD_TO_DOCKER_HUB - True/False
* REGISTRY_CREDENTIALS_ID - Docker hub credentials id
*
-**/
+ **/
def common = new com.mirantis.mk.Common()
def gerrit = new com.mirantis.mk.Gerrit()
def git = new com.mirantis.mk.Git()
def dockerLib = new com.mirantis.mk.Docker()
def artifactory = new com.mirantis.mcp.MCPArtifactory()
+
+slaveNode = env.SLAVE_NODE ?: 'docker'
+uploadToDockerHub = env.UPLOAD_TO_DOCKER_HUB ?: false
+
timeout(time: 12, unit: 'HOURS') {
- node("docker") {
- def workspace = common.getWorkspace()
- def imageTagsList = IMAGE_TAGS.tokenize(" ")
- try{
+ node(slaveNode) {
+ def workspace = common.getWorkspace()
+ def imageTagsList = env.IMAGE_TAGS.tokenize(" ")
+ try {
- def buildArgs = []
- try {
- buildArgs = IMAGE_BUILD_PARAMS.tokenize(' ')
- } catch (Throwable e) {
- buildArgs = []
- }
- def dockerApp
- stage("checkout") {
- git.checkoutGitRepository('.', IMAGE_GIT_URL, IMAGE_BRANCH, IMAGE_CREDENTIALS_ID)
- }
+ def buildArgs = []
+ try {
+ buildArgs = IMAGE_BUILD_PARAMS.tokenize(' ')
+ } catch (Throwable e) {
+ buildArgs = []
+ }
+ def dockerApp
+ stage("checkout") {
+ git.checkoutGitRepository('.', IMAGE_GIT_URL, IMAGE_BRANCH, IMAGE_CREDENTIALS_ID)
+ }
- if (IMAGE_BRANCH == "master") {
- try {
- def tag = sh(script: "git describe --tags --abbrev=0", returnStdout: true).trim()
- def revision = sh(script: "git describe --tags --abbrev=4 | grep -oP \"^${tag}-\\K.*\" | awk -F\\- '{print \$1}'", returnStdout: true).trim()
- imageTagsList << tag
- revision = revision ? revision : "0"
- if(Integer.valueOf(revision) > 0){
- imageTagsList << "${tag}-${revision}"
+ if (IMAGE_BRANCH == "master") {
+ try {
+ def tag = sh(script: "git describe --tags --abbrev=0", returnStdout: true).trim()
+ def revision = sh(script: "git describe --tags --abbrev=4 | grep -oP \"^${tag}-\\K.*\" | awk -F\\- '{print \$1}'", returnStdout: true).trim()
+ imageTagsList << tag
+ revision = revision ? revision : "0"
+ if (Integer.valueOf(revision) > 0) {
+ imageTagsList << "${tag}-${revision}"
+ }
+ if (!imageTagsList.contains("latest")) {
+ imageTagsList << "latest"
+ }
+ } catch (Exception e) {
+ common.infoMsg("Impossible to find any tag")
+ }
}
- if (!imageTagsList.contains("latest")) {
- imageTagsList << "latest"
- }
- } catch (Exception e) {
- common.infoMsg("Impossible to find any tag")
- }
- }
- stage("build") {
- common.infoMsg("Building docker image ${IMAGE_NAME}")
- dockerApp = dockerLib.buildDockerImage(IMAGE_NAME, "", "${workspace}/${DOCKERFILE_PATH}", imageTagsList[0], buildArgs)
- if(!dockerApp){
- throw new Exception("Docker build image failed")
- }
- }
- stage("upload to docker hub"){
- docker.withRegistry(REGISTRY_URL, REGISTRY_CREDENTIALS_ID) {
- for(int i=0;i<imageTagsList.size();i++){
- common.infoMsg("Uploading image ${IMAGE_NAME} with tag ${imageTagsList[i]} to dockerhub")
- dockerApp.push(imageTagsList[i])
+ stage("build") {
+ common.infoMsg("Building docker image ${IMAGE_NAME}")
+ dockerApp = dockerLib.buildDockerImage(IMAGE_NAME, "", "${workspace}/${DOCKERFILE_PATH}", imageTagsList[0], buildArgs)
+ if (!dockerApp) {
+ throw new Exception("Docker build image failed")
+ }
}
- }
+ stage("upload to docker hub") {
+ if (uploadToDockerHub) {
+ docker.withRegistry(REGISTRY_URL, REGISTRY_CREDENTIALS_ID) {
+ for (int i = 0; i < imageTagsList.size(); i++) {
+ common.infoMsg("Uploading image ${IMAGE_NAME} with tag ${imageTagsList[i]} to dockerhub")
+ dockerApp.push(imageTagsList[i])
+ }
+ }
+ } else {
+ common.infoMsg('upload to docker hub skipped')
+ }
+ }
+ stage("upload to artifactory") {
+ if (common.validInputParam("ARTIFACTORY_URL") && common.validInputParam("ARTIFACTORY_NAMESPACE")) {
+ def artifactoryName = "mcp-ci";
+ def artifactoryServer = Artifactory.server(artifactoryName)
+ def shortImageName = IMAGE_NAME
+ if (IMAGE_NAME.contains("/")) {
+ shortImageName = IMAGE_NAME.tokenize("/")[1]
+ }
+ for (imageTag in imageTagsList) {
+ sh "docker tag ${IMAGE_NAME}:${imageTagsList[0]} ${ARTIFACTORY_URL}/mirantis/${ARTIFACTORY_NAMESPACE}/${shortImageName}:${imageTag}"
+ for (artifactoryRepo in ["docker-dev-local", "docker-prod-local"]) {
+ common.infoMsg("Uploading image ${IMAGE_NAME} with tag ${imageTag} to artifactory ${artifactoryName} using repo ${artifactoryRepo}")
+ artifactory.uploadImageToArtifactory(artifactoryServer, ARTIFACTORY_URL,
+ "mirantis/${ARTIFACTORY_NAMESPACE}/${shortImageName}",
+ imageTag, artifactoryRepo)
+ }
+ }
+ } else {
+ common.warningMsg("ARTIFACTORY_URL not given, upload to artifactory skipped")
+ }
+ }
+ } catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ currentBuild.result = "FAILURE"
+ currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+ throw e
+ } finally {
+ common.sendNotification(currentBuild.result, "", ["slack"])
}
- stage("upload to artifactory"){
- if(common.validInputParam("ARTIFACTORY_URL") && common.validInputParam("ARTIFACTORY_NAMESPACE")) {
- def artifactoryName = "mcp-ci";
- def artifactoryServer = Artifactory.server(artifactoryName)
- def shortImageName = IMAGE_NAME
- if (IMAGE_NAME.contains("/")) {
- shortImageName = IMAGE_NAME.tokenize("/")[1]
- }
- for (imageTag in imageTagsList) {
- sh "docker tag ${IMAGE_NAME} ${ARTIFACTORY_URL}/mirantis/${ARTIFACTORY_NAMESPACE}/${shortImageName}:${imageTag}"
- for(artifactoryRepo in ["docker-dev-local", "docker-prod-local"]){
- common.infoMsg("Uploading image ${IMAGE_NAME} with tag ${imageTag} to artifactory ${artifactoryName} using repo ${artifactoryRepo}")
- artifactory.uploadImageToArtifactory(artifactoryServer, ARTIFACTORY_URL,
- "mirantis/${ARTIFACTORY_NAMESPACE}/${shortImageName}",
- imageTag, artifactoryRepo)
- }
- }
- }else{
- common.warningMsg("ARTIFACTORY_URL not given, upload to artifactory skipped")
- }
- }
- } catch (Throwable e) {
- // If there was an error or exception thrown, the build failed
- currentBuild.result = "FAILURE"
- currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
- throw e
- } finally {
- common.sendNotification(currentBuild.result,"",["slack"])
}
- }
}
-
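Besides gating the Docker Hub push behind UPLOAD_TO_DOCKER_HUB, the reindented artifactory stage carries one real fix: the `docker tag` source is now `${IMAGE_NAME}:${imageTagsList[0]}` rather than bare `${IMAGE_NAME}`, so the retag no longer depends on an implicit `:latest` image existing locally. Condensed:

    // Before: docker tag ${IMAGE_NAME} ...                      (implies :latest)
    // After:  docker tag ${IMAGE_NAME}:${imageTagsList[0]} ...  (the tag just built)
    sh "docker tag ${IMAGE_NAME}:${imageTagsList[0]} ${ARTIFACTORY_URL}/mirantis/${ARTIFACTORY_NAMESPACE}/${shortImageName}:${imageTag}"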
diff --git a/docker-mirror-images.groovy b/docker-mirror-images.groovy
index 07a80e7..4ccc74a 100644
--- a/docker-mirror-images.groovy
+++ b/docker-mirror-images.groovy
@@ -8,51 +8,109 @@
* REGISTRY_URL Target Docker Registry URL
* IMAGE_TAG Tag to use when pushing images
* SOURCE_IMAGE_TAG Tag to use when pulling images(optional,if SOURCE_IMAGE_TAG has been found)
+ * SET_DEFAULT_ARTIFACTORY_PROPERTIES Add extra props directly to artifactory
* IMAGE_LIST List of images to mirror
+ * Example: docker.elastic.co/elasticsearch/elasticsearch:5.4.1 docker-prod-local.docker.mirantis.net/mirantis/external/docker.elastic.co/elasticsearch
+ * docker.elastic.co/elasticsearch/elasticsearch:SUBS_SOURCE_IMAGE_TAG docker-prod-local.docker.mirantis.net/mirantis/external/elasticsearch:${IMAGE_TAG}
+ * Will be processed like:
+ * docker tag docker.elastic.co/elasticsearch/elasticsearch:5.4.1 docker-prod-local.docker.mirantis.net/mirantis/external/docker.elastic.co/elasticsearch/elasticsearch:5.4.1
+ *
*
*/
-import java.util.regex.Pattern;
+import java.util.regex.Pattern
+import groovy.json.JsonSlurper
-def common = new com.mirantis.mk.Common()
+common = new com.mirantis.mk.Common()
+external = false
+externalMarker = '/mirantis/external/'
-@NonCPS
+slaveNode = env.SLAVE_NODE ?: 'docker'
+setDefaultArtifactoryProperties = env.SET_DEFAULT_ARTIFACTORY_PROPERTIES ?: true
+
def getImageName(String image) {
def regex = Pattern.compile('(?:.+/)?([^:]+)(?::.+)?')
def matcher = regex.matcher(image)
- if(matcher.find()){
+ if (matcher.find()) {
def imageName = matcher.group(1)
return imageName
- }else{
- throw new IllegalArgumentException("Wrong format of image name.")
+ } else {
+ error("Wrong format of image name.")
}
}
-timeout(time: 12, unit: 'HOURS') {
- node("docker") {
+
+timeout(time: 4, unit: 'HOURS') {
+ node(slaveNode) {
try {
- stage("Mirror Docker Images"){
- def creds = common.getPasswordCredentials(TARGET_REGISTRY_CREDENTIALS_ID)
- sh "docker login --username=${creds.username} --password=${creds.password.toString()} ${REGISTRY_URL}"
+ stage("Mirror Docker Images") {
+
def images = IMAGE_LIST.tokenize('\n')
- def imageName, imagePath, targetRegistry, imageArray
- for (image in images){
- if(image.trim().indexOf(' ') == -1){
- throw new IllegalArgumentException("Wrong format of image and target repository input")
+ def imageName, sourceImage, targetRegistryPath, imageArray
+ for (image in images) {
+ if (image.trim().indexOf(' ') == -1) {
+ error("Wrong format of image and target repository input")
}
imageArray = image.trim().tokenize(' ')
- imagePath = imageArray[0]
- if (imagePath.contains('SUBS_SOURCE_IMAGE_TAG')) {
- common.warningMsg("Replacing SUBS_SOURCE_IMAGE_TAG => ${SOURCE_IMAGE_TAG}")
- imagePath = imagePath.replace('SUBS_SOURCE_IMAGE_TAG', SOURCE_IMAGE_TAG)
+ sourceImage = imageArray[0]
+ if (sourceImage.contains('SUBS_SOURCE_IMAGE_TAG')) {
+ common.warningMsg("Replacing SUBS_SOURCE_IMAGE_TAG => ${env.SOURCE_IMAGE_TAG}")
+ sourceImage = sourceImage.replace('SUBS_SOURCE_IMAGE_TAG', env.SOURCE_IMAGE_TAG)
}
- targetRegistry = imageArray[1]
- imageName = getImageName(imagePath)
- sh """docker pull ${imagePath}
- docker tag ${imagePath} ${targetRegistry}/${imageName}:${IMAGE_TAG}
- docker push ${targetRegistry}/${imageName}:${IMAGE_TAG}"""
+ targetRegistryPath = imageArray[1]
+ targetRegistry = imageArray[1].split('/')[0]
+ imageName = getImageName(sourceImage)
+ targetImageFull = "${targetRegistryPath}/${imageName}:${env.IMAGE_TAG}"
+ srcImage = docker.image(sourceImage)
+ common.retry(3, 5) {
+ srcImage.pull()
+ }
+ // Use sh-docker call for tag, due to magic code in the plugin:
+ // https://github.com/jenkinsci/docker-workflow-plugin/blob/docker-workflow-1.17/src/main/resources/org/jenkinsci/plugins/docker/workflow/Docker.groovy#L168-L170
+ sh("docker tag ${srcImage.id} ${targetImageFull}")
+ common.infoMsg("Attempt to push docker image into remote registry: ${env.REGISTRY_URL}")
+ common.retry(3, 5) {
+ docker.withRegistry(env.REGISTRY_URL, env.TARGET_REGISTRY_CREDENTIALS_ID) {
+ sh("docker push ${targetImageFull}")
+ }
+ }
+ if (targetImageFull.contains(externalMarker)) {
+ external = true
+ }
+
+ if (setDefaultArtifactoryProperties) {
+ common.infoMsg("Processing artifactory props for : ${targetImageFull}")
+ LinkedHashMap artifactoryProperties = [:]
+ // Get digest of pushed image
+ String unique_image_id = sh(
+ script: "docker inspect --format='{{index .RepoDigests 0}}' '${targetImageFull}'",
+ returnStdout: true,
+ ).trim()
+ def image_sha256 = unique_image_id.tokenize(':')[1]
+ def ret = new URL("https://${targetRegistry}/artifactory/api/search/checksum?sha256=${image_sha256}").getText()
+ // Most probably we would get many images, especially for external ones. We need to pick
+ // exactly the one we are pushing now
+ guessImage = targetImageFull.replace(':', '/').replace(targetRegistry, '')
+ ArrayList img_data = new JsonSlurper().parseText(ret)['results']
+ img_data*.uri.each { imgUrl ->
+ if (imgUrl.contains(guessImage)) {
+ artifactoryProperties = [
+ 'com.mirantis.targetTag' : env.IMAGE_TAG,
+ 'com.mirantis.uniqueImageId': unique_image_id,
+ ]
+ if (external) {
+ artifactoryProperties << ['com.mirantis.externalImage': external]
+ }
+ common.infoMsg("artifactoryProperties=> ${artifactoryProperties}")
+ // Call pipeline-library routine to set properties
+ def mcp_artifactory = new com.mirantis.mcp.MCPArtifactory()
+ common.retry(3, 5) {
+ mcp_artifactory.setProperties(imgUrl - '/manifest.json', artifactoryProperties)
+ }
+ }
+ }
+ }
}
}
} catch (Throwable e) {
- // If there was an error or exception thrown, the build failed
+ // Stub for future processing
currentBuild.result = "FAILURE"
throw e
}
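The new property-setting branch locates the just-pushed image in Artifactory by content digest: read the local `RepoDigests`, query the checksum-search API with the sha256, then filter the hits down to the repo/tag path that was pushed, since one digest can match several artifacts (especially for external mirrors). The lookup, isolated as a sketch:

    // Digest of the pushed image, e.g. 'registry/...@sha256:ab12...'
    String uniqueImageId = sh(
        script: "docker inspect --format='{{index .RepoDigests 0}}' '${targetImageFull}'",
        returnStdout: true).trim()
    def sha256 = uniqueImageId.tokenize(':')[1]
    // Checksum search returns every artifact with that sha256 ...
    def ret = new URL("https://${targetRegistry}/artifactory/api/search/checksum?sha256=${sha256}").getText()
    // ... so keep only URIs matching the repo/tag we just pushed.
    def guessImage = targetImageFull.replace(':', '/').replace(targetRegistry, '')
    def hits = new groovy.json.JsonSlurper().parseText(ret)['results']*.uri.findAll { it.contains(guessImage) }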
diff --git a/gating-pipeline.groovy b/gating-pipeline.groovy
index e42524b..aeaee9a 100644
--- a/gating-pipeline.groovy
+++ b/gating-pipeline.groovy
@@ -3,79 +3,105 @@
* CREDENTIALS_ID - Gerrit credentails ID
* JOBS_NAMESPACE - Gerrit gating jobs namespace (mk, contrail, ...)
*
-**/
+ **/
def common = new com.mirantis.mk.Common()
def gerrit = new com.mirantis.mk.Gerrit()
def ssh = new com.mirantis.mk.Ssh()
-timeout(time: 12, unit: 'HOURS') {
- node("python") {
- try{
- // test if change is not already merged
- ssh.prepareSshAgentKey(CREDENTIALS_ID)
- ssh.ensureKnownHosts(GERRIT_HOST)
- def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID, true)
- def doSubmit = false
- def giveVerify = false
- stage("test") {
- if (gerritChange.status != "MERGED" && !SKIP_TEST.equals("true")){
- // test max CodeReview
- if(gerrit.patchsetHasApproval(gerritChange.currentPatchSet,"Code-Review", "+")){
- doSubmit = true
- def gerritProjectArray = GERRIT_PROJECT.tokenize("/")
- def gerritProject = gerritProjectArray[gerritProjectArray.size() - 1]
- def jobsNamespace = JOBS_NAMESPACE
- def plural_namespaces = ['salt-formulas', 'salt-models']
- // remove plural s on the end of job namespace
- if (JOBS_NAMESPACE in plural_namespaces){
- jobsNamespace = JOBS_NAMESPACE.substring(0, JOBS_NAMESPACE.length() - 1)
- }
- // salt-formulas tests have -latest on end of the name
- if(JOBS_NAMESPACE.equals("salt-formulas")){
- gerritProject=gerritProject+"-latest"
- }
- def testJob = String.format("test-%s-%s", jobsNamespace, gerritProject)
- if (_jobExists(testJob)) {
- common.infoMsg("Test job ${testJob} found, running")
- def patchsetVerified = gerrit.patchsetHasApproval(gerritChange.currentPatchSet,"Verified", "+")
- build job: testJob, parameters: [
- [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"],
- [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: GERRIT_REFSPEC]
- ]
- giveVerify = true
- } else {
- common.infoMsg("Test job ${testJob} not found")
- }
- } else {
- common.errorMsg("Change don't have a CodeReview, skipping gate")
- }
- } else {
- common.infoMsg("Test job skipped")
- }
- }
- stage("submit review"){
- if(gerritChange.status == "MERGED"){
- common.successMsg("Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to gate them")
- }else if(doSubmit){
- if(giveVerify){
- common.warningMsg("Change ${GERRIT_CHANGE_NUMBER} don't have a Verified, but tests were successful, so adding Verified and submitting")
- ssh.agentSh(String.format("ssh -p 29418 %s@%s gerrit review --verified +1 --submit %s,%s", GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER))
- }else{
- ssh.agentSh(String.format("ssh -p 29418 %s@%s gerrit review --submit %s,%s", GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER))
- }
- common.infoMsg(String.format("Gerrit review %s,%s submitted", GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER))
- }
- }
- } catch (Throwable e) {
- // If there was an error or exception thrown, the build failed
- currentBuild.result = "FAILURE"
- currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
- throw e
- }
- }
-}
+
+slaveNode = env.SLAVE_NODE ?: 'docker'
+giveVerify = false
@NonCPS
-def _jobExists(jobName){
- return Jenkins.instance.items.find{it -> it.name.equals(jobName)}
+def isJobExists(jobName) {
+ return Jenkins.instance.items.find { it -> it.name.equals(jobName) }
+}
+
+def callJobWithExtraVars(String jobName) {
+ def gerritVars = '\n---'
+ for (envVar in env.getEnvironment()) {
+ if (envVar.key.startsWith("GERRIT_")) {
+ gerritVars += "\n${envVar.key}: '${envVar.value}'"
+ }
+ }
+ testJob = build job: jobName, parameters: [
+ [$class: 'TextParameterValue', name: 'EXTRA_VARIABLES_YAML', value: gerritVars]
+ ]
+ if (testJob.getResult() != 'SUCCESS') {
+ error("Gate job ${testJob.getBuildUrl().toString()} finished with ${testJob.getResult()} !")
+ }
+ giveVerify = true
+}
+
+
+timeout(time: 12, unit: 'HOURS') {
+ node(slaveNode) {
+ try {
+ // test if change is not already merged
+ ssh.prepareSshAgentKey(CREDENTIALS_ID)
+ ssh.ensureKnownHosts(GERRIT_HOST)
+ def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID, true)
+ def doSubmit = false
+ stage("test") {
+ if (gerritChange.status != "MERGED" && !SKIP_TEST.equals("true")) {
+ // test max CodeReview
+ if (gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Code-Review", "+")) {
+ doSubmit = true
+ def gerritProjectArray = GERRIT_PROJECT.tokenize("/")
+ def gerritProject = gerritProjectArray[gerritProjectArray.size() - 1]
+ def jobsNamespace = JOBS_NAMESPACE
+ def plural_namespaces = ['salt-formulas', 'salt-models']
+ // remove plural s on the end of job namespace
+ if (JOBS_NAMESPACE in plural_namespaces) {
+ jobsNamespace = JOBS_NAMESPACE.substring(0, JOBS_NAMESPACE.length() - 1)
+ }
+ // salt-formulas tests have -latest on end of the name
+ if (JOBS_NAMESPACE.equals("salt-formulas")) {
+ gerritProject = gerritProject + "-latest"
+ }
+ def testJob = String.format("test-%s-%s", jobsNamespace, gerritProject)
+ if (env.GERRIT_PROJECT == 'mk/cookiecutter-templates') {
+ callJobWithExtraVars('test-mk-cookiecutter-templates')
+ } else if (env.GERRIT_PROJECT == 'salt-models/reclass-system') {
+ callJobWithExtraVars('test-salt-model-reclass-system')
+ } else {
+ if (isJobExists(testJob)) {
+ common.infoMsg("Test job ${testJob} found, running")
+ def patchsetVerified = gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Verified", "+")
+ build job: testJob, parameters: [
+ [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"],
+ [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: GERRIT_REFSPEC]
+ ]
+ giveVerify = true
+ } else {
+ common.infoMsg("Test job ${testJob} not found")
+ }
+ }
+ } else {
+ common.errorMsg("Change doesn't have a Code-Review, skipping gate")
+ }
+ } else {
+ common.infoMsg("Test job skipped")
+ }
+ }
+ stage("submit review") {
+ if (gerritChange.status == "MERGED") {
+ common.successMsg("Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to gate it")
+ } else if (doSubmit) {
+ if (giveVerify) {
+ common.warningMsg("Change ${GERRIT_CHANGE_NUMBER} doesn't have Verified, but tests were successful, so adding Verified and submitting")
+ ssh.agentSh(String.format("ssh -p 29418 %s@%s gerrit review --verified +1 --submit %s,%s", GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER))
+ } else {
+ ssh.agentSh(String.format("ssh -p 29418 %s@%s gerrit review --submit %s,%s", GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER))
+ }
+ common.infoMsg(String.format("Gerrit review %s,%s submitted", GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER))
+ }
+ }
+ } catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ currentBuild.result = "FAILURE"
+ currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+ throw e
+ }
+ }
}
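`callJobWithExtraVars` forwards the whole Gerrit event to the downstream gate job by serializing every `GERRIT_*` environment variable into a small YAML document passed as one `EXTRA_VARIABLES_YAML` text parameter. For a typical trigger the generated value would look like this (values illustrative):

    // gerritVars ends up as:
    //
    // ---
    // GERRIT_PROJECT: 'mk/cookiecutter-templates'
    // GERRIT_REFSPEC: 'refs/changes/11/12345/2'
    // ...
    def gerritVars = '\n---'
    for (envVar in env.getEnvironment()) {
        if (envVar.key.startsWith('GERRIT_')) {
            gerritVars += "\n${envVar.key}: '${envVar.value}'"
        }
    }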
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 0924951..558609a 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -4,335 +4,251 @@
* Expected parameters:
* COOKIECUTTER_TEMPLATE_CONTEXT Context parameters for the template generation.
* EMAIL_ADDRESS Email to send a created tar file
- *
+ * CREDENTIALS_ID Credentials id for git
**/
+import static groovy.json.JsonOutput.*
common = new com.mirantis.mk.Common()
+common2 = new com.mirantis.mcp.Common()
git = new com.mirantis.mk.Git()
python = new com.mirantis.mk.Python()
saltModelTesting = new com.mirantis.mk.SaltModelTesting()
-ssh = new com.mirantis.mk.Ssh()
-def reclassVersion = 'v1.5.4'
-if (common.validInputParam('RECLASS_VERSION')) {
- reclassVersion = RECLASS_VERSION
-}
-slaveNode = (env.SLAVE_NODE ?: 'python&&docker')
+slaveNode = env.SLAVE_NODE ?: 'python&&docker'
+gerritCredentials = env.CREDENTIALS_ID ?: 'mcp-ci-gerrit'
-// install extra formulas required only for rendering cfg01. All others - should be fetched automatically via
-// salt.master.env state, during salt-master bootstrap.
-// TODO: In the best - those data should fetched somewhere from CC, per env\context. Like option, process _enabled
-// options from CC contexts
-// currently, just mix them together in one set
-def testCfg01ExtraFormulas = 'glusterfs jenkins logrotate maas ntp rsyslog fluentd telegraf prometheus ' +
- 'grafana backupninja'
+timeout(time: 1, unit: 'HOURS') {
+ node(slaveNode) {
+ def templateEnv = "${env.WORKSPACE}/template"
+ def modelEnv = "${env.WORKSPACE}/model"
+ def testEnv = "${env.WORKSPACE}/test"
+ def pipelineEnv = "${env.WORKSPACE}/pipelines"
-
-timeout(time: 2, unit: 'HOURS') {
- node(slaveNode) {
- def templateEnv = "${env.WORKSPACE}/template"
- def modelEnv = "${env.WORKSPACE}/model"
- def testEnv = "${env.WORKSPACE}/test"
- def pipelineEnv = "${env.WORKSPACE}/pipelines"
-
- try {
- def templateContext = readYaml text: COOKIECUTTER_TEMPLATE_CONTEXT
- def mcpVersion = templateContext.default_context.mcp_version
- def sharedReclassUrl = templateContext.default_context.shared_reclass_url
- def clusterDomain = templateContext.default_context.cluster_domain
- def clusterName = templateContext.default_context.cluster_name
- def saltMaster = templateContext.default_context.salt_master_hostname
- def localRepositories = templateContext.default_context.local_repositories.toBoolean()
- def offlineDeployment = templateContext.default_context.offline_deployment.toBoolean()
- def cutterEnv = "${env.WORKSPACE}/cutter"
- def jinjaEnv = "${env.WORKSPACE}/jinja"
- def outputDestination = "${modelEnv}/classes/cluster/${clusterName}"
- def systemEnv = "${modelEnv}/classes/system"
- def targetBranch = "feature/${clusterName}"
- def templateBaseDir = "${env.WORKSPACE}/template"
- def templateDir = "${templateEnv}/template/dir"
- def templateOutputDir = templateBaseDir
- def user
- def testResult = false
- wrap([$class: 'BuildUser']) {
- user = env.BUILD_USER_ID
- }
-
- if (mcpVersion != '2018.4.0') {
- testCfg01ExtraFormulas += ' auditd'
- }
-
- currentBuild.description = clusterName
- print("Using context:\n" + COOKIECUTTER_TEMPLATE_CONTEXT)
-
- stage('Download Cookiecutter template') {
- sh(script: 'find . -mindepth 1 -delete > /dev/null || true')
- def cookiecutterTemplateUrl = templateContext.default_context.cookiecutter_template_url
- def cookiecutterTemplateBranch = templateContext.default_context.cookiecutter_template_branch
- git.checkoutGitRepository(templateEnv, cookiecutterTemplateUrl, 'master')
- // Use refspec if exists first of all
- if (cookiecutterTemplateBranch.toString().startsWith('refs/')) {
- dir(templateEnv) {
- ssh.agentSh("git fetch ${cookiecutterTemplateUrl} ${cookiecutterTemplateBranch} && git checkout FETCH_HEAD")
- }
- } else {
- // Use mcpVersion git tag if not specified branch for cookiecutter-templates
- if (cookiecutterTemplateBranch == '') {
- cookiecutterTemplateBranch = mcpVersion
- // Don't have nightly/testing/stable for cookiecutter-templates repo, therefore use master
- if ([ "nightly" , "testing", "stable" ].contains(mcpVersion)) {
- cookiecutterTemplateBranch = 'master'
+ try {
+ def templateContext = readYaml text: env.COOKIECUTTER_TEMPLATE_CONTEXT
+ // TODO: switch to apt_mk_version in context['mcp_version']
+ // TODO: add checks for critical vars
+ def context = templateContext['default_context']
+ // Use mcpVersion git tag if not specified branch for cookiecutter-templates
+ if (!(context['cookiecutter_template_branch'] instanceof java.lang.String)) {
+ context['cookiecutter_template_branch'] = context['mcp_version']
+ // Don't have nightly/testing/stable for cookiecutter-templates repo, therefore use master
+ if (["nightly", "testing", "stable"].contains(context['mcp_version'])) {
+ common.warningMsg("Fetching cookiecutterTemplate from master!")
+ context['cookiecutter_template_branch'] = 'master'
+ }
}
- }
- git.changeGitBranch(templateEnv, cookiecutterTemplateBranch)
- }
- }
-
- stage('Create empty reclass model') {
- dir(path: modelEnv) {
- sh "rm -rfv .git"
- sh "git init"
- ssh.agentSh("git submodule add ${sharedReclassUrl} 'classes/system'")
- }
-
- def sharedReclassBranch = templateContext.default_context.shared_reclass_branch
- // Use refspec if exists first of all
- if (sharedReclassBranch.toString().startsWith('refs/')) {
- dir(systemEnv) {
- ssh.agentSh("git fetch ${sharedReclassUrl} ${sharedReclassBranch} && git checkout FETCH_HEAD")
- }
- } else {
- // Use mcpVersion git tag if not specified branch for reclass-system
- if (sharedReclassBranch == '') {
- sharedReclassBranch = mcpVersion
- // Don't have nightly/testing for reclass-system repo, therefore use master
- if ([ "nightly" , "testing", "stable" ].contains(mcpVersion)) {
- common.warningMsg("Fetching reclass-system from master!")
- sharedReclassBranch = 'master'
+ // Use context['mcp_version'] git tag if not specified branch for reclass-system
+ if (!(context['shared_reclass_branch'] instanceof java.lang.String)) {
+ context['shared_reclass_branch'] = context['mcp_version']
+ // Don't have nightly/testing for reclass-system repo, therefore use master
+ if (["nightly", "testing", "stable"].contains(context['mcp_version'])) {
+ common.warningMsg("Fetching reclass-system from master!")
+ context['shared_reclass_branch'] = 'master'
+ }
}
- }
- git.changeGitBranch(systemEnv, sharedReclassBranch)
+ //
+ distribRevision = context['mcp_version']
+ if (['master'].contains(context['mcp_version'])) {
+ distribRevision = 'nightly'
+ }
+ if (distribRevision.contains('/')) {
+ distribRevision = distribRevision.split('/')[-1]
+ }
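+ // Illustrative examples: 'master' -> 'nightly',
+ // 'proposed/2019.2.0' -> '2019.2.0'; a plain tag passes through unchanged.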
+ //
+ def cutterEnv = "${env.WORKSPACE}/cutter"
+ def systemEnv = "${modelEnv}/classes/system"
+ def testResult = false
+ def user
+ wrap([$class: 'BuildUser']) {
+ user = env.BUILD_USER_ID
+ }
+ currentBuild.description = context['cluster_name']
+ common.infoMsg("Using context:\n" + context)
+ print prettyPrint(toJson(context))
+
+ stage('Download Cookiecutter template') {
+ sh(script: 'find . -mindepth 1 -delete > /dev/null || true')
+ checkout([
+ $class : 'GitSCM',
+ branches : [[name: 'FETCH_HEAD'],],
+ extensions : [[$class: 'RelativeTargetDirectory', relativeTargetDir: templateEnv]],
+ userRemoteConfigs: [[url: context['cookiecutter_template_url'], refspec: context['cookiecutter_template_branch'], credentialsId: gerritCredentials],],
+ ])
+ }
+ stage('Create empty reclass model') {
+ dir(path: modelEnv) {
+ sh "rm -rfv .git; git init"
+ sshagent(credentials: [gerritCredentials]) {
+ sh "git submodule add ${context['shared_reclass_url']} 'classes/system'"
+ }
+ }
+ checkout([
+ $class : 'GitSCM',
+ branches : [[name: 'FETCH_HEAD'],],
+ extensions : [[$class: 'RelativeTargetDirectory', relativeTargetDir: systemEnv]],
+ userRemoteConfigs: [[url: context['shared_reclass_url'], refspec: context['shared_reclass_branch'], credentialsId: gerritCredentials],],
+ ])
+ git.commitGitChanges(modelEnv, "Added new shared reclass submodule", "${user}@localhost", "${user}")
+ }
+
+ stage('Generate model') {
+ python.setupCookiecutterVirtualenv(cutterEnv)
+ // FIXME: refactor generateModel
+ python.generateModel(common2.dumpYAML(['default_context': context]), 'default_context', context['salt_master_hostname'], cutterEnv, modelEnv, templateEnv, false)
+ git.commitGitChanges(modelEnv, "Create model ${context['cluster_name']}", "${user}@localhost", "${user}")
+ }
+
+ stage("Test") {
+ if (TEST_MODEL.toBoolean()) {
+ // Check if we are going to test bleeding-edge release, which doesn't have binary release yet
+ if (!common.checkRemoteBinary([apt_mk_version: distribRevision]).linux_system_repo_url) {
+ common.errorMsg("Binary release: ${distribRevision} does not exist. Falling back to 'proposed'!")
+ distribRevision = 'proposed'
+ }
+ sh("cp -r ${modelEnv} ${testEnv}")
+ def DockerCName = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}"
+ common.infoMsg("Attempt to run test against distribRevision: ${distribRevision}")
+ try {
+ def config = [
+ 'dockerHostname' : "${context['salt_master_hostname']}.${context['cluster_domain']}",
+ 'reclassEnv' : testEnv,
+ 'distribRevision' : distribRevision,
+ 'dockerContainerName': DockerCName,
+ 'testContext' : 'salt-model-node'
+ ]
+ testResult = saltModelTesting.testNode(config)
+ common.infoMsg("Test finished: SUCCESS")
+ } catch (Exception ex) {
+ common.warningMsg("Test finished: FAILED")
+ testResult = false
+ }
+ } else {
+ common.warningMsg("Test stage has been skipped!")
+ }
+ }
+ stage("Generate config drives") {
+ // apt package genisoimage is required for this stage
+
+ // download create-config-drive
+ // FIXME: that should be refactored to use git clone, to be able to download it from a custom repo.
+ def mcpCommonScriptsBranch = context['mcp_common_scripts_branch']
+ if (mcpCommonScriptsBranch == '') {
+ mcpCommonScriptsBranch = context['mcp_version']
+ // Don't have n/t/s for mcp-common-scripts repo, therefore use master
+ if (["nightly", "testing", "stable"].contains(context['mcp_version'])) {
+ common.warningMsg("Fetching mcp-common-scripts from master!")
+ mcpCommonScriptsBranch = 'master'
+ }
+ }
+ def commonScriptsRepoUrl = 'ssh://gerrit.mcp.mirantis.com:29418/mcp/mcp-common-scripts'
+ checkout([
+ $class : 'GitSCM',
+ branches : [[name: 'FETCH_HEAD'],],
+ extensions : [[$class: 'RelativeTargetDirectory', relativeTargetDir: 'mcp-common-scripts']],
+ userRemoteConfigs: [[url: commonScriptsRepoUrl, refspec: mcpCommonScriptsBranch, credentialsId: gerritCredentials],],
+ ])
+
+ sh 'cp mcp-common-scripts/config-drive/create_config_drive.sh create-config-drive && chmod +x create-config-drive'
+ sh '[ -f mcp-common-scripts/config-drive/master_config.sh ] && cp mcp-common-scripts/config-drive/master_config.sh user_data || cp mcp-common-scripts/config-drive/master_config.yaml user_data'
+
+ sh "git clone --mirror https://github.com/Mirantis/mk-pipelines.git ${pipelineEnv}/mk-pipelines"
+ sh "git clone --mirror https://github.com/Mirantis/pipeline-library.git ${pipelineEnv}/pipeline-library"
+ args = "--user-data user_data --hostname ${context['salt_master_hostname']} --model ${modelEnv} --mk-pipelines ${pipelineEnv}/mk-pipelines/ --pipeline-library ${pipelineEnv}/pipeline-library/ ${context['salt_master_hostname']}.${context['cluster_domain']}-config.iso"
+
+ // load data from model
+ def smc = [:]
+ smc['SALT_MASTER_MINION_ID'] = "${context['salt_master_hostname']}.${context['cluster_domain']}"
+ smc['SALT_MASTER_DEPLOY_IP'] = context['salt_master_management_address']
+ smc['DEPLOY_NETWORK_GW'] = context['deploy_network_gateway']
+ smc['DEPLOY_NETWORK_NETMASK'] = context['deploy_network_netmask']
+ if (context.get('deploy_network_mtu')) {
+ smc['DEPLOY_NETWORK_MTU'] = context['deploy_network_mtu']
+ }
+ smc['DNS_SERVERS'] = context['dns_server01']
+ smc['MCP_VERSION'] = "${context['mcp_version']}"
+ if (context['local_repositories'] == 'True') {
+ def localRepoIP = context['local_repo_url']
+ smc['MCP_SALT_REPO_KEY'] = "http://${localRepoIP}/public.gpg"
+ smc['MCP_SALT_REPO_URL'] = "http://${localRepoIP}/ubuntu-xenial"
+ smc['PIPELINES_FROM_ISO'] = 'false'
+ smc['PIPELINE_REPO_URL'] = "http://${localRepoIP}:8088"
+ smc['LOCAL_REPOS'] = 'true'
+ }
+ if (context['upstream_proxy_enabled'] == 'True') {
+ if (context['upstream_proxy_auth_enabled'] == 'True') {
+ smc['http_proxy'] = 'http://' + context['upstream_proxy_user'] + ':' + context['upstream_proxy_password'] + '@' + context['upstream_proxy_address'] + ':' + context['upstream_proxy_port']
+ smc['https_proxy'] = 'http://' + context['upstream_proxy_user'] + ':' + context['upstream_proxy_password'] + '@' + context['upstream_proxy_address'] + ':' + context['upstream_proxy_port']
+ } else {
+ smc['http_proxy'] = 'http://' + context['upstream_proxy_address'] + ':' + context['upstream_proxy_port']
+ smc['https_proxy'] = 'http://' + context['upstream_proxy_address'] + ':' + context['upstream_proxy_port']
+ }
+ }
+
+ for (i in common.entries(smc)) {
+ sh "sed -i 's,${i[0]}=.*,${i[0]}=${i[1]},' user_data"
+ }
+
+ // create cfg config-drive
+ sh "./create-config-drive ${args}"
+ sh("mkdir output-${context['cluster_name']} && mv ${context['salt_master_hostname']}.${context['cluster_domain']}-config.iso output-${context['cluster_name']}/")
+
+ // save cfg iso to artifacts
+ archiveArtifacts artifacts: "output-${context['cluster_name']}/${context['salt_master_hostname']}.${context['cluster_domain']}-config.iso"
+
+ if (context['local_repositories'] == 'True') {
+ def aptlyServerHostname = context.aptly_server_hostname
+ sh "[ -f mcp-common-scripts/config-drive/mirror_config.yaml ] && cp mcp-common-scripts/config-drive/mirror_config.yaml mirror_config || cp mcp-common-scripts/config-drive/mirror_config.sh mirror_config"
+
+ def smc_apt = [:]
+ smc_apt['SALT_MASTER_DEPLOY_IP'] = context['salt_master_management_address']
+ smc_apt['APTLY_DEPLOY_IP'] = context['aptly_server_deploy_address']
+ smc_apt['APTLY_DEPLOY_NETMASK'] = context['deploy_network_netmask']
+ smc_apt['APTLY_MINION_ID'] = "${aptlyServerHostname}.${context['cluster_domain']}"
+
+ for (i in common.entries(smc_apt)) {
+ sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" mirror_config"
+ }
+
+ // create apt config-drive
+ sh "./create-config-drive --user-data mirror_config --hostname ${aptlyServerHostname} ${aptlyServerHostname}.${context['cluster_domain']}-config.iso"
+ sh("mv ${aptlyServerHostname}.${context['cluster_domain']}-config.iso output-${context['cluster_name']}/")
+
+ // save apt iso to artifacts
+ archiveArtifacts artifacts: "output-${context['cluster_name']}/${aptlyServerHostname}.${context['cluster_domain']}-config.iso"
+ }
+ }
+
+ stage('Save changes reclass model') {
+ sh(returnStatus: true, script: "tar -czf output-${context['cluster_name']}/${context['cluster_name']}.tar.gz --exclude='*@tmp' -C ${modelEnv} .")
+ archiveArtifacts artifacts: "output-${context['cluster_name']}/${context['cluster_name']}.tar.gz"
+
+ if (EMAIL_ADDRESS != null && EMAIL_ADDRESS != "") {
+ emailext(to: EMAIL_ADDRESS,
+ attachmentsPattern: "output-${context['cluster_name']}/*",
+ body: "Mirantis Jenkins\n\nRequested reclass model ${context['cluster_name']} has been created and attached to this email.\nEnjoy!\n\nMirantis",
+ subject: "Your Salt model ${context['cluster_name']}")
+ }
+ dir("output-${context['cluster_name']}") {
+ deleteDir()
+ }
+ }
+
+ // Fail, but leave possibility to get failed artifacts
+ if (!testResult && TEST_MODEL.toBoolean()) {
+ common.warningMsg('Test finished: FAILURE. Please check logs and/or debug the failed model manually!')
+ error('Test stage finished: FAILURE')
+ }
+
+ } catch (Throwable e) {
+ currentBuild.result = "FAILURE"
+ currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+ throw e
+ } finally {
+ stage('Clean workspace directories') {
+ sh(script: 'find . -mindepth 1 -delete > /dev/null || true')
+ }
+ // common.sendNotification(currentBuild.result,"",["slack"])
}
- git.commitGitChanges(modelEnv, "Added new shared reclass submodule", "${user}@localhost", "${user}")
- }
-
- def productList = ["infra", "cicd", "opencontrail", "kubernetes", "openstack", "oss", "stacklight", "ceph"]
- for (product in productList) {
-
- // get templateOutputDir and productDir
- if (product.startsWith("stacklight")) {
- templateOutputDir = "${env.WORKSPACE}/output/stacklight"
-
- def stacklightVersion
- try {
- stacklightVersion = templateContext.default_context['stacklight_version']
- } catch (Throwable e) {
- common.warningMsg('Stacklight version loading failed')
- }
-
- if (stacklightVersion) {
- productDir = "stacklight" + stacklightVersion
- } else {
- productDir = "stacklight1"
- }
-
- } else {
- templateOutputDir = "${env.WORKSPACE}/output/${product}"
- productDir = product
- }
-
- if (product == "infra" || (templateContext.default_context["${product}_enabled"]
- && templateContext.default_context["${product}_enabled"].toBoolean())) {
-
- templateDir = "${templateEnv}/cluster_product/${productDir}"
- common.infoMsg("Generating product " + product + " from " + templateDir + " to " + templateOutputDir)
-
- sh "rm -rf ${templateOutputDir} || true"
- sh "mkdir -p ${templateOutputDir}"
- sh "mkdir -p ${outputDestination}"
-
- python.setupCookiecutterVirtualenv(cutterEnv)
- python.buildCookiecutterTemplate(templateDir, COOKIECUTTER_TEMPLATE_CONTEXT, templateOutputDir, cutterEnv, templateBaseDir)
- sh "mv -v ${templateOutputDir}/${clusterName}/* ${outputDestination}"
- } else {
- common.warningMsg("Product " + product + " is disabled")
- }
- }
-
- if (localRepositories && !offlineDeployment) {
- def aptlyModelUrl = templateContext.default_context.local_model_url
- dir(path: modelEnv) {
- ssh.agentSh "git submodule add \"${aptlyModelUrl}\" \"classes/cluster/${clusterName}/cicd/aptly\""
- if (!(mcpVersion in ["nightly", "testing", "stable"])) {
- ssh.agentSh "cd \"classes/cluster/${clusterName}/cicd/aptly\";git fetch --tags;git checkout ${mcpVersion}"
- }
- }
- }
-
- stage('Generate new SaltMaster node') {
- def nodeFile = "${modelEnv}/nodes/${saltMaster}.${clusterDomain}.yml"
- def nodeString = """classes:
-- cluster.${clusterName}.infra.config
-parameters:
- _param:
- linux_system_codename: xenial
- reclass_data_revision: master
- linux:
- system:
- name: ${saltMaster}
- domain: ${clusterDomain}
- """
- sh "mkdir -p ${modelEnv}/nodes/"
- writeFile(file: nodeFile, text: nodeString)
-
- git.commitGitChanges(modelEnv, "Create model ${clusterName}", "${user}@localhost", "${user}")
- }
-
- stage("Test") {
- if (TEST_MODEL.toBoolean() && sharedReclassUrl != '') {
- sh("cp -r ${modelEnv} ${testEnv}")
- def DockerCName = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}"
- common.infoMsg("Attempt to run test against formula-version: ${mcpVersion}")
- testResult = saltModelTesting.setupAndTestNode(
- "${saltMaster}.${clusterDomain}",
- "",
- testCfg01ExtraFormulas,
- testEnv,
- 'pkg',
- mcpVersion,
- reclassVersion,
- 0,
- false,
- false,
- '',
- '',
- DockerCName)
- if (testResult) {
- common.infoMsg("Test finished: SUCCESS")
- } else {
- common.warningMsg('Test finished: FAILURE')
- }
- } else {
- common.warningMsg("Test stage has been skipped!")
- }
- }
- stage("Generate config drives") {
- // apt package genisoimage is required for this stage
-
- // download create-config-drive
- // FIXME: that should be refactored, to use git clone - to be able download it from custom repo.
- def mcpCommonScriptsBranch = templateContext.default_context.mcp_common_scripts_branch
- if (mcpCommonScriptsBranch == '') {
- mcpCommonScriptsBranch = mcpVersion
- // Don't have n/t/s for mcp-common-scripts repo, therefore use master
- if ([ "nightly" , "testing", "stable" ].contains(mcpVersion)) {
- common.warningMsg("Fetching mcp-common-scripts from master!")
- mcpCommonScriptsBranch = 'master'
- }
- }
- def config_drive_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${mcpCommonScriptsBranch}/config-drive/create_config_drive.sh"
- def user_data_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${mcpCommonScriptsBranch}/config-drive/master_config.sh"
- common.retry(3, 5) {
- sh "wget -O create-config-drive ${config_drive_script_url} && chmod +x create-config-drive"
- sh "wget -O user_data.sh ${user_data_script_url}"
- }
-
- sh "git clone --mirror https://github.com/Mirantis/mk-pipelines.git ${pipelineEnv}/mk-pipelines"
- sh "git clone --mirror https://github.com/Mirantis/pipeline-library.git ${pipelineEnv}/pipeline-library"
- args = "--user-data user_data.sh --hostname ${saltMaster} --model ${modelEnv} --mk-pipelines ${pipelineEnv}/mk-pipelines/ --pipeline-library ${pipelineEnv}/pipeline-library/ ${saltMaster}.${clusterDomain}-config.iso"
-
- // load data from model
- def smc = [:]
- smc['SALT_MASTER_MINION_ID'] = "${saltMaster}.${clusterDomain}"
- smc['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
- smc['DEPLOY_NETWORK_GW'] = templateContext['default_context']['deploy_network_gateway']
- smc['DEPLOY_NETWORK_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
- smc['DNS_SERVERS'] = templateContext['default_context']['dns_server01']
- smc['MCP_VERSION'] = "${mcpVersion}"
- if (templateContext['default_context']['local_repositories'] == 'True') {
- def localRepoIP = templateContext['default_context']['local_repo_url']
- smc['MCP_SALT_REPO_KEY'] = "http://${localRepoIP}/public.gpg"
- smc['MCP_SALT_REPO_URL'] = "http://${localRepoIP}/ubuntu-xenial"
- smc['PIPELINES_FROM_ISO'] = 'false'
- smc['PIPELINE_REPO_URL'] = "http://${localRepoIP}:8088"
- smc['LOCAL_REPOS'] = 'true'
- }
- if (templateContext['default_context']['upstream_proxy_enabled'] == 'True') {
- if (templateContext['default_context']['upstream_proxy_auth_enabled'] == 'True') {
- smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
- smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
- } else {
- smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
- smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
- }
- }
-
- for (i in common.entries(smc)) {
- sh "sed -i 's,export ${i[0]}=.*,export ${i[0]}=${i[1]},' user_data.sh"
- }
-
- // create cfg config-drive
- sh "./create-config-drive ${args}"
- sh("mkdir output-${clusterName} && mv ${saltMaster}.${clusterDomain}-config.iso output-${clusterName}/")
-
- // save cfg iso to artifacts
- archiveArtifacts artifacts: "output-${clusterName}/${saltMaster}.${clusterDomain}-config.iso"
-
- if (templateContext['default_context']['local_repositories'] == 'True') {
- def aptlyServerHostname = templateContext.default_context.aptly_server_hostname
- def user_data_script_apt_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/master/config-drive/mirror_config.sh"
- sh "wget -O mirror_config.sh ${user_data_script_apt_url}"
-
- def smc_apt = [:]
- smc_apt['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
- smc_apt['APTLY_DEPLOY_IP'] = templateContext['default_context']['aptly_server_deploy_address']
- smc_apt['APTLY_DEPLOY_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
- smc_apt['APTLY_MINION_ID'] = "${aptlyServerHostname}.${clusterDomain}"
-
- for (i in common.entries(smc_apt)) {
- sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" mirror_config.sh"
- }
-
- // create apt config-drive
- sh "./create-config-drive --user-data mirror_config.sh --hostname ${aptlyServerHostname} ${aptlyServerHostname}.${clusterDomain}-config.iso"
- sh("mv ${aptlyServerHostname}.${clusterDomain}-config.iso output-${clusterName}/")
-
- // save apt iso to artifacts
- archiveArtifacts artifacts: "output-${clusterName}/${aptlyServerHostname}.${clusterDomain}-config.iso"
- }
- }
-
- stage('Save changes reclass model') {
- sh(returnStatus: true, script: "tar -czf output-${clusterName}/${clusterName}.tar.gz --exclude='*@tmp' -C ${modelEnv} .")
- archiveArtifacts artifacts: "output-${clusterName}/${clusterName}.tar.gz"
-
-
- if (EMAIL_ADDRESS != null && EMAIL_ADDRESS != "") {
- emailext(to: EMAIL_ADDRESS,
- attachmentsPattern: "output-${clusterName}/*",
- body: "Mirantis Jenkins\n\nRequested reclass model ${clusterName} has been created and attached to this email.\nEnjoy!\n\nMirantis",
- subject: "Your Salt model ${clusterName}")
- }
- dir("output-${clusterName}") {
- deleteDir()
- }
- }
-
- // Fail, but leave possibility to get failed artifacts
- if (!testResult && TEST_MODEL.toBoolean()) {
- common.warningMsg('Test finished: FAILURE. Please check logs and\\or debug failed model manually!')
- error('Test stage finished: FAILURE')
- }
-
- } catch (Throwable e) {
- currentBuild.result = "FAILURE"
- currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
- throw e
- } finally {
- stage('Clean workspace directories') {
- sh(script: 'find . -mindepth 1 -delete > /dev/null || true')
- }
- // common.sendNotification(currentBuild.result,"",["slack"])
}
- }
}
diff --git a/generate-salt-model-docs-pipeline.groovy b/generate-salt-model-docs-pipeline.groovy
index 4a36f0e..59dd3eb 100644
--- a/generate-salt-model-docs-pipeline.groovy
+++ b/generate-salt-model-docs-pipeline.groovy
@@ -14,83 +14,83 @@
salt = new com.mirantis.mk.Salt()
timeout(time: 12, unit: 'HOURS') {
- node("python") {
- try {
- def workspace = common.getWorkspace()
- def masterName = "cfg01." + CLUSTER_NAME.replace("-","_") + ".lab"
- def jenkinsUserIds = common.getJenkinsUserIds()
- def img = docker.image("tcpcloud/salt-models-testing:nightly")
- img.pull()
- img.inside("-u root:root --hostname ${masterName} --ulimit nofile=4096:8192 --cpus=2") {
- stage("Prepare salt env") {
- if(MODEL_GIT_REF != "" && MODEL_GIT_URL != "") {
- checkouted = gerrit.gerritPatchsetCheckout(MODEL_GIT_URL, MODEL_GIT_REF, "HEAD", CREDENTIALS_ID)
- } else {
- throw new Exception("Cannot checkout gerrit patchset, MODEL_GIT_URL or MODEL_GIT_REF is null")
- }
- if(checkouted) {
- if (fileExists('classes/system')) {
- ssh.prepareSshAgentKey(CREDENTIALS_ID)
- dir('classes/system') {
- // XXX: JENKINS-33510 dir step not work properly inside containers, so let's taky reclass system model directly
- //remoteUrl = git.getGitRemote()
- ssh.ensureKnownHosts("https://github.com/Mirantis/reclass-system-salt-model")
+ node("python") {
+ try {
+ def workspace = common.getWorkspace()
+ def masterName = "cfg01." + CLUSTER_NAME.replace("-", "_") + ".lab"
+ def jenkinsUserIds = common.getJenkinsUserIds()
+ def img = docker.image("tcpcloud/salt-models-testing:nightly")
+ img.pull()
+ img.inside("-u root:root --hostname ${masterName} --ulimit nofile=4096:8192 --cpus=2") {
+ stage("Prepare salt env") {
+ if (MODEL_GIT_REF != "" && MODEL_GIT_URL != "") {
+ checkouted = gerrit.gerritPatchsetCheckout(MODEL_GIT_URL, MODEL_GIT_REF, "HEAD", CREDENTIALS_ID)
+ } else {
+ throw new Exception("Cannot checkout gerrit patchset, MODEL_GIT_URL or MODEL_GIT_REF is null")
}
- ssh.agentSh("git submodule init; git submodule sync; git submodule update --recursive")
- }
- }
- withEnv(["MASTER_HOSTNAME=${masterName}", "CLUSTER_NAME=${CLUSTER_NAME}", "MINION_ID=${masterName}"]){
- sh("cp -r ${workspace}/* /srv/salt/reclass && echo '127.0.1.2 salt' >> /etc/hosts")
- sh("""bash -c 'source /srv/salt/scripts/bootstrap.sh; cd /srv/salt/scripts \
+ if (checkouted) {
+ if (fileExists('classes/system')) {
+ ssh.prepareSshAgentKey(CREDENTIALS_ID)
+ dir('classes/system') {
+ // XXX: JENKINS-33510 the dir step does not work properly inside containers, so let's take the reclass system model directly
+ //remoteUrl = git.getGitRemote()
+ ssh.ensureKnownHosts("https://github.com/Mirantis/reclass-system-salt-model")
+ }
+ ssh.agentSh("git submodule init; git submodule sync; git submodule update --recursive")
+ }
+ }
+ withEnv(["MASTER_HOSTNAME=${masterName}", "CLUSTER_NAME=${CLUSTER_NAME}", "MINION_ID=${masterName}"]) {
+ sh("cp -r ${workspace}/* /srv/salt/reclass && echo '127.0.1.2 salt' >> /etc/hosts")
+ sh("""bash -c 'source /srv/salt/scripts/bootstrap.sh; cd /srv/salt/scripts \
&& source_local_envs \
&& configure_salt_master \
&& configure_salt_minion \
&& install_salt_formula_pkg; \
saltservice_restart; \
saltmaster_init'""")
- }
- }
- stage("Generate documentation"){
- def saltResult = sh(script:"salt-call state.sls salt.minion,sphinx.server,nginx", returnStatus:true)
- if(saltResult > 0){
- common.warnMsg("Salt call salt.minion,sphinx.server,nginx failed but continuing")
+ }
}
- }
- stage("Publish outputs"){
- try {
- // /srv/static/sites/reclass_doc will be used for publishHTML step
- // /srv/static/extern will be used as tar artifact
- def outputPresent = sh(script:"ls /srv/static/sites/reclass_doc > /dev/null 2>&1 && ls /srv/static/extern > /dev/null 2>&1", returnStatus: true) == 0
- if(outputPresent){
- sh("""mkdir ${workspace}/output && \
+ stage("Generate documentation") {
+ def saltResult = sh(script: "salt-call state.sls salt.minion,sphinx.server,nginx", returnStatus: true)
+ if (saltResult > 0) {
+ common.warnMsg("Salt call salt.minion,sphinx.server,nginx failed but continuing")
+ }
+ }
+ stage("Publish outputs") {
+ try {
+ // /srv/static/sites/reclass_doc will be used for publishHTML step
+ // /srv/static/extern will be used as tar artifact
+ def outputPresent = sh(script: "ls /srv/static/sites/reclass_doc > /dev/null 2>&1 && ls /srv/static/extern > /dev/null 2>&1", returnStatus: true) == 0
+ if (outputPresent) {
+ sh("""mkdir ${workspace}/output && \
tar -zcf ${workspace}/output/docs-html.tar.gz /srv/static/sites/reclass_doc && \
tar -zcf ${workspace}/output/docs-src.tar.gz /srv/static/extern && \
cp -R /srv/static/sites/reclass_doc ${workspace}/output && \
chown -R ${jenkinsUserIds[0]}:${jenkinsUserIds[1]} ${workspace}/output""")
- publishHTML (target: [
- alwaysLinkToLastBuild: true,
- keepAll: true,
- reportDir: 'output/reclass_doc',
- reportFiles: 'index.html',
- reportName: "Reclass-documentation"
- ])
- archiveArtifacts artifacts: "output/*"
- } else {
- common.errorMsg("Documentation publish failed, one of output directories /srv/static/sites/reclass_doc or /srv/static/extern not exists!")
- }
- } catch(Exception e) {
- common.errorMsg("Documentation publish stage failed!")
+ publishHTML(target: [
+ alwaysLinkToLastBuild: true,
+ keepAll : true,
+ reportDir : 'output/reclass_doc',
+ reportFiles : 'index.html',
+ reportName : "Reclass-documentation"
+ ])
+ archiveArtifacts artifacts: "output/*"
+ } else {
+ common.errorMsg("Documentation publish failed, one of output directories /srv/static/sites/reclass_doc or /srv/static/extern not exists!")
+ }
+ } catch (Exception e) {
+ common.errorMsg("Documentation publish stage failed!")
+ }
}
- }
- }
- } catch (Throwable e) {
- // If there was an error or exception thrown, the build failed
- currentBuild.result = "FAILURE"
- currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
- throw e
- } finally {
- common.sendNotification(currentBuild.result, "", ["slack"])
+ }
+ } catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ currentBuild.result = "FAILURE"
+ currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+ throw e
+ } finally {
+ common.sendNotification(currentBuild.result, "", ["slack"])
+ }
}
- }
}
diff --git a/git-merge-branches-pipeline.groovy b/git-merge-branches-pipeline.groovy
deleted file mode 100644
index d1c3ee2..0000000
--- a/git-merge-branches-pipeline.groovy
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Git merge branches pipeline
- * REPO_URL - Repository URL
- * TARGET_BRANCH - Target branch for merging
- * SOURCE_BRANCH - The branch will be merged to TARGET_BRANCH
- * CREDENTIALS_ID - Used credentails ID
- *
-**/
-
-def common = new com.mirantis.mk.Common()
-def git = new com.mirantis.mk.Git()
-timeout(time: 12, unit: 'HOURS') {
- node {
- try{
- stage("checkout") {
- git.checkoutGitRepository('repo', REPO_URL, TARGET_BRANCH, IMAGE_CREDENTIALS_ID)
- }
- stage("merge") {
- dir("repo"){
- sh("git fetch origin/${SOURCE_BRANCH} && git merge ${SOURCE_BRANCH} && git push origin ${TARGET_BRANCH}")
- }
- }
- } catch (Throwable e) {
- // If there was an error or exception thrown, the build failed
- currentBuild.result = "FAILURE"
- currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
- throw e
- }
- }
-}
-
diff --git a/k8s-upgrade-pipeline.groovy b/k8s-upgrade-pipeline.groovy
index 3e7828b..be065a1 100644
--- a/k8s-upgrade-pipeline.groovy
+++ b/k8s-upgrade-pipeline.groovy
@@ -10,6 +10,12 @@
* CTL_TARGET Salt targeted kubernetes CTL nodes (ex. I@kubernetes:master). Kubernetes control plane
* CMP_TARGET Salt targeted compute nodes (ex. cmp* and 'I@kubernetes:pool') Kubernetes computes
* PER_NODE Target nodes will be managed one by one (bool)
+ * SIMPLE_UPGRADE Use the previous upgrade flow, without cordon/drain abilities
+ * UPGRADE_DOCKER Upgrade the Docker component
+ * CONFORMANCE_RUN_AFTER Run Kubernetes conformance tests after the update
+ * CONFORMANCE_RUN_BEFORE Run Kubernetes conformance tests before the update
+ * TEST_K8S_API_SERVER Kubernetes API server address for test execution
+ * ARTIFACTORY_URL Artifactory URL where docker images are located. Needed to fetch conformance images correctly.
*
**/
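+// Illustrative parameter set (hypothetical values, shown only to clarify the
+// expected formats; not taken from any real environment):
+//   CTL_TARGET             = 'I@kubernetes:master'
+//   CMP_TARGET             = 'cmp* and I@kubernetes:pool'
+//   SIMPLE_UPGRADE         = 'false'  // use the cordon/drain flow below
+//   UPGRADE_DOCKER         = 'true'
+//   CONFORMANCE_RUN_BEFORE = 'true'
+//   TEST_K8S_API_SERVER    = 'http://127.0.0.1:8080'
+//   ARTIFACTORY_URL        = 'artifactory.example.com/mcp'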
def common = new com.mirantis.mk.Common()
@@ -50,6 +56,119 @@
}
}
+def cordonNode(pepperEnv, target) {
+ def salt = new com.mirantis.mk.Salt()
+ def originalTarget = "I@kubernetes:master and not ${target}"
+
+ stage("Cordoning ${target} kubernetes node") {
+ def nodeShortName = target.tokenize(".")[0]
+ salt.cmdRun(pepperEnv, originalTarget, "kubectl cordon ${nodeShortName}", true, 1)
+ }
+}
+
+def uncordonNode(pepperEnv, target) {
+ def salt = new com.mirantis.mk.Salt()
+ def originalTarget = "I@kubernetes:master and not ${target}"
+
+ stage("Uncordoning ${target} kubernetes node") {
+ def nodeShortName = target.tokenize(".")[0]
+ salt.cmdRun(pepperEnv, originalTarget, "kubectl uncordon ${nodeShortName}", true, 1)
+ }
+}
+
+def drainNode(pepperEnv, target) {
+ def salt = new com.mirantis.mk.Salt()
+ def originalTarget = "I@kubernetes:master and not ${target}"
+
+ stage("Draining ${target} kubernetes node") {
+ def nodeShortName = target.tokenize(".")[0]
+ salt.cmdRun(pepperEnv, originalTarget, "kubectl drain --force --ignore-daemonsets --grace-period 100 --timeout 300s --delete-local-data ${nodeShortName}", true, 1)
+ }
+}
+
+def regenerateCerts(pepperEnv, target) {
+ def salt = new com.mirantis.mk.Salt()
+
+ stage("Regenerate certs for ${target}") {
+ salt.enforceState(pepperEnv, target, 'salt.minion.cert')
+ }
+}
+
+def updateAddons(pepperEnv, target) {
+ def salt = new com.mirantis.mk.Salt()
+
+ stage("Upgrading Addons at ${target}") {
+ salt.enforceState(pepperEnv, target, "kubernetes.master.kube-addons")
+ }
+}
+
+def updateAddonManager(pepperEnv, target) {
+ def salt = new com.mirantis.mk.Salt()
+
+ stage("Upgrading AddonManager at ${target}") {
+ salt.enforceState(pepperEnv, target, "kubernetes.master.setup")
+ }
+}
+
+def upgradeDocker(pepperEnv, target) {
+ def salt = new com.mirantis.mk.Salt()
+
+ stage("Upgrading docker at ${target}") {
+ salt.enforceState(pepperEnv, target, 'docker.host')
+ }
+}
+
+def runConformance(pepperEnv, target, k8s_api, image) {
+ def salt = new com.mirantis.mk.Salt()
+ def containerName = 'conformance_tests'
+ output_file = image.replaceAll('/', '-') + '.output'
+ def output_file_full_path = "/tmp/" + image.replaceAll('/', '-') + '.output'
+ def artifacts_dir = '_artifacts/'
+ salt.cmdRun(pepperEnv, target, "docker rm -f ${containerName}", false)
+ salt.cmdRun(pepperEnv, target, "docker run -d --name ${containerName} --net=host -e API_SERVER=${k8s_api} ${image}")
+ sleep(10)
+
+ print("Waiting for tests to run...")
+ salt.runSaltProcessStep(pepperEnv, target, 'cmd.run', ["docker wait ${containerName}"], null, false)
+
+ print("Writing test results to output file...")
+ salt.runSaltProcessStep(pepperEnv, target, 'cmd.run', ["docker logs -t ${containerName} > ${output_file_full_path}"])
+ print("Conformance test output saved in " + output_file_full_path)
+
+ // collect output
+ sh "mkdir -p ${artifacts_dir}"
+ file_content = salt.getFileContent(pepperEnv, target, '/tmp/' + output_file)
+ writeFile file: "${artifacts_dir}${output_file}", text: file_content
+ sh "cat ${artifacts_dir}${output_file}"
+ try {
+ sh "cat ${artifacts_dir}${output_file} | grep 'Test Suite Failed' && exit 1 || exit 0"
+ } catch (Throwable e) {
+ print("Conformance tests failed. Please check output")
+ currentBuild.result = "FAILURE"
+ currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+ throw e
+ }
+}
+
+def buildImageURL(pepperEnv, target, mcp_repo) {
+ def salt = new com.mirantis.mk.Salt()
+ def raw_version = salt.cmdRun(pepperEnv, target, "kubectl version --short -o json")['return'][0].values()[0].replaceAll('Salt command execution success','')
+ print("Kubernetes version: " + raw_version)
+ def serialized_version = readJSON text: raw_version
+ def short_version = (serialized_version.serverVersion.gitVersion =~ /([v])(\d+\.)(\d+\.)(\d+\-)(\d+)/)[0][0]
+ print("Kubernetes short version: " + short_version)
+ def conformance_image = mcp_repo + "/mirantis/kubernetes/k8s-conformance:" + short_version
+ return conformance_image
+}
+
+def executeConformance(pepperEnv, target, k8s_api, mcp_repo) {
+ stage("Running conformance tests") {
+ def image = buildImageURL(pepperEnv, target, mcp_repo)
+ print("Using image: " + image)
+ runConformance(pepperEnv, target, k8s_api, image)
+ }
+}
+
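+// Worked example (hypothetical values): for a cluster whose
+// serverVersion.gitVersion is "v1.10.4-4", the regex in buildImageURL()
+// extracts short_version "v1.10.4-4", so with ARTIFACTORY_URL set to
+// "artifactory.example.com/mcp" the conformance image would resolve to
+// "artifactory.example.com/mcp/mirantis/kubernetes/k8s-conformance:v1.10.4-4".
+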
timeout(time: 12, unit: 'HOURS') {
node() {
@@ -59,6 +178,14 @@
python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
+ if (CONFORMANCE_RUN_BEFORE.toBoolean()) {
+ def target = CTL_TARGET
+ def mcp_repo = ARTIFACTORY_URL
+ def k8s_api = TEST_K8S_API_SERVER
+ firstTarget = salt.getFirstMinion(pepperEnv, target)
+ executeConformance(pepperEnv, firstTarget, k8s_api, mcp_repo)
+ }
+
if ((common.validInputParam('KUBERNETES_HYPERKUBE_IMAGE')) && (common.validInputParam('KUBERNETES_PAUSE_IMAGE'))) {
overrideKubernetesImage(pepperEnv)
}
@@ -73,11 +200,29 @@
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- performKubernetesControlUpdate(pepperEnv, t)
+ if (SIMPLE_UPGRADE.toBoolean()) {
+ performKubernetesControlUpdate(pepperEnv, t)
+ } else {
+ cordonNode(pepperEnv, t)
+ drainNode(pepperEnv, t)
+ regenerateCerts(pepperEnv, t)
+ if (UPGRADE_DOCKER.toBoolean()) {
+ upgradeDocker(pepperEnv, t)
+ }
+ performKubernetesControlUpdate(pepperEnv, t)
+ updateAddonManager(pepperEnv, t)
+ uncordonNode(pepperEnv, t)
+ }
}
} else {
performKubernetesControlUpdate(pepperEnv, target)
}
+ if (!SIMPLE_UPGRADE.toBoolean()) {
+ // Addons upgrade should be performed after all nodes are upgraded
+ updateAddons(pepperEnv, target)
+ // Wait 90 sec for the addons to reconcile
+ sleep(90)
+ }
}
if (updates.contains("cmp")) {
@@ -87,12 +232,31 @@
def targetHosts = salt.getMinionsSorted(pepperEnv, target)
for (t in targetHosts) {
- performKubernetesComputeUpdate(pepperEnv, t)
+ if (SIMPLE_UPGRADE.toBoolean()) {
+ performKubernetesComputeUpdate(pepperEnv, t)
+ } else {
+ cordonNode(pepperEnv, t)
+ drainNode(pepperEnv, t)
+ regenerateCerts(pepperEnv, t)
+ if (UPGRADE_DOCKER.toBoolean()) {
+ upgradeDocker(pepperEnv, t)
+ }
+ performKubernetesComputeUpdate(pepperEnv, t)
+ uncordonNode(pepperEnv, t)
+ }
}
} else {
performKubernetesComputeUpdate(pepperEnv, target)
}
}
+
+ if (CONFORMANCE_RUN_AFTER.toBoolean()) {
+ def target = CTL_TARGET
+ def mcp_repo = ARTIFACTORY_URL
+ def k8s_api = TEST_K8S_API_SERVER
+ firstTarget = salt.getFirstMinion(pepperEnv, target)
+ executeConformance(pepperEnv, firstTarget, k8s_api, mcp_repo)
+ }
} catch (Throwable e) {
// If there was an error or exception thrown, the build failed
currentBuild.result = "FAILURE"
diff --git a/opencontrail-upgrade.groovy b/opencontrail-upgrade.groovy
index af96600..4d9d498 100644
--- a/opencontrail-upgrade.groovy
+++ b/opencontrail-upgrade.groovy
@@ -33,7 +33,7 @@
def CONTROL_PKGS = 'contrail-config contrail-config-openstack contrail-control contrail-dns contrail-lib contrail-nodemgr contrail-utils contrail-web-controller contrail-web-core neutron-plugin-contrail python-contrail'
def ANALYTIC_PKGS = 'contrail-analytics contrail-lib contrail-nodemgr contrail-utils python-contrail'
def CMP_PKGS = 'contrail-lib contrail-nodemgr contrail-utils contrail-vrouter-agent contrail-vrouter-utils python-contrail python-contrail-vrouter-api python-opencontrail-vrouter-netns contrail-vrouter-dkms contrail-nova-driver'
-def KERNEL_MODULE_RELOAD = 'service supervisor-vrouter stop;ifdown vhost0;rmmod vrouter;modprobe vrouter;ifup vhost0;service supervisor-vrouter start;'
+def KERNEL_MODULE_RELOAD = 'service supervisor-vrouter stop; rmmod vrouter; sync && echo 3 > /proc/sys/vm/drop_caches && echo 1 > /proc/sys/vm/compact_memory; service supervisor-vrouter start'
def void runCommonCommands(target, command, args, check, salt, pepperEnv, common) {
diff --git a/opencontrail40-upgrade.groovy b/opencontrail40-upgrade.groovy
index 76243e5..7b5036f 100644
--- a/opencontrail40-upgrade.groovy
+++ b/opencontrail40-upgrade.groovy
@@ -26,13 +26,16 @@
def command = 'cmd.shell'
def controlPkgs = 'contrail-config,contrail-config-openstack,contrail-control,contrail-dns,contrail-lib,contrail-nodemgr,contrail-utils,contrail-web-controller,contrail-web-core,neutron-plugin-contrail,python-contrail,contrail-database'
+def thirdPartyControlPkgsToRemove = 'zookeeper,libzookeeper-java,kafka,cassandra,redis-server,ifmap-server,supervisor'
def analyticsPkgs = 'contrail-analytics,contrail-lib,contrail-nodemgr,contrail-utils,python-contrail,contrail-database'
+def thirdPartyAnalyticsPkgsToRemove = 'zookeeper,libzookeeper-java,kafka,cassandra,python-cassandra,cassandra-cpp-driver,redis-server,supervisor'
//def cmpPkgs = ['contrail-lib', 'contrail-nodemgr', 'contrail-utils', 'contrail-vrouter-agent', 'contrail-vrouter-utils', 'python-contrail', 'python-contrail-vrouter-api', 'python-opencontrail-vrouter-netns', 'contrail-vrouter-dkms']
def CMP_PKGS = 'contrail-lib contrail-nodemgr contrail-utils contrail-vrouter-agent contrail-vrouter-utils python-contrail python-contrail-vrouter-api python-opencontrail-vrouter-netns contrail-vrouter-dkms'
-def KERNEL_MODULE_RELOAD = 'service supervisor-vrouter stop;ifdown vhost0;rmmod vrouter;modprobe vrouter;ifup vhost0;service supervisor-vrouter start;'
-def analyticsServices = ['supervisor-analytics', 'supervisor-database', 'zookeeper']
+def KERNEL_MODULE_RELOAD = 'service supervisor-vrouter stop; rmmod vrouter; sync && echo 3 > /proc/sys/vm/drop_caches && echo 1 > /proc/sys/vm/compact_memory; service contrail-vrouter-agent start; service contrail-vrouter-nodemgr start'
+def analyticsServices = ['supervisor-analytics', 'supervisor-database', 'zookeeper', 'redis-server']
def configServices = ['contrail-webui-jobserver', 'contrail-webui-webserver', 'supervisor-config', 'supervisor-database', 'zookeeper']
-def controlServices = ['ifmap-server', 'supervisor-control']
+def controlServices = ['ifmap-server', 'supervisor-control', 'redis-server']
+def thirdPartyServicesToDisable = ['kafka', 'zookeeper', 'cassandra']
def config4Services = ['zookeeper', 'contrail-webui-middleware', 'contrail-webui', 'contrail-api', 'contrail-schema', 'contrail-svc-monitor', 'contrail-device-manager', 'contrail-config-nodemgr', 'contrail-database']
def void runCommonCommands(target, command, args, check, salt, pepperEnv, common) {
@@ -107,7 +110,7 @@
common.errorMsg('Cassandra failed to backup. Please fix it before continuing.')
throw er
}
-
+
salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.stop', ['neutron-server'])
try {
@@ -161,20 +164,44 @@
common.errorMsg("Opencontrail Controller failed to be upgraded.")
throw er
}
+ }
- }
- salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'archive.tar', ['zcvf', '/root/contrail-database.tgz', '/var/lib/cassandra'])
- salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'archive.tar', ['zcvf', '/root/contrail-zookeeper.tgz', '/var/lib/zoopeeker'])
- salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'archive.tar', ['zcvf', '/root/contrail-analytics-database.tgz', '/var/lib/cassandra'])
- salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'archive.tar', ['zcvf', '/root/contrail-analytics-zookeeper.tgz', '/var/lib/zookeeper'])
- //salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'pkg.remove', [controlPkgs])
- //salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'pkg.remove', [analyticsPkgs])
- for (service in controlServices) {
- salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.disable', [service])
- }
- for (service in analyticsServices) {
- salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'service.disable', [service])
+ stage('Opencontrail controllers backup and cleanup') {
+ try {
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'archive.tar', ['zcvf', '/root/contrail-database.tgz', '/var/lib/cassandra'])
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'archive.tar', ['zcvf', '/root/contrail-zookeeper.tgz', '/var/lib/zookeeper'])
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'archive.tar', ['zcvf', '/root/contrail-analytics-database.tgz', '/var/lib/cassandra'])
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'archive.tar', ['zcvf', '/root/contrail-analytics-zookeeper.tgz', '/var/lib/zookeeper'])
+
+ for (service in (controlServices + thirdPartyServicesToDisable)) {
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.disable', [service])
+ }
+ for (service in (analyticsServices + thirdPartyServicesToDisable)) {
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'service.disable', [service])
+ }
+
+ def tmpCfgBackupDir = '/tmp/cfg_backup'
+ def thirdPartyCfgFilesToBackup = ['/var/lib/zookeeper/myid', '/etc/zookeeper/conf/', '/usr/share/kafka/config/']
+
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control or I@opencontrail:collector', 'file.makedirs', [tmpCfgBackupDir])
+
+ for (cfgFilePath in thirdPartyCfgFilesToBackup) {
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control or I@opencontrail:collector', 'file.makedirs', [tmpCfgBackupDir + cfgFilePath])
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control or I@opencontrail:collector', 'file.copy', [cfgFilePath, tmpCfgBackupDir + cfgFilePath, 'recurse=True'])
+ }
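+ // e.g. '/var/lib/zookeeper/myid' is copied to
+ // '/tmp/cfg_backup/var/lib/zookeeper/myid' here and restored from there
+ // after the pkg.remove calls below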
+
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'pkg.remove', [controlPkgs + ',' + thirdPartyControlPkgsToRemove])
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'pkg.remove', [analyticsPkgs + ',' + thirdPartyAnalyticsPkgsToRemove])
+
+ for (cfgFilePath in thirdPartyCfgFilesToBackup) {
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control or I@opencontrail:collector', 'file.makedirs', [cfgFilePath])
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control or I@opencontrail:collector', 'file.copy', [tmpCfgBackupDir + cfgFilePath, cfgFilePath, 'recurse=True'])
+ }
+ } catch (Exception er) {
+ common.errorMsg("Opencontrail Controllers backup and cleanup stage has failed.")
+ throw er
}
+ }
}
@@ -305,6 +332,12 @@
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *01*', 'cmd.shell', ['cd /etc/docker/compose/opencontrail/; docker-compose down'], null, true)
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *01*', 'state.sls', ['opencontrail', 'exclude=opencontrail.client'])
+ for (service in (controlServices + thirdPartyServicesToDisable)) {
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.enable', [service])
+ }
+ for (service in (analyticsServices + thirdPartyServicesToDisable)) {
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'service.enable', [service])
+ }
}
}
diff --git a/openstack-compute-upgrade.groovy b/openstack-compute-upgrade.groovy
deleted file mode 100644
index 2984b55..0000000
--- a/openstack-compute-upgrade.groovy
+++ /dev/null
@@ -1,261 +0,0 @@
-/**
- * Update packages on given nodes
- *
- * Expected parameters:
- * SALT_MASTER_CREDENTIALS Credentials to the Salt API.
- * SALT_MASTER_URL Full Salt API address [https://10.10.10.1:8000].
- * TARGET_SERVERS Salt compound target to match nodes to be updated [*, G@osfamily:debian].
- * TARGET_SUBSET_TEST Number of nodes to list package updates, empty string means all targetted nodes.
- * TARGET_SUBSET_LIVE Number of selected nodes to live apply selected package update.
- * INTERACTIVE Ask interactive questions during pipeline run (bool).
- *
-**/
-
-def common = new com.mirantis.mk.Common()
-def salt = new com.mirantis.mk.Salt()
-def python = new com.mirantis.mk.Python()
-
-def pepperEnv = "pepperEnv"
-def targetTestSubset
-def targetLiveSubset
-def targetLiveAll
-def minions
-def result
-def args
-def command
-def commandKwargs
-def probe = 1
-
-timeout(time: 12, unit: 'HOURS') {
- node() {
- try {
-
- stage('Setup virtualenv for Pepper') {
- python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- }
-
- stage('List target servers') {
- minions = salt.getMinions(pepperEnv, TARGET_SERVERS)
-
- if (minions.isEmpty()) {
- throw new Exception("No minion was targeted")
- }
-
- if (TARGET_SUBSET_TEST != "") {
- targetTestSubset = minions.subList(0, Integer.valueOf(TARGET_SUBSET_TEST)).join(' or ')
- } else {
- targetTestSubset = minions.join(' or ')
- }
- targetLiveSubset = minions.subList(0, Integer.valueOf(TARGET_SUBSET_LIVE)).join(' or ')
- targetTestSubsetProbe = minions.subList(0, probe).join(' or ')
- targetLiveSubsetProbe = minions.subList(0, probe).join(' or ')
-
- targetLiveAll = minions.join(' or ')
- common.infoMsg("Found nodes: ${targetLiveAll}")
- common.infoMsg("Selected test nodes: ${targetTestSubset}")
- common.infoMsg("Selected sample nodes: ${targetLiveSubset}")
- }
-
-
- stage("Add new repos on test nodes") {
- salt.enforceState(pepperEnv, targetTestSubset, 'linux.system.repo')
- }
-
-
- opencontrail = null
-
- try {
- opencontrail = salt.cmdRun(pepperEnv, targetTestSubsetProbe, "salt-call grains.item roles | grep opencontrail.compute")
- print(opencontrail)
- } catch (Exception er) {
- common.infoMsg("opencontrail is not used")
- }
-
- if(opencontrail != null) {
- stage('Remove OC component from repos on test nodes') {
- def contrail_repo_file1 = ''
- def contrail_repo_file2 = ''
- try {
- contrail_repo_file1 = salt.cmdRun(pepperEnv, targetTestSubset, "grep -Eorl \\ oc\\([0-9]*\$\\) /etc/apt/sources.list*")['return'][0].values()[0].split("\n")[0]
- contrail_repo_file2 = salt.cmdRun(pepperEnv, targetTestSubset, "grep -Eorl \\ oc\\([0-9]*\\ \\) /etc/apt/sources.list*")['return'][0].values()[0].split("\n")[0]
- } catch (Exception er) {
- common.warningMsg(er)
- }
- salt.cmdRun(pepperEnv, targetTestSubset, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g;s/ oc([0-9]*\$)//g'")
- try {
- salt.cmdRun(pepperEnv, targetTestSubset, "salt-call pkg.refresh_db")
- } catch (Exception er) {
- common.warningMsg(er)
- // remove the malformed repo entry
- salt.cmdRun(pepperEnv, targetTestSubset, "rm ${contrail_repo_file1} ${contrail_repo_file2}")
- salt.runSaltProcessStep(pepperEnv, targetTestSubset, 'pkg.refresh_db', [], null, true)
- }
- }
- }
-
- stage("List package upgrades") {
- salt.runSaltProcessStep(pepperEnv, targetTestSubset, 'pkg.list_upgrades', [], null, true)
- }
-
- if (INTERACTIVE.toBoolean()){
- stage('Confirm upgrade on sample nodes') {
- input message: "Please verify the list of packages that you want to be upgraded. Do you want to continue with upgrade?"
- }
- }
-
- stage("Add new repos on sample nodes") {
- salt.enforceState(pepperEnv, targetLiveSubset, 'linux.system.repo')
- }
-
- if(opencontrail != null) {
- stage('Remove OC component from repos on sample nodes') {
- def contrail_repo_file1 = ''
- def contrail_repo_file2 = ''
- try {
- contrail_repo_file1 = salt.cmdRun(pepperEnv, targetLiveSubset, "grep -Eorl \\ oc\\([0-9]*\$\\) /etc/apt/sources.list*")['return'][0].values()[0].split("\n")[0]
- contrail_repo_file2 = salt.cmdRun(pepperEnv, targetLiveSubset, "grep -Eorl \\ oc\\([0-9]*\\ \\) /etc/apt/sources.list*")['return'][0].values()[0].split("\n")[0]
- } catch (Exception er) {
- common.warningMsg(er)
- }
- salt.cmdRun(pepperEnv, targetLiveSubset, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g;s/ oc([0-9]*\$)//g'")
- try {
- salt.cmdRun(pepperEnv, targetLiveSubset, "salt-call pkg.refresh_db")
- } catch (Exception er) {
- common.warningMsg(er)
- // remove the malformed repo entry
- salt.cmdRun(pepperEnv, targetLiveSubset, "rm ${contrail_repo_file1} ${contrail_repo_file2}")
- salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'pkg.refresh_db', [], null, true)
- }
- }
- }
-
- args = "apt-get -y -s -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade"
-
- stage('Test upgrade on sample') {
- try {
- salt.cmdRun(pepperEnv, targetLiveSubset, args)
- } catch (Exception er) {
- print(er)
- }
- }
-
- if (INTERACTIVE.toBoolean()){
- stage('Confirm upgrade on sample') {
- input message: "Please verify if there are packages that it wants to downgrade. If so, execute apt-cache policy on them and verify if everything is fine. Do you want to continue with upgrade?"
- }
- }
-
- command = "cmd.run"
- args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
-
- stage('Apply package upgrades on sample') {
- out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
- salt.printSaltCommandResult(out)
- }
-
- openvswitch = null
-
- try {
- openvswitch = salt.cmdRun(pepperEnv, targetLiveSubsetProbe, "salt-call grains.item roles | grep neutron.compute")
- } catch (Exception er) {
- common.infoMsg("openvswitch is not used")
- }
-
- if(openvswitch != null) {
- args = "sudo /usr/share/openvswitch/scripts/ovs-ctl start"
-
- stage('Start ovs on sample nodes') {
- out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
- salt.printSaltCommandResult(out)
- }
- stage("Run salt states on sample nodes") {
- salt.enforceState(pepperEnv, targetLiveSubset, ['nova', 'neutron'])
- }
- } else {
- stage("Run salt states on sample nodes") {
- salt.enforceState(pepperEnv, targetLiveSubset, ['nova', 'linux.system.repo'])
- }
- }
-
- stage("Run Highstate on sample nodes") {
- try {
- salt.enforceHighstate(pepperEnv, targetLiveSubset)
- } catch (Exception er) {
- common.errorMsg("Highstate was executed on ${targetLiveSubset} but something failed. Please check it and fix it accordingly.")
- }
- }
-
- if (INTERACTIVE.toBoolean()){
- stage('Confirm upgrade on all targeted nodes') {
- timeout(time: 2, unit: 'HOURS') {
- input message: "Verify that the upgraded sample nodes are working correctly. If so, do you want to approve live upgrade on ${targetLiveAll} nodes?"
- }
- }
- }
-
- stage("Add new repos on all targeted nodes") {
- salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo')
- }
-
- if(opencontrail != null) {
- stage('Remove OC component from repos on all targeted nodes') {
- def contrail_repo_file1 = ''
- def contrail_repo_file2 = ''
- try {
- contrail_repo_file1 = salt.cmdRun(pepperEnv, targetLiveAll, "grep -Eorl \\ oc\\([0-9]*\$\\) /etc/apt/sources.list*")['return'][0].values()[0].split("\n")[0]
- contrail_repo_file2 = salt.cmdRun(pepperEnv, targetLiveAll, "grep -Eorl \\ oc\\([0-9]*\\ \\) /etc/apt/sources.list*")['return'][0].values()[0].split("\n")[0]
- } catch (Exception er) {
- common.warningMsg(er)
- }
- salt.cmdRun(pepperEnv, targetLiveAll, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g;s/ oc([0-9]*\$)//g'")
- try {
- salt.cmdRun(pepperEnv, targetLiveAll, "salt-call pkg.refresh_db")
- } catch (Exception er) {
- common.warningMsg(er)
- // remove the malformed repo entry
- salt.cmdRun(pepperEnv, targetLiveAll, "rm ${contrail_repo_file1} ${contrail_repo_file2}")
- salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'pkg.refresh_db', [], null, true)
- }
- }
- }
-
- args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
-
- stage('Apply package upgrades on all targeted nodes') {
- out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
- salt.printSaltCommandResult(out)
- }
-
- if(openvswitch != null) {
- args = "sudo /usr/share/openvswitch/scripts/ovs-ctl start"
-
- stage('Start ovs on all targeted nodes') {
- out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
- salt.printSaltCommandResult(out)
- }
- stage("Run salt states on all targeted nodes") {
- salt.enforceState(pepperEnv, targetLiveAll, ['nova', 'neutron'])
- }
- } else {
- stage("Run salt states on all targeted nodes") {
- salt.enforceState(pepperEnv, targetLiveAll, ['nova', 'linux.system.repo'])
- }
- }
-
- stage("Run Highstate on all targeted nodes") {
- try {
- salt.enforceHighstate(pepperEnv, targetLiveAll)
- } catch (Exception er) {
- common.errorMsg("Highstate was executed ${targetLiveAll} but something failed. Please check it and fix it accordingly.")
- }
- }
-
- } catch (Throwable e) {
- // If there was an error or exception thrown, the build failed
- currentBuild.result = "FAILURE"
- currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
- throw e
- }
- }
-}
-
diff --git a/openstack-control-upgrade.groovy b/openstack-control-upgrade.groovy
index 89b5e77..5febb3c 100644
--- a/openstack-control-upgrade.groovy
+++ b/openstack-control-upgrade.groovy
@@ -1,582 +1,193 @@
/**
+ * Upgrade OpenStack packages on control plane nodes.
+ * There is no silver bullet in upgrading a cloud.
* Update packages on given nodes
*
* Expected parameters:
* SALT_MASTER_CREDENTIALS Credentials to the Salt API.
* SALT_MASTER_URL Full Salt API address [http://10.10.10.1:8000].
- * STAGE_TEST_UPGRADE Run test upgrade stage (bool)
- * STAGE_REAL_UPGRADE Run real upgrade stage (bool)
- * STAGE_ROLLBACK_UPGRADE Run rollback upgrade stage (bool)
- * SKIP_VM_RELAUNCH Set to true if vms should not be recreated (bool)
- * OPERATING_SYSTEM_RELEASE_UPGRADE Set to true if operating system of vms should be upgraded to newer release (bool)
+ * OS_DIST_UPGRADE Upgrade system packages including kernel (apt-get dist-upgrade)
+ * OS_UPGRADE Upgrade all installed applications (apt-get upgrade)
+ * TARGET_SERVERS Comma-separated list of salt compound definitions to upgrade.
* INTERACTIVE Ask interactive questions during pipeline run (bool).
*
+ * TODO:
+ * * Add OS_RELEASE_UPGRADE
**/
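+// Illustrative parameter values (hypothetical, for format reference only):
+//   TARGET_SERVERS  = 'ctl*'   // or e.g. 'I@keystone:server'
+//   OS_DIST_UPGRADE = 'false'
+//   OS_UPGRADE      = 'true'
+//   INTERACTIVE     = 'true'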
def common = new com.mirantis.mk.Common()
def salt = new com.mirantis.mk.Salt()
def python = new com.mirantis.mk.Python()
+def debian = new com.mirantis.mk.Debian()
+def openstack = new com.mirantis.mk.Openstack()
-def getNodeProvider(pepperEnv, name) {
+def interactive = INTERACTIVE.toBoolean()
+def LinkedHashMap upgradeStageMap = [:]
+
+upgradeStageMap.put('Pre upgrade',
+ [
+ 'Description': 'Only non-destructive actions will be applied during this phase. Basic API and service verification will be performed.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * No service downtime
+ * No workload downtime''',
+ 'Launched actions': '''
+ * Verify API, perform basic CRUD operations for services.
+ * Verify that compute/neutron agents on hosts are up.
+ * Run some built-in service checkers like keystone-manage doctor or nova-status upgrade.''',
+ 'State result': 'Basic checks around services API are passed.'
+ ])
+upgradeStageMap.put('Stop OpenStack services',
+ [
+ 'Description': 'All OpenStack python services will be stopped on all control nodes. This does not affect data plane services such as openvswitch or qemu.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * OpenStack python services are stopped.
+ * OpenStack APIs are not accessible from this point.
+ * No workload downtime''',
+ 'Launched actions': '''
+ * Stop OpenStack python services''',
+ 'State result': 'OpenStack python services are stopped',
+ ])
+upgradeStageMap.put('Upgrade OpenStack',
+ [
+ 'Description': 'OpenStack python code will be upgraded during this stage. No workload downtime is expected.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * OpenStack services might flap
+ * No workload downtime''',
+ 'Launched actions': '''
+ * Install new version of OpenStack packages
+ * Render the new version of configs
+ * Apply offline dbsync
+ * Start OpenStack services
+ * Verify agents are alive/connected
+ * Run basic API validation''',
+ 'State result': '''
+ * OpenStack packages are upgraded
+ * Services are running
+ * Basic checks around services API are passed
+ * Verified that agents/services on data plane nodes are connected to new control plane
+'''
+ ])
+upgradeStageMap.put('Upgrade OS',
+ [
+ 'Description': 'Optional step. OS packages will be upgraded during this phase; depending on the job parameters, dist-upgrade might be called and the node rebooted.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * OpenStack services might flap
+ * No workload downtime
+ * The nodes might be rebooted''',
+ 'Launched actions': '''
+ * Install new version of system packages
+ * If doing dist-upgrade new kernel might be installed and node rebooted
+ * Verify agents are alive/connected
+ * Run basic API validation''',
+ 'State result': '''
+ * System packages are updated
+ * Services are running
+ * Basic checks around services API are passed
+ * Verified that agents/services on data plane nodes are connected
+ * Node might be rebooted
+'''
+ ])
+
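+// A minimal sketch of how this stage map can be inspected (plain Groovy over
+// the structure defined above; it does not rely on pipeline-library internals):
+//   upgradeStageMap.each { name, meta ->
+//       println "${name} [${meta['Status']}]: ${meta['Description']}"
+//   }
+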
+def stopOpenStackServices(env, target) {
def salt = new com.mirantis.mk.Salt()
- def kvm = salt.getKvmMinionId(pepperEnv)
- return salt.getReturnValues(salt.getPillar(pepperEnv, "${kvm}", "salt:control:cluster:internal:node:${name}:provider"))
-}
-
-def stopServices(pepperEnv, probe, target, type) {
def openstack = new com.mirantis.mk.Openstack()
- def services = []
- if (type == 'prx') {
- services.add('keepalived')
- services.add('nginx')
- } else if (type == 'ctl') {
- services.add('keepalived')
- services.add('haproxy')
- services.add('nova')
- services.add('cinder')
- services.add('glance')
- services.add('heat')
- services.add('neutron')
- services.add('apache2')
- }
- openstack.stopServices(pepperEnv, probe, target, services)
-}
-
-def retryStateRun(pepperEnv, target, state) {
def common = new com.mirantis.mk.Common()
- def salt = new com.mirantis.mk.Salt()
- try {
- salt.enforceState(pepperEnv, target, state)
- } catch (Exception e) {
- common.warningMsg("running ${state} state again")
- salt.enforceState(pepperEnv, target, state)
+
+ def services = openstack.getOpenStackUpgradeServices(env, target)
+ def st
+ for (service in services){
+ st = "${service}.upgrade.service_stopped".trim()
+ common.infoMsg("Stopping ${st} services on ${target}")
+ salt.enforceState(env, target, st)
}
}
-def stateRun(pepperEnv, target, state) {
- def common = new com.mirantis.mk.Common()
- def salt = new com.mirantis.mk.Salt()
- try {
- salt.enforceState(pepperEnv, target, state)
- } catch (Exception e) {
- common.warningMsg("Some parts of ${state} state failed. We should continue to run.")
- }
+def snapshotVM(env, domain, snapshotName) {
+ def common = new com.mirantis.mk.Common()
+ def salt = new com.mirantis.mk.Salt()
+
+ def target = salt.getNodeProvider(env, domain)
+
+ // TODO: gracefully migrate all workloads from the VM, and stop it
+ salt.runSaltProcessStep(env, target, 'virt.shutdown', [domain], null, true, 3600)
+
+ // TODO: wait until the VM is powered off
+
+ common.infoMsg("Creating snapshot ${snapshotName} for VM ${domain} on node ${target}")
+ salt.runSaltProcessStep(env, target, 'virt.snapshot', [domain, snapshotName], null, true, 3600)
}
+def revertSnapshotVM(env, domain, snapshotName, ensureUp=true) {
+ def common = new com.mirantis.mk.Common()
+ def salt = new com.mirantis.mk.Salt()
-def vcpTestUpgrade(pepperEnv) {
- def common = new com.mirantis.mk.Common()
- def salt = new com.mirantis.mk.Salt()
- def test_upgrade_node = "upg01"
- salt.runSaltProcessStep(pepperEnv, 'I@salt:master', 'saltutil.refresh_pillar', [], null, true, 2)
+ def target = salt.getNodeProvider(env, domain)
- stateRun(pepperEnv, 'I@salt:master', 'linux.system.repo')
- stateRun(pepperEnv, 'I@salt:master', 'salt.master')
- stateRun(pepperEnv, 'I@salt:master', 'reclass')
- stateRun(pepperEnv, 'I@salt:master', 'linux.system.repo')
+ common.infoMsg("Reverting snapshot ${snapshotName} for VM ${domain} on node ${target}")
+ salt.runSaltProcessStep(env, target, 'virt.revert_snapshot', [snapshotName, domain], null, true, 3600)
- try {
- salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.refresh_pillar', [], null, true, 2)
- } catch (Exception e) {
- common.warningMsg("No response from some minions. We should continue to run")
+ if (ensureUp){
+ salt.runSaltProcessStep(env, target, 'virt.start', [domain], null, true, 300)
+ }
+}
+
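+// Usage sketch for the snapshot helpers above (domain and snapshot names are
+// hypothetical):
+//   snapshotVM(env, 'ctl01.example.local', 'upgradeSnapshot1')
+//   // ... perform the upgrade, and on failure:
+//   revertSnapshotVM(env, 'ctl01.example.local', 'upgradeSnapshot1')
+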
+def env = "env"
+timeout(time: 12, unit: 'HOURS') {
+ node() {
+
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(env, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
- try {
- salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.sync_all', [], null, true, 2)
- } catch (Exception e) {
- common.warningMsg("No response from some minions. We should continue to run")
+ def upgradeTargets = salt.getMinionsSorted(env, TARGET_SERVERS)
+
+ if (upgradeTargets.isEmpty()) {
+ error("No servers for upgrade matched by ${TARGET_SERVERS}")
}
- def domain = salt.getDomainName(pepperEnv)
-
- def backupninja_backup_host = salt.getReturnValues(salt.getPillar(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', '_param:backupninja_backup_host'))
-
- if (SKIP_VM_RELAUNCH.toBoolean() == false) {
-
- def upgNodeProvider = getNodeProvider(pepperEnv, test_upgrade_node)
-
- salt.runSaltProcessStep(pepperEnv, "${upgNodeProvider}", 'virt.destroy', ["${test_upgrade_node}.${domain}"])
- salt.runSaltProcessStep(pepperEnv, "${upgNodeProvider}", 'virt.undefine', ["${test_upgrade_node}.${domain}"])
-
- try {
- salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${test_upgrade_node}.${domain} -y")
- } catch (Exception e) {
- common.warningMsg("${test_upgrade_node}.${domain} does not match any accepted, unaccepted or rejected keys. The key did not exist yet or was already removed. We should continue to run")
- }
-
- // salt 'kvm02*' state.sls salt.control
- stateRun(pepperEnv, "${upgNodeProvider}", 'salt.control')
- // wait until upg node is registered in salt-key
- salt.minionPresent(pepperEnv, 'I@salt:master', test_upgrade_node)
- // salt '*' saltutil.refresh_pillar
- salt.runSaltProcessStep(pepperEnv, "${test_upgrade_node}*", 'saltutil.refresh_pillar', [])
- // salt '*' saltutil.sync_all
- salt.runSaltProcessStep(pepperEnv, "${test_upgrade_node}*", 'saltutil.sync_all', [])
+ common.printStageMap(upgradeStageMap)
+ if (interactive){
+ input message: common.getColorizedString(
+ "Above you can find detailed info this pipeline will execute.\nThe info provides brief description of each stage, actions that will be performed and service/workload impact during each stage.\nPlease read it carefully.", "yellow")
}
- stateRun(pepperEnv, "${test_upgrade_node}*", ['linux.network.proxy'])
- try {
- salt.runSaltProcessStep(pepperEnv, "${test_upgrade_node}*", 'state.sls', ["salt.minion.base"], null, true, 60)
- } catch (Exception e) {
- common.warningMsg(e)
- }
- stateRun(pepperEnv, "${test_upgrade_node}*", ['linux', 'openssh'])
-
- try {
- salt.runSaltProcessStep(pepperEnv, "${test_upgrade_node}*", 'state.sls', ["salt.minion"], null, true, 60)
- } catch (Exception e) {
- common.warningMsg(e)
- }
- stateRun(pepperEnv, "${test_upgrade_node}*", ['ntp', 'rsyslog'])
- salt.enforceState(pepperEnv, "${test_upgrade_node}*", ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
- salt.enforceState(pepperEnv, "${test_upgrade_node}*", ['rabbitmq', 'memcached'])
- try {
- salt.enforceState(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', ['openssh.client', 'salt.minion'])
- } catch (Exception e) {
- common.warningMsg('salt-minion was restarted. We should continue to run')
- }
- salt.runSaltProcessStep(master, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'saltutil.sync_grains')
- salt.runSaltProcessStep(master, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'mine.flush')
- salt.runSaltProcessStep(master, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'mine.update')
- salt.enforceState(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'backupninja')
- try {
- salt.enforceState(pepperEnv, 'I@backupninja:server', ['salt.minion'])
- } catch (Exception e) {
- common.warningMsg('salt-minion was restarted. We should continue to run')
- }
-
- salt.enforceState(pepperEnv, 'I@backupninja:server', 'backupninja')
- salt.runSaltProcessStep(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'ssh.rm_known_host', ["root", "${backupninja_backup_host}"])
- try {
- salt.cmdRun(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', "arp -d ${backupninja_backup_host}")
- } catch (Exception e) {
- common.warningMsg('The ARP entry does not exist. We should continue to run.')
- }
- salt.runSaltProcessStep(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'ssh.set_known_host', ["root", "${backupninja_backup_host}"])
- salt.cmdRun(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'backupninja -n --run /etc/backup.d/101.mysql')
- salt.cmdRun(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'backupninja -n --run /etc/backup.d/200.backup.rsync > /tmp/backupninjalog')
-
- salt.enforceState(pepperEnv, 'I@xtrabackup:server', 'xtrabackup')
- salt.enforceState(pepperEnv, 'I@xtrabackup:client', 'openssh.client')
- salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
- salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh -f -s'")
-
- def databases = salt.cmdRun(pepperEnv, 'I@mysql:client','salt-call mysql.db_list | grep upgrade | awk \'/-/ {print \$2}\'')
- if(databases && databases != ""){
- def databasesList = salt.getReturnValues(databases).trim().tokenize("\n")
- for( i = 0; i < databasesList.size(); i++){
- if(databasesList[i].toLowerCase().contains('upgrade')){
- salt.runSaltProcessStep(pepperEnv, 'I@mysql:client', 'mysql.db_remove', ["${databasesList[i]}"])
- common.warningMsg("removing database ${databasesList[i]}")
- salt.runSaltProcessStep(pepperEnv, 'I@mysql:client', 'file.remove', ["/root/mysql/flags/${databasesList[i]}-installed"])
- }
- }
- salt.enforceState(pepperEnv, 'I@mysql:client', 'mysql.client')
- }else{
- common.errorMsg("No _upgrade databases were returned")
- }
-
- try {
- salt.enforceState(pepperEnv, "${test_upgrade_node}*", 'keystone.server')
- salt.runSaltProcessStep(pepperEnv, "${test_upgrade_node}*", 'service.restart', ['apache2'])
- } catch (Exception e) {
- common.warningMsg('Restarting Apache2')
- salt.runSaltProcessStep(pepperEnv, "${test_upgrade_node}*", 'service.restart', ['apache2'])
- }
- retryStateRun(pepperEnv, "${test_upgrade_node}*", 'keystone.client')
- retryStateRun(pepperEnv, "${test_upgrade_node}*", 'glance')
- salt.enforceState(pepperEnv, "${test_upgrade_node}*", 'keystone.server')
-
- retryStateRun(pepperEnv, "${test_upgrade_node}*", 'nova')
- retryStateRun(pepperEnv, "${test_upgrade_node}*", 'nova') // run nova state again as sometimes nova does not enforce itself for some reason
- retryStateRun(pepperEnv, "${test_upgrade_node}*", 'cinder')
- retryStateRun(pepperEnv, "${test_upgrade_node}*", 'neutron')
- retryStateRun(pepperEnv, "${test_upgrade_node}*", 'heat')
-
- salt.cmdRun(pepperEnv, "${test_upgrade_node}*", '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
-
- if (INTERACTIVE.toBoolean() && STAGE_TEST_UPGRADE.toBoolean() == true && STAGE_REAL_UPGRADE.toBoolean() == true) {
- stage('Ask for manual confirmation') {
- input message: "Do you want to continue with upgrade?"
+ for (target in upgradeTargets){
+ common.stageWrapper(upgradeStageMap, "Pre upgrade", target, interactive) {
+ openstack.runOpenStackUpgradePhase(env, target, 'pre')
+ openstack.runOpenStackUpgradePhase(env, target, 'verify')
}
}
-}
-
-def vcpRealUpgrade(pepperEnv) {
- def common = new com.mirantis.mk.Common()
- def salt = new com.mirantis.mk.Salt()
- def openstack = new com.mirantis.mk.Openstack()
- def virsh = new com.mirantis.mk.Virsh()
-
- def upgrade_target = []
- upgrade_target.add('I@horizon:server')
- upgrade_target.add('I@keystone:server and not upg*')
-
- def proxy_general_target = "I@horizon:server"
- def control_general_target = "I@keystone:server and not upg*"
- def upgrade_general_target = "( I@keystone:server and not upg* ) or I@horizon:server"
-
- def snapshotName = "upgradeSnapshot1"
-
- def domain = salt.getDomainName(pepperEnv)
- def errorOccured = false
-
- for (tgt in upgrade_target) {
- def target_hosts = salt.getMinionsSorted(pepperEnv, "${tgt}")
- def node = salt.getFirstMinion(pepperEnv, "${tgt}")
- def general_target = ""
-
- if (tgt.toString().contains('horizon:server')) {
- general_target = 'prx'
- } else if (tgt.toString().contains('keystone:server')) {
- general_target = 'ctl'
- }
-
- if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == false) {
- stopServices(pepperEnv, node, tgt, general_target)
- }
-
- for (t in target_hosts) {
- def target = salt.stripDomainName(t)
- def nodeProvider = salt.getNodeProvider(pepperEnv, t)
- if ((OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == true) && (SKIP_VM_RELAUNCH.toBoolean() == false)) {
- salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.destroy', ["${target}.${domain}"])
- sleep(2)
- try {
- salt.cmdRun(pepperEnv, "${nodeProvider}", "[ ! -f /root/${target}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${target}.${domain}/system.qcow2 ./${target}.${domain}.qcow2.bak")
- } catch (Exception e) {
- common.warningMsg('File already exists')
- }
- salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.undefine', ["${target}.${domain}"])
- try {
- salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${target}.${domain} -y")
- } catch (Exception e) {
- common.warningMsg('does not match any accepted, unaccepted or rejected keys. They were probably already removed. We should continue to run')
- }
- } else if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == false) {
- virsh.liveSnapshotPresent(pepperEnv, nodeProvider, target, snapshotName)
- }
- }
+ for (target in upgradeTargets) {
+ common.stageWrapper(upgradeStageMap, "Stop OpenStack services", target, interactive) {
+ stopOpenStackServices(env, target)
+ }
}
- if ((OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == true) && (SKIP_VM_RELAUNCH.toBoolean() == false)) {
- salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c '/usr/local/bin/innobackupex-runner.sh -f -s'")
+ for (target in upgradeTargets) {
+ common.stageWrapper(upgradeStageMap, "Upgrade OpenStack", target, interactive) {
+ openstack.runOpenStackUpgradePhase(env, target, 'upgrade')
+ openstack.applyOpenstackAppsStates(env, target)
+ openstack.runOpenStackUpgradePhase(env, target, 'verify')
+ }
- salt.enforceState(pepperEnv, 'I@salt:control', 'salt.control')
-
- for (tgt in upgrade_target) {
- salt.minionsPresent(pepperEnv, 'I@salt:master', tgt)
+ common.stageWrapper(upgradeStageMap, "Upgrade OS", target, interactive) {
+ if (OS_DIST_UPGRADE.toBoolean() == true){
+ upgrade_mode = 'dist-upgrade'
+ } else if (OS_UPGRADE.toBoolean() == true){
+ upgrade_mode = 'upgrade'
}
+ if (OS_DIST_UPGRADE.toBoolean() == true || OS_UPGRADE.toBoolean() == true) {
+ debian.osUpgradeNode(env, target, upgrade_mode, false)
+ }
+ openstack.applyOpenstackAppsStates(env, target)
+ openstack.runOpenStackUpgradePhase(env, target, 'verify')
+ }
}
-
- // salt '*' saltutil.refresh_pillar
- salt.runSaltProcessStep(pepperEnv, upgrade_general_target, 'saltutil.refresh_pillar', [])
- // salt '*' saltutil.sync_all
- salt.runSaltProcessStep(pepperEnv, upgrade_general_target, 'saltutil.sync_all', [])
-
- stateRun(pepperEnv, upgrade_general_target, ['linux.network.proxy'])
- try {
- salt.runSaltProcessStep(pepperEnv, upgrade_general_target, 'state.sls', ["salt.minion.base"], null, true, 60)
- } catch (Exception e) {
- common.warningMsg(e)
- }
-
- if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == false) {
-
- try {
- salt.enforceState(pepperEnv, upgrade_general_target, ['linux.system.repo'])
- } catch (Exception e) {
- common.warningMsg(e)
- }
-
- salt.runSaltProcessStep(pepperEnv, upgrade_general_target, 'pkg.install', ['salt-minion'], null, true, 5)
- salt.minionsReachable(pepperEnv, 'I@salt:master', upgrade_general_target)
-
- // Apply package upgrades
- args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades --allow-unauthenticated -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
- common.warningMsg("Running apt dist-upgrade on ${proxy_general_target} and ${control_general_target}, this might take a while...")
- out = salt.runSaltProcessStep(pepperEnv, upgrade_general_target, 'cmd.run', [args])
- // stop services again
- def proxy_node = salt.getFirstMinion(pepperEnv, proxy_general_target)
- def control_node = salt.getFirstMinion(pepperEnv, control_general_target)
- stopServices(pepperEnv, proxy_node, proxy_general_target, 'prx')
- stopServices(pepperEnv, control_node, control_general_target, 'ctl')
- salt.printSaltCommandResult(out)
- if (out.toString().contains("dpkg returned an error code")){
- if (INTERACTIVE.toBoolean()) {
- input message: "Apt dist-upgrade failed, please fix it manually and then click on proceed. If unable to fix it, click on abort and run the rollback stage."
- } else {
- error("Apt dist-upgrade failed. And interactive mode was disabled, failing...")
- }
- }
- // run base states
- try {
- salt.enforceState(pepperEnv, upgrade_general_target, ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
- } catch (Exception e) {
- common.warningMsg(e)
- }
- salt.enforceState(pepperEnv, control_general_target, ['keepalived', 'haproxy'])
- } else {
- // initial VM setup
- try {
- salt.enforceState(pepperEnv, upgrade_general_target, ['linux', 'openssh'])
- } catch (Exception e) {
- common.warningMsg(e)
- }
- try {
- salt.runSaltProcessStep(pepperEnv, upgrade_general_target, 'state.sls', ["salt.minion"], null, true, 60)
- } catch (Exception e) {
- common.warningMsg(e)
- }
- try {
- salt.enforceState(pepperEnv, upgrade_general_target, ['ntp', 'rsyslog'])
- } catch (Exception e) {
- common.warningMsg(e)
- }
- salt.enforceState(pepperEnv, upgrade_general_target, ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
- salt.enforceState(pepperEnv, control_general_target, ['keepalived', 'haproxy'])
- salt.runSaltProcessStep(pepperEnv, control_general_target, 'service.restart', ['rsyslog'])
- }
-
- try {
- try {
- salt.enforceState(pepperEnv, control_general_target, ['memcached', 'keystone.server'])
- salt.runSaltProcessStep(pepperEnv, control_general_target, 'service.restart', ['apache2'])
- } catch (Exception e) {
- common.warningMsg('Restarting Apache2 and enforcing keystone.server state again')
- salt.runSaltProcessStep(pepperEnv, control_general_target, 'service.restart', ['apache2'])
- salt.enforceState(pepperEnv, control_general_target, 'keystone.server')
- }
- // salt 'ctl01*' state.sls keystone.client
- retryStateRun(pepperEnv, "I@keystone:client and ${control_general_target}", 'keystone.client')
- retryStateRun(pepperEnv, control_general_target, 'glance')
- salt.enforceState(pepperEnv, control_general_target, 'glusterfs.client')
- salt.enforceState(pepperEnv, control_general_target, 'keystone.server')
- retryStateRun(pepperEnv, control_general_target, 'nova')
- retryStateRun(pepperEnv, control_general_target, 'cinder')
- retryStateRun(pepperEnv, control_general_target, 'neutron')
- retryStateRun(pepperEnv, control_general_target, 'heat')
- } catch (Exception e) {
- errorOccured = true
- if (INTERACTIVE.toBoolean()){
- if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == false) {
- input message: "Some states that require syncdb failed. Please check the reason. Click proceed only if you want to restore database into it's pre-upgrade state. If you want restore production database and also the VMs into its pre-upgrade state please click on abort and run the rollback stage."
- } else {
- input message: "Some states that require syncdb failed. Please check the reason and click proceed only if you want to restore database into it's pre-upgrade state. Otherwise, click abort."
- }
- } else {
- error("Stage Real control upgrade failed. And interactive mode was disabled, failing...")
- }
- openstack.restoreGaleraDb(pepperEnv)
- common.errorMsg("Stage Real control upgrade failed")
- }
- if(!errorOccured){
-
- if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == true) {
-
- try {
- if (salt.testTarget(pepperEnv, "I@ceph:client and ${control_general_target}*")) {
- salt.enforceState(pepperEnv, "I@ceph:client and ${control_general_target}*", 'ceph.client')
- }
- } catch (Exception er) {
- common.warningMsg("Ceph client state on controllers failed. Please fix it manually")
- }
- try {
- if (salt.testTarget(pepperEnv, "I@ceph:common and ${control_general_target}*")) {
- salt.enforceState(pepperEnv, "I@ceph:common and ${control_general_target}*", ['ceph.common', 'ceph.setup.keyring'])
- }
- } catch (Exception er) {
- common.warningMsg("Ceph common state on controllers failed. Please fix it manually")
- }
- try {
- if (salt.testTarget(pepperEnv, "I@ceph:common and ${control_general_target}*")) {
- salt.runSaltProcessStep(master, "I@ceph:common and ${control_general_target}*", 'service.restart', ['glance-api', 'glance-glare', 'glance-registry'])
- }
- } catch (Exception er) {
- common.warningMsg("Restarting Glance services on controllers failed. Please fix it manually")
- }
- }
-
- // salt 'cmp*' cmd.run 'service nova-compute restart'
- salt.runSaltProcessStep(pepperEnv, 'I@nova:compute', 'service.restart', ['nova-compute'])
- salt.runSaltProcessStep(pepperEnv, control_general_target, 'service.restart', ['nova-conductor'])
- salt.runSaltProcessStep(pepperEnv, control_general_target, 'service.restart', ['nova-scheduler'])
-
- retryStateRun(pepperEnv, proxy_general_target, 'keepalived')
- retryStateRun(pepperEnv, proxy_general_target, 'horizon')
- retryStateRun(pepperEnv, proxy_general_target, 'nginx')
- retryStateRun(pepperEnv, proxy_general_target, 'memcached')
-
- try {
- salt.enforceHighstate(pepperEnv, control_general_target)
- } catch (Exception er) {
- common.errorMsg("Highstate was executed on controller nodes but something failed. Please check it and fix it accordingly.")
- }
-
- try {
- salt.enforceHighstate(pepperEnv, proxy_general_target)
- } catch (Exception er) {
- common.errorMsg("Highstate was executed on proxy nodes but something failed. Please check it and fix it accordingly.")
- }
-
- try {
- salt.cmdRun(pepperEnv, "${control_general_target}01*", '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
- } catch (Exception er) {
- common.errorMsg(er)
- }
-
- /*
- if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == false) {
- if (INTERACTIVE.toBoolean()){
- input message: "Please verify if the control upgrade was successful! If so, by clicking proceed the original VMs disk images will be backed up and snapshot will be merged to the upgraded VMs which will finalize the upgrade procedure"
- }
- node_count = 1
- for (t in proxy_target_hosts) {
- def target = salt.stripDomainName(t)
- def nodeProvider = salt.getNodeProvider(pepperEnv, "${general_target}0${node_count}")
- try {
- salt.cmdRun(pepperEnv, "${nodeProvider}", "[ ! -f /root/${target}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${target}.${domain}/system.qcow2 ./${target}.${domain}.qcow2.bak")
- } catch (Exception e) {
- common.warningMsg('File already exists')
- }
- virsh.liveSnapshotMerge(pepperEnv, nodeProvider, target, snapshotName)
- node_count++
- }
- node_count = 1
- for (t in control_target_hosts) {
- def target = salt.stripDomainName(t)
- def nodeProvider = salt.getNodeProvider(pepperEnv, "${general_target}0${node_count}")
- try {
- salt.cmdRun(pepperEnv, "${nodeProvider}", "[ ! -f /root/${target}.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/${target}.${domain}/system.qcow2 ./${target}.${domain}.qcow2.bak")
- } catch (Exception e) {
- common.warningMsg('File already exists')
- }
- virsh.liveSnapshotMerge(pepperEnv, nodeProvider, target, snapshotName)
- node_count++
- }
- if (INTERACTIVE.toBoolean()){
- input message: "Please scroll up and look for red highlighted messages containing 'virsh blockcommit' string.
- If there are any fix it manually. Otherwise click on proceed."
- }
- }
- */
- }
-}
-
-
-def vcpRollback(pepperEnv) {
- def common = new com.mirantis.mk.Common()
- def salt = new com.mirantis.mk.Salt()
- def openstack = new com.mirantis.mk.Openstack()
- def virsh = new com.mirantis.mk.Virsh()
- def snapshotName = "upgradeSnapshot1"
- try {
- salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.refresh_pillar', [], null, true, 2)
- } catch (Exception e) {
- common.warningMsg("No response from some minions. We should continue to run")
- }
-
- def domain = salt.getDomainName(pepperEnv)
-
- def rollback_target = []
- rollback_target.add('I@horizon:server')
- rollback_target.add('I@keystone:server and not upg*')
-
- def control_general_target = "I@keystone:server and not upg*"
- def upgrade_general_target = "( I@keystone:server and not upg* ) or I@horizon:server"
-
- openstack.restoreGaleraDb(pepperEnv)
-
- for (tgt in rollback_target) {
- def target_hosts = salt.getMinionsSorted(pepperEnv, "${tgt}")
- def node = salt.getFirstMinion(pepperEnv, "${tgt}")
- def general_target = salt.getMinionsGeneralName(pepperEnv, "${tgt}")
-
- if (tgt.toString().contains('horizon:server')) {
- general_target = 'prx'
- } else if (tgt.toString().contains('keystone:server')) {
- general_target = 'ctl'
- }
-
- for (t in target_hosts) {
- def target = salt.stripDomainName(t)
- def nodeProvider = salt.getNodeProvider(pepperEnv, t)
- salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.destroy', ["${target}.${domain}"])
- sleep(2)
- if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == true) {
- salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'file.copy', ["/root/${target}.${domain}.qcow2.bak", "/var/lib/libvirt/images/${target}.${domain}/system.qcow2"])
- try {
- salt.cmdRun(pepperEnv, 'I@salt:master', "salt-key -d ${target}.${domain} -y")
- } catch (Exception e) {
- common.warningMsg('does not match any accepted, unaccepted or rejected keys. They were probably already removed. We should continue to run')
- }
- salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.start', ["${target}.${domain}"])
- } else {
- salt.cmdRun(pepperEnv, "${nodeProvider}", "virsh define /var/lib/libvirt/images/${target}.${domain}.xml")
- salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.start', ["${target}.${domain}"])
- virsh.liveSnapshotAbsent(pepperEnv, nodeProvider, target, snapshotName)
- }
- }
- }
-
- // salt 'cmp*' cmd.run 'service nova-compute restart'
- salt.runSaltProcessStep(pepperEnv, 'I@nova:compute', 'service.restart', ['nova-compute'])
-
- if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == true) {
- for (tgt in rollback_target) {
- salt.minionsPresent(pepperEnv, 'I@salt:master', tgt)
- }
- }
-
- salt.minionsReachable(pepperEnv, 'I@salt:master', upgrade_general_target)
-
- salt.runSaltProcessStep(pepperEnv, control_general_target, 'service.restart', ['nova-conductor'])
- salt.runSaltProcessStep(pepperEnv, control_general_target, 'service.restart', ['nova-scheduler'])
-
- def control_node = salt.getFirstMinion(pepperEnv, control_general_target)
-
- salt.cmdRun(pepperEnv, "${control_node}*", '. /root/keystonerc; nova service-list; glance image-list; nova flavor-list; nova hypervisor-list; nova list; neutron net-list; cinder list; heat service-list')
-}
-
-
-def pepperEnv = "pepperEnv"
-timeout(time: 12, unit: 'HOURS') {
- node() {
-
- stage('Setup virtualenv for Pepper') {
- python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- }
-
- if (STAGE_TEST_UPGRADE.toBoolean() == true) {
- stage('Test upgrade') {
- vcpTestUpgrade(pepperEnv)
- }
- }
-
- if (STAGE_REAL_UPGRADE.toBoolean() == true) {
- stage('Real upgrade') {
- // # actual upgrade
- vcpRealUpgrade(pepperEnv)
- }
-
- if (INTERACTIVE.toBoolean() && STAGE_REAL_UPGRADE.toBoolean() == true && STAGE_ROLLBACK_UPGRADE.toBoolean() == true) {
- stage('Ask for manual confirmation') {
- input message: "Please verify if the control upgrade was successful. If it did not succeed, in the worst scenario, you can click on proceed to continue with control-upgrade-rollback. Do you want to continue with the rollback?"
- }
- }
- }
-
- if (STAGE_ROLLBACK_UPGRADE.toBoolean() == true) {
- stage('Rollback upgrade') {
- if (INTERACTIVE.toBoolean()){
- stage('Ask for manual confirmation') {
- input message: "Before rollback please check the documentation for reclass model changes. Do you really want to continue with the rollback?"
- }
- }
- vcpRollback(pepperEnv)
- }
- }
- }
+ }
}
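
The refactoring above replaces the bespoke vcpRealUpgrade/vcpRollback bodies with a generic loop that walks each target through named phases via common.stageWrapper. Below is a minimal plain-Groovy sketch of that pattern, runnable outside Jenkins; stageWrapper and runPhase here are simplified stand-ins, not the real com.mirantis.mk implementations.

    def upgradeStageMap = [
        'Pre upgrade'      : ['Status': 'NOT_LAUNCHED'],
        'Upgrade OpenStack': ['Status': 'NOT_LAUNCHED'],
    ]

    // Stand-in for openstack.runOpenStackUpgradePhase(); the real helper applies salt states.
    def runPhase(String target, String phase) {
        println "[${target}] running phase: ${phase}"
    }

    // Stand-in for common.stageWrapper(): run the body and record the stage outcome.
    def stageWrapper(Map stageMap, String stageName, String target, Closure body) {
        try {
            body()
            stageMap[stageName]['Status'] = 'SUCCESS'
        } catch (Exception e) {
            stageMap[stageName]['Status'] = 'FAILURE'
            throw e
        }
    }

    def upgradeTargets = ['ctl01*', 'ctl02*']
    for (target in upgradeTargets) {
        stageWrapper(upgradeStageMap, 'Pre upgrade', target) {
            runPhase(target, 'pre')
            runPhase(target, 'verify')
        }
    }
    upgradeStageMap.each { name, info -> println "${name}: ${info['Status']}" }
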
diff --git a/openstack-data-upgrade.groovy b/openstack-data-upgrade.groovy
new file mode 100644
index 0000000..88bbf57
--- /dev/null
+++ b/openstack-data-upgrade.groovy
@@ -0,0 +1,185 @@
+/**
+ * Upgrade OpenStack packages on gateway (data plane) nodes.
+ * Update packages on the given nodes.
+ *
+ * Expected parameters:
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API.
+ * SALT_MASTER_URL Full Salt API address [http://10.10.10.1:8000].
+ * OS_DIST_UPGRADE Upgrade system packages including kernel (apt-get dist-upgrade)
+ * OS_UPGRADE Upgrade all installed applications (apt-get upgrade)
+ * TARGET_SERVERS Comma separated list of salt compound definitions to upgrade.
+ * INTERACTIVE Ask interactive questions during pipeline run (bool).
+ *
+ * TODO:
+ * * Add OS_RELEASE_UPGRADE
+**/
+
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
+def openstack = new com.mirantis.mk.Openstack()
+def debian = new com.mirantis.mk.Debian()
+
+def interactive = INTERACTIVE.toBoolean()
+def LinkedHashMap upgradeStageMap = [:]
+
+upgradeStageMap.put('Pre upgrade',
+ [
+ 'Description': 'Only non-destructive actions will be applied during this phase. Basic API and service verification will be performed.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * No service downtime
+ * No workload downtime''',
+ 'Launched actions': '''
+ * Verify API, perform basic CRUD operations for services.
+ * Verify that compute/neutron agents on hosts are up.
+ * Run some built-in service checkers like keystone-manage doctor or nova-status upgrade check.'''
+ 'State result': 'Basic checks around services API are passed.'
+ ])
+upgradeStageMap.put('Upgrade pre: migrate resources',
+ [
+ 'Description': 'To minimize workload downtime, resources are migrated smoothly during this phase. Neutron agents on the node are set to the admin_disabled state to make sure they are quickly migrated to a new node (1-2 pings lost). Instances might be live-migrated from the host (this stage is optional and configured from pillar).',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * No service downtime
+ * Small workload downtime''',
+ 'Launched actions': '''
+ * Set neutron agents to admin disabled state
+ * Migrate instances if allowed (optional).''',
+ 'State result': '''
+ * Hosts are being removed from scheduling to host new resources.
+ * If instance migration was performed no instances should be present.'''
+ ])
+upgradeStageMap.put('Upgrade OpenStack',
+ [
+ 'Description': 'OpenStack python code will be upgraded during this stage. No workload downtime is expected.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * OpenStack services might flap
+ * No workload downtime''',
+ 'Launched actions': '''
+ * Install new version of OpenStack packages
+ * Render the new version of configs
+ * Apply offline dbsync
+ * Start OpenStack services
+ * Verify agents are alive/connected
+ * Run basic API validation''',
+ 'State result': '''
+ * OpenStack packages are upgraded
+ * Services are running
+ * Basic checks around services API are passed
+ * Verified that agents/services on data plane nodes are connected to new control plane
+'''
+ ])
+upgradeStageMap.put('Upgrade OS',
+ [
+ 'Description': 'Optional step. OS packages will be upgraded during this phase; depending on the job parameters, dist-upgrade might be called and a reboot of the node executed.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * OpenStack services might flap
+ * No workload downtime
+ * The nodes might be rebooted''',
+ 'Launched actions': '''
+ * Install new version of system packages
+ * If doing dist-upgrade, a new kernel might be installed and the node rebooted
+ * Verify agents are alive/connected
+ * Run basic API validation''',
+ 'State result': '''
+ * System packages are updated
+ * Services are running
+ * Basic checks around services API are passed
+ * Verified that agents/services on data plane nodes are connected
+ * Node might be rebooted
+'''
+ ])
+upgradeStageMap.put('Upgrade post: enable resources',
+ [
+ 'Description': 'Verify that agents/services on the node are up and add them back to scheduling.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * No service downtime
+ * No workload downtime''',
+ 'Launched actions': '''
+ * Set neutron agents to admin state enabled
+ * Enable nova-compute services''',
+ 'State result': 'Hosts are being added to scheduling to host new resources',
+ ])
+upgradeStageMap.put('Post upgrade',
+ [
+ 'Description': 'Only non-destructive actions will be applied during this phase, such as cleaning up old configs and temporary files, and running online dbsyncs.',
+ 'Status': 'NOT_LAUNCHED',
+ 'Expected behaviors': '''
+ * No service downtime
+ * No workload downtime''',
+ 'Launched actions': '''
+ * Clean up OS client configs''',
+ 'State result': 'Temporary resources are being cleaned.'
+ ])
+
+
+def env = "env"
+timeout(time: 24, unit: 'HOURS') {
+ node() {
+
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(env, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ }
+
+ def targetNodes = salt.getMinionsSorted(env, TARGET_SERVERS)
+ def migrateResources = true
+
+ if (targetNodes.isEmpty()) {
+ error("No servers for upgrade matched by ${TARGET_SERVERS}")
+ }
+ if (targetNodes.size() == 1 ){
+ migrateResources = false
+ }
+
+ common.printStageMap(upgradeStageMap)
+ if (interactive){
+ input message: common.getColorizedString(
+ "Above you can find detailed info this pipeline will execute.\nThe info provides brief description of each stage, actions that will be performed and service/workload impact during each stage.\nPlease read it carefully.", "yellow")
+ }
+
+ for (target in targetNodes){
+ common.stageWrapper(upgradeStageMap, "Pre upgrade", target, interactive) {
+ openstack.runOpenStackUpgradePhase(env, target, 'pre')
+ openstack.runOpenStackUpgradePhase(env, target, 'verify')
+ }
+
+ common.stageWrapper(upgradeStageMap, "Upgrade pre: migrate resources", target, interactive) {
+ if (migrateResources) {
+ common.infoMsg("Migrating neutron resources from ${target}")
+ openstack.runOpenStackUpgradePhase(env, target, 'upgrade.pre')
+ // Start upgrade only when resources were successfully migrated
+ }
+ }
+
+ common.stageWrapper(upgradeStageMap, "Upgrade OpenStack", target, interactive) {
+ // Stop services on the node, then do the actual step-by-step orchestration here.
+ openstack.runOpenStackUpgradePhase(env, target, 'service_stopped')
+ openstack.runOpenStackUpgradePhase(env, target, 'pkgs_latest')
+ openstack.runOpenStackUpgradePhase(env, target, 'render_config')
+ openstack.runOpenStackUpgradePhase(env, target, 'service_running')
+ openstack.applyOpenstackAppsStates(env, target)
+ openstack.runOpenStackUpgradePhase(env, target, 'verify')
+ }
+ common.stageWrapper(upgradeStageMap, "Upgrade OS", target, interactive) {
+ if (OS_DIST_UPGRADE.toBoolean() == true){
+ upgrade_mode = 'dist-upgrade'
+ } else if (OS_UPGRADE.toBoolean() == true){
+ upgrade_mode = 'upgrade'
+ }
+ if (OS_DIST_UPGRADE.toBoolean() == true || OS_UPGRADE.toBoolean() == true) {
+ debian.osUpgradeNode(env, target, upgrade_mode, false)
+ }
+ openstack.applyOpenstackAppsStates(env, target)
+ openstack.runOpenStackUpgradePhase(env, target, 'verify')
+ }
+
+ common.stageWrapper(upgradeStageMap, "Upgrade post: enable resources", target, interactive) {
+ openstack.runOpenStackUpgradePhase(env, target, 'upgrade.post')
+ }
+ }
+ }
+}
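
One detail worth calling out in the new pipeline: resource migration is skipped when only a single node matches TARGET_SERVERS, since there is nowhere to move workloads to. A small sketch of that decision follows; getMinionsSorted() below is a mock standing in for the salt-backed helper.

    def getMinionsSorted(String compound) {
        // pretend the salt compound matched these minions
        compound == 'single' ? ['gtw01.local'] : ['gtw01.local', 'gtw02.local']
    }

    def planUpgrade(String compound) {
        def targetNodes = getMinionsSorted(compound)
        if (targetNodes.isEmpty()) {
            throw new IllegalStateException("No servers for upgrade matched by ${compound}")
        }
        // with a single matched node there is nowhere to migrate workloads to
        [nodes: targetNodes, migrate: targetNodes.size() > 1]
    }

    assert !planUpgrade('single').migrate
    assert planUpgrade('pair').migrate
    println planUpgrade('pair')
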
diff --git a/ovs-gateway-upgrade.groovy b/ovs-gateway-upgrade.groovy
deleted file mode 100644
index 87cf828..0000000
--- a/ovs-gateway-upgrade.groovy
+++ /dev/null
@@ -1,163 +0,0 @@
-/**
- * Update packages on given nodes
- *
- * Expected parameters:
- * SALT_MASTER_CREDENTIALS Credentials to the Salt API.
- * SALT_MASTER_URL Full Salt API address [https://10.10.10.1:8000].
- * TARGET_SERVERS Salt compound target to match nodes to be updated [*, G@osfamily:debian].
- * TARGET_SUBSET_TEST Number of nodes to list package updates, empty string means all targetted nodes.
- * TARGET_SUBSET_LIVE Number of selected nodes to live apply selected package update.
- * INTERACTIVE Ask interactive questions during pipeline run (bool).
- *
-**/
-
-def common = new com.mirantis.mk.Common()
-def salt = new com.mirantis.mk.Salt()
-def python = new com.mirantis.mk.Python()
-
-def pepperEnv = "pepperEnv"
-def targetTestSubset
-def targetLiveSubset
-def targetLiveAll
-def minions
-def result
-def args
-def command
-def commandKwargs
-def probe = 1
-timeout(time: 12, unit: 'HOURS') {
- node() {
- try {
-
- stage('Setup virtualenv for Pepper') {
- python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- }
-
- stage('List target servers') {
- minions = salt.getMinions(pepperEnv, TARGET_SERVERS)
-
- if (minions.isEmpty()) {
- throw new Exception("No minion was targeted")
- }
-
- if (TARGET_SUBSET_TEST != "") {
- targetTestSubset = minions.subList(0, Integer.valueOf(TARGET_SUBSET_TEST)).join(' or ')
- } else {
- targetTestSubset = minions.join(' or ')
- }
- targetLiveSubset = minions.subList(0, Integer.valueOf(TARGET_SUBSET_LIVE)).join(' or ')
- targetTestSubsetProbe = minions.subList(0, probe).join(' or ')
- targetLiveSubsetProbe = minions.subList(0, probe).join(' or ')
-
- targetLiveAll = minions.join(' or ')
- common.infoMsg("Found nodes: ${targetLiveAll}")
- common.infoMsg("Selected test nodes: ${targetTestSubset}")
- common.infoMsg("Selected sample nodes: ${targetLiveSubset}")
- }
-
-
- stage("Add new repos on test nodes") {
- salt.enforceState(pepperEnv, targetTestSubset, 'linux.system.repo')
- }
-
- stage("List package upgrades") {
- salt.runSaltProcessStep(pepperEnv, targetTestSubset, 'pkg.list_upgrades', [], null, true)
- }
-
- if (INTERACTIVE.toBoolean()){
- stage('Confirm upgrade on sample nodes') {
- input message: "Please verify the list of packages that you want to be upgraded. Do you want to continue with upgrade?"
- }
- }
-
- stage("Add new repos on sample nodes") {
- salt.enforceState(pepperEnv, targetLiveSubset, 'linux.system.repo')
- }
-
- args = "apt-get -y -s -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade"
-
- stage('Test upgrade on sample') {
- try {
- salt.cmdRun(pepperEnv, targetLiveSubset, args)
- } catch (Exception er) {
- print(er)
- }
- }
-
- if (INTERACTIVE.toBoolean()){
- stage('Confirm upgrade on sample') {
- input message: "Please verify if there are packages that it wants to downgrade. If so, execute apt-cache policy on them and verify if everything is fine. Do you want to continue with upgrade?"
- }
- }
-
- command = "cmd.run"
- args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
-
- stage('Apply package upgrades on sample') {
- out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
- salt.printSaltCommandResult(out)
- }
-
- args = "sudo /usr/share/openvswitch/scripts/ovs-ctl start"
-
- stage('Start ovs on sample nodes') {
- out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
- salt.printSaltCommandResult(out)
- }
- stage("Run Neutron state on sample nodes") {
- salt.enforceState(pepperEnv, targetLiveSubset, ['neutron'])
- }
-
- stage("Run Highstate on sample nodes") {
- try {
- salt.enforceHighstate(pepperEnv, targetLiveSubset)
- } catch (Exception er) {
- common.errorMsg("Highstate was executed on ${targetLiveSubset} but something failed. Please check it and fix it accordingly.")
- }
- }
-
- if (INTERACTIVE.toBoolean()){
- stage('Confirm upgrade on all targeted nodes') {
- timeout(time: 2, unit: 'HOURS') {
- input message: "Verify that the upgraded sample nodes are working correctly. If so, do you want to approve live upgrade on ${targetLiveAll} nodes?"
- }
- }
- }
-
- stage("Add new repos on all targeted nodes") {
- salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo')
- }
-
- args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
-
- stage('Apply package upgrades on all targeted nodes') {
- out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
- salt.printSaltCommandResult(out)
- }
-
- args = "sudo /usr/share/openvswitch/scripts/ovs-ctl start"
-
- stage('Start ovs on all targeted nodes') {
- out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
- salt.printSaltCommandResult(out)
- }
- stage("Run Neutron state on all targeted nodes") {
- salt.enforceState(pepperEnv, targetLiveAll, ['neutron'])
- }
-
- stage("Run Highstate on all targeted nodes") {
- try {
- salt.enforceHighstate(pepperEnv, targetLiveAll)
- } catch (Exception er) {
- common.errorMsg("Highstate was executed ${targetLiveAll} but something failed. Please check it and fix it accordingly.")
- }
- }
-
- } catch (Throwable e) {
- // If there was an error or exception thrown, the build failed
- currentBuild.result = "FAILURE"
- currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
- throw e
- }
- }
-}
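
The deleted pipeline carved test and live subsets out of the matched minions with subList() and joined them into salt compound expressions. For reference, a sketch of that selection logic against a plain list (the minion names are made up; the real pipeline fetched them via salt.getMinions()):

    def minions = ['gtw01', 'gtw02', 'gtw03', 'gtw04']

    def subsetCompound(List all, String subsetSize) {
        // an empty string means "all targeted nodes", mirroring TARGET_SUBSET_TEST
        def picked = subsetSize ? all.subList(0, Integer.valueOf(subsetSize)) : all
        picked.join(' or ')
    }

    assert subsetCompound(minions, '2') == 'gtw01 or gtw02'
    assert subsetCompound(minions, '') == 'gtw01 or gtw02 or gtw03 or gtw04'
    println subsetCompound(minions, '1')
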
diff --git a/promote-mirror-ubuntu-related.groovy b/promote-mirror-ubuntu-related.groovy
new file mode 100644
index 0000000..f5e97be
--- /dev/null
+++ b/promote-mirror-ubuntu-related.groovy
@@ -0,0 +1,27 @@
+/**
+ *
+ * Promote Ubuntu-related mirrors at the same time.
+ * The ubuntu|maas|maas-ephemeral mirrors should always be promoted together.
+ *
+ * Expected parameters:
+ * MCP_VERSION
+ * SNAPSHOT_NAME - Snapshot name to set
+ * SNAPSHOT_ID - Set name for specified snapshot ID
+ */
+
+common = new com.mirantis.mk.Common()
+
+timeout(time: 1, unit: 'HOURS') {
+ node() {
+ stage("Promote") {
+ catchError {
+ for (String jobname : ['mirror-snapshot-name-maas-xenial', 'mirror-snapshot-name-ubuntu', 'ebf-hotfix-ubuntu', 'ebf-update-ubuntu', 'mirror-snapshot-name-maas-ephemeral-v3']) {
+ build job: jobname, parameters: [
+ [$class: 'StringParameterValue', name: 'SNAPSHOT_NAME', value: SNAPSHOT_NAME],
+ [$class: 'StringParameterValue', name: 'SNAPSHOT_ID', value: SNAPSHOT_ID],
+ ]
+ }
+ }
+ }
+ }
+}
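
The job above fans the same snapshot parameters out to every downstream job so the ubuntu, maas and maas-ephemeral mirrors always move in lockstep. A compact sketch of the fan-out, with triggerJob standing in for Jenkins' build step and made-up parameter values:

    def triggerJob(String name, Map params) {
        println "triggering ${name} with ${params}"
    }

    def snapshotParams = [SNAPSHOT_NAME: 'proposed', SNAPSHOT_ID: '2019-01-15-030201']
    ['mirror-snapshot-name-maas-xenial', 'mirror-snapshot-name-ubuntu',
     'ebf-hotfix-ubuntu', 'ebf-update-ubuntu',
     'mirror-snapshot-name-maas-ephemeral-v3'].each { jobName ->
        triggerJob(jobName, snapshotParams)
    }
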
diff --git a/promote-vcp-images.groovy b/promote-vcp-images.groovy
new file mode 100644
index 0000000..7a1763f
--- /dev/null
+++ b/promote-vcp-images.groovy
@@ -0,0 +1,136 @@
+/**
+ *
+ * Promote VCP(qcow2) images
+ *
+ * Expected parameters:
+ * VCP_IMAGE_LIST - multiline with qcow2 file names
+ * TAG - Target tag of the image. Possible values: "nightly|testing|proposed|201X.X.X"
+ * SOURCE_TAG - Initial tag to be tagged with TAG. Will replace SUBS_SOURCE_VCP_IMAGE_TAG in VCP_IMAGE_LIST
+ * UPLOAD_URL - WebDav URL with credentials to download images from and upload them to
+ *
+ */
+
+def common = new com.mirantis.mk.Common()
+def jenkinsUtils = new com.mirantis.mk.JenkinsUtils()
+
+// Better to choose a slave with SSD and a fast network path to the WebDav host
+slaveNode = env.SLAVE_NODE ?: 'jsl23.mcp.mirantis.net'
+def job_env = env.getEnvironment().findAll { k, v -> v }
+def verify = job_env.VERIFY_DOWNLOAD ?: true
+def overwrite = (job_env.FORCE_OVERWRITE ?: 'false').toBoolean()
+
+
+
+timeout(time: 6, unit: 'HOURS') {
+ node(slaveNode) {
+
+ String description = ''
+ insufficientPermissions = false
+ try {
+ // Pre-run verify
+ // promote is restricted to users in aptly-promote-users LDAP group
+ if (!jenkinsUtils.currentUserInGroups(["mcp-cicd-admins", "aptly-promote-users"])) {
+ insufficientPermissions = true
+ error(String.format("You don't have permissions to make promote from source:%s to target:%s! Only CI/CD and QA team can perform promote.", job_env.SOURCE_TAG, job_env.TAG))
+ }
+ // Check for required opts
+ for (opt in ['UPLOAD_URL', 'SOURCE_TAG', 'TAG', 'VCP_IMAGE_LIST']) {
+ if (!job_env.get(opt, null)) {
+ error("Invalid input params, at least ${opt} param missing")
+ }
+ }
+ def images = job_env.VCP_IMAGE_LIST.trim().tokenize()
+ for (image in images) {
+ if (image.startsWith('#')) {
+ common.warningMsg("Skipping image ${image}")
+ continue
+ }
+ common.infoMsg("Replacing SUBS_SOURCE_VCP_IMAGE_TAG => ${job_env.SOURCE_TAG}")
+ sourceImage = image.replace('SUBS_SOURCE_VCP_IMAGE_TAG', job_env.SOURCE_TAG)
+ targetImage = image.replace('SUBS_SOURCE_VCP_IMAGE_TAG', job_env.TAG)
+
+ // TODO: normalize url's?
+ sourceImageUrl = job_env.UPLOAD_URL + '/' + sourceImage
+ sourceImageMd5Url = job_env.UPLOAD_URL + '/' + sourceImage + '.md5'
+ targetImageUrl = job_env.UPLOAD_URL + '/' + targetImage
+ targetImageMd5Url = job_env.UPLOAD_URL + '/' + targetImage + '.md5'
+
+ common.infoMsg("Attempt to download: ${sourceImage} => ${targetImage}")
+ common.retry(3, 5) {
+ sh(script: "wget --progress=dot:giga --auth-no-challenge -O ${targetImage} ${sourceImageUrl}")
+ }
+ def targetImageMd5 = common.cutOrDie("md5sum ${targetImage} | tee ${targetImage}.md5", 0)
+ if (verify.toBoolean()) {
+ common.infoMsg("Checking md5's ")
+ sh(script: "wget --progress=dot:giga --auth-no-challenge -O ${targetImage}_source_md5 ${sourceImageMd5Url}")
+ def sourceImageMd5 = readFile(file: "${targetImage}_source_md5").tokenize(' ')[0]
+ // Compare downloaded and remote files
+ if (sourceImageMd5 != targetImageMd5) {
+ error("Image ${targetImage} md5sum verify failed!")
+ } else {
+ common.infoMsg("sourceImageMd5: ${sourceImageMd5} == target to upload ImageMd5: ${targetImageMd5}")
+ }
+ // Compare the downloaded file with the remote file-to-be-promoted; if they match there is no point promoting the same file
+ remoteImageMd5Status = sh(script: "wget --progress=dot:giga --auth-no-challenge -O ${targetImage}_expected_target_md5 ${targetImageMd5Url}", returnStatus: true)
+ if (remoteImageMd5Status == 8) {
+ common.infoMsg("Target image .md5 file does not exist yet. Continuing..")
+ } else {
+ def remoteImageMd5 = readFile(file: "${targetImage}_expected_target_md5").tokenize(' ')[0]
+ if (sourceImageMd5 == remoteImageMd5) {
+ common.infoMsg("sourceImageMd5: ${sourceImageMd5} and target to upload ImageMd5: ${targetImageMd5} are same")
+ common.warningMsg("Skipping to upload: ${targetImage} since it already same")
+ description += "Skipping to upload: ${targetImage} since it already same\n"
+ continue
+ }
+ }
+ common.infoMsg("Check, that we are not going to overwrite released file..")
+ if (['proposed', 'testing', 'nightly'].contains(job_env.TAG)) {
+ common.infoMsg("Uploading to ${job_env.TAG} looks safe..")
+ } else if (['stable'].contains(job_env.TAG)) {
+ common.warningMsg("Uploading to ${job_env.TAG} not safe! But still possible")
+ } else {
+ common.warningMsg("Looks like uploading to new release: ${job_env.TAG}. Checking, that it is not exist yet..")
+ remoteImageStatus = ''
+ remoteImageStatus = sh(script: "wget --auth-no-challenge --spider ${targetImageUrl} 2>/dev/null", returnStatus: true)
+ // wget returns exit code 8 if the file does not exist
+ if (remoteImageStatus != 8 && !overwrite) {
+ error("Attempt to overwrite existing release! Target: ${targetImage} already exist!")
+ }
+ }
+ }
+
+ common.infoMsg("Attempt to UPLOAD: ${targetImage} => ${targetImageUrl}")
+ //
+ def uploadImageStatus = ''
+ def uploadImageMd5Status = ''
+ common.retry(3, 5) {
+ uploadImageStatus = sh(script: "curl -f -T ${targetImage} ${job_env.UPLOAD_URL}", returnStatus: true)
+ if (uploadImageStatus != 0) {
+ error("Uploading file: ${targetImage} failed!")
+ }
+ }
+ uploadImageMd5Status = sh(script: "curl -f -T ${targetImage}.md5 ${job_env.UPLOAD_URL}", returnStatus: true)
+ if (uploadImageMd5Status != 0) {
+ error("Uploading file: ${targetImage}.md5 failed!")
+ }
+
+ description += "<a href='http://apt.mcp.mirantis.net/images/${targetImage}'>${job_env.SOURCE_TAG}=>${targetImage}</a>"
+ }
+ currentBuild.description = description
+ } catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ if (insufficientPermissions) {
+ currentBuild.result = "ABORTED"
+ currentBuild.description = "Promote aborted due to insufficient permissions"
+ } else {
+ currentBuild.result = "FAILURE"
+ currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+ }
+ throw e
+ }
+ finally {
+ common.infoMsg("Cleanup..")
+ sh(script: 'find . -mindepth 1 -delete > /dev/null || true')
+ }
+ }
+}
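
The core of the promote job is the md5 gate: hash the downloaded source image, compare it against the published .md5, and skip the upload when the target already carries identical content. A self-contained sketch of that check, using local file I/O in place of wget/curl:

    import java.security.MessageDigest

    def md5Of(File f) {
        MessageDigest.getInstance('MD5').digest(f.bytes).encodeHex().toString()
    }

    def shouldUpload(File localImage, String remoteMd5) {
        if (remoteMd5 == null) {
            return true                  // no published .md5 yet: nothing to skip
        }
        md5Of(localImage) != remoteMd5   // identical content: skip the re-upload
    }

    def img = File.createTempFile('vcp', '.qcow2')
    img.deleteOnExit()
    img.text = 'fake image payload'
    assert shouldUpload(img, null)
    assert !shouldUpload(img, md5Of(img))
    println 'md5 gate OK'
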
diff --git a/release-mcp-version.groovy b/release-mcp-version.groovy
index b1b3d77..0d9ce5e 100644
--- a/release-mcp-version.groovy
+++ b/release-mcp-version.groovy
@@ -16,92 +16,134 @@
* DOCKER_IMAGES
* GIT_CREDENTIALS
* GIT_REPO_LIST
+ * VCP_IMAGE_LIST - list of images
+ * SYNC_VCP_IMAGE_TO_S3 - boolean
+ * RELEASE_VCP_IMAGES - boolean
* EMAIL_NOTIFY
* NOTIFY_RECIPIENTS
- * NOTIFY_TEXT
- *
-*/
+ *
+ */
common = new com.mirantis.mk.Common()
-git = new com.mirantis.mk.Git()
-def triggerAptlyPromoteJob(aptlyUrl, components, diffOnly, dumpPublish, packages, recreate, source, storages, target){
- build job: "aptly-promote-all-testing-stable", parameters: [
- [$class: 'StringParameterValue', name: 'APTLY_URL', value: aptlyUrl],
- [$class: 'StringParameterValue', name: 'COMPONENTS', value: components],
- [$class: 'BooleanParameterValue', name: 'DIFF_ONLY', value: diffOnly],
- [$class: 'BooleanParameterValue', name: 'DUMP_PUBLISH', value: dumpPublish],
- [$class: 'StringParameterValue', name: 'PACKAGES', value: packages],
- [$class: 'BooleanParameterValue', name: 'RECREATE', value: recreate],
- [$class: 'StringParameterValue', name: 'SOURCE', value: source],
- [$class: 'StringParameterValue', name: 'STORAGES', value: storages],
- [$class: 'StringParameterValue', name: 'TARGET', value: target],
- ]
+syncVcpImagesToS3 = (env.SYNC_VCP_IMAGE_TO_S3 ?: 'false').toBoolean()
+emailNotify = (env.EMAIL_NOTIFY ?: 'false').toBoolean()
+
+def triggerAptlyPromoteJob(aptlyUrl, components, diffOnly, dumpPublish, packages, recreate, source, storages, target) {
+ build job: "aptly-promote-all-testing-stable", parameters: [
+ [$class: 'StringParameterValue', name: 'APTLY_URL', value: aptlyUrl],
+ [$class: 'StringParameterValue', name: 'COMPONENTS', value: components],
+ [$class: 'BooleanParameterValue', name: 'DIFF_ONLY', value: diffOnly],
+ [$class: 'BooleanParameterValue', name: 'DUMP_PUBLISH', value: dumpPublish],
+ [$class: 'StringParameterValue', name: 'PACKAGES', value: packages],
+ [$class: 'BooleanParameterValue', name: 'RECREATE', value: recreate],
+ [$class: 'StringParameterValue', name: 'SOURCE', value: source],
+ [$class: 'StringParameterValue', name: 'STORAGES', value: storages],
+ [$class: 'StringParameterValue', name: 'TARGET', value: target],
+ ]
}
def triggerDockerMirrorJob(dockerCredentials, dockerRegistryUrl, targetTag, imageList, sourceImageTag) {
- build job: "docker-images-mirror", parameters: [
- [$class: 'StringParameterValue', name: 'TARGET_REGISTRY_CREDENTIALS_ID', value: dockerCredentials],
- [$class: 'StringParameterValue', name: 'REGISTRY_URL', value: dockerRegistryUrl],
- [$class: 'StringParameterValue', name: 'IMAGE_TAG', value: targetTag],
- [$class: 'StringParameterValue', name: 'IMAGE_LIST', value: imageList],
- [$class: 'StringParameterValue', name: 'SOURCE_IMAGE_TAG', value: sourceImageTag],
- ]
+ build job: "docker-images-mirror", parameters: [
+ [$class: 'StringParameterValue', name: 'TARGET_REGISTRY_CREDENTIALS_ID', value: dockerCredentials],
+ [$class: 'StringParameterValue', name: 'REGISTRY_URL', value: dockerRegistryUrl],
+ [$class: 'StringParameterValue', name: 'IMAGE_TAG', value: targetTag],
+ [$class: 'TextParameterValue', name: 'IMAGE_LIST', value: imageList],
+ [$class: 'StringParameterValue', name: 'SOURCE_IMAGE_TAG', value: sourceImageTag],
+ ]
}
def triggerMirrorRepoJob(snapshotId, snapshotName) {
- build job: "mirror-snapshot-name-all", parameters: [
- [$class: 'StringParameterValue', name: 'SNAPSHOT_NAME', value: snapshotName],
- [$class: 'StringParameterValue', name: 'SNAPSHOT_ID', value: snapshotId],
- ]
+ build job: "mirror-snapshot-name-all", parameters: [
+ [$class: 'StringParameterValue', name: 'SNAPSHOT_NAME', value: snapshotName],
+ [$class: 'StringParameterValue', name: 'SNAPSHOT_ID', value: snapshotId],
+ ]
+}
+
+def triggerEbfRepoJob(snapshotId, snapshotName) {
+ build job: "ebf-snapshot-name-all", parameters: [
+ [$class: 'StringParameterValue', name: 'SNAPSHOT_NAME', value: snapshotName],
+ [$class: 'StringParameterValue', name: 'SNAPSHOT_ID', value: snapshotId],
+ ]
}
def triggerGitTagJob(gitRepoList, gitCredentials, tag, sourceTag) {
- build job: "tag-git-repos-stable", parameters: [
- [$class: 'StringParameterValue', name: 'GIT_REPO_LIST', value: gitRepoList],
- [$class: 'StringParameterValue', name: 'GIT_CREDENTIALS', value: gitCredentials],
- [$class: 'StringParameterValue', name: 'TAG', value: tag],
- [$class: 'StringParameterValue', name: 'SOURCE_TAG', value: sourceTag],
- ]
+ // There are no `nightly` or `testing` build IDs in the release process
+ // for git repos
+ if ( sourceTag in ['nightly', 'testing'] ) sourceTag = 'master'
+ build job: "tag-git-repos-all", parameters: [
+ [$class: 'TextParameterValue', name: 'GIT_REPO_LIST', value: gitRepoList],
+ [$class: 'StringParameterValue', name: 'GIT_CREDENTIALS', value: gitCredentials],
+ [$class: 'StringParameterValue', name: 'TAG', value: tag],
+ [$class: 'StringParameterValue', name: 'SOURCE_TAG', value: sourceTag],
+ ]
+}
+
+def triggerPromoteVCPJob(VcpImageList, tag, sourceTag) {
+ build job: "promote-vcp-images-all", parameters: [
+ [$class: 'TextParameterValue', name: 'VCP_IMAGE_LIST', value: VcpImageList],
+ [$class: 'StringParameterValue', name: 'TAG', value: tag],
+ [$class: 'StringParameterValue', name: 'SOURCE_TAG', value: sourceTag],
+ [$class: 'BooleanParameterValue', name: 'FORCE_OVERWRITE', value: true],
+ ]
+}
+
+def triggerSyncVCPJob(VcpImageList) {
+ build job: "upload-to-s3", parameters: [
+ [$class: 'TextParameterValue', name: 'FILENAMES',
+ value: VcpImageList + VcpImageList.collect({it + '.md5'})]
+ ]
}
timeout(time: 12, unit: 'HOURS') {
- node() {
- try {
- stage("Promote"){
- if(RELEASE_APTLY.toBoolean())
- {
- common.infoMsg("Promoting Aptly")
- triggerAptlyPromoteJob(APTLY_URL, 'all', false, true, 'all', false, "(.*)/${SOURCE_REVISION}", APTLY_STORAGES, "{0}/${TARGET_REVISION}")
- }
+ node() {
+ try {
+ stage("Promote") {
+ if (RELEASE_APTLY.toBoolean()) {
+ common.infoMsg("Promoting Aptly")
+ triggerAptlyPromoteJob(APTLY_URL, 'all', false, true, 'all', false, "(.*)/${SOURCE_REVISION}", APTLY_STORAGES, "{0}/${TARGET_REVISION}")
+ }
- if(RELEASE_DEB_MIRRORS.toBoolean()){
- common.infoMsg("Promoting Debmirrors")
- triggerMirrorRepoJob(SOURCE_REVISION, TARGET_REVISION)
- }
+ if (RELEASE_DEB_MIRRORS.toBoolean()) {
+ common.infoMsg("Promoting Debmirrors")
+ triggerMirrorRepoJob(SOURCE_REVISION, TARGET_REVISION)
+ }
- if(RELEASE_DOCKER.toBoolean())
- {
- common.infoMsg("Promoting Docker images")
- triggerDockerMirrorJob(DOCKER_CREDENTIALS, DOCKER_URL, TARGET_REVISION, DOCKER_IMAGES, SOURCE_REVISION)
- }
+ if (RELEASE_EBF_MIRRORS.toBoolean()) {
+ common.infoMsg("Promoting Emergency Bug Fix Debmirrors")
+ triggerEbfRepoJob(SOURCE_REVISION, TARGET_REVISION)
+ }
- if(RELEASE_GIT.toBoolean())
- {
- common.infoMsg("Promoting Git repositories")
- triggerGitTagJob(GIT_REPO_LIST, GIT_CREDENTIALS, TARGET_REVISION, SOURCE_REVISION)
+ if (RELEASE_DOCKER.toBoolean()) {
+ common.infoMsg("Promoting Docker images")
+ triggerDockerMirrorJob(DOCKER_CREDENTIALS, DOCKER_URL, TARGET_REVISION, DOCKER_IMAGES, SOURCE_REVISION)
+ }
- }
- if (EMAIL_NOTIFY.toBoolean()) {
- emailext(to: NOTIFY_RECIPIENTS,
- body: NOTIFY_TEXT,
- subject: "MCP Promotion has been done")
- }
- }
- } catch (Throwable e) {
+ if (RELEASE_GIT.toBoolean()) {
+ common.infoMsg("Promoting Git repositories")
+ triggerGitTagJob(GIT_REPO_LIST, GIT_CREDENTIALS, TARGET_REVISION, SOURCE_REVISION)
+
+ }
+ if (RELEASE_VCP_IMAGES.toBoolean()) {
+ common.infoMsg("Promoting VCP images")
+ triggerPromoteVCPJob(VCP_IMAGE_LIST, TARGET_REVISION, SOURCE_REVISION)
+
+ }
+ if (syncVcpImagesToS3) {
+ common.infoMsg("Syncing VCP images from internal: http://apt.mcp.mirantis.net/images to s3: images.mirantis.com")
+ triggerSyncVCPJob('')
+ }
+ if (emailNotify) {
+ notify_text = "MCP Promotion ${env.SOURCE_REVISION} => ${env.TARGET_REVISION} has been done"
+ emailext(to: NOTIFY_RECIPIENTS,
+ body: notify_text,
+ subject: "MCP Promotion has been done")
+ }
+ }
+ } catch (Throwable e) {
// If there was an error or exception thrown, the build failed
currentBuild.result = "FAILURE"
throw e
- }
}
- }
+ }
+}
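
The boolean flags above are parsed null-safely: `(env.X ?: 'false').toBoolean()` survives an unset variable, whereas calling .toBoolean() first would throw a NullPointerException before the elvis operator could apply a default. A tiny plain-Groovy demonstration:

    def parseFlag(String raw) {
        (raw ?: 'false').toBoolean()
    }

    assert parseFlag('true')
    assert !parseFlag('false')
    assert !parseFlag(null)        // unset env var no longer throws an NPE
    assert parseFlag('TRUE')       // String.toBoolean() trims and ignores case
    println 'flag parsing OK'
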
diff --git a/restore-cassandra.groovy b/restore-cassandra.groovy
index c98ff17..c20c3a0 100644
--- a/restore-cassandra.groovy
+++ b/restore-cassandra.groovy
@@ -20,70 +20,110 @@
}
stage('Restore') {
- try {
- salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.stop', ['neutron-server'], null, true)
- } catch (Exception er) {
- common.warningMsg('neutron-server service already stopped')
+ // get opencontrail version
+ def _pillar = salt.getPillar(pepperEnv, "I@opencontrail:control", '_param:opencontrail_version')
+ def contrailVersion = _pillar['return'][0].values()[0]
+ common.infoMsg("Contrail version is ${contrailVersion}")
+ if (contrailVersion >= 4) {
+ common.infoMsg("There will be steps for OC4.0 restore")
+ try {
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control', 'doctrail controller systemctl stop contrail-database' )
+ } catch (Exception err) {
+ common.warningMsg('contrail-database already stopped? ' + err.getMessage())
+ }
+ try {
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control', 'doctrail controller bash -c "for f in $(ls /var/lib/cassandra/); do rm -r /var/lib/cassandra/$f; done"')
+ } catch (Exception err) {
+ common.warningMsg('cassandra data already removed? ' + err.getMessage())
+ }
+ try {
+ salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', 'doctrail controller systemctl start contrail-database' )
+ } catch (Exception err) {
+ common.warningMsg('contrail-database already started? ' + err.getMessage())
+ }
+ // remove the restore-already-happened marker file if present
+ try {
+ salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', 'rm /var/backups/cassandra/dbrestored')
+ } catch (Exception err) {
+ common.warningMsg('/var/backups/cassandra/dbrestored not present? ' + err.getMessage())
+ }
+ // perform the actual restore
+ salt.enforceState(pepperEnv, 'I@cassandra:backup:client', "cassandra")
+ salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'system.reboot', null, [], true, 5)
+ sleep(5)
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'system.reboot', null, [], true, 5)
+ // the lovely wait-60-seconds mantra before restarting the contrail-database service
+ sleep(60)
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control', "doctrail controller systemctl restart contrail-database")
+ // another mantra
+ sleep(60)
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control', "doctrail controller contrail-status")
}
- try {
- salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-config'], null, true)
- } catch (Exception er) {
- common.warningMsg('Supervisor-config service already stopped')
+ else {
+ try {
+ salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.stop', ['neutron-server'], null, true)
+ } catch (Exception er) {
+ common.warningMsg('neutron-server service already stopped')
+ }
+ try {
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-config'], null, true)
+ } catch (Exception er) {
+ common.warningMsg('Supervisor-config service already stopped')
+ }
+ // Cassandra restore section
+ try {
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-database'], null, true)
+ } catch (Exception er) {
+ common.warningMsg('Supervisor-database service already stopped')
+ }
+ try {
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control', "mkdir -p /root/cassandra/cassandra.bak")
+ } catch (Exception er) {
+ common.warningMsg('Directory already exists')
+ }
+
+ try {
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control', "mv /var/lib/cassandra/* /root/cassandra/cassandra.bak")
+ } catch (Exception er) {
+ common.warningMsg('Files were already moved')
+ }
+ try {
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control', "rm -rf /var/lib/cassandra/*")
+ } catch (Exception er) {
+ common.warningMsg('Directory already empty')
+ }
+
+ _pillar = salt.getPillar(pepperEnv, "I@cassandra:backup:client", 'cassandra:backup:backup_dir')
+ def backupDir = _pillar['return'][0].values()[0] ?: '/var/backups/cassandra'
+ common.infoMsg("Backup directory is ${backupDir}")
+ salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'file.remove', ["${backupDir}/dbrestored"], null, true)
+
+ salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'service.start', ['supervisor-database'], null, true)
+
+ // wait until supervisor-database service is up
+ salt.commandStatus(pepperEnv, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
+ sleep(60)
+
+ // performs restore
+ salt.enforceState(pepperEnv, 'I@cassandra:backup:client', "cassandra.backup")
+ salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
+ sleep(5)
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
+
+ // wait until supervisor-database service is up
+ salt.commandStatus(pepperEnv, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
+ salt.commandStatus(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'service supervisor-database status', 'running')
+ sleep(5)
+
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.restart', ['supervisor-database'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.start', ['neutron-server'], null, true)
+
+ // wait until contrail-status is up
+ salt.commandStatus(pepperEnv, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
+
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control', "nodetool status")
+ salt.cmdRun(pepperEnv, 'I@opencontrail:control', "contrail-status")
}
- // Cassandra restore section
- try {
- salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-database'], null, true)
- } catch (Exception er) {
- common.warningMsg('Supervisor-database service already stopped')
- }
- try {
- salt.cmdRun(pepperEnv, 'I@opencontrail:control', "mkdir -p /root/cassandra/cassandra.bak")
- } catch (Exception er) {
- common.warningMsg('Directory already exists')
- }
-
- try {
- salt.cmdRun(pepperEnv, 'I@opencontrail:control', "mv /var/lib/cassandra/* /root/cassandra/cassandra.bak")
- } catch (Exception er) {
- common.warningMsg('Files were already moved')
- }
- try {
- salt.cmdRun(pepperEnv, 'I@opencontrail:control', "rm -rf /var/lib/cassandra/*")
- } catch (Exception er) {
- common.warningMsg('Directory already empty')
- }
-
- _pillar = salt.getPillar(pepperEnv, "I@cassandra:backup:client", 'cassandra:backup:backup_dir')
- backup_dir = _pillar['return'][0].values()[0]
- if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/cassandra' }
- print(backup_dir)
- salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
-
- salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'service.start', ['supervisor-database'], null, true)
-
- // wait until supervisor-database service is up
- salt.commandStatus(pepperEnv, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
- sleep(60)
-
- // performs restore
- salt.enforceState(pepperEnv, 'I@cassandra:backup:client', "cassandra.backup")
- salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
- sleep(5)
- salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
-
- // wait until supervisor-database service is up
- salt.commandStatus(pepperEnv, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
- salt.commandStatus(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'service supervisor-database status', 'running')
- sleep(5)
-
- salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.restart', ['supervisor-database'], null, true)
- salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.start', ['neutron-server'], null, true)
-
- // wait until contrail-status is up
- salt.commandStatus(pepperEnv, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
-
- salt.cmdRun(pepperEnv, 'I@opencontrail:control', "nodetool status")
- salt.cmdRun(pepperEnv, 'I@opencontrail:control', "contrail-status")
}
}
}
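
The backup directory lookup above shows the usual pillar-with-default idiom: the salt reply nests the value under return[0], and the elvis operator falls back to /var/backups/cassandra when the pillar is empty. A sketch against an emulated salt.getPillar reply:

    def backupDirFrom(Map reply) {
        def value = reply['return'][0].values()[0]
        value ?: '/var/backups/cassandra'       // default when the pillar is empty/unset
    }

    // emulated salt.getPillar() replies
    assert backupDirFrom(['return': [['ctl01.local': '']]]) == '/var/backups/cassandra'
    assert backupDirFrom(['return': [['ctl01.local': '/srv/backups']]]) == '/srv/backups'
    println 'pillar default OK'
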
diff --git a/rollout-config-change.groovy b/rollout-config-change.groovy
deleted file mode 100644
index dcb9034..0000000
--- a/rollout-config-change.groovy
+++ /dev/null
@@ -1,96 +0,0 @@
-
-/**
- * Rollout changes to the node(s) configuration
- *
- * Expected parameters:
- * TST_SALT_MASTER_CREDENTIALS Credentials to the Salt API (QA environment).
- * TST_SALT_MASTER_URL Full Salt API address [https://10.10.10.1:8000].
- * PRD_SALT_MASTER_CREDENTIALS Credentials to the Salt API (PRD environment).
- * PRD_SALT_MASTER_URL Full Salt API address [https://10.10.10.1:8000].
- * Model parameters:
- * MODEL_REPO_CREDENTIALS Credentials to the Model.
- * MODEL_REPO_URL Full model repo address.
- * MODEL_REPO_SOURCE_BRANCH Source branch to merge from.
- * MODEL_REPO_TARGET_BRANCH Target branch to merge fo.
- * Change settings:
- * TARGET_SERVERS Salt compound target to match nodes to be updated [*, G@osfamily:debian].
- * TARGET_STATES States to be applied, empty string means running highstate [linux, linux,openssh, salt.minion.grains].
- * TARGET_SUBSET_TEST Number of nodes to test config changes, empty string means all targetted nodes.
- * TARGET_SUBSET_LIVE Number of selected noded to live apply selected config changes.
- * TARGET_BATCH_LIVE Batch size for the complete live config changes on all nodes, empty string means apply to all targetted nodes.
- * Test settings:
- * TEST_SERVICE Comma separated list of services to test
- * TEST_K8S_API_SERVER Kubernetes API address
- * TEST_K8S_CONFORMANCE_IMAGE Path to docker image with conformance e2e tests
- * TEST_DOCKER_INSTALL Install docker on the target if true
- * TEST_TEMPEST_IMAGE Tempest image link
- * TEST_TEMPEST_PATTERN If not false, run tests matched to pattern only
- * TEST_TEMPEST_TARGET Salt target for tempest node
- *
-**/
-
-def common = new com.mirantis.mk.Common()
-def salt = new com.mirantis.mk.Salt()
-timeout(time: 12, unit: 'HOURS') {
- node() {
- try {
-
- stage('Run config change on test env') {
- build job: "deploy-update-service-config", parameters: [
- [$class: 'StringParameterValue', name: 'SALT_MASTER_URL', value: TST_SALT_MASTER_URL],
- [$class: 'StringParameterValue', name: 'SALT_MASTER_CREDENTIALS', value: TST_SALT_MASTER_CREDENTIALS],
- [$class: 'StringParameterValue', name: 'TARGET_BATCH_LIVE', value: TARGET_BATCH_LIVE],
- [$class: 'StringParameterValue', name: 'TARGET_SERVERS', value: TARGET_SERVERS],
- [$class: 'StringParameterValue', name: 'TARGET_STATES', value: TARGET_STATES],
- [$class: 'StringParameterValue', name: 'TARGET_SUBSET_LIVE', value: TARGET_SUBSET_LIVE],
- [$class: 'StringParameterValue', name: 'TARGET_SUBSET_TEST', value: TARGET_SUBSET_TEST],
- ]
- }
-
- stage('Test config change on test env') {
- build job: "deploy-test-service", parameters: [
- [$class: 'StringParameterValue', name: 'SALT_MASTER_URL', value: TST_SALT_MASTER_URL],
- [$class: 'StringParameterValue', name: 'SALT_MASTER_CREDENTIALS', value: TST_SALT_MASTER_CREDENTIALS],
- [$class: 'StringParameterValue', name: 'TEST_SERVICE', value: TEST_SERVICE],
- [$class: 'StringParameterValue', name: 'TEST_K8S_API_SERVER', value: TEST_K8S_API_SERVER],
- [$class: 'StringParameterValue', name: 'TEST_K8S_CONFORMANCE_IMAGE', value: TEST_K8S_CONFORMANCE_IMAGE],
- ]
- }
-
- stage('Promote config change in repo') {
- build job: "git-merge-branches", parameters: [
- [$class: 'StringParameterValue', name: 'REPO_URL', value: MODEL_REPO_URL],
- [$class: 'StringParameterValue', name: 'CREDENTIALS_ID', value: MODEL_REPO_CREDENTIALS],
- [$class: 'StringParameterValue', name: 'SOURCE_BRANCH', value: MODEL_REPO_SOURCE_BRANCH],
- [$class: 'StringParameterValue', name: 'TARGET_BRANCH', value: MODEL_REPO_TARGET_BRANCH],
- ]
- }
-
- stage('Run config change on production env') {
- build job: "deploy-update-service-config", parameters: [
- [$class: 'StringParameterValue', name: 'SALT_MASTER_URL', value: PRD_SALT_MASTER_URL],
- [$class: 'StringParameterValue', name: 'SALT_MASTER_CREDENTIALS', value: PRD_SALT_MASTER_CREDENTIALS],
- [$class: 'StringParameterValue', name: 'TARGET_BATCH_LIVE', value: TARGET_BATCH_LIVE],
- [$class: 'StringParameterValue', name: 'TARGET_SERVERS', value: TARGET_SERVERS],
- [$class: 'StringParameterValue', name: 'TARGET_STATES', value: TARGET_STATES],
- [$class: 'StringParameterValue', name: 'TARGET_SUBSET_LIVE', value: TARGET_SUBSET_LIVE],
- [$class: 'StringParameterValue', name: 'TARGET_SUBSET_TEST', value: TARGET_SUBSET_TEST],
- ]
- }
-
- stage('Test config change on prod env') {
- def result = build job: "deploy-test-service", parameters: [
- [$class: 'StringParameterValue', name: 'SALT_MASTER_URL', value: PRD_SALT_MASTER_URL],
- [$class: 'StringParameterValue', name: 'SALT_MASTER_CREDENTIALS', value: PRD_SALT_MASTER_CREDENTIALS],
- [$class: 'StringParameterValue', name: 'TEST_SERVICE', value: TEST_SERVICE],
- [$class: 'StringParameterValue', name: 'TEST_K8S_API_SERVER', value: TEST_K8S_API_SERVER],
- [$class: 'StringParameterValue', name: 'TEST_K8S_CONFORMANCE_IMAGE', value: TEST_K8S_CONFORMANCE_IMAGE],
- ]
- }
-
- } catch (Throwable e) {
- currentBuild.result = 'FAILURE'
- throw e
- }
- }
-}
diff --git a/stacklight-upgrade.groovy b/stacklight-upgrade.groovy
new file mode 100644
index 0000000..7b79f4c
--- /dev/null
+++ b/stacklight-upgrade.groovy
@@ -0,0 +1,170 @@
+/**
+ *
+ * Upgrade Stacklight packages and components
+ *
+ * Required parameters:
+ * SALT_MASTER_URL URL of Salt master
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API
+ *
+ * STAGE_UPGRADE_SYSTEM_PART Set to True if upgrade of system part (telegraf, fluentd, prometheus-relay) is desired
+ * STAGE_UPGRADE_ES_KIBANA Set to True if Elasticsearch and Kibana upgrade is desired
+ * STAGE_UPGRADE_DOCKER_COMPONENTS Set to True if upgrade for components running in Docker Swarm is desired
+ *
+ */
+
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
+
+def pepperEnv = "pepperEnv"
+def targetLiveSubset
+def targetLiveAll
+def minions
+def result
+def args
+def commandKwargs
+def probe = 1
+errorOccured = false  // kept in the script binding (no 'def') so the helper functions below can set it
+
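+// Upgrade a single component via Salt: refresh pillar and repos on `target`,
+// upgrade package `pckg`, re-apply `state` and print the systemd status of `service`.
+// Illustrative call (mirrors the stages at the bottom of this file):
+//   upgrade(pepperEnv, 'I@telegraf:agent', 'telegraf', 'telegraf', 'telegraf')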
+def upgrade(master, target, service, pckg, state) {
+ def common = new com.mirantis.mk.Common()
+ def salt = new com.mirantis.mk.Salt()
+ def command = 'cmd.run'
+ stage("Change ${target} repos") {
+ salt.runSaltProcessStep(master, "${target}", 'saltutil.refresh_pillar', [], null, true, 5)
+ salt.enforceState(master, "${target}", 'linux.system.repo', true)
+ }
+ stage("Update ${pckg} package") {
+ common.infoMsg("Upgrade ${service} package")
+ try {
+ salt.runSaltProcessStep(master, "${target}", command, ["apt-get install --only-upgrade ${pckg}"], null, true)
+ } catch (Exception er) {
+ errorOccured = true
+ common.errorMsg("${pckg} package is not upgraded.")
+ return
+ }
+ }
+ stage("Run ${state} state on ${target} nodes") {
+ try {
+ salt.enforceState(master, "${target}", ["${state}"], true)
+ } catch (Exception er) {
+ errorOccured = true
+ common.errorMsg("${state} state was executed and failed. Please fix it manually.")
+ }
+ }
+ out = salt.runSaltCommand(master, 'local', ['expression': "${target}", 'type': 'compound'], command, null, "systemctl status ${service}.service", null)
+ salt.printSaltCommandResult(out)
+
+ common.warningMsg("Please check \'systemctl status ${service}.service\' on ${target} nodes if ${service} is running.")
+ return
+}
+
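+// Upgrade Elasticsearch and Kibana in-place: stop the service, install the
+// newer package, restart, and wait until the Elasticsearch cluster health
+// reported via the client VIP is 'green' again before upgrading Kibana.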
+def upgrade_es_kibana(master) {
+ def common = new com.mirantis.mk.Common()
+ def salt = new com.mirantis.mk.Salt()
+ def command = 'cmd.run'
+ stage('Elasticsearch upgrade') {
+ try {
+ salt.runSaltProcessStep(master, 'I@elasticsearch:server', command, ["systemctl stop elasticsearch"], null, true)
+ salt.runSaltProcessStep(master, 'I@elasticsearch:server', command, ["apt-get --only-upgrade install elasticsearch"], null, true)
+ salt.runSaltProcessStep(master, 'I@elasticsearch:server', command, ["systemctl daemon-reload"], null, true)
+ salt.runSaltProcessStep(master, 'I@elasticsearch:server', command, ["systemctl start elasticsearch"], null, true)
+ salt.runSaltProcessStep(master, '*', 'saltutil.sync_all', [], null, true)
+ } catch (Exception er) {
+ errorOccured = true
+ common.errorMsg("Elasticsearch upgrade failed. Please fix it manually.")
+ return
+ }
+ }
+ stage('Verify that the Elasticsearch cluster status is green') {
+ try {
+ def retries_wait = 20
+ def retries = 15
+ def elasticsearch_vip
+ def pillar = salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:host')
+ if(!pillar['return'].isEmpty()) {
+ elasticsearch_vip = pillar['return'][0].values()[0]
+ } else {
+ errorOccured = true
+ common.errorMsg('[ERROR] Elasticsearch VIP address could not be retrieved')
+ }
+ pillar = salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:port')
+ def elasticsearch_port
+ if(!pillar['return'].isEmpty()) {
+ elasticsearch_port = pillar['return'][0].values()[0]
+ } else {
+ errorOccured = true
+ common.errorMsg('[ERROR] Elasticsearch VIP port could not be retrieved')
+ }
+ common.retry(retries,retries_wait) {
+ common.infoMsg('Waiting for Elasticsearch to become green..')
+ salt.cmdRun(master, "I@elasticsearch:client", "curl -sf ${elasticsearch_vip}:${elasticsearch_port}/_cat/health | awk '{print \$4}' | grep green")
+ }
+ } catch (Exception er) {
+ errorOccured = true
+ common.errorMsg("Elasticsearch cluster status is not \'green\'. Please fix it manually.")
+ return
+ }
+ }
+ stage('Kibana upgrade') {
+ try {
+ salt.runSaltProcessStep(master, 'I@kibana:server', command, ["systemctl stop kibana"], null, true)
+ salt.runSaltProcessStep(master, 'I@kibana:server', command, ["apt-get --only-upgrade install kibana"], null, true)
+ salt.runSaltProcessStep(master, 'I@kibana:server', command, ["systemctl start kibana"], null, true)
+ } catch (Exception er) {
+ errorOccured = true
+ common.errorMsg("Kibana upgrade failed. Please fix it manually.")
+ return
+ }
+ out = salt.runSaltCommand(master, 'local', ['expression': 'I@kibana:server', 'type': 'compound'], command, null, 'systemctl status kibana.service', null)
+ salt.printSaltCommandResult(out)
+
+ common.warningMsg('Please check if kibana service is running.')
+ return
+ }
+}
+timeout(time: 12, unit: 'HOURS') {
+ node("python") {
+
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ }
+
+ if (STAGE_UPGRADE_SYSTEM_PART.toBoolean() == true && !errorOccured) {
+ upgrade(pepperEnv, "I@telegraf:agent or I@telegraf:remote_agent", "telegraf", "telegraf", "telegraf")
+ upgrade(pepperEnv, "I@fluentd:agent", "td-agent", "td-agent", "fluentd")
+ if (salt.testTarget(pepperEnv, "I@prometheus:relay")) {
+ upgrade(pepperEnv, "I@prometheus:relay", "prometheus-relay", "prometheus-relay", "prometheus")
+ }
+ if (salt.testTarget(pepperEnv, "I@prometheus:exporters:libvirt")) {
+ upgrade(pepperEnv, "I@prometheus:exporters:libvirt", "libvirt-exporter", "libvirt-exporter", "prometheus")
+ }
+ if (salt.testTarget(pepperEnv, "I@prometheus:exporters:jmx")) {
+ upgrade(pepperEnv, "I@prometheus:exporters:jmx", "jmx-exporter", "jmx-exporter", "prometheus")
+ }
+ }
+
+ if (STAGE_UPGRADE_ES_KIBANA.toBoolean() == true && !errorOccured) {
+ upgrade_es_kibana(pepperEnv)
+ }
+
+ if (STAGE_UPGRADE_DOCKER_COMPONENTS.toBoolean() == true && !errorOccured) {
+
+ stage('Docker components upgrade') {
+
+ try {
+ salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master and I@prometheus:server', 'cmd.run', ["docker stack rm monitoring"], null, true)
+ salt.enforceState(pepperEnv, 'I@docker:swarm and I@prometheus:server', 'prometheus')
+ salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master and I@prometheus:server', 'cmd.run', ["docker stack rm dashboard"], null, true)
+ salt.enforceState(pepperEnv, 'I@docker:swarm:role:master and I@prometheus:server', 'docker')
+ salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.sync_all', [], null, true)
+ salt.enforceState(pepperEnv, 'I@grafana:client', 'grafana.client')
+ } catch (Exception er) {
+ errorOccured = true
+ common.errorMsg("Upgrade of docker components failed. Please fix it manually.")
+ return
+ }
+ }
+ }
+ }
+}
diff --git a/sync-http-to-s3.groovy b/sync-http-to-s3.groovy
new file mode 100644
index 0000000..108a394
--- /dev/null
+++ b/sync-http-to-s3.groovy
@@ -0,0 +1,29 @@
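+/**
+ *
+ * Download files over HTTP and upload them to S3.
+ *
+ * Expected parameters (inferred from the code below; not documented upstream):
+ *   IMAGE      Docker image providing the s4cmd tool
+ *   SOURCE     Base HTTP(S) URL to fetch files from
+ *   FILENAMES  Whitespace-separated list of file names to transfer
+ *   DEST       Destination S3 prefix, e.g. s3://bucket/path
+ *
+ */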
+def common = new com.mirantis.mk.Common()
+
+
+node("docker") {
+ stage('Prepare') {
+ img = docker.image(IMAGE)
+ img.pull()
+ }
+ stage('Upload') {
+ FILENAMES.split().each { filename ->
+ url = "${SOURCE}/${filename}"
+ img.withRun("--entrypoint='/bin/bash'") { c ->
+ withCredentials([[$class : 'UsernamePasswordMultiBinding', credentialsId: 'aws-s3',
+ usernameVariable: 'S3_ACCESS_KEY', passwordVariable: 'S3_SECRET_KEY']]) {
+ img.inside("-e S3_ACCESS_KEY=${S3_ACCESS_KEY} -e S3_SECRET_KEY=${S3_SECRET_KEY}") {
+ common.retry(3, 5) {
+ sh(script: "wget --progress=dot:giga -O ${filename} ${url}", returnStdout: true)
+ sh(script: "/usr/local/bin/s4cmd put ${filename} ${DEST}/${filename}", returnStdout: true)
+ }
+ }
+ }
+
+ }
+ sh("rm ${filename}")
+ }
+ }
+ deleteDir()
+}
diff --git a/tag-git-repos.groovy b/tag-git-repos.groovy
index 312ec9e..68fcdcd 100644
--- a/tag-git-repos.groovy
+++ b/tag-git-repos.groovy
@@ -16,9 +16,23 @@
def gitRepoAddTag(repoURL, repoName, tag, credentials, ref = "HEAD"){
common.infoMsg("Tagging: ${repoURL} ${ref} => ${tag}")
- git.checkoutGitRepository(repoName, repoURL, "master", credentials)
+ checkout([
+ $class: 'GitSCM',
+ branches: [
+ [name: 'FETCH_HEAD'],
+ ],
+ userRemoteConfigs: [
+ [url: repoURL, refspec: ref, credentialsId: credentials],
+ ],
+ extensions: [
+ [$class: 'PruneStaleBranch'],
+ [$class: 'RelativeTargetDirectory', relativeTargetDir: repoName],
+ [$class: 'SubmoduleOption', disableSubmodules: true],
+ [$class: 'UserIdentity', name: 'MCP CI', email: 'ci+infra@mirantis.com'],
+ ],
+ ])
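+ // NOTE: tagging FETCH_HEAD (fetched via the explicit refspec above) allows
+ // tagging any ref - branch, tag or Gerrit change - not only master.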
dir(repoName) {
- sh "git tag -f -a ${tag} ${ref} -m \"Release of mcp version ${tag}\""
+ sh "git tag -f -a ${tag} -m \"Release of mcp version ${tag}\""
sshagent([credentials]) {
sh "git push -f origin ${tag}:refs/tags/${tag}"
}
diff --git a/test-cookiecutter-reclass-chunk.groovy b/test-cookiecutter-reclass-chunk.groovy
index 9e34cea..cdc6e1e 100644
--- a/test-cookiecutter-reclass-chunk.groovy
+++ b/test-cookiecutter-reclass-chunk.groovy
@@ -12,16 +12,39 @@
slaveNode = env.SLAVE_NODE ?: 'python&&docker'
timeout(time: 1, unit: 'HOURS') {
- node(slaveNode) {
- try {
- extraVars = readYaml text: EXTRA_VARIABLES_YAML
- currentBuild.description = extraVars.modelFile
- saltModelTesting.testCCModel(extraVars)
- } catch (Throwable e) {
- // If there was an error or exception thrown, the build failed
- currentBuild.result = "FAILURE"
- currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
- throw e
+ node(slaveNode) {
+ stage("RunTest") {
+ try {
+ extraVars = readYaml text: EXTRA_VARIABLES_YAML
+ currentBuild.description = extraVars.modelFile
+ sh(script: 'find . -mindepth 1 -delete || true', returnStatus: true)
+ sh(script: """
+ wget --progress=dot:mega --auth-no-challenge -O models.tar.gz ${extraVars.MODELS_TARGZ}
+ tar -xzf models.tar.gz
+ """)
+ common.infoMsg("Going to test exactly one context: ${extraVars.modelFile}\n, with params: ${extraVars}")
+
+ def content = readFile(file: extraVars.modelFile)
+ def templateContext = readYaml text: content
+ def config = [
+ 'dockerHostname': "cfg01.${templateContext.default_context.cluster_domain}",
+ 'clusterName': templateContext.default_context.cluster_name,
+ 'reclassEnv': extraVars.testReclassEnv,
+ 'distribRevision': extraVars.DISTRIB_REVISION,
+ 'dockerContainerName': extraVars.DockerCName,
+ 'testContext': extraVars.modelFile
+ ]
+ if (extraVars.useExtraRepos) {
+ config['extraRepos'] = extraVars.extraRepos ? extraVars.extraRepos : [:]
+ config['extraRepoMergeStrategy'] = extraVars.extraRepoMergeStrategy ? extraVars.extraRepoMergeStrategy : ''
+ }
+ saltModelTesting.testNode(config)
+ } catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ currentBuild.result = "FAILURE"
+ currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+ throw e
+ }
+ }
}
- }
}
diff --git a/test-cookiecutter-reclass.groovy b/test-cookiecutter-reclass.groovy
index e6d3070..bd3373c 100644
--- a/test-cookiecutter-reclass.groovy
+++ b/test-cookiecutter-reclass.groovy
@@ -1,116 +1,100 @@
+/*
+Can be triggered from Gerrit or run manually.
+Modes:
+1) Manual run via job-build; it is possible to pass a refspec.
+   TODO: currently it is impossible to use a custom COOKIECUTTER_TEMPLATE_URL or RECLASS_SYSTEM_URL; the Gerrit one is always used.
+   - for CC
+   - for Reclass
+
+2) Gerrit trigger.
+   Switched to automatically if the GERRIT_PROJECT variable is detected.
+   Always tests GERRIT_REFSPEC against the GERRIT_BRANCH/master version of the opposite project.
+ */
+
common = new com.mirantis.mk.Common()
gerrit = new com.mirantis.mk.Gerrit()
git = new com.mirantis.mk.Git()
python = new com.mirantis.mk.Python()
-gerritRef = env.GERRIT_REFSPEC ?: null
-slaveNode = (env.SLAVE_NODE ?: 'python&&docker')
-def alreadyMerged = false
-
-def reclassVersion = 'v1.5.4'
-if (common.validInputParam('RECLASS_VERSION')) {
- reclassVersion = RECLASS_VERSION
+extraVarsYAML = env.EXTRA_VARIABLES_YAML?.trim() ?: ''
+if (extraVarsYAML) {
+ common.mergeEnv(env, extraVarsYAML)
}
-def generateSaltMaster(modEnv, clusterDomain, clusterName) {
- def nodeFile = "${modEnv}/nodes/cfg01.${clusterDomain}.yml"
- def nodeString = """classes:
-- cluster.${clusterName}.infra.config
-parameters:
- _param:
- linux_system_codename: xenial
- reclass_data_revision: master
- linux:
- system:
- name: cfg01
- domain: ${clusterDomain}
-"""
- sh "mkdir -p ${modEnv}/nodes/"
- println "Create file ${nodeFile}"
- writeFile(file: nodeFile, text: nodeString)
-}
+slaveNode = env.SLAVE_NODE ?: 'docker'
+checkIncludeOrder = env.CHECK_INCLUDE_ORDER ?: false
-/**
- *
- * @param contextFile - path to `contexts/XXX.yaml file`
- * @param virtualenv - pyvenv with CC and dep's
- * @param templateEnvDir - root of CookieCutter
- * @return
- */
+// Global vars
+alreadyMerged = false
+gerritConData = [credentialsId : env.CREDENTIALS_ID,
+ gerritName : env.GERRIT_NAME ?: 'mcp-jenkins',
+ gerritHost : env.GERRIT_HOST ?: 'gerrit.mcp.mirantis.com',
+ gerritScheme : env.GERRIT_SCHEME ?: 'ssh',
+ gerritPort : env.GERRIT_PORT ?: '29418',
+ gerritRefSpec : null,
+ gerritProject : null,
+ withWipeOut : true,
+ GERRIT_CHANGE_NUMBER: null]
+//
+//ccTemplatesRepo = env.COOKIECUTTER_TEMPLATE_URL ?: 'ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/mk/cookiecutter-templates'
+gerritDataCCHEAD = [:]
+gerritDataCC = [:]
+gerritDataCC << gerritConData
+gerritDataCC['gerritBranch'] = env.COOKIECUTTER_TEMPLATE_BRANCH ?: 'master'
+gerritDataCC['gerritRefSpec'] = env.COOKIECUTTER_TEMPLATE_REF ?: null
+gerritDataCC['gerritProject'] = 'mk/cookiecutter-templates'
+//
+//reclassSystemRepo = env.RECLASS_SYSTEM_URL ?: 'ssh://mcp-jenkins@gerrit.mcp.mirantis.com:29418/salt-models/reclass-system'
+gerritDataRSHEAD = [:]
+gerritDataRS = [:]
+gerritDataRS << gerritConData
+gerritDataRS['gerritBranch'] = env.RECLASS_SYSTEM_BRANCH ?: 'master'
+gerritDataRS['gerritRefSpec'] = env.RECLASS_SYSTEM_GIT_REF ?: null
+gerritDataRS['gerritProject'] = 'salt-models/reclass-system'
-def generateModel(contextFile, virtualenv, templateEnvDir) {
- def modelEnv = "${templateEnvDir}/model"
- def basename = common.GetBaseName(contextFile, '.yml')
- def generatedModel = "${modelEnv}/${basename}"
- def content = readFile(file: "${templateEnvDir}/contexts/${contextFile}")
- def templateContext = readYaml text: content
- def clusterDomain = templateContext.default_context.cluster_domain
- def clusterName = templateContext.default_context.cluster_name
- def outputDestination = "${generatedModel}/classes/cluster/${clusterName}"
- def templateBaseDir = templateEnvDir
- def templateDir = "${templateEnvDir}/dir"
- def templateOutputDir = templateBaseDir
- dir(templateEnvDir) {
- sh(script: "rm -rf ${generatedModel} || true")
- common.infoMsg("Generating model from context ${contextFile}")
- def productList = ["infra", "cicd", "opencontrail", "kubernetes", "openstack", "oss", "stacklight", "ceph"]
- for (product in productList) {
+// version of debRepos, aka formulas|reclass|ubuntu
+testDistribRevision = env.DISTRIB_REVISION ?: 'nightly'
- // get templateOutputDir and productDir
- if (product.startsWith("stacklight")) {
- templateOutputDir = "${templateEnvDir}/output/stacklight"
- try {
- productDir = "stacklight" + templateContext.default_context['stacklight_version']
- } catch (Throwable e) {
- productDir = "stacklight1"
- }
- } else {
- templateOutputDir = "${templateEnvDir}/output/${product}"
- productDir = product
- }
+// Name of sub-test chunk job
+chunkJobName = "test-mk-cookiecutter-templates-chunk"
+testModelBuildsData = [:]
- if (product == "infra" || (templateContext.default_context["${product}_enabled"]
- && templateContext.default_context["${product}_enabled"].toBoolean())) {
-
- templateDir = "${templateEnvDir}/cluster_product/${productDir}"
- common.infoMsg("Generating product " + product + " from " + templateDir + " to " + templateOutputDir)
-
- sh "rm -rf ${templateOutputDir} || true"
- sh "mkdir -p ${templateOutputDir}"
- sh "mkdir -p ${outputDestination}"
-
- python.buildCookiecutterTemplate(templateDir, content, templateOutputDir, virtualenv, templateBaseDir)
- sh "mv -v ${templateOutputDir}/${clusterName}/* ${outputDestination}"
- } else {
- common.warningMsg("Product " + product + " is disabled")
- }
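+// Return a closure that copies the nodesinfo.tar.gz artifact of the given
+// chunk-job build into `copyTo` and unpacks it there (consumed by the
+// "Compare NodesInfo Head/Patched" stage below).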
+def getAndUnpackNodesInfoArtifact(jobName, copyTo, build) {
+ return {
+ dir(copyTo) {
+ copyArtifacts(projectName: jobName, selector: specific(build), filter: "nodesinfo.tar.gz")
+ sh "tar -xf nodesinfo.tar.gz"
+ sh "rm -v nodesinfo.tar.gz"
}
- generateSaltMaster(generatedModel, clusterDomain, clusterName)
}
}
-
-def testModel(modelFile, reclassVersion = 'v1.5.4') {
+def testModel(modelFile, reclassArtifactName, artifactCopyPath, useExtraRepos = false) {
// modelFile - `modelfiname` from model/modelfiname/modelfiname.yaml
//* Grub all models and send it to check in paralell - by one in thread.
-
- _values_string = """
- ---
- MODELS_TARGZ: "${env.BUILD_URL}/artifact/patched_reclass.tar.gz"
- DockerCName: "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}_${modelFile.toLowerCase()}"
- testReclassEnv: "model/${modelFile}/"
- modelFile: "contexts/${modelFile}.yml"
- DISTRIB_REVISION: "${DISTRIB_REVISION}"
- EXTRA_FORMULAS: "${env.EXTRA_FORMULAS}"
- reclassVersion: "${reclassVersion}"
- """
- build job: "test-mk-cookiecutter-templates-chunk", parameters: [
- [$class: 'StringParameterValue', name: 'EXTRA_VARIABLES_YAML',
+ def _uuid = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}_${modelFile.toLowerCase()}_" + UUID.randomUUID().toString().take(8)
+ def _values_string = """
+---
+MODELS_TARGZ: "${env.BUILD_URL}/artifact/${reclassArtifactName}"
+DockerCName: "${_uuid}"
+testReclassEnv: "model/${modelFile}/"
+modelFile: "contexts/${modelFile}.yml"
+DISTRIB_REVISION: "${testDistribRevision}"
+useExtraRepos: ${useExtraRepos}
+${extraVarsYAML.replaceAll('---', '')}
+"""
+ def chunkJob = build job: chunkJobName, parameters: [
+ [$class: 'TextParameterValue', name: 'EXTRA_VARIABLES_YAML',
value : _values_string.stripIndent()],
]
+ // Put sub-job info into global map.
+ testModelBuildsData.put(_uuid, ['jobname' : chunkJob.fullProjectName,
+ 'copyToDir': "${artifactCopyPath}/${modelFile}",
+ 'buildId' : "${chunkJob.number}"])
}
-def StepTestModel(basename) {
+def StepTestModel(basename, reclassArtifactName, artifactCopyPath, useExtraRepos = false) {
// We need to wrap what we return in a Groovy closure, or else it's invoked
// when this method is called, not when we pass it to parallel.
// To do this, you need to wrap the code below in { }, and either return
@@ -118,32 +102,38 @@
// return node object
return {
node(slaveNode) {
- testModel(basename)
+ testModel(basename, reclassArtifactName, artifactCopyPath, useExtraRepos)
}
}
}
-def StepPrepareCCenv(refchange, templateEnvFolder) {
+def StepPrepareGit(templateEnvFolder, gerrit_data) {
// return git clone object
return {
+ def checkouted = false
+ common.infoMsg("StepPrepareGit: ${gerrit_data}")
// fetch needed sources
dir(templateEnvFolder) {
- if (refchange) {
- def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID)
+ if (gerrit_data['gerritRefSpec']) {
+ // This part might not work in case vars are passed manually
+ def gerritChange = gerrit.getGerritChange(gerrit_data['gerritName'], gerrit_data['gerritHost'],
+ gerrit_data['GERRIT_CHANGE_NUMBER'], gerrit_data['credentialsId'])
merged = gerritChange.status == "MERGED"
if (!merged) {
- checkouted = gerrit.gerritPatchsetCheckout([
- credentialsId: CREDENTIALS_ID
- ])
+ checkouted = gerrit.gerritPatchsetCheckout(gerrit_data)
} else {
- // update global variable for success return from pipeline
- //alreadyMerged = true
- common.successMsg("Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to gate them")
- currentBuild.result = 'ABORTED'
- throw new hudson.AbortException('change already merged')
+ // update global variable for a clean return from the pipeline
+ alreadyMerged = true
+ common.successMsg("Change ${gerrit_data['GERRIT_CHANGE_NUMBER']} is already merged, no need to gate it")
+ error('change already merged')
}
} else {
- git.checkoutGitRepository(templateEnvFolder, COOKIECUTTER_TEMPLATE_URL, COOKIECUTTER_TEMPLATE_BRANCH, CREDENTIALS_ID)
+ // Get clean HEAD
+ gerrit_data['useGerritTriggerBuildChooser'] = false
+ checkouted = gerrit.gerritPatchsetCheckout(gerrit_data)
+ if (!checkouted) {
+ error("Failed to get repo:${gerrit_data}")
+ }
}
}
}
@@ -152,33 +142,155 @@
def StepGenerateModels(_contextFileList, _virtualenv, _templateEnvDir) {
return {
for (contextFile in _contextFileList) {
- generateModel(contextFile, _virtualenv, _templateEnvDir)
+ def basename = common.GetBaseName(contextFile, '.yml')
+ def context = readFile(file: "${_templateEnvDir}/contexts/${contextFile}")
+ python.generateModel(context, basename, 'cfg01', _virtualenv, "${_templateEnvDir}/model", _templateEnvDir)
}
}
}
+def globalVariatorsUpdate() {
+ // Simple function to check and define branch-related variables.
+ // In general, it makes transition updates for non-master branches
+ // based on magic logic.
+ def newline = '<br/>'
+ def messages = []
+ if (env.GERRIT_PROJECT) {
+ messages.add("<font color='red'>GerritTrigger detected! We are in auto-mode:</font>")
+ messages.add("Test env variables has been changed:")
+ messages.add("COOKIECUTTER_TEMPLATE_BRANCH => ${gerritDataCC['gerritBranch']}")
+ messages.add("RECLASS_MODEL_BRANCH => ${gerritDataRS['gerritBranch']}")
+ // TODO are we going to have such branches?
+ if (!['nightly', 'testing', 'stable', 'proposed', 'master'].contains(env.GERRIT_BRANCH)) {
+ gerritDataCC['gerritBranch'] = env.GERRIT_BRANCH
+ gerritDataRS['gerritBranch'] = env.GERRIT_BRANCH
+ testDistribRevision = env.GERRIT_BRANCH
+ }
+ // Identify, who triggered. To whom we should pass refspec
+ if (env.GERRIT_PROJECT == 'salt-models/reclass-system') {
+ gerritDataRS['gerritRefSpec'] = env.GERRIT_REFSPEC
+ gerritDataRS['GERRIT_CHANGE_NUMBER'] = env.GERRIT_CHANGE_NUMBER
+ messages.add("RECLASS_SYSTEM_GIT_REF => ${gerritDataRS['gerritRefSpec']}")
+ } else if (env.GERRIT_PROJECT == 'mk/cookiecutter-templates') {
+ gerritDataCC['gerritRefSpec'] = env.GERRIT_REFSPEC
+ gerritDataCC['GERRIT_CHANGE_NUMBER'] = env.GERRIT_CHANGE_NUMBER
+ messages.add("COOKIECUTTER_TEMPLATE_REF => ${gerritDataCC['gerritRefSpec']}")
+ } else {
+ error("Unsuported gerrit-project triggered:${env.GERRIT_PROJECT}")
+ }
+ } else {
+ messages.add("<font color='red'>Non-gerrit trigger run detected!</font>")
+ }
+ gerritDataCCHEAD << gerritDataCC
+ gerritDataCCHEAD['gerritRefSpec'] = null
+ gerritDataCCHEAD['GERRIT_CHANGE_NUMBER'] = null
+ gerritDataRSHEAD << gerritDataRS
+ gerritDataRSHEAD['gerritRefSpec'] = null
+ gerritDataRSHEAD['GERRIT_CHANGE_NUMBER'] = null
+ // 'binary' branch logic w/o 'release/' prefix
+ if (testDistribRevision.contains('/')) {
+ testDistribRevision = testDistribRevision.split('/')[-1]
+ }
+ // Check if we are going to test bleeding-edge release, which doesn't have binary release yet
+ if (!common.checkRemoteBinary([apt_mk_version: testDistribRevision]).linux_system_repo_url) {
+ common.errorMsg("Binary release: ${testDistribRevision} not exist. Fallback to 'proposed'! ")
+ testDistribRevision = 'proposed'
+ messages.add("DISTRIB_REVISION => ${testDistribRevision}")
+ }
+ def message = messages.join(newline) + newline
+ currentBuild.description = currentBuild.description ? message + currentBuild.description : message
+}
+
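+// Replace all generated secret values under parameters._param in every
+// secrets.yml found below `path` with the literal string 'generated', so the
+// pillar compare stages are not polluted by per-build random passwords/keys.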
+def replaceGeneratedValues(path) {
+ def files = sh(script: "find ${path} -name 'secrets.yml'", returnStdout: true)
+ def stepsForParallel = [:]
+ stepsForParallel.failFast = true
+ files.tokenize().each {
+ stepsForParallel.put("Removing generated passwords/secrets from ${it}",
+ {
+ def secrets = readYaml file: it
+ for (String key in secrets['parameters']['_param'].keySet()) {
+ secrets['parameters']['_param'][key] = 'generated'
+ }
+ // writeYaml can't write to already existing file
+ writeYaml file: "${it}.tmp", data: secrets
+ sh "mv ${it}.tmp ${it}"
+ })
+ }
+ parallel stepsForParallel
+}
+
+def linkReclassModels(contextList, envPath, archiveName) {
+ // To be able to share reclass across all subenvs.
+ // Also makes the artifact test more solid - one reclass is used for all sub-models.
+ // Archive Structure will be:
+ // tar.gz
+ // ├── contexts
+ // │ └── ceph.yml
+ // ├── classes-system <<< reclass system
+ // ├── model
+ // │ └── ceph <<< from `context basename`
+ // │ ├── classes
+ // │ │ ├── cluster
+ // │ │ └── system -> ../../../classes-system
+ // │ └── nodes
+ // │ └── cfg01.ceph-cluster-domain.local.yml
+ def archiveBaseName = common.GetBaseName(archiveName, '.tar.gz')
+ def classesSystemDir = 'classes-system'
+ // copy the reclass system under envPath with -R and a trailing / so symlinks are copied directly
+ sh("cp -R ${archiveBaseName}/ ${envPath}/${classesSystemDir}")
+ dir(envPath) {
+ for (String context : contextList) {
+ def basename = common.GetBaseName(context, '.yml')
+ dir("${envPath}/model/${basename}/classes") {
+ sh(script: "ln -sfv ../../../${classesSystemDir} system ")
+ }
+ }
+ // replace all generated passwords/secrets/keys with a hardcoded value in infra/secrets.yml
+ replaceGeneratedValues("${envPath}/model")
+ // Save all models and all contexts. Warning! `h` flag must be used!
+ sh(script: "set -ex; tar -czhf ${env.WORKSPACE}/${archiveName} --exclude='*@tmp' contexts model ${classesSystemDir}", returnStatus: true)
+ }
+ archiveArtifacts artifacts: archiveName
+}
+
timeout(time: 1, unit: 'HOURS') {
node(slaveNode) {
+ globalVariatorsUpdate()
def templateEnvHead = "${env.WORKSPACE}/EnvHead/"
def templateEnvPatched = "${env.WORKSPACE}/EnvPatched/"
def contextFileListHead = []
def contextFileListPatched = []
def vEnv = "${env.WORKSPACE}/venv"
-
+ def headReclassArtifactName = "head_reclass.tar.gz"
+ def patchedReclassArtifactName = "patched_reclass.tar.gz"
+ def reclassNodeInfoDir = "${env.WORKSPACE}/reclassNodeInfo_compare/"
+ def reclassInfoHeadPath = "${reclassNodeInfoDir}/old"
+ def reclassInfoPatchedPath = "${reclassNodeInfoDir}/new"
try {
sh(script: 'find . -mindepth 1 -delete > /dev/null || true')
stage('Download and prepare CC env') {
// Prepare 2 env - for patchset, and for HEAD
- paralellEnvs = [:]
+ def paralellEnvs = [:]
paralellEnvs.failFast = true
- paralellEnvs['downloadEnvHead'] = StepPrepareCCenv('', templateEnvHead)
- paralellEnvs['downloadEnvPatched'] = StepPrepareCCenv(gerritRef, templateEnvPatched)
- parallel paralellEnvs
+ paralellEnvs['downloadEnvHead'] = StepPrepareGit(templateEnvHead, gerritDataCCHEAD)
+ if (gerritDataCC.get('gerritRefSpec', null)) {
+ paralellEnvs['downloadEnvPatched'] = StepPrepareGit(templateEnvPatched, gerritDataCC)
+ parallel paralellEnvs
+ } else {
+ paralellEnvs['downloadEnvPatched'] = { common.warningMsg('No need to process: downloadEnvPatched') }
+ parallel paralellEnvs
+ sh("rsync -a --exclude '*@tmp' ${templateEnvHead} ${templateEnvPatched}")
+ }
}
stage("Check workflow_definition") {
// Check only for patchset
python.setupVirtualenv(vEnv, 'python2', [], "${templateEnvPatched}/requirements.txt")
- common.infoMsg(python.runVirtualenvCommand(vEnv, "python ${templateEnvPatched}/workflow_definition_test.py"))
+ if (gerritDataCC.get('gerritRefSpec', null)) {
+ common.infoMsg(python.runVirtualenvCommand(vEnv, "python ${templateEnvPatched}/workflow_definition_test.py"))
+ } else {
+ common.infoMsg('No need to process: workflow_definition')
+ }
}
stage("generate models") {
@@ -193,108 +305,140 @@
}
}
// Generate over 2env's - for patchset, and for HEAD
- paralellEnvs = [:]
+ def paralellEnvs = [:]
paralellEnvs.failFast = true
- paralellEnvs['GenerateEnvPatched'] = StepGenerateModels(contextFileListPatched, vEnv, templateEnvPatched)
paralellEnvs['GenerateEnvHead'] = StepGenerateModels(contextFileListHead, vEnv, templateEnvHead)
- parallel paralellEnvs
-
- // Collect artifacts
- dir(templateEnvPatched) {
- // Collect only models. For backward comparability - who know, probably someone use it..
- sh(script: "tar -czf model.tar.gz -C model ../contexts .", returnStatus: true)
- archiveArtifacts artifacts: "model.tar.gz"
+ if (gerritDataCC.get('gerritRefSpec', null)) {
+ paralellEnvs['GenerateEnvPatched'] = StepGenerateModels(contextFileListPatched, vEnv, templateEnvPatched)
+ parallel paralellEnvs
+ } else {
+ paralellEnvs['GenerateEnvPatched'] = { common.warningMsg('No need to process: GenerateEnvPatched') }
+ parallel paralellEnvs
+ sh("rsync -a --exclude '*@tmp' ${templateEnvHead} ${templateEnvPatched}")
}
- // to be able share reclass for all subenvs
- // Also, makes artifact test more solid - use one reclass for all of sub-models.
- // Archive Structure will be:
- // tar.gz
- // ├── contexts
- // │ └── ceph.yml
- // ├── global_reclass <<< reclass system
- // ├── model
- // │ └── ceph <<< from `context basename`
- // │ ├── classes
- // │ │ ├── cluster
- // │ │ └── system -> ../../../global_reclass
- // │ └── nodes
- // │ └── cfg01.ceph-cluster-domain.local.yml
-
- if (SYSTEM_GIT_URL == "") {
- git.checkoutGitRepository("${env.WORKSPACE}/global_reclass/", RECLASS_MODEL_URL, RECLASS_MODEL_BRANCH, CREDENTIALS_ID)
+ // We need two git checkouts: one for HEAD, one for the patched version.
+ // If there is no patch, use HEAD for both.
+ RSHeadDir = common.GetBaseName(headReclassArtifactName, '.tar.gz')
+ RSPatchedDir = common.GetBaseName(patchedReclassArtifactName, '.tar.gz')
+ common.infoMsg("gerritDataRS= ${gerritDataRS}")
+ common.infoMsg("gerritDataRSHEAD= ${gerritDataRSHEAD}")
+ if (gerritDataRS.get('gerritRefSpec', null)) {
+ StepPrepareGit("${env.WORKSPACE}/${RSPatchedDir}/", gerritDataRS).call()
+ StepPrepareGit("${env.WORKSPACE}/${RSHeadDir}/", gerritDataRSHEAD).call()
} else {
- dir("${env.WORKSPACE}/global_reclass/") {
- if (!gerrit.gerritPatchsetCheckout(SYSTEM_GIT_URL, SYSTEM_GIT_REF, "HEAD", CREDENTIALS_ID)) {
- common.errorMsg("Failed to obtain system reclass with url: ${SYSTEM_GIT_URL} and ${SYSTEM_GIT_REF}")
- throw new RuntimeException("Failed to obtain system reclass")
- }
- }
+ StepPrepareGit("${env.WORKSPACE}/${RSHeadDir}/", gerritDataRS).call()
+ sh("cd ${env.WORKSPACE} ; ln -svf ${RSHeadDir} ${RSPatchedDir}")
}
// link all models, to use one global reclass
// For HEAD
- dir(templateEnvHead) {
- for (String context : contextFileListHead) {
- def basename = common.GetBaseName(context, '.yml')
- dir("${templateEnvHead}/model/${basename}") {
- sh(script: 'mkdir -p classes/; ln -sfv ../../../../global_reclass classes/system ')
- }
- }
- // Save all models and all contexts. Warning! `h` flag must be used.
- sh(script: "tar -chzf head_reclass.tar.gz --exclude='*@tmp' model contexts global_reclass", returnStatus: true)
- archiveArtifacts artifacts: "head_reclass.tar.gz"
- // move for "Compare Pillars" stage
- sh(script: "mv -v head_reclass.tar.gz ${env.WORKSPACE}")
- }
+ linkReclassModels(contextFileListHead, templateEnvHead, headReclassArtifactName)
// For patched
- dir(templateEnvPatched) {
- for (String context : contextFileListPatched) {
- def basename = common.GetBaseName(context, '.yml')
- dir("${templateEnvPatched}/model/${basename}") {
- sh(script: 'mkdir -p classes/; ln -sfv ../../../../global_reclass classes/system ')
- }
- }
- // Save all models and all contexts. Warning! `h` flag must be used.
- sh(script: "tar -chzf patched_reclass.tar.gz --exclude='*@tmp' model contexts global_reclass", returnStatus: true)
- archiveArtifacts artifacts: "patched_reclass.tar.gz"
- // move for "Compare Pillars" stage
- sh(script: "mv -v patched_reclass.tar.gz ${env.WORKSPACE}")
- }
+ linkReclassModels(contextFileListPatched, templateEnvPatched, patchedReclassArtifactName)
}
- stage("Compare Pillars") {
+ stage("Compare cluster lvl Head/Patched") {
// Compare patched and HEAD reclass pillars
- compareRoot = "${env.WORKSPACE}/test_compare/"
+ compareRoot = "${env.WORKSPACE}/cluster_compare/"
+ // extract archive and drop all copied classes/system before comparing
sh(script: """
mkdir -pv ${compareRoot}/new ${compareRoot}/old
- tar -xzf patched_reclass.tar.gz --directory ${compareRoot}/new
- tar -xzf head_reclass.tar.gz --directory ${compareRoot}/old
+ tar -xzf ${patchedReclassArtifactName} --directory ${compareRoot}/new
+ tar -xzf ${headReclassArtifactName} --directory ${compareRoot}/old
+ find ${compareRoot} -name classes -type d -exec rm -rf '{}/system' \\;
""")
common.warningMsg('infra/secrets.yml has been skipped from compare!')
- rezult = common.comparePillars(compareRoot, env.BUILD_URL, "-Ev \'infra/secrets.yml\'")
- currentBuild.description = rezult
+ result = '\n' + common.comparePillars(compareRoot, env.BUILD_URL, "-Ev \'infra/secrets.yml|\\.git\'")
+ currentBuild.description = currentBuild.description ? currentBuild.description + result : result
}
- stage("test-contexts") {
- // Test contexts for patched only
- stepsForParallel = [:]
+ stage("TestContexts Head/Patched") {
+ def stepsForParallel = [:]
+ stepsForParallel.failFast = true
+ common.infoMsg("Found: ${contextFileListHead.size()} HEAD contexts to test.")
+ for (String context : contextFileListHead) {
+ def basename = common.GetBaseName(context, '.yml')
+ stepsForParallel.put("ContextHeadTest:${basename}", StepTestModel(basename, headReclassArtifactName, reclassInfoHeadPath))
+ }
common.infoMsg("Found: ${contextFileListPatched.size()} patched contexts to test.")
for (String context : contextFileListPatched) {
def basename = common.GetBaseName(context, '.yml')
- stepsForParallel.put("ContextPatchTest:${basename}", StepTestModel(basename))
+ stepsForParallel.put("ContextPatchedTest:${basename}", StepTestModel(basename, patchedReclassArtifactName, reclassInfoPatchedPath, true))
}
parallel stepsForParallel
- common.infoMsg('All tests done')
+ common.infoMsg('All TestContexts tests done')
}
+ stage("Compare NodesInfo Head/Patched") {
+ // Download all artifacts
+ def stepsForParallel = [:]
+ stepsForParallel.failFast = true
+ common.infoMsg("Found: ${testModelBuildsData.size()} nodeinfo artifacts to download.")
+ testModelBuildsData.each { bname, bdata ->
+ stepsForParallel.put("FetchData:${bname}",
+ getAndUnpackNodesInfoArtifact(bdata.jobname, bdata.copyToDir, bdata.buildId))
+ }
+ parallel stepsForParallel
+ // remove timestamp field from rendered files
+ sh("find ${reclassNodeInfoDir} -type f -exec sed -i '/ timestamp: .*/d' {} \\;")
+ // Compare patched and HEAD reclass pillars
+ result = '\n' + common.comparePillars(reclassNodeInfoDir, env.BUILD_URL, '')
+ currentBuild.description = currentBuild.description ? currentBuild.description + result : result
+ }
+ stage('Check include order') {
+ if (!checkIncludeOrder) {
+ common.infoMsg('Check include order requires too much time and is currently disabled!')
+ } else {
+ def correctIncludeOrder = ["service", "system", "cluster"]
+ dir(reclassInfoPatchedPath) {
+ def nodeInfoFiles = findFiles(glob: "**/*.reclass.nodeinfo")
+ def messages = ["<b>Wrong include ordering found</b><ul>"]
+ def stepsForParallel = [:]
+ nodeInfoFiles.each { nodeInfo ->
+ stepsForParallel.put("Checking ${nodeInfo.path}:", {
+ def node = readYaml file: nodeInfo.path
+ def classes = node['classes']
+ def curClassID = 0
+ def prevClassID = 0
+ def wrongOrder = false
+ for (String className in classes) {
+ def currentClass = className.tokenize('.')[0]
+ curClassID = correctIncludeOrder.indexOf(currentClass)
+ if (currentClass != correctIncludeOrder[prevClassID]) {
+ if (prevClassID > curClassID) {
+ wrongOrder = true
+ common.warningMsg("File ${nodeInfo.path} contains wrong order of classes including: Includes for ${className} should be declared before ${correctIncludeOrder[prevClassID]} includes")
+ } else {
+ prevClassID = curClassID
+ }
+ }
+ }
+ if (wrongOrder) {
+ messages.add("<li>${nodeInfo.path} contains wrong order of classes including</li>")
+ }
+ })
+ }
+ parallel stepsForParallel
+ def includerOrder = '<b>No wrong include order</b>'
+ if (messages.size() != 1) {
+ includerOrder = messages.join('')
+ }
+ currentBuild.description = currentBuild.description ? currentBuild.description + includerOrder : includerOrder
+ }
+ }
+ }
sh(script: 'find . -mindepth 1 -delete > /dev/null || true')
} catch (Throwable e) {
+ if (alreadyMerged) {
+ currentBuild.result = 'ABORTED'
+ currentBuild.description = "Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to gate them"
+ return
+ }
currentBuild.result = "FAILURE"
currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
throw e
} finally {
def dummy = "dummy"
- //FAILING common.sendNotification(currentBuild.result,"",["slack"])
}
}
}
diff --git a/test-customers-salt-models.groovy b/test-customers-salt-models.groovy
index 3c0ccaf..4a24918 100644
--- a/test-customers-salt-models.groovy
+++ b/test-customers-salt-models.groovy
@@ -22,8 +22,7 @@
// [$class: 'StringParameterValue', name: 'CLUSTER_NAME', value: modelName],
// [$class: 'StringParameterValue', name: 'NODE_TARGET', value: testTarget],
// [$class: 'StringParameterValue', name: 'FORMULAS_SOURCE', value: formulasSource]
- // [$class: 'StringParameterValue', name: 'EXTRA_FORMULAS', value: EXTRA_FORMULAS],
- // [$class: 'StringParameterValue', name: 'FORMULAS_REVISION', value: FORMULAS_REVISION],
+ // [$class: 'StringParameterValue', name: 'DISTRIB_REVISION', value: distribRevision],
// [$class: 'StringParameterValue', name: 'CREDENTIALS_ID', value: CREDENTIALS_ID],
// [$class: 'StringParameterValue', name: 'SYSTEM_GIT_URL', value: SYSTEM_GIT_URL],
// [$class: 'StringParameterValue', name: 'MAX_CPU_PER_JOB', value: MAX_CPU_PER_JOB],
diff --git a/test-openscap-pipeline.groovy b/test-openscap-pipeline.groovy
new file mode 100644
index 0000000..b886467
--- /dev/null
+++ b/test-openscap-pipeline.groovy
@@ -0,0 +1,325 @@
+/**
+ *
+ * Run openscap xccdf evaluation on given nodes
+ *
+ * Expected parameters:
+ * OPENSCAP_TEST_TYPE Type of OpenSCAP evaluation to run, either 'xccdf' or 'oval'
+ * SALT_MASTER_URL Full Salt API address.
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API.
+ *
+ * XCCDF_BENCHMARKS_DIR Base directory for XCCDF benchmarks (default /usr/share/xccdf-benchmarks/mirantis/)
+ * or OVAL definitions (default /usr/share/oval-definitions/mirantis/)
+ * XCCDF_BENCHMARKS List of pairs XCCDF benchmark filename and corresponding profile separated with ','
+ * these pairs are separated with semicolon
+ * (e.g. manila/openstack_manila-xccdf.xml,profilename;horizon/openstack_horizon-xccdf.xml,profile).
+ * For OVAL definitions, paths to OVAL definition files separated by semicolon, profile is ignored.
+ * XCCDF_VERSION The XCCDF version (default 1.2)
+ * XCCDF_TAILORING_ID The tailoring id (default None)
+ *
+ * TARGET_SERVERS The target Salt nodes (default *)
+ *
+ * ARTIFACTORY_URL The artifactory URL
+ * ARTIFACTORY_NAMESPACE The artifactory namespace (default 'mirantis/openscap')
+ * ARTIFACTORY_REPO The artifactory repo (default 'binary-dev-local')
+ *
+ * UPLOAD_TO_DASHBOARD Boolean. Upload results to the WORP or not
+ * DASHBOARD_API_URL The WORP api base url. Mandatory if UPLOAD_TO_DASHBOARD is true
+ */
+
+
+
+/**
+ * Upload results to the `WORP` dashboard
+ *
+ * @param apiUrl The base dashboard api url
+ * @param cloudName The cloud name (mostly, the given node's domain name)
+ * @param nodeName The node name
+ * @param reportType Type of the report to create/use, either 'openscap' or 'cve'
+ * @param reportId Report Id to re-use, if empty report will be created
+ * @param results The scanning results as a json file content (string)
+ * @return reportId The Id of the report created if incoming reportId was empty, otherwise incoming reportId
+ */
+def uploadResultToDashboard(apiUrl, cloudName, nodeName, reportType, reportId, results) {
+ def common = new com.mirantis.mk.Common()
+ def http = new com.mirantis.mk.Http()
+
+ // Yes, we do not care about performance and will create at least 4 requests per result
+ def requestData = [:]
+
+ def cloudId
+ def nodeId
+
+ def worpApi = [:]
+ worpApi["url"] = apiUrl
+
+ // Let's take a look - maybe our minion is already present on the dashboard
+ // Get available environments
+ common.infoMsg("Making GET to ${worpApi.url}/environment/")
+ environments = http.restGet(worpApi, "/environment/")
+ for (environment in environments) {
+ if (environment['name'] == cloudName) {
+ cloudId = environment['uuid']
+ break
+ }
+ }
+ // Cloud wasn't present, let's create it
+ if (! cloudId ) {
+ // Create cloud
+ requestData = [:]
+ requestData['name'] = cloudName
+ common.infoMsg("Making POST to ${worpApi.url}/environment/ with ${requestData}")
+ cloudId = http.restPost(worpApi, "/environment/", requestData)['env']['uuid']
+
+ // And the node
+ // It was done here to reduce the number of requests to the api.
+ // Because if the cloud was not present on the dashboard, the node was not present as well.
+ requestData = [:]
+ requestData['nodes'] = [nodeName]
+ common.infoMsg("Making PUT to ${worpApi.url}/environment/${cloudId}/nodes/ with ${requestData}")
+ nodeId = http.restCall(worpApi, "/environment/${cloudId}/nodes/", "PUT", requestData)['uuid']
+ }
+
+ if (! nodeId ) {
+ // Get available nodes in our environment
+ common.infoMsg("Making GET to ${worpApi.url}/environment/${cloudId}/nodes/")
+ nodes = http.restGet(worpApi, "/environment/${cloudId}/nodes/")
+ for (node in nodes) {
+ if (node['name'] == nodeName) {
+ nodeId = node['uuid']
+ break
+ }
+ }
+ }
+
+ // Node wasn't present, let's create it
+ if (! nodeId ) {
+ // Create node
+ requestData = [:]
+ requestData['nodes'] = [nodeName]
+ common.infoMsg("Making PUT to ${worpApi.url}/environment/${cloudId}/nodes/ with ${requestData}")
+ nodeId = http.restCall(worpApi, "/environment/${cloudId}/nodes/", "PUT", requestData)['uuid']
+ }
+
+ // Create report if needed
+ if (! reportId ) {
+ requestData = [:]
+ requestData['env_uuid'] = cloudId
+ common.infoMsg("Making POST to ${worpApi.url}/reports/${reportType}/ with ${requestData}")
+ reportId = http.restPost(worpApi, "/reports/${reportType}/", requestData)['report']['uuid']
+ }
+
+ // Upload results
+ // NOTE(pas-ha) results should already be a dict with 'results' key
+ requestData = common.parseJSON(results)
+ requestData['node_name'] = nodeName
+ common.infoMsg("First result in results to PUT is ${requestData['results'][0]}")
+ // NOTE(pas-ha) not logging whole results to be sent, is too large and just spams the logs
+ common.infoMsg("Making PUT to ${worpApi.url}/reports/${reportType}/${reportId}/ with node name ${requestData['node_name']} and results")
+ http.restCall(worpApi, "/reports/${reportType}/${reportId}/", "PUT", requestData)
+ return reportId
+}
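+// Illustrative usage (all values below are hypothetical):
+//   reportId = uploadResultToDashboard('https://worp.example/api', 'cloud.local',
+//       'cmp001.cloud.local', 'openscap', '', readFile('results.json'))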
+
+
+node('python') {
+ def salt = new com.mirantis.mk.Salt()
+ def python = new com.mirantis.mk.Python()
+ def common = new com.mirantis.mk.Common()
+ def http = new com.mirantis.mk.Http()
+ def validate = new com.mirantis.mcp.Validate()
+
+ def pepperEnv = 'pepperEnv'
+
+ def benchmarkType = OPENSCAP_TEST_TYPE ?: 'xccdf'
+ def reportType
+ def benchmarksDir
+
+ switch (benchmarkType) {
+ case 'xccdf':
+ reportType = 'openscap';
+ benchmarksDir = XCCDF_BENCHMARKS_DIR ?: '/usr/share/xccdf-benchmarks/mirantis/';
+ break;
+ case 'oval':
+ reportType = 'cve';
+ benchmarksDir = XCCDF_BENCHMARKS_DIR ?: '/usr/share/oval-definitions/mirantis/';
+ break;
+ default:
+ throw new Exception('Unsupported value for OPENSCAP_TEST_TYPE, must be "oval" or "xccdf".')
+ }
+ // XCCDF related variables
+ def benchmarksAndProfilesArray = XCCDF_BENCHMARKS.tokenize(';')
+ def xccdfVersion = XCCDF_VERSION ?: '1.2'
+ def xccdfTailoringId = XCCDF_TAILORING_ID ?: 'None'
+ def targetServers = TARGET_SERVERS ?: '*'
+
+ // To be able to work under heavy concurrency
+ def scanUUID = UUID.randomUUID().toString()
+
+ def artifactsArchiveName = "openscap-${scanUUID}.zip"
+ def resultsBaseDir = "/var/log/openscap/${scanUUID}"
+ def artifactsDir = "openscap"
+
+ def liveMinions
+
+
+ stage ('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ }
+
+ stage ('Run openscap evaluation and attempt to upload the results to a dashboard') {
+ liveMinions = salt.getMinions(pepperEnv, targetServers)
+
+ if (liveMinions.isEmpty()) {
+ throw new Exception('There are no alive minions')
+ }
+
+ common.infoMsg("Scan UUID: ${scanUUID}")
+
+ // Clean all results before proceeding with results from every minion
+ dir(artifactsDir) {
+ deleteDir()
+ }
+
+ def reportId
+ def lastError
+ // Iterate oscap evaluation over the benchmarks
+ for (benchmark in benchmarksAndProfilesArray) {
+ def (benchmarkFilePath, profileName) = benchmark.tokenize(',').collect({it.trim()})
+
+ // Remove extension from the benchmark name
+ def benchmarkPathWithoutExtension = benchmarkFilePath.replaceFirst('[.][^.]+$', '')
+
+ // Get benchmark name
+ def benchmarkName = benchmarkPathWithoutExtension.tokenize('/')[-1]
+
+ // And build resultsDir based on this path
+ def resultsDir = "${resultsBaseDir}/${benchmarkName}"
+ if (profileName) {
+ resultsDir = "${resultsDir}/${profileName}"
+ }
+
+ def benchmarkFile = "${benchmarksDir}${benchmarkFilePath}"
+
+ // Evaluate the benchmark on all minions at once
+ salt.runSaltProcessStep(pepperEnv, targetServers, 'oscap.eval', [
+ benchmarkType, benchmarkFile, "results_dir=${resultsDir}",
+ "profile=${profileName}", "xccdf_version=${xccdfVersion}",
+ "tailoring_id=${xccdfTailoringId}"
+ ])
+
+ salt.cmdRun(pepperEnv, targetServers, "rm -f /tmp/${scanUUID}.tar.xz; tar -cJf /tmp/${scanUUID}.tar.xz -C ${resultsBaseDir} .")
+
+ // fetch and store results one by one
+ for (minion in liveMinions) {
+ def nodeShortName = minion.tokenize('.')[0]
+ def localResultsDir = "${artifactsDir}/${scanUUID}/${nodeShortName}"
+
+ fileContentBase64 = validate.getFileContentEncoded(pepperEnv, minion, "/tmp/${scanUUID}.tar.xz")
+ writeFile file: "${scanUUID}.base64", text: fileContentBase64
+
+ sh "mkdir -p ${localResultsDir}"
+ sh "base64 -d ${scanUUID}.base64 | tar -xJ --strip-components 1 --directory ${localResultsDir}"
+ sh "rm -f ${scanUUID}.base64"
+ }
+
+ // Remove archives which are not needed anymore
+ salt.runSaltProcessStep(pepperEnv, targetServers, 'file.remove', "/tmp/${scanUUID}.tar.xz")
+
+ // publish results one by one
+ for (minion in liveMinions) {
+ def nodeShortName = minion.tokenize('.')[0]
+ def benchmarkResultsDir = "${artifactsDir}/${scanUUID}/${nodeShortName}/${benchmarkName}"
+ if (profileName) {
+ benchmarkResultsDir = "${benchmarkResultsDir}/${profileName}"
+ }
+
+ // Attempt to upload the scanning results to the dashboard
+ if (UPLOAD_TO_DASHBOARD.toBoolean()) {
+ if (common.validInputParam('DASHBOARD_API_URL')) {
+ def cloudName = salt.getGrain(pepperEnv, minion, 'domain')['return'][0].values()[0].values()[0]
+ try {
+ def nodeResults = readFile "${benchmarkResultsDir}/results.json"
+ reportId = uploadResultToDashboard(DASHBOARD_API_URL, cloudName, minion, reportType, reportId, nodeResults)
+ common.infoMsg("Report ID is ${reportId}.")
+ } catch (Exception e) {
+ lastError = e
+ }
+ } else {
+ throw new Exception('Uploading to the dashboard is enabled but the DASHBOARD_API_URL was not set')
+ }
+ }
+ }
+ }
+
+ // Prepare archive
+ sh "tar -cJf ${artifactsDir}.tar.xz ${artifactsDir}"
+
+ // Archive the build output artifacts
+ archiveArtifacts artifacts: "*.xz"
+ if (lastError) {
+ common.infoMsg("Uploading some results to the dashboard report ${reportId} failed. Raising last error.")
+ throw lastError
+ }
+ }
+
+/* // Will be implemented later
+ stage ('Attempt to upload results to an artifactory') {
+ if (common.validInputParam('ARTIFACTORY_URL')) {
+ for (minion in liveMinions) {
+ def destDir = "${artifactsDir}/${minion}"
+ def archiveName = "openscap-${scanUUID}.tar.gz"
+ def tempArchive = "/tmp/${archiveName}"
+ def destination = "${destDir}/${archiveName}"
+
+ dir(destDir) {
+ // Archive scanning results on the remote target
+ salt.runSaltProcessStep(pepperEnv, minion, 'archive.tar', ['czf', tempArchive, resultsBaseDir])
+
+ // Get its content and save it
+ writeFile file: destination, text: salt.getFileContent(pepperEnv, minion, tempArchive)
+
+ // Remove scanning results and the temp archive on the remote target
+ salt.runSaltProcessStep(pepperEnv, minion, 'file.remove', resultsBaseDir)
+ salt.runSaltProcessStep(pepperEnv, minion, 'file.remove', tempArchive)
+ }
+ }
+
+ def artifactory = new com.mirantis.mcp.MCPArtifactory()
+ def artifactoryName = 'mcp-ci'
+ def artifactoryRepo = ARTIFACTORY_REPO ?: 'binary-dev-local'
+ def artifactoryNamespace = ARTIFACTORY_NAMESPACE ?: 'mirantis/openscap'
+ def artifactoryServer = Artifactory.server(artifactoryName)
+ def publishInfo = true
+ def buildInfo = Artifactory.newBuildInfo()
+ def zipName = "${env.WORKSPACE}/openscap/${scanUUID}/results.zip"
+
+ // Zip scan results
+ zip zipFile: zipName, archive: false, dir: artifactsDir
+
+ // Mandatory and additional properties
+ def properties = artifactory.getBinaryBuildProperties([
+ "scanUuid=${scanUUID}",
+ "project=openscap"
+ ])
+
+ // Build Artifactory spec object
+ def uploadSpec = """{
+ "files":
+ [
+ {
+ "pattern": "${zipName}",
+ "target": "${artifactoryRepo}/${artifactoryNamespace}/openscap",
+ "props": "${properties}"
+ }
+ ]
+ }"""
+
+ // Upload artifacts to the given Artifactory
+ artifactory.uploadBinariesToArtifactory(artifactoryServer, buildInfo, uploadSpec, publishInfo)
+
+ } else {
+ common.warningMsg('ARTIFACTORY_URL was not given, skip uploading to artifactory')
+ }
+ }
+*/
+
+}
diff --git a/test-openstack-component-pipeline.groovy b/test-openstack-component-pipeline.groovy
index c660c28..5f1730f 100644
--- a/test-openstack-component-pipeline.groovy
+++ b/test-openstack-component-pipeline.groovy
@@ -4,9 +4,6 @@
* Flow parameters:
* CREDENTIALS_ID
- * EXTRA_FORMULAS
- * FORMULAS_REVISION
- * FORMULAS_SOURCE
* SALT_OPTS
* STACK_DEPLOY_JOB
diff --git a/test-reclass-package.groovy b/test-reclass-package.groovy
new file mode 100644
index 0000000..109d986
--- /dev/null
+++ b/test-reclass-package.groovy
@@ -0,0 +1,45 @@
+/**
+ * Check new Reclass version against current model.
+ *
+ * Expected parameters:
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API.
+ * SALT_MASTER_URL Full Salt API address [http://10.10.10.1:8000].
+ * DISTRIB_REVISION Mirror version to use
+ * EXTRA_REPO_PREDEFINED Use mcp extra repo defined on host
+ * EXTRA_REPO Extra repo to use in format (for example, deb [arch=amd64] http://apt.mirantis.com/xenial/ nightly extra)
+ * EXTRA_REPO_GPG_KEY_URL GPG key URL for extra repo
+ * TARGET_NODES Target specification, e.g. 'I@openssh:server'
+**/
+
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+def saltModel = new com.mirantis.mk.SaltModelTesting()
+def python = new com.mirantis.mk.Python()
+
+def env = "env"
+def extraRepo = env.EXTRA_REPO
+def extraRepoKey = env.EXTRA_REPO_GPG_KEY_URL
+def targetNodes = env.TARGET_NODES
+def distribRevision = env.DISTRIB_REVISION
+def usePredefinedExtra = env.EXTRA_REPO_PREDEFINED
+node('cfg') {
+
+ stage('Setup virtualenv for Pepper') {
+ python.setupPepperVirtualenv(venv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ }
+
+ def minions = salt.getMinionsSorted(venv, targetNodes)
+ if (usePredefinedExtra) {
+ def mcp_extra = salt.getPillar(venv, 'I@salt:master', "linux:system:repo:mcp_extra").get("return")[0].values()[0]
+ extraRepoKey = mcp_extra['key_url']
+ extraRepo = mcp_extra['source']
+ }
+ def config = [
+ 'distribRevision': distribRevision,
+ 'targetNodes': minions,
+ 'extraRepo': extraRepo,
+ 'extraRepoKey': extraRepoKey,
+ 'venv': venv
+ ]
+ saltModel.compareReclassVersions(config)
+}
diff --git a/test-salt-formulas-env.groovy b/test-salt-formulas-env.groovy
index be9c894..e2dbf83 100644
--- a/test-salt-formulas-env.groovy
+++ b/test-salt-formulas-env.groovy
@@ -64,8 +64,8 @@
common.infoMsg("Running part of kitchen test")
if (KITCHEN_ENV != null && !KITCHEN_ENV.isEmpty() && KITCHEN_ENV != "") {
def cleanEnv = KITCHEN_ENV.replaceAll("\\s?SUITE=[^\\s]*", "")
- sh("find . -type f -exec sed -i 's/apt.mirantis.com/apt.mirantis.net:8085/g' {} \\;")
- sh("find . -type f -exec sed -i 's/apt-mk.mirantis.com/apt.mirantis.net:8085/g' {} \\;")
+ sh("find . -type f -exec sed -i 's/apt.mirantis.com/apt.mcp.mirantis.net/g' {} \\;")
+ sh("find . -type f -exec sed -i 's/apt-mk.mirantis.com/apt.mcp.mirantis.net/g' {} \\;")
def suite = ruby.getSuiteName(KITCHEN_ENV)
if (suite && suite != "") {
common.infoMsg("Running kitchen test with environment:" + KITCHEN_ENV.trim())
diff --git a/test-salt-model-node.groovy b/test-salt-model-node.groovy
index ed525bd..27e0909 100644
--- a/test-salt-model-node.groovy
+++ b/test-salt-model-node.groovy
@@ -4,13 +4,11 @@
* DEFAULT_GIT_REF
* DEFAULT_GIT_URL
* CREDENTIALS_ID
- * EXTRA_FORMULAS
* CLUSTER_NAME
* NODE_TARGET
* SYSTEM_GIT_URL
* SYSTEM_GIT_REF
- * FORMULAS_SOURCE
- * RECLASS_VERSION
+ * DISTRIB_REVISION of apt mirror to be used (http://mirror.mirantis.com/DISTRIB_REVISION/ by default)
* MAX_CPU_PER_JOB
* LEGACY_TEST_MODE
* RECLASS_IGNORE_CLASS_NOTFOUND
@@ -24,16 +22,12 @@
def ssh = new com.mirantis.mk.Ssh()
def saltModelTesting = new com.mirantis.mk.SaltModelTesting()
-def defaultGitRef = DEFAULT_GIT_REF
-def defaultGitUrl = DEFAULT_GIT_URL
+def defaultGitRef = env.DEFAULT_GIT_REF ?: null
+def defaultGitUrl = env.DEFAULT_GIT_URL ?: null
+def distribRevision = env.DISTRIB_REVISION ?: 'nightly'
def checkouted = false
-def reclassVersion = 'v1.5.4'
-if (common.validInputParam('RECLASS_VERSION')) {
- reclassVersion = RECLASS_VERSION
-}
-
throttle(['test-model']) {
timeout(time: 1, unit: 'HOURS') {
node("python&&docker") {
@@ -67,38 +61,22 @@
stage("test node") {
if (checkouted) {
def workspace = common.getWorkspace()
- def testResult = false
common.infoMsg("Running salt model test for node ${NODE_TARGET} in cluster ${CLUSTER_NAME}")
- try {
- def DockerCName = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}"
- testResult = saltModelTesting.setupAndTestNode(
- NODE_TARGET,
- CLUSTER_NAME,
- EXTRA_FORMULAS,
- workspace,
- FORMULAS_SOURCE,
- FORMULAS_REVISION,
- reclassVersion,
- MAX_CPU_PER_JOB.toInteger(),
- RECLASS_IGNORE_CLASS_NOTFOUND,
- LEGACY_TEST_MODE,
- APT_REPOSITORY,
- APT_REPOSITORY_GPG,
- DockerCName)
- } catch (Exception e) {
- if (e.getMessage() == "script returned exit code 124") {
- common.errorMsg("Impossible to test node due to timeout of salt-master, ABORTING BUILD")
- currentBuild.result = "ABORTED"
- } else {
- throw e
- }
- }
- if (testResult) {
- common.infoMsg("Test finished: SUCCESS")
- } else {
- error('Test node finished: FAILURE')
- throw new RuntimeException('Test node stage finished: FAILURE')
- }
+
+ def DockerCName = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}"
+ def config = [
+ 'dockerHostname': NODE_TARGET,
+ 'clusterName': CLUSTER_NAME,
+ 'reclassEnv': workspace,
+ 'distribRevision': distribRevision,
+ 'dockerMaxCpus': MAX_CPU_PER_JOB.toInteger(),
+ 'ignoreClassNotfound': RECLASS_IGNORE_CLASS_NOTFOUND,
+ 'aptRepoUrl': APT_REPOSITORY,
+ 'aptRepoGPG': APT_REPOSITORY_GPG,
+ 'dockerContainerName': DockerCName,
+ 'testContext': 'salt-model-node'
+ ]
+ saltModelTesting.testNode(config)
}
}
} catch (Throwable e) {
diff --git a/test-salt-models-pipeline.groovy b/test-salt-models-pipeline.groovy
index 6a37ac7..3b88aee 100644
--- a/test-salt-models-pipeline.groovy
+++ b/test-salt-models-pipeline.groovy
@@ -3,95 +3,57 @@
* DEFAULT_GIT_URL default git url (will be used if pipeline run is not triggered by gerrit)
* DEFAULT_GIT_RED default git ref (branch,tag,...) (will be used if pipeline run is not triggered by gerrit)
* CREDENTIALS_ID Jenkins credetials id for git checkout
- * EXTRA_FORMULAS extra formulas list for passing to salt bootstrap script
* MAX_CPU_PER_JOB max cpu count for one docket test instance
* SYSTEM_GIT_URL reclass system git URL (optional)
* SYSTEM_GIT_REF reclass system git URL (optional)
* TEST_CLUSTER_NAMES list of comma separated cluster names to test (optional, default all cluster levels)
* LEGACY_TEST_MODE legacy test mode flag
* RECLASS_IGNORE_CLASS_NOTFOUND ignore missing class flag for reclass config
- * RECLASS_VERSION Version of reclass to be used (branch, ...)
+ * DISTRIB_REVISION of apt mirror to be used (http://mirror.mirantis.com/DISTRIB_REVISION/ by default)
* APT_REPOSITORY extra apt repository url
* APT_REPOSITORY_GPG extra apt repository url GPG
*/
def gerrit = new com.mirantis.mk.Gerrit()
+common = new com.mirantis.mk.Common()
def ssh = new com.mirantis.mk.Ssh()
def git = new com.mirantis.mk.Git()
-def config_node_name_pattern
-try {
- config_node_name_pattern = CONFIG_NODE_NAME_PATTERN
-} catch (MissingPropertyException e) {
- config_node_name_pattern = "cfg01"
-}
+def config_node_name_pattern = env.CONFIG_NODE_NAME_PATTERN ?: 'cfg01'
+def gerritRef = env.GERRIT_REFSPEC ?: null
+def formulasSource = env.FORMULAS_SOURCE ?: 'pkg'
+distribRevision = env.DISTRIB_REVISION ?: 'nightly'
-def gerritRef
-try {
- gerritRef = GERRIT_REFSPEC
-} catch (MissingPropertyException e) {
- gerritRef = null
-}
-
-def formulasSource
-try {
- formulasSource = FORMULAS_SOURCE
-} catch (MissingPropertyException e) {
- formulasSource = "pkg"
-}
-
-def testClusterNames
-try {
- testClusterNames = TEST_CLUSTER_NAMES
-} catch (MissingPropertyException e) {
- testClusterNames = ""
-}
-
-def defaultGitRef, defaultGitUrl
-try {
- defaultGitRef = DEFAULT_GIT_REF
- defaultGitUrl = DEFAULT_GIT_URL
-} catch (MissingPropertyException e) {
- defaultGitRef = null
- defaultGitUrl = null
-}
+def testClusterNames = env.TEST_CLUSTER_NAMES ?: ''
+def defaultGitRef = env.DEFAULT_GIT_REF ?: null
+def defaultGitUrl = env.DEFAULT_GIT_URL ?: null
def checkouted = false
futureNodes = []
failedNodes = false
-common = new com.mirantis.mk.Common()
+
def setupRunner() {
-
- def branches = [:]
- for (int i = 0; i < PARALLEL_NODE_GROUP_SIZE.toInteger() && i < futureNodes.size(); i++) {
- branches["Runner ${i}"] = {
- while (futureNodes && !failedNodes) {
- def currentNode = futureNodes[0] ? futureNodes[0] : null
+ def branches = [:]
+ branches.failFast = true
+ for(int i = 0; i < futureNodes.size(); i++) {
+ def currentNode = futureNodes[i] ? futureNodes[i] : null
if (!currentNode) {
- continue
+ continue
}
-
- def clusterName = currentNode[2]
- futureNodes.remove(currentNode)
- try {
- triggerTestNodeJob(currentNode[0], currentNode[1], currentNode[2], currentNode[3], currentNode[4])
- } catch (Exception e) {
- if (e.getMessage().contains("completed with status ABORTED")) {
- common.warningMsg("Test of ${clusterName} failed because the test was aborted : ${e}")
- futureNodes << currentNode
- } else {
- common.warningMsg("Test of ${clusterName} failed : ${e}")
- failedNodes = true
- }
+ branches["Runner ${i}"] = {
+ try {
+ triggerTestNodeJob(currentNode[0], currentNode[1], currentNode[2], currentNode[3], currentNode[4])
+ } catch (Exception e) {
+ common.warningMsg("Test of ${currentNode[2]} failed: ${e}")
+ throw e
+ }
}
- }
}
- }
- if (branches) {
- parallel branches
- }
+ if (branches) {
+ common.runParallel(branches, PARALLEL_NODE_GROUP_SIZE.toInteger())
+ }
}
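
setupRunner now builds one fail-fast branch per node and delegates throttling to common.runParallel, instead of keeping PARALLEL_NODE_GROUP_SIZE long-lived runner closures pulling work off a shared list. A sketch of a bounded runner in the same spirit, assuming only the stock Jenkins parallel step (the shared library's actual scheduling may differ):

    // Bounded parallel execution: run branches in windows of at most maxParallel.
    def runParallelSketch(Map branches, int maxParallel) {
        def names = branches.keySet().toList()
        for (int i = 0; i < names.size(); i += maxParallel) {
            def window = [failFast: true]   // abort the window on the first failure
            names.subList(i, Math.min(i + maxParallel, names.size())).each { name ->
                window[name] = branches[name]
            }
            parallel window                 // stock Jenkins step
        }
    }
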
def triggerTestNodeJob(defaultGitUrl, defaultGitRef, clusterName, testTarget, formulasSource) {
@@ -102,11 +64,9 @@
[$class: 'StringParameterValue', name: 'CLUSTER_NAME', value: clusterName],
[$class: 'StringParameterValue', name: 'NODE_TARGET', value: testTarget],
[$class: 'StringParameterValue', name: 'FORMULAS_SOURCE', value: formulasSource],
- [$class: 'StringParameterValue', name: 'EXTRA_FORMULAS', value: EXTRA_FORMULAS],
- [$class: 'StringParameterValue', name: 'FORMULAS_REVISION', value: FORMULAS_REVISION],
[$class: 'StringParameterValue', name: 'CREDENTIALS_ID', value: CREDENTIALS_ID],
[$class: 'StringParameterValue', name: 'SYSTEM_GIT_URL', value: SYSTEM_GIT_URL],
- [$class: 'StringParameterValue', name: 'RECLASS_VERSION', value: RECLASS_VERSION],
+ [$class: 'StringParameterValue', name: 'DISTRIB_REVISION', value: distribRevision],
[$class: 'StringParameterValue', name: 'MAX_CPU_PER_JOB', value: MAX_CPU_PER_JOB],
[$class: 'StringParameterValue', name: 'SYSTEM_GIT_REF', value: SYSTEM_GIT_REF],
[$class: 'BooleanParameterValue', name: 'LEGACY_TEST_MODE', value: LEGACY_TEST_MODE.toBoolean()],
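
Throughout these pipelines, the try/catch-on-MissingPropertyException idiom for optional parameters is replaced by env lookups with the Elvis operator: env.FOO is null when the variable is unset, so no exception handling is needed. One behavioural nuance: ?: also replaces an empty string (Groovy-false) with the default, which the old idiom did not. Side by side:

    // Old idiom: referencing an undefined binding variable throws.
    def formulasSource
    try {
        formulasSource = FORMULAS_SOURCE
    } catch (MissingPropertyException e) {
        formulasSource = 'pkg'
    }

    // New idiom: env.* is null when unset, so the Elvis operator suffices.
    // Caveat: an empty string is also Groovy-false and falls back to 'pkg'.
    formulasSource = env.FORMULAS_SOURCE ?: 'pkg'
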
diff --git a/test-service.groovy b/test-service.groovy
deleted file mode 100644
index f9c34e3..0000000
--- a/test-service.groovy
+++ /dev/null
@@ -1,96 +0,0 @@
-/**
- *
- * Service test pipeline
- *
- * Expected parameters:
- * SALT_MASTER_URL URL of Salt master
- * SALT_MASTER_CREDENTIALS Credentials to the Salt API
- * Test settings:
- * TEST_SERVICE Comma separated list of services to test
- * TEST_K8S_API_SERVER Kubernetes API address
- * TEST_K8S_CONFORMANCE_IMAGE Path to docker image with conformance e2e tests
- * TEST_DOCKER_INSTALL Install docker on the target if true
- * TEST_TEMPEST_IMAGE Tempest image link
- * TEST_TEMPEST_PATTERN If not false, run tests matched to pattern only
- * TEST_TEMPEST_TARGET Salt target for tempest node
- *
- */
-
-common = new com.mirantis.mk.Common()
-git = new com.mirantis.mk.Git()
-salt = new com.mirantis.mk.Salt()
-test = new com.mirantis.mk.Test()
-def python = new com.mirantis.mk.Python()
-
-def pepperEnv = "pepperEnv"
-timeout(time: 12, unit: 'HOURS') {
- node("python") {
- try {
-
- stage('Setup virtualenv for Pepper') {
- python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
- }
-
- //
- // Test
- //
- def artifacts_dir = '_artifacts/'
-
- if (common.checkContains('TEST_SERVICE', 'k8s')) {
- stage('Run k8s bootstrap tests') {
- def image = 'tomkukral/k8s-scripts'
- def output_file = image.replaceAll('/', '-') + '.output'
-
- // run image
- test.runConformanceTests(pepperEnv, 'ctl01*', TEST_K8S_API_SERVER, image)
-
- // collect output
- sh "mkdir -p ${artifacts_dir}"
- file_content = salt.getFileContent(pepperEnv, 'ctl01*', '/tmp/' + output_file)
- writeFile file: "${artifacts_dir}${output_file}", text: file_content
- sh "cat ${artifacts_dir}${output_file}"
-
- // collect artifacts
- archiveArtifacts artifacts: "${artifacts_dir}${output_file}"
- }
-
- stage('Run k8s conformance e2e tests') {
- def image = K8S_CONFORMANCE_IMAGE
- def output_file = image.replaceAll('/', '-') + '.output'
-
- // run image
- test.runConformanceTests(pepperEnv, 'ctl01*', TEST_K8S_API_SERVER, image)
-
- // collect output
- sh "mkdir -p ${artifacts_dir}"
- file_content = salt.getFileContent(pepperEnv, 'ctl01*', '/tmp/' + output_file)
- writeFile file: "${artifacts_dir}${output_file}", text: file_content
- sh "cat ${artifacts_dir}${output_file}"
-
- // collect artifacts
- archiveArtifacts artifacts: "${artifacts_dir}${output_file}"
- }
- }
-
- if (common.checkContains('TEST_SERVICE', 'openstack')) {
- if (common.checkContains('TEST_DOCKER_INSTALL', 'true')) {
- test.install_docker(pepperEnv, TEST_TEMPEST_TARGET)
- }
-
- stage('Run OpenStack tests') {
- test.runTempestTests(pepperEnv, TEST_TEMPEST_IMAGE, TEST_TEMPEST_TARGET, TEST_TEMPEST_PATTERN)
- }
-
- writeFile(file: 'report.xml', text: salt.getFileContent(pepperEnv, TEST_TEMPEST_TARGET, '/root/report.xml'))
- junit(keepLongStdio: true, testResults: 'report.xml', healthScaleFactor: Double.parseDouble(TEST_JUNIT_RATIO))
- def testResults = test.collectJUnitResults(currentBuild.rawBuild.getAction(hudson.tasks.test.AbstractTestResultAction.class))
- if(testResults){
- currentBuild.desc = String.format("result: %s", testResults["failed"] / testResults["total"])
- }
- }
- } catch (Throwable e) {
- currentBuild.result = 'FAILURE'
- throw e
- }
- }
-}
diff --git a/test-system-reclass-pipeline.groovy b/test-system-reclass-pipeline.groovy
index fa16739..04eafeb 100644
--- a/test-system-reclass-pipeline.groovy
+++ b/test-system-reclass-pipeline.groovy
@@ -1,103 +1,103 @@
def gerrit = new com.mirantis.mk.Gerrit()
def common = new com.mirantis.mk.Common()
-def gerritCredentials
-try {
- gerritCredentials = CREDENTIALS_ID
-} catch (MissingPropertyException e) {
- gerritCredentials = "gerrit"
+// extraVarsYaml contains GERRIT_ vars from gate job
+// or will contain GERRIT_ vars from reclass-system patch
+def extraVarsYaml = env.EXTRA_VARIABLES_YAML ?: ''
+if (extraVarsYaml != '') {
+ common.mergeEnv(env, extraVarsYaml)
+} else {
+ extraVarsYaml = '\n---'
+ for (envVar in env.getEnvironment()) {
+ if (envVar.key.startsWith("GERRIT_")) {
+ extraVarsYaml += "\n${envVar.key}: '${envVar.value}'"
+ }
+ }
}
-def gerritRef
-try {
- gerritRef = GERRIT_REFSPEC
-} catch (MissingPropertyException e) {
- gerritRef = null
-}
+def slaveNode = env.SLAVE_NODE ?: 'python&&docker'
+def gerritCredentials = env.CREDENTIALS_ID ?: 'gerrit'
-def defaultGitRef, defaultGitUrl
-try {
- defaultGitRef = DEFAULT_GIT_REF
- defaultGitUrl = DEFAULT_GIT_URL
-} catch (MissingPropertyException e) {
- defaultGitRef = null
- defaultGitUrl = null
-}
+def gerritRef = env.GERRIT_REFSPEC ?: null
+def defaultGitRef = env.DEFAULT_GIT_REF ?: null
+def defaultGitUrl = env.DEFAULT_GIT_URL ?: null
+
def checkouted = false
def merged = false
def systemRefspec = "HEAD"
-def formulasRevision = 'testing'
+
timeout(time: 12, unit: 'HOURS') {
- node() {
- try {
- stage("Checkout") {
- if (gerritRef) {
- // job is triggered by Gerrit
- // test if change aren't already merged
- def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, gerritCredentials)
- merged = gerritChange.status == "MERGED"
- if(!merged){
- checkouted = gerrit.gerritPatchsetCheckout ([
- credentialsId : gerritCredentials
- ])
- systemRefspec = GERRIT_REFSPEC
- }
- // change defaultGit variables if job triggered from Gerrit
- defaultGitUrl = "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"
- } else if(defaultGitRef && defaultGitUrl) {
- checkouted = gerrit.gerritPatchsetCheckout(defaultGitUrl, defaultGitRef, "HEAD", gerritCredentials)
+ node(slaveNode) {
+ try {
+ stage("Checkout") {
+ if (gerritRef) {
+ // job is triggered by Gerrit
+ // test if the change isn't already merged
+ def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, gerritCredentials)
+ merged = gerritChange.status == "MERGED"
+ if (!merged) {
+ checkouted = gerrit.gerritPatchsetCheckout([
+ credentialsId: gerritCredentials
+ ])
+ systemRefspec = GERRIT_REFSPEC
+ }
+ // change defaultGit variables if job triggered from Gerrit
+ defaultGitUrl = "${GERRIT_SCHEME}://${GERRIT_NAME}@${GERRIT_HOST}:${GERRIT_PORT}/${GERRIT_PROJECT}"
+ } else if (defaultGitRef && defaultGitUrl) {
+ checkouted = gerrit.gerritPatchsetCheckout(defaultGitUrl, defaultGitRef, "HEAD", gerritCredentials)
+ }
+ }
+
+ stage("Test") {
+ if (merged) {
+ common.successMsg("Gerrit change is already merged, no need to test it")
+ } else {
+ if (checkouted) {
+
+ def documentationOnly = false
+ if (gerritRef) {
+ documentationOnly = sh(script: "git diff-tree --no-commit-id --name-only -r HEAD | grep -v .releasenotes", returnStatus: true) == 1
+ }
+
+ sh("git diff-tree --no-commit-id --diff-filter=d --name-only -r HEAD | grep .yml | xargs -I {} python -c \"import yaml; yaml.load(open('{}', 'r'))\" \\;")
+
+ def branches = [:]
+ def testModels = documentationOnly ? [] : TEST_MODELS.split(',')
+ if (['master'].contains(env.GERRIT_BRANCH)) {
+ for (int i = 0; i < testModels.size(); i++) {
+ def cluster = testModels[i]
+ def clusterGitUrl = defaultGitUrl.substring(0, defaultGitUrl.lastIndexOf("/") + 1) + cluster
+ branches["${cluster}"] = {
+ build job: "test-salt-model-${cluster}", parameters: [
+ [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: clusterGitUrl],
+ [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: "HEAD"],
+ [$class: 'StringParameterValue', name: 'SYSTEM_GIT_URL', value: defaultGitUrl],
+ [$class: 'StringParameterValue', name: 'SYSTEM_GIT_REF', value: systemRefspec]
+ ]
+ }
+ }
+ } else {
+ common.warningMsg("Tests for ${testModels} skipped!")
+ }
+ branches["cookiecutter"] = {
+ build job: "test-mk-cookiecutter-templates", parameters: [
+ [$class: 'StringParameterValue', name: 'RECLASS_SYSTEM_URL', value: defaultGitUrl],
+ [$class: 'StringParameterValue', name: 'RECLASS_SYSTEM_GIT_REF', value: systemRefspec],
+ [$class: 'TextParameterValue', name: 'EXTRA_VARIABLES_YAML', value: extraVarsYaml ]
+ ]
+ }
+ parallel branches
+ } else {
+ error("Cannot checkout gerrit patchset, GERRIT_REFSPEC and DEFAULT_GIT_REF are null")
+ }
+ }
+ }
+ } catch (Throwable e) {
+ currentBuild.result = "FAILURE"
+ currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+ throw e
+ } finally {
+ common.sendNotification(currentBuild.result, "", ["slack"])
}
- }
-
- stage("Test") {
- if(merged){
- common.successMsg("Gerrit change is already merged, no need to test them")
- }else{
- if(checkouted){
-
- def documentationOnly = false
- if (gerritRef) {
- documentationOnly = sh(script: "git diff-tree --no-commit-id --name-only -r HEAD | grep -v .releasenotes", returnStatus: true) == 1
- }
-
- sh("git diff-tree --no-commit-id --diff-filter=d --name-only -r HEAD | grep .yml | xargs -I {} python -c \"import yaml; yaml.load(open('{}', 'r'))\" \\;")
-
- def branches = [:]
- def testModels = documentationOnly ? [] : TEST_MODELS.split(',')
- for (int i = 0; i < testModels.size(); i++) {
- def cluster = testModels[i]
- def clusterGitUrl = defaultGitUrl.substring(0, defaultGitUrl.lastIndexOf("/") + 1) + cluster
- branches["${cluster}"] = {
- build job: "test-salt-model-${cluster}", parameters: [
- [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: clusterGitUrl],
- [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: "HEAD"],
- [$class: 'StringParameterValue', name: 'SYSTEM_GIT_URL', value: defaultGitUrl],
- [$class: 'StringParameterValue', name: 'SYSTEM_GIT_REF', value: systemRefspec],
- [$class: 'StringParameterValue', name: 'FORMULAS_REVISION', value: formulasRevision],
- ]
- }
- }
- branches["cookiecutter"] = {
- build job: "test-mk-cookiecutter-templates", parameters: [
- [$class: 'StringParameterValue', name: 'SYSTEM_GIT_URL', value: defaultGitUrl],
- [$class: 'StringParameterValue', name: 'SYSTEM_GIT_REF', value: systemRefspec],
- [$class: 'StringParameterValue', name: 'DISTRIB_REVISION', value: formulasRevision]
-
- ]
- }
- parallel branches
- }else{
- throw new Exception("Cannot checkout gerrit patchset, GERRIT_REFSPEC and DEFAULT_GIT_REF is null")
- }
- }
- }
- } catch (Throwable e) {
- // If there was an error or exception thrown, the build failed
- currentBuild.result = "FAILURE"
- currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
- throw e
- } finally {
- common.sendNotification(currentBuild.result,"",["slack"])
}
- }
}
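
The gating flow above is symmetric: a gate job passes its Gerrit context in via EXTRA_VARIABLES_YAML and common.mergeEnv folds it into env, while a directly triggered run reconstructs an equivalent YAML document from the GERRIT_* variables so it can be forwarded unchanged to test-mk-cookiecutter-templates. A sketch of the serialisation half, with hypothetical values (real values containing a single quote would need escaping before this naive quoting is safe):

    // Serialise GERRIT_* variables into the minimal YAML document
    // expected by EXTRA_VARIABLES_YAML. Values below are hypothetical.
    def extraVarsYaml = '\n---'
    def gerritVars = [GERRIT_PROJECT: 'salt-models/reclass-system',
                      GERRIT_REFSPEC : 'refs/changes/42/12342/1']
    gerritVars.each { key, value ->
        extraVarsYaml += "\n${key}: '${value}'"
    }
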
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
index d5c0e77..8c4d907 100644
--- a/upgrade-mcp-release.groovy
+++ b/upgrade-mcp-release.groovy
@@ -35,7 +35,7 @@
salt.runSaltProcessStep(venvPepper, target, 'pkg.install', ["force_yes=True", "pkgs='$pkgs'"], null, true, 5)
}catch(Exception ex){}
- common.retry(10, 30){
+ common.retry(20, 60){
salt.minionsReachable(venvPepper, 'I@salt:master', '*')
def running = salt.runSaltProcessStep(venvPepper, target, 'saltutil.running', [], null, true, 5)
for(value in running.get("return")[0].values()){
@@ -95,8 +95,8 @@
def dateTime = common.getDatetime()
salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && grep -r --exclude-dir=aptly -l 'apt_mk_version: .*' * | xargs sed -i 's/apt_mk_version: .*/apt_mk_version: \"$MCP_VERSION\"/g'")
 common.infoMsg("The following changes were made to the cluster model and will be committed. Please consider whether you want to push them to the remote repository. You have to do this manually when the run is finished.")
- salt.cmdRun(venvPepper, 'I@salt.master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && git diff")
- salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && git status && git add -u && git commit -m 'Cluster model update to the release $MCP_VERSION on $dateTime'")
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && git diff")
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && git status && git add -u && git commit --allow-empty -m 'Cluster model update to the release $MCP_VERSION on $dateTime'")
}
try{
@@ -106,13 +106,19 @@
error("You have unstaged changes in your Reclass system model repository. Please reset them and rerun the pipeline.")
}
salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/system && git checkout $gitMcpVersion")
+ // Add new defaults
+ common.infoMsg("Add new defaults")
+ salt.cmdRun(venvPepper, 'I@salt:master', "grep '^- system.defaults\$' /srv/salt/reclass/classes/cluster/*/infra/init.yml || " +
+ "sed -i 's/^classes:/classes:\\n- system.defaults/' /srv/salt/reclass/classes/cluster/*/infra/init.yml")
+ salt.enforceState(venvPepper, 'I@salt:master', 'reclass.storage', true)
}
if(UPDATE_LOCAL_REPOS.toBoolean()){
+ def cluster_name = salt.getPillar(venvPepper, 'I@salt:master', "_param:cluster_name").get("return")[0].values()[0]
stage("Update local repos"){
common.infoMsg("Updating local repositories")
- def engine = salt.getPillar(venvPepper, 'I@aptly:server', "aptly:server:source:engine")
+ def engine = salt.getPillar(venvPepper, 'I@aptly:publisher', "aptly:publisher:source:engine")
runningOnDocker = engine.get("return")[0].containsValue("docker")
if (runningOnDocker) {
@@ -122,39 +128,41 @@
 common.infoMsg("Aptly isn't running as a Docker container. Going to use the aptly user for executing aptly commands")
}
- salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name/cicd/aptly && git checkout $MCP_VERSION")
-
if(runningOnDocker){
- salt.cmdRun(venvPepper, 'I@aptly:server', "aptly mirror list --raw | grep -E '*' | xargs -n 1 aptly mirror drop -force", true, null, true)
+ salt.cmdRun(venvPepper, 'I@aptly:publisher', "aptly mirror list --raw | grep -E '*' | xargs -n 1 aptly mirror drop -force", true, null, true)
}
else{
- salt.cmdRun(venvPepper, 'I@aptly:server', "aptly mirror list --raw | grep -E '*' | xargs -n 1 aptly mirror drop -force", true, null, true, ['runas=aptly'])
+ salt.cmdRun(venvPepper, 'I@aptly:publisher', "aptly mirror list --raw | grep -E '*' | xargs -n 1 aptly mirror drop -force", true, null, true, ['runas=aptly'])
}
- salt.enforceState(venvPepper, 'I@aptly:server', 'aptly', true)
+ salt.enforceState(venvPepper, 'I@aptly:publisher', 'aptly', true)
if(runningOnDocker){
- salt.runSaltProcessStep(venvPepper, 'I@aptly:server', 'cmd.script', ['salt://aptly/files/aptly_mirror_update.sh', "args=-sv"], null, true)
- salt.runSaltProcessStep(venvPepper, 'I@aptly:server', 'cmd.script', ['salt://aptly/files/aptly_publish_update.sh', "args=-frv -u http://10.99.0.1:8080"], null, true)
+ salt.runSaltProcessStep(venvPepper, 'I@aptly:publisher', 'cmd.script', ['salt://aptly/files/aptly_mirror_update.sh', "args=-sv"], null, true)
+ salt.runSaltProcessStep(venvPepper, 'I@aptly:publisher', 'cmd.script', ['salt://aptly/files/aptly_publish_update.sh', "args=-frv -u http://10.99.0.1:8080"], null, true)
}
else{
- salt.runSaltProcessStep(venvPepper, 'I@aptly:server', 'cmd.script', ['salt://aptly/files/aptly_mirror_update.sh', "args=-sv", 'runas=aptly'], null, true)
- salt.runSaltProcessStep(venvPepper, 'I@aptly:server', 'cmd.script', ['salt://aptly/files/aptly_publish_update.sh', "args=-afrv", 'runas=aptly'], null, true)
+ salt.runSaltProcessStep(venvPepper, 'I@aptly:publisher', 'cmd.script', ['salt://aptly/files/aptly_mirror_update.sh', "args=-sv", 'runas=aptly'], null, true)
+ salt.runSaltProcessStep(venvPepper, 'I@aptly:publisher', 'cmd.script', ['salt://aptly/files/aptly_publish_update.sh', "args=-afrv", 'runas=aptly'], null, true)
}
- salt.enforceState(venvPepper, 'I@aptly:server', 'docker.client.registry', true)
+ salt.enforceState(venvPepper, 'I@aptly:publisher', 'docker.client.registry', true)
- salt.enforceState(venvPepper, 'I@aptly:server', 'debmirror', true)
+ salt.enforceState(venvPepper, 'I@aptly:publisher', 'debmirror', true)
- salt.enforceState(venvPepper, 'I@aptly:server', 'git.server', true)
+ salt.enforceState(venvPepper, 'I@aptly:publisher', 'git.server', true)
- salt.enforceState(venvPepper, 'I@aptly:server', 'linux.system.file', true)
+ salt.enforceState(venvPepper, 'I@aptly:publisher', 'linux.system.file', true)
}
}
stage("Update Drivetrain"){
salt.cmdRun(venvPepper, 'I@salt:master', "sed -i -e 's/[^ ]*[^ ]/$MCP_VERSION/4' /etc/apt/sources.list.d/mcp_salt.list")
salt.cmdRun(venvPepper, 'I@salt:master', "apt-get -o Dir::Etc::sourcelist='/etc/apt/sources.list.d/mcp_salt.list' -o Dir::Etc::sourceparts='-' -o APT::Get::List-Cleanup='0' update")
+ // Workaround for PROD-22108
+ salt.cmdRun(venvPepper, 'I@salt:master', "apt-get purge -y salt-formula-octavia && " +
+ "apt-get install -y salt-formula-octavia")
+ // End workaround for PROD-22108
salt.cmdRun(venvPepper, 'I@salt:master', "apt-get install -y --allow-downgrades salt-formula-*")
def inventoryBeforeFilename = "reclass-inventory-before.out"
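
The system.defaults injection uses the grep-or-sed idiom: grep succeeds and short-circuits the || when the class is already listed, so re-running the pipeline never duplicates the line. Reduced to a single file for illustration (the real target is every cluster's infra/init.yml under /srv/salt/reclass/classes/cluster):

    // Idempotent insert: add "- system.defaults" right under "classes:" only once.
    // Path shortened for illustration.
    sh "grep -q '^- system.defaults\$' infra/init.yml || " +
       "sed -i 's/^classes:/classes:\\n- system.defaults/' infra/init.yml"
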
diff --git a/validate-cloud.groovy b/validate-cloud.groovy
index 3c27dce..fa9a7a6 100644
--- a/validate-cloud.groovy
+++ b/validate-cloud.groovy
@@ -16,6 +16,7 @@
* RUN_TEMPEST_TESTS If not false, run Tempest tests
* RUN_RALLY_TESTS If not false, run Rally tests
* K8S_RALLY If not false, run Kubernetes Rally tests
+ * STACKLIGHT_RALLY If not false, run additional Stacklight tests
* RUN_K8S_TESTS If not false, run Kubernetes e2e/conformance tests
* RUN_SPT_TESTS If not false, run SPT tests
* SPT_SSH_USER The name of the user which should be used for ssh to nodes
@@ -31,6 +32,7 @@
* RALLY_CONFIG_REPO Git repository with files for Rally
* RALLY_CONFIG_BRANCH Git branch which will be used during the checkout
* RALLY_SCENARIOS Path to file or directory with rally scenarios
+ * RALLY_SL_SCENARIOS Path to file or directory with stacklight rally scenarios
* RALLY_TASK_ARGS_FILE Path to file with rally tests arguments
* REPORT_DIR Path for reports outside docker image
* TEST_K8S_API_SERVER Kubernetes API address
@@ -81,20 +83,21 @@
stage('Run Rally tests') {
if (RUN_RALLY_TESTS.toBoolean() == true) {
def report_dir = env.REPORT_DIR ?: '/root/qa_results'
- def platform
- def rally_variables
+ def platform = ["type":"unknown", "stacklight_enabled":false]
+ def rally_variables = []
if (K8S_RALLY.toBoolean() == false) {
- platform = 'openstack'
+ platform['type'] = 'openstack'
rally_variables = ["floating_network=${FLOATING_NETWORK}",
"rally_image=${RALLY_IMAGE}",
"rally_flavor=${RALLY_FLAVOR}",
"availability_zone=${AVAILABILITY_ZONE}"]
} else {
- platform = 'k8s'
- rally_variables = ["plugins_repo":"${RALLY_PLUGINS_REPO}",
- "plugins_branch":"${RALLY_PLUGINS_BRANCH}"]
+ platform['type'] = 'k8s'
}
- validate.runRallyTests(pepperEnv, TARGET_NODE, TEST_IMAGE, platform, artifacts_dir, RALLY_CONFIG_REPO, RALLY_CONFIG_BRANCH, RALLY_SCENARIOS, RALLY_TASK_ARGS_FILE, rally_variables, report_dir, SKIP_LIST)
+ if (STACKLIGHT_RALLY.toBoolean() == true) {
+ platform['stacklight_enabled'] = true
+ }
+ validate.runRallyTests(pepperEnv, TARGET_NODE, TEST_IMAGE, platform, artifacts_dir, RALLY_CONFIG_REPO, RALLY_CONFIG_BRANCH, RALLY_PLUGINS_REPO, RALLY_PLUGINS_BRANCH, RALLY_SCENARIOS, RALLY_SL_SCENARIOS, RALLY_TASK_ARGS_FILE, rally_variables, report_dir, SKIP_LIST)
} else {
common.infoMsg("Skipping Rally tests")
}
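
Promoting platform from a bare string to a map lets one argument carry two orthogonal facts, the platform kind and whether the Stacklight scenario set runs; it also removes a latent type mismatch in the old code, where rally_variables was a list on the OpenStack path but a map on the k8s path. The decision logic in isolation:

    // One argument, two facts: platform kind plus an optional Stacklight flag.
    def platform = [type: 'unknown', stacklight_enabled: false]
    platform.type = K8S_RALLY.toBoolean() ? 'k8s' : 'openstack'
    if (STACKLIGHT_RALLY.toBoolean()) {
        platform.stacklight_enabled = true
    }
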
diff --git a/xtrabackup-restore-mysql-db.groovy b/xtrabackup-restore-mysql-db.groovy
index da3d463..b1d4a4e 100644
--- a/xtrabackup-restore-mysql-db.groovy
+++ b/xtrabackup-restore-mysql-db.groovy
@@ -67,7 +67,7 @@
if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/mysql/xtrabackup' }
print(backup_dir)
salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
- salt.cmdRun(pepperEnv, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
+ salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'state.apply', ["xtrabackup.client.restore"], null, true)
salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'service.start', ['mysql'], null, true)
// wait until mysql service on galera master is up
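
The restore now applies only the xtrabackup.client.restore state through the Salt API on the I@galera:master target, rather than shelling into salt-call as root on I@xtrabackup:client, which keeps the result structured and targets the node that actually holds the database. A sketch of consuming that structured return, mirroring how saltutil.running is read in upgrade-mcp-release.groovy above:

    // Sketch: apply the restore state and inspect the structured return.
    // pepperEnv is the Pepper virtualenv set up earlier in the pipeline.
    def result = salt.runSaltProcessStep(pepperEnv, 'I@galera:master', 'state.apply',
                                         ['xtrabackup.client.restore'], null, true)
    result.get('return')[0].each { minion, states ->
        echo "xtrabackup restore state applied on ${minion}"
    }
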