Merge "Add health check state into opencontrail4.0 upgrade pipeline"
diff --git a/branch-git-repos.groovy b/branch-git-repos.groovy
index 0624c40..b3bb3a2 100644
--- a/branch-git-repos.groovy
+++ b/branch-git-repos.groovy
@@ -81,17 +81,10 @@
gitSrcObj = gitSrcObj.replace('SUBS_SOURCE_REF', srcObj)
}
- // Remove preifix `origin/` from gitSrcObj
- java.util.regex.Pattern reOrigin = ~'^origin/'
- gitSrcObj = gitSrcObj - reOrigin
-
checkout([
$class: 'GitSCM',
- branches: [
- [name: 'FETCH_HEAD'],
- ],
userRemoteConfigs: [
- [url: gitRepoUrl, refspec: gitSrcObj, credentialsId: gitCredentialsId],
+ [url: gitRepoUrl, credentialsId: gitCredentialsId],
],
extensions: [
[$class: 'PruneStaleBranch'],
@@ -110,15 +103,30 @@
sh 'git config user.email "ci+infra@mirantis.com"'
// Update list of branches
- sh 'git remote update origin --prune'
+ sh 'git checkout master'
+
+ int is_branch = sh(script: "git ls-remote --exit-code --heads origin ${gitBranchNew}", returnStatus: true)
+ int is_tag = sh(script: "git ls-remote --exit-code --tags origin ${gitBranchNew}", returnStatus: true)
// Ensure there is no branch or tag with gitBranchNew name
- sh "git branch -d '${gitBranchNew}' && git push origin ':${gitBranchNew}' || :"
- sh "git tag -d '${gitBranchNew}' && git push origin ':refs/tags/${gitBranchNew}' || :"
+ if (is_branch == 0) {
+ sh """\
+ git checkout 'origin/${gitBranchNew}' -t || :
+ git checkout master
+ git branch -d '${gitBranchNew}'
+ git push origin ':refs/heads/${gitBranchNew}'
+ """.stripIndent()
+ }
+ if (is_tag == 0) {
+ sh """\
+ git tag -d '${gitBranchNew}'
+ git push origin ':refs/tags/${gitBranchNew}'
+ """
+ }
// Create new branch
- sh "git checkout -b '${gitBranchNew}' '${gitSrcObj}'" // Create new local branch
- sh "git push origin '${gitBranchNew}'" // ... push new branch
+ sh "git branch '${gitBranchNew}' '${gitSrcObj}'" // Create new local branch
+ sh "git push --force origin '${gitBranchNew}'" // ... push new branch
}
}
}
diff --git a/cicd-lab-pipeline.groovy b/cicd-lab-pipeline.groovy
index da3e177..8a7a90d 100644
--- a/cicd-lab-pipeline.groovy
+++ b/cicd-lab-pipeline.groovy
@@ -194,7 +194,7 @@
// XXX: retry to workaround magical VALUE_TRIMMED
// response from salt master + to give slow cloud some
// more time to settle down
- salt.cmdRun(pepperEnv, 'I@aptly:server', 'while true; do curl -sf http://172.16.10.254:8084/api/version >/dev/null && break; done')
+ salt.cmdRun(pepperEnv, 'I@aptly:server', 'while true; do curl -sf http://apt.mcp.mirantis.net:8081/api/version >/dev/null && break; done')
}
}
salt.enforceState(pepperEnv, 'I@aptly:server', 'aptly', true)
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index a51f436..1f7dd1f 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -143,6 +143,10 @@
}
currentBuild.description = STACK_NAME
}
+ } else {
+ // In case name was copied with unicode zero-width space chars -
+ // remove them
+ STACK_NAME = STACK_NAME.trim().replaceAll("\\p{C}", "")
}
// no underscore in STACK_NAME
diff --git a/cloud-update.groovy b/cloud-update.groovy
index 9fdeaad..ab72f76 100644
--- a/cloud-update.groovy
+++ b/cloud-update.groovy
@@ -58,7 +58,7 @@
def command
def commandKwargs
-def wait = 10
+wait = 10
if (common.validInputParam('MINIONS_TEST_TIMEOUT') && MINIONS_TEST_TIMEOUT.isInteger()) {
wait = "${MINIONS_TEST_TIMEOUT}".toInteger()
}
@@ -846,7 +846,7 @@
timeout(time: 12, unit: 'HOURS') {
node() {
try {
- if(RUN_CVP_TESTS.toBoolean() == True){
+ if(RUN_CVP_TESTS.toBoolean() == true){
stage('Run CVP tests before upgrade.') {
build job: "cvp-sanity"
build job: "cvp-func"
@@ -1581,7 +1581,7 @@
// verification is already present in restore pipelines
}
- if(RUN_CVP_TESTS.toBoolean() == True){
+ if(RUN_CVP_TESTS.toBoolean() == true){
stage('Run CVP tests after upgrade.') {
build job: "cvp-sanity"
build job: "cvp-func"
diff --git a/cvp-ha.groovy b/cvp-ha.groovy
index 414ab46..b33cda6 100644
--- a/cvp-ha.groovy
+++ b/cvp-ha.groovy
@@ -44,7 +44,7 @@
if (!keystone_creds) {
keystone_creds = validate._get_keystone_creds_v2(saltMaster)
}
- validate.runContainer(saltMaster, TARGET_NODE, TEST_IMAGE, 'cvp', keystone_creds)
+ validate.runContainer(saltMaster, TEMPEST_TARGET_NODE, TEST_IMAGE, 'cvp', keystone_creds)
validate.configureContainer(saltMaster, TEMPEST_TARGET_NODE, PROXY, TOOLS_REPO, TEMPEST_REPO)
}
diff --git a/deploy-try-mcp.groovy b/deploy-try-mcp.groovy
new file mode 100644
index 0000000..a19b970
--- /dev/null
+++ b/deploy-try-mcp.groovy
@@ -0,0 +1,119 @@
+/**
+ * Generate cookiecutter cluster by individual products
+ *
+ * Expected parameters:
+ * COOKIECUTTER_TEMPLATE_CONTEXT Context parameters for the template generation.
+ * SALT_MASTER_URL URL of Salt master
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API
+ *
+ **/
+
+import static groovy.json.JsonOutput.toJson
+
+common = new com.mirantis.mk.Common()
+python = new com.mirantis.mk.Python()
+salt = new com.mirantis.mk.Salt()
+ssh = new com.mirantis.mk.Ssh()
+
+pepperEnv = "pepperEnv"
+
+slaveNode = env.SLAVE_NODE ?: 'python&&docker'
+model_job = 0
+
+timeout(time: 2, unit: 'HOURS') {
+ node(slaveNode) {
+ try {
+ def templateContext = readYaml text: COOKIECUTTER_TEMPLATE_CONTEXT
+ def clusterName = templateContext.default_context.cluster_name
+ def aioNodeHostname = templateContext.default_context.aio_node_hostname
+ def aioInternalAddress = templateContext.default_context.aio_internal_address
+ def drivetrainInternalAddress = templateContext.default_context.drivetrain_internal_address
+ def artifact_tar_file = "${clusterName}.tar.gz"
+ def masterIP = templateContext.default_context.drivetrain_external_address
+ if ( templateContext.default_context.get("docker_deployment", "False").toBoolean() ) {
+ masterIP = drivetrainInternalAddress
+ }
+ def masterUrl = "http://" + masterIP + ":6969"
+ def outputDirectory = env.WORKSPACE + "/"
+ def outputDestination = outputDirectory + artifact_tar_file
+ def outputCluster = outputDirectory + "/classes/cluster/" + clusterName
+ def rsyncLocation = templateContext.default_context.get("rsync_location", "/srv/salt/reclass/classes/cluster")
+ def rsyncCredentials = templateContext.default_context.get("rsync_credentials", "lab")
+ c = common.getSshCredentials(rsyncCredentials)
+ def rsyncSSHKey = c.getPrivateKey()
+ def rsyncUser = c.getUsername()
+ def rsyncKeyFile = outputDirectory + "rsync_key"
+ def rsyncPath = rsyncUser + "@" + masterIP + ":" + rsyncLocation
+ currentBuild.description = "Cluster " + clusterName + " on " + masterIP
+
+ stage("Generate AIO model") {
+ model_job = build(job: 'generate-salt-model-separated-products',
+ parameters: [
+ [$class: 'StringParameterValue', name: 'COOKIECUTTER_TEMPLATE_CONTEXT', value: COOKIECUTTER_TEMPLATE_CONTEXT ],
+ [$class: 'BooleanParameterValue', name: 'TEST_MODEL', value: false],
+ ])
+ }
+
+ stage("Download artifact with model") {
+ artifact_tar_url = "${env.JENKINS_URL}/job/generate-salt-model-separated-products/${model_job.number}/artifact/output-${clusterName}/${artifact_tar_file}"
+ sh "wget --progress=dot:mega --auth-no-challenge -O ${outputDestination} '${artifact_tar_url}'"
+ sh "tar -xzvf ${outputDestination}"
+ }
+
+ stage("Send model to Salt master node") {
+ ssh.ensureKnownHosts(masterIP)
+ writeFile(file: rsyncKeyFile, text: rsyncSSHKey)
+ sh("chmod 600 ${rsyncKeyFile}")
+ common.infoMsg("Copying cluster model to ${rsyncPath}")
+ sh("rsync -r -e \"ssh -i ${rsyncKeyFile}\" ${outputCluster} ${rsyncPath}")
+ }
+
+ stage("Setup virtualenv for Pepper") {
+ python.setupPepperVirtualenv(pepperEnv, masterUrl, SALT_MASTER_CREDENTIALS)
+ }
+
+ stage("Prepare AIO node"){
+ tgt = "S@" + aioInternalAddress
+ // Classify AIO node
+ eventData = [:]
+ eventData["node_control_ip"] = aioInternalAddress
+ eventData["node_os"] = "xenial"
+ eventData["node_master_ip"] = drivetrainInternalAddress
+ eventData["node_hostname"] = aioNodeHostname
+ eventData["node_cluster"] = clusterName
+ eventJson = toJson(eventData)
+ event = "salt-call event.send \"reclass/minion/classify\" \'" + eventJson + "\'"
+ salt.cmdRun(pepperEnv, tgt, event)
+ sleep(30)
+ // Upgrade Salt minion
+ salt.runSaltProcessStep(pepperEnv, tgt, 'pkg.install', "salt-minion")
+ sleep(10)
+ // Run core states on AIO node
+ salt.fullRefresh(pepperEnv, '*')
+ salt.enforceState(pepperEnv, tgt, 'linux')
+ salt.enforceState(pepperEnv, tgt, 'salt')
+ salt.enforceState(pepperEnv, tgt, 'openssh')
+ salt.enforceState(pepperEnv, tgt, 'ntp')
+ salt.enforceState(pepperEnv, tgt, 'rsyslog')
+ }
+
+ stage("Deploy Openstack") {
+ build(job: 'deploy_openstack',
+ parameters: [
+ [$class: 'StringParameterValue', name: 'SALT_MASTER_CREDENTIALS', value: SALT_MASTER_CREDENTIALS],
+ [$class: 'StringParameterValue', name: 'SALT_MASTER_URL', value: masterUrl],
+ [$class: 'StringParameterValue', name: 'STACK_INSTALL', value: 'openstack']
+ ])
+ }
+ } catch (Throwable e) {
+ currentBuild.result = "FAILURE"
+ currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+ throw e
+ } finally {
+ stage('Clean workspace directories') {
+ sh(script: 'find . -mindepth 1 -delete > /dev/null || true')
+ }
+ // common.sendNotification(currentBuild.result,"",["slack"])
+ }
+ }
+}
\ No newline at end of file
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 25473fb..fb28837 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -4,59 +4,54 @@
* Expected parameters:
* COOKIECUTTER_TEMPLATE_CONTEXT Context parameters for the template generation.
* EMAIL_ADDRESS Email to send a created tar file
- *
+ * CREDENTIALS_ID Credentials id for git
**/
common = new com.mirantis.mk.Common()
git = new com.mirantis.mk.Git()
python = new com.mirantis.mk.Python()
saltModelTesting = new com.mirantis.mk.SaltModelTesting()
-ssh = new com.mirantis.mk.Ssh()
+
slaveNode = env.SLAVE_NODE ?: 'python&&docker'
+gerritCredentials = env.CREDENTIALS_ID ?: 'gerrit'
timeout(time: 2, unit: 'HOURS') {
node(slaveNode) {
- def templateEnv = "${env.WORKSPACE}/template"
- def modelEnv = "${env.WORKSPACE}/model"
- def testEnv = "${env.WORKSPACE}/test"
- def pipelineEnv = "${env.WORKSPACE}/pipelines"
+ sshagent(credentials: [gerritCredentials]) {
+ def templateEnv = "${env.WORKSPACE}/template"
+ def modelEnv = "${env.WORKSPACE}/model"
+ def testEnv = "${env.WORKSPACE}/test"
+ def pipelineEnv = "${env.WORKSPACE}/pipelines"
- try {
- def templateContext = readYaml text: COOKIECUTTER_TEMPLATE_CONTEXT
- def mcpVersion = templateContext.default_context.mcp_version
- def sharedReclassUrl = templateContext.default_context.shared_reclass_url
- def clusterDomain = templateContext.default_context.cluster_domain
- def clusterName = templateContext.default_context.cluster_name
- def saltMaster = templateContext.default_context.salt_master_hostname
- def cutterEnv = "${env.WORKSPACE}/cutter"
- def jinjaEnv = "${env.WORKSPACE}/jinja"
- def outputDestination = "${modelEnv}/classes/cluster/${clusterName}"
- def systemEnv = "${modelEnv}/classes/system"
- def targetBranch = "feature/${clusterName}"
- def templateBaseDir = "${env.WORKSPACE}/template"
- def templateDir = "${templateEnv}/template/dir"
- def templateOutputDir = templateBaseDir
- def user
- def testResult = false
- wrap([$class: 'BuildUser']) {
- user = env.BUILD_USER_ID
- }
+ try {
+ def templateContext = readYaml text: COOKIECUTTER_TEMPLATE_CONTEXT
+ def mcpVersion = templateContext.default_context.mcp_version
+ def sharedReclassUrl = templateContext.default_context.shared_reclass_url
+ def clusterDomain = templateContext.default_context.cluster_domain
+ def clusterName = templateContext.default_context.cluster_name
+ def saltMaster = templateContext.default_context.salt_master_hostname
+ def cutterEnv = "${env.WORKSPACE}/cutter"
+ def jinjaEnv = "${env.WORKSPACE}/jinja"
+ def outputDestination = "${modelEnv}/classes/cluster/${clusterName}"
+ def systemEnv = "${modelEnv}/classes/system"
+ def targetBranch = "feature/${clusterName}"
+ def templateBaseDir = "${env.WORKSPACE}/template"
+ def templateDir = "${templateEnv}/template/dir"
+ def templateOutputDir = templateBaseDir
+ def user
+ def testResult = false
+ wrap([$class: 'BuildUser']) {
+ user = env.BUILD_USER_ID
+ }
+ currentBuild.description = clusterName
+ common.infoMsg("Using context:\n" + templateContext)
- currentBuild.description = clusterName
- print("Using context:\n" + COOKIECUTTER_TEMPLATE_CONTEXT)
- stage('Download Cookiecutter template') {
- sh(script: 'find . -mindepth 1 -delete > /dev/null || true')
- def cookiecutterTemplateUrl = templateContext.default_context.cookiecutter_template_url
- def cookiecutterTemplateBranch = templateContext.default_context.cookiecutter_template_branch
- git.checkoutGitRepository(templateEnv, cookiecutterTemplateUrl, 'master')
- // Use refspec if exists first of all
- if (cookiecutterTemplateBranch.toString().startsWith('refs/')) {
- dir(templateEnv) {
- ssh.agentSh("git fetch ${cookiecutterTemplateUrl} ${cookiecutterTemplateBranch} && git checkout FETCH_HEAD")
- }
- } else {
+ stage('Download Cookiecutter template') {
+ sh(script: 'find . -mindepth 1 -delete > /dev/null || true')
+ def cookiecutterTemplateUrl = templateContext.default_context.cookiecutter_template_url
+ def cookiecutterTemplateBranch = templateContext.default_context.cookiecutter_template_branch
// Use mcpVersion git tag if not specified branch for cookiecutter-templates
if (cookiecutterTemplateBranch == '') {
cookiecutterTemplateBranch = mcpVersion
@@ -65,24 +60,20 @@
cookiecutterTemplateBranch = 'master'
}
}
- git.changeGitBranch(templateEnv, cookiecutterTemplateBranch)
+ checkout([
+ $class : 'GitSCM',
+ branches : [[name: 'FETCH_HEAD'],],
+ extensions : [[$class: 'RelativeTargetDirectory', relativeTargetDir: templateEnv]],
+ userRemoteConfigs: [[url: cookiecutterTemplateUrl, refspec: cookiecutterTemplateBranch, credentialsId: gerritCredentials],],
+ ])
}
- }
-
- stage('Create empty reclass model') {
- dir(path: modelEnv) {
- sh "rm -rfv .git"
- sh "git init"
- ssh.agentSh("git submodule add ${sharedReclassUrl} 'classes/system'")
- }
-
- def sharedReclassBranch = templateContext.default_context.shared_reclass_branch
- // Use refspec if exists first of all
- if (sharedReclassBranch.toString().startsWith('refs/')) {
- dir(systemEnv) {
- ssh.agentSh("git fetch ${sharedReclassUrl} ${sharedReclassBranch} && git checkout FETCH_HEAD")
+ stage('Create empty reclass model') {
+ dir(path: modelEnv) {
+ sh "rm -rfv .git; git init"
+ sh "git submodule add ${sharedReclassUrl} 'classes/system'"
}
- } else {
+
+ def sharedReclassBranch = templateContext.default_context.shared_reclass_branch
// Use mcpVersion git tag if not specified branch for reclass-system
if (sharedReclassBranch == '') {
sharedReclassBranch = mcpVersion
@@ -92,175 +83,179 @@
sharedReclassBranch = 'master'
}
}
- git.changeGitBranch(systemEnv, sharedReclassBranch)
- }
- git.commitGitChanges(modelEnv, "Added new shared reclass submodule", "${user}@localhost", "${user}")
- }
-
- stage('Generate model') {
- python.setupCookiecutterVirtualenv(cutterEnv)
- python.generateModel(COOKIECUTTER_TEMPLATE_CONTEXT, 'default_context', saltMaster, cutterEnv, modelEnv, templateEnv, false)
- git.commitGitChanges(modelEnv, "Create model ${clusterName}", "${user}@localhost", "${user}")
- }
-
- stage("Test") {
- if (TEST_MODEL.toBoolean() && sharedReclassUrl != '') {
- distribRevision = mcpVersion
- if (['master'].contains(mcpVersion)) {
- distribRevision = 'nightly'
- }
- if (distribRevision.contains('/')) {
- distribRevision = distribRevision.split('/')[-1]
- }
- // Check if we are going to test bleeding-edge release, which doesn't have binary release yet
- if (!common.checkRemoteBinary([apt_mk_version: distribRevision]).linux_system_repo_url) {
- common.errorMsg("Binary release: ${distribRevision} not exist. Fallback to 'proposed'! ")
- distribRevision = 'proposed'
- }
- sh("cp -r ${modelEnv} ${testEnv}")
- def DockerCName = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}"
- common.infoMsg("Attempt to run test against distribRevision: ${distribRevision}")
- try {
- def config = [
- 'dockerHostname' : "${saltMaster}.${clusterDomain}",
- 'reclassEnv' : testEnv,
- 'distribRevision' : distribRevision,
- 'dockerContainerName': DockerCName,
- 'testContext' : 'salt-model-node'
- ]
- testResult = saltModelTesting.testNode(config)
- common.infoMsg("Test finished: SUCCESS")
- } catch (Exception ex) {
- common.warningMsg("Test finished: FAILED")
- testResult = false
- }
- } else {
- common.warningMsg("Test stage has been skipped!")
- }
- }
- stage("Generate config drives") {
- // apt package genisoimage is required for this stage
-
- // download create-config-drive
- // FIXME: that should be refactored, to use git clone - to be able download it from custom repo.
- def mcpCommonScriptsBranch = templateContext['default_context']['mcp_common_scripts_branch']
- if (mcpCommonScriptsBranch == '') {
- mcpCommonScriptsBranch = mcpVersion
- // Don't have n/t/s for mcp-common-scripts repo, therefore use master
- if (["nightly", "testing", "stable"].contains(mcpVersion)) {
- common.warningMsg("Fetching mcp-common-scripts from master!")
- mcpCommonScriptsBranch = 'master'
- }
+ checkout([
+ $class : 'GitSCM',
+ branches : [[name: 'FETCH_HEAD'],],
+ extensions : [[$class: 'RelativeTargetDirectory', relativeTargetDir: systemEnv]],
+ userRemoteConfigs: [[url: sharedReclassUrl, refspec: sharedReclassBranch, credentialsId: gerritCredentials],],
+ ])
+ git.commitGitChanges(modelEnv, "Added new shared reclass submodule", "${user}@localhost", "${user}")
}
- def commonScriptsRepoUrl = 'https://gerrit.mcp.mirantis.com/mcp/mcp-common-scripts'
- checkout([
- $class : 'GitSCM',
- branches : [[name: 'FETCH_HEAD'],],
- extensions : [[$class: 'RelativeTargetDirectory', relativeTargetDir: 'mcp-common-scripts']],
- userRemoteConfigs: [[url: commonScriptsRepoUrl, refspec: mcpCommonScriptsBranch],],
- ])
-
- sh "cp mcp-common-scripts/config-drive/create_config_drive.sh create-config-drive && chmod +x create-config-drive"
- sh "[ -f mcp-common-scripts/config-drive/master_config.sh ] && cp mcp-common-scripts/config-drive/master_config.sh user_data || cp mcp-common-scripts/config-drive/master_config.yaml user_data"
-
- sh "git clone --mirror https://github.com/Mirantis/mk-pipelines.git ${pipelineEnv}/mk-pipelines"
- sh "git clone --mirror https://github.com/Mirantis/pipeline-library.git ${pipelineEnv}/pipeline-library"
- args = "--user-data user_data --hostname ${saltMaster} --model ${modelEnv} --mk-pipelines ${pipelineEnv}/mk-pipelines/ --pipeline-library ${pipelineEnv}/pipeline-library/ ${saltMaster}.${clusterDomain}-config.iso"
-
- // load data from model
- def smc = [:]
- smc['SALT_MASTER_MINION_ID'] = "${saltMaster}.${clusterDomain}"
- smc['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
- smc['DEPLOY_NETWORK_GW'] = templateContext['default_context']['deploy_network_gateway']
- smc['DEPLOY_NETWORK_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
- if (templateContext['default_context'].get('deploy_network_mtu')) {
- smc['DEPLOY_NETWORK_MTU'] = templateContext['default_context']['deploy_network_mtu']
+ stage('Generate model') {
+ python.setupCookiecutterVirtualenv(cutterEnv)
+ python.generateModel(COOKIECUTTER_TEMPLATE_CONTEXT, 'default_context', saltMaster, cutterEnv, modelEnv, templateEnv, false)
+ git.commitGitChanges(modelEnv, "Create model ${clusterName}", "${user}@localhost", "${user}")
}
- smc['DNS_SERVERS'] = templateContext['default_context']['dns_server01']
- smc['MCP_VERSION'] = "${mcpVersion}"
- if (templateContext['default_context']['local_repositories'] == 'True') {
- def localRepoIP = templateContext['default_context']['local_repo_url']
- smc['MCP_SALT_REPO_KEY'] = "http://${localRepoIP}/public.gpg"
- smc['MCP_SALT_REPO_URL'] = "http://${localRepoIP}/ubuntu-xenial"
- smc['PIPELINES_FROM_ISO'] = 'false'
- smc['PIPELINE_REPO_URL'] = "http://${localRepoIP}:8088"
- smc['LOCAL_REPOS'] = 'true'
- }
- if (templateContext['default_context']['upstream_proxy_enabled'] == 'True') {
- if (templateContext['default_context']['upstream_proxy_auth_enabled'] == 'True') {
- smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
- smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+
+ stage("Test") {
+ if (TEST_MODEL.toBoolean() && sharedReclassUrl != '') {
+ distribRevision = mcpVersion
+ if (['master'].contains(mcpVersion)) {
+ distribRevision = 'nightly'
+ }
+ if (distribRevision.contains('/')) {
+ distribRevision = distribRevision.split('/')[-1]
+ }
+ // Check if we are going to test bleeding-edge release, which doesn't have binary release yet
+ if (!common.checkRemoteBinary([apt_mk_version: distribRevision]).linux_system_repo_url) {
+ common.errorMsg("Binary release: ${distribRevision} not exist. Fallback to 'proposed'! ")
+ distribRevision = 'proposed'
+ }
+ sh("cp -r ${modelEnv} ${testEnv}")
+ def DockerCName = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}"
+ common.infoMsg("Attempt to run test against distribRevision: ${distribRevision}")
+ try {
+ def config = [
+ 'dockerHostname' : "${saltMaster}.${clusterDomain}",
+ 'reclassEnv' : testEnv,
+ 'distribRevision' : distribRevision,
+ 'dockerContainerName': DockerCName,
+ 'testContext' : 'salt-model-node'
+ ]
+ testResult = saltModelTesting.testNode(config)
+ common.infoMsg("Test finished: SUCCESS")
+ } catch (Exception ex) {
+ common.warningMsg("Test finished: FAILED")
+ testResult = false
+ }
} else {
- smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
- smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+ common.warningMsg("Test stage has been skipped!")
+ }
+ }
+ stage("Generate config drives") {
+ // apt package genisoimage is required for this stage
+
+ // download create-config-drive
+ // FIXME: that should be refactored, to use git clone - to be able download it from custom repo.
+ def mcpCommonScriptsBranch = templateContext['default_context']['mcp_common_scripts_branch']
+ if (mcpCommonScriptsBranch == '') {
+ mcpCommonScriptsBranch = mcpVersion
+ // Don't have n/t/s for mcp-common-scripts repo, therefore use master
+ if (["nightly", "testing", "stable"].contains(mcpVersion)) {
+ common.warningMsg("Fetching mcp-common-scripts from master!")
+ mcpCommonScriptsBranch = 'master'
+ }
+ }
+ def commonScriptsRepoUrl = templateContext['default_context']['mcp_common_scripts_repo'] ?: 'ssh://gerrit.mcp.mirantis.com:29418/mcp/mcp-common-scripts'
+ checkout([
+ $class : 'GitSCM',
+ branches : [[name: 'FETCH_HEAD'],],
+ extensions : [[$class: 'RelativeTargetDirectory', relativeTargetDir: 'mcp-common-scripts']],
+ userRemoteConfigs: [[url: commonScriptsRepoUrl, refspec: mcpCommonScriptsBranch, credentialsId: gerritCredentials],],
+ ])
+
+ sh 'cp mcp-common-scripts/config-drive/create_config_drive.sh create-config-drive && chmod +x create-config-drive'
+ sh '[ -f mcp-common-scripts/config-drive/master_config.sh ] && cp mcp-common-scripts/config-drive/master_config.sh user_data || cp mcp-common-scripts/config-drive/master_config.yaml user_data'
+
+ sh "git clone --mirror https://github.com/Mirantis/mk-pipelines.git ${pipelineEnv}/mk-pipelines"
+ sh "git clone --mirror https://github.com/Mirantis/pipeline-library.git ${pipelineEnv}/pipeline-library"
+ args = "--user-data user_data --hostname ${saltMaster} --model ${modelEnv} --mk-pipelines ${pipelineEnv}/mk-pipelines/ --pipeline-library ${pipelineEnv}/pipeline-library/ ${saltMaster}.${clusterDomain}-config.iso"
+
+ // load data from model
+ def smc = [:]
+ smc['SALT_MASTER_MINION_ID'] = "${saltMaster}.${clusterDomain}"
+ smc['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
+ smc['DEPLOY_NETWORK_GW'] = templateContext['default_context']['deploy_network_gateway']
+ smc['DEPLOY_NETWORK_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
+ if (templateContext['default_context'].get('deploy_network_mtu')) {
+ smc['DEPLOY_NETWORK_MTU'] = templateContext['default_context']['deploy_network_mtu']
+ }
+ smc['DNS_SERVERS'] = templateContext['default_context']['dns_server01']
+ smc['MCP_VERSION'] = "${mcpVersion}"
+ if (templateContext['default_context']['local_repositories'] == 'True') {
+ def localRepoIP = templateContext['default_context']['local_repo_url']
+ smc['MCP_SALT_REPO_KEY'] = "http://${localRepoIP}/public.gpg"
+ smc['MCP_SALT_REPO_URL'] = "http://${localRepoIP}/ubuntu-xenial"
+ smc['PIPELINES_FROM_ISO'] = 'false'
+ smc['PIPELINE_REPO_URL'] = "http://${localRepoIP}:8088"
+ smc['LOCAL_REPOS'] = 'true'
+ }
+ if (templateContext['default_context']['upstream_proxy_enabled'] == 'True') {
+ if (templateContext['default_context']['upstream_proxy_auth_enabled'] == 'True') {
+ smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+ smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+ } else {
+ smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+ smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+ }
+ }
+
+ for (i in common.entries(smc)) {
+ sh "sed -i 's,${i[0]}=.*,${i[0]}=${i[1]},' user_data"
+ }
+
+ // create cfg config-drive
+ sh "./create-config-drive ${args}"
+ sh("mkdir output-${clusterName} && mv ${saltMaster}.${clusterDomain}-config.iso output-${clusterName}/")
+
+ // save cfg iso to artifacts
+ archiveArtifacts artifacts: "output-${clusterName}/${saltMaster}.${clusterDomain}-config.iso"
+
+ if (templateContext['default_context']['local_repositories'] == 'True') {
+ def aptlyServerHostname = templateContext.default_context.aptly_server_hostname
+ sh "[ -f mcp-common-scripts/config-drive/mirror_config.yaml ] && cp mcp-common-scripts/config-drive/mirror_config.yaml mirror_config || cp mcp-common-scripts/config-drive/mirror_config.sh mirror_config"
+
+ def smc_apt = [:]
+ smc_apt['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
+ smc_apt['APTLY_DEPLOY_IP'] = templateContext['default_context']['aptly_server_deploy_address']
+ smc_apt['APTLY_DEPLOY_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
+ smc_apt['APTLY_MINION_ID'] = "${aptlyServerHostname}.${clusterDomain}"
+
+ for (i in common.entries(smc_apt)) {
+ sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" mirror_config"
+ }
+
+ // create apt config-drive
+ sh "./create-config-drive --user-data mirror_config --hostname ${aptlyServerHostname} ${aptlyServerHostname}.${clusterDomain}-config.iso"
+ sh("mv ${aptlyServerHostname}.${clusterDomain}-config.iso output-${clusterName}/")
+
+ // save apt iso to artifacts
+ archiveArtifacts artifacts: "output-${clusterName}/${aptlyServerHostname}.${clusterDomain}-config.iso"
}
}
- for (i in common.entries(smc)) {
- sh "sed -i 's,${i[0]}=.*,${i[0]}=${i[1]},' user_data"
- }
+ stage('Save changes reclass model') {
+ sh(returnStatus: true, script: "tar -czf output-${clusterName}/${clusterName}.tar.gz --exclude='*@tmp' -C ${modelEnv} .")
+ archiveArtifacts artifacts: "output-${clusterName}/${clusterName}.tar.gz"
- // create cfg config-drive
- sh "./create-config-drive ${args}"
- sh("mkdir output-${clusterName} && mv ${saltMaster}.${clusterDomain}-config.iso output-${clusterName}/")
-
- // save cfg iso to artifacts
- archiveArtifacts artifacts: "output-${clusterName}/${saltMaster}.${clusterDomain}-config.iso"
-
- if (templateContext['default_context']['local_repositories'] == 'True') {
- def aptlyServerHostname = templateContext.default_context.aptly_server_hostname
- sh "[ -f mcp-common-scripts/config-drive/mirror_config.yaml ] && cp mcp-common-scripts/config-drive/mirror_config.yaml mirror_config || cp mcp-common-scripts/config-drive/mirror_config.sh mirror_config"
-
- def smc_apt = [:]
- smc_apt['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
- smc_apt['APTLY_DEPLOY_IP'] = templateContext['default_context']['aptly_server_deploy_address']
- smc_apt['APTLY_DEPLOY_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
- smc_apt['APTLY_MINION_ID'] = "${aptlyServerHostname}.${clusterDomain}"
-
- for (i in common.entries(smc_apt)) {
- sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" mirror_config"
+ if (EMAIL_ADDRESS != null && EMAIL_ADDRESS != "") {
+ emailext(to: EMAIL_ADDRESS,
+ attachmentsPattern: "output-${clusterName}/*",
+ body: "Mirantis Jenkins\n\nRequested reclass model ${clusterName} has been created and attached to this email.\nEnjoy!\n\nMirantis",
+ subject: "Your Salt model ${clusterName}")
}
-
- // create apt config-drive
- sh "./create-config-drive --user-data mirror_config --hostname ${aptlyServerHostname} ${aptlyServerHostname}.${clusterDomain}-config.iso"
- sh("mv ${aptlyServerHostname}.${clusterDomain}-config.iso output-${clusterName}/")
-
- // save apt iso to artifacts
- archiveArtifacts artifacts: "output-${clusterName}/${aptlyServerHostname}.${clusterDomain}-config.iso"
+ dir("output-${clusterName}") {
+ deleteDir()
+ }
}
- }
- stage('Save changes reclass model') {
- sh(returnStatus: true, script: "tar -czf output-${clusterName}/${clusterName}.tar.gz --exclude='*@tmp' -C ${modelEnv} .")
- archiveArtifacts artifacts: "output-${clusterName}/${clusterName}.tar.gz"
-
- if (EMAIL_ADDRESS != null && EMAIL_ADDRESS != "") {
- emailext(to: EMAIL_ADDRESS,
- attachmentsPattern: "output-${clusterName}/*",
- body: "Mirantis Jenkins\n\nRequested reclass model ${clusterName} has been created and attached to this email.\nEnjoy!\n\nMirantis",
- subject: "Your Salt model ${clusterName}")
+ // Fail, but leave possibility to get failed artifacts
+ if (!testResult && TEST_MODEL.toBoolean()) {
+ common.warningMsg('Test finished: FAILURE. Please check logs and\\or debug failed model manually!')
+ error('Test stage finished: FAILURE')
}
- dir("output-${clusterName}") {
- deleteDir()
+
+ } catch (Throwable e) {
+ currentBuild.result = "FAILURE"
+ currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+ throw e
+ } finally {
+ stage('Clean workspace directories') {
+ sh(script: 'find . -mindepth 1 -delete > /dev/null || true')
}
+ // common.sendNotification(currentBuild.result,"",["slack"])
}
-
- // Fail, but leave possibility to get failed artifacts
- if (!testResult && TEST_MODEL.toBoolean()) {
- common.warningMsg('Test finished: FAILURE. Please check logs and\\or debug failed model manually!')
- error('Test stage finished: FAILURE')
- }
-
- } catch (Throwable e) {
- currentBuild.result = "FAILURE"
- currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
- throw e
- } finally {
- stage('Clean workspace directories') {
- sh(script: 'find . -mindepth 1 -delete > /dev/null || true')
- }
- // common.sendNotification(currentBuild.result,"",["slack"])
}
}
}
diff --git a/k8s-upgrade-pipeline.groovy b/k8s-upgrade-pipeline.groovy
index 98a4338..be065a1 100644
--- a/k8s-upgrade-pipeline.groovy
+++ b/k8s-upgrade-pipeline.groovy
@@ -12,6 +12,10 @@
* PER_NODE Target nodes will be managed one by one (bool)
* SIMPLE_UPGRADE Use previous version of upgrade without conron/drain abilities
* UPGRADE_DOCKER Upgrade docker component
+ * CONFORMANCE_RUN_AFTER Run Kubernetes conformance tests after update
+ * CONFORMANCE_RUN_BEFORE Run Kubernetes conformance tests before update
+ * TEST_K8S_API_SERVER Kubernetes API server address for test execution
+ * ARTIFACTORY_URL Artifactory URL where docker images are located. Needed to correctly fetch conformance images.
*
**/
def common = new com.mirantis.mk.Common()
@@ -114,6 +118,58 @@
}
}
+def runConformance(pepperEnv, target, k8s_api, image) {
+ def salt = new com.mirantis.mk.Salt()
+ def containerName = 'conformance_tests'
+ output_file = image.replaceAll('/', '-') + '.output'
+ def output_file_full_path = "/tmp/" + image.replaceAll('/', '-') + '.output'
+ def artifacts_dir = '_artifacts/'
+ salt.cmdRun(pepperEnv, target, "docker rm -f ${containerName}", false)
+ salt.cmdRun(pepperEnv, target, "docker run -d --name ${containerName} --net=host -e API_SERVER=${k8s_api} ${image}")
+ sleep(10)
+
+ print("Waiting for tests to run...")
+ salt.runSaltProcessStep(pepperEnv, target, 'cmd.run', ["docker wait ${containerName}"], null, false)
+
+ print("Writing test results to output file...")
+ salt.runSaltProcessStep(pepperEnv, target, 'cmd.run', ["docker logs -t ${containerName} > ${output_file_full_path}"])
+ print("Conformance test output saved in " + output_file_full_path)
+
+ // collect output
+ sh "mkdir -p ${artifacts_dir}"
+ file_content = salt.getFileContent(pepperEnv, target, '/tmp/' + output_file)
+ writeFile file: "${artifacts_dir}${output_file}", text: file_content
+ sh "cat ${artifacts_dir}${output_file}"
+ try {
+ sh "cat ${artifacts_dir}${output_file} | grep 'Test Suite Failed' && exit 1 || exit 0"
+ } catch (Throwable e) {
+ print("Conformance tests failed. Please check output")
+ currentBuild.result = "FAILURE"
+ currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+ throw e
+ }
+}
+
+def buildImageURL(pepperEnv, target, mcp_repo) {
+ def salt = new com.mirantis.mk.Salt()
+ def raw_version = salt.cmdRun(pepperEnv, target, "kubectl version --short -o json")['return'][0].values()[0].replaceAll('Salt command execution success','')
+ print("Kubernetes version: " + raw_version)
+ def serialized_version = readJSON text: raw_version
+ def short_version = (serialized_version.serverVersion.gitVersion =~ /([v])(\d+\.)(\d+\.)(\d+\-)(\d+)/)[0][0]
+ print("Kubernetes short version: " + short_version)
+ def conformance_image = mcp_repo + "/mirantis/kubernetes/k8s-conformance:" + short_version
+ return conformance_image
+}
+
+def executeConformance(pepperEnv, target, k8s_api, mcp_repo) {
+ stage("Running conformance tests") {
+ def image = buildImageURL(pepperEnv, target, mcp_repo)
+ print("Using image: " + image)
+ runConformance(pepperEnv, target, k8s_api, image)
+ }
+}
+
+
timeout(time: 12, unit: 'HOURS') {
node() {
try {
@@ -122,6 +178,14 @@
python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
+ if (CONFORMANCE_RUN_BEFORE.toBoolean()) {
+ def target = CTL_TARGET
+ def mcp_repo = ARTIFACTORY_URL
+ def k8s_api = TEST_K8S_API_SERVER
+ firstTarget = salt.getFirstMinion(pepperEnv, target)
+ executeConformance(pepperEnv, firstTarget, k8s_api, mcp_repo)
+ }
+
if ((common.validInputParam('KUBERNETES_HYPERKUBE_IMAGE')) && (common.validInputParam('KUBERNETES_PAUSE_IMAGE'))) {
overrideKubernetesImage(pepperEnv)
}
@@ -185,6 +249,14 @@
performKubernetesComputeUpdate(pepperEnv, target)
}
}
+
+ if (CONFORMANCE_RUN_AFTER.toBoolean()) {
+ def target = CTL_TARGET
+ def mcp_repo = ARTIFACTORY_URL
+ def k8s_api = TEST_K8S_API_SERVER
+ firstTarget = salt.getFirstMinion(pepperEnv, target)
+ executeConformance(pepperEnv, firstTarget, k8s_api, mcp_repo)
+ }
} catch (Throwable e) {
// If there was an error or exception thrown, the build failed
currentBuild.result = "FAILURE"
diff --git a/opencontrail40-upgrade.groovy b/opencontrail40-upgrade.groovy
index ce44d04..c2cb8cc 100644
--- a/opencontrail40-upgrade.groovy
+++ b/opencontrail40-upgrade.groovy
@@ -26,9 +26,9 @@
def command = 'cmd.shell'
def controlPkgs = 'contrail-config,contrail-config-openstack,contrail-control,contrail-dns,contrail-lib,contrail-nodemgr,contrail-utils,contrail-web-controller,contrail-web-core,neutron-plugin-contrail,python-contrail,contrail-database'
-def thirdPartyControlPkgsToRemove = 'redis-server,ifmap-server,supervisor'
+def thirdPartyControlPkgsToRemove = 'zookeeper,libzookeeper-java,kafka,cassandra,redis-server,ifmap-server,supervisor'
def analyticsPkgs = 'contrail-analytics,contrail-lib,contrail-nodemgr,contrail-utils,python-contrail,contrail-database'
-def thirdPartyAnalyticsPkgsToRemove = 'redis-server,supervisor'
+def thirdPartyAnalyticsPkgsToRemove = 'zookeeper,libzookeeper-java,kafka,cassandra,python-cassandra,cassandra-cpp-driver,redis-server,supervisor'
//def cmpPkgs = ['contrail-lib', 'contrail-nodemgr', 'contrail-utils', 'contrail-vrouter-agent', 'contrail-vrouter-utils', 'python-contrail', 'python-contrail-vrouter-api', 'python-opencontrail-vrouter-netns', 'contrail-vrouter-dkms']
def CMP_PKGS = 'contrail-lib contrail-nodemgr contrail-utils contrail-vrouter-agent contrail-vrouter-utils python-contrail python-contrail-vrouter-api python-opencontrail-vrouter-netns contrail-vrouter-dkms'
def KERNEL_MODULE_RELOAD = 'service supervisor-vrouter stop; rmmod vrouter; sync && echo 3 > /proc/sys/vm/drop_caches && echo 1 > /proc/sys/vm/compact_memory; service contrail-vrouter-agent start; service contrail-vrouter-nodemgr start'
@@ -86,9 +86,9 @@
}
try {
- controllerImage = salt.getPillar(pepperEnv, "I@opencontrail:control and *01*", "docker:client:compose:opencontrail_api:service:controller:image")
- analyticsImage = salt.getPillar(pepperEnv, "I@opencontrail:collector and *01*", "docker:client:compose:opencontrail_api:service:analytics:image")
- analyticsdbImage = salt.getPillar(pepperEnv, "I@opencontrail:collector and *01*", "docker:client:compose:opencontrail_api:service:analyticsdb:image")
+ controllerImage = salt.getPillar(pepperEnv, "I@opencontrail:control:role:primary", "docker:client:compose:opencontrail_api:service:controller:image")
+ analyticsImage = salt.getPillar(pepperEnv, "I@opencontrail:collector:role:primary", "docker:client:compose:opencontrail_api:service:analytics:image")
+ analyticsdbImage = salt.getPillar(pepperEnv, "I@opencontrail:collector:role:primary", "docker:client:compose:opencontrail_api:service:analyticsdb:image")
salt.enforceState(pepperEnv, 'I@opencontrail:database', 'docker.host')
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:database', 'state.sls', ['opencontrail', 'exclude=opencontrail.client'])
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'dockerng.pull', [controllerImage])
@@ -133,7 +133,7 @@
}
check = 'doctrail all contrail-status'
salt.enforceState(pepperEnv, 'I@opencontrail:collector', 'docker.client')
- runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, pepperEnv, common)
+ runCommonCommands('I@opencontrail:collector:role:primary', command, args, check, salt, pepperEnv, common)
} catch (Exception er) {
common.errorMsg("Opencontrail Analytics failed to be upgraded.")
throw er
@@ -152,20 +152,20 @@
}
for (service in controlServices) {
- salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *0[23]*', 'service.stop', [service])
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control:role:secondary', 'service.stop', [service])
}
- salt.enforceState(pepperEnv, 'I@opencontrail:control and *0[23]*', 'docker.client')
+ salt.enforceState(pepperEnv, 'I@opencontrail:control:role:secondary', 'docker.client')
- runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, pepperEnv, common)
+ runCommonCommands('I@opencontrail:control:role:secondary', command, args, check, salt, pepperEnv, common)
sleep(120)
for (service in controlServices) {
- salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *01*', 'service.stop', [service])
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control:role:primary', 'service.stop', [service])
}
- salt.enforceState(pepperEnv, 'I@opencontrail:control and *01*', 'docker.client')
+ salt.enforceState(pepperEnv, 'I@opencontrail:control:role:primary', 'docker.client')
salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'pkg.install', ['neutron-plugin-contrail,contrail-heat,python-contrail'])
salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.start', ['neutron-server'])
@@ -173,20 +173,44 @@
common.errorMsg("Opencontrail Controller failed to be upgraded.")
throw er
}
+ }
+ stage('Opencontrail controllers backup and cleanup') {
+ try {
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'archive.tar', ['zcvf', '/root/contrail-database.tgz', '/var/lib/cassandra'])
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'archive.tar', ['zcvf', '/root/contrail-zookeeper.tgz', '/var/lib/zoopeeker'])
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'archive.tar', ['zcvf', '/root/contrail-analytics-database.tgz', '/var/lib/cassandra'])
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'archive.tar', ['zcvf', '/root/contrail-analytics-zookeeper.tgz', '/var/lib/zookeeper'])
+
+ for (service in (controlServices + thirdPartyServicesToDisable)) {
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.disable', [service])
+ }
+ for (service in (analyticsServices + thirdPartyServicesToDisable)) {
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'service.disable', [service])
+ }
+
+ def tmpCfgBackupDir = '/tmp/cfg_backup'
+ def thirdPartyCfgFilesToBackup = ['/var/lib/zookeeper/myid', '/etc/zookeeper/conf/', '/usr/share/kafka/config/']
+
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control or I@opencontrail:collector', 'file.makedirs', [tmpCfgBackupDir])
+
+ for (cfgFilePath in thirdPartyCfgFilesToBackup) {
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control or I@opencontrail:collector', 'file.makedirs', [tmpCfgBackupDir + cfgFilePath])
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control or I@opencontrail:collector', 'file.copy', [cfgFilePath, tmpCfgBackupDir + cfgFilePath, 'recurse=True'])
+ }
+
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'pkg.remove', [controlPkgs + ',' + thirdPartyControlPkgsToRemove])
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'pkg.remove', [analyticsPkgs + ',' + thirdPartyAnalyticsPkgsToRemove])
+
+ for (cfgFilePath in thirdPartyCfgFilesToBackup) {
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control or I@opencontrail:collector', 'file.makedirs', [cfgFilePath])
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control or I@opencontrail:collector', 'file.copy', [tmpCfgBackupDir + cfgFilePath, cfgFilePath, 'recurse=True'])
+ }
+ } catch (Exception er) {
+ common.errorMsg("Opencontrail Controllers backup and cleanup stage has failed.")
+ throw er
+ }
}
- salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'archive.tar', ['zcvf', '/root/contrail-database.tgz', '/var/lib/cassandra'])
- salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'archive.tar', ['zcvf', '/root/contrail-zookeeper.tgz', '/var/lib/zoopeeker'])
- salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'archive.tar', ['zcvf', '/root/contrail-analytics-database.tgz', '/var/lib/cassandra'])
- salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'archive.tar', ['zcvf', '/root/contrail-analytics-zookeeper.tgz', '/var/lib/zookeeper'])
- for (service in (controlServices + thirdPartyServicesToDisable)) {
- salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.disable', [service])
- }
- for (service in (analyticsServices + thirdPartyServicesToDisable)) {
- salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'service.disable', [service])
- }
- salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'pkg.remove', [controlPkgs + ',' + thirdPartyControlPkgsToRemove])
- salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'pkg.remove', [analyticsPkgs + ',' + thirdPartyAnalyticsPkgsToRemove])
}
@@ -313,19 +337,19 @@
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'state.sls', ['opencontrail', 'exclude=opencontrail.client'])
- salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *0[23]*', 'cmd.shell', ['cd /etc/docker/compose/opencontrail/; docker-compose down'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control:role:secondary', 'cmd.shell', ['cd /etc/docker/compose/opencontrail/; docker-compose down'], null, true)
for (service in config4Services) {
- salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *01*', 'cmd.shell', ["doctrail controller systemctl stop ${service}"], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control:role:primary', 'cmd.shell', ["doctrail controller systemctl stop ${service}"], null, true)
}
- salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *0[23]*', 'state.sls', ['opencontrail', 'exclude=opencontrail.client'])
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control:role:secondary', 'state.sls', ['opencontrail', 'exclude=opencontrail.client'])
check = 'contrail-status'
- runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, pepperEnv, common)
+ runCommonCommands('I@opencontrail:control:role:secondary', command, args, check, salt, pepperEnv, common)
sleep(120)
- salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *01*', 'cmd.shell', ['cd /etc/docker/compose/opencontrail/; docker-compose down'], null, true)
- salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *01*', 'state.sls', ['opencontrail', 'exclude=opencontrail.client'])
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control:role:primary', 'cmd.shell', ['cd /etc/docker/compose/opencontrail/; docker-compose down'], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control:role:primary', 'state.sls', ['opencontrail', 'exclude=opencontrail.client'])
for (service in (controlServices + thirdPartyServicesToDisable)) {
salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.enable', [service])
}
diff --git a/promote-vcp-images.groovy b/promote-vcp-images.groovy
index 7b4f80e..f960d75 100644
--- a/promote-vcp-images.groovy
+++ b/promote-vcp-images.groovy
@@ -114,7 +114,7 @@
error("Uploading file: ${targetImage}.md5 failed!")
}
- description += "<a href='http://apt.mirantis.net:8085/images/${targetImage}'>${job_env.SOURCE_TAG}=>${targetImage}</a>"
+ description += "<a href='http://images.mcp.mirantis.net/${targetImage}'>${job_env.SOURCE_TAG}=>${targetImage}</a>"
}
currentBuild.description = description
} catch (Throwable e) {
diff --git a/release-mcp-version.groovy b/release-mcp-version.groovy
index 0d9ce5e..bd2ccd7 100644
--- a/release-mcp-version.groovy
+++ b/release-mcp-version.groovy
@@ -130,7 +130,7 @@
}
if (syncVcpImagesToS3) {
- common.infoMsg("Syncing VCP images from internal: http://apt.mcp.mirantis.net/images to s3: images.mirantis.com")
+ common.infoMsg("Syncing VCP images from internal: http://images.mcp.mirantis.net/ to s3: images.mirantis.com")
triggerSyncVCPJob('')
}
if (emailNotify) {
diff --git a/stacklight-upgrade.groovy b/stacklight-upgrade.groovy
index 8c445ac..7b79f4c 100644
--- a/stacklight-upgrade.groovy
+++ b/stacklight-upgrade.groovy
@@ -25,9 +25,11 @@
def commandKwargs
def probe = 1
def errorOccured = false
-def command = 'cmd.run'
def upgrade(master, target, service, pckg, state) {
+ def common = new com.mirantis.mk.Common()
+ def salt = new com.mirantis.mk.Salt()
+ def command = 'cmd.run'
stage("Change ${target} repos") {
salt.runSaltProcessStep(master, "${target}", 'saltutil.refresh_pillar', [], null, true, 5)
salt.enforceState(master, "${target}", 'linux.system.repo', true)
@@ -42,22 +44,25 @@
return
}
}
- stage("Run ${state} on ${target}") {
+ stage("Run ${state} state on ${target} nodes") {
try {
- salt.enforceState(master, '${target}', '${state}')
+ salt.enforceState(master, "${target}", ["${state}"], true)
} catch (Exception er) {
errorOccured = true
- common.errorMsg('${state} state was executed and failed. Please fix it manually.')
+ common.errorMsg("${state} state was executed and failed. Please fix it manually.")
}
}
- out = salt.runSaltCommand(master, 'local', ['expression': '${target}', 'type': 'compound'], command, null, 'systemctl status ${service}.service', null)
+ out = salt.runSaltCommand(master, 'local', ['expression': "${target}", 'type': 'compound'], command, null, "systemctl status ${service}.service", null)
salt.printSaltCommandResult(out)
- common.warningMsg('Please check \'systemctl status ${service}.service\' on ${target} nodes if ${service} is running.')
+ common.warningMsg("Please check \'systemctl status ${service}.service\' on ${target} nodes if ${service} is running.")
return
}
def upgrade_es_kibana(master) {
+ def common = new com.mirantis.mk.Common()
+ def salt = new com.mirantis.mk.Salt()
+ def command = 'cmd.run'
stage('Elasticsearch upgrade') {
try {
salt.runSaltProcessStep(master, 'I@elasticsearch:server', command, ["systemctl stop elasticsearch"], null, true)
@@ -76,6 +81,7 @@
def retries_wait = 20
def retries = 15
def elasticsearch_vip
+ def pillar = salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:host')
if(!pillar['return'].isEmpty()) {
elasticsearch_vip = pillar['return'][0].values()[0]
} else {
@@ -136,9 +142,10 @@
if (salt.testTarget(pepperEnv, "I@prometheus:exporters:jmx")) {
upgrade(pepperEnv, "I@prometheus:exporters:jmx", "jmx-exporter", "jmx-exporter", "prometheus")
}
- if (STAGE_UPGRADE_ES_KIBANA.toBoolean() == true && !errorOccured) {
- upgrade_es_kibana(pepperEnv)
- }
+ }
+
+ if (STAGE_UPGRADE_ES_KIBANA.toBoolean() == true && !errorOccured) {
+ upgrade_es_kibana(pepperEnv)
}
if (STAGE_UPGRADE_DOCKER_COMPONENTS.toBoolean() == true && !errorOccured) {
@@ -146,9 +153,9 @@
stage('Docker components upgrade') {
try {
- salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master and I@prometheus:server', command, ["docker stack rm monitoring"], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master and I@prometheus:server', 'cmd.run', ["docker stack rm monitoring"], null, true)
salt.enforceState(pepperEnv, 'I@docker:swarm and I@prometheus:server', 'prometheus')
- salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master and I@prometheus:server', command, ["docker stack rm dashboard"], null, true)
+ salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master and I@prometheus:server', 'cmd.run', ["docker stack rm dashboard"], null, true)
salt.enforceState(pepperEnv, 'I@docker:swarm:role:master and I@prometheus:server', 'docker')
salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.sync_all', [], null, true)
salt.enforceState(pepperEnv, 'I@grafana:client', 'grafana.client')
diff --git a/test-cookiecutter-reclass-chunk.groovy b/test-cookiecutter-reclass-chunk.groovy
index b1266a3..cdc6e1e 100644
--- a/test-cookiecutter-reclass-chunk.groovy
+++ b/test-cookiecutter-reclass-chunk.groovy
@@ -34,6 +34,10 @@
'dockerContainerName': extraVars.DockerCName,
'testContext': extraVars.modelFile
]
+ if (extraVars.useExtraRepos) {
+ config['extraRepos'] = extraVars.extraRepos ? extraVars.extraRepos : [:]
+ config['extraRepoMergeStrategy'] = extraVars.extraRepoMergeStrategy ? extraVars.extraRepoMergeStrategy : ''
+ }
saltModelTesting.testNode(config)
} catch (Throwable e) {
// If there was an error or exception thrown, the build failed
diff --git a/test-cookiecutter-reclass.groovy b/test-cookiecutter-reclass.groovy
index 0bab394..bd3373c 100644
--- a/test-cookiecutter-reclass.groovy
+++ b/test-cookiecutter-reclass.groovy
@@ -49,12 +49,13 @@
gerritDataRSHEAD = [:]
gerritDataRS = [:]
gerritDataRS << gerritConData
-gerritDataRS['gerritBranch'] = env.RECLASS_MODEL_BRANCH ?: 'master'
+gerritDataRS['gerritBranch'] = env.RECLASS_SYSTEM_BRANCH ?: 'master'
gerritDataRS['gerritRefSpec'] = env.RECLASS_SYSTEM_GIT_REF ?: null
gerritDataRS['gerritProject'] = 'salt-models/reclass-system'
// version of debRepos, aka formulas|reclass|ubuntu
testDistribRevision = env.DISTRIB_REVISION ?: 'nightly'
+
// Name of sub-test chunk job
chunkJobName = "test-mk-cookiecutter-templates-chunk"
testModelBuildsData = [:]
@@ -69,18 +70,20 @@
}
}
-def testModel(modelFile, reclassArtifactName, artifactCopyPath) {
+def testModel(modelFile, reclassArtifactName, artifactCopyPath, useExtraRepos = false) {
// modelFile - `modelfiname` from model/modelfiname/modelfiname.yaml
//* Grub all models and send it to check in paralell - by one in thread.
def _uuid = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}_${modelFile.toLowerCase()}_" + UUID.randomUUID().toString().take(8)
def _values_string = """
- ---
- MODELS_TARGZ: "${env.BUILD_URL}/artifact/${reclassArtifactName}"
- DockerCName: "${_uuid}"
- testReclassEnv: "model/${modelFile}/"
- modelFile: "contexts/${modelFile}.yml"
- DISTRIB_REVISION: "${testDistribRevision}"
- """
+---
+MODELS_TARGZ: "${env.BUILD_URL}/artifact/${reclassArtifactName}"
+DockerCName: "${_uuid}"
+testReclassEnv: "model/${modelFile}/"
+modelFile: "contexts/${modelFile}.yml"
+DISTRIB_REVISION: "${testDistribRevision}"
+useExtraRepos: ${useExtraRepos}
+${extraVarsYAML.replaceAll('---', '')}
+"""
def chunkJob = build job: chunkJobName, parameters: [
[$class: 'TextParameterValue', name: 'EXTRA_VARIABLES_YAML',
value : _values_string.stripIndent()],
@@ -91,7 +94,7 @@
'buildId' : "${chunkJob.number}"])
}
-def StepTestModel(basename, reclassArtifactName, artifactCopyPath) {
+def StepTestModel(basename, reclassArtifactName, artifactCopyPath, useExtraRepos = false) {
// We need to wrap what we return in a Groovy closure, or else it's invoked
// when this method is called, not when we pass it to parallel.
// To do this, you need to wrap the code below in { }, and either return
@@ -99,7 +102,7 @@
// return node object
return {
node(slaveNode) {
- testModel(basename, reclassArtifactName, artifactCopyPath)
+ testModel(basename, reclassArtifactName, artifactCopyPath, useExtraRepos)
}
}
}
@@ -359,7 +362,7 @@
common.infoMsg("Found: ${contextFileListPatched.size()} patched contexts to test.")
for (String context : contextFileListPatched) {
def basename = common.GetBaseName(context, '.yml')
- stepsForParallel.put("ContextPatchedTest:${basename}", StepTestModel(basename, patchedReclassArtifactName, reclassInfoPatchedPath))
+ stepsForParallel.put("ContextPatchedTest:${basename}", StepTestModel(basename, patchedReclassArtifactName, reclassInfoPatchedPath, true))
}
parallel stepsForParallel
common.infoMsg('All TestContexts tests done')
diff --git a/test-drivetrain.groovy b/test-drivetrain.groovy
index fe7c87c..c421c17 100644
--- a/test-drivetrain.groovy
+++ b/test-drivetrain.groovy
@@ -119,11 +119,11 @@
throw e
} finally{
if(DELETE_STACK.toBoolean() && ENVIRONMENT_IP == ""){
- mcpEnvJob = build(job: "delete-heat-stack-for-mcp-env", parameters: [
+ mcpEnvJob = build(job: "delete-heat-stack-for-mcp-env", wait: false, parameters: [
[$class: 'StringParameterValue', name: 'OS_PROJECT_NAME', value: 'mcp-mk'],
[$class: 'StringParameterValue', name: 'STACK_NAME', value: 'jenkins-drivetrain-test-' + currentBuild.number],
])
}
}
}
-}
\ No newline at end of file
+}
diff --git a/test-salt-formulas-env.groovy b/test-salt-formulas-env.groovy
index be9c894..e2dbf83 100644
--- a/test-salt-formulas-env.groovy
+++ b/test-salt-formulas-env.groovy
@@ -64,8 +64,8 @@
common.infoMsg("Running part of kitchen test")
if (KITCHEN_ENV != null && !KITCHEN_ENV.isEmpty() && KITCHEN_ENV != "") {
def cleanEnv = KITCHEN_ENV.replaceAll("\\s?SUITE=[^\\s]*", "")
- sh("find . -type f -exec sed -i 's/apt.mirantis.com/apt.mirantis.net:8085/g' {} \\;")
- sh("find . -type f -exec sed -i 's/apt-mk.mirantis.com/apt.mirantis.net:8085/g' {} \\;")
+ sh("find . -type f -exec sed -i 's/apt.mirantis.com/apt.mcp.mirantis.net/g' {} \\;")
+ sh("find . -type f -exec sed -i 's/apt-mk.mirantis.com/apt.mcp.mirantis.net/g' {} \\;")
def suite = ruby.getSuiteName(KITCHEN_ENV)
if (suite && suite != "") {
common.infoMsg("Running kitchen test with environment:" + KITCHEN_ENV.trim())
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
index d5c0e77..566caa9 100644
--- a/upgrade-mcp-release.groovy
+++ b/upgrade-mcp-release.groovy
@@ -35,7 +35,7 @@
salt.runSaltProcessStep(venvPepper, target, 'pkg.install', ["force_yes=True", "pkgs='$pkgs'"], null, true, 5)
}catch(Exception ex){}
- common.retry(10, 30){
+ common.retry(20, 60){
salt.minionsReachable(venvPepper, 'I@salt:master', '*')
def running = salt.runSaltProcessStep(venvPepper, target, 'saltutil.running', [], null, true, 5)
for(value in running.get("return")[0].values()){
@@ -85,34 +85,36 @@
stage("Update Reclass"){
def cluster_name = salt.getPillar(venvPepper, 'I@salt:master', "_param:cluster_name").get("return")[0].values()[0]
- if(UPDATE_CLUSTER_MODEL.toBoolean()){
- try{
- salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/ && git diff-index --quiet HEAD --")
- }
- catch(Exception ex){
- error("You have uncommited changes in your Reclass cluster model repository. Please commit or reset them and rerun the pipeline.")
- }
- def dateTime = common.getDatetime()
- salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && grep -r --exclude-dir=aptly -l 'apt_mk_version: .*' * | xargs sed -i 's/apt_mk_version: .*/apt_mk_version: \"$MCP_VERSION\"/g'")
- common.infoMsg("The following changes were made to the cluster model and will be commited. Please consider if you want to push them to the remote repository or not. You have to do this manually when the run is finished.")
- salt.cmdRun(venvPepper, 'I@salt.master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && git diff")
- salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && git status && git add -u && git commit -m 'Cluster model update to the release $MCP_VERSION on $dateTime'")
- }
-
try{
- salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/system && git diff-index --quiet HEAD --")
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/ && git diff-index --quiet HEAD --")
}
catch(Exception ex){
- error("You have unstaged changes in your Reclass system model repository. Please reset them and rerun the pipeline.")
+ error("You have uncommited changes in your Reclass cluster model repository. Please commit or reset them and rerun the pipeline.")
}
- salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/system && git checkout $gitMcpVersion")
+ if(UPDATE_CLUSTER_MODEL.toBoolean()){
+ def dateTime = common.getDatetime()
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+ "grep -r --exclude-dir=apty -l 'apt_mk_version: .*' * | xargs sed -i 's/apt_mk_version: .*/apt_mk_version: \"$MCP_VERSION\"/g'")
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/system && git checkout $gitMcpVersion")
+ // Add new defaults
+ common.infoMsg("Add new defaults")
+ salt.cmdRun(venvPepper, 'I@salt:master', "grep '^- system.defaults\$' /srv/salt/reclass/classes/cluster/$cluster_name/infra/init.yml || " +
+ "sed -i 's/^classes:/classes:\\n- system.defaults/' /srv/salt/reclass/classes/cluster/$cluster_name/infra/init.yml")
+ common.infoMsg("The following changes were made to the cluster model and will be commited. " +
+ "Please consider if you want to push them to the remote repository or not. You have to do this manually when the run is finished.")
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && git diff")
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && git status && " +
+ "git add -u && git commit --allow-empty -m 'Cluster model update to the release $MCP_VERSION on $dateTime'")
+ }
+ salt.enforceState(venvPepper, 'I@salt:master', 'reclass.storage', true)
}
if(UPDATE_LOCAL_REPOS.toBoolean()){
+ def cluster_name = salt.getPillar(venvPepper, 'I@salt:master', "_param:cluster_name").get("return")[0].values()[0]
stage("Update local repos"){
common.infoMsg("Updating local repositories")
- def engine = salt.getPillar(venvPepper, 'I@aptly:server', "aptly:server:source:engine")
+ def engine = salt.getPillar(venvPepper, 'I@aptly:publisher', "aptly:publisher:source:engine")
runningOnDocker = engine.get("return")[0].containsValue("docker")
if (runningOnDocker) {
@@ -122,39 +124,41 @@
common.infoMsg("Aptly isn't running as Docker container. Going to use aptly user for executing aptly commands")
}
- salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name/cicd/aptly && git checkout $MCP_VERSION")
-
if(runningOnDocker){
- salt.cmdRun(venvPepper, 'I@aptly:server', "aptly mirror list --raw | grep -E '*' | xargs -n 1 aptly mirror drop -force", true, null, true)
+ salt.cmdRun(venvPepper, 'I@aptly:publisher', "aptly mirror list --raw | grep -E '*' | xargs -n 1 aptly mirror drop -force", true, null, true)
}
else{
- salt.cmdRun(venvPepper, 'I@aptly:server', "aptly mirror list --raw | grep -E '*' | xargs -n 1 aptly mirror drop -force", true, null, true, ['runas=aptly'])
+ salt.cmdRun(venvPepper, 'I@aptly:publisher', "aptly mirror list --raw | grep -E '*' | xargs -n 1 aptly mirror drop -force", true, null, true, ['runas=aptly'])
}
- salt.enforceState(venvPepper, 'I@aptly:server', 'aptly', true)
+ salt.enforceState(venvPepper, 'I@aptly:publisher', 'aptly', true)
if(runningOnDocker){
- salt.runSaltProcessStep(venvPepper, 'I@aptly:server', 'cmd.script', ['salt://aptly/files/aptly_mirror_update.sh', "args=-sv"], null, true)
- salt.runSaltProcessStep(venvPepper, 'I@aptly:server', 'cmd.script', ['salt://aptly/files/aptly_publish_update.sh', "args=-frv -u http://10.99.0.1:8080"], null, true)
+ salt.runSaltProcessStep(venvPepper, 'I@aptly:publisher', 'cmd.script', ['salt://aptly/files/aptly_mirror_update.sh', "args=-sv"], null, true)
+ salt.runSaltProcessStep(venvPepper, 'I@aptly:publisher', 'cmd.script', ['salt://aptly/files/aptly_publish_update.sh', "args=-frv -u http://10.99.0.1:8080"], null, true)
}
else{
- salt.runSaltProcessStep(venvPepper, 'I@aptly:server', 'cmd.script', ['salt://aptly/files/aptly_mirror_update.sh', "args=-sv", 'runas=aptly'], null, true)
- salt.runSaltProcessStep(venvPepper, 'I@aptly:server', 'cmd.script', ['salt://aptly/files/aptly_publish_update.sh', "args=-afrv", 'runas=aptly'], null, true)
+ salt.runSaltProcessStep(venvPepper, 'I@aptly:publisher', 'cmd.script', ['salt://aptly/files/aptly_mirror_update.sh', "args=-sv", 'runas=aptly'], null, true)
+ salt.runSaltProcessStep(venvPepper, 'I@aptly:publisher', 'cmd.script', ['salt://aptly/files/aptly_publish_update.sh', "args=-afrv", 'runas=aptly'], null, true)
}
- salt.enforceState(venvPepper, 'I@aptly:server', 'docker.client.registry', true)
+ salt.enforceState(venvPepper, 'I@aptly:publisher', 'docker.client.registry', true)
- salt.enforceState(venvPepper, 'I@aptly:server', 'debmirror', true)
+ salt.enforceState(venvPepper, 'I@aptly:publisher', 'debmirror', true)
- salt.enforceState(venvPepper, 'I@aptly:server', 'git.server', true)
+ salt.enforceState(venvPepper, 'I@aptly:publisher', 'git.server', true)
- salt.enforceState(venvPepper, 'I@aptly:server', 'linux.system.file', true)
+ salt.enforceState(venvPepper, 'I@aptly:publisher', 'linux.system.file', true)
}
}
stage("Update Drivetrain"){
salt.cmdRun(venvPepper, 'I@salt:master', "sed -i -e 's/[^ ]*[^ ]/$MCP_VERSION/4' /etc/apt/sources.list.d/mcp_salt.list")
salt.cmdRun(venvPepper, 'I@salt:master', "apt-get -o Dir::Etc::sourcelist='/etc/apt/sources.list.d/mcp_salt.list' -o Dir::Etc::sourceparts='-' -o APT::Get::List-Cleanup='0' update")
+ // Workaround for PROD-22108
+ salt.cmdRun(venvPepper, 'I@salt:master', "apt-get purge -y salt-formula-octavia && " +
+ "apt-get install -y salt-formula-octavia")
+ // End workaround for PROD-22108
salt.cmdRun(venvPepper, 'I@salt:master', "apt-get install -y --allow-downgrades salt-formula-*")
def inventoryBeforeFilename = "reclass-inventory-before.out"