Merge "Support upgrade of different amount of contrail controller nodes"
diff --git a/branch-git-repos.groovy b/branch-git-repos.groovy
index 0624c40..b3bb3a2 100644
--- a/branch-git-repos.groovy
+++ b/branch-git-repos.groovy
@@ -81,17 +81,10 @@
gitSrcObj = gitSrcObj.replace('SUBS_SOURCE_REF', srcObj)
}
- // Remove preifix `origin/` from gitSrcObj
- java.util.regex.Pattern reOrigin = ~'^origin/'
- gitSrcObj = gitSrcObj - reOrigin
-
checkout([
$class: 'GitSCM',
- branches: [
- [name: 'FETCH_HEAD'],
- ],
userRemoteConfigs: [
- [url: gitRepoUrl, refspec: gitSrcObj, credentialsId: gitCredentialsId],
+ [url: gitRepoUrl, credentialsId: gitCredentialsId],
],
extensions: [
[$class: 'PruneStaleBranch'],
@@ -110,15 +103,30 @@
sh 'git config user.email "ci+infra@mirantis.com"'
// Update list of branches
- sh 'git remote update origin --prune'
+ sh 'git checkout master'
+
+ int is_branch = sh(script: "git ls-remote --exit-code --heads origin ${gitBranchNew}", returnStatus: true)
+ int is_tag = sh(script: "git ls-remote --exit-code --tags origin ${gitBranchNew}", returnStatus: true)
// Ensure there is no branch or tag with gitBranchNew name
- sh "git branch -d '${gitBranchNew}' && git push origin ':${gitBranchNew}' || :"
- sh "git tag -d '${gitBranchNew}' && git push origin ':refs/tags/${gitBranchNew}' || :"
+ if (is_branch == 0) {
+ sh """\
+ git checkout 'origin/${gitBranchNew}' -t || :
+ git checkout master
+ git branch -d '${gitBranchNew}'
+ git push origin ':refs/heads/${gitBranchNew}'
+ """.stripIndent()
+ }
+ if (is_tag == 0) {
+ sh """\
+ git tag -d '${gitBranchNew}'
+ git push origin ':refs/tags/${gitBranchNew}'
+ """
+ }
// Create new branch
- sh "git checkout -b '${gitBranchNew}' '${gitSrcObj}'" // Create new local branch
- sh "git push origin '${gitBranchNew}'" // ... push new branch
+ sh "git branch '${gitBranchNew}' '${gitSrcObj}'" // Create new local branch
+ sh "git push --force origin '${gitBranchNew}'" // ... push new branch
}
}
}
diff --git a/cicd-lab-pipeline.groovy b/cicd-lab-pipeline.groovy
index da3e177..8a7a90d 100644
--- a/cicd-lab-pipeline.groovy
+++ b/cicd-lab-pipeline.groovy
@@ -194,7 +194,7 @@
// XXX: retry to workaround magical VALUE_TRIMMED
// response from salt master + to give slow cloud some
// more time to settle down
- salt.cmdRun(pepperEnv, 'I@aptly:server', 'while true; do curl -sf http://172.16.10.254:8084/api/version >/dev/null && break; done')
+ salt.cmdRun(pepperEnv, 'I@aptly:server', 'while true; do curl -sf http://apt.mcp.mirantis.net:8081/api/version >/dev/null && break; done')
}
}
salt.enforceState(pepperEnv, 'I@aptly:server', 'aptly', true)
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index a51f436..1f7dd1f 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -143,6 +143,10 @@
}
currentBuild.description = STACK_NAME
}
+ } else {
+ // In case name was copied with unicode zero-width space chars -
+ // remove them
+ STACK_NAME = STACK_NAME.trim().replaceAll("\\p{C}", "")
}
// no underscore in STACK_NAME
diff --git a/cloud-update.groovy b/cloud-update.groovy
index 56f9351..ab72f76 100644
--- a/cloud-update.groovy
+++ b/cloud-update.groovy
@@ -58,7 +58,7 @@
def command
def commandKwargs
-def wait = 10
+wait = 10
if (common.validInputParam('MINIONS_TEST_TIMEOUT') && MINIONS_TEST_TIMEOUT.isInteger()) {
wait = "${MINIONS_TEST_TIMEOUT}".toInteger()
}
diff --git a/deploy-try-mcp.groovy b/deploy-try-mcp.groovy
new file mode 100644
index 0000000..a19b970
--- /dev/null
+++ b/deploy-try-mcp.groovy
@@ -0,0 +1,119 @@
+/**
+ * Generate cookiecutter cluster by individual products
+ *
+ * Expected parameters:
+ * COOKIECUTTER_TEMPLATE_CONTEXT Context parameters for the template generation.
+ * SALT_MASTER_URL URL of Salt master
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API
+ *
+ **/
+
+import static groovy.json.JsonOutput.toJson
+
+common = new com.mirantis.mk.Common()
+python = new com.mirantis.mk.Python()
+salt = new com.mirantis.mk.Salt()
+ssh = new com.mirantis.mk.Ssh()
+
+pepperEnv = "pepperEnv"
+
+slaveNode = env.SLAVE_NODE ?: 'python&&docker'
+model_job = 0
+
+timeout(time: 2, unit: 'HOURS') {
+ node(slaveNode) {
+ try {
+ def templateContext = readYaml text: COOKIECUTTER_TEMPLATE_CONTEXT
+ def clusterName = templateContext.default_context.cluster_name
+ def aioNodeHostname = templateContext.default_context.aio_node_hostname
+ def aioInternalAddress = templateContext.default_context.aio_internal_address
+ def drivetrainInternalAddress = templateContext.default_context.drivetrain_internal_address
+ def artifact_tar_file = "${clusterName}.tar.gz"
+ def masterIP = templateContext.default_context.drivetrain_external_address
+ if ( templateContext.default_context.get("docker_deployment", "False").toBoolean() ) {
+ masterIP = drivetrainInternalAddress
+ }
+ def masterUrl = "http://" + masterIP + ":6969"
+ def outputDirectory = env.WORKSPACE + "/"
+ def outputDestination = outputDirectory + artifact_tar_file
+ def outputCluster = outputDirectory + "/classes/cluster/" + clusterName
+ def rsyncLocation = templateContext.default_context.get("rsync_location", "/srv/salt/reclass/classes/cluster")
+ def rsyncCredentials = templateContext.default_context.get("rsync_credentials", "lab")
+ c = common.getSshCredentials(rsyncCredentials)
+ def rsyncSSHKey = c.getPrivateKey()
+ def rsyncUser = c.getUsername()
+ def rsyncKeyFile = outputDirectory + "rsync_key"
+ def rsyncPath = rsyncUser + "@" + masterIP + ":" + rsyncLocation
+ currentBuild.description = "Cluster " + clusterName + " on " + masterIP
+
+ stage("Generate AIO model") {
+ model_job = build(job: 'generate-salt-model-separated-products',
+ parameters: [
+ [$class: 'StringParameterValue', name: 'COOKIECUTTER_TEMPLATE_CONTEXT', value: COOKIECUTTER_TEMPLATE_CONTEXT ],
+ [$class: 'BooleanParameterValue', name: 'TEST_MODEL', value: false],
+ ])
+ }
+
+ stage("Download artifact with model") {
+ artifact_tar_url = "${env.JENKINS_URL}/job/generate-salt-model-separated-products/${model_job.number}/artifact/output-${clusterName}/${artifact_tar_file}"
+ sh "wget --progress=dot:mega --auth-no-challenge -O ${outputDestination} '${artifact_tar_url}'"
+ sh "tar -xzvf ${outputDestination}"
+ }
+
+ stage("Send model to Salt master node") {
+ ssh.ensureKnownHosts(masterIP)
+ writeFile(file: rsyncKeyFile, text: rsyncSSHKey)
+ sh("chmod 600 ${rsyncKeyFile}")
+ common.infoMsg("Copying cluster model to ${rsyncPath}")
+ sh("rsync -r -e \"ssh -i ${rsyncKeyFile}\" ${outputCluster} ${rsyncPath}")
+ }
+
+ stage("Setup virtualenv for Pepper") {
+ python.setupPepperVirtualenv(pepperEnv, masterUrl, SALT_MASTER_CREDENTIALS)
+ }
+
+ stage("Prepare AIO node"){
+ tgt = "S@" + aioInternalAddress
+ // Classify AIO node
+ eventData = [:]
+ eventData["node_control_ip"] = aioInternalAddress
+ eventData["node_os"] = "xenial"
+ eventData["node_master_ip"] = drivetrainInternalAddress
+ eventData["node_hostname"] = aioNodeHostname
+ eventData["node_cluster"] = clusterName
+ eventJson = toJson(eventData)
+ event = "salt-call event.send \"reclass/minion/classify\" \'" + eventJson + "\'"
+ salt.cmdRun(pepperEnv, tgt, event)
+ sleep(30)
+ // Upgrade Salt minion
+ salt.runSaltProcessStep(pepperEnv, tgt, 'pkg.install', "salt-minion")
+ sleep(10)
+ // Run core states on AIO node
+ salt.fullRefresh(pepperEnv, '*')
+ salt.enforceState(pepperEnv, tgt, 'linux')
+ salt.enforceState(pepperEnv, tgt, 'salt')
+ salt.enforceState(pepperEnv, tgt, 'openssh')
+ salt.enforceState(pepperEnv, tgt, 'ntp')
+ salt.enforceState(pepperEnv, tgt, 'rsyslog')
+ }
+
+ stage("Deploy Openstack") {
+ build(job: 'deploy_openstack',
+ parameters: [
+ [$class: 'StringParameterValue', name: 'SALT_MASTER_CREDENTIALS', value: SALT_MASTER_CREDENTIALS],
+ [$class: 'StringParameterValue', name: 'SALT_MASTER_URL', value: masterUrl],
+ [$class: 'StringParameterValue', name: 'STACK_INSTALL', value: 'openstack']
+ ])
+ }
+ } catch (Throwable e) {
+ currentBuild.result = "FAILURE"
+ currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+ throw e
+ } finally {
+ stage('Clean workspace directories') {
+ sh(script: 'find . -mindepth 1 -delete > /dev/null || true')
+ }
+ // common.sendNotification(currentBuild.result,"",["slack"])
+ }
+ }
+}
\ No newline at end of file
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 2cbbad0..fb28837 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -11,54 +11,47 @@
git = new com.mirantis.mk.Git()
python = new com.mirantis.mk.Python()
saltModelTesting = new com.mirantis.mk.SaltModelTesting()
-ssh = new com.mirantis.mk.Ssh()
+
slaveNode = env.SLAVE_NODE ?: 'python&&docker'
gerritCredentials = env.CREDENTIALS_ID ?: 'gerrit'
timeout(time: 2, unit: 'HOURS') {
node(slaveNode) {
- def templateEnv = "${env.WORKSPACE}/template"
- def modelEnv = "${env.WORKSPACE}/model"
- def testEnv = "${env.WORKSPACE}/test"
- def pipelineEnv = "${env.WORKSPACE}/pipelines"
+ sshagent(credentials: [gerritCredentials]) {
+ def templateEnv = "${env.WORKSPACE}/template"
+ def modelEnv = "${env.WORKSPACE}/model"
+ def testEnv = "${env.WORKSPACE}/test"
+ def pipelineEnv = "${env.WORKSPACE}/pipelines"
- try {
- def templateContext = readYaml text: COOKIECUTTER_TEMPLATE_CONTEXT
- def mcpVersion = templateContext.default_context.mcp_version
- def sharedReclassUrl = templateContext.default_context.shared_reclass_url
- def clusterDomain = templateContext.default_context.cluster_domain
- def clusterName = templateContext.default_context.cluster_name
- def saltMaster = templateContext.default_context.salt_master_hostname
- def cutterEnv = "${env.WORKSPACE}/cutter"
- def jinjaEnv = "${env.WORKSPACE}/jinja"
- def outputDestination = "${modelEnv}/classes/cluster/${clusterName}"
- def systemEnv = "${modelEnv}/classes/system"
- def targetBranch = "feature/${clusterName}"
- def templateBaseDir = "${env.WORKSPACE}/template"
- def templateDir = "${templateEnv}/template/dir"
- def templateOutputDir = templateBaseDir
- def user
- def testResult = false
- wrap([$class: 'BuildUser']) {
- user = env.BUILD_USER_ID
- }
- currentBuild.description = clusterName
- print("Using context:\n" + COOKIECUTTER_TEMPLATE_CONTEXT)
+ try {
+ def templateContext = readYaml text: COOKIECUTTER_TEMPLATE_CONTEXT
+ def mcpVersion = templateContext.default_context.mcp_version
+ def sharedReclassUrl = templateContext.default_context.shared_reclass_url
+ def clusterDomain = templateContext.default_context.cluster_domain
+ def clusterName = templateContext.default_context.cluster_name
+ def saltMaster = templateContext.default_context.salt_master_hostname
+ def cutterEnv = "${env.WORKSPACE}/cutter"
+ def jinjaEnv = "${env.WORKSPACE}/jinja"
+ def outputDestination = "${modelEnv}/classes/cluster/${clusterName}"
+ def systemEnv = "${modelEnv}/classes/system"
+ def targetBranch = "feature/${clusterName}"
+ def templateBaseDir = "${env.WORKSPACE}/template"
+ def templateDir = "${templateEnv}/template/dir"
+ def templateOutputDir = templateBaseDir
+ def user
+ def testResult = false
+ wrap([$class: 'BuildUser']) {
+ user = env.BUILD_USER_ID
+ }
+ currentBuild.description = clusterName
+ common.infoMsg("Using context:\n" + templateContext)
- stage('Download Cookiecutter template') {
- sh(script: 'find . -mindepth 1 -delete > /dev/null || true')
- def cookiecutterTemplateUrl = templateContext.default_context.cookiecutter_template_url
- def cookiecutterTemplateBranch = templateContext.default_context.cookiecutter_template_branch
- git.checkoutGitRepository(templateEnv, cookiecutterTemplateUrl, 'master', gerritCredentials)
- // Use refspec if exists first of all
- if (cookiecutterTemplateBranch.toString().startsWith('refs/')) {
- dir(templateEnv) {
- withCredentials(gerritCredentials){
- ssh.agentSh("git fetch ${cookiecutterTemplateUrl} ${cookiecutterTemplateBranch} && git checkout FETCH_HEAD")
- }
- }
- } else {
+
+ stage('Download Cookiecutter template') {
+ sh(script: 'find . -mindepth 1 -delete > /dev/null || true')
+ def cookiecutterTemplateUrl = templateContext.default_context.cookiecutter_template_url
+ def cookiecutterTemplateBranch = templateContext.default_context.cookiecutter_template_branch
// Use mcpVersion git tag if not specified branch for cookiecutter-templates
if (cookiecutterTemplateBranch == '') {
cookiecutterTemplateBranch = mcpVersion
@@ -67,24 +60,20 @@
cookiecutterTemplateBranch = 'master'
}
}
- git.changeGitBranch(templateEnv, cookiecutterTemplateBranch)
+ checkout([
+ $class : 'GitSCM',
+ branches : [[name: 'FETCH_HEAD'],],
+ extensions : [[$class: 'RelativeTargetDirectory', relativeTargetDir: templateEnv]],
+ userRemoteConfigs: [[url: cookiecutterTemplateUrl, refspec: cookiecutterTemplateBranch, credentialsId: gerritCredentials],],
+ ])
}
- }
-
- stage('Create empty reclass model') {
- dir(path: modelEnv) {
- sh "rm -rfv .git"
- sh "git init"
- ssh.agentSh("git submodule add ${sharedReclassUrl} 'classes/system'")
- }
-
- def sharedReclassBranch = templateContext.default_context.shared_reclass_branch
- // Use refspec if exists first of all
- if (sharedReclassBranch.toString().startsWith('refs/')) {
- dir(systemEnv) {
- ssh.agentSh("git fetch ${sharedReclassUrl} ${sharedReclassBranch} && git checkout FETCH_HEAD")
+ stage('Create empty reclass model') {
+ dir(path: modelEnv) {
+ sh "rm -rfv .git; git init"
+ sh "git submodule add ${sharedReclassUrl} 'classes/system'"
}
- } else {
+
+ def sharedReclassBranch = templateContext.default_context.shared_reclass_branch
// Use mcpVersion git tag if not specified branch for reclass-system
if (sharedReclassBranch == '') {
sharedReclassBranch = mcpVersion
@@ -94,175 +83,179 @@
sharedReclassBranch = 'master'
}
}
- git.changeGitBranch(systemEnv, sharedReclassBranch)
- }
- git.commitGitChanges(modelEnv, "Added new shared reclass submodule", "${user}@localhost", "${user}")
- }
-
- stage('Generate model') {
- python.setupCookiecutterVirtualenv(cutterEnv)
- python.generateModel(COOKIECUTTER_TEMPLATE_CONTEXT, 'default_context', saltMaster, cutterEnv, modelEnv, templateEnv, false)
- git.commitGitChanges(modelEnv, "Create model ${clusterName}", "${user}@localhost", "${user}")
- }
-
- stage("Test") {
- if (TEST_MODEL.toBoolean() && sharedReclassUrl != '') {
- distribRevision = mcpVersion
- if (['master'].contains(mcpVersion)) {
- distribRevision = 'nightly'
- }
- if (distribRevision.contains('/')) {
- distribRevision = distribRevision.split('/')[-1]
- }
- // Check if we are going to test bleeding-edge release, which doesn't have binary release yet
- if (!common.checkRemoteBinary([apt_mk_version: distribRevision]).linux_system_repo_url) {
- common.errorMsg("Binary release: ${distribRevision} not exist. Fallback to 'proposed'! ")
- distribRevision = 'proposed'
- }
- sh("cp -r ${modelEnv} ${testEnv}")
- def DockerCName = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}"
- common.infoMsg("Attempt to run test against distribRevision: ${distribRevision}")
- try {
- def config = [
- 'dockerHostname' : "${saltMaster}.${clusterDomain}",
- 'reclassEnv' : testEnv,
- 'distribRevision' : distribRevision,
- 'dockerContainerName': DockerCName,
- 'testContext' : 'salt-model-node'
- ]
- testResult = saltModelTesting.testNode(config)
- common.infoMsg("Test finished: SUCCESS")
- } catch (Exception ex) {
- common.warningMsg("Test finished: FAILED")
- testResult = false
- }
- } else {
- common.warningMsg("Test stage has been skipped!")
- }
- }
- stage("Generate config drives") {
- // apt package genisoimage is required for this stage
-
- // download create-config-drive
- // FIXME: that should be refactored, to use git clone - to be able download it from custom repo.
- def mcpCommonScriptsBranch = templateContext['default_context']['mcp_common_scripts_branch']
- if (mcpCommonScriptsBranch == '') {
- mcpCommonScriptsBranch = mcpVersion
- // Don't have n/t/s for mcp-common-scripts repo, therefore use master
- if (["nightly", "testing", "stable"].contains(mcpVersion)) {
- common.warningMsg("Fetching mcp-common-scripts from master!")
- mcpCommonScriptsBranch = 'master'
- }
+ checkout([
+ $class : 'GitSCM',
+ branches : [[name: 'FETCH_HEAD'],],
+ extensions : [[$class: 'RelativeTargetDirectory', relativeTargetDir: systemEnv]],
+ userRemoteConfigs: [[url: sharedReclassUrl, refspec: sharedReclassBranch, credentialsId: gerritCredentials],],
+ ])
+ git.commitGitChanges(modelEnv, "Added new shared reclass submodule", "${user}@localhost", "${user}")
}
- def commonScriptsRepoUrl = 'https://gerrit.mcp.mirantis.com/mcp/mcp-common-scripts'
- checkout([
- $class : 'GitSCM',
- branches : [[name: 'FETCH_HEAD'],],
- extensions : [[$class: 'RelativeTargetDirectory', relativeTargetDir: 'mcp-common-scripts']],
- userRemoteConfigs: [[url: commonScriptsRepoUrl, refspec: mcpCommonScriptsBranch],],
- ])
-
- sh "cp mcp-common-scripts/config-drive/create_config_drive.sh create-config-drive && chmod +x create-config-drive"
- sh "[ -f mcp-common-scripts/config-drive/master_config.sh ] && cp mcp-common-scripts/config-drive/master_config.sh user_data || cp mcp-common-scripts/config-drive/master_config.yaml user_data"
-
- sh "git clone --mirror https://github.com/Mirantis/mk-pipelines.git ${pipelineEnv}/mk-pipelines"
- sh "git clone --mirror https://github.com/Mirantis/pipeline-library.git ${pipelineEnv}/pipeline-library"
- args = "--user-data user_data --hostname ${saltMaster} --model ${modelEnv} --mk-pipelines ${pipelineEnv}/mk-pipelines/ --pipeline-library ${pipelineEnv}/pipeline-library/ ${saltMaster}.${clusterDomain}-config.iso"
-
- // load data from model
- def smc = [:]
- smc['SALT_MASTER_MINION_ID'] = "${saltMaster}.${clusterDomain}"
- smc['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
- smc['DEPLOY_NETWORK_GW'] = templateContext['default_context']['deploy_network_gateway']
- smc['DEPLOY_NETWORK_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
- if (templateContext['default_context'].get('deploy_network_mtu')) {
- smc['DEPLOY_NETWORK_MTU'] = templateContext['default_context']['deploy_network_mtu']
+ stage('Generate model') {
+ python.setupCookiecutterVirtualenv(cutterEnv)
+ python.generateModel(COOKIECUTTER_TEMPLATE_CONTEXT, 'default_context', saltMaster, cutterEnv, modelEnv, templateEnv, false)
+ git.commitGitChanges(modelEnv, "Create model ${clusterName}", "${user}@localhost", "${user}")
}
- smc['DNS_SERVERS'] = templateContext['default_context']['dns_server01']
- smc['MCP_VERSION'] = "${mcpVersion}"
- if (templateContext['default_context']['local_repositories'] == 'True') {
- def localRepoIP = templateContext['default_context']['local_repo_url']
- smc['MCP_SALT_REPO_KEY'] = "http://${localRepoIP}/public.gpg"
- smc['MCP_SALT_REPO_URL'] = "http://${localRepoIP}/ubuntu-xenial"
- smc['PIPELINES_FROM_ISO'] = 'false'
- smc['PIPELINE_REPO_URL'] = "http://${localRepoIP}:8088"
- smc['LOCAL_REPOS'] = 'true'
- }
- if (templateContext['default_context']['upstream_proxy_enabled'] == 'True') {
- if (templateContext['default_context']['upstream_proxy_auth_enabled'] == 'True') {
- smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
- smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+
+ stage("Test") {
+ if (TEST_MODEL.toBoolean() && sharedReclassUrl != '') {
+ distribRevision = mcpVersion
+ if (['master'].contains(mcpVersion)) {
+ distribRevision = 'nightly'
+ }
+ if (distribRevision.contains('/')) {
+ distribRevision = distribRevision.split('/')[-1]
+ }
+ // Check if we are going to test bleeding-edge release, which doesn't have binary release yet
+ if (!common.checkRemoteBinary([apt_mk_version: distribRevision]).linux_system_repo_url) {
+ common.errorMsg("Binary release: ${distribRevision} not exist. Fallback to 'proposed'! ")
+ distribRevision = 'proposed'
+ }
+ sh("cp -r ${modelEnv} ${testEnv}")
+ def DockerCName = "${env.JOB_NAME.toLowerCase()}_${env.BUILD_TAG.toLowerCase()}"
+ common.infoMsg("Attempt to run test against distribRevision: ${distribRevision}")
+ try {
+ def config = [
+ 'dockerHostname' : "${saltMaster}.${clusterDomain}",
+ 'reclassEnv' : testEnv,
+ 'distribRevision' : distribRevision,
+ 'dockerContainerName': DockerCName,
+ 'testContext' : 'salt-model-node'
+ ]
+ testResult = saltModelTesting.testNode(config)
+ common.infoMsg("Test finished: SUCCESS")
+ } catch (Exception ex) {
+ common.warningMsg("Test finished: FAILED")
+ testResult = false
+ }
} else {
- smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
- smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+ common.warningMsg("Test stage has been skipped!")
+ }
+ }
+ stage("Generate config drives") {
+ // apt package genisoimage is required for this stage
+
+ // download create-config-drive
+ // FIXME: that should be refactored, to use git clone - to be able download it from custom repo.
+ def mcpCommonScriptsBranch = templateContext['default_context']['mcp_common_scripts_branch']
+ if (mcpCommonScriptsBranch == '') {
+ mcpCommonScriptsBranch = mcpVersion
+ // Don't have n/t/s for mcp-common-scripts repo, therefore use master
+ if (["nightly", "testing", "stable"].contains(mcpVersion)) {
+ common.warningMsg("Fetching mcp-common-scripts from master!")
+ mcpCommonScriptsBranch = 'master'
+ }
+ }
+ def commonScriptsRepoUrl = templateContext['default_context']['mcp_common_scripts_repo'] ?: 'ssh://gerrit.mcp.mirantis.com:29418/mcp/mcp-common-scripts'
+ checkout([
+ $class : 'GitSCM',
+ branches : [[name: 'FETCH_HEAD'],],
+ extensions : [[$class: 'RelativeTargetDirectory', relativeTargetDir: 'mcp-common-scripts']],
+ userRemoteConfigs: [[url: commonScriptsRepoUrl, refspec: mcpCommonScriptsBranch, credentialsId: gerritCredentials],],
+ ])
+
+ sh 'cp mcp-common-scripts/config-drive/create_config_drive.sh create-config-drive && chmod +x create-config-drive'
+ sh '[ -f mcp-common-scripts/config-drive/master_config.sh ] && cp mcp-common-scripts/config-drive/master_config.sh user_data || cp mcp-common-scripts/config-drive/master_config.yaml user_data'
+
+ sh "git clone --mirror https://github.com/Mirantis/mk-pipelines.git ${pipelineEnv}/mk-pipelines"
+ sh "git clone --mirror https://github.com/Mirantis/pipeline-library.git ${pipelineEnv}/pipeline-library"
+ args = "--user-data user_data --hostname ${saltMaster} --model ${modelEnv} --mk-pipelines ${pipelineEnv}/mk-pipelines/ --pipeline-library ${pipelineEnv}/pipeline-library/ ${saltMaster}.${clusterDomain}-config.iso"
+
+ // load data from model
+ def smc = [:]
+ smc['SALT_MASTER_MINION_ID'] = "${saltMaster}.${clusterDomain}"
+ smc['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
+ smc['DEPLOY_NETWORK_GW'] = templateContext['default_context']['deploy_network_gateway']
+ smc['DEPLOY_NETWORK_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
+ if (templateContext['default_context'].get('deploy_network_mtu')) {
+ smc['DEPLOY_NETWORK_MTU'] = templateContext['default_context']['deploy_network_mtu']
+ }
+ smc['DNS_SERVERS'] = templateContext['default_context']['dns_server01']
+ smc['MCP_VERSION'] = "${mcpVersion}"
+ if (templateContext['default_context']['local_repositories'] == 'True') {
+ def localRepoIP = templateContext['default_context']['local_repo_url']
+ smc['MCP_SALT_REPO_KEY'] = "http://${localRepoIP}/public.gpg"
+ smc['MCP_SALT_REPO_URL'] = "http://${localRepoIP}/ubuntu-xenial"
+ smc['PIPELINES_FROM_ISO'] = 'false'
+ smc['PIPELINE_REPO_URL'] = "http://${localRepoIP}:8088"
+ smc['LOCAL_REPOS'] = 'true'
+ }
+ if (templateContext['default_context']['upstream_proxy_enabled'] == 'True') {
+ if (templateContext['default_context']['upstream_proxy_auth_enabled'] == 'True') {
+ smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+ smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_user'] + ':' + templateContext['default_context']['upstream_proxy_password'] + '@' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+ } else {
+ smc['http_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+ smc['https_proxy'] = 'http://' + templateContext['default_context']['upstream_proxy_address'] + ':' + templateContext['default_context']['upstream_proxy_port']
+ }
+ }
+
+ for (i in common.entries(smc)) {
+ sh "sed -i 's,${i[0]}=.*,${i[0]}=${i[1]},' user_data"
+ }
+
+ // create cfg config-drive
+ sh "./create-config-drive ${args}"
+ sh("mkdir output-${clusterName} && mv ${saltMaster}.${clusterDomain}-config.iso output-${clusterName}/")
+
+ // save cfg iso to artifacts
+ archiveArtifacts artifacts: "output-${clusterName}/${saltMaster}.${clusterDomain}-config.iso"
+
+ if (templateContext['default_context']['local_repositories'] == 'True') {
+ def aptlyServerHostname = templateContext.default_context.aptly_server_hostname
+ sh "[ -f mcp-common-scripts/config-drive/mirror_config.yaml ] && cp mcp-common-scripts/config-drive/mirror_config.yaml mirror_config || cp mcp-common-scripts/config-drive/mirror_config.sh mirror_config"
+
+ def smc_apt = [:]
+ smc_apt['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
+ smc_apt['APTLY_DEPLOY_IP'] = templateContext['default_context']['aptly_server_deploy_address']
+ smc_apt['APTLY_DEPLOY_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
+ smc_apt['APTLY_MINION_ID'] = "${aptlyServerHostname}.${clusterDomain}"
+
+ for (i in common.entries(smc_apt)) {
+ sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" mirror_config"
+ }
+
+ // create apt config-drive
+ sh "./create-config-drive --user-data mirror_config --hostname ${aptlyServerHostname} ${aptlyServerHostname}.${clusterDomain}-config.iso"
+ sh("mv ${aptlyServerHostname}.${clusterDomain}-config.iso output-${clusterName}/")
+
+ // save apt iso to artifacts
+ archiveArtifacts artifacts: "output-${clusterName}/${aptlyServerHostname}.${clusterDomain}-config.iso"
}
}
- for (i in common.entries(smc)) {
- sh "sed -i 's,${i[0]}=.*,${i[0]}=${i[1]},' user_data"
- }
+ stage('Save changes reclass model') {
+ sh(returnStatus: true, script: "tar -czf output-${clusterName}/${clusterName}.tar.gz --exclude='*@tmp' -C ${modelEnv} .")
+ archiveArtifacts artifacts: "output-${clusterName}/${clusterName}.tar.gz"
- // create cfg config-drive
- sh "./create-config-drive ${args}"
- sh("mkdir output-${clusterName} && mv ${saltMaster}.${clusterDomain}-config.iso output-${clusterName}/")
-
- // save cfg iso to artifacts
- archiveArtifacts artifacts: "output-${clusterName}/${saltMaster}.${clusterDomain}-config.iso"
-
- if (templateContext['default_context']['local_repositories'] == 'True') {
- def aptlyServerHostname = templateContext.default_context.aptly_server_hostname
- sh "[ -f mcp-common-scripts/config-drive/mirror_config.yaml ] && cp mcp-common-scripts/config-drive/mirror_config.yaml mirror_config || cp mcp-common-scripts/config-drive/mirror_config.sh mirror_config"
-
- def smc_apt = [:]
- smc_apt['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
- smc_apt['APTLY_DEPLOY_IP'] = templateContext['default_context']['aptly_server_deploy_address']
- smc_apt['APTLY_DEPLOY_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
- smc_apt['APTLY_MINION_ID'] = "${aptlyServerHostname}.${clusterDomain}"
-
- for (i in common.entries(smc_apt)) {
- sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" mirror_config"
+ if (EMAIL_ADDRESS != null && EMAIL_ADDRESS != "") {
+ emailext(to: EMAIL_ADDRESS,
+ attachmentsPattern: "output-${clusterName}/*",
+ body: "Mirantis Jenkins\n\nRequested reclass model ${clusterName} has been created and attached to this email.\nEnjoy!\n\nMirantis",
+ subject: "Your Salt model ${clusterName}")
}
-
- // create apt config-drive
- sh "./create-config-drive --user-data mirror_config --hostname ${aptlyServerHostname} ${aptlyServerHostname}.${clusterDomain}-config.iso"
- sh("mv ${aptlyServerHostname}.${clusterDomain}-config.iso output-${clusterName}/")
-
- // save apt iso to artifacts
- archiveArtifacts artifacts: "output-${clusterName}/${aptlyServerHostname}.${clusterDomain}-config.iso"
+ dir("output-${clusterName}") {
+ deleteDir()
+ }
}
- }
- stage('Save changes reclass model') {
- sh(returnStatus: true, script: "tar -czf output-${clusterName}/${clusterName}.tar.gz --exclude='*@tmp' -C ${modelEnv} .")
- archiveArtifacts artifacts: "output-${clusterName}/${clusterName}.tar.gz"
-
- if (EMAIL_ADDRESS != null && EMAIL_ADDRESS != "") {
- emailext(to: EMAIL_ADDRESS,
- attachmentsPattern: "output-${clusterName}/*",
- body: "Mirantis Jenkins\n\nRequested reclass model ${clusterName} has been created and attached to this email.\nEnjoy!\n\nMirantis",
- subject: "Your Salt model ${clusterName}")
+ // Fail, but leave possibility to get failed artifacts
+ if (!testResult && TEST_MODEL.toBoolean()) {
+ common.warningMsg('Test finished: FAILURE. Please check logs and\\or debug failed model manually!')
+ error('Test stage finished: FAILURE')
}
- dir("output-${clusterName}") {
- deleteDir()
+
+ } catch (Throwable e) {
+ currentBuild.result = "FAILURE"
+ currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+ throw e
+ } finally {
+ stage('Clean workspace directories') {
+ sh(script: 'find . -mindepth 1 -delete > /dev/null || true')
}
+ // common.sendNotification(currentBuild.result,"",["slack"])
}
-
- // Fail, but leave possibility to get failed artifacts
- if (!testResult && TEST_MODEL.toBoolean()) {
- common.warningMsg('Test finished: FAILURE. Please check logs and\\or debug failed model manually!')
- error('Test stage finished: FAILURE')
- }
-
- } catch (Throwable e) {
- currentBuild.result = "FAILURE"
- currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
- throw e
- } finally {
- stage('Clean workspace directories') {
- sh(script: 'find . -mindepth 1 -delete > /dev/null || true')
- }
- // common.sendNotification(currentBuild.result,"",["slack"])
}
}
}
diff --git a/promote-vcp-images.groovy b/promote-vcp-images.groovy
index 7b4f80e..f960d75 100644
--- a/promote-vcp-images.groovy
+++ b/promote-vcp-images.groovy
@@ -114,7 +114,7 @@
error("Uploading file: ${targetImage}.md5 failed!")
}
- description += "<a href='http://apt.mirantis.net:8085/images/${targetImage}'>${job_env.SOURCE_TAG}=>${targetImage}</a>"
+ description += "<a href='http://images.mcp.mirantis.net/${targetImage}'>${job_env.SOURCE_TAG}=>${targetImage}</a>"
}
currentBuild.description = description
} catch (Throwable e) {
diff --git a/release-mcp-version.groovy b/release-mcp-version.groovy
index 0d9ce5e..bd2ccd7 100644
--- a/release-mcp-version.groovy
+++ b/release-mcp-version.groovy
@@ -130,7 +130,7 @@
}
if (syncVcpImagesToS3) {
- common.infoMsg("Syncing VCP images from internal: http://apt.mcp.mirantis.net/images to s3: images.mirantis.com")
+ common.infoMsg("Syncing VCP images from internal: http://images.mcp.mirantis.net/ to s3: images.mirantis.com")
triggerSyncVCPJob('')
}
if (emailNotify) {
diff --git a/test-cookiecutter-reclass.groovy b/test-cookiecutter-reclass.groovy
index d93a618..bd3373c 100644
--- a/test-cookiecutter-reclass.groovy
+++ b/test-cookiecutter-reclass.groovy
@@ -49,7 +49,7 @@
gerritDataRSHEAD = [:]
gerritDataRS = [:]
gerritDataRS << gerritConData
-gerritDataRS['gerritBranch'] = env.RECLASS_MODEL_BRANCH ?: 'master'
+gerritDataRS['gerritBranch'] = env.RECLASS_SYSTEM_BRANCH ?: 'master'
gerritDataRS['gerritRefSpec'] = env.RECLASS_SYSTEM_GIT_REF ?: null
gerritDataRS['gerritProject'] = 'salt-models/reclass-system'
diff --git a/test-drivetrain.groovy b/test-drivetrain.groovy
index fe7c87c..c421c17 100644
--- a/test-drivetrain.groovy
+++ b/test-drivetrain.groovy
@@ -119,11 +119,11 @@
throw e
} finally{
if(DELETE_STACK.toBoolean() && ENVIRONMENT_IP == ""){
- mcpEnvJob = build(job: "delete-heat-stack-for-mcp-env", parameters: [
+ mcpEnvJob = build(job: "delete-heat-stack-for-mcp-env", wait: false, parameters: [
[$class: 'StringParameterValue', name: 'OS_PROJECT_NAME', value: 'mcp-mk'],
[$class: 'StringParameterValue', name: 'STACK_NAME', value: 'jenkins-drivetrain-test-' + currentBuild.number],
])
}
}
}
-}
\ No newline at end of file
+}
diff --git a/test-salt-formulas-env.groovy b/test-salt-formulas-env.groovy
index be9c894..e2dbf83 100644
--- a/test-salt-formulas-env.groovy
+++ b/test-salt-formulas-env.groovy
@@ -64,8 +64,8 @@
common.infoMsg("Running part of kitchen test")
if (KITCHEN_ENV != null && !KITCHEN_ENV.isEmpty() && KITCHEN_ENV != "") {
def cleanEnv = KITCHEN_ENV.replaceAll("\\s?SUITE=[^\\s]*", "")
- sh("find . -type f -exec sed -i 's/apt.mirantis.com/apt.mirantis.net:8085/g' {} \\;")
- sh("find . -type f -exec sed -i 's/apt-mk.mirantis.com/apt.mirantis.net:8085/g' {} \\;")
+ sh("find . -type f -exec sed -i 's/apt.mirantis.com/apt.mcp.mirantis.net/g' {} \\;")
+ sh("find . -type f -exec sed -i 's/apt-mk.mirantis.com/apt.mcp.mirantis.net/g' {} \\;")
def suite = ruby.getSuiteName(KITCHEN_ENV)
if (suite && suite != "") {
common.infoMsg("Running kitchen test with environment:" + KITCHEN_ENV.trim())
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
index 8c4d907..566caa9 100644
--- a/upgrade-mcp-release.groovy
+++ b/upgrade-mcp-release.groovy
@@ -85,31 +85,27 @@
stage("Update Reclass"){
def cluster_name = salt.getPillar(venvPepper, 'I@salt:master', "_param:cluster_name").get("return")[0].values()[0]
- if(UPDATE_CLUSTER_MODEL.toBoolean()){
- try{
- salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/ && git diff-index --quiet HEAD --")
- }
- catch(Exception ex){
- error("You have uncommited changes in your Reclass cluster model repository. Please commit or reset them and rerun the pipeline.")
- }
- def dateTime = common.getDatetime()
- salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && grep -r --exclude-dir=aptly -l 'apt_mk_version: .*' * | xargs sed -i 's/apt_mk_version: .*/apt_mk_version: \"$MCP_VERSION\"/g'")
- common.infoMsg("The following changes were made to the cluster model and will be commited. Please consider if you want to push them to the remote repository or not. You have to do this manually when the run is finished.")
- salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && git diff")
- salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && git status && git add -u && git commit --allow-empty -m 'Cluster model update to the release $MCP_VERSION on $dateTime'")
- }
-
try{
- salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/system && git diff-index --quiet HEAD --")
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/ && git diff-index --quiet HEAD --")
}
catch(Exception ex){
- error("You have unstaged changes in your Reclass system model repository. Please reset them and rerun the pipeline.")
+ error("You have uncommitted changes in your Reclass cluster model repository. Please commit or reset them and rerun the pipeline.")
}
- salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/system && git checkout $gitMcpVersion")
- // Add new defaults
- common.infoMsg("Add new defaults")
- salt.cmdRun(venvPepper, 'I@salt:master', "grep '^- system.defaults\$' /srv/salt/reclass/classes/cluster/*/infra/init.yml || " +
- "sed -i 's/^classes:/classes:\\n- system.defaults/' /srv/salt/reclass/classes/cluster/*/infra/init.yml")
+ if(UPDATE_CLUSTER_MODEL.toBoolean()){
+ def dateTime = common.getDatetime()
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && " +
+ "grep -r --exclude-dir=aptly -l 'apt_mk_version: .*' * | xargs sed -i 's/apt_mk_version: .*/apt_mk_version: \"$MCP_VERSION\"/g'")
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/system && git checkout $gitMcpVersion")
+ // Add new defaults
+ common.infoMsg("Add new defaults")
+ salt.cmdRun(venvPepper, 'I@salt:master', "grep '^- system.defaults\$' /srv/salt/reclass/classes/cluster/$cluster_name/infra/init.yml || " +
+ "sed -i 's/^classes:/classes:\\n- system.defaults/' /srv/salt/reclass/classes/cluster/$cluster_name/infra/init.yml")
+ common.infoMsg("The following changes were made to the cluster model and will be committed. " +
+ "Please consider if you want to push them to the remote repository or not. You have to do this manually when the run is finished.")
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && git diff")
+ salt.cmdRun(venvPepper, 'I@salt:master', "cd /srv/salt/reclass/classes/cluster/$cluster_name && git status && " +
+ "git add -u && git commit --allow-empty -m 'Cluster model update to the release $MCP_VERSION on $dateTime'")
+ }
salt.enforceState(venvPepper, 'I@salt:master', 'reclass.storage', true)
}