Merge "Use image name instead of the gerrit project name"
diff --git a/cicd-lab-pipeline.groovy b/cicd-lab-pipeline.groovy
index df4b477..7cb6f55 100644
--- a/cicd-lab-pipeline.groovy
+++ b/cicd-lab-pipeline.groovy
@@ -303,7 +303,7 @@
// Cleanup
if (HEAT_STACK_DELETE.toBoolean() == true) {
stage('Trigger cleanup job') {
- build job: 'deploy-heat-cleanup', parameters: [[$class: 'StringParameterValue', name: 'HEAT_STACK_NAME', value: HEAT_STACK_NAME]]
+ build job: 'deploy-stack-cleanup', parameters: [[$class: 'StringParameterValue', name: 'STACK_NAME', value: HEAT_STACK_NAME]]
}
}
}
diff --git a/cleanup-pipeline.groovy b/cleanup-pipeline.groovy
index 8b04990..4b67213 100644
--- a/cleanup-pipeline.groovy
+++ b/cleanup-pipeline.groovy
@@ -2,41 +2,71 @@
*
* Delete heat stack pipeline
*
- * Expected parameters:
- * OPENSTACK_API_URL OpenStack API address
- * OPENSTACK_API_CREDENTIALS Credentials to the OpenStack API
- * OPENSTACK_API_PROJECT OpenStack project to connect to
- * OPENSTACK_API_CLIENT Versions of OpenStack python clients
- * OPENSTACK_API_VERSION Version of the OpenStack API (2/3)
- * HEAT_STACK_NAME Heat stack name
+ * General parameters:
+ * STACK_NAME Name of the stack to delete
+ * STACK_TYPE Type of the stack (heat, aws)
*
+ * Heat parameters:
+ * OPENSTACK_API_URL OpenStack API address
+ * OPENSTACK_API_CREDENTIALS Credentials to the OpenStack API
+ * OPENSTACK_API_PROJECT OpenStack project to connect to
+ * OPENSTACK_API_CLIENT Versions of OpenStack python clients
+ * OPENSTACK_API_VERSION Version of the OpenStack API (2/3)
+ *
+ * AWS parameters:
+ * AWS_API_CREDENTIALS Credentials ID for the AWS EC2 API
+ * AWS_DEFAULT_REGION EC2 region
*/
+
common = new com.mirantis.mk.Common()
git = new com.mirantis.mk.Git()
openstack = new com.mirantis.mk.Openstack()
+aws = new com.mirantis.mk.Aws()
salt = new com.mirantis.mk.Salt()
node {
- // connection objects
- def openstackCloud
+ def venv_path = "${env.WORKSPACE}/venv"
+ def env_vars
- // value defaults
- def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
- def openstackEnv = "${env.WORKSPACE}/venv"
-
- stage('Install OpenStack env') {
- openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
+ // default STACK_TYPE is heat
+ if (!env.getEnvironment().containsKey("STACK_TYPE") || STACK_TYPE == '') {
+ STACK_TYPE = 'heat'
}
- stage('Connect to OpenStack cloud') {
- openstackCloud = openstack.createOpenstackEnv(OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS, OPENSTACK_API_PROJECT)
- openstack.getKeystoneToken(openstackCloud, openstackEnv)
+ stage('Install environment') {
+ if (STACK_TYPE == 'heat') {
+
+ def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
+ openstack.setupOpenstackVirtualenv(venv_path, openstackVersion)
+
+ } else if (STACK_TYPE == 'aws') {
+
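+ // resolve the stored credentials and region into environment variables for the AWS CLI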
+ env_vars = aws.getEnvVars(AWS_API_CREDENTIALS, AWS_DEFAULT_REGION)
+ aws.setupVirtualEnv(venv_path)
+
+ } else {
+ throw new Exception("Stack type '${STACK_TYPE}' is not supported")
+ }
+
}
- stage('Delete Heat stack') {
- common.infoMsg("Deleting Heat Stack " + HEAT_STACK_NAME)
- openstack.deleteHeatStack(openstackCloud, HEAT_STACK_NAME, openstackEnv)
+ stage('Delete stack') {
+ if (STACK_TYPE == 'heat') {
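+ // build an authenticated cloud object and fetch a Keystone token before deleting the stack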
+ def openstackCloud = openstack.createOpenstackEnv(OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS, OPENSTACK_API_PROJECT)
+ openstack.getKeystoneToken(openstackCloud, venv_path)
+
+ common.infoMsg("Deleting Heat Stack " + STACK_NAME)
+ openstack.deleteHeatStack(openstackCloud, STACK_NAME, venv_path)
+ } else if (STACK_TYPE == 'aws') {
+
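+ // remove the stack, then poll until AWS reports DELETE_COMPLETE (failing fast on DELETE_FAILED)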
+ aws.deleteStack(venv_path, env_vars, STACK_NAME)
+ aws.waitForStatus(venv_path, env_vars, STACK_NAME, 'DELETE_COMPLETE', ['DELETE_FAILED'])
+
+ } else {
+ throw new Exception("Stack type '${STACK_TYPE}' is not supported")
+ }
+
}
}
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index b34db68..44a536a 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -344,7 +344,7 @@
if (STACK_DELETE.toBoolean() == true) {
common.errorMsg('Heat job cleanup triggered')
stage('Trigger cleanup job') {
- build job: 'deploy-heat-cleanup', parameters: [[$class: 'StringParameterValue', name: 'STACK_NAME', value: STACK_NAME]]
+ build job: 'deploy-stack-cleanup', parameters: [[$class: 'StringParameterValue', name: 'STACK_NAME', value: STACK_NAME]]
}
} else {
if (currentBuild.result == 'FAILURE') {
diff --git a/lab-pipeline.groovy b/lab-pipeline.groovy
index a1e1082..1b7facb 100644
--- a/lab-pipeline.groovy
+++ b/lab-pipeline.groovy
@@ -23,6 +23,9 @@
* OPENSTACK_API_URL OpenStack API address
* OPENSTACK_API_CREDENTIALS Credentials to the OpenStack API
* OPENSTACK_API_PROJECT OpenStack project to connect to
+ * OPENSTACK_PROJECT_DOMAIN Domain for OpenStack project
+ * OPENSTACK_PROJECT_ID ID for OpenStack project
+ * OPENSTACK_USER_DOMAIN Domain for OpenStack user
* OPENSTACK_API_CLIENT Versions of OpenStack python clients
* OPENSTACK_API_VERSION Version of the OpenStack API (2/3)
*
@@ -108,7 +111,11 @@
// create openstack env
openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
- openstackCloud = openstack.createOpenstackEnv(OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS, OPENSTACK_API_PROJECT)
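+ // pass the project/user domain details so the environment also works with Keystone v3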
+ openstackCloud = openstack.createOpenstackEnv(
+ OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS,
+ OPENSTACK_API_PROJECT, OPENSTACK_PROJECT_DOMAIN,
+ OPENSTACK_PROJECT_ID, OPENSTACK_USER_DOMAIN,
+ OPENSTACK_API_VERSION)
openstack.getKeystoneToken(openstackCloud, openstackEnv)
//
// Verify possibility of create stack for given user and stack type
diff --git a/mk-k8s-simple-deploy-pipeline.groovy b/mk-k8s-simple-deploy-pipeline.groovy
index 4aae816..b86e6da 100644
--- a/mk-k8s-simple-deploy-pipeline.groovy
+++ b/mk-k8s-simple-deploy-pipeline.groovy
@@ -55,7 +55,7 @@
stage('Connect to OpenStack cloud') {
openstackCloud = openstack.createOpenstackEnv(OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS, OPENSTACK_API_PROJECT,
- OPENSTACK_API_PROJECT_DOMAIN_ID, OPENSTACK_API_USER_DOMAIN_ID)
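+ // the empty string keeps the argument positions aligned with the extended createOpenstackEnv signature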
+ "", OPENSTACK_API_PROJECT_DOMAIN_ID, OPENSTACK_API_USER_DOMAIN_ID, OPENSTACK_API_VERSION)
openstack.getKeystoneToken(openstackCloud, openstackEnv)
}
diff --git a/test-cookiecutter-reclass.groovy b/test-cookiecutter-reclass.groovy
index 0d8d319..b1e9d2f 100644
--- a/test-cookiecutter-reclass.groovy
+++ b/test-cookiecutter-reclass.groovy
@@ -1,10 +1,11 @@
common = new com.mirantis.mk.Common()
+gerrit = new com.mirantis.mk.Gerrit()
git = new com.mirantis.mk.Git()
python = new com.mirantis.mk.Python()
saltModelTesting = new com.mirantis.mk.SaltModelTesting()
-def generateSaltMaster(modelEnv) {
- def nodeFile = "${modelEnv}/nodes/cfg01.${clusterDomain}.yml"
+def generateSaltMaster(modEnv, clusterDomain, clusterName) {
+ def nodeFile = "${modEnv}/nodes/cfg01.${clusterDomain}.yml"
def nodeString = """classes:
- cluster.${clusterName}.infra.config
parameters:
@@ -16,57 +17,71 @@
name: cfg01
domain: ${clusterDomain}
"""
- sh "mkdir -p ${modelEnv}/nodes/"
+ sh "mkdir -p ${modEnv}/nodes/"
+ println "Create file ${nodeFile}"
writeFile(file: nodeFile, text: nodeString)
}
-def generate(contextFile) {
- def templateEnv = "${env.WORKSPACE}/template"
- def baseName = sh(script: "basename ${contextFile} .yml", returnStdout: true)
- def modelEnv = "${env.WORKSPACE}/model-${baseName}"
- def cookiecutterTemplateContext = readFile(file: "${env.WORKSPACE}/contexts/contextFile")
- def templateContext = readYaml text: cookiecutterTemplateContext
+def generateModel(modelFile, cutterEnv) {
+ def templateEnv = "${env.WORKSPACE}"
+ def modelEnv = "${env.WORKSPACE}/model"
+ def basename = sh(script: "basename ${modelFile} .yml", returnStdout: true).trim()
+ def generatedModel = "${modelEnv}/${basename}"
+ def testEnv = "${env.WORKSPACE}/test"
+ def content = readFile(file: "${templateEnv}/contexts/${modelFile}")
+ def templateContext = readYaml text: content
def clusterDomain = templateContext.default_context.cluster_domain
def clusterName = templateContext.default_context.cluster_name
- def cutterEnv = "${env.WORKSPACE}/cutter"
- def jinjaEnv = "${env.WORKSPACE}/jinja"
- def outputDestination = "${modelEnv}/classes/cluster/${clusterName}"
+ def outputDestination = "${generatedModel}/classes/cluster/${clusterName}"
def targetBranch = "feature/${clusterName}"
- def templateBaseDir = "${env.WORKSPACE}/template"
- def templateDir = "${templateEnv}/template/dir"
+ def templateBaseDir = "${env.WORKSPACE}"
+ def templateDir = "${templateEnv}/dir"
def templateOutputDir = templateBaseDir
- sh("rm -rf ${templateBaseDir} || true")
+ sh "rm -rf ${generatedModel} || true"
def productList = ["infra", "cicd", "opencontrail", "kubernetes", "openstack", "stacklight"]
for (product in productList) {
- def stagename = (product == "infra") ? "Generate base infrastructure" : "Generate product ${product}"
- println stagename
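+ // render only the products enabled in the context; the infra product is always rendered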
if (product == "infra" || (templateContext.default_context["${product}_enabled"]
&& templateContext.default_context["${product}_enabled"].toBoolean())) {
templateDir = "${templateEnv}/cluster_product/${product}"
- templateOutputDir = "${env.WORKSPACE}/template/output/${product}"
+ templateOutputDir = "${env.WORKSPACE}/output/${product}"
+ sh "rm -rf ${templateOutputDir} || true"
sh "mkdir -p ${templateOutputDir}"
sh "mkdir -p ${outputDestination}"
- python.setupCookiecutterVirtualenv(cutterEnv)
- python.buildCookiecutterTemplate(templateDir, cookiecutterTemplateContext, templateOutputDir, cutterEnv, templateBaseDir)
+ python.buildCookiecutterTemplate(templateDir, content, templateOutputDir, cutterEnv, templateBaseDir)
sh "mv -v ${templateOutputDir}/${clusterName}/* ${outputDestination}"
}
}
- generateSaltMaster(modelEnv)
+ generateSaltMaster(generatedModel, clusterDomain, clusterName)
}
-def testModel(contextFile) {
- def baseName = sh(script: "basename ${contextFile} .yml", returnStdout: true)
- def modelEnv = "${env.WORKSPACE}/model-${baseName}"
- git.checkoutGitRepository("${modelEnv}/classes/system", RECLASS_MODEL_URL, RECLASS_MODEL_BRANCH, RECLASS_MODEL_CREDENTIALS)
- saltModelTesting.setupAndTestNode("cfg01.${clusterDomain}", "", modelEnv)
+def testModel(modelFile, testEnv) {
+ def templateEnv = "${env.WORKSPACE}"
+ def content = readFile(file: "${templateEnv}/contexts/${modelFile}.yml")
+ def templateContext = readYaml text: content
+ def clusterDomain = templateContext.default_context.cluster_domain
+ git.checkoutGitRepository("${testEnv}/classes/system", RECLASS_MODEL_URL, RECLASS_MODEL_BRANCH, CREDENTIALS_ID)
+ saltModelTesting.setupAndTestNode("cfg01.${clusterDomain}", "", testEnv)
+}
+
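+// GERRIT_REFSPEC exists only when the job is triggered by Gerrit; fall back to null for manual runs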
+def gerritRef
+try {
+ gerritRef = GERRIT_REFSPEC
+} catch (MissingPropertyException e) {
+ gerritRef = null
}
timestamps {
node("python&&docker") {
- def templateEnv = "${env.WORKSPACE}/template"
+ def templateEnv = "${env.WORKSPACE}"
+ def cutterEnv = "${env.WORKSPACE}/cutter"
+ def jinjaEnv = "${env.WORKSPACE}/jinja"
try {
+ stage("Cleanup") {
+ sh("rm -rf * || true")
+ }
+
stage ('Download Cookiecutter template') {
if (gerritRef) {
def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID)
@@ -79,29 +94,40 @@
common.successMsg("Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to gate them")
}
} else {
- gerrit.gerritPatchsetCheckout(COOKIECUTTER_TEMPLATE_URL, COOKIECUTTER_TEMPLATE_BRANCH, "HEAD", CREDENTIALS_ID)
+ git.checkoutGitRepository(templateEnv, COOKIECUTTER_TEMPLATE_URL, COOKIECUTTER_TEMPLATE_BRANCH, CREDENTIALS_ID)
}
}
+ stage("Setup") {
+ python.setupCookiecutterVirtualenv(cutterEnv)
+ }
+
def contextFiles
- dir("contexts") {
+ dir("${templateEnv}/contexts") {
contextFiles = findFiles(glob: "*.yml")
}
- for (contextFile in contextFiles) {
- generate(contextFile)
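+ // copy the FileWrapper results into a plain list so it can be partitioned and reused across stages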
+ def contextFileList = []
+ for (int i = 0; i < contextFiles.size(); i++) {
+ contextFileList << contextFiles[i]
+ }
+
+ stage("generate-model") {
+ for (contextFile in contextFileList) {
+ generateModel(contextFile, cutterEnv)
+ }
}
stage("test-nodes") {
- def partitions = common.partitionList(contextFiles, 3)
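+ // split the models into groups; common.serial below runs one group at a time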
+ def partitions = common.partitionList(contextFileList, PARALLEL_NODE_GROUP_SIZE.toInteger())
def buildSteps = [:]
for (int i = 0; i < partitions.size(); i++) {
def partition = partitions[i]
buildSteps.put("partition-${i}", new HashMap<String,org.jenkinsci.plugins.workflow.cps.CpsClosure2>())
for(int k = 0; k < partition.size; k++){
def basename = sh(script: "basename ${partition[k]} .yml", returnStdout: true).trim()
- def modelEnv = "${env.WORKSPACE}/model-${baseName}"
- buildSteps.get("partition-${i}").put(basename, { saltModelTesting.setupAndTestNode(basename, "", modelEnv) })
+ def testEnv = "${env.WORKSPACE}/model/${basename}"
+ buildSteps.get("partition-${i}").put(basename, { testModel(basename, testEnv) })
}
}
common.serial(buildSteps)
@@ -112,7 +138,7 @@
throw e
} finally {
stage ('Clean workspace directories') {
- sh(returnStatus: true, script: "rm -rfv *")
+ sh(returnStatus: true, script: "rm -rfv * > /dev/null || true")
}
common.sendNotification(currentBuild.result,"",["slack"])
}
diff --git a/test-nodejs-pipeline.groovy b/test-nodejs-pipeline.groovy
index d659b7e..b57d351 100644
--- a/test-nodejs-pipeline.groovy
+++ b/test-nodejs-pipeline.groovy
@@ -39,7 +39,7 @@
}
def checkouted = false
-node("docker") {
+node("vm") {
def containerId
def uniqId
try {
diff --git a/test-salt-formulas-pipeline.groovy b/test-salt-formulas-pipeline.groovy
index 6baa25e..4875a6c 100644
--- a/test-salt-formulas-pipeline.groovy
+++ b/test-salt-formulas-pipeline.groovy
@@ -41,9 +41,16 @@
stage("checkout") {
if (gerritRef) {
// job is triggered by Gerrit
- checkouted = gerrit.gerritPatchsetCheckout ([
- credentialsId : CREDENTIALS_ID
- ])
+ // check whether the change has already been merged
+ def gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID)
+ def merged = gerritChange.status == "MERGED"
+ if(!merged){
+ checkouted = gerrit.gerritPatchsetCheckout ([
+ credentialsId : CREDENTIALS_ID
+ ])
+ } else{
+ common.successMsg("Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to test them")
+ }
} else if(defaultGitRef && defaultGitUrl) {
checkouted = gerrit.gerritPatchsetCheckout(defaultGitUrl, defaultGitRef, "HEAD", CREDENTIALS_ID)
}
diff --git a/test-salt-models-pipeline.groovy b/test-salt-models-pipeline.groovy
index 3ed8f61..1df659e 100644
--- a/test-salt-models-pipeline.groovy
+++ b/test-salt-models-pipeline.groovy
@@ -52,7 +52,7 @@
credentialsId : CREDENTIALS_ID
])
} else{
- common.successMsg("Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to gate them")
+ common.successMsg("Change ${GERRIT_CHANGE_NUMBER} is already merged, no need to test them")
}
} else if(defaultGitRef && defaultGitUrl) {
checkouted = gerrit.gerritPatchsetCheckout(defaultGitUrl, defaultGitRef, "HEAD", CREDENTIALS_ID)
@@ -72,37 +72,38 @@
}
stage("test-nodes") {
- def workspace = common.getWorkspace()
- def nodes = sh(script: "find ./nodes -type f -name 'cfg*.yml'", returnStdout: true).tokenize()
- def buildSteps = [:]
- if(nodes.size() > 1){
- if(nodes.size() <= 3 && PARALLEL_NODE_GROUP_SIZE.toInteger() != 1) {
- common.infoMsg("Found <=3 cfg nodes, running parallel test")
- for(int i=0; i < nodes.size();i++){
- def basename = sh(script: "basename ${partition[k]} .yml", returnStdout: true).trim()
- buildSteps.put("node-${basename}", { saltModelTesting.setupAndTestNode(basename, EXTRA_FORMULAS, workspace) })
- }
- parallel buildSteps
- }else{
- common.infoMsg("Found more than 3 cfg nodes or debug enabled, running parallel group test with ${PARALLEL_NODE_GROUP_SIZE} nodes")
- def partitions = common.partitionList(nodes, PARALLEL_NODE_GROUP_SIZE.toInteger())
- for (int i=0; i < partitions.size();i++) {
- def partition = partitions[i]
- buildSteps.put("partition-${i}", new HashMap<String,org.jenkinsci.plugins.workflow.cps.CpsClosure2>())
- for(int k=0; k < partition.size;k++){
- def basename = sh(script: "basename ${partition[k]} .yml", returnStdout: true).trim()
- buildSteps.get("partition-${i}").put(basename, { saltModelTesting.setupAndTestNode(basename, EXTRA_FORMULAS, workspace) })
+ if(!merged){
+ def workspace = common.getWorkspace()
+ def nodes = sh(script: "find ./nodes -type f -name 'cfg*.yml'", returnStdout: true).tokenize()
+ def buildSteps = [:]
+ if(nodes.size() > 1){
+ if(nodes.size() <= 3 && PARALLEL_NODE_GROUP_SIZE.toInteger() != 1) {
+ common.infoMsg("Found <=3 cfg nodes, running parallel test")
+ for(int i=0; i < nodes.size();i++){
+ def basename = sh(script: "basename ${nodes[i]} .yml", returnStdout: true).trim()
+ buildSteps.put("node-${basename}", { saltModelTesting.setupAndTestNode(basename, EXTRA_FORMULAS, workspace) })
+ }
+ parallel buildSteps
+ }else{
+ common.infoMsg("Found more than 3 cfg nodes or debug enabled, running parallel group test with ${PARALLEL_NODE_GROUP_SIZE} nodes")
+ def partitions = common.partitionList(nodes, PARALLEL_NODE_GROUP_SIZE.toInteger())
+ for (int i=0; i < partitions.size();i++) {
+ def partition = partitions[i]
+ buildSteps.put("partition-${i}", new HashMap<String,org.jenkinsci.plugins.workflow.cps.CpsClosure2>())
+ for(int k=0; k < partition.size;k++){
+ def basename = sh(script: "basename ${partition[k]} .yml", returnStdout: true).trim()
+ buildSteps.get("partition-${i}").put(basename, { saltModelTesting.setupAndTestNode(basename, EXTRA_FORMULAS, workspace) })
+ }
}
+ common.serial(buildSteps)
}
- common.serial(buildSteps)
- }
- }else{
- common.infoMsg("Found one cfg node, running single test")
- def basename = sh(script: "basename ${nodes[0]} .yml", returnStdout: true).trim()
- saltModelTesting.setupAndTestNode(basename, EXTRA_FORMULAS, workspace)
+ }else{
+ common.infoMsg("Found one cfg node, running single test")
+ def basename = sh(script: "basename ${nodes[0]} .yml", returnStdout: true).trim()
+ saltModelTesting.setupAndTestNode(basename, EXTRA_FORMULAS, workspace)
+ }
}
}
-
} catch (Throwable e) {
// If there was an error or exception thrown, the build failed
currentBuild.result = "FAILURE"