Merge "Use SALT_OVERRIDES parameter"
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index 69f75d0..6348164 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -53,16 +53,20 @@
_MAX_PERMITTED_STACKS = 2
overwriteFile = "/srv/salt/reclass/classes/cluster/override.yml"
+// Define global variables
def saltMaster
+def venv
if (STACK_TYPE == 'aws') {
- venv_path = 'aws_venv'
- env_vars = aws.getEnvVars(AWS_API_CREDENTIALS, AWS_STACK_REGION)
+ def aws_env_vars
}
timestamps {
node {
try {
+ // Set build-specific variables
+ venv = "${env.WORKSPACE}/venv"
+
//
// Prepare machines
//
@@ -72,7 +76,6 @@
// value defaults
def openstackCloud
def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
- def openstackEnv = "${env.WORKSPACE}/venv"
if (STACK_REUSE.toBoolean() == true && STACK_NAME == '') {
error("If you want to reuse existing stack you need to provide it's name")
@@ -97,15 +100,15 @@
git.checkoutGitRepository('template', STACK_TEMPLATE_URL, STACK_TEMPLATE_BRANCH, STACK_TEMPLATE_CREDENTIALS)
// create openstack env
- openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
+ openstack.setupOpenstackVirtualenv(venv, openstackVersion)
openstackCloud = openstack.createOpenstackEnv(OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS, OPENSTACK_API_PROJECT)
- openstack.getKeystoneToken(openstackCloud, openstackEnv)
+ openstack.getKeystoneToken(openstackCloud, venv)
//
// Verify that a stack can be created for the given user and stack type
//
wrap([$class: 'BuildUser']) {
if (env.BUILD_USER_ID && !env.BUILD_USER_ID.equals("jenkins") && !STACK_REUSE.toBoolean()) {
- def existingStacks = openstack.getStacksForNameContains(openstackCloud, "${env.BUILD_USER_ID}-${JOB_NAME}", openstackEnv)
+ def existingStacks = openstack.getStacksForNameContains(openstackCloud, "${env.BUILD_USER_ID}-${JOB_NAME}", venv)
if(existingStacks.size() >= _MAX_PERMITTED_STACKS){
STACK_DELETE = "false"
throw new Exception("You cannot create new stack, you already have ${_MAX_PERMITTED_STACKS} stacks of this type (${JOB_NAME}). \nStack names: ${existingStacks}")
@@ -120,17 +123,19 @@
'instance_zone': HEAT_STACK_ZONE,
'public_net': HEAT_STACK_PUBLIC_NET
]
- openstack.createHeatStack(openstackCloud, STACK_NAME, STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, openstackEnv, false)
+ openstack.createHeatStack(openstackCloud, STACK_NAME, STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, venv, false)
}
}
// get SALT_MASTER_URL
- saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, STACK_NAME, 'salt_master_ip', openstackEnv)
+ saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, STACK_NAME, 'salt_master_ip', venv)
currentBuild.description = "${STACK_NAME} ${saltMasterHost}"
SALT_MASTER_URL = "http://${saltMasterHost}:6969"
} else if (STACK_TYPE == 'aws') {
+ // set AWS environment variables from the supplied credentials and region
+ aws_env_vars = aws.getEnvVars(AWS_API_CREDENTIALS, AWS_STACK_REGION)
if (STACK_REUSE.toBoolean() == true && STACK_NAME == '') {
error("If you want to reuse existing stack you need to provide it's name")
@@ -158,7 +163,7 @@
git.checkoutGitRepository('template', STACK_TEMPLATE_URL, STACK_TEMPLATE_BRANCH, STACK_TEMPLATE_CREDENTIALS)
// setup environment
- aws.setupVirtualEnv(venv_path)
+ aws.setupVirtualEnv(venv)
// start stack
def stack_params = [
@@ -166,14 +171,14 @@
"ParameterKey=CmpNodeCount,ParameterValue=" + STACK_COMPUTE_COUNT
]
def template_file = 'cfn/' + STACK_TEMPLATE + '.yml'
- aws.createStack(venv_path, env_vars, template_file, STACK_NAME, stack_params)
+ aws.createStack(venv, aws_env_vars, template_file, STACK_NAME, stack_params)
}
// wait for stack to be ready
- aws.waitForStatus(venv_path, env_vars, STACK_NAME, 'CREATE_COMPLETE')
+ aws.waitForStatus(venv, aws_env_vars, STACK_NAME, 'CREATE_COMPLETE')
// get outputs
- saltMasterHost = aws.getOutputs(venv_path, env_vars, STACK_NAME, 'SaltMasterIP')
+ saltMasterHost = aws.getOutputs(venv, aws_env_vars, STACK_NAME, 'SaltMasterIP')
currentBuild.description = "${STACK_NAME} ${saltMasterHost}"
SALT_MASTER_URL = "http://${saltMasterHost}:6969"
@@ -212,10 +217,12 @@
// install k8s
if (common.checkContains('STACK_INSTALL', 'k8s')) {
stage('Install Kubernetes infra') {
- // configure kubernetes_control_address - save loadbalancer
- def kubernetes_control_address = aws.getOutputs(venv_path, env_vars, STACK_NAME, 'ControlLoadBalancer')
- print(kubernetes_control_address)
- salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_control_address', kubernetes_control_address], null, true)
+ if (STACK_TYPE == 'aws') {
+ // configure kubernetes_control_address - save loadbalancer
+ def kubernetes_control_address = aws.getOutputs(venv, aws_env_vars, STACK_NAME, 'ControlLoadBalancer')
+ print(kubernetes_control_address)
+ salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_control_address', kubernetes_control_address], null, true)
+ }
// ensure certificates are generated properly
salt.runSaltProcessStep(saltMaster, '*', 'saltutil.refresh_pillar', [], null, true)
@@ -227,21 +234,19 @@
stage('Install Kubernetes control') {
orchestrate.installKubernetesControl(saltMaster)
-
}
stage('Scale Kubernetes computes') {
if (STACK_COMPUTE_COUNT > 0) {
if (STACK_TYPE == 'aws') {
-
// get stack info
- def scaling_group = aws.getOutputs(venv_path, env_vars, STACK_NAME, 'ComputesScalingGroup')
+ def scaling_group = aws.getOutputs(venv, aws_env_vars, STACK_NAME, 'ComputesScalingGroup')
//update autoscaling group
- aws.updateAutoscalingGroup(venv_path, env_vars, scaling_group, ["--desired-capacity " + STACK_COMPUTE_COUNT])
+ aws.updateAutoscalingGroup(venv, aws_env_vars, scaling_group, ["--desired-capacity " + STACK_COMPUTE_COUNT])
// wait for computes to boot up
- aws.waitForAutoscalingInstances(venv_path, env_vars, scaling_group)
+ aws.waitForAutoscalingInstances(venv, aws_env_vars, scaling_group)
sleep(60)
}
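
These hunks converge both stack types on a single venv = "${env.WORKSPACE}/venv" path and replace the old venv_path/env_vars pair with venv/aws_env_vars. One subtlety worth noting: the "def aws_env_vars" declared inside the top-level if (STACK_TYPE == 'aws') block is block-scoped, so the later bare assignment aws_env_vars = aws.getEnvVars(...) inside the node only works because it falls through to the script binding. A minimal plain-Groovy sketch of that behaviour (names mirror the diff; the literal value is invented for illustration):

    // Declared with 'def' inside a block, the variable ends at the closing brace.
    if (true) {
        def aws_env_vars
    }

    // An undeclared assignment in a Groovy script writes to the script binding,
    // which is why the pipeline can still read aws_env_vars later on.
    aws_env_vars = ['AWS_DEFAULT_REGION=eu-west-1']
    println(aws_env_vars)   // resolved from the binding
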
diff --git a/test-cookiecutter-reclass.groovy b/test-cookiecutter-reclass.groovy
index f7960c8..fa98b9a 100644
--- a/test-cookiecutter-reclass.groovy
+++ b/test-cookiecutter-reclass.groovy
@@ -120,6 +120,11 @@
}
}
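+ // Archive the rendered model as a build artifact before the test stages run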
+ dir("${env.WORKSPACE}") {
+ sh(returnStatus: true, script: "tar -zcvf model.tar.gz -C model .")
+ archiveArtifacts artifacts: "model.tar.gz"
+ }
+
stage("test-nodes") {
def partitions = common.partitionList(contextFileList, PARALLEL_NODE_GROUP_SIZE.toInteger())
def buildSteps = [:]
@@ -135,13 +140,14 @@
common.serial(buildSteps)
}
+ stage ('Clean workspace directories') {
+ sh(returnStatus: true, script: "rm -rfv * > /dev/null || true")
+ }
+
} catch (Throwable e) {
currentBuild.result = "FAILURE"
throw e
} finally {
- stage ('Clean workspace directories') {
- sh(returnStatus: true, script: "rm -rfv * > /dev/null || true")
- }
common.sendNotification(currentBuild.result,"",["slack"])
}
}
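
Taken together, the test-cookiecutter-reclass.groovy hunks archive the rendered model before the test stage and move workspace cleanup from the finally block into the try block, presumably so that a failed run keeps its workspace around for inspection while the Slack notification still fires. A condensed sketch of the resulting control flow (stage bodies elided; only the structure visible in the diff is assumed):

    node {
        try {
            // ... cookiecutter contexts are rendered into ./model ...

            // archive the rendered model as a build artifact
            dir("${env.WORKSPACE}") {
                sh(returnStatus: true, script: "tar -zcvf model.tar.gz -C model .")
                archiveArtifacts artifacts: "model.tar.gz"
            }

            stage("test-nodes") {
                // ... partitioned test runs via common.serial(buildSteps) ...
            }

            // reached only when the stages above succeed
            stage ('Clean workspace directories') {
                sh(returnStatus: true, script: "rm -rfv * > /dev/null || true")
            }
        } catch (Throwable e) {
            currentBuild.result = "FAILURE"
            throw e
        } finally {
            common.sendNotification(currentBuild.result,"",["slack"])
        }
    }
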