Merge "Fixed custom install of kitchen tests envs"
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
new file mode 100644
index 0000000..799b806
--- /dev/null
+++ b/cloud-deploy-pipeline.groovy
@@ -0,0 +1,315 @@
+/**
+ *
+ * Launch heat/cloudformation stack
+ *
+ * Expected parameters:
+ * STACK_NAME Infrastructure stack name
+ * STACK_TEMPLATE Stack HOT/CFN template
+ * STACK_TYPE Deploy OpenStack/AWS [heat/aws]
+ *
+ * STACK_TEMPLATE_URL URL to git repo with stack templates
+ * STACK_TEMPLATE_CREDENTIALS Credentials to the templates repo
+ * STACK_TEMPLATE_BRANCH Stack templates repo branch
+ *
+ * STACK_DELETE Delete stack when finished (bool)
+ * STACK_REUSE Reuse existing stack (don't create one)
+ * STACK_INSTALL What should be installed (k8s, openstack, ...)
+ * STACK_TEST Run tests (bool)
+ * STACK_CLEANUP_JOB Name of job for deleting stack
+ *
+ * AWS_STACK_REGION CloudFormation AWS region
+ * AWS_API_CREDENTIALS AWS Access key ID with AWS secret access key
+ *
+ * HEAT_STACK_ENVIRONMENT Heat stack environment parameters
+ * HEAT_STACK_ZONE Heat stack availability zone
+ * HEAT_STACK_PUBLIC_NET Heat stack floating IP pool
+ * OPENSTACK_API_URL OpenStack API address
+ * OPENSTACK_API_CREDENTIALS Credentials to the OpenStack API
+ * OPENSTACK_API_PROJECT OpenStack project to connect to
+ * OPENSTACK_API_CLIENT Version of the OpenStack python clients (defaults to 'liberty')
+ * OPENSTACK_API_VERSION Version of the OpenStack API (2/3)
+ *
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API
+ * SALT_MASTER_URL URL of Salt master
+ *
+ * K8S_API_SERVER Kubernetes API address
+ * K8S_CONFORMANCE_IMAGE Path to docker image with conformance e2e tests
+ *
+ * TEMPEST_IMAGE_LINK Tempest image link
+ *
+ */
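+// Illustrative parameter set for a Heat-based run (hypothetical values, not
+// defaults): STACK_TYPE=heat, STACK_INSTALL=core,openstack,stacklight,
+// STACK_TEST=openstack, STACK_REUSE=false, STACK_DELETE=true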
+common = new com.mirantis.mk.Common()
+git = new com.mirantis.mk.Git()
+openstack = new com.mirantis.mk.Openstack()
+orchestrate = new com.mirantis.mk.Orchestrate()
+salt = new com.mirantis.mk.Salt()
+test = new com.mirantis.mk.Test()
+
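+// Upper bound on concurrently running stacks a single user may own for this job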
+_MAX_PERMITTED_STACKS = 2
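+// Reclass cluster-level override file; soft parameters (e.g. image overrides) are appended to it below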
+overwriteFile = "/srv/salt/reclass/classes/cluster/override.yml"
+
+timestamps {
+ node {
+ try {
+ //
+ // Prepare machines
+ //
+ stage ('Create infrastructure') {
+
+ if (STACK_TYPE == 'heat') {
+ // value defaults
+ def openstackCloud
+ def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
+ def openstackEnv = "${env.WORKSPACE}/venv"
+
+ if (STACK_REUSE.toBoolean() == true && STACK_NAME == '') {
+ error("If you want to reuse existing stack you need to provide it's name")
+ }
+
+ if (STACK_REUSE.toBoolean() == false) {
+ // Don't allow setting a custom heat stack name
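+ // e.g. "jdoe-deploy-heat-42" for user jdoe, build 42 of job deploy-heat (example values)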
+ wrap([$class: 'BuildUser']) {
+ if (env.BUILD_USER_ID) {
+ STACK_NAME = "${env.BUILD_USER_ID}-${JOB_NAME}-${BUILD_NUMBER}"
+ } else {
+ STACK_NAME = "jenkins-${JOB_NAME}-${BUILD_NUMBER}"
+ }
+ currentBuild.description = STACK_NAME
+ }
+ }
+
+ // set description
+ currentBuild.description = "${STACK_NAME}"
+
+ // get templates
+ git.checkoutGitRepository('template', STACK_TEMPLATE_URL, STACK_TEMPLATE_BRANCH, STACK_TEMPLATE_CREDENTIALS)
+
+ // create openstack env
+ openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
+ openstackCloud = openstack.createOpenstackEnv(OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS, OPENSTACK_API_PROJECT)
+ openstack.getKeystoneToken(openstackCloud, openstackEnv)
+ //
+ // Verify that the given user may create a stack of this type
+ //
+ wrap([$class: 'BuildUser']) {
+ if (env.BUILD_USER_ID && !env.BUILD_USER_ID.equals("jenkins") && !STACK_REUSE.toBoolean()) {
+ def existingStacks = openstack.getStacksForNameContains(openstackCloud, "${env.BUILD_USER_ID}-${JOB_NAME}", openstackEnv)
+ if(existingStacks.size() >= _MAX_PERMITTED_STACKS){
+ STACK_DELETE = "false"
+ throw new Exception("You cannot create a new stack; you already have ${_MAX_PERMITTED_STACKS} stacks of this type (${JOB_NAME}).\nStack names: ${existingStacks}")
+ }
+ }
+ }
+ // launch stack
+ if (STACK_REUSE.toBoolean() == false) {
+ stage('Launch new Heat stack') {
+ // create stack
+ envParams = [
+ 'instance_zone': HEAT_STACK_ZONE,
+ 'public_net': HEAT_STACK_PUBLIC_NET
+ ]
+ openstack.createHeatStack(openstackCloud, STACK_NAME, STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, openstackEnv)
+ }
+ }
+
+ // get SALT_MASTER_URL
+ saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, STACK_NAME, 'salt_master_ip', openstackEnv)
+ currentBuild.description = "${STACK_NAME}: ${saltMasterHost}"
+
+ SALT_MASTER_URL = "http://${saltMasterHost}:6969"
+ }
+
+ if (STACK_TYPE == 'aws') {
+ saltMasterHost = ''
+ currentBuild.description = "${STACK_NAME}: ${saltMasterHost}"
+ SALT_MASTER_URL = "http://${saltMasterHost}:6969"
+ }
+
+ }
+
+ //
+ // Connect to Salt master
+ //
+
+ def saltMaster
+ stage('Connect to Salt API') {
+ saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ }
+
+ //
+ // Install
+ //
+
+ if (common.checkContains('STACK_INSTALL', 'core')) {
+ stage('Install core infrastructure') {
+ orchestrate.installFoundationInfra(saltMaster)
+
+ if (common.checkContains('STACK_INSTALL', 'kvm')) {
+ orchestrate.installInfraKvm(saltMaster)
+ orchestrate.installFoundationInfra(saltMaster)
+ }
+
+ orchestrate.validateFoundationInfra(saltMaster)
+ }
+ }
+
+ // install k8s
+ if (common.checkContains('STACK_INSTALL', 'k8s')) {
+ stage('Install Kubernetes infra') {
+ orchestrate.installKubernetesInfra(saltMaster)
+ }
+
+ stage('Install Kubernetes control') {
+
+ // Overwrite Kubernetes vars if specified
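+ // NOTE: the leading whitespace in the appended line is significant - it keeps the key indented as nested YAML in override.yml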
+ if (env.getEnvironment().containsKey("KUBERNETES_HYPERKUBE_IMAGE")) {
+ salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'file.append', overwriteFile, " kubernetes_hyperkube_image: ${KUBERNETES_HYPERKUBE_IMAGE}")
+ }
+
+ orchestrate.installKubernetesControl(saltMaster)
+ }
+
+
+ if (common.checkContains('STACK_INSTALL', 'contrail')) {
+ stage('Install Contrail for Kubernetes') {
+ orchestrate.installContrailNetwork(saltMaster)
+ orchestrate.installContrailCompute(saltMaster)
+ }
+ }
+ }
+
+ // install openstack
+ if (common.checkContains('STACK_INSTALL', 'openstack')) {
+ // install Infra and control, tests, ...
+
+ stage('Install OpenStack infra') {
+ orchestrate.installOpenstackInfra(saltMaster)
+ }
+
+ stage('Install OpenStack control') {
+ orchestrate.installOpenstackControl(saltMaster)
+ }
+
+ stage('Install OpenStack network') {
+
+ if (common.checkContains('STACK_INSTALL', 'contrail')) {
+ orchestrate.installContrailNetwork(saltMaster)
+ } else if (common.checkContains('STACK_INSTALL', 'ovs')) {
+ orchestrate.installOpenstackNetwork(saltMaster)
+ }
+
+ salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; neutron net-list'])
+ salt.runSaltProcessStep(saltMaster, 'I@keystone:server', 'cmd.run', ['. /root/keystonerc; nova net-list'])
+ }
+
+ stage('Install OpenStack compute') {
+ orchestrate.installOpenstackCompute(saltMaster)
+
+ if (common.checkContains('STACK_INSTALL', 'contrail')) {
+ orchestrate.installContrailCompute(saltMaster)
+ }
+ }
+
+ }
+
+
+ if (common.checkContains('STACK_INSTALL', 'stacklight')) {
+ stage('Install StackLight') {
+ orchestrate.installStacklightControl(saltMaster)
+ orchestrate.installStacklightClient(saltMaster)
+ }
+ }
+
+ //
+ // Test
+ //
+ def artifacts_dir = '_artifacts/'
+
+ if (common.checkContains('STACK_TEST', 'k8s')) {
+ stage('Run k8s bootstrap tests') {
+ def image = 'tomkukral/k8s-scripts'
+ def output_file = image.replaceAll('/', '-') + '.output'
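+ // e.g. 'tomkukral/k8s-scripts' becomes 'tomkukral-k8s-scripts.output'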
+
+ // run image
+ test.runConformanceTests(saltMaster, K8S_API_SERVER, image)
+
+ // collect output
+ sh "mkdir -p ${artifacts_dir}"
+ file_content = salt.getFileContent(saltMaster, 'ctl01*', '/tmp/' + output_file)
+ writeFile file: "${artifacts_dir}${output_file}", text: file_content
+ sh "cat ${artifacts_dir}${output_file}"
+
+ // collect artifacts
+ archiveArtifacts artifacts: "${artifacts_dir}${output_file}"
+ }
+
+ stage('Run k8s conformance e2e tests') {
+
+ def image = K8S_CONFORMANCE_IMAGE
+ def output_file = image.replaceAll('/', '-') + '.output'
+
+ // run image
+ test.runConformanceTests(saltMaster, K8S_API_SERVER, image)
+
+ // collect output
+ sh "mkdir -p ${artifacts_dir}"
+ file_content = salt.getFileContent(saltMaster, 'ctl01*', '/tmp/' + output_file)
+ writeFile file: "${artifacts_dir}${output_file}", text: file_content
+ sh "cat ${artifacts_dir}${output_file}"
+
+ // collect artifacts
+ archiveArtifacts artifacts: "${artifacts_dir}${output_file}"
+ }
+ }
+
+ if (common.checkContains('STACK_TEST', 'openstack')) {
+ stage('Run deployment tests') {
+ test.runTempestTests(saltMaster, TEMPEST_IMAGE_LINK)
+ }
+
+ stage('Copy test results to config node') {
+ test.copyTempestResults(saltMaster)
+ }
+ }
+
+ stage('Finalize') {
+ if (STACK_INSTALL != '') {
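+ // final consistency pass: state.apply without arguments runs the highstate on all minions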
+ try {
+ salt.runSaltProcessStep(saltMaster, '*', 'state.apply', [], null, true)
+ } catch (Exception e) {
+ common.warningMsg('State apply failed, but continuing the run')
+ }
+ }
+ }
+ } catch (Throwable e) {
+ currentBuild.result = 'FAILURE'
+ throw e
+ } finally {
+
+
+ //
+ // Clean
+ //
+
+ if (STACK_TYPE == 'heat') {
+ // send notification
+ common.sendNotification(currentBuild.result, STACK_NAME, ["slack"])
+
+ if (STACK_DELETE.toBoolean() == true) {
+ common.errorMsg('Heat job cleanup triggered')
+ stage('Trigger cleanup job') {
+ build job: STACK_CLEANUP_JOB, parameters: [[$class: 'StringParameterValue', name: 'STACK_NAME', value: STACK_NAME]]
+ }
+ } else {
+ if (currentBuild.result == 'FAILURE') {
+ common.errorMsg("Deploy job FAILED and was not deleted. Please fix the problem and delete stack on you own.")
+ if (SALT_MASTER_URL) {
+ common.errorMsg("Salt master URL: ${SALT_MASTER_URL}")
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/lab-pipeline.groovy b/lab-pipeline.groovy
index fb8559c..5cb11db 100644
--- a/lab-pipeline.groovy
+++ b/lab-pipeline.groovy
@@ -37,6 +37,9 @@
*
* optional parameters for overwriting soft params
* KUBERNETES_HYPERKUBE_IMAGE Docker repository and tag for hyperkube image
+ * CALICO_CNI_IMAGE Docker repository and tag for calico CNI image
+ * CALICO_NODE_IMAGE Docker repository and tag for calico node image
+ * CALICOCTL_IMAGE Docker repository and tag for calicoctl image
*
*/
@@ -160,6 +163,16 @@
if (env.getEnvironment().containsKey("KUBERNETES_HYPERKUBE_IMAGE")) {
salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'file.append', overwriteFile, " kubernetes_hyperkube_image: ${KUBERNETES_HYPERKUBE_IMAGE}")
}
+ // Overwrite Calico vars if specified
+ if (env.getEnvironment().containsKey("CALICO_CNI_IMAGE")) {
+ salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'file.append', overwriteFile, " kubernetes_calico_cni_image: ${CALICO_CNI_IMAGE}")
+ }
+ if (env.getEnvironment().containsKey("CALICO_NODE_IMAGE")) {
+ salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'file.append', overwriteFile, " kubernetes_calico_node_image: ${CALICO_NODE_IMAGE}")
+ }
+ if (env.getEnvironment().containsKey("CALICOCTL_IMAGE")) {
+ salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'file.append', overwriteFile, " kubernetes_calicoctl_image: ${CALICOCTL_IMAGE}")
+ }
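+ // override.yml then ends up with lines such as " kubernetes_calico_cni_image: calico/cni:v1.x" (illustrative value)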
orchestrate.installKubernetesControl(saltMaster)
}
diff --git a/openstack-control-upgrade.groovy b/openstack-control-upgrade.groovy
index 9e6fd01..60fa160 100644
--- a/openstack-control-upgrade.groovy
+++ b/openstack-control-upgrade.groovy
@@ -26,11 +26,15 @@
if (STAGE_TEST_UPGRADE.toBoolean() == true) {
stage('Test upgrade') {
- //salt.enforceState(saltMaster, 'I@salt:master', 'reclass')
- // salt.runSaltProcessStep(saltMaster, '*', 'saltutil.refresh_pillar', [], null, true)
- // salt '*' saltutil.sync_all
- // salt.runSaltProcessStep(saltMaster, '*', 'saltutil.sync_all', [], null, true)
+ try {
+ salt.enforceState(saltMaster, 'I@salt:master', 'reclass')
+ } catch (Exception e) {
+ common.warningMsg(" Some parts of Reclass state failed. The most probable reasons were uncommited changes. We should continue to run")
+ }
+
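+ // refresh pillar data and sync modules/states to all minions before grains are read below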
+ salt.runSaltProcessStep(saltMaster, '*', 'saltutil.refresh_pillar', [], null, true)
+ salt.runSaltProcessStep(saltMaster, '*', 'saltutil.sync_all', [], null, true)
def _pillar = salt.getGrain(saltMaster, 'I@salt:master', 'domain')
@@ -46,12 +50,8 @@
_pillar = salt.getGrain(saltMaster, 'I@salt:control', 'id')
def kvm01 = _pillar['return'][0].values()[0].values()[0]
- def kvm03 = _pillar['return'][0].values()[2].values()[0]
- def kvm02 = _pillar['return'][0].values()[1].values()[0]
print(_pillar)
print(kvm01)
- print(kvm02)
- print(kvm03)
_pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:upg01:provider')
def upgNodeProvider = _pillar['return'][0].values()[0]
@@ -131,7 +131,7 @@
try {
salt.enforceState(saltMaster, 'upg*', 'keystone.server')
} catch (Exception e) {
- common.warningMsg('Reloading Apache2 and enforcing keystone.server state again')
+ common.warningMsg('Restarting Apache2')
salt.runSaltProcessStep(saltMaster, 'upg*', 'service.restart', ['apache2'], null, true)
}
try {
@@ -153,6 +153,13 @@
common.warningMsg('running nova state again')
salt.enforceState(saltMaster, 'upg*', 'nova')
}
+ // run the nova state again, as nova sometimes fails to enforce on the first run
+ try {
+ salt.enforceState(saltMaster, 'upg*', 'nova')
+ } catch (Exception e) {
+ common.warningMsg('running nova state again')
+ salt.enforceState(saltMaster, 'upg*', 'nova')
+ }
try {
salt.enforceState(saltMaster, 'upg*', 'cinder')
} catch (Exception e) {
@@ -172,12 +179,12 @@
salt.enforceState(saltMaster, 'upg*', 'heat')
}
salt.cmdRun(saltMaster, 'upg01*', '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
- }
- }
- if (STAGE_TEST_UPGRADE.toBoolean() == true && STAGE_REAL_UPGRADE.toBoolean() == true) {
- stage('Ask for manual confirmation') {
- input message: "Do you want to continue with upgrade?"
+ if (STAGE_TEST_UPGRADE.toBoolean() == true && STAGE_REAL_UPGRADE.toBoolean() == true) {
+ stage('Ask for manual confirmation') {
+ input message: "Do you want to continue with upgrade?"
+ }
+ }
}
}
@@ -192,12 +199,10 @@
_pillar = salt.getGrain(saltMaster, 'I@salt:control', 'id')
kvm01 = _pillar['return'][0].values()[0].values()[0]
- kvm03 = _pillar['return'][0].values()[2].values()[0]
- kvm02 = _pillar['return'][0].values()[1].values()[0]
print(_pillar)
print(kvm01)
- print(kvm02)
- print(kvm03)
+
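+ // flipped to true when a syncdb state fails, so the post-upgrade service restarts below are skipped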
+ def errorOccured = false
_pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl01:provider')
def ctl01NodeProvider = _pillar['return'][0].values()[0]
@@ -293,7 +298,7 @@
try {
salt.enforceState(saltMaster, 'ctl*', ['memcached', 'keystone.server'])
} catch (Exception e) {
- common.warningMsg('Reloading Apache2 and enforcing keystone.server state again')
+ common.warningMsg('Restarting Apache2 and enforcing keystone.server state again')
salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['apache2'], null, true)
salt.enforceState(saltMaster, 'ctl*', 'keystone.server')
}
@@ -342,6 +347,7 @@
}
} catch (Exception e) {
+ errorOccured = true
common.warningMsg('Some states that require syncdb failed. Restoring production databases')
databases = salt.cmdRun(saltMaster, 'I@mysql:client','salt-call mysql.db_list | grep -v \'upgrade\' | grep -v \'schema\' | awk \'/-/ {print \$2}\'')
if(databases && databases != ""){
@@ -355,41 +361,41 @@
}
salt.enforceState(saltMaster, 'I@mysql:client', 'mysql.client')
}else{
- common.errorMsg("No none _upgrade databases were returned. You have to restore production databases before running the real control upgrade again. This is because database schema for some services already happened. To do that delete the production databases and run salt 'I@mysql:client' state.sls mysql.client on the salt-master node")
+ common.errorMsg("No none _upgrade databases were returned. You have to restore production databases before running the real control upgrade again. This is because database schema for some services already happened. To do that delete the production databases, remove none upgrade database files from /root/mysql/flags/ and run salt 'I@mysql:client' state.sls mysql.client on the salt-master node")
}
common.errorMsg("Stage Real control upgrade failed")
}
-
- // salt 'cmp*' cmd.run 'service nova-compute restart'
- salt.runSaltProcessStep(saltMaster, 'cmp*', 'service.restart', ['nova-compute'], null, true)
+ if(!errorOccured){
+ // salt 'cmp*' cmd.run 'service nova-compute restart'
+ salt.runSaltProcessStep(saltMaster, 'cmp*', 'service.restart', ['nova-compute'], null, true)
- // salt 'prx*' state.sls linux,openssh,salt.minion,ntp,rsyslog - TODO: why? it already ran once
- // salt 'ctl*' state.sls keepalived
- // salt 'prx*' state.sls keepalived
- salt.enforceState(saltMaster, 'prx*', 'keepalived')
- // salt 'prx*' state.sls horizon
- salt.enforceState(saltMaster, 'prx*', 'horizon')
- // salt 'prx*' state.sls nginx
- salt.enforceState(saltMaster, 'prx*', 'nginx')
+ // salt 'prx*' state.sls linux,openssh,salt.minion,ntp,rsyslog - TODO: why? it already ran once
+ // salt 'ctl*' state.sls keepalived
+ // salt 'prx*' state.sls keepalived
+ salt.enforceState(saltMaster, 'prx*', 'keepalived')
+ // salt 'prx*' state.sls horizon
+ salt.enforceState(saltMaster, 'prx*', 'horizon')
+ // salt 'prx*' state.sls nginx
+ salt.enforceState(saltMaster, 'prx*', 'nginx')
- salt.cmdRun(saltMaster, 'ctl01*', '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
+ salt.cmdRun(saltMaster, 'ctl01*', '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
+ }
}
- }
-
-
- if (STAGE_REAL_UPGRADE.toBoolean() == true && STAGE_ROLLBACK_UPGRADE.toBoolean() == true) {
- stage('Ask for manual confirmation') {
- input message: "Please verify that control upgrade was successful. If it did not succeed, in the worst scenario, you can click YES to continue with control-upgrade-rollback. Do you want to continue with the rollback?"
- }
- stage('Ask for manual confirmation') {
- input message: "Do you really want to continue with the rollback?"
+ if (STAGE_REAL_UPGRADE.toBoolean() == true && STAGE_ROLLBACK_UPGRADE.toBoolean() == true) {
+ stage('Ask for manual confirmation') {
+ input message: "Please verify if the control upgrade was successful. If it did not succeed, in the worst scenario, you can click YES to continue with control-upgrade-rollback. Do you want to continue with the rollback?"
+ }
}
}
if (STAGE_ROLLBACK_UPGRADE.toBoolean() == true) {
stage('Rollback upgrade') {
+ stage('Ask for manual confirmation') {
+ input message: "Do you really want to continue with the rollback?"
+ }
+
_pillar = salt.getGrain(saltMaster, 'I@salt:master', 'domain')
domain = _pillar['return'][0].values()[0].values()[0]
print(_pillar)
@@ -397,12 +403,8 @@
_pillar = salt.getGrain(saltMaster, 'I@salt:control', 'id')
kvm01 = _pillar['return'][0].values()[0].values()[0]
- kvm03 = _pillar['return'][0].values()[2].values()[0]
- kvm02 = _pillar['return'][0].values()[1].values()[0]
print(_pillar)
print(kvm01)
- print(kvm02)
- print(kvm03)
_pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl01:provider')
def ctl01NodeProvider = _pillar['return'][0].values()[0]