Merge "Add case of infra salt-model"
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..8dada3e
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..591ce2c
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,12 @@
+============
+Mk Pipelines
+============
+
+Jenkins Groovy scripts for MCP operation that reuse common `pipeline
+libraries <https://github.com/Mirantis/pipeline-library>`_.
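+
+Each pipeline script instantiates the shared library classes directly.
+A minimal sketch, using class and method names as they appear in the
+pipelines in this repository (the parameters are illustrative)::
+
+    def common = new com.mirantis.mk.Common()
+    def salt = new com.mirantis.mk.Salt()
+
+    node() {
+        def saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        salt.enforceState(saltMaster, 'I@salt:master', 'linux.system.repo')
+        common.infoMsg("State applied on the Salt master")
+    }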
+
+Licensing
+=========
+
+Unless specifically noted, all parts of this project are licensed
+under the Apache 2.0 `license <https://github.com/Mirantis/mk-pipelines/LICENSE>`_.
diff --git a/change-config.groovy b/change-config.groovy
index af1fe8b..44832ed 100644
--- a/change-config.groovy
+++ b/change-config.groovy
@@ -16,7 +16,6 @@
def salt = new com.mirantis.mk.Salt()
def saltMaster
-def targetAll = ['expression': TARGET_SERVERS, 'type': 'compound']
def targetTestSubset
def targetLiveSubset
def targetLiveAll
@@ -39,7 +38,7 @@
}
stage('List target servers') {
- minions = salt.getMinions(saltMaster, targetAll)
+ minions = salt.getMinions(saltMaster, TARGET_SERVERS)
if (minions.isEmpty()) {
throw new Exception("No minion was targeted")
}
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index 4b1a521..12db6d1 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -37,6 +37,7 @@
*
* K8S_API_SERVER Kubernetes API address
* K8S_CONFORMANCE_IMAGE Path to docker image with conformance e2e tests
+ * SALT_OVERRIDES YAML with overrides for Salt deployment
*
* TEMPEST_IMAGE_LINK Tempest image link
*
@@ -52,16 +53,20 @@
_MAX_PERMITTED_STACKS = 2
overwriteFile = "/srv/salt/reclass/classes/cluster/override.yml"
+// Define global variables
def saltMaster
+def venv
if (STACK_TYPE == 'aws') {
- venv_path = 'aws_venv'
- env_vars = aws.getEnvVars(AWS_API_CREDENTIALS, AWS_STACK_REGION)
+ def aws_env_vars
}
timestamps {
node {
try {
+ // Set build-specific variables
+ venv = "${env.WORKSPACE}/venv"
+
//
// Prepare machines
//
@@ -71,7 +76,6 @@
// value defaults
def openstackCloud
def openstackVersion = OPENSTACK_API_CLIENT ? OPENSTACK_API_CLIENT : 'liberty'
- def openstackEnv = "${env.WORKSPACE}/venv"
if (STACK_REUSE.toBoolean() == true && STACK_NAME == '') {
error("If you want to reuse existing stack you need to provide it's name")
@@ -96,15 +100,15 @@
git.checkoutGitRepository('template', STACK_TEMPLATE_URL, STACK_TEMPLATE_BRANCH, STACK_TEMPLATE_CREDENTIALS)
// create openstack env
- openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
+ openstack.setupOpenstackVirtualenv(venv, openstackVersion)
openstackCloud = openstack.createOpenstackEnv(OPENSTACK_API_URL, OPENSTACK_API_CREDENTIALS, OPENSTACK_API_PROJECT)
- openstack.getKeystoneToken(openstackCloud, openstackEnv)
+ openstack.getKeystoneToken(openstackCloud, venv)
//
// Verify possibility of create stack for given user and stack type
//
wrap([$class: 'BuildUser']) {
if (env.BUILD_USER_ID && !env.BUILD_USER_ID.equals("jenkins") && !STACK_REUSE.toBoolean()) {
- def existingStacks = openstack.getStacksForNameContains(openstackCloud, "${env.BUILD_USER_ID}-${JOB_NAME}", openstackEnv)
+ def existingStacks = openstack.getStacksForNameContains(openstackCloud, "${env.BUILD_USER_ID}-${JOB_NAME}", venv)
if(existingStacks.size() >= _MAX_PERMITTED_STACKS){
STACK_DELETE = "false"
throw new Exception("You cannot create new stack, you already have ${_MAX_PERMITTED_STACKS} stacks of this type (${JOB_NAME}). \nStack names: ${existingStacks}")
@@ -119,17 +123,22 @@
'instance_zone': HEAT_STACK_ZONE,
'public_net': HEAT_STACK_PUBLIC_NET
]
- openstack.createHeatStack(openstackCloud, STACK_NAME, STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, openstackEnv, false)
+ openstack.createHeatStack(openstackCloud, STACK_NAME, STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, venv, false)
}
}
// get SALT_MASTER_URL
- saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, STACK_NAME, 'salt_master_ip', openstackEnv)
+ saltMasterHost = openstack.getHeatStackOutputParam(openstackCloud, STACK_NAME, 'salt_master_ip', venv)
currentBuild.description = "${STACK_NAME} ${saltMasterHost}"
SALT_MASTER_URL = "http://${saltMasterHost}:6969"
} else if (STACK_TYPE == 'aws') {
+ // setup environment
+ aws.setupVirtualEnv(venv)
+
+ // set aws_env_vars
+ aws_env_vars = aws.getEnvVars(AWS_API_CREDENTIALS, AWS_STACK_REGION)
if (STACK_REUSE.toBoolean() == true && STACK_NAME == '') {
error("If you want to reuse existing stack you need to provide it's name")
@@ -156,23 +165,20 @@
// get templates
git.checkoutGitRepository('template', STACK_TEMPLATE_URL, STACK_TEMPLATE_BRANCH, STACK_TEMPLATE_CREDENTIALS)
- // setup environment
- aws.setupVirtualEnv(venv_path)
-
// start stack
def stack_params = [
"ParameterKey=KeyName,ParameterValue=" + AWS_SSH_KEY,
"ParameterKey=CmpNodeCount,ParameterValue=" + STACK_COMPUTE_COUNT
]
def template_file = 'cfn/' + STACK_TEMPLATE + '.yml'
- aws.createStack(venv_path, env_vars, template_file, STACK_NAME, stack_params)
+ aws.createStack(venv, aws_env_vars, template_file, STACK_NAME, stack_params)
}
// wait for stack to be ready
- aws.waitForStatus(venv_path, env_vars, STACK_NAME, 'CREATE_COMPLETE')
+ aws.waitForStatus(venv, aws_env_vars, STACK_NAME, 'CREATE_COMPLETE')
// get outputs
- saltMasterHost = aws.getOutputs(venv_path, env_vars, STACK_NAME, 'SaltMasterIP')
+ saltMasterHost = aws.getOutputs(venv, aws_env_vars, STACK_NAME, 'SaltMasterIP')
currentBuild.description = "${STACK_NAME} ${saltMasterHost}"
SALT_MASTER_URL = "http://${saltMasterHost}:6969"
@@ -184,6 +190,13 @@
saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
+ // Set up override params
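+ // SALT_OVERRIDES is expected to be plain YAML; for example (values are
+ // illustrative, key names taken from the former per-parameter overrides):
+ //   kubernetes_hyperkube_image: <registry>/hyperkube:v1.x
+ //   kubernetes_mtu: 1450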
+ if (env.getEnvironment().containsKey('SALT_OVERRIDES')) {
+ stage('Set Salt overrides') {
+ salt.setSaltOverrides(saltMaster, SALT_OVERRIDES)
+ }
+ }
+
//
// Install
//
@@ -203,12 +216,14 @@
// install k8s
if (common.checkContains('STACK_INSTALL', 'k8s')) {
- stage('Install Kubernetes infra') {
- // configure kubernetes_control_address - save loadbalancer
- def kubernetes_control_address = aws.getOutputs(venv_path, env_vars, STACK_NAME, 'ControlLoadBalancer')
- print(kubernetes_control_address)
- salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_control_address', kubernetes_control_address], null, true)
+ stage('Install Kubernetes infra') {
+ if (STACK_TYPE == 'aws') {
+ // configure kubernetes_control_address - save loadbalancer
+ def kubernetes_control_address = aws.getOutputs(venv, aws_env_vars, STACK_NAME, 'ControlLoadBalancer')
+ print(kubernetes_control_address)
+ salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_control_address', kubernetes_control_address], null, true)
+ }
// ensure certificates are generated properly
salt.runSaltProcessStep(saltMaster, '*', 'saltutil.refresh_pillar', [], null, true)
salt.enforceState(saltMaster, '*', ['salt.minion.cert'], true)
@@ -216,37 +231,35 @@
orchestrate.installKubernetesInfra(saltMaster)
}
+ if (common.checkContains('STACK_INSTALL', 'contrail')) {
+ stage('Install Contrail for Kubernetes') {
+ orchestrate.installContrailNetwork(saltMaster)
+ orchestrate.installKubernetesContrailCompute(saltMaster)
+ }
+ }
+
stage('Install Kubernetes control') {
orchestrate.installKubernetesControl(saltMaster)
-
}
stage('Scale Kubernetes computes') {
if (STACK_COMPUTE_COUNT > 0) {
if (STACK_TYPE == 'aws') {
-
// get stack info
- def scaling_group = aws.getOutputs(venv_path, env_vars, STACK_NAME, 'ComputesScalingGroup')
+ def scaling_group = aws.getOutputs(venv, aws_env_vars, STACK_NAME, 'ComputesScalingGroup')
//update autoscaling group
- aws.updateAutoscalingGroup(venv_path, env_vars, scaling_group, ["--desired-capacity " + STACK_COMPUTE_COUNT])
+ aws.updateAutoscalingGroup(venv, aws_env_vars, scaling_group, ["--desired-capacity " + STACK_COMPUTE_COUNT])
// wait for computes to boot up
- aws.waitForAutoscalingInstances(venv_path, env_vars, scaling_group)
+ aws.waitForAutoscalingInstances(venv, aws_env_vars, scaling_group)
sleep(60)
}
orchestrate.installKubernetesCompute(saltMaster)
}
}
-
- if (common.checkContains('STACK_INSTALL', 'contrail')) {
- stage('Install Contrail for Kubernetes') {
- orchestrate.installContrailNetwork(saltMaster)
- orchestrate.installContrailCompute(saltMaster)
- }
- }
}
// install openstack
@@ -284,10 +297,17 @@
}
+ if (common.checkContains('STACK_INSTALL', 'sl-legacy')) {
+ stage('Install StackLight v1') {
+ orchestrate.installStacklightv1Control(saltMaster)
+ orchestrate.installStacklightv1Client(saltMaster)
+ }
+ }
+
if (common.checkContains('STACK_INSTALL', 'stacklight')) {
stage('Install StackLight') {
- orchestrate.installStacklightControl(saltMaster)
- orchestrate.installStacklightClient(saltMaster)
+ orchestrate.installDockerSwarm(saltMaster)
+ orchestrate.installStacklight(saltMaster)
}
}
diff --git a/docker-build-to-jfrog.groovy b/docker-build-to-jfrog.groovy
deleted file mode 100644
index b16dc06..0000000
--- a/docker-build-to-jfrog.groovy
+++ /dev/null
@@ -1,98 +0,0 @@
-/**
- * Docker image build pipeline with push to JFrog
- * IMAGE_NAME - Image name
- * IMAGE_TAGS - Tag list for image, separated by space
- * CONTEXT_PATH - Path to build context directory
- * CREDENTIALS_ID - gerrit credentials id
- * DOCKERFILE_PATH - path to dockerfile in repository
- * DOCKER_REGISTRY - url to registry
- * PROJECT_NAMESPACE - in which namespace will be stored
-**/
-def artifactory = new com.mirantis.mcp.MCPArtifactory()
-def common = new com.mirantis.mk.Common()
-def gerrit = new com.mirantis.mk.Gerrit()
-
-
-node("docker") {
- def artifactoryServer = Artifactory.server("mcp-ci")
- def buildInfo = Artifactory.newBuildInfo()
-
- def projectNamespace = "mirantis/${PROJECT_NAMESPACE}"
-
- def dockerRepository = DOCKER_REGISTRY
- def docker_dev_repo = "docker-dev-local"
- def docker_prod_repo = "docker-prod-local"
- def dockerFileOption
-
- def buildTag = "oss-ci-docker-${BUILD_NUMBER}-${GERRIT_CHANGE_NUMBER}-${GERRIT_PATCHSET_NUMBER}"
-
- if (DOCKERFILE_PATH.trim() == ''){
- dockerFileOption = ''
- }
- else {
- dockerFileOption = "--file ${DOCKERFILE_PATH}"
- }
- def buildCmd = "docker build --tag ${buildTag} ${dockerFileOption} --rm ${CONTEXT_PATH}"
-
- def imageTagsList = IMAGE_TAGS.tokenize(" ")
- def workspace = common.getWorkspace()
-
- gerritChange = gerrit.getGerritChange(GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, CREDENTIALS_ID)
-
- try{
- stage("checkout") {
- gerrit.gerritPatchsetCheckout([
- credentialsId : CREDENTIALS_ID,
- withWipeOut : true,
- ])
- }
- stage("build image"){
- sh "${buildCmd}"
- imageTagsList << "${GERRIT_CHANGE_NUMBER}_${GERRIT_PATCHSET_NUMBER}"
- for (imageTag in imageTagsList) {
- sh "docker tag ${buildTag} ${dockerRepository}/${projectNamespace}/${IMAGE_NAME}:${imageTag}"
- }
- }
- stage("publish image"){
- if (gerritChange.status != "MERGED"){
- for (imageTag in imageTagsList) {
- artifactory.uploadImageToArtifactory(artifactoryServer,
- dockerRepository,
- "${projectNamespace}/${IMAGE_NAME}",
- imageTag,
- docker_dev_repo,
- buildInfo)
- currentBuild.description = "image: ${IMAGE_NAME}:${imageTag}<br>"
- }
- } else {
- def properties = [
- 'com.mirantis.gerritChangeId': "${GERRIT_CHANGE_ID}",
- 'com.mirantis.gerritPatchsetNumber': "${GERRIT_PATCHSET_NUMBER}",
- 'com.mirantis.gerritChangeNumber' : "${GERRIT_CHANGE_NUMBER}"
- ]
- // Search for an artifact with required properties
- def artifactURI = artifactory.uriByProperties(artifactoryServer.getUrl(),
- properties)
- // Get build info: build id and job name
- if ( artifactURI ) {
- def buildProperties = artifactory.getPropertiesForArtifact(artifactURI)
- //promote docker image
- artifactory.promoteDockerArtifact(artifactoryServer.getUrl(),
- docker_dev_repo,
- docker_prod_repo,
- "${projectNamespace}/${IMAGE_NAME}",
- buildProperties.get('com.mirantis.targetTag').join(','),
- 'latest')
- } else {
- throw new RuntimeException("Artifacts were not found, nothing to promote")
- }
- }
- }
- } catch (Throwable e) {
- currentBuild.result = 'FAILURE'
- common.errorMsg("Build failed due to error: ${e}")
- throw e
- } finally {
- common.sendNotification(currentBuild.result, "",["slack"])
- }
-}
diff --git a/lab-pipeline.groovy b/lab-pipeline.groovy
index a1d84c1..84f5dcf 100644
--- a/lab-pipeline.groovy
+++ b/lab-pipeline.groovy
@@ -37,16 +37,13 @@
* K8S_API_SERVER Kubernetes API address
* K8S_CONFORMANCE_IMAGE Path to docker image with conformance e2e tests
*
- * TEMPEST_IMAGE_LINK Tempest image link
+ * TEMPEST_IMAGE Tempest image link
+ * TARGET_TEST_NODE Node on which to run tests
+ * DOCKER_INSTALL Install docker on the target if true
+ * PATTERN If not false, run only tests matching the pattern
*
* optional parameters for overwriting soft params
- * KUBERNETES_HYPERKUBE_IMAGE Docker repository and tag for hyperkube image
- * CALICO_CNI_IMAGE Docker repository and tag for calico CNI image
- * CALICO_NODE_IMAGE Docker repository and tag for calico node image
- * CALICOCTL_IMAGE Docker repository and tag for calicoctl image
- * MTU MTU for Calico
- * NETCHECKER_AGENT_IMAGE Docker repository and tag for netchecker agent image
- * NETCHECKER_SERVER_IMAGE Docker repository and tag for netchecker server image
+ * SALT_OVERRIDES YAML with overrides for Salt deployment
*
*/
@@ -155,6 +152,12 @@
saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
}
+ // Set up override params
+ if (env.getEnvironment().containsKey('SALT_OVERRIDES')) {
+ stage('Set Salt overrides') {
+ salt.setSaltOverrides(saltMaster, SALT_OVERRIDES)
+ }
+ }
//
// Install
//
@@ -175,65 +178,10 @@
// install k8s
if (common.checkContains('STACK_INSTALL', 'k8s')) {
- stage('Overwrite Kubernetes parameters') {
-
- // Overwrite Kubernetes vars if specified
- if (env.getEnvironment().containsKey('KUBERNETES_HYPERKUBE_IMAGE')) {
- salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_hyperkube_image', KUBERNETES_HYPERKUBE_IMAGE])
- }
- if (env.getEnvironment().containsKey('MTU')) {
- salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_mtu', MTU])
- }
-
- // Overwrite Calico vars if specified
- if (env.getEnvironment().containsKey('CALICO_CNI_IMAGE')) {
- salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_calico_cni_image', CALICO_CNI_IMAGE])
- }
- if (env.getEnvironment().containsKey('CALICO_NODE_IMAGE')) {
- salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_calico_image', CALICO_NODE_IMAGE])
- }
- if (env.getEnvironment().containsKey('CALICOCTL_IMAGE')) {
- salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_calicoctl_image', CALICOCTL_IMAGE])
- }
- if (env.getEnvironment().containsKey('CALICO_POLICY_IMAGE')) {
- salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_calico_policy_image', CALICO_POLICY_IMAGE])
- }
-
- // Overwrite Virtlet image if specified
- if (env.getEnvironment().containsKey('VIRTLET_IMAGE')) {
- salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_virtlet_image', VIRTLET_IMAGE])
- }
-
- // Overwrite netchecker vars if specified
- if (env.getEnvironment().containsKey('NETCHECKER_AGENT_IMAGE')) {
- salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_netchecker_agent_image', NETCHECKER_AGENT_IMAGE])
- }
- if (env.getEnvironment().containsKey('NETCHECKER_SERVER_IMAGE')) {
- salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_netchecker_server_image', NETCHECKER_SERVER_IMAGE])
- }
-
- // Overwrite docker version if specified
- if (env.getEnvironment().containsKey('DOCKER_ENGINE')) {
- salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_docker_package', DOCKER_ENGINE])
- }
-
- // Overwrite addons vars if specified
- if (env.getEnvironment().containsKey('HELM_ENABLED')) {
- salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_helm_enabled', HELM_ENABLED])
- }
- if (env.getEnvironment().containsKey('NETCHECKER_ENABLED')) {
- salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_netchecker_enabled', NETCHECKER_ENABLED])
- }
- if (env.getEnvironment().containsKey('CALICO_POLICY_ENABLED')) {
- salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_calico_policy_enabled', CALICO_POLICY_ENABLED])
- }
- if (env.getEnvironment().containsKey('VIRTLET_ENABLED')) {
- salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_virtlet_enabled', VIRTLET_ENABLED])
- }
- if (env.getEnvironment().containsKey('KUBE_NET_MANAGER_ENABLED')) {
- salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'reclass.cluster_meta_set', ['kubernetes_kube_network_manager_enabled', KUBE_NET_MANAGER_ENABLED])
- }
- }
+ // install infra libs for k8s
+ stage('Install Kubernetes infra') {
+ orchestrate.installKubernetesInfra(saltMaster)
+ }
// If k8s install with contrail network manager then contrail need to be install first
if (common.checkContains('STACK_INSTALL', 'contrail')) {
@@ -244,10 +192,6 @@
}
}
- stage('Install Kubernetes infra') {
- orchestrate.installKubernetesInfra(saltMaster)
- }
-
stage('Install Kubernetes control') {
orchestrate.installKubernetesControl(saltMaster)
}
@@ -346,12 +290,15 @@
}
if (common.checkContains('STACK_TEST', 'openstack')) {
+ if (common.checkContains('DOCKER_INSTALL', 'true')) {
+ test.install_docker(saltMaster, TARGET)
+ }
stage('Run OpenStack tests') {
- test.runTempestTests(saltMaster, TEMPEST_IMAGE_LINK)
+ test.runTempestTests(saltMaster, TEMPEST_IMAGE, TARGET, PATTERN)
}
stage('Copy Tempest results to config node') {
- test.copyTempestResults(saltMaster)
+ test.copyTempestResults(saltMaster, TARGET)
}
}
diff --git a/opencontrail-upgrade.groovy b/opencontrail-upgrade.groovy
new file mode 100644
index 0000000..02c2bc1
--- /dev/null
+++ b/opencontrail-upgrade.groovy
@@ -0,0 +1,481 @@
+/**
+ * Update packages on given nodes
+ *
+ * Expected parameters:
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API.
+ * SALT_MASTER_URL Full Salt API address [http://10.10.10.1:8000].
+ * STAGE_CONTROLLERS_UPGRADE Run upgrade on Opencontrail controllers (bool)
+ * STAGE_ANALYTICS_UPGRADE Run upgrade on Opencontrail analytics (bool)
+ * STAGE_COMPUTES_UPGRADE Run upgrade on Opencontrail compute nodes (bool)
+ * COMPUTE_TARGET_SERVERS Salt compound target to match nodes to be updated [*, G@osfamily:debian].
+ * COMPUTE_TARGET_SUBSET_LIVE Number of selected nodes on which to live-apply the package update first.
+ * STAGE_CONTROLLERS_ROLLBACK Run rollback on Opencontrail controllers (bool)
+ * STAGE_ANALYTICS_ROLLBACK Run rollback on Opencontrail analytics (bool)
+ * STAGE_COMPUTES_ROLLBACK Run rollback on Opencontrail compute nodes (bool)
+ *
+**/
+
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+
+def saltMaster
+def targetLiveSubset
+def targetLiveAll
+def minions
+def result
+def args
+def commandKwargs
+def probe = 1
+def errorOccured = false
+def command = 'cmd.shell'
+
+def CONTROL_PKGS = 'contrail-config contrail-config-openstack contrail-control contrail-dns contrail-lib contrail-nodemgr contrail-utils contrail-web-controller contrail-web-core neutron-plugin-contrail python-contrail'
+def ANALYTIC_PKGS = 'contrail-analytics contrail-lib contrail-nodemgr contrail-utils python-contrail'
+def CMP_PKGS = 'contrail-lib contrail-nodemgr contrail-utils contrail-vrouter-agent contrail-vrouter-utils python-contrail python-contrail-vrouter-api python-opencontrail-vrouter-netns contrail-vrouter-dkms'
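+// Reload the vrouter kernel module: stop the vrouter agent, take vhost0
+// down, swap the module out and back in, then bring vhost0 and the agent back up.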
+def KERNEL_MODULE_RELOAD = 'service supervisor-vrouter stop;ifdown vhost0;rmmod vrouter;modprobe vrouter;ifup vhost0;service supervisor-vrouter start;'
+
+
+def void runCommonCommands(target, command, args, check, salt, saltMaster, common) {
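+ // Run `args` on the target, give services a minute to settle, then run
+ // the `check` command and pause for manual verification of its output.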
+
+ out = salt.runSaltCommand(saltMaster, 'local', ['expression': target, 'type': 'compound'], command, null, args, null)
+ salt.printSaltCommandResult(out)
+ sleep(60)
+ out = salt.runSaltCommand(saltMaster, 'local', ['expression': target, 'type': 'compound'], command, null, check, null)
+ salt.printSaltCommandResult(out)
+ input message: "Please check the output of \'${check}\' and continue if it is correct."
+}
+
+timestamps {
+ node() {
+
+ stage('Connect to Salt API') {
+ saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ }
+
+ if (STAGE_CONTROLLERS_UPGRADE.toBoolean() == true && !errorOccured) {
+
+ stage('Opencontrail controllers upgrade') {
+
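+ // Find which apt source file references the Opencontrail (ocNN) repo:
+ // grep prints "<file>:<matched line>", awk keeps the first field and
+ // sed strips everything from the colon on, leaving the file path.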
+ oc_component_repo = salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+
+ oc_component_repo = oc_component_repo['return'][0].values()[0]
+
+ try {
+ salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+ salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(saltMaster, 'I@opencontrail:control', 'linux.system.repo')
+ } catch (Exception er) {
+ errorOccured = true
+ common.errorMsg("Opencontrail component on I@opencontrail:control probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
+ return
+ }
+
+ try {
+ salt.cmdRun(saltMaster, 'I@opencontrail:control', "su root -c '/usr/local/bin/zookeeper-backup-runner.sh'")
+ } catch (Exception er) {
+ common.errorMsg('Zookeeper failed to backup. Please fix it before continuing.')
+ return
+ }
+
+ try {
+ salt.cmdRun(saltMaster, 'I@cassandra:backup:client', "su root -c '/usr/local/bin/cassandra-backup-runner-call.sh'")
+ } catch (Exception er) {
+ common.errorMsg('Cassandra failed to backup. Please fix it before continuing.')
+ return
+ }
+
+ args = 'apt install contrail-database -y;'
+ check = 'nodetool status'
+
+ // ntw01
+ runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, saltMaster, common)
+ // ntw02
+ runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, saltMaster, common)
+ // ntw03
+ runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, saltMaster, common)
+
+ args = "apt install -o Dpkg::Options::=\"--force-confold\" ${CONTROL_PKGS} -y --force-yes;"
+ check = 'contrail-status'
+
+ // ntw01
+ runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, saltMaster, common)
+ // ntw02
+ runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, saltMaster, common)
+ // ntw03
+ runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, saltMaster, common)
+
+ try {
+ salt.enforceState(saltMaster, 'I@opencontrail:control', 'opencontrail')
+ } catch (Exception er) {
+ common.errorMsg('Opencontrail state was executed on I@opencontrail:control and failed, please fix it manually.')
+ }
+
+ out = salt.runSaltCommand(saltMaster, 'local', ['expression': 'I@opencontrail:control', 'type': 'compound'], command, null, check, null)
+ salt.printSaltCommandResult(out)
+
+ common.warningMsg('Please check \'show bgp summary\' on your bgp router if all bgp peers are in healthy state.')
+ }
+ }
+
+ if (STAGE_ANALYTICS_UPGRADE.toBoolean() == true && !errorOccured) {
+
+ stage('Ask for manual confirmation') {
+ input message: "Do you want to continue with the Opencontrail analytic nodes upgrade?"
+ }
+
+ stage('Opencontrail analytics upgrade') {
+
+ oc_component_repo = salt.runSaltProcessStep(saltMaster, 'I@opencontrail:collector and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+
+ oc_component_repo = oc_component_repo['return'][0].values()[0]
+
+ try {
+ salt.runSaltProcessStep(saltMaster, 'I@opencontrail:collector', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+ salt.runSaltProcessStep(saltMaster, 'I@opencontrail:collector', 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(saltMaster, 'I@opencontrail:collector', 'linux.system.repo')
+ } catch (Exception er) {
+ errorOccured = true
+ common.errorMsg("Opencontrail component on I@opencontrail:collector probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
+ return
+ }
+
+ args = 'apt install contrail-database -y;'
+ check = 'nodetool status'
+
+ // ntw01
+ runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, saltMaster, common)
+ // ntw02
+ runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, saltMaster, common)
+ // ntw03
+ runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, saltMaster, common)
+
+ args = "apt install -o Dpkg::Options::=\"--force-confold\" ${ANALYTIC_PKGS} -y --force-yes;"
+ check = 'contrail-status'
+
+ // ntw01
+ runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, saltMaster, common)
+ // ntw02
+ runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, saltMaster, common)
+ // ntw03
+ runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, saltMaster, common)
+
+ try {
+ salt.enforceState(saltMaster, 'I@opencontrail:collector', 'opencontrail')
+ } catch (Exception er) {
+ common.errorMsg('Opencontrail state was executed on I@opencontrail:collector and failed, please fix it manually.')
+ }
+
+ out = salt.runSaltCommand(saltMaster, 'local', ['expression': 'I@opencontrail:collector', 'type': 'compound'], command, null, check, null)
+ salt.printSaltCommandResult(out)
+ }
+ }
+
+ if (STAGE_COMPUTES_UPGRADE.toBoolean() == true && !errorOccured) {
+
+ try {
+
+ stage('List targeted compute servers') {
+ minions = salt.getMinions(saltMaster, COMPUTE_TARGET_SERVERS)
+
+ if (minions.isEmpty()) {
+ throw new Exception("No minion was targeted")
+ }
+
+ targetLiveSubset = minions.subList(0, Integer.valueOf(COMPUTE_TARGET_SUBSET_LIVE)).join(' or ')
+ targetLiveSubsetProbe = minions.subList(0, probe).join(' or ')
+
+ targetLiveAll = minions.join(' or ')
+ common.infoMsg("Found nodes: ${targetLiveAll}")
+ common.infoMsg("Selected sample nodes: ${targetLiveSubset}")
+ }
+
+ stage('Confirm upgrade on sample nodes') {
+ input message: "Do you want to continue with the Opencontrail compute upgrade on the following sample nodes? ${targetLiveSubset}"
+ }
+
+ stage("Opencontrail compute upgrade on sample nodes") {
+
+ oc_component_repo = salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+ oc_component_repo = oc_component_repo['return'][0].values()[0]
+
+ try {
+ salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+ salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(saltMaster, targetLiveSubset, 'linux.system.repo')
+ } catch (Exception er) {
+ errorOccured = true
+ common.errorMsg("Opencontrail component on ${targetLiveSubset} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
+ return
+ }
+
+ args = "export DEBIAN_FRONTEND=noninteractive; apt install -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS} -y;"
+ check = 'contrail-status'
+
+ out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, null)
+ salt.printSaltCommandResult(out)
+
+ try {
+ salt.enforceState(saltMaster, targetLiveSubset, 'opencontrail')
+ } catch (Exception er) {
+ common.errorMsg("Opencontrail state was executed on ${targetLiveSubset} and failed please fix it manually.")
+ }
+
+ salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
+ sleep(10)
+
+ out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, check, null)
+ salt.printSaltCommandResult(out)
+ }
+
+ stage('Confirm upgrade on all targeted nodes') {
+ input message: "Do you want to continue with the Opencontrail compute upgrade on all the targeted nodes? ${targetLiveAll} nodes?"
+ }
+ stage("Opencontrail compute upgrade on all targeted nodes") {
+
+ oc_component_repo = salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+ oc_component_repo = oc_component_repo['return'][0].values()[0]
+
+ try {
+ salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+ salt.runSaltProcessStep(saltMaster, targetLiveAll, 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(saltMaster, targetLiveAll, 'linux.system.repo')
+ } catch (Exception er) {
+ common.errorMsg("Opencontrail component on ${targetLiveAll} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
+ return
+ }
+
+ args = "export DEBIAN_FRONTEND=noninteractive; apt install -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS} -y;"
+ check = 'contrail-status'
+
+ out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, null)
+ salt.printSaltCommandResult(out)
+
+ try {
+ salt.enforceState(saltMaster, targetLiveAll, 'opencontrail')
+ } catch (Exception er) {
+ common.errorMsg("Opencontrail state was executed on ${targetLiveAll} and failed please fix it manually.")
+ }
+
+ salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
+ sleep(10)
+
+ out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, check, null)
+ salt.printSaltCommandResult(out)
+ }
+
+ } catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ currentBuild.result = "FAILURE"
+ throw e
+ }
+ }
+
+
+ if (STAGE_CONTROLLERS_ROLLBACK.toBoolean() == true && !errorOccured) {
+
+ stage('Ask for manual confirmation') {
+ input message: "Do you want to continue with the Opencontrail control nodes rollback?"
+ }
+
+ stage('Opencontrail controllers rollback') {
+
+ oc_component_repo = salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+ oc_component_repo = oc_component_repo['return'][0].values()[0]
+
+ try {
+ salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+ salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(saltMaster, 'I@opencontrail:control', 'linux.system.repo')
+ } catch (Exception er) {
+ errorOccured = true
+ common.errorMsg("Opencontrail component on I@opencontrail:control probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
+ return
+ }
+
+ args = 'apt install contrail-database -y --force-yes;'
+ check = 'nodetool status'
+
+ // ntw01
+ runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, saltMaster, common)
+ // ntw02
+ runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, saltMaster, common)
+ // ntw03
+ runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, saltMaster, common)
+
+ args = "apt install -o Dpkg::Options::=\"--force-confold\" ${CONTROL_PKGS} -y --force-yes;"
+ check = 'contrail-status'
+
+ // ntw01
+ runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, saltMaster, common)
+ // ntw02
+ runCommonCommands('I@opencontrail:control and *02*', command, args, check, salt, saltMaster, common)
+ // ntw03
+ runCommonCommands('I@opencontrail:control and *03*', command, args, check, salt, saltMaster, common)
+
+ try {
+ salt.enforceState(saltMaster, 'I@opencontrail:control', 'opencontrail')
+ } catch (Exception er) {
+ common.errorMsg('Opencontrail state was executed on I@opencontrail:control and failed, please fix it manually.')
+ }
+
+ out = salt.runSaltCommand(saltMaster, 'local', ['expression': 'I@opencontrail:control', 'type': 'compound'], command, null, check, null)
+ salt.printSaltCommandResult(out)
+
+ common.warningMsg('Please check \'show bgp summary\' on your bgp router if all bgp peers are in healthy state.')
+ }
+ }
+
+ if (STAGE_ANALYTICS_ROLLBACK.toBoolean() == true && !errorOccured) {
+
+ stage('Ask for manual confirmation') {
+ input message: "Do you want to continue with the Opencontrail analytic nodes rollback?"
+ }
+
+ stage('Opencontrail analytics rollback') {
+
+ oc_component_repo = salt.runSaltProcessStep(saltMaster, 'I@opencontrail:collector and *01*', 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+ oc_component_repo = oc_component_repo['return'][0].values()[0]
+
+ try {
+ salt.runSaltProcessStep(saltMaster, 'I@opencontrail:collector', 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+ salt.runSaltProcessStep(saltMaster, 'I@opencontrail:collector', 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(saltMaster, 'I@opencontrail:collector', 'linux.system.repo')
+ } catch (Exception er) {
+ errorOccured = true
+ common.errorMsg("Opencontrail component on I@opencontrail:collector probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
+ return
+ }
+
+ args = 'apt install contrail-database -y --force-yes;'
+ check = 'nodetool status'
+
+ // ntw01
+ runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, saltMaster, common)
+ // ntw02
+ runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, saltMaster, common)
+ // ntw03
+ runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, saltMaster, common)
+
+ args = "apt install -o Dpkg::Options::=\"--force-confold\" ${ANALYTIC_PKGS} -y --force-yes;"
+ check = 'contrail-status'
+
+ // ntw01
+ runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, saltMaster, common)
+ // ntw02
+ runCommonCommands('I@opencontrail:collector and *02*', command, args, check, salt, saltMaster, common)
+ // ntw03
+ runCommonCommands('I@opencontrail:collector and *03*', command, args, check, salt, saltMaster, common)
+
+ try {
+ salt.enforceState(saltMaster, 'I@opencontrail:collector', 'opencontrail')
+ } catch (Exception er) {
+ common.errorMsg('Opencontrail state was executed on I@opencontrail:collector and failed, please fix it manually.')
+ }
+
+ out = salt.runSaltCommand(saltMaster, 'local', ['expression': 'I@opencontrail:collector', 'type': 'compound'], command, null, check, null)
+ salt.printSaltCommandResult(out)
+ }
+ }
+
+ if (STAGE_COMPUTES_ROLLBACK.toBoolean() == true && !errorOccured) {
+
+ try {
+
+ stage('List targeted compute servers') {
+ minions = salt.getMinions(saltMaster, COMPUTE_TARGET_SERVERS)
+
+ if (minions.isEmpty()) {
+ throw new Exception("No minion was targeted")
+ }
+
+ targetLiveSubset = minions.subList(0, Integer.valueOf(COMPUTE_TARGET_SUBSET_LIVE)).join(' or ')
+ targetLiveSubsetProbe = minions.subList(0, probe).join(' or ')
+
+ targetLiveAll = minions.join(' or ')
+ common.infoMsg("Found nodes: ${targetLiveAll}")
+ common.infoMsg("Selected sample nodes: ${targetLiveSubset}")
+ }
+
+ stage('Confirm rollback on sample nodes') {
+ input message: "Do you want to continue with the Opencontrail compute rollback on the following sample nodes? ${targetLiveSubset}"
+ }
+
+ stage("Opencontrail compute rollback on sample nodes") {
+
+ oc_component_repo = salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+ oc_component_repo = oc_component_repo['return'][0].values()[0]
+
+ try {
+ salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+ salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(saltMaster, targetLiveSubset, 'linux.system.repo')
+ } catch (Exception er) {
+ errorOccured = true
+ common.errorMsg("Opencontrail component on ${targetLiveSubset} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
+ return
+ }
+
+ args = "export DEBIAN_FRONTEND=noninteractive; apt install --allow-downgrades -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS} -y;"
+ check = 'contrail-status'
+
+ out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, null)
+ salt.printSaltCommandResult(out)
+
+ try {
+ salt.enforceState(saltMaster, targetLiveSubset, 'opencontrail')
+ } catch (Exception er) {
+ common.errorMsg("Opencontrail state was executed on ${targetLiveSubset} and failed please fix it manually.")
+ }
+
+ salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
+ sleep(10)
+
+ out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, check, null)
+ salt.printSaltCommandResult(out)
+ }
+
+ stage('Confirm rollback on all targeted nodes') {
+ input message: "Do you want to continue with the Opencontrail compute upgrade on all the targeted nodes? ${targetLiveAll} nodes?"
+ }
+
+ stage("Opencontrail compute upgrade on all targeted nodes") {
+
+ oc_component_repo = salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ['grep -RE \'oc[0-9]{2,3}\' /etc/apt/sources.list* | awk \'{print $1}\' | sed \'s/ *:.*//\''], null, true)
+ oc_component_repo = oc_component_repo['return'][0].values()[0]
+
+ try {
+ salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ["rm ${oc_component_repo}"], null, true)
+ salt.runSaltProcessStep(saltMaster, targetLiveAll, 'saltutil.refresh_pillar', [], null, true)
+ salt.enforceState(saltMaster, targetLiveAll, 'linux.system.repo')
+ } catch (Exception er) {
+ common.errorMsg("Opencontrail component on ${targetLiveAll} probably failed to be replaced. Please check it in ${oc_component_repo} before continuing.")
+ return
+ }
+
+ args = "export DEBIAN_FRONTEND=noninteractive; apt install --allow-downgrades -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS} -y;"
+ check = 'contrail-status'
+
+ out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, null)
+ salt.printSaltCommandResult(out)
+
+ try {
+ salt.enforceState(saltMaster, targetLiveAll, 'opencontrail')
+ } catch (Exception er) {
+ common.errorMsg("Opencontrail state was executed on ${targetLiveAll} and failed please fix it manually.")
+ }
+
+ salt.runSaltProcessStep(saltMaster, targetLiveAll, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
+ sleep(10)
+
+ out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, check, null)
+ salt.printSaltCommandResult(out)
+ }
+
+ } catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ currentBuild.result = "FAILURE"
+ throw e
+ }
+ }
+ }
+}
diff --git a/openstack-compute-install.groovy b/openstack-compute-install.groovy
index d6b08c3..8e53396 100644
--- a/openstack-compute-install.groovy
+++ b/openstack-compute-install.groovy
@@ -12,7 +12,6 @@
def salt = new com.mirantis.mk.Salt()
def saltMaster
-def targetAll = ['expression': TARGET_SERVERS, 'type': 'compound']
def minions
def result
def command
@@ -27,7 +26,7 @@
}
stage('List target servers') {
- minions = salt.getMinions(saltMaster, targetAll)
+ minions = salt.getMinions(saltMaster, TARGET_SERVERS)
if (minions.isEmpty()) {
throw new Exception("No minion was targeted")
@@ -38,32 +37,50 @@
common.infoMsg("Selected nodes: ${targetLiveAll}")
}
- stage("Setup network for compute") {
- common.infoMsg("Now all network configuration will be enforced, which caused reboot of nodes: ${targetLiveAll}")
- try {
- salt.cmdRun(saltMaster, targetLiveAll, 'salt-call state.sls linux.system.user,openssh,linux.network;reboot')
- } catch(e) {
- common.infoMsg("no respond from nodes due reboot")
- }
- common.infoMsg("Now pipeline is waiting until node reconnect to salt master")
- timeout(800) {
- retry(666) {
- try {
- salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], 'test.ping')
- } catch(e) {
- common.infoMsg("Still waiting for node to come up")
- sleep(10)
- }
- }
- }
+ stage("Setup repositories") {
+ salt.enforceState(saltMaster, targetLiveAll, 'linux.system.repo', true)
}
- stage("Deploy Compute") {
- common.infoMsg("Lets run rest of the states to finish deployment")
- salt.enforceState(saltMaster, targetLiveAll, 'linux,openssh,ntp,salt', true)
- retry(2) {
- salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], 'state.apply')
- }
+ stage("Upgrade packages") {
+ salt.runSaltProcessStep(saltMaster, targetLiveAll, 'pkg.upgrade', [], null, true)
+ }
+
+ stage("Setup networking") {
+ // Sync all of the modules from the salt master.
+ salt.syncAll(saltMaster, targetLiveAll)
+
+ // Apply the 'salt' state to install python-psutil for network configuration; the salt-minion restart states are excluded to avoid losing the connection.
+ salt.runSaltProcessStep(saltMaster, targetLiveAll, 'state.apply', ['salt', 'exclude=[{\'id\': \'salt_minion_service\'}, {\'id\': \'salt_minion_service_restart\'}, {\'id\': \'salt_minion_sync_all\'}]'], null, true)
+
+ // Restart salt-minion so the changes take effect.
+ salt.runSaltProcessStep(saltMaster, targetLiveAll, 'service.restart', ['salt-minion'], null, true, 10)
+
+ // Configure networking excluding vhost0 interface.
+ salt.runSaltProcessStep(saltMaster, targetLiveAll, 'state.apply', ['linux.network', 'exclude=[{\'id\': \'linux_interface_vhost0\'}]'], null, true)
+
+ // Kill leftover ifup/ifdown processes that may still be stuck from the previous linux.network state.
+ salt.runSaltProcessStep(saltMaster, targetLiveAll, 'ps.pkill', ['ifup'], null, false)
+ salt.runSaltProcessStep(saltMaster, targetLiveAll, 'ps.pkill', ['ifdown'], null, false)
+
+ // Restart networking to bring UP all interfaces.
+ salt.runSaltProcessStep(saltMaster, targetLiveAll, 'service.restart', ['networking'], null, true, 300)
+ }
+
+ stage("Highstate compute") {
+ // Execute highstate, excluding the opencontrail.client state.
+ salt.runSaltProcessStep(saltMaster, targetLiveAll, 'state.highstate', ['exclude=opencontrail.client'], null, true)
+
+ // Apply nova state to remove libvirt default bridge virbr0.
+ salt.enforceState(saltMaster, targetLiveAll, 'nova', true)
+
+ // Execute highstate.
+ salt.enforceHighstate(saltMaster, targetLiveAll, true)
+
+ // Restart supervisor-vrouter.
+ salt.runSaltProcessStep(saltMaster, targetLiveAll, 'service.restart', ['supervisor-vrouter'], null, true, 300)
+
+ // Apply the salt and collectd states to refresh information about the current network interfaces.
+ salt.enforceState(saltMaster, targetLiveAll, 'salt,collectd', true)
}
} catch (Throwable e) {
diff --git a/openstack-compute-upgrade.groovy b/openstack-compute-upgrade.groovy
index 768f055..4a04531 100644
--- a/openstack-compute-upgrade.groovy
+++ b/openstack-compute-upgrade.groovy
@@ -14,7 +14,6 @@
def salt = new com.mirantis.mk.Salt()
def saltMaster
-def targetAll = ['expression': TARGET_SERVERS, 'type': 'compound']
def targetTestSubset
def targetLiveSubset
def targetLiveAll
@@ -33,7 +32,7 @@
}
stage('List target servers') {
- minions = salt.getMinions(saltMaster, targetAll)
+ minions = salt.getMinions(saltMaster, TARGET_SERVERS)
if (minions.isEmpty()) {
throw new Exception("No minion was targeted")
@@ -71,7 +70,7 @@
if(opencontrail != null) {
stage('Remove OC component from repos on test nodes') {
- salt.cmdRun(saltMaster, targetTestSubset, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g'")
+ salt.cmdRun(saltMaster, targetTestSubset, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g;s/ oc([0-9]*\$)//g'")
salt.runSaltProcessStep(saltMaster, targetTestSubset, 'pkg.refresh_db', [], null, true)
}
}
@@ -90,7 +89,7 @@
if(opencontrail != null) {
stage('Remove OC component from repos on sample nodes') {
- salt.cmdRun(saltMaster, targetLiveSubset, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g'")
+ salt.cmdRun(saltMaster, targetLiveSubset, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g;s/ oc([0-9]*\$)//g'")
salt.runSaltProcessStep(saltMaster, targetLiveSubset, 'pkg.refresh_db', [], null, true)
}
}
@@ -161,7 +160,7 @@
if(opencontrail != null) {
stage('Remove OC component from repos on all targeted nodes') {
- salt.cmdRun(saltMaster, targetLiveAll, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g'")
+ salt.cmdRun(saltMaster, targetLiveAll, "find /etc/apt/sources.list* -type f -print0 | xargs -0 sed -i -r -e 's/ oc([0-9]*) / /g;s/ oc([0-9]*\$)//g'")
salt.runSaltProcessStep(saltMaster, targetLiveAll, 'pkg.refresh_db', [], null, true)
}
}
diff --git a/openstack-control-upgrade.groovy b/openstack-control-upgrade.groovy
index c5219d8..1dfc4cb 100644
--- a/openstack-control-upgrade.groovy
+++ b/openstack-control-upgrade.groovy
@@ -449,6 +449,8 @@
salt.enforceState(saltMaster, 'prx*', 'horizon')
// salt 'prx*' state.sls nginx
salt.enforceState(saltMaster, 'prx*', 'nginx')
+ // salt "prx*" state.sls memcached
+ salt.enforceState(saltMaster, 'prx*', 'memcached')
try {
salt.enforceHighstate(saltMaster, 'ctl*')
diff --git a/ovs-gateway-upgrade.groovy b/ovs-gateway-upgrade.groovy
new file mode 100644
index 0000000..70037a4
--- /dev/null
+++ b/ovs-gateway-upgrade.groovy
@@ -0,0 +1,154 @@
+/**
+ * Update packages on given nodes
+ *
+ * Expected parameters:
+ * SALT_MASTER_CREDENTIALS Credentials to the Salt API.
+ * SALT_MASTER_URL Full Salt API address [https://10.10.10.1:8000].
+ * TARGET_SERVERS Salt compound target to match nodes to be updated [*, G@osfamily:debian].
+ * TARGET_SUBSET_TEST Number of nodes to list package updates on; an empty string means all targeted nodes.
+ * TARGET_SUBSET_LIVE Number of nodes on which to live-apply the selected package update.
+ *
+**/
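+
+// Example values (hypothetical), typically set as Jenkins job parameters:
+//   SALT_MASTER_URL    = 'https://10.10.10.1:8000'
+//   TARGET_SERVERS     = 'G@osfamily:debian'
+//   TARGET_SUBSET_TEST = '1'
+//   TARGET_SUBSET_LIVE = '1'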
+
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+
+def saltMaster
+def targetTestSubset
+def targetLiveSubset
+def targetLiveAll
+def minions
+def result
+def args
+def command
+def commandKwargs
+def probe = 1
+
+node() {
+ try {
+
+ stage('Connect to Salt master') {
+ saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ }
+
+ stage('List target servers') {
+ minions = salt.getMinions(saltMaster, TARGET_SERVERS)
+
+ if (minions.isEmpty()) {
+ throw new Exception("No minion was targeted")
+ }
+
+ if (TARGET_SUBSET_TEST != "") {
+ targetTestSubset = minions.subList(0, Integer.valueOf(TARGET_SUBSET_TEST)).join(' or ')
+ } else {
+ targetTestSubset = minions.join(' or ')
+ }
+ targetLiveSubset = minions.subList(0, Integer.valueOf(TARGET_SUBSET_LIVE)).join(' or ')
+ targetTestSubsetProbe = minions.subList(0, probe).join(' or ')
+ targetLiveSubsetProbe = minions.subList(0, probe).join(' or ')
+
+ targetLiveAll = minions.join(' or ')
+ common.infoMsg("Found nodes: ${targetLiveAll}")
+ common.infoMsg("Selected test nodes: ${targetTestSubset}")
+ common.infoMsg("Selected sample nodes: ${targetLiveSubset}")
+ }
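+
+ // Illustration (hypothetical): with minions = ['gtw01.local', 'gtw02.local']
+ // and TARGET_SUBSET_LIVE = '1', targetLiveSubset is 'gtw01.local' while
+ // targetLiveAll is 'gtw01.local or gtw02.local' -- both valid Salt compound targets.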
+
+
+ stage("Add new repos on test nodes") {
+ salt.enforceState(saltMaster, targetTestSubset, 'linux.system.repo')
+ }
+
+ stage("List package upgrades") {
+ salt.runSaltProcessStep(saltMaster, targetTestSubset, 'pkg.list_upgrades', [], null, true)
+ }
+
+ stage('Confirm upgrade on sample nodes') {
+ input message: "Please verify the list of packages that you want to be upgraded. Do you want to continue with upgrade?"
+ }
+
+ stage("Add new repos on sample nodes") {
+ salt.enforceState(saltMaster, targetLiveSubset, 'linux.system.repo')
+ }
+
+ args = "apt-get -y -s -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade"
+
+ stage('Test upgrade on sample') {
+ try {
+ salt.cmdRun(saltMaster, targetLiveSubset, args)
+ } catch (Exception er) {
+ print(er)
+ }
+ }
+
+ stage('Confirm upgrade on sample') {
+ input message: "Please verify if there are packages that it wants to downgrade. If so, execute apt-cache policy on them and verify if everything is fine. Do you want to continue with upgrade?"
+ }
+
+ command = "cmd.run"
+ args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
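+ // Unlike the '-s' test run above, this performs the actual dist-upgrade,
+ // keeping existing config files (--force-confold) and allowing downgrades.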
+
+ stage('Apply package upgrades on sample') {
+ out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
+ salt.printSaltCommandResult(out)
+ }
+
+ args = "sudo /usr/share/openvswitch/scripts/ovs-ctl start"
+
+ stage('Start ovs on sample nodes') {
+ out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, commandKwargs)
+ salt.printSaltCommandResult(out)
+ }
+ stage("Run Neutron state on sample nodes") {
+ salt.enforceState(saltMaster, targetLiveSubset, ['neutron'])
+ }
+
+ stage("Run Highstate on sample nodes") {
+ try {
+ salt.enforceHighstate(saltMaster, targetLiveSubset)
+ } catch (Exception er) {
+ common.errorMsg("Highstate was executed on ${targetLiveSubset} but something failed. Please check it and fix it accordingly.")
+ }
+ }
+
+ stage('Confirm upgrade on all targeted nodes') {
+ timeout(time: 2, unit: 'HOURS') {
+ input message: "Verify that the upgraded sample nodes are working correctly. If so, do you want to approve live upgrade on ${targetLiveAll} nodes?"
+ }
+ }
+
+ stage("Add new repos on all targeted nodes") {
+ salt.enforceState(saltMaster, targetLiveAll, 'linux.system.repo')
+ }
+
+ args = 'export DEBIAN_FRONTEND=noninteractive; apt-get -y -q --allow-downgrades -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" dist-upgrade;'
+
+ stage('Apply package upgrades on all targeted nodes') {
+ out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
+ salt.printSaltCommandResult(out)
+ }
+
+ args = "sudo /usr/share/openvswitch/scripts/ovs-ctl start"
+
+ stage('Start ovs on all targeted nodes') {
+ out = salt.runSaltCommand(saltMaster, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, commandKwargs)
+ salt.printSaltCommandResult(out)
+ }
+ stage("Run Neutron state on all targeted nodes") {
+ salt.enforceState(saltMaster, targetLiveAll, ['neutron'])
+ }
+
+ stage("Run Highstate on all targeted nodes") {
+ try {
+ salt.enforceHighstate(saltMaster, targetLiveAll)
+ } catch (Exception er) {
+ common.errorMsg("Highstate was executed ${targetLiveAll} but something failed. Please check it and fix it accordingly.")
+ }
+ }
+
+ } catch (Throwable e) {
+ // If there was an error or exception thrown, the build failed
+ currentBuild.result = "FAILURE"
+ throw e
+ }
+}
+
diff --git a/restore-cassandra.groovy b/restore-cassandra.groovy
index 900b3d8..af2016e 100644
--- a/restore-cassandra.groovy
+++ b/restore-cassandra.groovy
@@ -49,21 +49,21 @@
common.warningMsg('Directory already empty')
}
- _pillar = salt.getPillar(saltMaster, "ntw01*", 'cassandra:backup:backup_dir')
+ _pillar = salt.getPillar(saltMaster, "I@cassandra:backup:client", 'cassandra:backup:backup_dir')
backup_dir = _pillar['return'][0].values()[0]
if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/cassandra' }
print(backup_dir)
- salt.runSaltProcessStep(saltMaster, 'ntw01*', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
+ salt.runSaltProcessStep(saltMaster, 'I@cassandra:backup:client', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
- salt.runSaltProcessStep(saltMaster, 'ntw01*', 'service.start', ['supervisor-database'], null, true)
+ salt.runSaltProcessStep(saltMaster, 'I@cassandra:backup:client', 'service.start', ['supervisor-database'], null, true)
sleep(30)
// performs restore
- salt.cmdRun(saltMaster, 'ntw01*', "su root -c 'salt-call state.sls cassandra'")
- salt.runSaltProcessStep(saltMaster, 'ntw01*', 'system.reboot', null, null, true, 5)
- salt.runSaltProcessStep(saltMaster, 'ntw02*', 'system.reboot', null, null, true, 5)
- salt.runSaltProcessStep(saltMaster, 'ntw03*', 'system.reboot', null, null, true, 5)
+ salt.cmdRun(saltMaster, 'I@cassandra:backup:client', "su root -c 'salt-call state.sls cassandra'")
+ salt.runSaltProcessStep(saltMaster, 'I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
+ salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control and not I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
sleep(60)
salt.runSaltProcessStep(saltMaster, 'I@opencontrail:control', 'service.restart', ['supervisor-database'], null, true)
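
The restore now targets nodes by pillar data instead of hard-coded host globs
(ntw01*), so it keeps working whatever the control nodes are named. A minimal
sketch, assuming the same mk.Salt helper used elsewhere in these pipelines:

    // Hypothetical: resolve the pillar-based compound targets before acting on them.
    def backupClient = 'I@cassandra:backup:client'
    def otherControl = 'I@opencontrail:control and not I@cassandra:backup:client'
    def restoreNodes = salt.getMinions(saltMaster, backupClient)
    common.infoMsg("Restoring Cassandra on: ${restoreNodes.join(', ')}")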
diff --git a/test-devops-portal-pipeline.groovy b/test-devops-portal-pipeline.groovy
deleted file mode 100644
index 64e18f7..0000000
--- a/test-devops-portal-pipeline.groovy
+++ /dev/null
@@ -1,127 +0,0 @@
-/**
-* OSS - The DevOps Portal Testing Pipeline
-* CREDENTIALS_ID - gerrit credentials id
-**/
-
-gerrit = new com.mirantis.mk.Gerrit()
-common = new com.mirantis.mk.Common()
-
-def getProjectName(gerritRef, defaultGitRef) {
- def refSpec
- if (gerritRef) {
- refSpec = gerritRef
- } else {
- refSpec = defaultGitRef
- }
- def refValue = refSpec.tokenize('/').takeRight(2).join('')
- return "oss${BUILD_NUMBER}${refValue}"
-}
-
-def executeCmd(user, project, cmd) {
- common.infoMsg("Starting command: ${cmd}")
- wrap([$class: 'AnsiColorBuildWrapper']) {
- // Docker sets HOME=/ ignoring that it have to be HOME=/opt/workspace,
- // as `docker-compose exec` does not support to pass environment
- // variables, then `docker exec` is used.
- sh("docker exec --user=${user} --env=HOME=/opt/workspace ${project}_devopsportal_1 ${cmd}")
- }
- common.successMsg("Successfully completed: ${cmd}")
-}
-
-def gerritRef
-try {
- gerritRef = GERRIT_REFSPEC
-} catch (MissingPropertyException e) {
- gerritRef = null
-}
-
-def defaultGitRef, defaultGitUrl
-try {
- defaultGitRef = DEFAULT_GIT_REF
- defaultGitUrl = DEFAULT_GIT_URL
-} catch (MissingPropertyException e) {
- defaultGitRef = null
- defaultGitUrl = null
-}
-def checkouted = false
-
-node("vm") {
- def composePath = 'docker/stack/docker-compose.yml'
- def projectName
- def jenkinsUser
-
- try {
- stage('Checkout Source Code') {
- if (gerritRef) {
- // job is triggered by Gerrit
- checkouted = gerrit.gerritPatchsetCheckout ([
- credentialsId : CREDENTIALS_ID,
- withWipeOut : true,
- ])
- } else if(defaultGitRef && defaultGitUrl) {
- checkouted = gerrit.gerritPatchsetCheckout(defaultGitUrl, defaultGitRef, "HEAD", CREDENTIALS_ID)
- }
- if(!checkouted){
- throw new Exception("Cannot checkout gerrit patchset, GERRIT_REFSPEC and DEFAULT_GIT_REF is null")
- }
- }
-
- projectName = getProjectName(gerritRef, defaultGitRef)
-
- stage('Setup Up Stack') {
- sh("docker-compose --file ${composePath} --project-name=${projectName} pull")
- sh("docker-compose --file ${composePath} --project-name=${projectName} up -d --force-recreate")
- common.successMsg("Stack with the ${projectName} is started.")
- }
-
- def jenkinsUID = common.getJenkinsUid()
- def jenkinsGID = common.getJenkinsGid()
-
- jenkinsUser = "${jenkinsUID}:${jenkinsGID}"
-
- stage('Print Environment Information') {
- sh("docker-compose version")
- sh("docker version")
- executeCmd(jenkinsUser, projectName, "npm config get")
- executeCmd(jenkinsUser, projectName, "env")
- executeCmd(jenkinsUser, projectName, "ls -lan")
- }
-
- stage('Install Dependencies') {
- executeCmd(jenkinsUser, projectName, "npm install")
- }
- stage('Run Linter Tests') {
- executeCmd(jenkinsUser, projectName, "npm run lint")
- }
- stage('Run Unit Tests') {
- timeout(4) {
- executeCmd(jenkinsUser, projectName, "npm run test:unit")
- }
- }
- stage('Run Function Tests') {
- timeout(20) {
- try {
- executeCmd(jenkinsUser, projectName, "npm run test:functional")
- } catch (err) {
- archiveArtifacts(
- artifacts: "test_output/**/*.png",
- allowEmptyArchive: true,
- )
- throw err
- }
- }
- }
- } catch (err) {
- currentBuild.result = 'FAILURE'
- common.errorMsg("Build failed due to error: ${err}")
- throw err
- } finally {
- common.sendNotification(currentBuild.result, "" ,["slack"])
- stage('Cleanup') {
- wrap([$class: 'AnsiColorBuildWrapper']) {
- sh("docker-compose -f ${composePath} -p ${projectName} down")
- }
- }
- }
-}
-
diff --git a/test-salt-formulas-pipeline.groovy b/test-salt-formulas-pipeline.groovy
index 25e6f58..6d76367 100644
--- a/test-salt-formulas-pipeline.groovy
+++ b/test-salt-formulas-pipeline.groovy
@@ -79,14 +79,17 @@
common.infoMsg(".kitchen.yml found, running kitchen tests")
ruby.ensureRubyEnv()
def kitchenEnvs = []
+ def filteredEnvs = []
if(fileExists(".travis.yml")){
common.infoMsg(".travis.yml found, running custom kitchen init")
def kitchenConfigYML = readYaml(file: ".travis.yml")
- kitchenEnvs=kitchenConfigYML["env"]
+ if(kitchenConfigYML.containsKey("env")){
+ kitchenEnvs=kitchenConfigYML["env"]
+ }
def kitchenInit = kitchenConfigYML["install"]
def kitchenInstalled = false
if(kitchenInit && !kitchenInit.isEmpty()){
- for(int i=0;i<kitchenInit.size();i++){
+ for(int i=0; i<kitchenInit.size(); i++){
if(kitchenInit[i].trim().startsWith("test -e Gemfile")){ //found Gemfile config
common.infoMsg("Custom Gemfile configuration found, using them")
ruby.installKitchen(kitchenInit[i].trim())
@@ -103,12 +106,11 @@
}
common.infoMsg("Running kitchen testing, parallel mode: " + KITCHEN_TESTS_PARALLEL.toBoolean())
wrap([$class: 'AnsiColorBuildWrapper']) {
- if(kitchenEnvs && !kitchenEnvs.isEmpty()){
- common.infoMsg("Found multiple environment, first running kitchen without custom env")
- ruby.runKitchenTests("", KITCHEN_TESTS_PARALLEL.toBoolean())
- for(int i=0;i<kitchenEnvs.size();i++){
- common.infoMsg("Found multiple environment, kitchen running with env: " + kitchenEnvs[i])
- ruby.runKitchenTests(kitchenEnvs[i], KITCHEN_TESTS_PARALLEL.toBoolean())
+ filteredEnvs = ruby.filterKitchenEnvs(kitchenEnvs).unique()
+ if(kitchenEnvs && !kitchenEnvs.isEmpty() && !filteredEnvs.isEmpty()){
+ for(int i=0; i<filteredEnvs.size(); i++){
+ common.infoMsg("Found " + filteredEnvs.size() + " environment, kitchen running with env: " + filteredEnvs[i])
+ ruby.runKitchenTests(filteredEnvs[i], KITCHEN_TESTS_PARALLEL.toBoolean())
}
}else{
ruby.runKitchenTests("", KITCHEN_TESTS_PARALLEL.toBoolean())
diff --git a/update-package.groovy b/update-package.groovy
index c6d008d..ea2259c 100644
--- a/update-package.groovy
+++ b/update-package.groovy
@@ -16,7 +16,6 @@
def salt = new com.mirantis.mk.Salt()
def saltMaster
-def targetAll = ['expression': TARGET_SERVERS, 'type': 'compound']
def targetTestSubset
def targetLiveSubset
def targetLiveAll
@@ -34,7 +33,7 @@
}
stage('List target servers') {
- minions = salt.getMinions(saltMaster, targetAll)
+ minions = salt.getMinions(saltMaster, TARGET_SERVERS)
if (minions.isEmpty()) {
throw new Exception("No minion was targeted")
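
Both update-package.groovy and openstack-compute-upgrade.groovy drop the explicit
target map in favour of passing the compound expression straight through. A
hedged before/after sketch, assuming getMinions accepts a plain compound-target
string as the diff implies:

    // Before: target wrapped in an explicit map
    def targetAll = ['expression': TARGET_SERVERS, 'type': 'compound']
    minions = salt.getMinions(saltMaster, targetAll)

    // After: the compound expression string is passed directly
    minions = salt.getMinions(saltMaster, TARGET_SERVERS)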