Merge "[CVP] Change container name for cvp-tempest job"
diff --git a/ceph-remove-node.groovy b/ceph-remove-node.groovy
index e616a28..0fba6a0 100644
--- a/ceph-remove-node.groovy
+++ b/ceph-remove-node.groovy
@@ -90,6 +90,10 @@
stage('Remove Ceph RGW') {
salt.enforceState(pepperEnv, 'I@ceph:radosgw', ['keepalived', 'haproxy'], true)
}
+
+ stage('Purge Ceph RGW pkgs') {
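+ // Salt's pkg.purge removes the listed packages together with their configuration files (the apt purge equivalent)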
+ salt.runSaltProcessStep(pepperEnv, HOST, 'pkg.purge', 'ceph-common,libcephfs2,python-cephfs,radosgw,python-rados,python-rbd,python-rgw')
+ }
}
if (HOST_TYPE.toLowerCase() != 'osd') {
@@ -222,7 +226,7 @@
// purge Ceph pkgs
stage('Purge Ceph OSD pkgs') {
- runCephCommand(pepperEnv, HOST, 'apt purge ceph-base ceph-common ceph-fuse ceph-mds ceph-osd python-cephfs librados2 python-rados -y')
+ salt.runSaltProcessStep(pepperEnv, HOST, 'pkg.purge', 'ceph-base,ceph-common,ceph-fuse,ceph-mds,ceph-osd,python-cephfs,librados2,python-rados,python-rbd,python-rgw')
}
stage('Remove OSD host from crushmap') {
@@ -294,6 +298,10 @@
salt.enforceState(pepperEnv, tgt, 'ceph.common', true)
}
}
+
+ stage('Purge Ceph MON pkgs') {
+ salt.runSaltProcessStep(pepperEnv, HOST, 'pkg.purge', 'ceph-base,ceph-common,ceph-mgr,ceph-mon,libcephfs2,python-cephfs,python-rbd,python-rgw')
+ }
}
if (HOST_TYPE.toLowerCase() == 'osd' && GENERATE_CRUSHMAP.toBoolean() == true) {
diff --git a/ceph-upgrade.groovy b/ceph-upgrade.groovy
index 86a1f0f..1695e5b 100644
--- a/ceph-upgrade.groovy
+++ b/ceph-upgrade.groovy
@@ -129,10 +129,12 @@
stage("Verify services for ${minion}") {
sleep(10)
- runCephCommand(master, ADMIN_HOST, "ceph -s")
+ runCephCommand(master, "${minion}", "systemctl status ceph-${target}.target")
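+ // waitForHealthy (a helper defined earlier in this pipeline) polls the cluster until it reports a healthy state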
+ waitForHealthy(master)
}
stage('Ask for manual confirmation') {
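+ // print the overall cluster status so the operator can verify the node joined back before confirming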
+ runCephCommand(master, ADMIN_HOST, "ceph -s")
input message: "From the verification command above, please check Ceph ${target} joined the cluster correctly. If so, Do you want to continue to upgrade next node?"
}
}
diff --git a/cvp-func.groovy b/cvp-func.groovy
index 0c657a5..31a412b 100644
--- a/cvp-func.groovy
+++ b/cvp-func.groovy
@@ -32,6 +32,10 @@
stage('Initialization') {
sh "rm -rf ${artifacts_dir}"
saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
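+ // an empty _param:openstack_version pillar value means OpenStack is not deployed on this environment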
+ os_version = salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
+ if (!os_version) {
+ throw new Exception("OpenStack was not found on this environment. Exiting")
+ }
salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_artifacts_dir}")
keystone_creds = validate._get_keystone_creds_v3(saltMaster)
diff --git a/cvp-ha.groovy b/cvp-ha.groovy
index b33cda6..dbde547 100644
--- a/cvp-ha.groovy
+++ b/cvp-ha.groovy
@@ -38,6 +38,10 @@
stage('Initialization') {
sh "rm -rf ${artifacts_dir}"
saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ os_version = salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
+ if (!os_version) {
+ throw new Exception("OpenStack was not found on this environment. Exiting")
+ }
salt.cmdRun(saltMaster, TEMPEST_TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
salt.cmdRun(saltMaster, TEMPEST_TARGET_NODE, "mkdir -p ${remote_artifacts_dir}")
keystone_creds = validate._get_keystone_creds_v3(saltMaster)
diff --git a/cvp-perf.groovy b/cvp-perf.groovy
index 74c9a63..127066b 100644
--- a/cvp-perf.groovy
+++ b/cvp-perf.groovy
@@ -28,6 +28,10 @@
stage('Initialization') {
sh "rm -rf ${artifacts_dir}"
saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+ os_version = salt.getPillar(saltMaster, 'I@salt:master', '_param:openstack_version')['return'][0].values()[0]
+ if (!os_version) {
+ throw new Exception("OpenStack was not found on this environment. Exiting")
+ }
salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_artifacts_dir}")
salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_artifacts_dir}")
keystone_creds = validate._get_keystone_creds_v3(saltMaster)
diff --git a/gating-pipeline.groovy b/gating-pipeline.groovy
index 324b0e2..3313d48 100644
--- a/gating-pipeline.groovy
+++ b/gating-pipeline.groovy
@@ -43,20 +43,18 @@
def skipProjectsVerify = ['mk/docker-jnlp-slave']
stage("test") {
+ // notify Gerrit that the build has started
+ ssh.agentSh(String.format("ssh -p %s %s@%s gerrit review %s,%s -m \"'Build Started %s'\"", defGerritPort, GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER, BUILD_URL))
//check Code-Review
- if (gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Code-Review", "+")) {
- continue
- } else {
- common.errorMsg("Change don't have a CodeReview, skipping gate")
- throw new Exception ("Change don't have a CodeReview, skipping gate")
+ if (!gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Code-Review", "+")) {
+ throw new Exception('Change doesn\'t have a Code-Review+1, rejecting gate')
}
//check Verify
if (!gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Verified", "+")) {
- common.errorMsg("Change don't have true Verify, skipping gate")
- throw new Exception ("Change don't have true Verify, skipping gate")
+ throw new Exception('Change doesn\'t have an initial Verify+1, rejecting gate')
} else if (gerritChange.status != "MERGED" && !env.SKIP_TEST.toBoolean()) {
//Verify-label off
- ssh.agentSh(String.format("ssh -p %s %s@%s gerrit review --verified 0", defGerritPort, GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER))
+ ssh.agentSh(String.format("ssh -p %s %s@%s gerrit review %s,%s --verified 0", defGerritPort, GERRIT_NAME, GERRIT_HOST, GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER))
//Do stage (test)
doSubmit = true
def gerritProjectArray = GERRIT_PROJECT.tokenize("/")
@@ -79,7 +77,7 @@
if (env.GERRIT_PROJECT == 'mk/cookiecutter-templates' || env.GERRIT_PROJECT == 'salt-models/reclass-system') {
callJobWithExtraVars('test-salt-model-ci-wrapper')
} else {
- if (isJobExists(testJob)) {
+ if (isJobExists(testJob)) {
common.infoMsg("Test job ${testJob} found, running")
def patchsetVerified = gerrit.patchsetHasApproval(gerritChange.currentPatchSet, "Verified", "+")
build job: testJob, parameters: [
@@ -88,14 +86,14 @@
]
giveVerify = true
} else {
- common.infoMsg("Test job ${testJob} not found")
+ common.infoMsg("Test job ${testJob} not found")
+ }
}
}
+ } else {
+ common.infoMsg('Test job skipped')
}
- } else {
- common.infoMsg("Test job skipped")
}
- }
stage("submit review") {
if (gerritChange.status == "MERGED") {
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 261193e..3783331 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -23,6 +23,50 @@
distribRevision = 'proposed'
gitGuessedVersion = false
+def GenerateModelToxDocker(Map params) {
+ def ccRoot = params['ccRoot']
+ def context = params['context']
+ def outDir = params['outDir']
+ def envOpts = params['envOpts']
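+ // the UUID suffix keeps the temp context file unique across concurrent runs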
+ def tempContextFile = new File(ccRoot, 'tempContext.yaml_' + UUID.randomUUID().toString()).toString()
+ writeFile file: tempContextFile, text: context
+ // Get Jenkins user UID and GID
+ def jenkinsUID = sh(script: 'id -u', returnStdout: true).trim()
+ def jenkinsGID = sh(script: 'id -g', returnStdout: true).trim()
+ /*
+ By default, the process in the image runs as the root user,
+ while the gpg key for the model and all files are managed by the jenkins user.
+ To make them compatible, install the requirements as root, but generate the model as the jenkins user.
+ For the build, the upstream Ubuntu Bionic image is used.
+ */
+ def configRun = ['distribRevision': 'nightly',
+ 'envOpts' : envOpts + ["CONFIG_FILE=$tempContextFile",
+ "OUTPUT_DIR=${outDir}"
+ ],
+ 'image': 'docker-prod-local.artifactory.mirantis.com/mirantis/cicd/jnlp-slave',
+ 'runCommands' : [
+ '001_prepare_generate_auto_reqs': {
+ sh('''
+ pip install tox
+ ''')
+ },
+ // user & group can be different on host and in docker
+ '002_set_jenkins_id': {
+ sh("""
+ usermod -u ${jenkinsUID} jenkins
+ groupmod -g ${jenkinsGID} jenkins
+ """)
+ },
+ '003_run_generate_auto': {
+ print('[Cookiecutter build] Result:\n' +
+ sh(returnStdout: true, script: 'cd ' + ccRoot + '; su jenkins -c "tox -ve generate_auto" '))
+ }
+ ]
+ ]
+
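+ // setupDockerAndTest (from the shared pipeline library) runs the commands prepared above inside the given image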
+ saltModelTesting.setupDockerAndTest(configRun)
+}
+
def globalVariatorsUpdate() {
def templateContext = readYaml text: env.COOKIECUTTER_TEMPLATE_CONTEXT
def context = templateContext['default_context']
@@ -141,7 +185,8 @@
stage('Generate model') {
// GNUPGHOME environment variable is required for all gpg commands
// and for python.generateModel execution
- withEnv(["GNUPGHOME=${env.WORKSPACE}/gpghome"]) {
+ def envOpts = ["GNUPGHOME=${env.WORKSPACE}/gpghome"]
+ withEnv(envOpts) {
if (context['secrets_encryption_enabled'] == 'True') {
sh "mkdir gpghome; chmod 700 gpghome"
def secretKeyID = RequesterEmail ?: "salt@${context['cluster_domain']}".toString()
@@ -183,7 +228,10 @@
// still expect only lower lvl of project, aka model/classes/cluster/XXX/. So,lets dump result into
// temp dir, and then copy it over initial structure.
reclassTempRootDir = sh(script: "mktemp -d -p ${env.WORKSPACE}", returnStdout: true).trim()
- python.generateModel(common2.dumpYAML(['default_context': context]), 'default_context', context['salt_master_hostname'], cutterEnv, reclassTempRootDir, templateEnv, false)
+ GenerateModelToxDocker(['context': common2.dumpYAML(['default_context': context]),
+ 'ccRoot' : templateEnv,
+ 'outDir' : reclassTempRootDir,
+ 'envOpts': envOpts])
dir(modelEnv) {
common.warningMsg('Forming reclass-root structure...')
sh("cp -ra ${reclassTempRootDir}/reclass/* .")