Merge "Add getIPAddressesForNodename funtction"
diff --git a/src/com/mirantis/mcp/MCPArtifactory.groovy b/src/com/mirantis/mcp/MCPArtifactory.groovy
index de4f81f..b10c1be 100644
--- a/src/com/mirantis/mcp/MCPArtifactory.groovy
+++ b/src/com/mirantis/mcp/MCPArtifactory.groovy
@@ -309,3 +309,68 @@
}
sh "rm -v ${queryFile}"
}
+
+/**
+ * Save job artifacts to the Artifactory server if one is available.
+ * Returns a link to the Artifactory repo where the job artifacts were saved.
+ *
+ * @param config LinkedHashMap which contains the following parameters:
+ *   @param artifactory String, Artifactory server id
+ *   @param artifactoryRepo String, repo to save job artifacts to
+ *   @param buildProps ArrayList, additional props for saved artifacts. Optional, default: []
+ *   @param artifactory_not_found_fail Boolean, whether to fail if the provided artifactory
+ *          id is not found or only print a warning message. Optional, default: false
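+ *
+ * Example (server id, repo and props values are illustrative):
+ *   uploadJobArtifactsToArtifactory(['artifactory': 'mcp-ci',
+ *                                    'artifactoryRepo': 'artifacts/my-job/42',
+ *                                    'buildProps': ['jobName=my-job']])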
+ */
+def uploadJobArtifactsToArtifactory(LinkedHashMap config) {
+ def common = new com.mirantis.mk.Common()
+ def artifactsDescription = ''
+ def artifactoryServer
+ try {
+ artifactoryServer = Artifactory.server(config.get('artifactory'))
+ } catch (Exception e) {
+ if (config.get('artifactory_not_found_fail', false)) {
+ throw e
+ } else {
+ common.warningMsg(e)
+ return "Artifactory server is not found. Can't save artifacts in Artifactory."
+ }
+ }
+ def artifactDir = 'cur_build_artifacts'
+ def user = ''
+ wrap([$class: 'BuildUser']) {
+ user = env.BUILD_USER_ID
+ }
+ dir(artifactDir) {
+ try {
+ unarchive(mapping: ['**/*' : '.'])
+ // Mandatory and additional properties
+ def properties = getBinaryBuildProperties(config.get('buildProps', []) << "buildUser=${user}")
+
+ // Build Artifactory spec object
+ def uploadSpec = """{
+ "files":
+ [
+ {
+ "pattern": "*",
+ "target": "${config.get('artifactoryRepo')}/",
+ "flat": false,
+ "props": "${properties}"
+ }
+ ]
+ }"""
+
+ artifactoryServer.upload(uploadSpec, newBuildInfo())
+ def linkUrl = "${artifactoryServer.getUrl()}/artifactory/${config.get('artifactoryRepo')}"
+ artifactsDescription = "Job artifacts uploaded to Artifactory: <a href=\"${linkUrl}\">${linkUrl}</a>"
+ } catch (Exception e) {
+ if (e =~ /no artifacts/) {
+ artifactsDescription = 'Build has no artifacts saved.'
+ } else {
+ throw e
+ }
+ } finally {
+ deleteDir()
+ }
+ }
+ return artifactsDescription
+}
diff --git a/src/com/mirantis/mcp/Validate.groovy b/src/com/mirantis/mcp/Validate.groovy
index de71cff..79efa08 100644
--- a/src/com/mirantis/mcp/Validate.groovy
+++ b/src/com/mirantis/mcp/Validate.groovy
@@ -417,29 +417,103 @@
* @param scenarios Directory inside repo with specific scenarios
* @param sl_scenarios Directory inside repo with specific scenarios for stacklight
* @param tasks_args_file Argument file that is used for throttling settings
+ * @param db_connection_str Rally-compliant external DB connection string
+ * @param tags Additional tags used for tagging tasks or building trends
+ * @param trends Boolean, whether to build Rally trend reports (requires db_connection_str)
* @param ext_variables The list of external variables
* @param results The reports directory
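+ *
+ * Example of a call using the new parameters (all values are illustrative):
+ *   runRallyTests(pepperEnv, 'cmp*', 'xrally/xrally-openstack:1.4.0',
+ *       ['type': 'openstack', 'stacklight_enabled': false], '/srv/report',
+ *       'https://gerrit.example.org/qa/rally-configs', 'master',
+ *       'https://gerrit.example.org/qa/rally-plugins', 'master',
+ *       'scenarios/light', '', '',
+ *       'mysql+pymysql://rally:password@10.0.0.10/rally', ['nightly'], true)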
*/
-def runRallyTests(master, target, dockerImageLink, platform, output_dir, config_repo, config_branch, plugins_repo, plugins_branch, scenarios, sl_scenarios = '', tasks_args_file = '', ext_variables = [], results = '/root/qa_results', skip_list = '') {
+def runRallyTests(
+ master, target, dockerImageLink,
+ platform, output_dir, config_repo,
+ config_branch, plugins_repo, plugins_branch,
+ scenarios, sl_scenarios = '', tasks_args_file = '',
+ db_connection_str = '', tags = [],
+ trends = false, ext_variables = [],
+ results = '/root/qa_results', skip_list = ''
+ ) {
+
def salt = new com.mirantis.mk.Salt()
def output_file = 'docker-rally.log'
def dest_folder = '/home/rally/qa_results'
def env_vars = []
+ def work_dir = 'test_config'
+
+ // compile rally deployment name from env name, platform name,
+ // date, cmp nodes count
+ def deployment_name = ''
+ def cluster_name = salt.getPillar(
+ master, 'I@salt:master', '_param:cluster_name'
+ )['return'][0].values()[0]
+ def rcs_str_node = salt.getPillar(
+ master, 'I@salt:master', 'reclass:storage:node'
+ )['return'][0].values()[0]
+ def date = new Date()
+ date = date.format("yyyyMMddHHmm")
+ def cmp_count
+
+ if (platform['type'] == 'openstack') {
+ cmp_count = rcs_str_node.openstack_compute_rack01['repeat']['count']
+ } else if (platform['type'] == 'k8s') {
+ cmp_count = rcs_str_node.kubernetes_compute_rack01['repeat']['count']
+ } else {
+ throw new Exception("Platform ${platform} is not supported yet")
+ }
+ deployment_name = "env=${cluster_name}:platform=${platform.type}:" +
+ "date=${date}:cmp=${cmp_count}"
+
+ // set up rally cmds
def rally_extra_args = ''
def cmd_rally_plugins =
- "git clone -b ${plugins_branch ?: 'master'} ${plugins_repo} /tmp/plugins; " +
- "sudo pip install --upgrade /tmp/plugins; "
- def cmd_rally_init = ''
- def cmd_rally_checkout = "git clone -b ${config_branch ?: 'master'} ${config_repo} test_config; "
+ "git clone -b ${plugins_branch ?: 'master'} ${plugins_repo} /tmp/plugins; " +
+ "sudo pip install --upgrade /tmp/plugins; "
+ def cmd_rally_init = 'rally db ensure; '
+ if (db_connection_str) {
+ cmd_rally_init = "sudo sed -i -e " +
+ "'s#connection=.*#connection=${db_connection_str}#' " +
+ "/etc/rally/rally.conf; "
+ }
+ def cmd_rally_checkout = "git clone -b ${config_branch ?: 'master'} ${config_repo} ${work_dir}; "
def cmd_rally_start = ''
def cmd_rally_task_args = ''
def cmd_rally_stacklight = ''
- def cmd_rally_report = ''
+ def cmd_rally_report = "rally task export " +
+ "--uuid \\\$(rally task list --uuids-only --status finished) " +
+ "--type junit-xml --to ${dest_folder}/report-rally.xml; " +
+ "rally task report --uuid \\\$(rally task list --uuids-only --status finished) " +
+ "--out ${dest_folder}/report-rally.html"
+ def cmd_filter_tags = ''
+
+ // build rally trends if required
+ if (trends && db_connection_str) {
+ if (tags) {
+ cmd_filter_tags = "--tag " + tags.join(' ')
+ }
+ cmd_rally_report += "; rally task trends --tasks " +
+ "\\\$(rally task list " + cmd_filter_tags +
+ " --all-deployments --uuids-only --status finished) " +
+ "--out ${dest_folder}/trends-rally.html"
+ }
+
+ // add default env tags for inserting into rally tasks
+ tags = tags + [
+ "env=${cluster_name}",
+ "platform=${platform.type}",
+ "cmp=${cmp_count}"
+ ]
+
+ // create results directory
salt.runSaltProcessStep(master, target, 'file.remove', ["${results}"])
salt.runSaltProcessStep(master, target, 'file.mkdir', ["${results}", "mode=777"])
+
+ // get required OS data
if (platform['type'] == 'openstack') {
- def _pillar = salt.getPillar(master, 'I@keystone:server', 'keystone')
- def keystone = _pillar['return'][0].values()[0]
+ cmd_rally_init += "rally deployment create --name='${deployment_name}' --fromenv; " +
+ "rally deployment check; "
+
+ def keystone = salt.getPillar(
+ master, 'I@keystone:server', 'keystone'
+ )['return'][0].values()[0]
env_vars = ( ['tempest_version=15.0.0',
"OS_USERNAME=${keystone.server.admin_name}",
"OS_PASSWORD=${keystone.server.admin_password}",
@@ -448,88 +522,121 @@
"/v${keystone.client.os_client_config.cfgs.root.content.clouds.admin_identity.identity_api_version}",
"OS_REGION_NAME=${keystone.server.region}",
'OS_ENDPOINT_TYPE=admin'] + ext_variables ).join(' -e ')
- cmd_rally_init = 'rally db create; ' +
- 'rally deployment create --fromenv --name=existing; ' +
- 'rally deployment config; '
+
+ // get required SL data
if (platform['stacklight_enabled'] == true) {
- def _pillar_grafana = salt.getPillar(master, 'I@grafana:client', 'grafana:client:server')
- def grafana = _pillar_grafana['return'][0].values()[0]
- cmd_rally_stacklight = bundle_up_scenarios(sl_scenarios, skip_list, "scenarios_${platform.type}_stacklight.yaml")
+ def grafana = salt.getPillar(
+ master, 'I@grafana:client', 'grafana:client:server'
+ )['return'][0].values()[0]
+ cmd_rally_stacklight = bundle_up_scenarios(
+ work_dir + '/' + sl_scenarios,
+ skip_list,
+ "scenarios_${platform.type}_stacklight.yaml"
+ )
+ tags.add('stacklight')
cmd_rally_stacklight += "sed -i 's/grafana_password: .*/grafana_password: ${grafana.password}/' " +
- "test_config/job-params-stacklight.yaml; " +
- "rally $rally_extra_args task start scenarios_${platform.type}_stacklight.yaml " +
- "--task-args-file test_config/job-params-stacklight.yaml; "
+ "${work_dir}/job-params-stacklight.yaml; " +
+ "rally $rally_extra_args task start --tag " + tags.join(' ') +
+ " --task scenarios_${platform.type}_stacklight.yaml " +
+ "--task-args-file ${work_dir}/job-params-stacklight.yaml; "
}
+
+ // get required K8S data
} else if (platform['type'] == 'k8s') {
+ cmd_rally_init += "rally env create --name='${deployment_name}' --from-sysenv; " +
+ "rally env check; "
rally_extra_args = "--debug --log-file ${dest_folder}/task.log"
- def _pillar = salt.getPillar(master, 'I@kubernetes:master and *01*', 'kubernetes:master')
- def kubernetes = _pillar['return'][0].values()[0]
+
+ def kubernetes = salt.getPillar(
+ master, 'I@kubernetes:master and *01*', 'kubernetes:master'
+ )['return'][0].values()[0]
env_vars = [
"KUBERNETES_HOST=http://${kubernetes.apiserver.vip_address}" +
":${kubernetes.apiserver.insecure_port}",
"KUBERNETES_CERT_AUTH=${dest_folder}/k8s-ca.crt",
"KUBERNETES_CLIENT_KEY=${dest_folder}/k8s-client.key",
"KUBERNETES_CLIENT_CERT=${dest_folder}/k8s-client.crt"].join(' -e ')
- def k8s_ca = salt.getReturnValues(salt.runSaltProcessStep(master,
- 'I@kubernetes:master and *01*', 'cmd.run',
- ["cat /etc/kubernetes/ssl/ca-kubernetes.crt"]))
- def k8s_client_key = salt.getReturnValues(salt.runSaltProcessStep(master,
- 'I@kubernetes:master and *01*', 'cmd.run',
- ["cat /etc/kubernetes/ssl/kubelet-client.key"]))
- def k8s_client_crt = salt.getReturnValues(salt.runSaltProcessStep(master,
- 'I@kubernetes:master and *01*', 'cmd.run',
- ["cat /etc/kubernetes/ssl/kubelet-client.crt"]))
+
+ def k8s_ca = salt.getFileContent(
+ master, 'I@kubernetes:master and *01*', '/etc/kubernetes/ssl/ca-kubernetes.crt'
+ )
+ def k8s_client_key = salt.getFileContent(
+ master, 'I@kubernetes:master and *01*', '/etc/kubernetes/ssl/kubelet-client.key'
+ )
+ def k8s_client_crt = salt.getFileContent(
+ master, 'I@kubernetes:master and *01*', '/etc/kubernetes/ssl/kubelet-client.crt'
+ )
def tmp_dir = '/tmp/kube'
+
salt.runSaltProcessStep(master, target, 'file.mkdir', ["${tmp_dir}"])
- salt.runSaltProcessStep(master, target, 'file.write', ["${tmp_dir}/k8s-ca.crt", "${k8s_ca}"])
- salt.runSaltProcessStep(master, target, 'file.write', ["${tmp_dir}/k8s-client.key", "${k8s_client_key}"])
- salt.runSaltProcessStep(master, target, 'file.write', ["${tmp_dir}/k8s-client.crt", "${k8s_client_crt}"])
+ salt.runSaltProcessStep(
+ master, target, 'file.write', ["${tmp_dir}/k8s-ca.crt","${k8s_ca}"]
+ )
+ salt.runSaltProcessStep(
+ master, target, 'file.write', ["${tmp_dir}/k8s-client.key", "${k8s_client_key}"]
+ )
+ salt.runSaltProcessStep(
+ master, target, 'file.write', ["${tmp_dir}/k8s-client.crt", "${k8s_client_crt}"]
+ )
salt.cmdRun(master, target, "mv ${tmp_dir}/* ${results}/")
salt.runSaltProcessStep(master, target, 'file.rmdir', ["${tmp_dir}"])
- cmd_rally_init = "rally db recreate; " +
- "rally env create --name k8s --from-sysenv; " +
- "rally env check k8s; "
+
} else {
throw new Exception("Platform ${platform} is not supported yet")
}
+
+ // set up rally task args file
switch(tasks_args_file) {
case 'none':
cmd_rally_task_args = '; '
break
case '':
- cmd_rally_task_args = '--task-args-file test_config/job-params-light.yaml; '
+ cmd_rally_task_args = "--task-args-file ${work_dir}/job-params-light.yaml; "
break
default:
- cmd_rally_task_args = "--task-args-file ${tasks_args_file}; "
+ cmd_rally_task_args = "--task-args-file ${work_dir}/${tasks_args_file}; "
break
}
if (platform['type'] == 'k8s') {
- cmd_rally_start = "for task in \\\$(" + bundle_up_scenarios(scenarios, skip_list) +
+ cmd_rally_start = "for task in \\\$(" +
+ bundle_up_scenarios(work_dir + '/' + scenarios, skip_list) +
"); do " +
- "rally $rally_extra_args task start \\\$task ${cmd_rally_task_args}" +
+ "rally $rally_extra_args task start --tag " + tags.join(' ') +
+ " --task \\\$task ${cmd_rally_task_args}" +
"done; "
} else {
- cmd_rally_checkout += bundle_up_scenarios(scenarios, skip_list, "scenarios_${platform.type}.yaml")
- cmd_rally_start = "rally $rally_extra_args task start " +
- "scenarios_${platform.type}.yaml ${cmd_rally_task_args}"
+ cmd_rally_checkout += bundle_up_scenarios(
+ work_dir + '/' + scenarios,
+ skip_list,
+ "scenarios_${platform.type}.yaml"
+ )
+ cmd_rally_start = "rally $rally_extra_args task start --tag " + tags.join(' ') +
+ " --task scenarios_${platform.type}.yaml ${cmd_rally_task_args}"
}
- cmd_rally_report= "rally task export --uuid \\\$(rally task list --uuids-only --status finished) " +
- "--type junit-xml --to ${dest_folder}/report-rally.xml; " +
- "rally task report --uuid \\\$(rally task list --uuids-only --status finished) " +
- "--out ${dest_folder}/report-rally.html"
+ // compile full rally cmd
full_cmd = 'set -xe; ' + cmd_rally_plugins +
cmd_rally_init + cmd_rally_checkout +
'set +e; ' + cmd_rally_start +
- cmd_rally_stacklight +
- cmd_rally_report
+ cmd_rally_stacklight + cmd_rally_report
+
+ // run rally
salt.runSaltProcessStep(master, target, 'file.touch', ["${results}/rally.db"])
salt.cmdRun(master, target, "chmod 666 ${results}/rally.db")
- salt.cmdRun(master, target, "docker run -w /home/rally -i --rm --net=host -e ${env_vars} " +
+ salt.cmdRun(
+ master, target,
+ "docker run -w /home/rally -i --rm --net=host -e ${env_vars} " +
"-v ${results}:${dest_folder} " +
"-v ${results}/rally.db:/home/rally/data/rally.db " +
"--entrypoint /bin/bash ${dockerImageLink} " +
"-c \"${full_cmd}\" > ${results}/${output_file}")
+
+ // remove k8s secrets
+ if (platform['type'] == 'k8s') {
+ salt.cmdRun(master, target, "rm ${results}/k8s-*")
+ }
+
+ // save artifacts
addFiles(master, target, results, output_dir)
}
diff --git a/src/com/mirantis/mk/Galera.groovy b/src/com/mirantis/mk/Galera.groovy
index f3c6b7e..5a15823 100644
--- a/src/com/mirantis/mk/Galera.groovy
+++ b/src/com/mirantis/mk/Galera.groovy
@@ -17,7 +17,9 @@
*/
def getWsrepParameters(env, target, parameters=[], print=false) {
- result = []
+ def salt = new com.mirantis.mk.Salt()
+ def common = new com.mirantis.mk.Common()
+ result = [:]
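+    // result maps wsrep parameter names to values,
+    // e.g. ['wsrep_cluster_size': 3, 'wsrep_cluster_status': 'Primary'] (illustrative)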
out = salt.runSaltProcessStep(env, "${target}", "mysql.status", [], null, false)
outlist = out['return'][0]
resultYaml = outlist.get(outlist.keySet()[0]).sort()
@@ -25,23 +27,18 @@
common.prettyPrint(resultYaml)
}
if (parameters instanceof String) {
- value = resultYaml[key]
- if (value instanceof String && value.isBigDecimal()) {
- value = value.toBigDecimal()
- }
- result = [key: value]
+ parameters = [parameters]
+ }
+ if (parameters == [] || parameters == ['']) {
+ result = resultYaml
} else {
- if (parameters == []) {
- result = resultYaml
- } else {
- for (key in parameters) {
- value = resultYaml[key]
- if (value instanceof String && value.isBigDecimal()) {
- value = value.toBigDecimal()
- }
- result << [key: value]
- }
+ for (String param in parameters) {
+ value = resultYaml[param]
+ if (value instanceof String && value.isBigDecimal()) {
+ value = value.toBigDecimal()
}
+ result[param] = value
+ }
}
return result
}
@@ -222,13 +219,27 @@
}
}
-def getGaleraLastShutdownNode(env) {
+/** Returns the last shutdown node of a Galera cluster
+@param env Salt Connection object or pepperEnv
+@param nodes List of nodes to check only (defaults to []). If not provided, all nodes are checked.
+              Use this parameter if the cluster has split into several components and you only want to check one of them.
+@return IP address or hostname of the last shutdown node
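+
+Example (node names are illustrative):
+    getGaleraLastShutdownNode(pepperEnv, ['dbs01.cluster.local', 'dbs02.cluster.local'])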
+*/
+
+def getGaleraLastShutdownNode(env, nodes = []) {
def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
- members = ''
+ members = []
lastNode = [ip: '', seqno: -2]
try {
- members = salt.getReturnValues(salt.getPillar(env, "I@galera:master", "galera:master:members"))
+ if (nodes) {
+ nodes = salt.getIPAddressesForNodenames(env, nodes)
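+            // build the members list in the same [host: <address>] shape
+            // as the galera:master:members pillar used in the else branch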
+ for (node in nodes) {
+                members << [host: "${node.get(node.keySet()[0])}"]
+ }
+ } else {
+ members = salt.getReturnValues(salt.getPillar(env, "I@galera:master", "galera:master:members"))
+ }
} catch (Exception er) {
common.errorMsg('Could not retrieve members list')
return 'I@galera:master'
@@ -314,8 +325,7 @@
if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/mysql/xtrabackup' }
salt.runSaltProcessStep(env, lastNodeTarget, 'file.remove', ["${backup_dir}/dbrestored"])
salt.cmdRun(env, 'I@xtrabackup:client', "su root -c 'salt-call state.sls xtrabackup'")
- salt.runSaltProcessStep(env, lastNodeTarget, 'service.start', ['mysql'])
-
+ salt.enforceState(env, lastNodeTarget, 'galera')
// wait until mysql service on galera master is up
try {
salt.commandStatus(env, lastNodeTarget, 'service mysql status', 'running')
diff --git a/src/com/mirantis/mk/Git.groovy b/src/com/mirantis/mk/Git.groovy
index d20c159..1564fec 100644
--- a/src/com/mirantis/mk/Git.groovy
+++ b/src/com/mirantis/mk/Git.groovy
@@ -99,12 +99,17 @@
*
* @param path Path to the git repository
* @param message A commit message
+ * @param global Use the git global config instead of the repository-local one
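+ *
+ * Example (path and message are illustrative):
+ *   commitGitChanges('/tmp/cluster-model', 'Update cluster model', 'jenkins@localhost', 'jenkins-slave', true)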
*/
-def commitGitChanges(path, message, gitEmail='jenkins@localhost', gitName='jenkins-slave') {
+def commitGitChanges(path, message, gitEmail='jenkins@localhost', gitName='jenkins-slave', global=false) {
def git_cmd
+ def global_arg = ''
+ if (global) {
+ global_arg = '--global'
+ }
dir(path) {
- sh "git config --global user.email '${gitEmail}'"
- sh "git config --global user.name '${gitName}'"
+ sh "git config ${global_arg} user.email '${gitEmail}'"
+ sh "git config ${global_arg} user.name '${gitName}'"
sh(
script: 'git add -A',
diff --git a/src/com/mirantis/mk/Openstack.groovy b/src/com/mirantis/mk/Openstack.groovy
index da2495d..29edec9 100644
--- a/src/com/mirantis/mk/Openstack.groovy
+++ b/src/com/mirantis/mk/Openstack.groovy
@@ -55,6 +55,7 @@
'cmd2>=0.9.1;python_version=="3.4"',
'cmd2>=0.9.1;python_version=="3.5"',
'python-openstackclient',
+ 'python-octaviaclient',
'python-heatclient',
'docutils'
]
diff --git a/src/com/mirantis/mk/Orchestrate.groovy b/src/com/mirantis/mk/Orchestrate.groovy
index 0e2c239..7ef7964 100644
--- a/src/com/mirantis/mk/Orchestrate.groovy
+++ b/src/com/mirantis/mk/Orchestrate.groovy
@@ -650,12 +650,12 @@
// Run k8s on first node without master.setup and master.kube-addons
salt.enforceStateWithExclude([saltId: master, target: "${first_target} ${extra_tgt}", state: "kubernetes.master", excludedStates: "kubernetes.master.setup,kubernetes.master.kube-addons"])
// Run k8s without master.setup and master.kube-addons
- salt.enforceStateWithExclude([saltId: master, target: "I@kubernetes:master ${extra_tgt}", state: "kubernetes", excludedStates: "kubernetes.master.setup,kubernetes.master.kube-addons"])
+ salt.enforceStateWithExclude([saltId: master, target: "I@kubernetes:master ${extra_tgt}", state: "kubernetes", excludedStates: "kubernetes.master.setup,kubernetes.master.kube-addons,kubernetes.client"])
} else {
// Run k8s on first node without master.setup and master.kube-addons
salt.enforceStateWithExclude([saltId: master, target: "${first_target} ${extra_tgt}", state: "kubernetes.master", excludedStates: "kubernetes.master.setup"])
// Run k8s without master.setup
- salt.enforceStateWithExclude([saltId: master, target: "I@kubernetes:master ${extra_tgt}", state: "kubernetes", excludedStates: "kubernetes.master.setup"])
+ salt.enforceStateWithExclude([saltId: master, target: "I@kubernetes:master ${extra_tgt}", state: "kubernetes", excludedStates: "kubernetes.master.setup,kubernetes.client"])
}
// Run k8s master setup
@@ -689,6 +689,13 @@
salt.runSaltProcessStep(master, "I@kubernetes:pool and not I@kubernetes:master ${extra_tgt}", 'service.restart', ['kubelet'])
}
+def installKubernetesClient(master, extra_tgt = '') {
+ def salt = new com.mirantis.mk.Salt()
+
+ // Install kubernetes client
+ salt.enforceStateWithTest([saltId: master, target: "I@kubernetes:client ${extra_tgt}", state: 'kubernetes.client'])
+}
+
def installDockerSwarm(master, extra_tgt = '') {
def salt = new com.mirantis.mk.Salt()
@@ -722,8 +729,8 @@
def installCicd(master, extra_tgt = '') {
def salt = new com.mirantis.mk.Salt()
def common = new com.mirantis.mk.Common()
- def gerrit_compound = "I@gerrit:client and ci* ${extra_tgt}"
- def jenkins_compound = "I@jenkins:client and ci* ${extra_tgt}"
+ def gerrit_compound = "I@gerrit:client ${extra_tgt}"
+ def jenkins_compound = "I@jenkins:client ${extra_tgt}"
salt.fullRefresh(master, gerrit_compound)
salt.fullRefresh(master, jenkins_compound)
@@ -731,13 +738,13 @@
// Temporary exclude cfg node from docker.client state (PROD-24934)
def dockerClientExclude = !salt.getPillar(master, 'I@salt:master', 'docker:client:stack:jenkins').isEmpty() ? 'and not I@salt:master' : ''
// Pull images first if any
- def listCIMinions = salt.getMinions(master, "ci* ${dockerClientExclude} ${extra_tgt}")
+ def listCIMinions = salt.getMinions(master, "* ${dockerClientExclude} ${extra_tgt}")
for (int i = 0; i < listCIMinions.size(); i++) {
if (!salt.getReturnValues(salt.getPillar(master, listCIMinions[i], 'docker:client:images')).isEmpty()) {
- salt.enforceState([saltId: master, target: listCIMinions[i], state: 'docker.client.images', retries: 2])
+ salt.enforceStateWithTest([saltId: master, target: listCIMinions[i], state: 'docker.client.images', retries: 2])
}
}
- salt.enforceState([saltId: master, target: "I@docker:swarm:role:master and I@jenkins:client ${dockerClientExclude} ${extra_tgt}", state: 'docker.client', retries: 2])
+ salt.enforceStateWithTest([saltId: master, target: "I@docker:swarm:role:master and I@jenkins:client ${dockerClientExclude} ${extra_tgt}", state: 'docker.client', retries: 2])
// API timeout in minutes
def wait_timeout = 10
@@ -760,6 +767,7 @@
def gerrit_host
def gerrit_http_port
def gerrit_http_scheme
+ def gerrit_http_prefix
def host_pillar = salt.getPillar(master, gerrit_compound, 'gerrit:client:server:host')
gerrit_host = salt.getReturnValues(host_pillar)
@@ -770,7 +778,10 @@
def scheme_pillar = salt.getPillar(master, gerrit_compound, 'gerrit:client:server:protocol')
gerrit_http_scheme = salt.getReturnValues(scheme_pillar)
- gerrit_master_url = gerrit_http_scheme + '://' + gerrit_host + ':' + gerrit_http_port
+ def prefix_pillar = salt.getPillar(master, gerrit_compound, 'gerrit:client:server:url_prefix')
+ gerrit_http_prefix = salt.getReturnValues(prefix_pillar)
+
+ gerrit_master_url = gerrit_http_scheme + '://' + gerrit_host + ':' + gerrit_http_port + gerrit_http_prefix
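+        // e.g. "https://cicd.example.local:8443/gerrit" when url_prefix is "/gerrit" (illustrative)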
}
@@ -783,7 +794,9 @@
// Jenkins
def jenkins_master_host_pillar = salt.getPillar(master, jenkins_compound, '_param:jenkins_master_host')
def jenkins_master_port_pillar = salt.getPillar(master, jenkins_compound, '_param:jenkins_master_port')
- jenkins_master_url = "http://${salt.getReturnValues(jenkins_master_host_pillar)}:${salt.getReturnValues(jenkins_master_port_pillar)}"
+ def jenkins_master_url_prefix_pillar = salt.getPillar(master, jenkins_compound, '_param:jenkins_master_url_prefix')
+
+ jenkins_master_url = "http://${salt.getReturnValues(jenkins_master_host_pillar)}:${salt.getReturnValues(jenkins_master_port_pillar)}${salt.getReturnValues(jenkins_master_url_prefix_pillar)}"
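+    // e.g. "http://172.16.10.254:8081/jenkins" when jenkins_master_url_prefix is "/jenkins" (illustrative)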
timeout(wait_timeout) {
common.infoMsg('Waiting for Jenkins to come up..')
@@ -798,7 +811,7 @@
withEnv(['ASK_ON_ERROR=false']){
retry(2){
try{
- salt.enforceState([saltId: master, target: "I@gerrit:client ${extra_tgt}", state: 'gerrit'])
+ salt.enforceStateWithTest([saltId: master, target: "I@gerrit:client ${extra_tgt}", state: 'gerrit'])
}catch(e){
salt.fullRefresh(master, "I@gerrit:client ${extra_tgt}")
throw e //rethrow for retry handler
@@ -806,7 +819,7 @@
}
retry(2){
try{
- salt.enforceState([saltId: master, target: "I@jenkins:client ${extra_tgt}", state: 'jenkins'])
+ salt.enforceStateWithTest([saltId: master, target: "I@jenkins:client ${extra_tgt}", state: 'jenkins'])
}catch(e){
salt.fullRefresh(master, "I@jenkins:client ${extra_tgt}")
throw e //rethrow for retry handler
diff --git a/src/com/mirantis/mk/Python.groovy b/src/com/mirantis/mk/Python.groovy
index 6a054bb..c326bf6 100644
--- a/src/com/mirantis/mk/Python.groovy
+++ b/src/com/mirantis/mk/Python.groovy
@@ -290,32 +290,16 @@
def templateOutputDir = templateBaseDir
dir(templateEnvDir) {
if (fileExists(new File(templateEnvDir, 'tox.ini').toString())) {
- tempContextFile = new File(templateEnvDir, 'tempContext.yaml').toString()
+ def tempContextFile = new File(templateEnvDir, 'tempContext.yaml').toString()
writeFile file: tempContextFile, text: context
common.warningMsg('Generating models using context:\n')
print(context)
withEnv(["CONFIG_FILE=$tempContextFile",
- "OUTPUT_DIR=${generatedModel}/classes/cluster/",
+ "OUTPUT_DIR=${modelEnv}",
]) {
print('[Cookiecutter build] Result:\n' +
sh(returnStdout: true, script: 'tox -ve generate_auto'))
}
- // dropme after impelementation new format
- sh "mkdir -p ${generatedModel}/nodes/"
- def nodeFile = "${generatedModel}/nodes/${saltMasterName}.${clusterDomain}.yml"
- def nodeString = """classes:
-- cluster.${clusterName}.infra.config
-parameters:
- _param:
- linux_system_codename: xenial
- reclass_data_revision: master
- linux:
- system:
- name: ${saltMasterName}
- domain: ${clusterDomain}
- """
- writeFile(file: nodeFile, text: nodeString)
- //
} else {
common.warningMsg("Old format: Generating model from context ${contextName}")
def productList = ["infra", "cicd", "kdt", "opencontrail", "kubernetes", "openstack", "oss", "stacklight", "ceph"]
diff --git a/src/com/mirantis/mk/Ruby.groovy b/src/com/mirantis/mk/Ruby.groovy
index dc08ce5..32b595e 100644
--- a/src/com/mirantis/mk/Ruby.groovy
+++ b/src/com/mirantis/mk/Ruby.groovy
@@ -9,9 +9,17 @@
* @param rubyVersion target ruby version (optional, default 2.2.3)
*/
def ensureRubyEnv(rubyVersion="2.4.1"){
- if(!fileExists("/var/lib/jenkins/.rbenv/versions/${rubyVersion}/bin/ruby")){
+ def ruby_build_root = "~/.rbenv/plugins/ruby-build"
+ if (!fileExists("/var/lib/jenkins/.rbenv/versions/${rubyVersion}/bin/ruby")){
//XXX: patch ruby-build because debian package is quite old
- sh "git clone https://github.com/rbenv/ruby-build.git ~/.rbenv/plugins/ruby-build"
+ // Check if git repo exists
+ if (!fileExists("${ruby_build_root}/.git")) {
+ sh "git clone https://github.com/rbenv/ruby-build.git ${ruby_build_root}"
+ } else {
+ // Ensure the repo is up-to-date
+ sh "cd ${ruby_build_root}"
+ sh "git pull"
+ }
sh "rbenv install ${rubyVersion}";
}
sh "rbenv local ${rubyVersion};rbenv exec gem update --system"
diff --git a/src/com/mirantis/mk/Salt.groovy b/src/com/mirantis/mk/Salt.groovy
index 71ed24d..7688c0e 100644
--- a/src/com/mirantis/mk/Salt.groovy
+++ b/src/com/mirantis/mk/Salt.groovy
@@ -485,6 +485,39 @@
}
/**
+ * Call this function when you need to check that all minions are available, free, and ready for command execution
+ * @param config LinkedHashMap config parameter, which contains the following:
+ * @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
+ * @param target unique identification of a minion or group of salt minions
+ * @param target_reachable unique identification of a minion or group of salt minions to check availability
+ * @param wait timeout between retries to check target minions (default 30)
+ * @param retries finite number of iterations to check minions (default 10)
+ * @param timeout timeout for the salt command if minions do not return (default 5)
+ * @param availability check that minions are also available before checking readiness (default true)
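+ *
+ * Example (target is illustrative):
+ *   checkTargetMinionsReady(['saltId': pepperEnv, 'target': 'I@galera:master', 'retries': 20])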
+ */
+def checkTargetMinionsReady(LinkedHashMap config) {
+ def common = new com.mirantis.mk.Common()
+ def saltId = config.get('saltId')
+ def target = config.get('target')
+ def target_reachable = config.get('target_reachable', target)
+ def wait = config.get('wait', 30)
+ def retries = config.get('retries', 10)
+ def timeout = config.get('timeout', 5)
+ def checkAvailability = config.get('availability', true)
+ common.retry(retries, wait) {
+ if (checkAvailability) {
+ minionsReachable(saltId, 'I@salt:master', target_reachable)
+ }
+        def running = runSaltProcessStep(saltId, target, 'saltutil.running', [], null, true, timeout)
+ for (value in running.get("return")[0].values()) {
+ if (value != []) {
+ throw new Exception("Not all salt-minions are ready for execution")
+ }
+ }
+ }
+}
+
+/**
* Run command on salt minion (salt cmd.run wrapper)
* @param saltId Salt Connection object or pepperEnv (the command will be sent using the selected method)
* @param target Get pillar target
diff --git a/src/com/mirantis/mk/SaltModelTesting.groovy b/src/com/mirantis/mk/SaltModelTesting.groovy
index c4bd4fa..585cb8b 100644
--- a/src/com/mirantis/mk/SaltModelTesting.groovy
+++ b/src/com/mirantis/mk/SaltModelTesting.groovy
@@ -277,6 +277,7 @@
def aptRepoUrl = config.get('aptRepoUrl', "")
def aptRepoGPG = config.get('aptRepoGPG', "")
def testContext = config.get('testContext', 'test')
+ def nodegenerator = config.get('nodegenerator', false)
config['envOpts'] = [
"RECLASS_ENV=${reclassEnv}", "SALT_STOPSTART_WAIT=5",
"HOSTNAME=${dockerHostname}", "CLUSTER_NAME=${clusterName}",
@@ -342,6 +343,31 @@
archiveArtifacts artifacts: "nodesinfo.tar.gz"
}
]
+    // this tool should be tested in the master branch only
+    // and not for all jobs, as cc-reclass-chunk will be used as a pilot
+ if (nodegenerator) {
+ config['runCommands']['005_Test_new_nodegenerator'] = {
+ try {
+ sh('''#!/bin/bash
+ new_generated_dir=/srv/salt/_new_generated
+ mkdir -p ${new_generated_dir}
+ nodegenerator -b /srv/salt/reclass/classes/ -o ${new_generated_dir} ${CLUSTER_NAME}
+ diff -r /srv/salt/reclass/nodes/_generated ${new_generated_dir} > /tmp/nodegenerator.diff
+ tar -czf /tmp/_generated.tar.gz /srv/salt/reclass/nodes/_generated/
+ tar -czf /tmp/_new_generated.tar.gz ${new_generated_dir}/
+ tar -czf /tmp/_model.tar.gz /srv/salt/reclass/classes/cluster/*
+ ''')
+ } catch (Exception e) {
+ print "Test new nodegenerator tool is failed: ${e}"
+ }
+ }
+ config['runFinally']['002_Archive_nodegenerator_artefact'] = {
+ sh(script: "cd /tmp; [ -f nodegenerator.diff ] && tar -czf ${env.WORKSPACE}/nodegenerator.tar.gz nodegenerator.diff _generated.tar.gz _new_generated.tar.gz _model.tar.gz", returnStatus: true)
+ if (fileExists('nodegenerator.tar.gz')) {
+ archiveArtifacts artifacts: "nodegenerator.tar.gz"
+ }
+ }
+ }
testResult = setupDockerAndTest(config)
if (testResult) {
common.infoMsg("Node test for context: ${testContext} model: ${reclassEnv} finished: SUCCESS")