Add mk libraries

Initial set of shared pipeline helpers: artifactory, common, docker,
git, http, openstack, python, salt and ssl.

Change-Id: I829b299b6329e8f4d4424c89717d432513d1eece
diff --git a/src/com/mirantis/mk/artifactory.groovy b/src/com/mirantis/mk/artifactory.groovy
new file mode 100644
index 0000000..494552e
--- /dev/null
+++ b/src/com/mirantis/mk/artifactory.groovy
@@ -0,0 +1,386 @@
+package com.mirantis.mk
+
+/**
+ *
+ * Artifactory functions
+ *
+ */
+
+/**
+ * Make generic call using Artifactory REST API and return parsed JSON
+ *
+ * @param art Artifactory connection object
+ * @param uri URI which will be appended to artifactory server base URL
+ * @param method HTTP method to use (default GET)
+ * @param data JSON data to POST or PUT
+ * @param headers Map of additional request headers
+ */
+def restCall(art, uri, method = 'GET', data = null, headers = [:]) {
+ def connection = new URL("${art.url}/api${uri}").openConnection()
+ if (method != 'GET') {
+ connection.setRequestMethod(method)
+ }
+
+ connection.setRequestProperty('User-Agent', 'jenkins-groovy')
+ connection.setRequestProperty('Accept', 'application/json')
+ connection.setRequestProperty('Authorization', "Basic " +
+ "${art.creds.username}:${art.creds.password}".bytes.encodeBase64().toString())
+
+ for (header in headers) {
+ connection.setRequestProperty(header.key, header.value)
+ }
+
+ if (data) {
+ connection.setDoOutput(true)
+ if (data instanceof String) {
+ connection.setRequestProperty('Content-Type', 'application/json')
+ dataStr = data
+ } else if (data instanceof java.io.File) {
+ connection.setRequestProperty('Content-Type', 'application/octet-stream')
+ dataStr = data.bytes
+ } else if (data instanceof byte[]) {
+ connection.setRequestProperty('Content-Type', 'application/octet-stream')
+ dataStr = data
+ } else {
+ connection.setRequestProperty('Content-Type', 'application/json')
+ dataStr = new groovy.json.JsonBuilder(data).toString()
+ }
+ if (dataStr instanceof String) {
+ def out = new OutputStreamWriter(connection.outputStream)
+ out.write(dataStr)
+ out.close()
+ } else {
+ // binary payload (byte[] from a File or raw bytes), write it to the stream directly
+ connection.outputStream.write(dataStr)
+ connection.outputStream.close()
+ }
+ }
+
+ if ( connection.responseCode >= 200 && connection.responseCode < 300 ) {
+ res = connection.inputStream.text
+ try {
+ return new groovy.json.JsonSlurperClassic().parseText(res)
+ } catch (Exception e) {
+ return res
+ }
+ } else {
+ throw new Exception("${connection.responseCode}: ${connection.errorStream?.text}")
+ }
+}
+
+/**
+ * Make GET request using Artifactory REST API and return parsed JSON
+ *
+ * @param art Artifactory connection object
+ * @param uri URI which will be appended to artifactory server base URL
+ */
+def restGet(art, uri) {
+ return restCall(art, uri)
+}
+
+/**
+ * Make PUT request using Artifactory REST API and return parsed JSON
+ *
+ * @param art Artifactory connection object
+ * @param uri URI which will be appended to artifactory server base URL
+ * @param data JSON Data to PUT
+ */
+def restPut(art, uri, data = null) {
+ return restCall(art, uri, 'PUT', data, ['Accept': '*/*'])
+}
+
+/**
+ * Make DELETE request using Artifactory REST API
+ *
+ * @param art Artifactory connection object
+ * @param uri URI which will be appended to artifactory server base URL
+ */
+def restDelete(art, uri) {
+ return restCall(art, uri, 'DELETE', null, ['Accept': '*/*'])
+}
+
+/**
+ * Make POST request using Artifactory REST API and return parsed JSON
+ *
+ * @param art Artifactory connection object
+ * @param uri URI which will be appended to artifactory server base URL
+ * @param data JSON Data to POST
+ */
+def restPost(art, uri, data = null) {
+ return restCall(art, uri, 'POST', data, ['Accept': '*/*'])
+}
+
+/**
+ * Query artifacts by properties
+ *
+ * @param art Artifactory connection object
+ * @param properties String or list of properties in key=value format
+ * @param repo Optional repository to search in
+ */
+def findArtifactByProperties(art, properties, repo = null) {
+ query = parseProperties(properties)
+ if (repo) {
+ query = query + "&repos=${repo}"
+ }
+ res = restGet(art, "/search/prop?${query}")
+ return res.results
+}
+
+/**
+ * Parse properties string or map and return URL-encoded string
+ *
+ * @param properties string or key,value map
+ */
+def parseProperties(properties) {
+ if (properties instanceof String) {
+ return properties
+ } else {
+ props = []
+ for (e in properties) {
+ props.push("${e.key}=${e.value}")
+ }
+ props = props.join('|')
+ return props
+ }
+}
+
+/**
+ * Set single property or list of properties to existing artifact
+ *
+ * @param art Artifactory connection object
+ * @param name Name of artifact
+ * @param version Artifact's version, eg. Docker image tag
+ * @param properties String or list of properties in key=value format
+ * @param recursive Set properties recursively (default false)
+ */
+def setProperty(art, name, version, properties, recursive = 0) {
+ props = parseProperties(properties)
+ restPut(art, "/storage/${art.outRepo}/${name}/${version}?properties=${props}&recursive=${recursive}")
+}
+
+/**
+ * Artifactory connection and context parameters
+ *
+ * @param url Artifactory server URL
+ * @param dockerRegistryBase Base hostname of the docker registry
+ * @param dockerRegistrySsl Use https to access the docker registry
+ * @param outRepo Output repository name used in context of this
+ * connection
+ * @param credentialsId ID of credentials store entry
+ */
+def connection(url, dockerRegistryBase, dockerRegistrySsl, outRepo, credentialsId = "artifactory") {
+ def common = new com.mirantis.mk.common()
+ params = [
+ "url": url,
+ "credentialsId": credentialsId,
+ "docker": [
+ "base": dockerRegistryBase,
+ "ssl": dockerRegistrySsl
+ ],
+ "outRepo": outRepo,
+ "creds": common.getPasswordCredentials(credentialsId)
+ ]
+
+ if (dockerRegistrySsl) {
+ params["docker"]["proto"] = "https"
+ } else {
+ params["docker"]["proto"] = "http"
+ }
+ params["docker"]["url"] = "${params.docker.proto}://${params.outRepo}.${params.docker.base}"
+
+ return params
+}
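+
+/*
+ * Example usage (a sketch; the server URL, registry base, repository and
+ * credentials ID below are illustrative placeholders, not defined by this library):
+ *
+ *   def artifactory = new com.mirantis.mk.artifactory()
+ *   def art = artifactory.connection(
+ *       "https://artifactory.example.com/artifactory",
+ *       "registry.example.com",
+ *       true,
+ *       "docker-dev-local"
+ *   )
+ *   def found = artifactory.findArtifactByProperties(art, "timestamp=20170101120000", art.outRepo)
+ */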
+
+/**
+ * Push docker image and set artifact properties
+ *
+ * @param art Artifactory connection object
+ * @param img Docker image object
+ * @param imgName Name of docker image
+ * @param properties Map of additional artifact properties
+ * @param timestamp Build timestamp
+ * @param latest Push latest tag if set to true (default true)
+ */
+def dockerPush(art, img, imgName, properties, timestamp, latest = true) {
+ docker.withRegistry(art.docker.url, art.credentialsId) {
+ img.push()
+ // Also mark latest image
+ img.push("latest")
+ }
+
+ properties["build.number"] = currentBuild.build().environment.BUILD_NUMBER
+ properties["build.name"] = currentBuild.build().environment.JOB_NAME
+ properties["timestamp"] = timestamp
+
+ /* Set artifact properties */
+ setProperty(
+ art,
+ imgName,
+ timestamp,
+ properties
+ )
+
+ // ..and the same for latest
+ if (latest == true) {
+ setProperty(
+ art,
+ imgName,
+ "latest",
+ properties
+ )
+ }
+}
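+
+/*
+ * Example usage (a sketch; assumes an image "myapp" was already built and
+ * tagged with the timestamp, and that "art" is a connection() object):
+ *
+ *   def common = new com.mirantis.mk.common()
+ *   def timestamp = common.getDatetime()
+ *   def img = docker.image("myapp:${timestamp}")
+ *   artifactory.dockerPush(art, img, "myapp", ["vcs.ref": "master"], timestamp)
+ */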
+
+/**
+ * Promote docker image to another environment
+ *
+ * @param art Artifactory connection object
+ * @param imgName Name of docker image
+ * @param tag Tag to promote
+ * @param env Environment (repository suffix) to promote to
+ * @param keep Keep artifact in source repository (copy, default true)
+ * @param latest Push latest tag if set to true (default true)
+ */
+def dockerPromote(art, imgName, tag, env, keep = true, latest = true) {
+ /* XXX: promotion this way doesn't work
+ restPost(art, "/docker/${art.outRepo}/v2/promote", [
+ "targetRepo": "${art.outRepo}-${env}",
+ "dockerRepository": imgName,
+ "tag": tag,
+ "copy": keep ? true : false
+ ])
+ */
+
+ action = keep ? "copy" : "move"
+ restPost(art, "/${action}/${art.outRepo}/${imgName}/${tag}?to=${art.outRepo}-${env}/${imgName}/${tag}")
+ if (latest == true) {
+ dockerUrl = "${art.docker.proto}://${art.outRepo}-${env}.${art.docker.base}"
+ docker.withRegistry(dockerUrl, art.credentialsId) {
+ img = docker.image("${imgName}:$tag")
+ img.pull()
+ img.push("latest")
+ }
+ }
+}
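+
+/*
+ * Example usage (a sketch; "staging" is an illustrative repository suffix,
+ * so the tag is copied from <outRepo> to <outRepo>-staging):
+ *
+ *   artifactory.dockerPromote(art, "myapp", "20170101120000", "staging")
+ */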
+
+/**
+ * Set offline parameter to repositories
+ *
+ * @param art Artifactory connection object
+ * @param repos List of base repositories
+ * @param suffix Suffix to append to new repository names
+ */
+def setOffline(art, repos, suffix) {
+ for (repo in repos) {
+ repoName = "${repo}-${suffix}"
+ restPost(art, "/repositories/${repoName}", ['offline': true])
+ }
+ return
+}
+
+/**
+ * Create repositories based on timestamp or other suffix from already
+ * existing repository
+ *
+ * @param art Artifactory connection object
+ * @param repos List of base repositories
+ * @param suffix Suffix to append to new repository names
+ */
+def createRepos(art, repos, suffix) {
+ def created = []
+ for (repo in repos) {
+ repoNewName = "${repo}-${suffix}"
+ repoOrig = restGet(art, "/repositories/${repo}")
+ repoOrig.key = repoNewName
+ repoNew = restPut(art, "/repositories/${repoNewName}", repoOrig)
+ created.push(repoNewName)
+ }
+ return created
+}
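+
+/*
+ * Example usage (a sketch; repository names are illustrative -- creates
+ * timestamp-suffixed copies, later marks them offline and removes them):
+ *
+ *   def suffix = common.getDatetime()
+ *   def repos = artifactory.createRepos(art, ["ubuntu-extra", "docker-dev"], suffix)
+ *   // ... use the repositories ...
+ *   artifactory.setOffline(art, ["ubuntu-extra", "docker-dev"], suffix)
+ *   artifactory.deleteRepos(art, ["ubuntu-extra", "docker-dev"], suffix)
+ */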
+
+/**
+ * Delete repositories based on timestamp or other suffix
+ *
+ * @param art Artifactory connection object
+ * @param repos List of base repositories
+ * @param suffix Suffix of the repository names to delete
+ */
+def deleteRepos(art, repos, suffix) {
+ def deleted = []
+ for (repo in repos) {
+ repoName = "${repo}-${suffix}"
+ restDelete(art, "/repositories/${repoName}")
+ deleted.push(repoName)
+ }
+ return deleted
+}
+
+/**
+ * Upload debian package
+ *
+ * @param art Artifactory connection object
+ * @param file File path
+ * @param properties Map with additional artifact properties
+ * @param distribution Debian distribution (deb.distribution property)
+ * @param component Debian component (deb.component property)
+ * @param timestamp Build timestamp
+ * @param data Optional raw payload to upload instead of the file contents
+ */
+def uploadDebian(art, file, properties, distribution, component, timestamp, data = null) {
+ def fh
+ if (file instanceof java.io.File) {
+ fh = file
+ } else {
+ fh = new File(file)
+ }
+
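+ // Debian package file names follow <name>_<version>_<arch>.deb, so the architecture is the last underscore-separated field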
+ def arch = fh.name.split('_')[-1].split('\\.')[0]
+ if (data) {
+ restPut(art, "/${art.outRepo}/pool/${fh.name};deb.distribution=${distribution};deb.component=${component};deb.architecture=${arch}", data)
+ } else {
+ restPut(art, "/${art.outRepo}/pool/${fh.name};deb.distribution=${distribution};deb.component=${component};deb.architecture=${arch}", fh)
+ }
+
+ /* Set artifact properties */
+ properties["build.number"] = currentBuild.build().environment.BUILD_NUMBER
+ properties["build.name"] = currentBuild.build().environment.JOB_NAME
+ properties["timestamp"] = timestamp
+ setProperty(
+ art,
+ "pool/${fh.name}",
+ timestamp,
+ properties
+ )
+}
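+
+/*
+ * Example usage (a sketch; the package path, distribution and component are
+ * illustrative values):
+ *
+ *   artifactory.uploadDebian(
+ *       art,
+ *       "build/pkgs/myapp_1.0-1_amd64.deb",
+ *       ["vcs.ref": "master"],
+ *       "xenial",
+ *       "main",
+ *       common.getDatetime()
+ *   )
+ */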
+
+/**
+ * Build step to upload docker image. For use with eg. parallel
+ *
+ * @param art Artifactory connection object
+ * @param img Image name to push
+ * @param properties Map with additional artifact properties
+ * @param timestamp Image tag
+ */
+def uploadDockerImageStep(art, img, properties, timestamp) {
+ return {
+ println "Uploading artifact ${img} into ${art.outRepo}"
+ dockerPush(
+ art,
+ docker.image("${img}:${timestamp}"),
+ img,
+ properties,
+ timestamp
+ )
+ }
+}
+
+/**
+ * Build step to upload package. For use with eg. parallel
+ *
+ * @param art Artifactory connection object
+ * @param file File path
+ * @param properties Map with additional artifact properties
+ * @param distribution Debian distribution (deb.distribution property)
+ * @param component Debian component (deb.component property)
+ * @param timestamp Build timestamp
+ */
+def uploadPackageStep(art, file, properties, distribution, component, timestamp) {
+ return {
+ uploadDebian(
+ art,
+ file,
+ properties,
+ distribution,
+ component,
+ timestamp
+ )
+ }
+}
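+
+/*
+ * Example usage (a sketch combining the *Step helpers with the built-in
+ * parallel step; image and package names are illustrative):
+ *
+ *   def uploadSteps = [
+ *       "myapp-image": artifactory.uploadDockerImageStep(art, "myapp", props, timestamp),
+ *       "myapp-deb": artifactory.uploadPackageStep(art, "build/myapp_1.0-1_amd64.deb", props, "xenial", "main", timestamp)
+ *   ]
+ *   parallel uploadSteps
+ */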
diff --git a/src/com/mirantis/mk/common.groovy b/src/com/mirantis/mk/common.groovy
new file mode 100644
index 0000000..f64b9db
--- /dev/null
+++ b/src/com/mirantis/mk/common.groovy
@@ -0,0 +1,177 @@
+package com.mirantis.mk
+
+/**
+ *
+ * Common functions
+ *
+ */
+
+/**
+ * Generate current timestamp
+ *
+ * @param format Defaults to yyyyMMddHHmmss
+ */
+def getDatetime(format="yyyyMMddHHmmss") {
+ def now = new Date();
+ return now.format(format, TimeZone.getTimeZone('UTC'));
+}
+
+/**
+ * Abort build, wait for some time and ensure we will terminate
+ */
+def abortBuild() {
+ currentBuild.build().doStop()
+ sleep(180)
+ // just to be sure we will terminate
+ throw new InterruptedException()
+}
+
+/**
+ * Print informational message
+ *
+ * @param msg
+ * @param color Colorful output or not
+ */
+def infoMsg(msg, color = true) {
+ printMsg(msg, "cyan")
+}
+
+/**
+ * Print error message
+ *
+ * @param msg
+ * @param color Colorful output or not
+ */
+def errorMsg(msg, color = true) {
+ printMsg(msg, "red")
+}
+
+/**
+ * Print success message
+ *
+ * @param msg
+ * @param color Colorful output or not
+ */
+def successMsg(msg, color = true) {
+ printMsg(msg, "green")
+}
+
+/**
+ * Print warning message
+ *
+ * @param msg
+ * @param color Colorful output or not
+ */
+def warningMsg(msg, color = true) {
+ printMsg(msg, "blue")
+}
+
+/**
+ * Print message
+ *
+ * @param msg Message to be printed
+ * @param color Color name to use for output, or false for plain output (default)
+ */
+def printMsg(msg, color = false) {
+ colors = [
+ 'red' : '\u001B[31m',
+ 'black' : '\u001B[30m',
+ 'green' : '\u001B[32m',
+ 'yellow': '\u001B[33m',
+ 'blue' : '\u001B[34m',
+ 'purple': '\u001B[35m',
+ 'cyan' : '\u001B[36m',
+ 'white' : '\u001B[37m',
+ 'reset' : '\u001B[0m'
+ ]
+ if (color != false) {
+ wrap([$class: 'AnsiColorBuildWrapper']) {
+ print "${colors[color]}${msg}${colors.reset}"
+ }
+ } else {
+ print msg
+ }
+}
+
+/**
+ * Traverse directory structure and return list of files
+ *
+ * @param path Path to search
+ * @param type Type of files to search (groovy.io.FileType.FILES)
+ */
+@NonCPS
+def getFiles(path, type=groovy.io.FileType.FILES) {
+ files = []
+ new File(path).eachFile(type) {
+ files << it
+ }
+ return files
+}
+
+/**
+ * Helper method to convert map into form of list of [key,value] to avoid
+ * unserializable exceptions
+ *
+ * @param m Map
+ */
+@NonCPS
+def entries(m) {
+ m.collect {k, v -> [k, v]}
+}
+
+/**
+ * Opposite of the built-in parallel step: run a map of steps serially
+ *
+ * @param steps Map of String<name>: CPSClosure2<step>
+ */
+def serial(steps) {
+ stepsArray = entries(steps)
+ for (i=0; i < stepsArray.size; i++) {
+ s = stepsArray[i]
+ dummySteps = ["${s[0]}": s[1]]
+ parallel dummySteps
+ }
+}
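+
+/*
+ * Example usage (a sketch; takes the same map of closures that parallel
+ * would accept, but runs them one at a time and in order):
+ *
+ *   def common = new com.mirantis.mk.common()
+ *   common.serial([
+ *       "first": { echo "runs first" },
+ *       "second": { echo "runs second" }
+ *   ])
+ */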
+
+/**
+ * Get password credentials from store
+ *
+ * @param id Credentials name
+ */
+def getPasswordCredentials(id) {
+ def creds = com.cloudbees.plugins.credentials.CredentialsProvider.lookupCredentials(
+ com.cloudbees.plugins.credentials.common.StandardUsernamePasswordCredentials.class,
+ jenkins.model.Jenkins.instance
+ )
+
+ for (Iterator<String> credsIter = creds.iterator(); credsIter.hasNext();) {
+ c = credsIter.next();
+ if ( c.id == id ) {
+ return c;
+ }
+ }
+
+ throw new Exception("Could not find credentials for ID ${id}")
+}
+
+/**
+ * Get SSH credentials from store
+ *
+ * @param id Credentials name
+ */
+def getSshCredentials(id) {
+ def creds = com.cloudbees.plugins.credentials.CredentialsProvider.lookupCredentials(
+ com.cloudbees.plugins.credentials.common.StandardUsernameCredentials.class,
+ jenkins.model.Jenkins.instance
+ )
+
+ for (Iterator<String> credsIter = creds.iterator(); credsIter.hasNext();) {
+ c = credsIter.next();
+ if ( c.id == id ) {
+ return c;
+ }
+ }
+
+ throw new Exception("Could not find credentials for ID ${id}")
+}
diff --git a/src/com/mirantis/mk/docker.groovy b/src/com/mirantis/mk/docker.groovy
new file mode 100644
index 0000000..5ea83ee
--- /dev/null
+++ b/src/com/mirantis/mk/docker.groovy
@@ -0,0 +1,31 @@
+package com.mirantis.mk
+
+/**
+ *
+ * Docker functions
+ *
+ */
+
+/**
+ * Build step to build docker image. For use with eg. parallel
+ *
+ * @param img Image name
+ * @param baseImg Base image to use (can be empty)
+ * @param dockerFile Dockerfile to use
+ * @param timestamp Image tag
+ */
+def buildDockerImageStep(img, baseImg, dockerFile, timestamp) {
+ File df = new File(dockerFile);
+ return {
+ if (baseImg) {
+ sh "git checkout -f ${dockerfile}; sed -i -e 's,^FROM.*,FROM ${baseImg},g' ${dockerFile}"
+ }
+ docker.build(
+ "${img}:${timestamp}",
+ [
+ "-f ${dockerFile}",
+ df.getParent()
+ ].join(' ')
+ )
+ }
+}
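+
+/*
+ * Example usage (a sketch; builds two images in parallel from Dockerfiles in
+ * the workspace, names and paths are illustrative):
+ *
+ *   def dockerLib = new com.mirantis.mk.docker()
+ *   def timestamp = new com.mirantis.mk.common().getDatetime()
+ *   parallel([
+ *       "api": dockerLib.buildDockerImageStep("myorg/api", "", "docker/api/Dockerfile", timestamp),
+ *       "worker": dockerLib.buildDockerImageStep("myorg/worker", "ubuntu:16.04", "docker/worker/Dockerfile", timestamp)
+ *   ])
+ */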
diff --git a/src/com/mirantis/mk/git.groovy b/src/com/mirantis/mk/git.groovy
new file mode 100644
index 0000000..411372d
--- /dev/null
+++ b/src/com/mirantis/mk/git.groovy
@@ -0,0 +1,89 @@
+package com.mirantis.mk
+
+/**
+ *
+ * Git functions
+ *
+ */
+
+/**
+ * Checkout single git repository
+ *
+ * @param path Directory to checkout repository to
+ * @param url Source Git repository URL
+ * @param branch Source Git repository branch
+ * @param credentialsId Credentials ID to use for source Git
+ */
+def checkoutGitRepository(path, url, branch, credentialsId = null){
+ checkout([
+ $class: 'GitSCM',
+ branches: [[name: "*/${branch}"]],
+ doGenerateSubmoduleConfigurations: false,
+ extensions: [[$class: 'RelativeTargetDirectory', relativeTargetDir: path]],
+ submoduleCfg: [],
+ userRemoteConfigs: [[url: url, credentialsId: credentialsId]]
+ ])
+ dir(path) {
+ sh(returnStdout: true, script: 'git rev-parse HEAD').trim()
+ }
+}
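+
+/*
+ * Example usage (a sketch; repository URL is illustrative, credentials are optional):
+ *
+ *   def gitLib = new com.mirantis.mk.git()
+ *   gitLib.checkoutGitRepository("source", "https://github.com/example/project.git", "master")
+ */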
+
+/**
+ * Parse HEAD of current directory and return commit hash
+ */
+def getGitCommit() {
+ git_commit = sh (
+ script: 'git rev-parse HEAD',
+ returnStdout: true
+ ).trim()
+ return git_commit
+}
+
+/**
+ * Checkout git repositories in parallel
+ *
+ * @param path Directory to checkout to
+ * @param url Git repository url
+ * @param branch Git repository branch
+ * @param credentialsId Credentials ID to use
+ * @param poll Poll automatically
+ * @param clean Clean status
+ */
+def checkoutGitParallel(path, url, branch, credentialsId = null, poll = true, clean = true) {
+ return {
+ print "Checking out ${url}, branch ${branch} into ${path}"
+ dir(path) {
+ git url: url,
+ branch: branch,
+ credentialsId: credentialsId,
+ poll: poll,
+ clean: clean
+ }
+ }
+}
+
+/**
+ * Mirror git repository to target URL
+ *
+ * @param sourceUrl Source git repository URL
+ * @param targetUrl Target git repository URL
+ * @param credentialsId Credentials ID to use for the target repository
+ * @param branches List or comma-separated string of branches to mirror
+ * @param followTags Push tags as well (default false)
+ * @param gitEmail Committer e-mail used for merge commits
+ * @param gitUsername Committer name used for merge commits
+ */
+def mirrorRepository(sourceUrl, targetUrl, credentialsId, branches, followTags = false, gitEmail = 'jenkins@localhost', gitUsername = 'Jenkins') {
+ def ssl = new com.mirantis.mk.ssl()
+ if (branches instanceof String) {
+ branches = branches.tokenize(',')
+ }
+ ssl.prepareSshAgentKey(credentialsId)
+ ssl.ensureKnownHosts(targetUrl)
+
+ sh "git remote | grep target || git remote add target ${TARGET_URL}"
+ agentSh "git remote update --prune"
+ for (i=0; i < branches.size; i++) {
+ branch = branches[i]
+ sh "git branch | grep ${branch} || git checkout -b ${branch} origin/${branch}"
+ sh "git branch | grep ${branch} && git checkout ${branch} && git reset --hard origin/${branch}"
+
+ sh "git config --global user.email '${gitEmail}'"
+ sh "git config --global user.name '${gitUsername}'"
+ sh "git ls-tree target/${branch} && git merge --no-edit --ff target/${branch} || echo 'Target repository is empty, skipping merge'"
+ followTagsArg = followTags ? "--follow-tags" : ""
+ agentSh "git push ${followTagsArg} target HEAD:${branch}"
+ }
+}
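+
+/*
+ * Example usage (a sketch; assumes the source repository is already checked
+ * out in the current workspace, URLs and credentials ID are illustrative):
+ *
+ *   gitLib.mirrorRepository(
+ *       "https://github.com/example/project.git",
+ *       "ssh://git@mirror.example.com/project.git",
+ *       "gerrit",
+ *       "master,stable"
+ *   )
+ */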
diff --git a/src/com/mirantis/mk/http.groovy b/src/com/mirantis/mk/http.groovy
new file mode 100644
index 0000000..c0bf70b
--- /dev/null
+++ b/src/com/mirantis/mk/http.groovy
@@ -0,0 +1,102 @@
+package com.mirantis.mk
+/**
+ *
+ * HTTP functions
+ *
+ */
+
+/**
+ * Make generic HTTP call and return parsed JSON
+ *
+ * @param url URL to make the request against
+ * @param method HTTP method to use (default GET)
+ * @param data JSON data to POST or PUT
+ * @param headers Map of additional request headers
+ */
+@NonCPS
+def sendHttpRequest(url, method = 'GET', data = null, headers = [:]) {
+
+ def connection = new URL(url).openConnection()
+ if (method != 'GET') {
+ connection.setRequestMethod(method)
+ }
+
+ if (data) {
+ headers['Content-Type'] = 'application/json'
+ }
+
+ headers['User-Agent'] = 'jenkins-groovy'
+ headers['Accept'] = 'application/json'
+
+ for (header in headers) {
+ connection.setRequestProperty(header.key, header.value)
+ }
+
+ if (data) {
+ connection.setDoOutput(true)
+ if (data instanceof String) {
+ dataStr = data
+ } else {
+ dataStr = new groovy.json.JsonBuilder(data).toString()
+ }
+ def output = new OutputStreamWriter(connection.outputStream)
+ //infoMsg("[HTTP] Request URL: ${url}, method: ${method}, headers: ${headers}, content: ${dataStr}")
+ output.write(dataStr)
+ output.close()
+ }
+
+ if ( connection.responseCode >= 200 && connection.responseCode < 300 ) {
+ response = connection.inputStream.text
+ try {
+ response_content = new groovy.json.JsonSlurperClassic().parseText(response)
+ } catch (groovy.json.JsonException e) {
+ response_content = response
+ }
+ //successMsg("[HTTP] Response: code ${connection.responseCode}")
+ return response_content
+ } else {
+ //errorMsg("[HTTP] Response: code ${connection.responseCode}")
+ throw new Exception("${connection.responseCode}: ${connection.errorStream?.text}")
+ }
+
+}
+
+/**
+ * Make HTTP GET request
+ *
+ * @param url URL to be requested
+ * @param data Optional JSON data to send
+ * @param headers Map of additional request headers
+ */
+def sendHttpGetRequest(url, data = null, headers = [:]) {
+ return sendHttpRequest(url, 'GET', data, headers)
+}
+
+/**
+ * Make HTTP POST request
+ *
+ * @param url URL to be requested
+ * @param data JSON data to POST
+ * @param headers Map of additional request headers
+ */
+def sendHttpPostRequest(url, data = null, headers = [:]) {
+ return sendHttpRequest(url, 'POST', data, headers)
+}
+
+/**
+ * Make HTTP PUT request
+ *
+ * @param url URL to be requested
+ * @param data JSON data to PUT
+ * @param headers Map of additional request headers
+ */
+def sendHttpPutRequest(url, data = null, headers = [:]) {
+ return sendHttpRequest(url, 'PUT', data, headers)
+}
+
+/**
+ * Make HTTP DELETE request
+ *
+ * @param url URL to be requested
+ * @param data Optional JSON data to send
+ * @param headers Map of additional request headers
+ */
+def sendHttpDeleteRequest(url, data = null, headers = [:]) {
+ return sendHttpRequest(url, 'DELETE', data, headers)
+}
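+
+/*
+ * Example usage (a sketch; the endpoint is an illustrative placeholder):
+ *
+ *   def http = new com.mirantis.mk.http()
+ *   def status = http.sendHttpGetRequest("http://api.example.com/status")
+ *   def created = http.sendHttpPostRequest("http://api.example.com/items", ["name": "demo"])
+ */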
diff --git a/src/com/mirantis/mk/openstack.groovy b/src/com/mirantis/mk/openstack.groovy
new file mode 100644
index 0000000..1d6bcca
--- /dev/null
+++ b/src/com/mirantis/mk/openstack.groovy
@@ -0,0 +1,296 @@
+package com.mirantis.mk
+
+/**
+ *
+ * Openstack functions
+ *
+ */
+
+/**
+ * Install OpenStack service clients in isolated environment
+ *
+ * @param path Path where virtualenv is created
+ * @param version Version of the OpenStack clients
+ */
+
+def setupOpenstackVirtualenv(path, version = 'kilo'){
+ def python = new com.mirantis.mk.python()
+
+ def openstack_kilo_packages = [
+ 'python-cinderclient>=1.3.1,<1.4.0',
+ 'python-glanceclient>=0.19.0,<0.20.0',
+ 'python-heatclient>=0.6.0,<0.7.0',
+ 'python-keystoneclient>=1.6.0,<1.7.0',
+ 'python-neutronclient>=2.2.6,<2.3.0',
+ 'python-novaclient>=2.19.0,<2.20.0',
+ 'python-swiftclient>=2.5.0,<2.6.0',
+ 'oslo.config>=2.2.0,<2.3.0',
+ 'oslo.i18n>=2.3.0,<2.4.0',
+ 'oslo.serialization>=1.8.0,<1.9.0',
+ 'oslo.utils>=1.4.0,<1.5.0',
+ ]
+
+ def openstack_latest_packages = openstack_kilo_packages
+
+ if(version == 'kilo') {
+ requirements = openstack_kilo_packages
+ }
+ else if(version == 'liberty') {
+ requirements = openstack_kilo_packages
+ }
+ else if(version == 'mitaka') {
+ requirements = openstack_kilo_packages
+ }
+ else {
+ requirements = openstack_latest_packages
+ }
+ python.setupVirtualenv(path, 'python2', requirements)
+}
+
+/**
+ * create connection to OpenStack API endpoint
+ *
+ * @param url OpenStack API endpoint address
+ * @param credentialsId Credentials to the OpenStack API
+ * @param project OpenStack project to connect to
+ */
+def createOpenstackEnv(url, credentialsId, project) {
+ def common = new com.mirantis.mk.common()
+ creds = common.getPasswordCredentials(credentialsId)
+ params = [
+ "OS_USERNAME": creds.username,
+ "OS_PASSWORD": creds.password.toString(),
+ "OS_TENANT_NAME": project,
+ "OS_AUTH_URL": url,
+ "OS_AUTH_STRATEGY": "keystone"
+ ]
+ res = ""
+ for ( e in params ) {
+ res = "${res}export ${e.key}=${e.value}\n"
+ }
+ writeFile file: "${env.WORKSPACE}/keystonerc", text: res
+ return "${env.WORKSPACE}/keystonerc"
+ //return res.substring(1)
+}
+
+/**
+ * Run command with OpenStack env params and optional python env
+ *
+ * @param cmd Command to be executed
+ * @param venv Path to the file with exported OpenStack credentials
+ * @param path Optional path to virtualenv with specific clients
+ */
+def runOpenstackCommand(cmd, venv, path = null) {
+ def python = new com.mirantis.mk.python()
+ openstackCmd = ". ${venv}; ${cmd}"
+ if (path) {
+ output = python.runVirtualenvCommand(path, openstackCmd)
+ }
+ else {
+ echo("[Command]: ${openstackCmd}")
+ output = sh (
+ script: openstackCmd,
+ returnStdout: true
+ ).trim()
+ }
+ return output
+}
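+
+/*
+ * Example usage (a sketch; endpoint, credentials ID and project are
+ * illustrative placeholders):
+ *
+ *   def openstack = new com.mirantis.mk.openstack()
+ *   def venvPath = "${env.WORKSPACE}/venv"
+ *   openstack.setupOpenstackVirtualenv(venvPath, "kilo")
+ *   def rc = openstack.createOpenstackEnv("http://keystone.example.com:5000/v2.0", "openstack", "admin")
+ *   openstack.runOpenstackCommand("nova list", rc, venvPath)
+ */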
+
+/**
+ * Get OpenStack Keystone token for current credentials
+ *
+ * @param client Connection parameters for OpenStack API endpoint
+ * @param path Optional path to the custom virtualenv
+ */
+def getKeystoneToken(client, path = null) {
+ def python = new com.mirantis.mk.python()
+ cmd = "keystone token-get"
+ outputTable = runOpenstackCommand(cmd, client, path)
+ output = python.parseTextTable(outputTable, 'item', 'prettytable')
+ return output
+}
+
+/**
+ * Create Heat environment file from given parameters
+ *
+ * @param file Path of the environment file to write
+ * @param environment List of [key, value] parameter pairs
+ * @param original_file Optional existing environment file to use as base
+ */
+def createHeatEnv(file, environment = [], original_file = null) {
+ if (original_file) {
+ envString = readFile file: original_file
+ }
+ else {
+ envString = "parameters:\n"
+ }
+ for ( int i = 0; i < environment.size; i++ ) {
+ envString = "${envString} ${environment.get(i).get(0)}: ${environment.get(i).get(1)}\n"
+ }
+ writeFile file: file, text: envString
+}
+
+/**
+ * Create new OpenStack Heat stack
+ *
+ * @param client Connection parameters for OpenStack API endpoint
+ * @param name Name of the new Heat stack
+ * @param template HOT template for the new Heat stack
+ * @param params List of [key, value] environment parameters for the new Heat stack
+ * @param environment Optional name of the base environment file
+ * @param path Optional path to the custom virtualenv
+ */
+def createHeatStack(client, name, template, params = [], environment = null, path = null) {
+ def python = new com.mirantis.mk.python()
+ templateFile = "${env.WORKSPACE}/template/template/${template}.hot"
+ if (environment) {
+ envFile = "${env.WORKSPACE}/template/env/${template}/${name}.env"
+ envSource = "${env.WORKSPACE}/template/env/${template}/${environment}.env"
+ createHeatEnv(envFile, params, envSource)
+ }
+ else {
+ envFile = "${env.WORKSPACE}/template/${name}.env"
+ createHeatEnv(envFile, params)
+ }
+ cmd = "heat stack-create -f ${templateFile} -e ${envFile} ${name}"
+ dir("${env.WORKSPACE}/template/template") {
+ outputTable = runOpenstackCommand(cmd, client, path)
+ }
+ output = python.parseTextTable(outputTable, 'item', 'prettytable')
+
+ i = 1
+ while (true) {
+ status = getHeatStackStatus(client, name, path)
+ echo("[Heat Stack] Status: ${status}, Check: ${i}")
+ if (status == 'CREATE_FAILED') {
+ info = getHeatStackInfo(client, name, path)
+ throw new Exception(info.stack_status_reason)
+ }
+ else if (status == 'CREATE_COMPLETE') {
+ info = getHeatStackInfo(client, name, path)
+ echo(info.stack_status_reason)
+ break
+ }
+ sh('sleep 5s')
+ i++
+ }
+ echo("[Heat Stack] Status: ${status}")
+}
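+
+/*
+ * Example usage (a sketch; expects the HOT template and env files in the
+ * template/ directory layout referenced above, names are illustrative):
+ *
+ *   openstack.createHeatStack(rc, "demo-lab", "mk_lab", [["instance_count", "3"]], "devcloud", venvPath)
+ *   def saltMasterIp = openstack.getHeatStackOutputParam(rc, "demo-lab", "salt_master_ip", venvPath)
+ */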
+
+/**
+ * Get life cycle status for existing OpenStack Heat stack
+ *
+ * @param client Connection parameters for OpenStack API endpoint
+ * @param name Name of the managed Heat stack instance
+ * @param path Optional path to the custom virtualenv
+ */
+def getHeatStackStatus(client, name, path = null) {
+ cmd = 'heat stack-list | awk -v stack='+name+' \'{if ($4==stack) print $6}\''
+ return runOpenstackCommand(cmd, client, path)
+}
+
+/**
+ * Get info about existing OpenStack Heat stack
+ *
+ * @param env Connection parameters for OpenStack API endpoint
+ * @param name Name of the managed Heat stack instance
+ * @param path Optional path to the custom virtualenv
+ */
+def getHeatStackInfo(env, name, path = null) {
+ def python = new com.mirantis.mk.python()
+ cmd = "heat stack-show ${name}"
+ outputTable = runOpenstackCommand(cmd, env, path)
+ output = python.parseTextTable(outputTable, 'item', 'prettytable')
+ return output
+}
+
+/**
+ * Get existing OpenStack Heat stack output parameter
+ *
+ * @param env Connection parameters for OpenStack API endpoint
+ * @param name Name of the managed Heat stack
+ * @param outputParam Name of the output parameter
+ * @param path Optional path to the custom virtualenv
+ */
+def getHeatStackOutputParam(env, name, outputParam, path = null) {
+ cmd = "heat output-show ${name} ${outputParam}"
+ output = runOpenstackCommand(cmd, env, path)
+ return output.substring(1, output.length()-1)
+}
+
+/**
+ * List all resources from existing OpenStack Heat stack
+ *
+ * @param env Connection parameters for OpenStack API endpoint
+ * @param name Name of the managed Heat stack instance
+ * @param path Optional path to the custom virtualenv
+ */
+def getHeatStackResources(env, name, path = null) {
+ def python = new com.mirantis.mk.python()
+ cmd = "heat resource-list ${name}"
+ outputTable = runOpenstackCommand(cmd, env, path)
+ output = python.parseTextTable(outputTable, 'list', 'prettytable')
+ return output
+}
+
+/**
+ * Get info about resource from existing OpenStack Heat stack
+ *
+ * @param env Connection parameters for OpenStack API endpoint
+ * @param name Name of the managed Heat stack instance
+ * @param path Optional path to the custom virtualenv
+ */
+def getHeatStackResourceInfo(env, name, resource, path = null) {
+ def python = new com.mirantis.mk.python()
+ cmd = "heat resource-show ${name} ${resource}"
+ outputTable = runOpenstackCommand(cmd, env, path)
+ output = python.parseTextTable(outputTable, 'item', 'prettytable')
+ return output
+}
+
+/**
+ * Update existing OpenStack Heat stack
+ *
+ * @param env Connection parameters for OpenStack API endpoint
+ * @param name Name of the managed Heat stack instance
+ * @param path Optional path to the custom virtualenv
+ */
+def updateHeatStack(env, name, path = null) {
+ def python = new com.mirantis.mk.python()
+ cmd = "heat stack-update ${name}"
+ outputTable = runOpenstackCommand(cmd, env, path)
+ output = python.parseTextTable(outputTable, 'item', 'prettytable')
+ return output
+}
+
+/**
+ * Delete existing OpenStack Heat stack
+ *
+ * @param env Connection parameters for OpenStack API endpoint
+ * @param name Name of the managed Heat stack instance
+ * @param path Optional path to the custom virtualenv
+ */
+def deleteHeatStack(env, name, path = null) {
+ cmd = "heat stack-delete ${name}"
+ outputTable = runOpenstackCommand(cmd, env, path)
+}
+
+/**
+ * Return list of servers from OpenStack Heat stack
+ *
+ * @param env Connection parameters for OpenStack API endpoint
+ * @param name Name of the managed Heat stack instance
+ * @param path Optional path to the custom virtualenv
+ */
+def getHeatStackServers(env, name, path = null) {
+ resources = getHeatStackResources(env, name, path)
+ servers = []
+ for (resource in resources) {
+ if (resource.resource_type == 'OS::Nova::Server') {
+ resourceName = resource.resource_name
+ server = getHeatStackResourceInfo(env, name, resourceName, path)
+ servers.add(server.attributes.name)
+ }
+ }
+ echo("[Stack ${name}] Servers: ${servers}")
+ return servers
+}
diff --git a/src/com/mirantis/mk/python.groovy b/src/com/mirantis/mk/python.groovy
new file mode 100644
index 0000000..c832cb3
--- /dev/null
+++ b/src/com/mirantis/mk/python.groovy
@@ -0,0 +1,128 @@
+package com.mirantis.mk
+
+/**
+ *
+ * Python functions
+ *
+ */
+
+/**
+ * Install python virtualenv
+ *
+ * @param path Path to virtualenv
+ * @param python Version of Python (python/python3)
+ * @param reqs Environment requirements in list format
+ */
+def setupVirtualenv(path, python = 'python2', reqs = []) {
+ virtualenv_cmd = "virtualenv ${path} --python ${python}"
+
+ echo("[Python ${path}] Setup ${python} environment")
+ sh(returnStdout: true, script: virtualenv_cmd)
+ args = ""
+ for (req in reqs) {
+ args = args + "${req}\n"
+ }
+ writeFile file: "${path}/requirements.txt", text: args
+ runVirtualenvCommand(path, "pip install -r ${path}/requirements.txt")
+}
+
+/**
+ * Run command in specific python virtualenv
+ *
+ * @param path Path to virtualenv
+ * @param cmd Command to be executed
+ */
+def runVirtualenvCommand(path, cmd) {
+ virtualenv_cmd = ". ${path}/bin/activate; ${cmd}"
+ echo("[Python ${path}] Run command ${cmd}")
+ output = sh(
+ returnStdout: true,
+ script: virtualenv_cmd
+ ).trim()
+ return output
+}
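+
+/*
+ * Example usage (a sketch; the package pins are illustrative):
+ *
+ *   def python = new com.mirantis.mk.python()
+ *   python.setupVirtualenv("${env.WORKSPACE}/venv", "python2", ["PyYAML", "requests>=2.0"])
+ *   python.runVirtualenvCommand("${env.WORKSPACE}/venv", "python --version")
+ */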
+
+@NonCPS
+def loadJson(rawData) {
+ return new groovy.json.JsonSlurperClassic().parseText(rawData)
+}
+
+/**
+ * Parse content from markup-text tables to variables
+ *
+ * @param tableStr String representing the table
+ * @param type Either list (1st row are keys) or item (key, value rows)
+ * @param format Format of the table
+ */
+def parseTextTable(tableStr, type = 'item', format = 'rest') {
+ parserScript = "${env.WORKSPACE}/scripts/parse_text_table.py"
+ tableFile = "${env.WORKSPACE}/prettytable.txt"
+ writeFile file: tableFile, text: tableStr
+ rawData = sh (
+ script: "python ${parserScript} --file '${tableFile}' --type ${type}",
+ returnStdout: true
+ ).trim()
+ data = loadJson(rawData)
+ echo("[Parsed table] ${data}")
+ return data
+}
+
+/**
+ * Install cookiecutter in isolated environment
+ *
+ * @param path Path where virtualenv is created
+ */
+def setupCookiecutterVirtualenv(path) {
+ requirements = [
+ 'cookiecutter',
+ ]
+ setupVirtualenv(path, 'python2', requirements)
+}
+
+/**
+ * Generate the cookiecutter templates with given context
+ *
+ * @param template Path to the cookiecutter template directory
+ * @param context Map of context parameters
+ * @param path Path where virtualenv is created
+ */
+def buildCookiecutterTemplate(template, context, path = null) {
+ contextFile = "default_context.json"
+ contextString = "parameters:\n"
+ for (parameter in context) {
+ contextString = "${contextString} ${parameter.key}: ${parameter.value}\n"
+ }
+ writeFile file: contextFile, text: contextString
+ command = ". ./${work_dir}/bin/activate; cookiecutter --config-file ${cookiecutter_context_file} --overwrite-if-exists --verbose --no-input ${template_dir}"
+ output = sh (returnStdout: true, script: command)
+ echo("[Cookiecutter build] Output: ${output}")
+}
+
+/**
+ * Install jinja rendering in isolated environment
+ *
+ * @param path Path where virtualenv is created
+ */
+def setupJinjaVirtualenv(path) {
+ requirements = [
+ 'jinja2-cli',
+ 'pyyaml',
+ ]
+ setupVirtualenv(path, 'python2', requirements)
+}
+
+/**
+ * Generate the Jinja templates with given context
+ *
+ * @param template Path to the jinja2 template file
+ * @param context Map of context parameters
+ * @param path Path where virtualenv is created
+ */
+def jinjaBuildTemplate(template, context, path = null) {
+ contextFile = "jinja_context.yml"
+ contextString = ""
+ for (parameter in context) {
+ contextString = "${contextString}${parameter.key}: ${parameter.value}\n"
+ }
+ writeFile file: contextFile, text: contextString
+ cmd = "jinja2 ${template} ${contextFile} --format=yaml"
+ data = sh (returnStdout: true, script: cmd)
+ echo(data)
+ return data
+}
diff --git a/src/com/mirantis/mk/salt.groovy b/src/com/mirantis/mk/salt.groovy
new file mode 100644
index 0000000..2416f91
--- /dev/null
+++ b/src/com/mirantis/mk/salt.groovy
@@ -0,0 +1,416 @@
+package com.mirantis.mk
+
+/**
+ *
+ * SaltStack functions
+ *
+ */
+
+/**
+ * Login to Salt API and return auth token
+ *
+ * @param url Salt API server URL
+ * @param params Salt connection params
+ */
+def getSaltToken(url, params) {
+ def http = new com.mirantis.mk.http()
+ data = [
+ 'username': params.creds.username,
+ 'password': params.creds.password.toString(),
+ 'eauth': 'pam'
+ ]
+ authToken = http.sendHttpPostRequest("${url}/login", data, ['Accept': '*/*'])['return'][0]['token']
+ return authToken
+}
+
+/**
+ * Salt connection and context parameters
+ *
+ * @param url Salt API server URL
+ * @param credentialsId ID of credentials store entry
+ */
+def createSaltConnection(url, credentialsId) {
+ def common = new com.mirantis.mk.common()
+ params = [
+ "url": url,
+ "credentialsId": credentialsId,
+ "authToken": null,
+ "creds": common.getPasswordCredentials(credentialsId)
+ ]
+ params["authToken"] = getSaltToken(url, params)
+
+ return params
+}
+
+/**
+ * Run action using Salt API
+ *
+ * @param master Salt connection object
+ * @param client Client type
+ * @param target Target specification, eg. for compound matches by Pillar
+ * data: ['expression': 'I@openssh:server', 'type': 'compound'])
+ * @param function Function to execute (eg. "state.sls")
+ * @param args Additional arguments to function
+ * @param kwargs Additional key-value arguments to function
+ */
+def runSaltCommand(master, client, target, function, args = null, kwargs = null) {
+ def http = new com.mirantis.mk.http()
+ data = [
+ 'tgt': target.expression,
+ 'fun': function,
+ 'client': client,
+ 'expr_form': target.type,
+ ]
+
+ if (args) {
+ data['arg'] = args
+ }
+ if (kwargs) {
+ data['kwarg'] = kwargs
+ }
+
+ headers = [
+ 'X-Auth-Token': "${master.authToken}"
+ ]
+
+ return http.sendHttpPostRequest("${master.url}/", data, headers)
+}
+
+def getSaltPillar(master, target, pillar) {
+ def out = runSaltCommand(master, 'local', target, 'pillar.get', [pillar.replace('.', ':')])
+ return out
+}
+
+def enforceSaltState(master, target, state, output = false) {
+ def run_states
+ if (state instanceof String) {
+ run_states = state
+ } else {
+ run_states = state.join(',')
+ }
+
+ def out = runSaltCommand(master, 'local', target, 'state.sls', [run_states])
+ try {
+ checkSaltResult(out)
+ } finally {
+ if (output == true) {
+ printSaltResult(out)
+ }
+ }
+ return out
+}
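+
+/*
+ * Example usage (a sketch; the Salt API URL and credentials ID are
+ * illustrative placeholders):
+ *
+ *   def salt = new com.mirantis.mk.salt()
+ *   def master = salt.createSaltConnection("http://salt-master.example.com:8000", "salt-api")
+ *   def result = salt.enforceSaltState(master, ['expression': 'I@linux:system', 'type': 'compound'], 'linux', true)
+ */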
+
+def runSaltCmd(master, target, cmd) {
+ return runSaltCommand(master, 'local', target, 'cmd.run', [cmd])
+}
+
+def syncSaltAll(master, target) {
+ return runSaltCommand(master, 'local', target, 'saltutil.sync_all')
+}
+
+def enforceSaltApply(master, target, output = false) {
+ def out = runSaltCommand(master, 'local', target, 'state.highstate')
+ try {
+ checkSaltResult(out)
+ } finally {
+ if (output == true) {
+ printSaltResult(out)
+ }
+ }
+ return out
+}
+
+def generateSaltNodeKey(master, target, host, keysize = 4096) {
+ args = [host]
+ kwargs = ['keysize': keysize]
+ return runSaltCommand(master, 'wheel', target, 'key.gen_accept', args, kwargs)
+}
+
+def generateSaltNodeMetadata(master, target, host, classes, parameters) {
+ args = [host, '_generated']
+ kwargs = ['classes': classes, 'parameters': parameters]
+ return runSaltCommand(master, 'local', target, 'reclass.node_create', args, kwargs)
+}
+
+def orchestrateSaltSystem(master, target, orchestrate) {
+ return runSaltCommand(master, 'runner', target, 'state.orchestrate', [orchestrate])
+}
+
+/**
+ * Check result for errors and throw exception if any found
+ *
+ * @param result Parsed response of Salt API
+ */
+def checkSaltResult(result) {
+ for (entry in result['return']) {
+ if (!entry) {
+ throw new Exception("Salt API returned empty response: ${result}")
+ }
+ for (node in entry) {
+ for (resource in node.value) {
+ if (resource instanceof String || resource.value.result.toString().toBoolean() != true) {
+ throw new Exception("Salt state on node ${node.key} failed: ${node.value}")
+ }
+ }
+ }
+ }
+}
+
+/**
+ * Print Salt run results in human-friendly form
+ *
+ * @param result Parsed response of Salt API
+ * @param onlyChanges If true (default), print only changed resources
+ * @param raw Simply pretty print what we have, no additional
+ * parsing
+ */
+def printSaltResult(result, onlyChanges = true, raw = false) {
+ if (raw == true) {
+ print new groovy.json.JsonBuilder(result).toPrettyString()
+ } else {
+ def out = [:]
+ for (entry in result['return']) {
+ for (node in entry) {
+ out[node.key] = [:]
+ for (resource in node.value) {
+ if (resource instanceof String) {
+ out[node.key] = node.value
+ } else if (resource.value.result.toString().toBoolean() == false || resource.value.changes || onlyChanges == false) {
+ out[node.key][resource.key] = resource.value
+ }
+ }
+ }
+ }
+
+ for (node in out) {
+ if (node.value) {
+ println "Node ${node.key} changes:"
+ print new groovy.json.JsonBuilder(node.value).toPrettyString()
+ } else {
+ println "No changes for node ${node.key}"
+ }
+ }
+ }
+}
+
+@NonCPS
+def getSaltProcess(saltProcess) {
+
+ def process_def = [
+ 'validate_foundation_infra': [
+ [tgt: 'I@salt:master', fun: 'cmd.run', arg: ['salt-key']],
+ [tgt: 'I@salt:minion', fun: 'test.version'],
+ [tgt: 'I@salt:master', fun: 'cmd.run', arg: ['reclass-salt --top']],
+ [tgt: 'I@reclass:storage', fun: 'reclass.inventory'],
+ [tgt: 'I@salt:minion', fun: 'state.show_top'],
+ ],
+ 'install_foundation_infra': [
+ [tgt: 'I@salt:master', fun: 'state.sls', arg: ['salt.master,reclass']],
+ [tgt: 'I@linux:system', fun: 'saltutil.refresh_pillar'],
+ [tgt: 'I@linux:system', fun: 'saltutil.sync_all'],
+ [tgt: 'I@linux:system', fun: 'state.sls', arg: ['linux,openssh,salt.minion,ntp']],
+ ],
+ 'install_openstack_mk_infra': [
+ // Install keepaliveds
+ [tgt: 'I@keepalived:cluster', fun: 'state.sls', arg: ['keepalived'], batch:1],
+ // Check the keepalived VIPs
+ [tgt: 'I@keepalived:cluster', fun: 'cmd.run', arg: ['ip a | grep 172.16.10.2']],
+ // Install glusterfs
+ [tgt: 'I@glusterfs:server', fun: 'state.sls', arg: ['glusterfs.server.service']],
+ [tgt: 'I@glusterfs:server', fun: 'state.sls', arg: ['glusterfs.server.setup'], batch:1],
+ [tgt: 'I@glusterfs:server', fun: 'cmd.run', arg: ['gluster peer status']],
+ [tgt: 'I@glusterfs:server', fun: 'cmd.run', arg: ['gluster volume status']],
+ // Install rabbitmq
+ [tgt: 'I@rabbitmq:server', fun: 'state.sls', arg: ['rabbitmq']],
+ // Check the rabbitmq status
+ [tgt: 'I@rabbitmq:server', fun: 'cmd.run', arg: ['rabbitmqctl cluster_status']],
+ // Install galera
+ [tgt: 'I@galera:master', fun: 'state.sls', arg: ['galera']],
+ [tgt: 'I@galera:slave', fun: 'state.sls', arg: ['galera']],
+ // Check galera status
+ [tgt: 'I@galera:master', fun: 'mysql.status'],
+ [tgt: 'I@galera:slave', fun: 'mysql.status'],
+ // Install haproxy
+ [tgt: 'I@haproxy:proxy', fun: 'state.sls', arg: ['haproxy']],
+ [tgt: 'I@haproxy:proxy', fun: 'service.status', arg: ['haproxy']],
+ [tgt: 'I@haproxy:proxy', fun: 'service.restart', arg: ['rsyslog']],
+ // Install memcached
+ [tgt: 'I@memcached:server', fun: 'state.sls', arg: ['memcached']],
+ ],
+ 'install_openstack_mk_control': [
+ // setup keystone service
+ [tgt: 'I@keystone:server', fun: 'state.sls', arg: ['keystone.server'], batch:1],
+ // populate keystone services/tenants/roles/users
+ [tgt: 'I@keystone:client', fun: 'state.sls', arg: ['keystone.client']],
+ [tgt: 'I@keystone:server', fun: 'cmd.run', arg: ['. /root/keystonerc; keystone service-list']],
+ // Install glance and ensure glusterfs clusters
+ [tgt: 'I@glance:server', fun: 'state.sls', arg: ['glance.server'], batch:1],
+ [tgt: 'I@glance:server', fun: 'state.sls', arg: ['glusterfs.client']],
+ // Update fernet tokens before doing request on keystone server
+ [tgt: 'I@keystone:server', fun: 'state.sls', arg: ['keystone.server']],
+ // Check glance service
+ [tgt: 'I@keystone:server', fun: 'cmd.run', arg: ['. /root/keystonerc; glance image-list']],
+ // Install and check nova service
+ [tgt: 'I@nova:controller', fun: 'state.sls', arg: ['nova'], batch:1],
+ [tgt: 'I@keystone:server', fun: 'cmd.run', arg: ['. /root/keystonerc; nova service-list']],
+ // Install and check cinder service
+ [tgt: 'I@cinder:controller', fun: 'state.sls', arg: ['cinder'], batch:1],
+ [tgt: 'I@keystone:server', fun: 'cmd.run', arg: ['. /root/keystonerc; cinder list']],
+ // Install neutron service
+ [tgt: 'I@neutron:server', fun: 'state.sls', arg: ['neutron'], batch:1],
+ [tgt: 'I@keystone:server', fun: 'cmd.run', arg: ['. /root/keystonerc; neutron agent-list']],
+ // Install heat service
+ [tgt: 'I@heat:server', fun: 'state.sls', arg: ['heat'], batch:1],
+ [tgt: 'I@keystone:server', fun: 'cmd.run', arg: ['. /root/keystonerc; heat resource-type-list']],
+ // Install horizon dashboard
+ [tgt: 'I@horizon:server', fun: 'state.sls', arg: ['horizon']],
+ [tgt: 'I@nginx:server', fun: 'state.sls', arg: ['nginx']],
+ ],
+ 'install_openstack_mk_network': [
+ // Install opencontrail database services
+ [tgt: 'I@opencontrail:database', fun: 'state.sls', arg: ['opencontrail.database'], batch:1],
+ // Install opencontrail control services
+ [tgt: 'I@opencontrail:control', fun: 'state.sls', arg: ['opencontrail'], batch:1],
+ // Provision opencontrail control services
+ [tgt: 'I@opencontrail:control:id:1', fun: 'cmd.run', arg: ['/usr/share/contrail-utils/provision_control.py --api_server_ip 172.16.10.254 --api_server_port 8082 --host_name ctl01 --host_ip 172.16.10.101 --router_asn 64512 --admin_password workshop --admin_user admin --admin_tenant_name admin --oper add']],
+ [tgt: 'I@opencontrail:control:id:1', fun: 'cmd.run', arg: ['/usr/share/contrail-utils/provision_control.py --api_server_ip 172.16.10.254 --api_server_port 8082 --host_name ctl02 --host_ip 172.16.10.102 --router_asn 64512 --admin_password workshop --admin_user admin --admin_tenant_name admin --oper add']],
+ [tgt: 'I@opencontrail:control:id:1', fun: 'cmd.run', arg: ['/usr/share/contrail-utils/provision_control.py --api_server_ip 172.16.10.254 --api_server_port 8082 --host_name ctl03 --host_ip 172.16.10.103 --router_asn 64512 --admin_password workshop --admin_user admin --admin_tenant_name admin --oper add']],
+ // Test opencontrail
+ [tgt: 'I@opencontrail:control', fun: 'cmd.run', arg: ['contrail-status']],
+ [tgt: 'I@keystone:server', fun: 'cmd.run', arg: ['. /root/keystonerc; neutron net-list']],
+ [tgt: 'I@keystone:server', fun: 'cmd.run', arg: ['. /root/keystonerc; nova net-list']],
+ ],
+ 'install_openstack_mk_compute': [
+ // Configure compute nodes
+ [tgt: 'I@nova:compute', fun: 'state.apply'],
+ [tgt: 'I@nova:compute', fun: 'state.apply'],
+ // Provision opencontrail virtual routers
+ [tgt: 'I@opencontrail:control:id:1', fun: 'cmd.run', arg: ['/usr/share/contrail-utils/provision_vrouter.py --host_name cmp01 --host_ip 172.16.10.105 --api_server_ip 172.16.10.254 --oper add --admin_user admin --admin_password workshop --admin_tenant_name admin']],
+ [tgt: 'I@nova:compute', fun: 'system.reboot'],
+ ],
+ 'install_openstack_mcp_infra': [
+ // Comment nameserver
+ [tgt: 'I@kubernetes:master', fun: 'cmd.run', arg: ["sed -i 's/nameserver 10.254.0.10/#nameserver 10.254.0.10/g' /etc/resolv.conf"]],
+ // Install glusterfs
+ [tgt: 'I@glusterfs:server', fun: 'state.sls', arg: ['glusterfs.server.service']],
+ // Install keepalived
+ [tgt: 'I@keepalived:cluster', fun: 'state.sls', arg: ['keepalived'], batch:1],
+ // Check the keepalived VIPs
+ [tgt: 'I@keepalived:cluster', fun: 'cmd.run', arg: ['ip a | grep 172.16.10.2']],
+ // Setup glusterfs
+ [tgt: 'I@glusterfs:server', fun: 'state.sls', arg: ['glusterfs.server.setup'], batch:1],
+ [tgt: 'I@glusterfs:server', fun: 'cmd.run', arg: ['gluster peer status']],
+ [tgt: 'I@glusterfs:server', fun: 'cmd.run', arg: ['gluster volume status']],
+ // Install haproxy
+ [tgt: 'I@haproxy:proxy', fun: 'state.sls', arg: ['haproxy']],
+ [tgt: 'I@haproxy:proxy', fun: 'service.status', arg: ['haproxy']],
+ // Install docker
+ [tgt: 'I@docker:host', fun: 'state.sls', arg: ['docker.host']],
+ [tgt: 'I@docker:host', fun: 'cmd.run', arg: ['docker ps']],
+ // Install bird
+ [tgt: 'I@bird:server', fun: 'state.sls', arg: ['bird']],
+ // Install etcd
+ [tgt: 'I@etcd:server', fun: 'state.sls', arg: ['etcd.server.service']],
+ [tgt: 'I@etcd:server', fun: 'cmd.run', arg: ['etcdctl cluster-health']],
+ ],
+ 'install_stacklight_control': [
+ [tgt: 'I@elasticsearch:server', fun: 'state.sls', arg: ['elasticsearch.server'], batch:1],
+ [tgt: 'I@influxdb:server', fun: 'state.sls', arg: ['influxdb'], batch:1],
+ [tgt: 'I@kibana:server', fun: 'state.sls', arg: ['kibana.server'], batch:1],
+ [tgt: 'I@grafana:server', fun: 'state.sls', arg: ['grafana'], batch:1],
+ [tgt: 'I@nagios:server', fun: 'state.sls', arg: ['nagios'], batch:1],
+ [tgt: 'I@elasticsearch:client', fun: 'state.sls', arg: ['elasticsearch.client'], batch:1],
+ [tgt: 'I@kibana:client', fun: 'state.sls', arg: ['kibana.client'], batch:1],
+ ],
+ 'install_stacklight_client': [
+ ]
+ ]
+ return process_def[saltProcess]
+}
+
+/**
+ * Run predefined salt process
+ *
+ * @param master Salt connection object
+ * @param process Process name to be run
+ */
+def runSaltProcess(master, process) {
+ def common = new com.mirantis.mk.common()
+
+ tasks = getSaltProcess(process)
+
+ for (i = 0; i < tasks.size(); i++) {
+ task = tasks[i]
+ common.infoMsg("[Salt master ${master.url}] Task ${task}")
+ if (task.containsKey('arg')) {
+ result = runSaltCommand(master, 'local', ['expression': task.tgt, 'type': 'compound'], task.fun, task.arg)
+ }
+ else {
+ result = runSaltCommand(master, 'local', ['expression': task.tgt, 'type': 'compound'], task.fun)
+ }
+ if (task.fun == 'state.sls') {
+ printSaltResult(result, false)
+ }
+ else {
+ echo("${result}")
+ }
+ }
+}
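+
+/*
+ * Example usage (a sketch; runs one of the predefined task lists from
+ * getSaltProcess against an existing Salt API connection):
+ *
+ *   salt.runSaltProcess(master, 'validate_foundation_infra')
+ *   salt.runSaltProcess(master, 'install_foundation_infra')
+ */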
+
+/**
+ * Print Salt state run results in human-friendly form
+ *
+ * @param result Parsed response of Salt API
+ * @param onlyChanges If true (default), print only changed resources
+ */
+def printSaltStateResult(result, onlyChanges = true) {
+ def out = [:]
+ for (entry in result['return']) {
+ for (node in entry) {
+ out[node.key] = [:]
+ for (resource in node.value) {
+ if (resource instanceof String) {
+ out[node.key] = node.value
+ } else if (resource.value.result.toString().toBoolean() == false || resource.value.changes || onlyChanges == false) {
+ out[node.key][resource.key] = resource.value
+ }
+ }
+ }
+ }
+
+ for (node in out) {
+ if (node.value) {
+ println "Node ${node.key} changes:"
+ print new groovy.json.JsonBuilder(node.value).toPrettyString()
+ } else {
+ println "No changes for node ${node.key}"
+ }
+ }
+}
+
+/**
+ * Print Salt command run results in human-friendly form
+ *
+ * @param result Parsed response of Salt API
+ * @param onlyChanges If true (default), print only changed resources
+ */
+def printSaltCommandResult(result, onlyChanges = true) {
+ def out = [:]
+ for (entry in result['return']) {
+ for (node in entry) {
+ out[node.key] = [:]
+ for (resource in node.value) {
+ out[node.key] = node.value
+ }
+ }
+ }
+
+ for (node in out) {
+ if (node.value) {
+ println "Node ${node.key} changes:"
+ print new groovy.json.JsonBuilder(node.value).toPrettyString()
+ } else {
+ println "No changes for node ${node.key}"
+ }
+ }
+}
diff --git a/src/com/mirantis/mk/ssl.groovy b/src/com/mirantis/mk/ssl.groovy
new file mode 100644
index 0000000..504e5d3
--- /dev/null
+++ b/src/com/mirantis/mk/ssl.groovy
@@ -0,0 +1,43 @@
+package com.mirantis.mk
+
+/**
+ *
+ * SSH functions
+ *
+ */
+
+/**
+ * Ensure entry in SSH known hosts
+ *
+ * @param url url of remote host
+ */
+def ensureKnownHosts(url) {
+ uri = new URI(url)
+ port = uri.port ?: 22
+
+ sh "test -f ~/.ssh/known_hosts && grep ${uri.host} ~/.ssh/known_hosts || ssh-keyscan -p ${port} ${uri.host} >> ~/.ssh/known_hosts"
+}
+
+/**
+ * Execute command with ssh-agent
+ *
+ * @param cmd Command to execute
+ */
+def runSshAgentCommand(cmd) {
+ sh(". ~/.ssh/ssh-agent.sh && ${cmd}")
+}
+
+/**
+ * Setup ssh agent and add private key
+ *
+ * @param credentialsId Jenkins credentials name to lookup private key
+ */
+def prepareSshAgentKey(credentialsId) {
+ def common = new com.mirantis.mk.common()
+ c = common.getSshCredentials(credentialsId)
+ sh("test -d ~/.ssh || mkdir -m 700 ~/.ssh")
+ sh('pgrep -l -u $USER -f | grep -e ssh-agent\$ >/dev/null || ssh-agent|grep -v "Agent pid" > ~/.ssh/ssh-agent.sh')
+ sh("echo '${c.getPrivateKey()}' > ~/.ssh/id_rsa_${credentialsId} && chmod 600 ~/.ssh/id_rsa_${credentialsId}")
+ runSshAgentCommand("ssh-add ~/.ssh/id_rsa_${credentialsId}")
+}
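+
+/*
+ * Example usage (a sketch; the credentials ID and remote URL are illustrative):
+ *
+ *   def ssl = new com.mirantis.mk.ssl()
+ *   ssl.prepareSshAgentKey("gerrit")
+ *   ssl.ensureKnownHosts("ssh://git@gerrit.example.com:29418/project")
+ *   ssl.runSshAgentCommand("git push target HEAD:master")
+ */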
+
+return this;