Merge "Align RECLASS_VERSION|FORMULAS_REVISION=>DISTRIB_REVISION"
diff --git a/branch-git-repos.groovy b/branch-git-repos.groovy
index 47c143a..0624c40 100644
--- a/branch-git-repos.groovy
+++ b/branch-git-repos.groovy
@@ -116,19 +116,9 @@
                 sh "git branch -d '${gitBranchNew}' && git push origin ':${gitBranchNew}' || :"
                 sh "git tag    -d '${gitBranchNew}' && git push origin ':refs/tags/${gitBranchNew}' || :"
 
-                // Check if gitSrcObj is a branch
-                gitCommit = sh (script: "git ls-remote --heads --quiet origin '${gitSrcObj}' | awk '{print \$1}'",
-                                returnStdout: true).trim()
-                if (gitCommit) {
-                // Rename existing branch
-                    sh "git checkout -b '${gitSrcObj}' -t 'origin/${gitSrcObj}'" // Checkout old branch
-                    sh "git branch -m '${gitSrcObj}' '${gitBranchNew}'"          // ... rename it
-                    sh "git push origin ':${gitSrcObj}'"                         // ... remove old remote branch
-                } else {
                 // Create new branch
-                    sh "git checkout -b '${gitBranchNew}' '${gitSrcObj}'"        // Create new local branch
-                }
-                sh "git push origin '${gitBranchNew}'"                           // ... push new branch
+                sh "git checkout -b '${gitBranchNew}' '${gitSrcObj}'" // Create new local branch
+                sh "git push origin '${gitBranchNew}'"                // ... push new branch
             }
         }
     }
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index aadc7c9..a51f436 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -486,6 +486,7 @@
 
                     if (common.checkContains('STACK_INSTALL', 'contrail')) {
                         orchestrate.installContrailCompute(venvPepper, extra_tgt)
+                        orchestrate.installBackup(venvPepper, 'contrail', extra_tgt)
                     }
                 }
 
diff --git a/cvp-runner.groovy b/cvp-runner.groovy
index dd58da5..7cf8e28 100644
--- a/cvp-runner.groovy
+++ b/cvp-runner.groovy
@@ -10,30 +10,107 @@
  *   TESTS_REPO                      Repo to clone
  *   TESTS_SETTINGS                  Additional environment varibales to apply
  *   PROXY                           Proxy to use for cloning repo or for pip
+ *   TEST_IMAGE                      Docker image link or name to use for running container with test framework.
+ *   DEBUG_MODE                      If you need to debug (keep container after test), please enable this
  *
  */
 
+common = new com.mirantis.mk.Common()
 validate = new com.mirantis.mcp.Validate()
-
+salt = new com.mirantis.mk.Salt()
 def artifacts_dir = 'validation_artifacts/'
+def remote_dir = '/root/qa_results/'
+def container_workdir = '/var/lib'
+def TARGET_NODE = "I@gerrit:client"
+def reinstall_env = false
+def container_name = "${env.JOB_NAME}"
+def saltMaster
+def settings
 
 node() {
     try{
         stage('Initialization') {
-            validate.prepareVenv(TESTS_REPO, PROXY)
+            sh "rm -rf ${artifacts_dir}"
+            if ( TESTS_SETTINGS != "" ) {
+                for (var in TESTS_SETTINGS.tokenize(";")) {
+                    key = var.tokenize("=")[0].trim()
+                    value = var.tokenize("=")[1].trim()
+                    if (key == 'TARGET_NODE') {
+                        TARGET_NODE = value
+                        common.infoMsg("Node for container is set to ${TARGET_NODE}")
+                    }
+                    if (key == 'REINSTALL_ENV') {
+                        reinstall_env = value.toBoolean()
+                    }
+                }
+            }
+            if ( IMAGE == "" ) {
+                common.infoMsg("Env for tests will be built on Jenkins slave")
+                TARGET_NODE = ""
+                validate.prepareVenv(TESTS_REPO, PROXY)
+            } else {
+                saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+                salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_dir}")
+                salt.cmdRun(saltMaster, TARGET_NODE, "mkdir -p ${remote_dir}")
+                validate.runContainer(saltMaster, TARGET_NODE, IMAGE, container_name)
+                if ( TESTS_REPO != "") {
+                    salt.cmdRun(saltMaster, TARGET_NODE, "docker exec ${container_name} rm -rf ${container_workdir}/cvp*")
+                    salt.cmdRun(saltMaster, TARGET_NODE, "docker exec ${container_name} git clone ${TESTS_REPO} ${container_workdir}/${container_name}")
+                    TESTS_SET = container_workdir + '/' + container_name + '/' + TESTS_SET
+                    if ( reinstall_env ) {
+                        common.infoMsg("Pip packages in container will be reinstalled based on requirements.txt from ${TESTS_REPO}")
+                        salt.cmdRun(saltMaster, TARGET_NODE, "docker exec ${container_name} pip install --force-reinstall -r ${container_workdir}/${container_name}/requirements.txt")
+                    }
+                }
+            }
         }
 
         stage('Run Tests') {
             sh "mkdir -p ${artifacts_dir}"
-            validate.runTests(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS, TESTS_SET, artifacts_dir, TESTS_SETTINGS)
+            validate.runPyTests(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS, TESTS_SET, TESTS_SETTINGS.tokenize(";"), container_name, TARGET_NODE, remote_dir, artifacts_dir)
         }
+
         stage ('Publish results') {
             archiveArtifacts artifacts: "${artifacts_dir}/*"
             junit "${artifacts_dir}/*.xml"
+            if (env.JOB_NAME.contains("cvp-spt")) {
+                plot csvFileName: 'plot-8634d2fe-dc48-4713-99f9-b69a381483aa.csv',
+                     group: 'SPT',
+                     style: 'line',
+                     title: 'SPT Glance results',
+                     xmlSeries: [[
+                     file: "${env.JOB_NAME}_report.xml",
+                     nodeType: 'NODESET',
+                     url: '',
+                     xpath: '/testsuite/testcase[@name="test_speed_glance"]/properties/property']]
+                plot csvFileName: 'plot-8634d2fe-dc48-4713-99f9-b69a381483bb.csv',
+                     group: 'SPT',
+                     style: 'line',
+                     title: 'SPT HW2HW results',
+                     xmlSeries: [[
+                     file: "${env.JOB_NAME}_report.xml",
+                     nodeType: 'NODESET',
+                     url: '',
+                     xpath: '/testsuite/testcase[@classname="cvp_spt.tests.test_hw2hw"]/properties/property']]
+                plot csvFileName: 'plot-8634d2fe-dc48-4713-99f9-b69a381483bc.csv',
+                     group: 'SPT',
+                     style: 'line',
+                     title: 'SPT VM2VM results',
+                     xmlSeries: [[
+                     file: "${env.JOB_NAME}_report.xml",
+                     nodeType: 'NODESET',
+                     url: '',
+                     xpath: '/testsuite/testcase[@classname="cvp_spt.tests.test_vm2vm"]/properties/property']]
+            }
         }
     } catch (Throwable e) {
         // If there was an error or exception thrown, the build failed
         currentBuild.result = "FAILURE"
         throw e
+    } finally {
+        if (DEBUG_MODE == 'false') {
+            validate.runCleanup(saltMaster, TARGET_NODE, container_name)
+            salt.cmdRun(saltMaster, TARGET_NODE, "rm -rf ${remote_dir}")
+        }
     }
 }
diff --git a/cvp-stacklight.groovy b/cvp-stacklight.groovy
new file mode 100644
index 0000000..e7ce974
--- /dev/null
+++ b/cvp-stacklight.groovy
@@ -0,0 +1,33 @@
+/**
+ *
+ * Temporary pipeline for running cvp-stacklight job
+ *
+ * Expected parameters:
+ *   SALT_MASTER_URL                 URL of Salt master
+ *   SALT_MASTER_CREDENTIALS         Credentials to the Salt API
+ *
+ *   TESTS_SET                       Leave empty for full run or choose a file (test)
+ *   TESTS_REPO                      Repo to clone
+ *   TESTS_SETTINGS                  Additional environment variables to apply
+ *   PROXY                           Proxy to use for cloning repo or for pip
+ *
+ */
+
+validate = new com.mirantis.mcp.Validate()
+
+def artifacts_dir = 'validation_artifacts/'
+
+node() {
+    stage('Initialization') {
+        validate.prepareVenv(TESTS_REPO, PROXY)
+    }
+
+    stage('Run Tests') {
+        sh "mkdir -p ${artifacts_dir}"
+        validate.runTests(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS, TESTS_SET, artifacts_dir, TESTS_SETTINGS)
+    }
+    stage ('Publish results') {
+        archiveArtifacts artifacts: "${artifacts_dir}/*"
+        junit "${artifacts_dir}/*.xml"
+    }
+}
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 6b36fcc..7609103 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -266,7 +266,7 @@
 
                 if (templateContext['default_context']['local_repositories'] == 'True') {
                     def aptlyServerHostname = templateContext.default_context.aptly_server_hostname
-                    sh "cp mcp-common-scripts/config-drive/mirror_config.sh mirror_config.sh"
+                    sh "[ -f mcp-common-scripts/config-drive/mirror_config.yaml ] && cp mcp-common-scripts/config-drive/mirror_config.yaml mirror_config || cp mcp-common-scripts/config-drive/mirror_config.sh mirror_config"
 
                     def smc_apt = [:]
                     smc_apt['SALT_MASTER_DEPLOY_IP'] = templateContext['default_context']['salt_master_management_address']
@@ -275,11 +275,11 @@
                     smc_apt['APTLY_MINION_ID'] = "${aptlyServerHostname}.${clusterDomain}"
 
                     for (i in common.entries(smc_apt)) {
-                        sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" mirror_config.sh"
+                        sh "sed -i \"s,export ${i[0]}=.*,export ${i[0]}=${i[1]},\" mirror_config"
                     }
 
                     // create apt config-drive
-                    sh "./create-config-drive --user-data mirror_config.sh --hostname ${aptlyServerHostname} ${aptlyServerHostname}.${clusterDomain}-config.iso"
+                    sh "./create-config-drive --user-data mirror_config --hostname ${aptlyServerHostname} ${aptlyServerHostname}.${clusterDomain}-config.iso"
                     sh("mv ${aptlyServerHostname}.${clusterDomain}-config.iso output-${clusterName}/")
 
                     // save apt iso to artifacts
diff --git a/release-mcp-version.groovy b/release-mcp-version.groovy
index 470f338..0d9ce5e 100644
--- a/release-mcp-version.groovy
+++ b/release-mcp-version.groovy
@@ -17,15 +17,17 @@
  *   GIT_CREDENTIALS
  *   GIT_REPO_LIST
  *   VCP_IMAGE_LIST - list of images
+ *   SYNC_VCP_IMAGE_TO_S3 - boolean
  *   RELEASE_VCP_IMAGES - boolean
  *   EMAIL_NOTIFY
  *   NOTIFY_RECIPIENTS
- *   NOTIFY_TEXT
- *
+ *
  */
 
 common = new com.mirantis.mk.Common()
-git = new com.mirantis.mk.Git()
+
+syncVcpImagesToS3 = env.SYNC_VCP_IMAGE_TO_S3.toBoolean() ?: false
+emailNotify = env.EMAIL_NOTIFY.toBoolean() ?: false
 
 def triggerAptlyPromoteJob(aptlyUrl, components, diffOnly, dumpPublish, packages, recreate, source, storages, target) {
     build job: "aptly-promote-all-testing-stable", parameters: [
@@ -66,6 +68,9 @@
 }
 
 def triggerGitTagJob(gitRepoList, gitCredentials, tag, sourceTag) {
+    // There are no `nightly` or `testing` build-IDs in the release process
+    // for git repos
+    if ( sourceTag in ['nightly', 'testing'] ) sourceTag = 'master'
     build job: "tag-git-repos-all", parameters: [
         [$class: 'TextParameterValue', name: 'GIT_REPO_LIST', value: gitRepoList],
         [$class: 'StringParameterValue', name: 'GIT_CREDENTIALS', value: gitCredentials],
@@ -83,6 +88,13 @@
     ]
 }
 
+def triggerSyncVCPJob(VcpImageList) {
+    build job: "upload-to-s3", parameters: [
+            [$class: 'TextParameterValue', name: 'FILENAMES',
+             value: VcpImageList + VcpImageList.collect({it + '.md5'})]
+    ]
+}
+
 timeout(time: 12, unit: 'HOURS') {
     node() {
         try {
@@ -117,9 +129,14 @@
                     triggerPromoteVCPJob(VCP_IMAGE_LIST, TARGET_REVISION, SOURCE_REVISION)
 
                 }
-                if (EMAIL_NOTIFY.toBoolean()) {
+                if (syncVcpImagesToS3) {
+                    common.infoMsg("Syncing VCP images from internal: http://apt.mcp.mirantis.net/images  to s3: images.mirantis.com")
+                    triggerSyncVCPJob('')
+                }
+                if (emailNotify) {
+                    notify_text = "MCP Promotion  ${env.SOURCE_REVISION} => ${env.TARGET_REVISION} has been done"
                     emailext(to: NOTIFY_RECIPIENTS,
-                        body: NOTIFY_TEXT,
+                        body: notify_text,
                         subject: "MCP Promotion has been done")
                 }
             }
diff --git a/stacklight-upgrade.groovy b/stacklight-upgrade.groovy
new file mode 100644
index 0000000..8c445ac
--- /dev/null
+++ b/stacklight-upgrade.groovy
@@ -0,0 +1,163 @@
+/**
+ *
+ * Upgrade Stacklight packages and components
+ *
+ *  Required parameters:
+ *  SALT_MASTER_URL                 URL of Salt master
+ *  SALT_MASTER_CREDENTIALS         Credentials to the Salt API
+ *
+ *  STAGE_UPGRADE_SYSTEM_PART           Set to True if upgrade of system part (telegraf, fluentd, prometheus-relay) is desired
+ *  STAGE_UPGRADE_ES_KIBANA             Set to True if Elasticsearch and Kibana upgrade is desired
+ *  STAGE_UPGRADE_DOCKER_COMPONENTS     Set to True if upgrade for components running in Docker Swarm is desired
+ *
+ */
+
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
+
+def pepperEnv = "pepperEnv"
+def targetLiveSubset
+def targetLiveAll
+def minions
+def result
+def args
+def commandKwargs
+def probe = 1
+def errorOccured = false
+def command = 'cmd.run'
+
+def upgrade(master, target, service, pckg, state) {
+    stage("Change ${target} repos") {
+        salt.runSaltProcessStep(master, "${target}", 'saltutil.refresh_pillar', [], null, true, 5)
+        salt.enforceState(master, "${target}", 'linux.system.repo', true)
+    }
+    stage("Update ${pckg} package") {
+        common.infoMsg("Upgrade ${service} package")
+        try {
+            salt.runSaltProcessStep(master, "${target}", command, ["apt-get install --only-upgrade ${pckg}"], null, true)
+        } catch (Exception er) {
+            errorOccured = true
+            common.errorMsg("${pckg} package is not upgraded.")
+            return
+        }
+    }
+    stage("Run ${state} on ${target}") {
+        try {
+            salt.enforceState(master, '${target}', '${state}')
+        } catch (Exception er) {
+            errorOccured = true
+            common.errorMsg('${state} state was executed and failed. Please fix it manually.')
+        }
+    }
+    out = salt.runSaltCommand(master, 'local', ['expression': '${target}', 'type': 'compound'], command, null, 'systemctl status ${service}.service', null)
+    salt.printSaltCommandResult(out)
+
+    common.warningMsg('Please check \'systemctl status ${service}.service\' on ${target} nodes if ${service} is running.')
+    return
+}
+
+def upgrade_es_kibana(master) {
+    stage('Elasticsearch upgrade') {
+        try {
+            salt.runSaltProcessStep(master, 'I@elasticsearch:server', command, ["systemctl stop elasticsearch"], null, true)
+            salt.runSaltProcessStep(master, 'I@elasticsearch:server', command, ["apt-get --only-upgrade install elasticsearch"], null, true)
+            salt.runSaltProcessStep(master, 'I@elasticsearch:server', command, ["systemctl daemon-reload"], null, true)
+            salt.runSaltProcessStep(master, 'I@elasticsearch:server', command, ["systemctl start elasticsearch"], null, true)
+            salt.runSaltProcessStep(master, '*', 'saltutil.sync_all', [], null, true)
+        } catch (Exception er) {
+            errorOccured = true
+            common.errorMsg("Elasticsearch upgrade failed. Please fix it manually.")
+            return
+        }
+    }
+    stage('Verify that the Elasticsearch cluster status is green') {
+        try {
+            def retries_wait = 20
+            def retries = 15
+            def elasticsearch_vip
+            if(!pillar['return'].isEmpty()) {
+                elasticsearch_vip = pillar['return'][0].values()[0]
+            } else {
+                errorOccured = true
+                common.errorMsg('[ERROR] Elasticsearch VIP address could not be retrieved')
+            }
+            pillar = salt.getPillar(master, "I@elasticsearch:client", 'elasticsearch:client:server:port')
+            def elasticsearch_port
+            if(!pillar['return'].isEmpty()) {
+                elasticsearch_port = pillar['return'][0].values()[0]
+            } else {
+                errorOccured = true
+                common.errorMsg('[ERROR] Elasticsearch VIP port could not be retrieved')
+            }
+            common.retry(retries,retries_wait) {
+                common.infoMsg('Waiting for Elasticsearch to become green..')
+                salt.cmdRun(master, "I@elasticsearch:client", "curl -sf ${elasticsearch_vip}:${elasticsearch_port}/_cat/health | awk '{print \$4}' | grep green")
+            }
+        } catch (Exception er) {
+            errorOccured = true
+            common.errorMsg("Elasticsearch cluster status is not \'green\'. Please fix it manually.")
+            return
+        }
+    }
+    stage('Kibana upgrade') {
+        try {
+            salt.runSaltProcessStep(master, 'I@kibana:server', command, ["systemctl stop kibana"], null, true)
+            salt.runSaltProcessStep(master, 'I@kibana:server', command, ["apt-get --only-upgrade install kibana"], null, true)
+            salt.runSaltProcessStep(master, 'I@kibana:server', command, ["systemctl start kibana"], null, true)
+        } catch (Exception er) {
+            errorOccured = true
+            common.errorMsg("Kibana upgrade failed. Please fix it manually.")
+            return
+        }
+        out = salt.runSaltCommand(master, 'local', ['expression': 'I@kibana:server', 'type': 'compound'], command, null, 'systemctl status kibana.service', null)
+        salt.printSaltCommandResult(out)
+
+        common.warningMsg('Please check if kibana service is running.')
+        return
+    }
+}
+timeout(time: 12, unit: 'HOURS') {
+    node("python") {
+
+        stage('Setup virtualenv for Pepper') {
+            python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+        }
+
+        if (STAGE_UPGRADE_SYSTEM_PART.toBoolean() == true && !errorOccured) {
+            upgrade(pepperEnv, "I@telegraf:agent or I@telegraf:remote_agent", "telegraf", "telegraf", "telegraf")
+            upgrade(pepperEnv, "I@fluentd:agent", "td-agent", "td-agent", "fluentd")
+            if (salt.testTarget(pepperEnv, "I@prometheus:relay")) {
+                upgrade(pepperEnv, "I@prometheus:relay", "prometheus-relay", "prometheus-relay", "prometheus")
+            }
+            if (salt.testTarget(pepperEnv, "I@prometheus:exporters:libvirt")) {
+                upgrade(pepperEnv, "I@prometheus:exporters:libvirt", "libvirt-exporter", "libvirt-exporter", "prometheus")
+            }
+            if (salt.testTarget(pepperEnv, "I@prometheus:exporters:jmx")) {
+                upgrade(pepperEnv, "I@prometheus:exporters:jmx", "jmx-exporter", "jmx-exporter", "prometheus")
+            }
+            if (STAGE_UPGRADE_ES_KIBANA.toBoolean() == true && !errorOccured) {
+                upgrade_es_kibana(pepperEnv)
+            }
+        }
+
+        if (STAGE_UPGRADE_DOCKER_COMPONENTS.toBoolean() == true && !errorOccured) {
+
+            stage('Docker components upgrade') {
+
+                try {
+                    salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master and I@prometheus:server', command, ["docker stack rm monitoring"], null, true)
+                    salt.enforceState(pepperEnv, 'I@docker:swarm and I@prometheus:server', 'prometheus')
+                    salt.runSaltProcessStep(pepperEnv, 'I@docker:swarm:role:master and I@prometheus:server', command, ["docker stack rm dashboard"], null, true)
+                    salt.enforceState(pepperEnv, 'I@docker:swarm:role:master and I@prometheus:server', 'docker')
+                    salt.runSaltProcessStep(pepperEnv, '*', 'saltutil.sync_all', [], null, true)
+                    salt.enforceState(pepperEnv, 'I@grafana:client', 'grafana.client')
+                } catch (Exception er) {
+                    errorOccured = true
+                    common.errorMsg("Upgrade of docker components failed. Please fix it manually.")
+                    return
+                }
+            }
+        }
+    }
+}
diff --git a/tag-git-repos.groovy b/tag-git-repos.groovy
index 312ec9e..68fcdcd 100644
--- a/tag-git-repos.groovy
+++ b/tag-git-repos.groovy
@@ -16,9 +16,23 @@
 
 def gitRepoAddTag(repoURL, repoName, tag, credentials, ref = "HEAD"){
   common.infoMsg("Tagging: ${repoURL} ${ref} => ${tag}")
-  git.checkoutGitRepository(repoName, repoURL, "master", credentials)
+  checkout([
+    $class: 'GitSCM',
+    branches: [
+      [name: 'FETCH_HEAD'],
+    ],
+    userRemoteConfigs: [
+      [url: repoURL, refspec: ref, credentialsId: credentials],
+    ],
+    extensions: [
+      [$class: 'PruneStaleBranch'],
+      [$class: 'RelativeTargetDirectory', relativeTargetDir: repoName],
+      [$class: 'SubmoduleOption', disableSubmodules: true],
+      [$class: 'UserIdentity', name: 'MCP CI', email: 'ci+infra@mirantis.com'],
+    ],
+  ])
   dir(repoName) {
-    sh "git tag -f -a ${tag} ${ref} -m \"Release of mcp version ${tag}\""
+    sh "git tag -f -a ${tag} -m \"Release of mcp version ${tag}\""
     sshagent([credentials]) {
       sh "git push -f origin ${tag}:refs/tags/${tag}"
     }