Merge "Fixed docker image build pipeline"
diff --git a/artifactory-promote-docker-image.groovy b/artifactory-promote-docker-image.groovy
new file mode 100644
index 0000000..e278f05
--- /dev/null
+++ b/artifactory-promote-docker-image.groovy
@@ -0,0 +1,67 @@
+#!groovy
+
+/**
+ *
+ * Promote docker image from one artifactory repository (development) to
+ * another (production)
+ *
+ * Expected parameters:
+ *   REPO_SRC          Source Artifactory repository (default 'docker-dev-local')
+ *   REPO_DST          Destination Artifactory repository (default 'docker-prod-local')
+ *   IMAGE_SRC         Source image name (without docker registry!) to promote (required)
+ *   IMAGE_DST         Destination image (default same as IMAGE_SRC)
+ *
+ *   COPY_IMAGE        Copy image instead of moving (default 'true')
+ *
+ *   ARTIFACTORY_URL   Base URL of Artifactory instance, i.e. without `/api/...` path.
+ *                       (default 'https://artifactory.mcp.mirantis.net/artifactory/')
+ *   ARTIFACTORY_CREDS Credentials to login into Artifactory (default 'artifactory')
+ *
+ *   SLAVE_LABEL       Label of the slave to run job (default 'master')
+ *
+ *   Slave requirements: curl installed
+ *
+ */
+
+import groovy.json.JsonOutput
+
+String repo_src = env.REPO_SRC ?: 'docker-dev-local'
+String repo_dst = env.REPO_DST ?: 'docker-prod-local'
+String image_src = env.IMAGE_SRC
+String image_dst = env.IMAGE_DST ?: env.IMAGE_SRC
+
+boolean copy_image = (env.COPY_IMAGE ?: 'true').toBoolean()
+
+String artifactory_url = env.ARTIFACTORY_URL ?: 'https://artifactory.mcp.mirantis.net/artifactory/'
+String artifactory_creds = env.ARTIFACTORY_CREDS ?: 'artifactory'
+
+String slave_label = env.SLAVE_LABEL ?: 'master'
+
+// Delimiter for splitting docker image name and tag (to avoid codeNarc DRY warning)
+String _colon = ':'
+
+String img_src_name, img_src_tag
+String img_dst_name, img_dst_tag
+
+node(slave_label) {
+    (img_src_name, img_src_tag) = image_src.tokenize(_colon)
+    (img_dst_name, img_dst_tag) = image_dst.tokenize(_colon)
+
+    String api_req = JsonOutput.toJson([
+        targetRepo: repo_dst,
+        dockerRepository: img_src_name,
+        targetDockerRepository: img_dst_name,
+        tag: img_src_tag,
+        targetTag: img_dst_tag,
+        copy: copy_image,
+    ])
+
+    withCredentials([usernameColonPassword(credentialsId: artifactory_creds, variable: 'USERPASS')]) {
+        sh """
+            curl -fLsS \
+                -u \$USERPASS \
+                -X POST -d '${api_req}' -H 'Content-Type: application/json' \
+                '${artifactory_url}api/docker/${repo_src}/v2/promote'
+        """
+    }
+}
diff --git a/build-debian-packages-pipeline.groovy b/build-debian-packages-pipeline.groovy
index 0d9839f..4220b7a 100644
--- a/build-debian-packages-pipeline.groovy
+++ b/build-debian-packages-pipeline.groovy
@@ -52,9 +52,17 @@
         if (debian_branch) {
           pollBranches.add([name:DEBIAN_BRANCH])
         }
+        def extensions = [[$class: 'CleanCheckout']]
+        def userRemoteConfigs = [[credentialsId: SOURCE_CREDENTIALS, url: SOURCE_URL]]
+        // Checkout specified refspec to local branch
+        if (common.validInputParam('SOURCE_REFSPEC')) {
+          extensions.add([$class: 'BuildChooserSetting', buildChooser: [$class: 'GerritTriggerBuildChooser']])
+          extensions.add([$class: 'LocalBranch', localBranch: SOURCE_BRANCH])
+          userRemoteConfigs[0]['refspec'] = SOURCE_REFSPEC
+        }
         checkout changelog: true, poll: false,
           scm: [$class: 'GitSCM', branches: pollBranches, doGenerateSubmoduleConfigurations: false,
-          extensions: [[$class: 'CleanCheckout']],  submoduleCfg: [], userRemoteConfigs: [[credentialsId: SOURCE_CREDENTIALS, url: SOURCE_URL]]]
+          extensions: extensions,  submoduleCfg: [], userRemoteConfigs: userRemoteConfigs]
         if (debian_branch){
           sh("git checkout "+DEBIAN_BRANCH)
         }
@@ -62,7 +70,12 @@
       debian.cleanup(OS+":"+DIST)
     }
     stage("build-source") {
-      debian.buildSource("src", OS+":"+DIST, snapshot, 'Jenkins', 'autobuild@mirantis.com', revisionPostfix)
+      // If SOURCE_REFSPEC is defined, the refspec is checked out to a local branch, which must be built instead of the origin branch.
+      if (common.validInputParam('SOURCE_REFSPEC')) {
+        debian.buildSource("src", OS+":"+DIST, snapshot, 'Jenkins', 'autobuild@mirantis.com', revisionPostfix, '')
+      } else {
+        debian.buildSource("src", OS+":"+DIST, snapshot, 'Jenkins', 'autobuild@mirantis.com', revisionPostfix)
+      }
       archiveArtifacts artifacts: "build-area/*.dsc"
       archiveArtifacts artifacts: "build-area/*_source.changes"
       archiveArtifacts artifacts: "build-area/*.tar.*"
diff --git a/build-debian-packages-telegraf.groovy b/build-debian-packages-telegraf.groovy
index dde098e..efcddaa 100644
--- a/build-debian-packages-telegraf.groovy
+++ b/build-debian-packages-telegraf.groovy
@@ -4,7 +4,6 @@
 def aptly = new com.mirantis.mk.Aptly()
 
 def timestamp = common.getDatetime()
-def version = "1.3~${timestamp}"
 
 node('docker') {
     try{
@@ -48,13 +47,13 @@
             }
             stage("build package") {
                 img.inside{
-                    sh("""wget https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz &&
-                        tar xf go1.8.1.linux-amd64.tar.gz &&
+                    sh("""wget https://storage.googleapis.com/golang/go1.9.2.linux-amd64.tar.gz &&
+                        tar xf go1.9.2.linux-amd64.tar.gz &&
                         export GOROOT=\$PWD/go &&
                         export PATH=\$PATH:\$GOROOT/bin &&
                         export GOPATH=\$PWD &&
                         cd src/github.com/influxdata/telegraf &&
-                        scripts/build.py --package --version=\"${version}\" --platform=linux --arch=amd64""")
+                        scripts/build.py --package --platform=linux --arch=amd64""")
                 }
                 archiveArtifacts artifacts: "${workingDir}/telegraf/build/*.deb"
             }
diff --git a/build-mirror-image.groovy b/build-mirror-image.groovy
index 8aca89c..7a41304 100644
--- a/build-mirror-image.groovy
+++ b/build-mirror-image.groovy
@@ -46,7 +46,7 @@
         def workspace = common.getWorkspace()
         rcFile = openstack.createOpenstackEnv(OS_URL, OS_CREDENTIALS_ID, OS_PROJECT, "default", "", "default", "2", "")
         openstackEnv = String.format("%s/venv", workspace)
-        def openstackVersion = "ocata"
+        def openstackVersion = OS_VERSION
 
         VM_IP_DELAY = VM_IP_DELAY as Integer
         VM_IP_RETRIES = VM_IP_RETRIES as Integer
@@ -59,7 +59,7 @@
                 sh "mkdir -p ${workspace}/tmp"
             }
 
-            sh "wget https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/master/mirror-image/salt-bootstrap.sh"
+            sh "wget https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${SCRIPTS_REF}/mirror-image/salt-bootstrap.sh"
             openstack.setupOpenstackVirtualenv(openstackEnv, openstackVersion)
         }
 
@@ -100,24 +100,25 @@
 
         stage("Create Docker Registry"){
             common.infoMsg("Creating Docker Registry")
-            salt.enforceState(venvPepper, '*apt*', ['docker.host'], true, false, null, false, -1, 2)
-            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['docker run --restart always -d -p 5000:5000 --name registry registry:2'], null, true)
-            salt.enforceState(venvPepper, '*apt*', ['docker.client.registry'], true, false, null, false, -1, 2)
-            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['docker system prune --all --force'], null, true)
+            salt.enforceState(venvPepper, '*apt*', ["docker.host"], true, false, null, false, -1, 2)
+            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ["docker run --restart always -d -p 5000:5000 --name registry registry:2"], null, true)
+            salt.enforceState(venvPepper, '*apt*', ["docker.client.registry"], true, false, null, false, -1, 2)
+            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ["docker system prune --all --force"], null, true)
         }
 
         stage("Create Aptly"){
             common.infoMsg("Creating Aptly")
             salt.enforceState(venvPepper, '*apt*', ['aptly'], true, false, null, false, -1, 2)
             //TODO: Do it new way
-            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['aptly_mirror_update.sh -s -v', 'runas=aptly'], null, true)
-            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['nohup aptly api serve --no-lock > /dev/null 2>&1 </dev/null &', 'runas=aptly'], null, true)
-            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['aptly-publisher --timeout=1200 publish -v -c /etc/aptly-publisher.yaml --architectures amd64 --url http://127.0.0.1:8080 --recreate --force-overwrite', 'runas=aptly'], null, true)
-            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['aptly db cleanup', 'runas=aptly'], null, true)
+            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ["aptly_mirror_update.sh -s -v", "runas=aptly"], null, true)
+            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ["nohup aptly api serve --no-lock > /dev/null 2>&1 </dev/null &", "runas=aptly"], null, true)
+            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ["aptly-publisher --timeout=1200 publish -v -c /etc/aptly-publisher.yaml --architectures amd64 --url http://127.0.0.1:8080 --recreate --force-overwrite", "runas=aptly"], null, true)
+            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ["aptly db cleanup", "runas=aptly"], null, true)
             //NEW way
-            //salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.script', ['salt://aptly/files/aptly_mirror_update.sh', "args=-sv", 'runas=aptly'], null, true)
-            //salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.script', ['salt://aptly/files/aptly_publish_update.sh', "args=-acrfv", 'runas=aptly'], null, true)
-            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['wget https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/master/mirror-image/aptly/aptly-update.sh -O /srv/scripts/aptly-update.sh'], null, true)
+            //salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.script', ['salt://aptly/files/aptly_mirror_update.sh', "args=-sv", "runas=aptly"], null, true)
+            //salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.script', ['salt://aptly/files/aptly_publish_update.sh', "args=-acrfv", "runas=aptly"], null, true)
+            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ["wget https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${SCRIPTS_REF}/mirror-image/aptly/aptly-update.sh -O /srv/scripts/aptly-update.sh"], null, true)
+            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ["chmod +x /srv/scripts/aptly-update.sh"], null, true)
         }
 
         stage("Create Git mirror"){
@@ -127,24 +128,26 @@
 
         stage("Create PyPi mirror"){
             common.infoMsg("Creating PyPi mirror")
-            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['pip install pip2pi'], null, true)
-            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['wget https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/master/mirror-image/pypi_mirror/requirements.txt -O /srv/pypi_mirror/requirements.txt'], null, true)
-            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['pip2pi /srv/pypi_mirror/packages/ -r /srv/pypi_mirror/requirements.txt'], null, true)
+            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ["pip install pip2pi"], null, true)
+            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ["wget https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${SCRIPTS_REF}/mirror-image/pypi_mirror/requirements.txt -O /srv/pypi_mirror/requirements.txt"], null, true)
+            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ["pip2pi /srv/pypi_mirror/packages/ -r /srv/pypi_mirror/requirements.txt"], null, true)
         }
 
         stage("Create mirror of images"){
             common.infoMsg("Creating mirror of images")
-            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['wget https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/master/mirror-image/images_mirror/images.txt -O /srv/images.txt'], null, true)
-            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['wget https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/master/mirror-image/images_mirror/update-images.sh -O /srv/scripts/update-images.sh'], null, true)
-            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['chmod +x /srv/scripts/update-images.sh'], null, true)
-            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['/srv/scripts/update-images.sh -u http://ci.mcp.mirantis.net:8085/images'], null, true)
+            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ["wget https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${SCRIPTS_REF}/mirror-image/images_mirror/images.txt -O /srv/images.txt"], null, true)
+            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ["wget https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${SCRIPTS_REF}/mirror-image/images_mirror/update-images.sh -O /srv/scripts/update-images.sh"], null, true)
+            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ["chmod +x /srv/scripts/update-images.sh"], null, true)
+            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ["/srv/scripts/update-images.sh -u http://ci.mcp.mirantis.net:8085/images"], null, true)
         }
 
         stage("Create instance snapshot"){
-            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['rm -rf /var/lib/cloud/sem/* /var/lib/cloud/instance /var/lib/cloud/instances/*'], null, true)
-            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['cloud-init init'], null, true)
+            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ["rm -rf /var/lib/cloud/sem/* /var/lib/cloud/instance /var/lib/cloud/instances/*"], null, true)
+            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ["cloud-init init"], null, true)
 
-            openstack.runOpenstackCommand("openstack server stop mcp-offline-mirror-${dateTime}", rcFile, openstackEnv)
+            retry(3, 5){
+                openstack.runOpenstackCommand("openstack server stop mcp-offline-mirror-${dateTime}", rcFile, openstackEnv)
+            }
 
             retry(6, 30){
                 serverStatus = openstack.runOpenstackCommand("openstack server show --format value -c status mcp-offline-mirror-${dateTime}", rcFile, openstackEnv)
@@ -152,12 +155,16 @@
                     throw new ResourceException("Instance is not ready for image create.")
                 }
             }
-            openstack.runOpenstackCommand("openstack server image create --name ${IMAGE_NAME}-${dateTime} --wait mcp-offline-mirror-${dateTime}", rcFile, openstackEnv)
+            retry(3, 5){
+                openstack.runOpenstackCommand("openstack server image create --name ${IMAGE_NAME}-${dateTime} --wait mcp-offline-mirror-${dateTime}", rcFile, openstackEnv)
+            }
         }
 
         stage("Publish image"){
             common.infoMsg("Saving image ${IMAGE_NAME}-${dateTime}")
-            openstack.runOpenstackCommand("openstack image save --file ${IMAGE_NAME}-${dateTime} ${IMAGE_NAME}-${dateTime}", rcFile, openstackEnv)
+            retry(3, 5){
+                openstack.runOpenstackCommand("openstack image save --file ${IMAGE_NAME}-${dateTime} ${IMAGE_NAME}-${dateTime}", rcFile, openstackEnv)
+            }
             sh "md5sum ${IMAGE_NAME}-${dateTime} > ${IMAGE_NAME}-${dateTime}.md5"
 
             common.infoMsg("Uploading image ${IMAGE_NAME}-${dateTime}")
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index cbb09b7..4b25488 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -18,6 +18,7 @@
  *   STACK_CLEANUP_JOB          Name of job for deleting stack
  *
  *   STACK_COMPUTE_COUNT        Number of compute nodes to launch
+ *   STATIC_MGMT_NETWORK        Check if model contains static IP address definitions for all nodes
  *
  *   AWS_STACK_REGION           CloudFormation AWS region
  *   AWS_API_CREDENTIALS        AWS Access key ID with  AWS secret access key
@@ -37,6 +38,10 @@
  *  required for STACK_TYPE=physical
  *   SALT_MASTER_URL            URL of Salt master
 
+ *   BOOTSTRAP_EXTRA_REPO_PARAMS  optional parameter to define a list of extra repos with parameters
+ *                                which have to be added during bootstrap.
+ *                                Format: repo 1, repo priority 1, repo pin 1; repo 2, repo priority 2, repo pin 2;
+
  * Test settings:
  *   TEST_K8S_API_SERVER     Kubernetes API address
  *   TEST_K8S_CONFORMANCE_IMAGE   Path to docker image with conformance e2e tests
@@ -167,6 +172,18 @@
                         envParams.put('cfg_formula_pkg_revision', FORMULA_PKG_REVISION)
                     }
 
+                    // put extra repo definitions
+                    if (common.validInputParam('BOOTSTRAP_EXTRA_REPO_PARAMS')) {
+                        common.infoMsg("Setting additional repo during bootstrap to ${BOOTSTRAP_EXTRA_REPO_PARAMS}")
+                        envParams.put('cfg_bootstrap_extra_repo_params', BOOTSTRAP_EXTRA_REPO_PARAMS)
+                    }
+
+                    // put extra salt-formulas
+                    if (common.validInputParam('EXTRA_FORMULAS')) {
+                        common.infoMsg("Setting extra salt-formulas to ${EXTRA_FORMULAS}")
+                        envParams.put('cfg_extra_formulas', EXTRA_FORMULAS)
+                    }
+
                     openstack.createHeatStack(openstackCloud, STACK_NAME, STACK_TEMPLATE, envParams, HEAT_STACK_ENVIRONMENT, venv)
                 }
 
@@ -264,37 +281,21 @@
 
         if (common.checkContains('STACK_INSTALL', 'core')) {
             stage('Install core infrastructure') {
-                orchestrate.installFoundationInfra(venvPepper)
+                def staticMgmtNetwork = false
+                if (common.validInputParam('STATIC_MGMT_NETWORK')) {
+                    staticMgmtNetwork = STATIC_MGMT_NETWORK.toBoolean()
+                }
+                orchestrate.installFoundationInfra(venvPepper, staticMgmtNetwork)
 
                 if (common.checkContains('STACK_INSTALL', 'kvm')) {
                     orchestrate.installInfraKvm(venvPepper)
-                    orchestrate.installFoundationInfra(venvPepper)
+                    orchestrate.installFoundationInfra(venvPepper, staticMgmtNetwork)
                 }
 
                 orchestrate.validateFoundationInfra(venvPepper)
             }
         }
 
-        // install ceph
-        if (common.checkContains('STACK_INSTALL', 'ceph')) {
-            stage('Install Ceph MONs') {
-                orchestrate.installCephMon(venvPepper)
-            }
-
-            stage('Install Ceph OSDs') {
-                orchestrate.installCephOsd(venvPepper)
-            }
-
-
-            stage('Install Ceph clients') {
-                orchestrate.installCephClient(venvPepper)
-            }
-
-            stage('Connect Ceph') {
-                orchestrate.connectCeph(venvPepper)
-            }
-        }
-
         // install k8s
         if (common.checkContains('STACK_INSTALL', 'k8s')) {
 
@@ -400,8 +401,35 @@
 
         }
 
+        // install ceph
+        if (common.checkContains('STACK_INSTALL', 'ceph')) {
+            stage('Install Ceph MONs') {
+                orchestrate.installCephMon(venvPepper)
+            }
+
+            stage('Install Ceph OSDs') {
+                orchestrate.installCephOsd(venvPepper)
+            }
+
+
+            stage('Install Ceph clients') {
+                orchestrate.installCephClient(venvPepper)
+            }
+
+            stage('Connect Ceph') {
+                orchestrate.connectCeph(venvPepper)
+            }
+        }
+
+        if (common.checkContains('STACK_INSTALL', 'oss')) {
+          stage('Install Oss infra') {
+            orchestrate.installOssInfra(venvPepper)
+          }
+        }
+
         if (common.checkContains('STACK_INSTALL', 'cicd')) {
             stage('Install Cicd') {
+                orchestrate.installInfra(venvPepper)
                 orchestrate.installDockerSwarm(venvPepper)
                 orchestrate.installCicd(venvPepper)
             }
@@ -421,6 +449,17 @@
             }
         }
 
+        if (common.checkContains('STACK_INSTALL', 'oss')) {
+          stage('Install OSS') {
+            if (!common.checkContains('STACK_INSTALL', 'stacklight')) {
+              // In case if StackLightv2 enabled containers already started
+              orchestrate.installDockerSwarm(venvPepper)
+              salt.enforceState(venvPepper, 'I@docker:swarm:role:master and I@devops_portal:config', 'docker.client', true)
+            }
+            orchestrate.installOss(venvPepper)
+          }
+        }
+
         //
         // Test
         //
diff --git a/cvp-ha.groovy b/cvp-ha.groovy
index 1e94251..781f5b4 100644
--- a/cvp-ha.groovy
+++ b/cvp-ha.groovy
@@ -1,16 +1,23 @@
 /**
  *
- * Launch validation of the cloud
+ * Launch HA test for the cloud
  *
  * Expected parameters:
- *   SALT_MASTER_URL             URL of Salt master
- *   SALT_MASTER_CREDENTIALS     Credentials to the Salt API
  *
- *   TEST_IMAGE                  Docker image link
- *   TARGET_NODE                 Salt target for tempest node
- *   TEMPEST_TEST_SET            If not false, run tests matched to pattern only
- *   RUN_TEMPEST_TESTS           If not false, run Tempest tests
- *   RUN_RALLY_TESTS             If not false, run Rally tests
+ *   SALT_MASTER_URL             URL of Salt master
+ *   SALT_MASTER_CREDENTIALS     Credentials that are used in this Jenkins for accessing Salt master (usually "salt")
+ *   PROXY                       Proxy address (if any) for accessing the Internet. It will be used for cloning repos and installing pip dependencies
+ *   TEST_IMAGE                  Docker image link to use for running container with testing tools.
+ *   TOOLS_REPO                  URL of repo where testing tools, scenarios, configs are located
+ *
+ *   DEBUG_MODE                  If you need to debug (keep container after test), please enable this
+ *   MANUAL_CONFIRMATION         Ask for confirmation before doing something destructive (reboot/shutdown node)
+ *   RETRY_CHECK_STATUS          Number of retries to check node status
+ *   SKIP_LIST_PATH              Path to tempest skip list file in TOOLS_REPO
+ *   TARGET_NODES                Nodes to test
+ *   TEMPEST_REPO                Tempest repo to clone and use
+ *   TEMPEST_TARGET_NODE         Node, where tests will be executed
+ *   TEMPEST_TEST_PATTERN        Tests to run during HA scenarios
  *
  */
 
@@ -156,7 +163,7 @@
         }
 
         stage('Collect results') {
-            val.addFiles(saltMaster, TEMPEST_TARGET_NODE, remote_artifacts_dir, artifacts_dir)
+            validate.addFiles(saltMaster, TEMPEST_TARGET_NODE, remote_artifacts_dir, artifacts_dir)
             archiveArtifacts artifacts: "${artifacts_dir}/*"
             if (DEBUG_MODE == 'false') {
                 validate.runCleanup(saltMaster, TEMPEST_TARGET_NODE)
diff --git a/docker-mirror-images.groovy b/docker-mirror-images.groovy
index 91a65a6..8f0373c 100644
--- a/docker-mirror-images.groovy
+++ b/docker-mirror-images.groovy
@@ -22,7 +22,7 @@
         def imageName = matcher.group(1)
         return imageName
     }else{
-        throw new IllegalFormatException("Wrong format of image name.")
+        throw new IllegalArgumentException("Wrong format of image name.")
     }
 }
 
@@ -32,13 +32,18 @@
             def creds = common.getPasswordCredentials(TARGET_REGISTRY_CREDENTIALS_ID)
             sh "docker login --username=${creds.username} --password=${creds.password.toString()} ${REGISTRY_URL}"
             def images = IMAGE_LIST.tokenize('\n')
-            def imageName
+            def imageName, imagePath, targetRegistry, imageArray
             for (image in images){
-                sh "echo ${image}"
-                imageName = getImageName(image)
-                sh "docker pull ${image}"
-                sh "docker tag ${image} ${TARGET_REGISTRY}/${imageName}:${IMAGE_TAG}"
-                sh "docker push ${TARGET_REGISTRY}/${imageName}:${IMAGE_TAG}"
+                if(image.trim().indexOf(' ') == -1){
+                    throw new IllegalArgumentException("Wrong format of image and target repository input")
+                }
+                imageArray = image.trim().tokenize(' ')
+                imagePath = imageArray[0]
+                targetRegistry = imageArray[1]
+                imageName = getImageName(imagePath)
+                sh """docker pull ${imagePath}
+                      docker tag ${imagePath} ${targetRegistry}/${imageName}:${IMAGE_TAG}
+                      docker push ${targetRegistry}/${imageName}:${IMAGE_TAG}"""
             }
         }
     } catch (Throwable e) {
diff --git a/opencontrail40-upgrade.groovy b/opencontrail40-upgrade.groovy
new file mode 100644
index 0000000..1381865
--- /dev/null
+++ b/opencontrail40-upgrade.groovy
@@ -0,0 +1,390 @@
+/**
+ * Update packages on given nodes
+ *
+ * Expected parameters:
+ *   SALT_MASTER_CREDENTIALS        Credentials to the Salt API.
+ *   SALT_MASTER_URL                Full Salt API address [http://10.10.10.1:8000].
+ *   STAGE_CONTROLLERS_UPGRADE      Run upgrade on Opencontrail controllers and analytics (bool)
+ *   STAGE_COMPUTES_UPGRADE         Run upgrade on Opencontrail compute nodes  (bool)
+ *   COMPUTE_TARGET_SERVERS         Salt compound target to match nodes to be updated [*, G@osfamily:debian].
+ *   COMPUTE_TARGET_SUBSET_LIVE     Number of selected nodes to live apply selected package update.
+ *   STAGE_CONTROLLERS_ROLLBACK     Run rollback on Opencontrail controllers  (bool)
+ *   STAGE_COMPUTES_ROLLBACK        Run rollback on Opencontrail compute nodes  (bool)
+ *
+**/
+
+def common = new com.mirantis.mk.Common()
+def salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
+
+def pepperEnv = "pepperEnv"
+def targetLiveSubset
+def targetLiveAll
+def minions
+def args
+def probe = 1
+def command = 'cmd.shell'
+
+def controlPkgs = 'contrail-config,contrail-config-openstack,contrail-control,contrail-dns,contrail-lib,contrail-nodemgr,contrail-utils,contrail-web-controller,contrail-web-core,neutron-plugin-contrail,python-contrail,contrail-database'
+def analyticsPkgs = 'contrail-analytics,contrail-lib,contrail-nodemgr,contrail-utils,python-contrail,contrail-database'
+//def cmpPkgs = ['contrail-lib', 'contrail-nodemgr', 'contrail-utils', 'contrail-vrouter-agent', 'contrail-vrouter-utils', 'python-contrail', 'python-contrail-vrouter-api', 'python-opencontrail-vrouter-netns', 'contrail-vrouter-dkms']
+def CMP_PKGS = 'contrail-lib contrail-nodemgr contrail-utils contrail-vrouter-agent contrail-vrouter-utils python-contrail python-contrail-vrouter-api python-opencontrail-vrouter-netns contrail-vrouter-dkms'
+def KERNEL_MODULE_RELOAD = 'service supervisor-vrouter stop;ifdown vhost0;rmmod vrouter;modprobe vrouter;ifup vhost0;service supervisor-vrouter start;'
+def analyticsServices = ['supervisor-analytics', 'supervisor-database', 'zookeeper']
+def controlServices = ['contrail-webui-jobserver', 'contrail-webui-webserver', 'supervisor-config', 'ifmap-server', 'supervisor-control', 'supervisor-database', 'zookeeper']
+
+def void runCommonCommands(target, command, args, check, salt, pepperEnv, common) {
+
+    out = salt.runSaltCommand(pepperEnv, 'local', ['expression': target, 'type': 'compound'], command, null, args, null)
+    salt.printSaltCommandResult(out)
+    // wait until $check is in correct state
+    if ( check == "nodetool status" ) {
+        salt.commandStatus(pepperEnv, target, check, 'Status=Up')
+    } else if ( check == "doctrail all contrail-status" ) {
+        salt.commandStatus(pepperEnv, target, "${check} | grep -v == | grep -v FOR | grep -v \\* | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup | grep -v -F /var/crashes/", null, false)
+    }
+
+    //out = salt.runSaltCommand(pepperEnv, 'local', ['expression': target, 'type': 'compound'], command, null, check, null)
+    //salt.printSaltCommandResult(out)
+    //input message: "Please check the output of \'${check}\' and continue if it is correct."
+}
+
+node() {
+
+    stage('Setup virtualenv for Pepper') {
+        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+    }
+
+    if (STAGE_CONTROLLERS_UPGRADE.toBoolean() == true) {
+
+        stage('Opencontrail controllers upgrade') {
+            try {
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:database or I@neutron:server', 'saltutil.refresh_pillar', [], null, true)
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:database or I@neutron:server', 'saltutil.sync_all', [], null, true)
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:database', 'file.remove', ["/etc/apt/sources.list.d/mcp_opencontrail.list"], null, true)
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:database', 'file.remove', ["/etc/apt/sources.list.d/cassandra.list"], null, true)
+                salt.enforceState(pepperEnv, 'I@opencontrail:database or I@neutron:server', 'linux.system.repo')
+
+            } catch (Exception er) {
+                common.errorMsg("Opencontrail component on I@opencontrail:control or I@opencontrail:collector or I@neutron:server probably failed to be replaced.")
+                throw er
+            }
+
+            try {
+                controllerImage = salt.getPillar(pepperEnv, "I@opencontrail:control and *01*", "docker:client:compose:opencontrail_api:service:controller:image")
+                analyticsImage = salt.getPillar(pepperEnv, "I@opencontrail:collector and *01*", "docker:client:compose:opencontrail_api:service:analytics:image")
+                analyticsdbImage = salt.getPillar(pepperEnv, "I@opencontrail:collector and *01*", "docker:client:compose:opencontrail_api:service:analyticsdb:image")
+                salt.enforceState(pepperEnv, 'I@opencontrail:database', 'docker.host')
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:database', 'state.sls', ['opencontrail', 'exclude=opencontrail.client'])
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'dockerng.pull', [controllerImage])
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'dockerng.pull', [analyticsImage])
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'dockerng.pull', [analyticsdbImage])
+
+            } catch (Exception er) {
+                common.errorMsg("Docker images on I@opencontrail:control or I@opencontrail:collector probably failed to be downloaded.")
+                throw er
+            }
+
+            salt.enforceState(pepperEnv, 'I@zookeeper:backup:server', 'zookeeper.backup')
+            salt.enforceState(pepperEnv, 'I@zookeeper:backup:client', 'zookeeper.backup')
+
+            try {
+                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "su root -c '/usr/local/bin/zookeeper-backup-runner.sh'")
+            } catch (Exception er) {
+                common.errorMsg('Zookeeper failed to backup. Please fix it before continuing.')
+                throw er
+            }
+
+            salt.enforceState(pepperEnv, 'I@cassandra:backup:server', 'cassandra.backup')
+            salt.enforceState(pepperEnv, 'I@cassandra:backup:client', 'cassandra.backup')
+
+            try {
+                salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', "su root -c '/usr/local/bin/cassandra-backup-runner-call.sh'")
+            } catch (Exception er) {
+                common.errorMsg('Cassandra failed to backup. Please fix it before continuing.')
+                throw er
+            }
+            
+            salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.stop', ['neutron-server'])
+
+            try {
+                for (service in analyticsServices) {
+                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'service.stop', [service])
+                }
+                result = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'file.directory_exists', ['/var/lib/analyticsdb/data'])['return'][0].values()[0]
+                if (result == false) {
+                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'file.move', ['/var/lib/cassandra', '/var/lib/analyticsdb'])
+                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'file.copy', ['/var/lib/zookeeper', '/var/lib/analyticsdb_zookeeper_data','recurse=True'])
+                }
+                check = 'doctrail all contrail-status'
+                salt.enforceState(pepperEnv, 'I@opencontrail:collector', 'docker.client')
+                runCommonCommands('I@opencontrail:collector and *01*', command, args, check, salt, pepperEnv, common)
+            } catch (Exception er) {
+                common.errorMsg("Opencontrail Analytics failed to be upgraded.")
+                throw er
+            }
+            try {
+                check = 'doctrail all contrail-status'
+                for (service in controlServices) {
+                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', [service])
+                }
+                result = salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'file.directory_exists', ['/var/lib/configdb/data'])['return'][0].values()[0]
+                if (result == false) {
+                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'file.copy', ['/var/lib/cassandra', '/var/lib/configdb', 'recurse=True'])
+                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'file.copy', ['/var/lib/zookeeper', '/var/lib/config_zookeeper_data', 'recurse=True'])
+                }
+                salt.enforceState(pepperEnv, 'I@opencontrail:control', 'docker.client')
+                runCommonCommands('I@opencontrail:control and *01*', command, args, check, salt, pepperEnv, common)
+                salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'pkg.install', ['neutron-plugin-contrail,contrail-heat,python-contrail'])
+                salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.start', ['neutron-server'])
+            } catch (Exception er) {
+                common.errorMsg("Opencontrail Controller failed to be upgraded.")
+                throw er
+            }
+
+        }
+        salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'archive.tar', ['zcvf', '/root/contrail-database.tgz', '/var/lib/cassandra'])
+        salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'archive.tar', ['zcvf', '/root/contrail-zookeeper.tgz', '/var/lib/zookeeper'])
+        salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'archive.tar', ['zcvf', '/root/contrail-analytics-database.tgz', '/var/lib/cassandra'])
+        salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'archive.tar', ['zcvf', '/root/contrail-analytics-zookeeper.tgz', '/var/lib/zookeeper'])
+        //salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'pkg.remove', [controlPkgs])
+        //salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'pkg.remove', [analyticsPkgs])
+        for (service in controlServices) {
+            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.disable', [service])
+        }
+        for (service in analyticsServices) {
+            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'service.disable', [service])
+        }
+    }
+
+
+    if (STAGE_COMPUTES_UPGRADE.toBoolean() == true) {
+
+        try {
+
+            stage('List targeted compute servers') {
+                minions = salt.getMinions(pepperEnv, COMPUTE_TARGET_SERVERS)
+
+                if (minions.isEmpty()) {
+                    throw new Exception("No minion was targeted")
+                }
+
+                targetLiveSubset = minions.subList(0, Integer.valueOf(COMPUTE_TARGET_SUBSET_LIVE)).join(' or ')
+                targetLiveSubsetProbe = minions.subList(0, probe).join(' or ')
+
+                targetLiveAll = minions.join(' or ')
+                common.infoMsg("Found nodes: ${targetLiveAll}")
+                common.infoMsg("Selected sample nodes: ${targetLiveSubset}")
+            }
+
+            stage('Confirm upgrade on sample nodes') {
+                input message: "Do you want to continue with the Opencontrail compute upgrade on the following sample nodes? ${targetLiveSubset}"
+            }
+
+            stage("Opencontrail compute upgrade on sample nodes") {
+
+                try {
+                    salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'saltutil.refresh_pillar', [], null, true)
+                    salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'saltutil.sync_all', [], null, true)
+                    salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'file.remove', ["/etc/apt/sources.list.d/mcp_opencontrail.list"], null, true)
+                    salt.enforceState(pepperEnv, targetLiveSubset, 'linux.system.repo')
+                } catch (Exception er) {
+                    common.errorMsg("Opencontrail component on ${targetLiveSubset} probably failed to be replaced. Please check the repository configuration on the target nodes before continuing.")
+                    throw er
+                }
+
+                args = "export DEBIAN_FRONTEND=noninteractive; apt install -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS}  -y;"
+                check = 'contrail-status'
+
+                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, null)
+                salt.printSaltCommandResult(out)
+
+                try {
+                    salt.enforceState(pepperEnv, targetLiveSubset, 'opencontrail')
+                } catch (Exception er) {
+                    common.errorMsg("Opencontrail state was executed on ${targetLiveSubset} and failed please fix it manually.")
+                }
+
+                salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
+
+                //sleep(10)
+                salt.commandStatus(pepperEnv, targetLiveSubset, "${check} | grep -v == | grep -v active | grep -v -F /var/crashes/", null, false)
+
+                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, check, null)
+                salt.printSaltCommandResult(out)
+            }
+
+            stage('Confirm upgrade on all targeted nodes') {
+                input message: "Do you want to continue with the Opencontrail compute upgrade on all the targeted nodes? ${targetLiveAll} nodes?"
+            }
+            stage("Opencontrail compute upgrade on all targeted nodes") {
+
+                try {
+                    salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'saltutil.refresh_pillar', [], null, true)
+                    salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'saltutil.sync_all', [], null, true)
+                    salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'file.remove', ["/etc/apt/sources.list.d/mcp_opencontrail.list"], null, true)
+                    salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo')
+                } catch (Exception er) {
+                    common.errorMsg("Opencontrail component on ${targetLiveAll} probably failed to be replaced. Please check the repository configuration on the target nodes before continuing.")
+                    throw er
+                }
+
+                args = "export DEBIAN_FRONTEND=noninteractive; apt install -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS}  -y;"
+                check = 'contrail-status'
+
+                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, null)
+                salt.printSaltCommandResult(out)
+
+                try {
+                    salt.enforceState(pepperEnv, targetLiveAll, 'opencontrail')
+                } catch (Exception er) {
+                    common.errorMsg("Opencontrail state was executed on ${targetLiveAll} and failed please fix it manually.")
+                }
+
+                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
+                //sleep(10)
+                salt.commandStatus(pepperEnv, targetLiveAll, "${check} | grep -v == | grep -v active | grep -v -F /var/crashes/", null, false)
+
+                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, check, null)
+                salt.printSaltCommandResult(out)
+            }
+
+        } catch (Throwable e) {
+            // If there was an error or exception thrown, the build failed
+            currentBuild.result = "FAILURE"
+            currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+            throw e
+        }
+    }
+
+
+    if (STAGE_CONTROLLERS_ROLLBACK.toBoolean() == true) {
+
+        stage('Ask for manual confirmation') {
+            input message: "Do you want to continue with the Opencontrail nodes rollback?"
+        }
+
+       stage('Opencontrail controllers rollback') {
+
+            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:database', 'saltutil.refresh_pillar', [], null, true)
+            salt.enforceState(pepperEnv, 'I@opencontrail:database', 'linux.system.repo')
+            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:database', 'cmd.shell', ['cd /etc/docker/compose/opencontrail/; docker-compose down'], null, true)
+
+            if(env["ASK_ON_ERROR"] && env["ASK_ON_ERROR"] == "true"){
+                salt.enforceState(pepperEnv, 'I@opencontrail:database and *01*', 'opencontrail.database', true)
+                salt.enforceState(pepperEnv, 'I@opencontrail:database', 'opencontrail.database', true)
+            }else{
+                try {
+                    salt.enforceState(pepperEnv, 'I@opencontrail:database and *01*', 'opencontrail.database', true)
+                } catch (Exception e) {
+                    common.warningMsg('Exception in state opencontrail.database on I@opencontrail:database and *01*')
+                }
+                try {
+                    salt.enforceState(pepperEnv, 'I@opencontrail:database', 'opencontrail.database', true)
+                } catch (Exception e) {
+                    common.warningMsg('Exception in state opencontrail.database on I@opencontrail:database')
+                }
+            }
+            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and *01*', 'state.sls', ['opencontrail', 'exclude=opencontrail.client'])
+            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'state.sls', ['opencontrail', 'exclude=opencontrail.client'])
+            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:collector', 'state.sls', ['opencontrail', 'exclude=opencontrail.client'])
+        }
+    }
+
+    if (STAGE_COMPUTES_ROLLBACK.toBoolean() == true) {
+
+        try {
+
+            stage('List targeted compute servers') {
+                minions = salt.getMinions(pepperEnv, COMPUTE_TARGET_SERVERS)
+
+                if (minions.isEmpty()) {
+                    throw new Exception("No minion was targeted")
+                }
+
+                targetLiveSubset = minions.subList(0, Integer.valueOf(COMPUTE_TARGET_SUBSET_LIVE)).join(' or ')
+                targetLiveSubsetProbe = minions.subList(0, probe).join(' or ')
+
+                targetLiveAll = minions.join(' or ')
+                common.infoMsg("Found nodes: ${targetLiveAll}")
+                common.infoMsg("Selected sample nodes: ${targetLiveSubset}")
+            }
+
+            stage('Confirm rollback on sample nodes') {
+                input message: "Do you want to continue with the Opencontrail compute rollback on the following sample nodes? ${targetLiveSubset}"
+            }
+
+            stage("Opencontrail compute rollback on sample nodes") {
+
+                try {
+                    salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'file.remove', ["/etc/apt/sources.list.d/mcp_opencontrail.list"], null, true)
+                    salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'saltutil.refresh_pillar', [], null, true)
+                    salt.enforceState(pepperEnv, targetLiveSubset, 'linux.system.repo')
+                } catch (Exception er) {
+                    common.errorMsg("Opencontrail component on ${targetLiveSubset} probably failed to be replaced. Please check the repository configuration on the target nodes before continuing.")
+                    throw er
+                }
+
+                args = "export DEBIAN_FRONTEND=noninteractive; apt install --allow-downgrades -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS}  -y;"
+                check = 'contrail-status'
+
+                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, args, null)
+                salt.printSaltCommandResult(out)
+
+                try {
+                    salt.enforceState(pepperEnv, targetLiveSubset, 'opencontrail')
+                } catch (Exception er) {
+                    common.errorMsg("Opencontrail state was executed on ${targetLiveSubset} and failed please fix it manually.")
+                }
+
+                salt.runSaltProcessStep(pepperEnv, targetLiveSubset, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
+                //sleep(10)
+                salt.commandStatus(pepperEnv, targetLiveSubset, "${check} | grep -v == | grep -v active | grep -v -F /var/crashes/", null, false)
+
+                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveSubset, 'type': 'compound'], command, null, check, null)
+                salt.printSaltCommandResult(out)
+            }
+
+            stage('Confirm rollback on all targeted nodes') {
+                input message: "Do you want to continue with the Opencontrail compute rollback on all the targeted nodes? ${targetLiveAll} nodes?"
+            }
+
+            stage("Opencontrail compute rollback on all targeted nodes") {
+
+                try {
+                    salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'file.remove', ["/etc/apt/sources.list.d/mcp_opencontrail.list"], null, true)
+                    salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'saltutil.refresh_pillar', [], null, true)
+                    salt.enforceState(pepperEnv, targetLiveAll, 'linux.system.repo')
+                } catch (Exception er) {
+                    common.errorMsg("Opencontrail component on ${targetLiveAll} probably failed to be replaced. Please check the repository configuration on the target nodes before continuing.")
+                    throw er
+                }
+
+                args = "export DEBIAN_FRONTEND=noninteractive; apt install --allow-downgrades -o Dpkg::Options::=\"--force-confold\" -o Dpkg::Options::=\"--force-confdef\" ${CMP_PKGS}  -y;"
+                check = 'contrail-status'
+
+                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, args, null)
+                salt.printSaltCommandResult(out)
+
+                try {
+                    salt.enforceState(pepperEnv, targetLiveAll, 'opencontrail')
+                } catch (Exception er) {
+                    common.errorMsg("Opencontrail state was executed on ${targetLiveAll} and failed please fix it manually.")
+                }
+
+                salt.runSaltProcessStep(pepperEnv, targetLiveAll, 'cmd.shell', ["${KERNEL_MODULE_RELOAD}"], null, true)
+                //sleep(10)
+                salt.commandStatus(pepperEnv, targetLiveAll, "${check} | grep -v == | grep -v active | grep -v -F /var/crashes/", null, false)
+
+                out = salt.runSaltCommand(pepperEnv, 'local', ['expression': targetLiveAll, 'type': 'compound'], command, null, check, null)
+                salt.printSaltCommandResult(out)
+            }
+
+        } catch (Throwable e) {
+            // If there was an error or exception thrown, the build failed
+            currentBuild.result = "FAILURE"
+            currentBuild.description = currentBuild.description ? e.message + " " + currentBuild.description : e.message
+            throw e
+        }
+    }
+}
diff --git a/release-mcp-version.groovy b/release-mcp-version.groovy
new file mode 100644
index 0000000..28b0bb4
--- /dev/null
+++ b/release-mcp-version.groovy
@@ -0,0 +1,108 @@
+/**
+ *
+ * Release MCP
+ *
+ * Expected parameters:
+ *   MCP_VERSION
+ *   RELEASE_APTLY
+ *   RELEASE_DOCKER
+ *   RELEASE_GIT
+ *   APTLY_URL
+ *   APTLY_STORAGES
+ *   DOCKER_CREDENTIALS
+ *   DOCKER_URL
+ *   DOCKER_IMAGES
+ *   GIT_CREDENTIALS
+ *   GIT_REPO_LIST
+ */
+
+common = new com.mirantis.mk.Common()
+git = new com.mirantis.mk.Git()
+
+def triggerAptlyPromoteJob(aptlyUrl, components, diffOnly, dumpPublish, packages, recreate, source, storages, target){
+  build job: "aptly-promote-all-testing-stable", parameters: [
+    [$class: 'StringParameterValue', name: 'APTLY_URL', value: aptlyUrl],
+    [$class: 'StringParameterValue', name: 'COMPONENTS', value: components],
+    [$class: 'BooleanParameterValue', name: 'DIFF_ONLY', value: diffOnly],
+    [$class: 'BooleanParameterValue', name: 'DUMP_PUBLISH', value: dumpPublish],
+    [$class: 'StringParameterValue', name: 'PACKAGES', value: packages],
+    [$class: 'BooleanParameterValue', name: 'RECREATE', value: recreate],
+    [$class: 'StringParameterValue', name: 'SOURCE', value: source],
+    [$class: 'StringParameterValue', name: 'STORAGES', value: storages],
+    [$class: 'StringParameterValue', name: 'TARGET', value: target]
+  ]
+}
+
+def triggerDockerMirrorJob(dockerCredentials, dockerRegistryUrl, mcpVersion, imageList) {
+  build job: "docker-images-mirror", parameters: [
+    [$class: 'StringParameterValue', name: 'TARGET_REGISTRY_CREDENTIALS_ID', value: dockerCredentials],
+    [$class: 'StringParameterValue', name: 'REGISTRY_URL', value: dockerRegistryUrl],
+    [$class: 'StringParameterValue', name: 'IMAGE_TAG', value: mcpVersion],
+    [$class: 'StringParameterValue', name: 'IMAGE_LIST', value: imageList]
+  ]
+}
+
+def gitRepoAddTag(repoURL, repoName, tag, credentials, ref = "HEAD"){
+    git.checkoutGitRepository(repoName, repoURL, "master", credentials)
+    dir(repoName) {
+        def checkTag = sh(script: "git tag -l ${tag}", returnStdout: true)
+        if(checkTag == ""){
+            sh "git tag -a ${tag} ${ref} -m \"Release of mcp version ${tag}\""
+        }else{
+            def currentTagRef = sh(script: "git rev-list -n 1 ${tag}", returnStdout: true).trim()
+            if(currentTagRef.equals(ref)){
+                common.infoMsg("Tag is already on the right ref")
+                return
+            }
+            else{
+                sshagent([credentials]) {
+                    sh "git push --delete origin ${tag}"
+                }
+                sh "git tag --delete ${tag}"
+                sh "git tag -a ${tag} ${ref} -m \"Release of mcp version ${tag}\""
+            }
+        }
+        sshagent([credentials]) {
+            sh "git push origin ${tag}"
+        }
+    }
+}
+
+node() {
+    try {
+        if(RELEASE_APTLY.toBoolean())
+        {
+            stage("Release Aptly"){
+                triggerAptlyPromoteJob(APTLY_URL, 'all', false, true, 'all', false, '(.*)/testing', APTLY_STORAGES, '{0}/stable')
+                triggerAptlyPromoteJob(APTLY_URL, 'all', false, true, 'all', false, '(.*)/stable', APTLY_STORAGES, "{0}/${MCP_VERSION}")
+            }
+        }
+        if(RELEASE_DOCKER.toBoolean())
+        {
+            stage("Release Docker"){
+                triggerDockerMirrorJob(DOCKER_CREDENTIALS, DOCKER_URL, MCP_VERSION, DOCKER_IMAGES)
+            }
+        }
+        if(RELEASE_GIT.toBoolean())
+        {
+            stage("Release Git"){
+                def repos = GIT_REPO_LIST.tokenize('\n')
+                def repoUrl, repoName, repoCommit, repoArray
+                for (repo in repos){
+                    if(repo.trim().indexOf(' ') == -1){
+                        throw new IllegalArgumentException("Wrong format of repository and commit input")
+                    }
+                    repoArray = repo.trim().tokenize(' ')
+                    repoName = repoArray[0]
+                    repoUrl = repoArray[1]
+                    repoCommit = repoArray[2]
+                    gitRepoAddTag(repoUrl, repoName, MCP_VERSION, GIT_CREDENTIALS, repoCommit)
+                }
+            }
+        }
+    } catch (Throwable e) {
+        // If there was an error or exception thrown, the build failed
+        currentBuild.result = "FAILURE"
+        throw e
+    }
+}
\ No newline at end of file
diff --git a/test-cookiecutter-reclass.groovy b/test-cookiecutter-reclass.groovy
index 70e74b7..abdecf9 100644
--- a/test-cookiecutter-reclass.groovy
+++ b/test-cookiecutter-reclass.groovy
@@ -82,8 +82,31 @@
     def templateContext = readYaml text: content
     def clusterName = templateContext.default_context.cluster_name
     def clusterDomain = templateContext.default_context.cluster_domain
-    git.checkoutGitRepository("${testEnv}/classes/system", RECLASS_MODEL_URL, RECLASS_MODEL_BRANCH, CREDENTIALS_ID)
-    saltModelTesting.setupAndTestNode("cfg01.${clusterDomain}", clusterName, EXTRA_FORMULAS, testEnv)
+    if (SYSTEM_GIT_URL == "") {
+        git.checkoutGitRepository("${testEnv}/classes/system", RECLASS_MODEL_URL, RECLASS_MODEL_BRANCH, CREDENTIALS_ID)
+    } else {
+        dir("${testEnv}/classes/system") {
+            if (!gerrit.gerritPatchsetCheckout(SYSTEM_GIT_URL, SYSTEM_GIT_REF, "HEAD", CREDENTIALS_ID)) {
+              common.errorMsg("Failed to obtain system reclass with url: ${SYSTEM_GIT_URL} and ${SYSTEM_GIT_REF}")
+            }
+        }
+    }
+
+    def nbTry = 0
+    while (nbTry < 5) {
+        nbTry++
+        try {
+            saltModelTesting.setupAndTestNode("cfg01.${clusterDomain}", clusterName, EXTRA_FORMULAS, testEnv)
+            break
+        } catch (Exception e) {
+            if (e.getMessage() == "script returned exit code 124") {
+                common.errorMsg("Impossible to test node due to timeout of salt-master, retriggering")
+            } else {
+                throw e
+            }
+        }
+    }
+
 }
 
 def gerritRef
diff --git a/test-salt-model-node.groovy b/test-salt-model-node.groovy
index b7c616b..cc5c7be 100644
--- a/test-salt-model-node.groovy
+++ b/test-salt-model-node.groovy
@@ -29,7 +29,7 @@
 def checkouted = false
 
 throttle(['test-model']) {
-  node("python&&virtual") {
+  node("python") {
     try{
       stage("checkout") {
         if(defaultGitRef != "" && defaultGitUrl != "") {
diff --git a/test-system-reclass-pipeline.groovy b/test-system-reclass-pipeline.groovy
index 49755bc..6e3cb11 100644
--- a/test-system-reclass-pipeline.groovy
+++ b/test-system-reclass-pipeline.groovy
@@ -62,18 +62,24 @@
 
           def branches = [:]
           def testModels = documentationOnly ? [] : TEST_MODELS.split(',')
-            for (int i = 0; i < testModels.size(); i++) {
-              def cluster = testModels[i]
-              def clusterGitUrl = defaultGitUrl.substring(0, defaultGitUrl.lastIndexOf("/") + 1) + cluster
-              branches["${cluster}"] = {
-                build job: "test-salt-model-${cluster}", parameters: [
-                  [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: clusterGitUrl],
-                  [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: "HEAD"],
-                  [$class: 'StringParameterValue', name: 'SYSTEM_GIT_URL', value: defaultGitUrl],
-                  [$class: 'StringParameterValue', name: 'SYSTEM_GIT_REF', value: systemRefspec]
-                ]
-              }
+          for (int i = 0; i < testModels.size(); i++) {
+            def cluster = testModels[i]
+            def clusterGitUrl = defaultGitUrl.substring(0, defaultGitUrl.lastIndexOf("/") + 1) + cluster
+            branches["${cluster}"] = {
+              build job: "test-salt-model-${cluster}", parameters: [
+                [$class: 'StringParameterValue', name: 'DEFAULT_GIT_URL', value: clusterGitUrl],
+                [$class: 'StringParameterValue', name: 'DEFAULT_GIT_REF', value: "HEAD"],
+                [$class: 'StringParameterValue', name: 'SYSTEM_GIT_URL', value: defaultGitUrl],
+                [$class: 'StringParameterValue', name: 'SYSTEM_GIT_REF', value: systemRefspec]
+              ]
             }
+          }
+          branches["cookiecutter"] = {
+            build job: "test-mk-cookiecutter-templates", parameters: [
+              [$class: 'StringParameterValue', name: 'SYSTEM_GIT_URL', value: defaultGitUrl],
+              [$class: 'StringParameterValue', name: 'SYSTEM_GIT_REF', value: systemRefspec]
+            ]
+          }
           parallel branches
         }else{
            throw new Exception("Cannot checkout gerrit patchset, GERRIT_REFSPEC and DEFAULT_GIT_REF is null")
diff --git a/update-mirror-image.groovy b/update-mirror-image.groovy
index 0e28a4e..238dbb2 100644
--- a/update-mirror-image.groovy
+++ b/update-mirror-image.groovy
@@ -2,8 +2,19 @@
  * Update mirror image
  *
  * Expected parameters:
- *   SALT_MASTER_CREDENTIALS    Credentials to the Salt API.
- *   SALT_MASTER_URL            Full Salt API address [https://10.10.10.1:8000].
+ *   SALT_MASTER_CREDENTIALS            Credentials to the Salt API.
+ *   SALT_MASTER_URL                    Full Salt API address [https://10.10.10.1:8000].
+ *   UPDATE_APTLY                       Option to update Aptly
+ *   UPDATE_APTLY_MIRRORS               List of mirrors
+ *   PUBLISH_APTLY                      Publish aptly snapshots
+ *   RECREATE_APTLY_PUBLISHES           Option to recreate Aptly publishes (boolean)
+ *   FORCE_OVERWRITE_APTLY_PUBLISHES    Option to force overwrite existing packages while publishing
+ *   CLEANUP_APTLY                      Option to cleanup old Aptly snapshots
+ *   UPDATE_DOCKER_REGISTRY             Option to update Docker Registry
+ *   CLEANUP_DOCKER_CACHE               Option to cleanup locally cached Docker images
+ *   UPDATE_PYPI                        Option to update Python Packages
+ *   UPDATE_GIT                         Option to update Git repositories
+ *   UPDATE_IMAGES                      Option to update VM images
  *
 **/
 
@@ -14,35 +25,75 @@
 
 node() {
     try {
-
         python.setupPepperVirtualenv(venvPepper, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
 
-        stage('Update Aptly packages'){
-            common.infoMsg("Updating Aptly packages.")
-            salt.enforceState(venvPepper, 'apt*', ['aptly'], true)
-            salt.runSaltProcessStep(venvPepper, 'apt*', 'cmd.run', ['/srv/scripts/aptly-update.sh'], null, true)
-        }
+        if(UPDATE_APTLY.toBoolean()){
+            stage('Update Aptly mirrors'){
+                def aptlyMirrorArgs = "-s -v"
 
-        stage('Update Docker images'){
-            common.infoMsg("Updating Docker images.")
-            salt.enforceState(venvPepper, 'apt*', ['docker.client.registry'], true)
-        }
+                salt.enforceState(venvPepper, '*apt*', ['aptly.server'], true)
+                sleep(10)
 
-        stage('Update PyPi packages'){
-            common.infoMsg("Updating PyPi packages.")
-            salt.runSaltProcessStep(venvPepper, 'apt*', 'cmd.run', ['pip2pi /srv/pypi_mirror/packages/ -r /srv/pypi_mirror/requirements.txt'], null, true)
+                if(UPDATE_APTLY_MIRRORS != ""){
+                    common.infoMsg("Updating List of Aptly mirrors.")
+                    UPDATE_APTLY_MIRRORS = UPDATE_APTLY_MIRRORS.replaceAll("\\s","")
+                    def mirrors = UPDATE_APTLY_MIRRORS.tokenize(",")
+                    for(mirror in mirrors){
+                        salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.script', ['salt://aptly/files/aptly_mirror_update.sh', "args=\"${aptlyMirrorArgs} -m ${mirror}\"", 'runas=aptly'], null, true)
+                    }
+                }
+                else{
+                    common.infoMsg("Updating all Aptly mirrors.")
+                    salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.script', ['salt://aptly/files/aptly_mirror_update.sh', "args=\"${aptlyMirrorArgs}\"", 'runas=aptly'], null, true)
+                }
+            }
         }
+        if(PUBLISH_APTLY.toBoolean()){
+            def aptlyPublishArgs = "-av"
 
-        stage('Update Git repositories'){
-            common.infoMsg("Updating Git repositories.")
-            salt.enforceState(venvPepper, 'apt*', ['git.server'], true)
+            common.infoMsg("Publishing all Aptly snapshots.")
+
+            salt.enforceState(venvPepper, '*apt*', ['aptly.publisher'], true)
+            sleep(10)
+
+            if(CLEANUP_APTLY.toBoolean()){
+                aptlyPublishArgs += "c"
+            }
+            if(RECREATE_APTLY_PUBLISHES.toBoolean()){
+                aptlyPublishArgs += "r"
+            }
+            if(FORCE_OVERWRITE_APTLY_PUBLISHES.toBoolean()){
+                aptlyPublishArgs += "f"
+            }
+            salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.script', ['salt://aptly/files/aptly_publish_update.sh', "args=\"${aptlyPublishArgs}\"", 'runas=aptly'], null, true)
         }
-
-        stage('Update VM images'){
-            common.infoMsg("Updating VM images.")
-            salt.runSaltProcessStep(venvPepper, 'apt*', 'cmd.run', ['/srv/scripts/update-images.sh'], null, true)
+        if(UPDATE_DOCKER_REGISTRY.toBoolean()){
+            stage('Update Docker images'){
+                common.infoMsg("Updating Docker images.")
+                salt.enforceState(venvPepper, '*apt*', ['docker.client.registry'], true)
+                if(CLEANUP_DOCKER_CACHE.toBoolean()){
+                    salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['docker system prune --all --force'], null, true)
+                }
+            }
         }
-
+        if(UPDATE_PYPI.toBoolean()){
+            stage('Update PyPi packages'){
+                common.infoMsg("Updating PyPi packages.")
+                salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['pip2pi /srv/pypi_mirror/packages/ -r /srv/pypi_mirror/requirements.txt'], null, true)
+            }
+        }
+        if(UPDATE_GIT.toBoolean()){
+            stage('Update Git repositories'){
+                common.infoMsg("Updating Git repositories.")
+                salt.enforceState(venvPepper, '*apt*', ['git.server'], true)
+            }
+        }
+        if(UPDATE_IMAGES.toBoolean()){
+            stage('Update VM images'){
+                common.infoMsg("Updating VM images.")
+                salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['/srv/scripts/update-images.sh'], null, true)
+            }
+        }
     } catch (Throwable e) {
         // If there was an error or exception thrown, the build failed
         currentBuild.result = "FAILURE"
diff --git a/upgrade-mcp-release.groovy b/upgrade-mcp-release.groovy
new file mode 100644
index 0000000..0341263
--- /dev/null
+++ b/upgrade-mcp-release.groovy
@@ -0,0 +1,67 @@
+/**
+ *
+ * Update Salt environment pipeline
+ *
+ * Expected parameters:
+ *   SALT_MASTER_URL            Salt API server location
+ *   SALT_MASTER_CREDENTIALS    Credentials to the Salt API
+ *   MCP_VERSION                Version of MCP to upgrade to
+ *   UPDATE_LOCAL_REPOS         Update local repositories
+ */
+
+// Load shared libs
+def salt = new com.mirantis.mk.Salt()
+def common = new com.mirantis.mk.Common()
+def python = new com.mirantis.mk.Python()
+def venvPepper = "venvPepper"
+
+node("python") {
+    try {
+        python.setupPepperVirtualenv(venvPepper, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+
+        stage("Update Reclass"){
+            common.infoMsg("Updating reclass model")
+            salt.cmdRun(venvPepper, "I@salt:master", 'cd /srv/salt/reclass && git pull -r && git submodule update', false)
+            salt.runSaltProcessStep(venvPepper, 'I@salt:master', 'cmd.run', ['reclass-salt --top'], null, true)
+            salt.enforceState(venvPepper, "I@salt:master", 'reclass', true)
+        }
+
+        if(UPDATE_LOCAL_REPOS.toBoolean()){
+            stage("Update local repos"){
+                common.infoMsg("Updating local repositories")
+                salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ["aptly publish list --raw | awk '{print \$2, \$1}' | xargs -n2 aptly publish drop", 'runas=aptly'], null, true)
+                salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ["aptly snapshot list --raw | grep -E '*' | xargs -n 1 aptly snapshot drop", 'runas=aptly'], null, true)
+                salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ["aptly mirror list --raw | grep -E '*' | xargs -n 1 aptly mirror drop", 'runas=aptly'], null, true)
+                salt.enforceState(venvPepper, '*apt*', 'aptly', true)
+                salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.script', ['salt://aptly/files/aptly_mirror_update.sh', "args=-sv", 'runas=aptly'], null, true)
+                salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.script', ['salt://aptly/files/aptly_publish_update.sh', "args=-acfrv", 'runas=aptly'], null, true)
+
+                salt.enforceState(venvPepper, '*apt*', 'docker.client.registry', true)
+
+                salt.enforceState(venvPepper, '*apt*', 'git.server', true)
+
+                salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['pip2pi /srv/pypi_mirror/packages/ -r /srv/pypi_mirror/requirements.txt'], null, true)
+
+                salt.runSaltProcessStep(venvPepper, '*apt*', 'cmd.run', ['/srv/scripts/update-images.sh'], null, true)
+            }
+        }
+
+        stage("Update APT repos"){
+            common.infoMsg("Updating APT repositories")
+            salt.enforceState(venvPepper, "I@linux:system", 'linux.system.repo', true)
+        }
+
+        stage("Update formulas"){
+            common.infoMsg("Updating salt formulas")
+            salt.cmdRun(venvPepper, "I@salt:master", 'apt-get clean && apt-get update && apt-get install -y salt-formula-*')
+
+            common.infoMsg("Running salt sync-all")
+            salt.runSaltProcessStep(venvPepper, '*', 'saltutil.sync_all', [], null, true)
+        }
+    }
+    catch (Throwable e) {
+        // If there was an error or exception thrown, the build failed
+        currentBuild.result = "FAILURE"
+        throw e
+    }
+}
\ No newline at end of file