Merge "Add perfReport generation"
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index 745b5ab..66e4bc7 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -587,10 +587,17 @@
             stage('Finalize') {
                 if (common.checkContains('STACK_INSTALL', 'finalize')) {
                     def gluster_compound = 'I@glusterfs:server'
+                    def salt_ca_compound = 'I@salt:minion:ca:salt_master_ca'
                     // Enforce highstate asynchronous only on the nodes which are not glusterfs servers
-                    salt.enforceHighstate(venvPepper, '* and not ' + gluster_compound)
+                    salt.enforceHighstate(venvPepper, '* and not ' + gluster_compound + ' and not ' + salt_ca_compound)
                     // Iterate over nonempty set of gluster servers and apply highstates one by one
                     // TODO: switch to batch once salt 2017.7+ would be used
+                    def saltcaMinions = salt.getMinionsSorted(venvPepper, salt_ca_compound)
+                    if ( !saltcaMinions.isEmpty() ) {
+                        for ( target in saltcaMinions ) {
+                            salt.enforceHighstate(venvPepper, target)
+                        }
+                    }
                     def glusterMinions = salt.getMinionsSorted(venvPepper, gluster_compound)
                     if ( !glusterMinions.isEmpty() ) {
                         for ( target in glusterMinions ) {
diff --git a/cloud-update.groovy b/cloud-update.groovy
index 9a33c36..8c8b8b6 100644
--- a/cloud-update.groovy
+++ b/cloud-update.groovy
@@ -352,77 +352,27 @@
     }
 }
 
-def getCfgNodeProvider(pepperEnv, master_name) {
+def getNodeProvider(pepperEnv, nodeName, type='') {
     def salt = new com.mirantis.mk.Salt()
     def common = new com.mirantis.mk.Common()
-    if (!CFG_NODE_PROVIDER?.trim()) {
-        kvms = salt.getMinions(pepperEnv, 'I@salt:control')
-        for (kvm in kvms) {
-            try {
-                vms = salt.getReturnValues(salt.runSaltProcessStep(pepperEnv, kvm, 'virt.list_active_vms', [], null, true))
-                if (vms.toString().contains(master_name)) {
+    def kvms = salt.getMinions(pepperEnv, 'I@salt:control')
+    for (kvm in kvms) {
+        try {
+            vms = salt.getReturnValues(salt.runSaltProcessStep(pepperEnv, kvm, 'virt.list_domains', [], null, true))
+            if (vms.toString().contains(nodeName)) {
+                if (type == 'master' && !CFG_NODE_PROVIDER?.trim()) {
                     CFG_NODE_PROVIDER = kvm
+                } else {
+                    return kvm
                     //break
                 }
-            } catch (Exception er) {
-                common.infoMsg("${master_name} not present on ${kvm}")
             }
-        }
-    }
-}
-
-/*
-def rollbackSaltMaster(pepperEnv, target, path='/var/lib/libvirt/images') {
-    def salt = new com.mirantis.mk.Salt()
-    def common = new com.mirantis.mk.Common()
-    try {
-        input message: "PART1 - Are you sure to rollback ${target}? To rollback click on PROCEED. To skip rollback PART1 click on ABORT."
-    } catch (Exception er) {
-        common.infoMsg("skipping rollback of ${target}")
-        return
-    }
-    def domain = salt.getDomainName(pepperEnv)
-    def master = salt.getReturnValues(salt.getPillar(pepperEnv, target, 'linux:network:hostname'))
-    getCfgNodeProvider(pepperEnv, master)
-    try {
-        try {
-            salt.getReturnValues(salt.cmdRun(pepperEnv, CFG_NODE_PROVIDER, "ls -la ${path}/${master}.${domain}.xml"))
-            common.errorMsg('Pipeline is about to disconnect from salt-api. You will have to rerun the pipeline with ROLLBACK_CFG checked and skip PART1 to finish rollback.')
-            salt.cmdRun(pepperEnv, CFG_NODE_PROVIDER, "virsh destroy ${master}.${domain}; virsh define ${path}/${master}.${domain}.xml; virsh start ${master}.${domain} ")
         } catch (Exception er) {
-            common.errorMsg(er)
-            input message: "Rollback for ${target} failed. Rollback manually."
+            common.infoMsg("${nodeName} not present on ${kvm}")
         }
-    } catch (Exception er) {
-        common.errorMsg(er)
-        input message: "Rollback for ${target} failed. Rollback manually."
     }
 }
 
-def finishSaltMasterRollback(pepperEnv, target, path='/var/lib/libvirt/images') {
-    def salt = new com.mirantis.mk.Salt()
-    def common = new com.mirantis.mk.Common()
-    def virsh = new com.mirantis.mk.Virsh()
-    try {
-        input message: "PART2 - Are you sure to finalize ${target} rollback? Click on PROCEED. To skip rollback click on ABORT."
-    } catch (Exception er) {
-        common.infoMsg("skipping finalize rollback of ${target}")
-        return
-    }
-    salt.minionsReachable(pepperEnv, 'I@salt:master', '*')
-    def domain = salt.getDomainName(pepperEnv)
-    def master = salt.getReturnValues(salt.getPillar(pepperEnv, target, 'linux:network:hostname'))
-    getCfgNodeProvider(pepperEnv, master)
-    try {
-        virsh.liveSnapshotAbsent(pepperEnv, CFG_NODE_PROVIDER, master, SNAPSHOT_NAME, path)
-        // purge and setup previous repos
-        salt.enforceState(pepperEnv, target, 'linux.system.repo')
-    } catch (Exception e) {
-        common.errorMsg(e)
-        input message: "Check what failed after ${target} rollback. Do you want to PROCEED?"
-    }
-}*/
-
 def services(pepperEnv, probe, target, action='stop') {
     def services = ["keepalived","haproxy","nginx","nova-api","cinder","glance","heat","neutron","apache2","rabbitmq-server"]
     if (action == 'stop') {
@@ -543,14 +493,10 @@
     def domain = salt.getDomainName(pepperEnv)
     def target_hosts = salt.getMinionsSorted(pepperEnv, "${tgt}")
     common.warningMsg(target_hosts)
-    //def nodeCount = 1
     for (t in target_hosts) {
         def target = salt.stripDomainName(t)
-        def nodeCount = target[4]
-        common.warningMsg(nodeCount)
-        def nodeProvider = salt.getNodeProvider(pepperEnv, "${generalTarget}0${nodeCount}")
+        def nodeProvider = getNodeProvider(pepperEnv, t)
         virsh.liveSnapshotPresent(pepperEnv, nodeProvider, target, SNAPSHOT_NAME)
-        //nodeCount++
     }
 }
 
@@ -559,18 +505,16 @@
     def virsh = new com.mirantis.mk.Virsh()
     def domain = salt.getDomainName(pepperEnv)
     def target_hosts = salt.getMinionsSorted(pepperEnv, "${tgt}")
-    def nodeCount = 1
     for (t in target_hosts) {
         if (tgt == 'I@salt:master') {
             def master = salt.getReturnValues(salt.getPillar(pepperEnv, t, 'linux:network:hostname'))
-            getCfgNodeProvider(pepperEnv, master)
+            getNodeProvider(pepperEnv, master, 'master')
             virsh.liveSnapshotMerge(pepperEnv, CFG_NODE_PROVIDER, master, SNAPSHOT_NAME)
         } else {
             def target = salt.stripDomainName(t)
-            def nodeProvider = salt.getNodeProvider(pepperEnv, "${generalTarget}0${nodeCount}")
+            def nodeProvider = getNodeProvider(pepperEnv, t)
             virsh.liveSnapshotMerge(pepperEnv, nodeProvider, target, SNAPSHOT_NAME)
         }
-        nodeCount++
     }
     salt.minionsReachable(pepperEnv, 'I@salt:master', tgt)
 }
@@ -584,20 +528,16 @@
     def domain = salt.getDomainName(pepperEnv)
     def target_hosts = salt.getMinionsSorted(pepperEnv, "${tgt}")
     // first destroy all vms
-    def nodeCount = 1
     for (t in target_hosts) {
         def target = salt.stripDomainName(t)
-        def nodeProvider = salt.getNodeProvider(pepperEnv, "${generalTarget}0${nodeCount}")
+        def nodeProvider = getNodeProvider(pepperEnv, t)
         salt.runSaltProcessStep(pepperEnv, "${nodeProvider}*", 'virt.destroy', ["${target}.${domain}"], null, true)
-        nodeCount++
     }
-    nodeCount = 1
     // rollback vms
     for (t in target_hosts) {
         def target = salt.stripDomainName(t)
-        def nodeProvider = salt.getNodeProvider(pepperEnv, "${generalTarget}0${nodeCount}")
+        def nodeProvider = getNodeProvider(pepperEnv, t)
         virsh.liveSnapshotRollback(pepperEnv, nodeProvider, target, SNAPSHOT_NAME)
-        nodeCount++
     }
     try {
         salt.minionsReachable(pepperEnv, 'I@salt:master', tgt)
@@ -620,10 +560,9 @@
     def domain = salt.getDomainName(pepperEnv)
     def target_hosts = salt.getMinionsSorted(pepperEnv, "${tgt}")
     // first destroy all vms
-    def nodeCount = 1
     for (t in target_hosts) {
         def target = salt.stripDomainName(t)
-        def nodeProvider = salt.getNodeProvider(pepperEnv, "${generalTarget}0${nodeCount}")
+        def nodeProvider = getNodeProvider(pepperEnv, t)
         salt.runSaltProcessStep(pepperEnv, "${nodeProvider}*", 'virt.destroy', ["${target}.${domain}"], null, true)
         //salt.runSaltProcessStep(pepperEnv, "${nodeProvider}*", 'virt.undefine', ["${target}.${domain}"], null, true)
         try {
@@ -631,7 +570,6 @@
         } catch (Exception e) {
             common.warningMsg('does not match any accepted, unaccepted or rejected keys. They were probably already removed. We should continue to run')
         }
-        nodeCount++
     }
 }
 
@@ -880,7 +818,7 @@
                 def type = 'cfg'
                 if (salt.testTarget(pepperEnv, target)) {
                     def master = salt.getReturnValues(salt.getPillar(pepperEnv, target, 'linux:network:hostname'))
-                    getCfgNodeProvider(pepperEnv, master)
+                    getNodeProvider(pepperEnv, master, 'master')
                     if (!ROLLBACK_BY_REDEPLOY.toBoolean()) {
                         virsh.liveSnapshotPresent(pepperEnv, CFG_NODE_PROVIDER, master, SNAPSHOT_NAME)
                     } else {
diff --git a/docker-build-image-pipeline.groovy b/docker-build-image-pipeline.groovy
index 2ec0ab6..db448ea 100644
--- a/docker-build-image-pipeline.groovy
+++ b/docker-build-image-pipeline.groovy
@@ -7,6 +7,8 @@
  * IMAGE_TAGS - Image tags
  * DOCKERFILE_PATH - Relative path to docker file in image repo
  * REGISTRY_URL - Docker registry URL (can be empty)
+ * ARTIFACTORY_URL - URL to artifactory
+ * ARTIFACTORY_NAMESPACE - Artifactory namespace (oss, cicd,...)
  * REGISTRY_CREDENTIALS_ID - Docker hub credentials id
  *
 **/
@@ -15,6 +17,7 @@
 def gerrit = new com.mirantis.mk.Gerrit()
 def git = new com.mirantis.mk.Git()
 def dockerLib = new com.mirantis.mk.Docker()
+def artifactory = new com.mirantis.mcp.MCPArtifactory()
 timeout(time: 12, unit: 'HOURS') {
   node("docker") {
     def workspace = common.getWorkspace()
@@ -28,7 +31,6 @@
         buildArgs = []
       }
       def dockerApp
-      docker.withRegistry(REGISTRY_URL, REGISTRY_CREDENTIALS_ID) {
         stage("checkout") {
            git.checkoutGitRepository('.', IMAGE_GIT_URL, IMAGE_BRANCH, IMAGE_CREDENTIALS_ID)
         }
@@ -58,12 +60,30 @@
           }
         }
         stage("upload to docker hub"){
-          for(int i=0;i<imageTagsList.size();i++){
-            common.infoMsg("Uploading image ${IMAGE_NAME} with tag ${imageTagsList[i]}")
-            dockerApp.push(imageTagsList[i])
+          docker.withRegistry(REGISTRY_URL, REGISTRY_CREDENTIALS_ID) {
+            for(int i=0;i<imageTagsList.size();i++){
+              common.infoMsg("Uploading image ${IMAGE_NAME} with tag ${imageTagsList[i]} to dockerhub")
+              dockerApp.push(imageTagsList[i])
+            }
           }
         }
-      }
+        stage("upload to artifactory"){
+          if(common.validInputParam("ARTIFACTORY_URL") && common.validInputParam("ARTIFACTORY_NAMESPACE")) {
+             def artifactoryServer = Artifactory.server("mcp-ci")
+             def shortImageName = IMAGE_NAME
+             if (IMAGE_NAME.contains("/")) {
+                shortImageName = IMAGE_NAME.tokenize("/")[1]
+             }
+             for (imageTag in imageTagsList) {
+               sh "docker tag ${IMAGE_NAME} ${ARTIFACTORY_URL}/mirantis/${ARTIFACTORY_NAMESPACE}/${shortImageName}:${imageTag}"
+               artifactory.uploadImageToArtifactory(artifactoryServer, ARTIFACTORY_URL,
+                                                 "mirantis/${ARTIFACTORY_NAMESPACE}/${shortImageName}",
+                                                 imageTag, "docker-dev-local")
+             }
+          }else{
+            common.warningMsg("ARTIFACTORY_URL or ARTIFACTORY_NAMESPACE not given, upload to artifactory skipped")
+          }
+        }
     } catch (Throwable e) {
        // If there was an error or exception thrown, the build failed
        currentBuild.result = "FAILURE"
@@ -74,3 +94,4 @@
     }
   }
 }
+
diff --git a/openstack-control-upgrade.groovy b/openstack-control-upgrade.groovy
index 21134a2..d306e34 100644
--- a/openstack-control-upgrade.groovy
+++ b/openstack-control-upgrade.groovy
@@ -16,6 +16,12 @@
 def salt = new com.mirantis.mk.Salt()
 def python = new com.mirantis.mk.Python()
 
+def getNodeProvider(pepperEnv, name) {
+    def salt = new com.mirantis.mk.Salt()
+    def kvm = salt.getKvmMinionId(pepperEnv)
+    return salt.getReturnValues(salt.getPillar(pepperEnv, "${kvm}", "salt:control:cluster:internal:node:${name}:provider"))
+}
+
 def stopServices(pepperEnv, probe, target, type) {
     def openstack = new com.mirantis.mk.Openstack()
     def services = []
@@ -86,7 +92,7 @@
 
     if (SKIP_VM_RELAUNCH.toBoolean() == false) {
 
-        def upgNodeProvider = salt.getNodeProvider(pepperEnv, test_upgrade_node)
+        def upgNodeProvider = getNodeProvider(pepperEnv, test_upgrade_node)
 
         salt.runSaltProcessStep(pepperEnv, "${upgNodeProvider}", 'virt.destroy', ["${test_upgrade_node}.${domain}"])
         salt.runSaltProcessStep(pepperEnv, "${upgNodeProvider}", 'virt.undefine', ["${test_upgrade_node}.${domain}"])
@@ -98,7 +104,7 @@
         }
 
         // salt 'kvm02*' state.sls salt.control
-        salt.enforceState(pepperEnv, "${upgNodeProvider}", 'salt.control')
+        stateRun(pepperEnv, "${upgNodeProvider}", 'salt.control')
         // wait until upg node is registered in salt-key
         salt.minionPresent(pepperEnv, 'I@salt:master', test_upgrade_node)
         // salt '*' saltutil.refresh_pillar
@@ -107,10 +113,17 @@
         salt.runSaltProcessStep(pepperEnv, "${test_upgrade_node}*", 'saltutil.sync_all', [])
     }
 
+    stateRun(pepperEnv, "${test_upgrade_node}*", ['linux.network.proxy'])
+    try {
+        salt.runSaltProcessStep(pepperEnv, "${test_upgrade_node}*", 'state.sls', ["salt.minion.base"], null, true, 60)
+    } catch (Exception e) {
+        common.warningMsg(e)
+    }
+
     stateRun(pepperEnv, "${test_upgrade_node}*", ['linux', 'openssh'])
 
     try {
-        salt.runSaltProcessStep(master, "${test_upgrade_node}*", 'state.sls', ["salt.minion"], null, true, 60)
+        salt.runSaltProcessStep(pepperEnv, "${test_upgrade_node}*", 'state.sls', ["salt.minion"], null, true, 60)
     } catch (Exception e) {
         common.warningMsg(e)
     }
@@ -122,16 +135,17 @@
     } catch (Exception e) {
         common.warningMsg('salt-minion was restarted. We should continue to run')
     }
+    salt.runSaltProcessStep(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'saltutil.sync_grains')
+    salt.runSaltProcessStep(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'mine.flush')
+    salt.runSaltProcessStep(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'mine.update')
+    salt.enforceState(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'backupninja')
     try {
         salt.enforceState(pepperEnv, 'I@backupninja:server', ['salt.minion'])
     } catch (Exception e) {
         common.warningMsg('salt-minion was restarted. We should continue to run')
     }
-    // salt '*' state.apply salt.minion.grains
-    //salt.enforceState(pepperEnv, '*', 'salt.minion.grains')
-    // salt -C 'I@backupninja:server' state.sls backupninja
+
     salt.enforceState(pepperEnv, 'I@backupninja:server', 'backupninja')
-    salt.enforceState(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'backupninja')
     salt.runSaltProcessStep(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', 'ssh.rm_known_host', ["root", "${backupninja_backup_host}"])
     try {
         salt.cmdRun(pepperEnv, '( I@galera:master or I@galera:slave ) and I@backupninja:client', "arp -d ${backupninja_backup_host}")
@@ -223,10 +237,9 @@
             stopServices(pepperEnv, node, tgt, general_target)
         }
 
-        def node_count = 1
         for (t in target_hosts) {
             def target = salt.stripDomainName(t)
-            def nodeProvider = salt.getNodeProvider(pepperEnv, "${general_target}0${node_count}")
+            def nodeProvider = getNodeProvider(pepperEnv, target)
             if ((OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == true) && (SKIP_VM_RELAUNCH.toBoolean() == false)) {
                 salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.destroy', ["${target}.${domain}"])
                 sleep(2)
@@ -244,7 +257,6 @@
             } else if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == false) {
                 virsh.liveSnapshotPresent(pepperEnv, nodeProvider, target, snapshotName)
             }
-            node_count++
         }
     }
 
@@ -263,6 +275,13 @@
     // salt '*' saltutil.sync_all
     salt.runSaltProcessStep(pepperEnv, upgrade_general_target, 'saltutil.sync_all', [])
 
+    stateRun(pepperEnv, upgrade_general_target, ['linux.network.proxy'])
+    try {
+        salt.runSaltProcessStep(pepperEnv, upgrade_general_target, 'state.sls', ["salt.minion.base"], null, true, 60)
+    } catch (Exception e) {
+        common.warningMsg(e)
+    }
+
     if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == false) {
 
         try {
@@ -302,7 +321,7 @@
             common.warningMsg(e)
         }
         try {
-            salt.runSaltProcessStep(master, upgrade_general_target, 'state.sls', ["salt.minion"], null, true, 60)
+            salt.runSaltProcessStep(pepperEnv, upgrade_general_target, 'state.sls', ["salt.minion"], null, true, 60)
         } catch (Exception e) {
             common.warningMsg(e)
         }
@@ -468,10 +487,9 @@
             general_target = 'ctl'
         }
 
-        def node_count = 1
         for (t in target_hosts) {
             def target = salt.stripDomainName(t)
-            def nodeProvider = salt.getNodeProvider(pepperEnv, "${general_target}0${node_count}")
+            def nodeProvider = getNodeProvider(pepperEnv, target)
             salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.destroy', ["${target}.${domain}"])
             sleep(2)
             if (OPERATING_SYSTEM_RELEASE_UPGRADE.toBoolean() == true) {
@@ -487,7 +505,6 @@
                 salt.runSaltProcessStep(pepperEnv, "${nodeProvider}", 'virt.start', ["${target}.${domain}"])
                 virsh.liveSnapshotAbsent(pepperEnv, nodeProvider, target, snapshotName)
             }
-            node_count++
         }
     }
 
diff --git a/release-mcp-version.groovy b/release-mcp-version.groovy
index 95e8bac..8af3fbe 100644
--- a/release-mcp-version.groovy
+++ b/release-mcp-version.groovy
@@ -16,6 +16,10 @@
  *   DOCKER_IMAGES
  *   GIT_CREDENTIALS
  *   GIT_REPO_LIST
+ *   EMAIL_NOTIFY
+ *   NOTIFY_RECIPIENTS
+ *   NOTIFY_TEXT
+ *
  */
 
 common = new com.mirantis.mk.Common()
@@ -51,31 +55,14 @@
   ]
 }
 
-def gitRepoAddTag(repoURL, repoName, tag, credentials, ref = "HEAD"){
-    git.checkoutGitRepository(repoName, repoURL, "master", credentials)
-    dir(repoName) {
-        def checkTag = sh(script: "git tag -l ${tag}", returnStdout: true)
-        if(checkTag == ""){
-            sh "git tag -a ${tag} ${ref} -m \"Release of mcp version ${tag}\""
-        }else{
-            def currentTagRef = sh(script: "git rev-list -n 1 ${tag}", returnStdout: true)
-            if(currentTagRef.equals(ref)){
-                common.infoMsg("Tag is already on the right ref")
-                return
-            }
-            else{
-                sshagent([credentials]) {
-                    sh "git push --delete origin ${tag}"
-                }
-                sh "git tag --delete ${tag}"
-                sh "git tag -a ${tag} ${ref} -m \"Release of mcp version ${tag}\""
-            }
-        }
-        sshagent([credentials]) {
-            sh "git push origin ${tag}"
-        }
-    }
+def triggerGitTagJob(gitRepoList, gitCredentials, tag) {
+  build job: "tag-git-repos-stable", parameters: [
+    [$class: 'StringParameterValue', name: 'GIT_REPO_LIST', value: gitRepoList],
+    [$class: 'StringParameterValue', name: 'GIT_CREDENTIALS', value: gitCredentials],
+    [$class: 'StringParameterValue', name: 'TAG', value: tag]
+  ]
 }
+
 timeout(time: 12, unit: 'HOURS') {
     node() {
         try {
@@ -100,18 +87,13 @@
                 if(RELEASE_GIT.toBoolean())
                 {
                     common.infoMsg("Promoting Git repositories")
-                    def repos = GIT_REPO_LIST.tokenize('\n')
-                    def repoUrl, repoName, repoCommit, repoArray
-                    for (repo in repos){
-                        if(repo.trim().indexOf(' ') == -1){
-                            throw new IllegalArgumentException("Wrong format of repository and commit input")
-                        }
-                        repoArray = repo.trim().tokenize(' ')
-                        repoName = repoArray[0]
-                        repoUrl = repoArray[1]
-                        repoCommit = repoArray[2]
-                        gitRepoAddTag(repoUrl, repoName, TARGET_REVISION, GIT_CREDENTIALS, repoCommit)
-                    }
+                    triggerGitTagJob(GIT_REPO_LIST, GIT_CREDENTIALS, TARGET_REVISION)
+
+                }
+                if (EMAIL_NOTIFY.toBoolean()) {
+                    emailext(to: NOTIFY_RECIPIENTS,
+                        body: NOTIFY_TEXT,
+                        subject: "MCP Promotion has been done")
                 }
             }
         } catch (Throwable e) {
diff --git a/tag-git-repos.groovy b/tag-git-repos.groovy
new file mode 100644
index 0000000..373e029
--- /dev/null
+++ b/tag-git-repos.groovy
@@ -0,0 +1,62 @@
+/**
+ *
+ * Tag Git repositories
+ *
+ * Expected parameters:
+ *   GIT_REPO_LIST
+ *   GIT_CREDENTIALS
+ *   TAG
+ *
+ */
+
+common = new com.mirantis.mk.Common()
+git = new com.mirantis.mk.Git()
+
+def gitRepoAddTag(repoURL, repoName, tag, credentials, ref = "HEAD"){
+    git.checkoutGitRepository(repoName, repoURL, "master", credentials)
+    dir(repoName) {
+        def checkTag = sh(script: "git tag -l ${tag}", returnStdout: true)
+        if(checkTag == ""){
+            sh "git tag -a ${tag} ${ref} -m \"Release of mcp version ${tag}\""
+        }else{
+            def currentTagRef = sh(script: "git rev-list -n 1 ${tag}", returnStdout: true)
+            if(currentTagRef.equals(ref)){
+                common.infoMsg("Tag is already on the right ref")
+                return
+            }
+            else{
+                sshagent([credentials]) {
+                    sh "git push --delete origin ${tag}"
+                }
+                sh "git tag --delete ${tag}"
+                sh "git tag -a ${tag} ${ref} -m \"Release of mcp version ${tag}\""
+            }
+        }
+        sshagent([credentials]) {
+            sh "git push origin ${tag}"
+        }
+    }
+}
+
+timeout(time: 12, unit: 'HOURS') {
+    node() {
+        try {
+            def repos = GIT_REPO_LIST.tokenize('\n')
+            def repoUrl, repoName, repoCommit, repoArray
+            for (repo in repos){
+                if(repo.trim().indexOf(' ') == -1){
+                    throw new IllegalArgumentException("Wrong format of repository and commit input")
+                }
+                repoArray = repo.trim().tokenize(' ')
+                repoName = repoArray[0]
+                repoUrl = repoArray[1]
+                repoCommit = repoArray[2]
+                gitRepoAddTag(repoUrl, repoName, TAG, GIT_CREDENTIALS, repoCommit)
+            }
+        } catch (Throwable e) {
+            // If there was an error or exception thrown, the build failed
+            currentBuild.result = "FAILURE"
+            throw e
+        }
+    }
+}
\ No newline at end of file