Merge "WIP fix dockerExists function in mcp release upgrade pipeline"
diff --git a/ceph-add-node.groovy b/ceph-add-node.groovy
index 92d61e0..33a5a67 100644
--- a/ceph-add-node.groovy
+++ b/ceph-add-node.groovy
@@ -74,7 +74,15 @@
             }
         }
 
-        salt.enforceState(pepperEnv, HOST, 'prometheus')
-        salt.enforceState(pepperEnv, 'I@prometheus', 'prometheus')
+        stage("Update/Install monitoring") {
+            // Collect grains
+            salt.enforceState(pepperEnv, HOST, 'salt.minion.grains')
+            salt.runSaltProcessStep(pepperEnv, HOST, 'saltutil.refresh_modules')
+            salt.runSaltProcessStep(pepperEnv, HOST, 'mine.update')
+            sleep(5)
+
+            salt.enforceState(pepperEnv, HOST, 'prometheus')
+            salt.enforceState(pepperEnv, 'I@prometheus:server', 'prometheus')
+        }
     }
 }
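
The new monitoring stage depends on grains and mine data being current before the prometheus state is rendered, which is why the refresh steps precede enforceState. A minimal standalone sketch of the same refresh-then-enforce pattern, assuming a prepared pepperEnv and the com.mirantis.mk.Salt library (the helper name is illustrative, not part of this change):

    salt = new com.mirantis.mk.Salt()

    // Refresh grains, execution modules and mine data on the target, then
    // apply a state rendered from that data (prometheus in the stage above).
    def refreshAndEnforce(pepperEnv, target, state) {
        salt.enforceState(pepperEnv, target, 'salt.minion.grains')
        salt.runSaltProcessStep(pepperEnv, target, 'saltutil.refresh_modules')
        salt.runSaltProcessStep(pepperEnv, target, 'mine.update')
        sleep(5)
        salt.enforceState(pepperEnv, target, state)
    }

    // e.g. refreshAndEnforce(pepperEnv, HOST, 'prometheus')
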
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index c2d4943..a2a4907 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -7,33 +7,6 @@
  *
 **/
 
-// Deprecation to avoid unexpected behaviour because it should be passed via initial context.
-// Need to delete this "if" statement at 1 April 2018.
-if(env.COOKIECUTTER_TEMPLATE_CREDENTIALS ||
-   env.COOKIECUTTER_TEMPLATE_URL ||
-   env.COOKIECUTTER_TEMPLATE_BRANCH ||
-   env.COOKIECUTTER_TEMPLATE_PATH ||
-   env.SHARED_RECLASS_URL){
-    println '''
-    DEPRECATION: Please note that the following variables are deprocated:
-    - COOKIECUTTER_TEMPLATE_CREDENTIALS
-    - COOKIECUTTER_TEMPLATE_URL
-    - COOKIECUTTER_TEMPLATE_BRANCH
-    - COOKIECUTTER_TEMPLATE_PATH
-    - SHARED_RECLASS_URL
-    You need to pass the values using the following variables from initial cookiecutter context:
-    - cookiecutter_template_url
-    - cookiecutter_template_branch
-    - shared_reclass_url
-    The following variables are not needed anymore:
-    - COOKIECUTTER_TEMPLATE_CREDENTIALS - cookiecutter-templates repos are accessible for anounimous
-                                        (https://gerrit.mcp.mirantis.net)
-    - COOKIECUTTER_TEMPLATE_PATH - hardcoded to "${env.WORKSPACE}/template"
-    '''
-    currentBuild.result = "FAILURE"
-    return
-}
-
 common = new com.mirantis.mk.Common()
 git = new com.mirantis.mk.Git()
 python = new com.mirantis.mk.Python()
@@ -205,8 +178,16 @@
 
                 // download create-config-drive
                 // FIXME: that should be refactored, to use git clone - to be able download it from custom repo.
-                def config_drive_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/master/config-drive/create_config_drive.sh"
-                def user_data_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/master/config-drive/master_config.sh"
+                def mcpCommonScriptsBranch = templateContext.default_context.mcp_common_scripts_branch
+                if (mcpCommonScriptsBranch == '') {
+                    mcpCommonScriptsBranch = mcpVersion
+                    // There is no nightly branch for the mcp-common-scripts repo, so fall back to master
+                    if (mcpVersion == "nightly") {
+                        mcpCommonScriptsBranch = 'master'
+                    }
+                }
+                def config_drive_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${mcpCommonScriptsBranch}/config-drive/create_config_drive.sh"
+                def user_data_script_url = "https://raw.githubusercontent.com/Mirantis/mcp-common-scripts/${mcpCommonScriptsBranch}/config-drive/master_config.sh"
 
                 sh "wget -O create-config-drive ${config_drive_script_url} && chmod +x create-config-drive"
                 sh "wget -O user_data.sh ${user_data_script_url}"
@@ -222,9 +203,14 @@
                 smc['DEPLOY_NETWORK_GW'] = templateContext['default_context']['deploy_network_gateway']
                 smc['DEPLOY_NETWORK_NETMASK'] = templateContext['default_context']['deploy_network_netmask']
                 smc['DNS_SERVERS'] = templateContext['default_context']['dns_server01']
+                smc['MCP_VERSION'] = "${mcpVersion}"
                 if (templateContext['default_context']['local_repositories'] == 'True'){
+                    def localRepoIP = templateContext['default_context']['local_repo_url']
+                    smc['MCP_SALT_REPO_KEY'] = "http://${localRepoIP}/public.gpg"
+                    smc['MCP_SALT_REPO_URL'] = "http://${localRepoIP}/ubuntu-xenial"
                     smc['PIPELINES_FROM_ISO'] = 'false'
-                    smc['PIPELINE_REPO_URL'] = 'http://' + templateContext['default_context']['aptly_server_deploy_address'] + ':8088'
+                    smc['PIPELINE_REPO_URL'] = "http://${localRepoIP}:8088"
+                    smc['LOCAL_REPOS'] = 'true'
                 }
                 if (templateContext['default_context']['upstream_proxy_enabled'] == 'True'){
                     if (templateContext['default_context']['upstream_proxy_auth_enabled'] == 'True'){
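
MCP_SALT_REPO_KEY, MCP_SALT_REPO_URL and PIPELINE_REPO_URL are now all derived from local_repo_url, so an empty context value would quietly produce URLs such as http:///public.gpg. A hedged sketch of a guard that could sit before the block above; the error message and placement are suggestions, not part of this change:

    def context = templateContext['default_context']
    if (context['local_repositories'] == 'True' && !context['local_repo_url']) {
        // fail early instead of writing broken repo URLs into the config drive
        error('local_repositories is True but local_repo_url is not set in the cookiecutter context')
    }
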
diff --git a/restore-cassandra.groovy b/restore-cassandra.groovy
index 10ec378..c98ff17 100644
--- a/restore-cassandra.groovy
+++ b/restore-cassandra.groovy
@@ -19,12 +19,7 @@
             python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
         }
 
-        stage('Start restore') {
-            // # actual upgrade
-
-            stage('Ask for manual confirmation') {
-                input message: "Are you sure you have the correct backups ready? Do you really want to continue to restore Cassandra?"
-            }
+        stage('Restore') {
             try {
                 salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.stop', ['neutron-server'], null, true)
             } catch (Exception er) {
@@ -68,10 +63,12 @@
 
             // wait until supervisor-database service is up
             salt.commandStatus(pepperEnv, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
-            sleep(5)
+            sleep(60)
+
             // performs restore
-            salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', "su root -c 'salt-call state.sls cassandra'")
+            salt.enforceState(pepperEnv, 'I@cassandra:backup:client', "cassandra.backup")
             salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
+            sleep(5)
             salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
 
             // wait until supervisor-database service is up
diff --git a/restore-zookeeper.groovy b/restore-zookeeper.groovy
index d459266..185f097 100644
--- a/restore-zookeeper.groovy
+++ b/restore-zookeeper.groovy
@@ -19,13 +19,7 @@
             python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
         }
 
-        stage('Start restore') {
-            // # actual upgrade
-
-            stage('Ask for manual confirmation') {
-                input message: "Are you sure you have the correct backups ready? Do you really want to continue to restore Zookeeper?"
-            }
-            // Zookeeper restore section
+        stage('Restore') {
             try {
                 salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-config'], null, true)
             } catch (Exception er) {
@@ -69,7 +63,7 @@
             salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
 
             // performs restore
-            salt.cmdRun(pepperEnv, 'I@opencontrail:control', "su root -c 'salt-call state.sls zookeeper'")
+            salt.enforceState(pepperEnv, 'I@opencontrail:control', "zookeeper.backup")
 
             salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.start', ['zookeeper'], null, true)
             salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.start', ['supervisor-config'], null, true)
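
Both restore pipelines now apply the corresponding restore state (cassandra.backup, zookeeper.backup) through salt.enforceState instead of shelling out with salt-call via cmdRun, which gives normal state rendering and failure handling. A minimal sketch of the Cassandra flow above as a reusable helper, assuming a prepared pepperEnv; the helper name and fixed waits are illustrative, and the Zookeeper pipeline restarts services instead of rebooting:

    salt = new com.mirantis.mk.Salt()

    // Apply the restore state, reboot the backup client and wait until the
    // database supervisor reports running again, as in the Cassandra stage.
    def restoreAndWait(pepperEnv, target, state, serviceCheck) {
        salt.enforceState(pepperEnv, target, state)
        salt.runSaltProcessStep(pepperEnv, target, 'system.reboot', null, null, true, 5)
        sleep(60)
        salt.commandStatus(pepperEnv, target, serviceCheck, 'running')
    }

    // e.g. restoreAndWait(pepperEnv, 'I@cassandra:backup:client', 'cassandra.backup',
    //                     'service supervisor-database status')
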