Merge "Switch VCP to LVM based partitions"
diff --git a/.gitignore b/.gitignore
index 4ae6f8b..fcd722e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,4 @@
 images/
 packer_cache/
 */config-drive/cloudata.iso
-
+.idea/
diff --git a/common/ubuntu_salt_bootstrap.sh b/common/ubuntu_salt_bootstrap.sh
index cef2c3f..e0e5ff4 100644
--- a/common/ubuntu_salt_bootstrap.sh
+++ b/common/ubuntu_salt_bootstrap.sh
@@ -6,24 +6,20 @@
 fi
 #
 CLUSTER_NAME=${CLUSTER_NAME:-lost_cluster_name_variable}
-CLUSTER_MODEL=${CLUSTER_MODEL:-https://github.com/Mirantis/mcp-offline-model.git}
-CLUSTER_MODEL_REF=${CLUSTER_MODEL_REF:-master}
-FORMULA_VERSION=${FORMULA_VERSION:-testing}
+MCP_VERSION=${MCP_VERSION:-testing}
 SALTSTACK_GPG=${SALTSTACK_GPG:-"https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2017.7/SALTSTACK-GPG-KEY.pub"}
 SALTSTACK_REPO=${SALTSTACK_REPO:-"http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2017.7 xenial main"}
 APT_MIRANTIS_GPG=${APT_MIRANTIS_GPG:-"http://apt.mirantis.com/public.gpg"}
-APT_MIRANTIS_SALT_REPO=${APT_MIRANTIS_SALT_REPO:-"http://apt.mirantis.com/xenial/ $FORMULA_VERSION salt"}
-GIT_SALT_FORMULAS_SCRIPTS=${GIT_SALT_FORMULAS_SCRIPTS:-"https://github.com/salt-formulas/salt-formulas-scripts"}
-GIT_SALT_FORMULAS_SCRIPTS_REF=${GIT_SALT_FORMULAS_SCRIPTS_REF:-master}
+APT_MIRANTIS_SALT_REPO=${APT_MIRANTIS_SALT_REPO:-"http://apt.mirantis.com/xenial/ $MCP_VERSION salt"}
 
 function process_repos(){
 # TODO: those  should be unhardcoded and re-writed, using CC model
 wget -O - ${SALTSTACK_GPG} | sudo apt-key add -
 wget -O - ${APT_MIRANTIS_GPG} | apt-key add -
-wget -O - http://mirror.mirantis.com/${FORMULA_VERSION}/extra/xenial/archive-extra.key | apt-key add -
+wget -O - http://mirror.mirantis.com/${MCP_VERSION}/extra/xenial/archive-extra.key | apt-key add -
 
 echo "deb [arch=amd64] ${SALTSTACK_REPO}"  > /etc/apt/sources.list.d/mcp_saltstack.list
-echo "deb [arch=amd64] http://mirror.mirantis.com/${FORMULA_VERSION}/extra/xenial xenial main"  > /etc/apt/sources.list.d/mcp_extra.list
+echo "deb [arch=amd64] http://mirror.mirantis.com/${MCP_VERSION}/extra/xenial xenial main"  > /etc/apt/sources.list.d/mcp_extra.list
 
 # This Pin-Priority fix should be always aligned with
 # https://github.com/Mirantis/reclass-system-salt-model/blob/master/linux/system/repo/mcp/apt_mirantis/saltstack.yml
@@ -51,24 +47,16 @@
 
 rm -v /etc/apt/sources.list.d/mcp_extra.list /etc/apt/preferences.d/mcp_extra
 
-for g_host in ${CLUSTER_MODEL} ${GIT_SALT_FORMULAS_SCRIPTS} ; do
-  _tmp_host=$(echo ${g_host} | awk -F/ '{print $3}')
-  ssh-keyscan -T 1 -H ${_tmp_host} >> ~/.ssh/known_hosts || true
-done
-
+[ ! -d /srv/salt ] && mkdir -p /srv/salt
+mount /dev/cdrom /mnt
 if [[ ! -d /srv/salt/reclass ]]; then
-  git clone --recursive ${CLUSTER_MODEL} /srv/salt/reclass
-  pushd /srv/salt/reclass/
-    git checkout ${CLUSTER_MODEL_REF}
-  popd
+  cp -r /mnt/model /srv/salt/reclass
 fi
 
 if [[ ! -d /srv/salt/scripts ]]; then
-  git clone --recursive ${GIT_SALT_FORMULAS_SCRIPTS} /srv/salt/scripts
-  pushd /srv/salt/scripts/
-    git checkout ${GIT_SALT_FORMULAS_SCRIPTS_REF}
-  popd
+  cp -r /mnt/salt_scripts /srv/salt/scripts
 fi
+umount /mnt
 
 # bootstrap.sh opts
 export FORMULAS_SOURCE=pkg
diff --git a/day01-image/config-drive/meta-data b/day01-image/config-drive/meta-data
new file mode 100644
index 0000000..c7ba190
--- /dev/null
+++ b/day01-image/config-drive/meta-data
@@ -0,0 +1 @@
+hostname: ubuntu
diff --git a/day01-image/config-drive/user-data.yaml b/day01-image/config-drive/user-data
similarity index 100%
rename from day01-image/config-drive/user-data.yaml
rename to day01-image/config-drive/user-data
diff --git a/day01-image/files/tmp/bootstrap.saltstack.com.sh b/day01-image/files/opt/bootstrap.saltstack.com.sh
similarity index 100%
rename from day01-image/files/tmp/bootstrap.saltstack.com.sh
rename to day01-image/files/opt/bootstrap.saltstack.com.sh
diff --git a/day01-image/run.example.sh b/day01-image/run.example.sh
index 25efd13..6277a71 100755
--- a/day01-image/run.example.sh
+++ b/day01-image/run.example.sh
@@ -13,19 +13,17 @@
 #
 export IMAGE_NAME="cfg01"
 #
-export CLUSTER_MODEL=https://github.com/Mirantis/mcp-drivetrain-model.git
-export CLUSTER_MODEL_REF=master
+export CLUSTER_MODEL="http://gerrit.mcp.mirantis.com/mcp/mcp-drivetrain-model"
+export CLUSTER_MODEL_REF="master"
 export MCP_VERSION=proposed
 export SCRIPTS_REF=master
 export CLUSTER_NAME=mcp-day01
-export FORMULA_VERSION=proposed
-export BINARY_MCP_VERSION=proposed
 export UBUNTU_BASEURL="http://mirror.mirantis.com/proposed/ubuntu/"
 export SALTSTACK_REPO="http://mirror.mirantis.com/proposed/saltstack-2017.7/xenial xenial main"
 export APT_MIRANTIS_GPG="http://apt.mirantis.com/public.gpg"
 export SALTSTACK_GPG="https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2017.7/SALTSTACK-GPG-KEY.pub"
 export APT_MIRANTIS_SALT_REPO="http://apt.mirantis.com/xenial/ proposed salt"
-export GIT_SALT_FORMULAS_SCRIPTS=https://github.com/salt-formulas/salt-formulas-scripts.git
+export GIT_SALT_FORMULAS_SCRIPTS="https://gerrit.mcp.mirantis.com/salt-formulas/salt-formulas-scripts"
 export APT_REPOSITORY="deb [arch=amd64] http://apt.mirantis.com/xenial/ proposed salt"
 export APT_REPOSITORY_GPG=http://apt.mirantis.com/public.gpg
 ###
@@ -34,7 +32,28 @@
 mkdir -p "${PACKER_IMAGES_CACHE}"
 
 export PACKER_LOG=1
+pushd config-drive
+  if [ ! -e model ]; then
+    git clone ${CLUSTER_MODEL} model/
+    pushd model
+      git fetch ${CLUSTER_MODEL} ${CLUSTER_MODEL_REF} && git checkout FETCH_HEAD
+      pushd classes/system
+        git submodule init
+        git submodule update
+      popd
+    popd
+  fi
+  if [ ! -e salt_scripts ]; then
+    git clone ${GIT_SALT_FORMULAS_SCRIPTS} salt_scripts/
+    pushd salt_scripts
+    git fetch ${GIT_SALT_FORMULAS_SCRIPTS} ${SCRIPTS_REF} && git checkout FETCH_HEAD
+    popd
+  fi
+popd
+
+[ -f config-drive/cloudata.iso ] && rm -v config-drive/cloudata.iso
+mkisofs -o config-drive/cloudata.iso -V cidata -r -J --quiet config-drive
+
 # For qemu test-build:
-cloud-localds  --hostname ubuntu --dsmode local config-drive/cloudata.iso  config-drive/user-data.yaml
 packer build -only=qemu -parallel=false -on-error=ask template.json
 #rm -rf ~/.packer.d/
diff --git a/day01-image/scripts/salt.sh b/day01-image/scripts/salt.sh
index 7e58372..6074f21 100644
--- a/day01-image/scripts/salt.sh
+++ b/day01-image/scripts/salt.sh
@@ -1,20 +1,29 @@
 #!/bin/bash -xe
 
-FORMULA_VERSION=${FORMULA_VERSION:-2018.3.1}
-APT_MIRANTIS_SALT_REPO=${APT_MIRANTIS_SALT_REPO:-"http://apt.mirantis.com/xenial/ $FORMULA_VERSION salt"}
+MCP_VERSION=${MCP_VERSION:-proposed}
+APT_MIRANTIS_SALT_REPO=${APT_MIRANTIS_SALT_REPO:-"http://apt.mirantis.com/xenial/ $MCP_VERSION salt"}
 SALT_OPTS="-t 10 --retcode-passthrough --no-color"
 
-salt-call ${SALT_OPTS} reclass.validate_pillar
-
 echo "deb [arch=amd64] ${APT_MIRANTIS_SALT_REPO}" > /etc/apt/sources.list.d/mcp_salt.list
 apt-get update
 apt-get install salt-formula* -y
 
-salt-call saltutil.refresh_pillar
-salt-call saltutil.sync_all
-salt-call ${SALT_OPTS} state.sls salt
-salt-call ${SALT_OPTS} state.sls linux.system.repo,linux.system.package,linux.system.user,linux.system.directory,linux.system.config
+# Wait for salt-master and salt-minion to come back up after the restart
+# triggered by the preceding 'reboot.sh' provisioning step
+salt-call --timeout=120 test.ping
+
+salt-call ${SALT_OPTS} saltutil.clear_cache
+salt-call ${SALT_OPTS} saltutil.refresh_pillar
+salt-call ${SALT_OPTS} saltutil.sync_all
+salt-call ${SALT_OPTS} reclass.validate_pillar
+
+salt-call ${SALT_OPTS} state.sls linux.system.repo,linux.system.package,linux.system.user,linux.system.directory,linux.system.file,linux.system.config
 salt-call ${SALT_OPTS} state.sls linux.network
+salt-call ${SALT_OPTS} state.sls salt.minion.ca
+
+salt-call ${SALT_OPTS} state.sls salt
+salt-call ${SALT_OPTS} state.sls docker.host
+
 salt-call ${SALT_OPTS} state.sls openssh
 salt-call ${SALT_OPTS} state.sls git.server
 salt-call ${SALT_OPTS} state.sls postgresql
@@ -24,25 +33,8 @@
 salt-call ${SALT_OPTS} state.sls_id maas_cluster_packages maas
 salt-call ${SALT_OPTS} state.sls_id maas_region_packages maas
 
-# linux.system.file only for backwards compatibility of jenkins - mcp-common-scripts
-salt-call ${SALT_OPTS} state.sls jenkins.master,linux.system.file
-sleep 60
-salt-call -t 5 --no-color state.sls jenkins.client.plugin
-systemctl restart jenkins
-sleep 60
-# Jenkins Mirantis theme
-# FIXME move those into cluster model
-git clone https://github.com/Mirantis/docker-jenkins.git
-cp -r docker-jenkins/theme /var/lib/jenkins/userContent
-chown -R jenkins:jenkins /var/lib/jenkins/userContent/*
-# finish Jenkins setup
-salt-call -t 5 --no-color state.sls jenkins.client.plugin
-systemctl restart jenkins
-sleep 60
-salt-call -t 5 --no-color state.sls jenkins.client
-systemctl restart jenkins
-sleep 60
-salt-call ${SALT_OPTS} state.sls jenkins.client
+# Create a flag file that the cloud-init script checks for
+touch /opt/jenkins_in_docker
 
 # Duplicate of ubuntu_info.sh
 mkdir -p /var/log/bootstrap_logs/ ; pushd /var/log/bootstrap_logs/
diff --git a/day01-image/template.json b/day01-image/template.json
index 25de077..741b7b6 100644
--- a/day01-image/template.json
+++ b/day01-image/template.json
@@ -10,7 +10,6 @@
     "cluster_model_ref": "{{ env `CLUSTER_MODEL_REF` }}",
     "cluster_name": "{{ env `CLUSTER_NAME` }}",
     "bs_hostname": "cfg01",
-    "formula_version": "{{ env `FORMULA_VERSION` }}",
     "mcp_version": "{{ env `MCP_VERSION` }}",
     "ubuntu_baseurl": "{{ env `UBUNTU_BASEURL` }}",
     "saltstack_gpg": "{{ env `SALTSTACK_GPG` }}",
@@ -30,8 +29,24 @@
     },
     {
       "type": "file",
-      "source": "files/tmp/bootstrap.saltstack.com.sh",
-      "destination": "/tmp/bootstrap.saltstack.com.sh"
+      "source": "files/opt/bootstrap.saltstack.com.sh",
+      "destination": "/opt/bootstrap.saltstack.com.sh"
+    },
+    {
+      "environment_vars": [
+        "UBUNTU_BASEURL={{ user `ubuntu_baseurl` }}",
+        "PACKER_OFFLINE_BUILD=true"
+      ],
+      "type": "shell",
+      "execute_command": "chmod +x {{ .Path }}; {{ .Vars }} {{ .Path }}",
+      "expect_disconnect": "true",
+      "scripts": [
+        "scripts/base_set_hostname.sh",
+        "scripts/base.sh",
+        "scripts/motd.sh",
+        "scripts/network.sh",
+        "scripts/reboot.sh"
+      ]
     },
     {
       "environment_vars": [
@@ -39,8 +54,6 @@
         "CLUSTER_MODEL_REF={{ user `cluster_model_ref` }}",
         "CLUSTER_NAME={{ user `cluster_name` }}",
         "BS_HOSTNAME={{ user `bs_hostname` }}",
-        "FORMULA_VERSION={{ user `formula_version` }}",
-        "UBUNTU_BASEURL={{ user `ubuntu_baseurl` }}",
         "SALTSTACK_GPG={{ user `saltstack_gpg` }}",
         "SALTSTACK_REPO={{ user `saltstack_repo` }}",
         "APT_MIRANTIS_GPG={{ user `apt_mirantis_gpg` }}",
@@ -49,15 +62,15 @@
         "APT_REPOSITORY={{ user `apt_repository` }}",
         "APT_REPOSITORY_GPG={{ user `apt_repository_gpg` }}",
         "APT_MIRANTIS_SALT_REPO={{ user `apt_mirantis_salt_repo` }}",
+        "BOOTSTRAP_SALTSTACK_COM=file:///opt/bootstrap.saltstack.com.sh",
+        "MCP_VERSION={{ user `mcp_version` }}",
         "PACKER_OFFLINE_BUILD=true"
       ],
       "type": "shell",
+      "pause_before": "60s",
       "execute_command": "chmod +x {{ .Path }}; {{ .Vars }} {{ .Path }}",
+      "expect_disconnect": "true",
       "scripts": [
-        "scripts/base_set_hostname.sh",
-        "scripts/base.sh",
-        "scripts/motd.sh",
-        "scripts/network.sh",
         "scripts/salt_bootstrap.sh",
         "scripts/salt.sh"
       ]
@@ -70,7 +83,7 @@
     {
       "type": "file",
       "source": "files/var/jenkins",
-      "destination": "/var/lib/jenkins/org.codefirst.SimpleThemeDecorator.xml"
+      "destination": "/srv/volumes/jenkins/org.codefirst.SimpleThemeDecorator.xml"
     },
     {
       "environment_vars": [
@@ -103,7 +116,7 @@
           "8096M"
         ],
         [
-          "-fda",
+          "-cdrom",
           "config-drive/cloudata.iso"
         ],
         [
diff --git a/mirror-image/config-drive/meta-data b/mirror-image/config-drive/meta-data
new file mode 100644
index 0000000..bf77e0f
--- /dev/null
+++ b/mirror-image/config-drive/meta-data
@@ -0,0 +1 @@
+hostname: apt01
diff --git a/mirror-image/config-drive/user-data.yaml b/mirror-image/config-drive/user-data
similarity index 100%
rename from mirror-image/config-drive/user-data.yaml
rename to mirror-image/config-drive/user-data
diff --git a/mirror-image/run.example.sh b/mirror-image/run.example.sh
index f1d39be..acaea28 100755
--- a/mirror-image/run.example.sh
+++ b/mirror-image/run.example.sh
@@ -15,12 +15,11 @@
 export IMAGE_NAME="mcp-offline-mirror-$(date '+%Y-%m-%d-%H-%M-%S')"
 export PACKER_IMAGES_CACHE="${HOME}/packer_images_cache/"
 #
-export CLUSTER_MODEL="https://github.com/Mirantis/mcp-offline-model.git"
+export CLUSTER_MODEL="https://gerrit.mcp.mirantis.com/mcp/mcp-offline-model"
 export CLUSTER_MODEL_REF="master"
 export CLUSTER_NAME="mcp-offline"
 
 export MCP_VERSION="proposed"
-export FORMULA_VERSION="proposed"
 
 BINARY_MCP_VERSION="proposed"
 export UBUNTU_BASEURL="http://mirror.mirantis.com/${BINARY_MCP_VERSION}/ubuntu/"
@@ -29,7 +28,8 @@
 export APT_MIRANTIS_GPG="http://apt.mirantis.com/public.gpg"
 export APT_MIRANTIS_SALT_REPO="http://apt.mirantis.com/xenial/ ${BINARY_MCP_VERSION} salt "
 #
-export GIT_SALT_FORMULAS_SCRIPTS="https://github.com/salt-formulas/salt-formulas-scripts.git"
+export GIT_SALT_FORMULAS_SCRIPTS="https://gerrit.mcp.mirantis.com/salt-formulas/salt-formulas-scripts"
+export SCRIPTS_REF="master"
 #
 export APT_REPOSITORY=" deb [arch=amd64] ${APT_MIRANTIS_SALT_REPO} "
 export APT_REPOSITORY_GPG=${APT_MIRANTIS_GPG}
@@ -48,7 +48,27 @@
 mkdir -p "${PACKER_IMAGES_CACHE}"
 
 export PACKER_LOG=1
+pushd config-drive
+  if [ ! -e model ]; then
+    git clone ${CLUSTER_MODEL} model/
+    pushd model
+      git fetch ${CLUSTER_MODEL} ${CLUSTER_MODEL_REF} && git checkout FETCH_HEAD
+      pushd classes/system
+        git submodule init
+        git submodule update
+      popd
+    popd
+  fi
+  if [ ! -e salt_scripts ]; then
+    git clone ${GIT_SALT_FORMULAS_SCRIPTS} salt_scripts/
+    pushd salt_scripts
+    git fetch ${GIT_SALT_FORMULAS_SCRIPTS} ${SCRIPTS_REF} && git checkout FETCH_HEAD
+    popd
+  fi
+popd
+
+[ -f config-drive/cloudata.iso ] && rm -v config-drive/cloudata.iso
+mkisofs -o config-drive/cloudata.iso -V cidata -r -J --quiet config-drive
 # For qemu test-build:
-cloud-localds  --hostname apt01 --dsmode local config-drive/cloudata.iso  config-drive/user-data.yaml
 packer build -only=qemu -parallel=false -on-error=ask template.json
 # rm -rf ~/.packer.d/
diff --git a/mirror-image/scripts/mirrors.sh b/mirror-image/scripts/mirrors.sh
index 67cf90c..bdf686e 100644
--- a/mirror-image/scripts/mirrors.sh
+++ b/mirror-image/scripts/mirrors.sh
@@ -26,7 +26,7 @@
 # Those would prevent failures for 2018q3+ model releases, where
 # 'system.linux.system.repo.mcp.apt_mirantis.update.xxx' enabled by default
 pushd /srv/volumes/aptly/public/
-  CUR_V=$(salt-call pillar.get _param:apt_mk_version --out text | awk '{print $2}')
+  CUR_V=$(salt-call pillar.get _param:mcp_version --out text | awk '{print $2}')
   mkdir -p update
   ln -s ../${CUR_V} update/${CUR_V} || true
 popd
diff --git a/mirror-image/template.json b/mirror-image/template.json
index c79bf90..3c13117 100644
--- a/mirror-image/template.json
+++ b/mirror-image/template.json
@@ -10,7 +10,6 @@
     "cluster_model_ref": "{{ env `CLUSTER_MODEL_REF` }}",
     "cluster_name": "{{ env `CLUSTER_NAME` }}",
     "bs_hostname": "apt01",
-    "formula_version": "{{ env `FORMULA_VERSION` }}",
     "mcp_version": "{{ env `MCP_VERSION` }}",
     "ubuntu_baseurl": "{{ env `UBUNTU_BASEURL` }}",
     "saltstack_gpg": "{{ env `SALTSTACK_GPG` }}",
@@ -53,7 +52,6 @@
         "CLUSTER_MODEL_REF={{ user `cluster_model_ref` }}",
         "CLUSTER_NAME={{ user `cluster_name` }}",
         "BS_HOSTNAME={{ user `bs_hostname` }}",
-        "FORMULA_VERSION={{ user `formula_version` }}",
         "UBUNTU_BASEURL={{ user `ubuntu_baseurl` }}",
         "SALTSTACK_GPG={{ user `saltstack_gpg` }}",
         "SALTSTACK_REPO={{ user `saltstack_repo` }}",
@@ -139,7 +137,7 @@
           "8096M"
         ],
         [
-          "-fda",
+          "-cdrom",
           "config-drive/cloudata.iso"
         ],
         [
diff --git a/packer-image-buid.groovy b/packer-image-buid.groovy
index 2e82c44..a87918f 100644
--- a/packer-image-buid.groovy
+++ b/packer-image-buid.groovy
@@ -21,9 +21,9 @@
  *            ---
  *            IMAGE_NAME: 'ubuntu-16.04-proposed'
  *
- *  CREDENTIALS_ID  = Global jenkins cred. for clone DEFAULT_GIT_URL
+ *  CREDENTIALS_ID  = Global Jenkins credential used to clone DEFAULT_GIT_URL
  *  DEFAULT_GIT_URL
- *  DEFAULT_GIT_REF
+ *  REFSPEC
  *
  *  OS_VERSION         = OpenStack version
  *  OS_CREDENTIALS_ID  = ID of credentials for OpenStack API stored in Jenkins.
@@ -42,15 +42,16 @@
 def dateTime = date.format("ddMMyyyy-HHmmss")
 //
 def job_env = env.getEnvironment().findAll { k, v -> v }
+gerritCredentials = env.CREDENTIALS_ID ?: 'gerrit'
 
 /////
 extra_vars = readYaml text: job_env.get('EXTRA_VARIABLES_YAML','').trim()
 // FIXME: os_openrc should be refactored.
 os_openrc = readYaml text: job_env.get('OPENSTACK_OPENRC_YAML','').trim()
 if (job_env.get('TIMESTAMP_INAME', false).toBoolean()) {
-  imageName = job_env.IMAGE_NAME + "-" + dateTime
-}else {
-  imageName = job_env.IMAGE_NAME
+    imageName = job_env.IMAGE_NAME + "-" + dateTime
+} else {
+    imageName = job_env.IMAGE_NAME
 }
 // Overwrite IMAGE_NAME in template.json with expected
 extra_vars['IMAGE_NAME'] = imageName
@@ -61,246 +62,275 @@
 job_env['BUILD_ONLY'] = job_env.BUILD_ONLY.toLowerCase()
 job_env['PUBLISH_BACKEND'] = job_env.PUBLISH_BACKEND.toLowerCase()
 //
-defaultGitRef = job_env.get('DEFAULT_GIT_REF', 'HEAD')
+defaultGitRef = job_env.get('REFSPEC', 'HEAD')
 defaultGitUrl = job_env.get('DEFAULT_GIT_URL', null)
 slaveNode = (env.SLAVE_NODE ?: 'jsl23.mcp.mirantis.net')
 
 // Self-check
 for (String req_v : ['BUILD_OS', 'BUILD_ONLY','IMAGE_NAME'] ) {
-  if (!job_env.get(req_v, false)) {
-    throw new Exception("${req_v} not set!")
-  }
+    if (!job_env.get(req_v, false)) {
+        throw new Exception("${req_v} not set!")
+    }
 }
 
 def MapToList(input_map) {
 /**
  * Convert dict in bash-like list
  */
-  def data_list = []
-  for (i = 0; i < input_map.size(); i++) {
-    data = ''
-    data = input_map.keySet()[i] + "=" + input_map.values()[i]
-    data_list.add(data)
-  }
-  return data_list
+    def data_list = []
+    for (i = 0; i < input_map.size(); i++) {
+        data = ''
+        data = input_map.keySet()[i] + "=" + input_map.values()[i]
+        data_list.add(data)
+    }
+    return data_list
 }
 
 timeout(time: 6, unit: 'HOURS') {
-  node(slaveNode) {
-    def checkouted = false
-    def workspace = common.getWorkspace()
-    creds = common.getPasswordCredentials(job_env.CREDENTIALS_ID)
-    if (job_env.BUILD_ONLY == 'openstack' || job_env.PUBLISH_BACKEND == 'glance') {
-      rcFile = openstack.createOpenstackEnv(workspace, os_openrc.OS_AUTH_URL, job_env.OS_TENANT_ID, job_env.OS_TENANT_NAME, "default", "", "default", "2", "")
-      def openstackEnv = "${workspace}/venv"
-    }
-
-    try {
-      def _artifact_dir = "${workspace}/artifacts"
-      def _artifact_list = []
-      def ImagesCacheFolder = "${workspace}/../${env.JOB_NAME}_cache/"
-      stage("checkout") {
-        if (defaultGitRef && defaultGitUrl) {
-          checkouted = gerrit.gerritPatchsetCheckout(defaultGitUrl, defaultGitRef, "HEAD", job_env.CREDENTIALS_ID)
-        } else {
-          throw new Exception("Cannot checkout gerrit patchset: DEFAULT_GIT_URL is null")
-        }
-      }
-      stage("Prepare env") {
-        if (!fileExists("${workspace}/tmp")) {
-          sh "mkdir -p ${workspace}/tmp"
-        }
-        if (!fileExists(ImagesCacheFolder)) {
-          sh "mkdir -p ${ImagesCacheFolder}"
-        }
-        if (!fileExists(_artifact_dir)) {
-          sh "mkdir -p ${_artifact_dir}"
-        }
-        if (!fileExists("bin")) {
-          common.infoMsg("Downloading packer")
-          sh "mkdir -p bin"
-          dir("bin") {
-            def zipname = sh(script: "basename ${job_env.PACKER_URL}", returnStdout: true).trim()
-            sh(script: "wget --quiet ${job_env.PACKER_URL}", returnStdout: true)
-            sh "echo \"${job_env.PACKER_ZIP_MD5} ${zipname}\" >> md5sum"
-            sh(script: "md5sum -c --status md5sum", returnStdout: true)
-            sh "unzip ${zipname}"
-          }
-        }
-        if (!fileExists("${job_env.BUILD_OS}/images")) {
-          // clean images dir before building
-          sh(script: "rm -rf ${job_env.BUILD_OS}/images/*", returnStatus: true)
-        }
-      }
-
-      stage("Build Instance") {
-        def _packer_args = "${job_env.get(PACKER_ARGS, '')}"
-        def _packer_log = "${workspace}/packer.log"
-        // clean old log, for correct status grepping
-        if (fileExists(_packer_log)) {
-          sh "rm -v ${_packer_log}"
+    node(slaveNode) {
+        def workspace = common.getWorkspace()
+        creds = common.getPasswordCredentials(job_env.CREDENTIALS_ID)
+        if (job_env.BUILD_ONLY == 'openstack' || job_env.PUBLISH_BACKEND == 'glance') {
+            rcFile = openstack.createOpenstackEnv(workspace, os_openrc.OS_AUTH_URL, job_env.OS_TENANT_ID, job_env.OS_TENANT_NAME, "default", "", "default", "2", "")
+            def openstackEnv = "${workspace}/venv"
         }
 
-        dir("${workspace}/${job_env.BUILD_OS}/") {
-          if (fileExists("config-drive/user-data.yaml")) {
-            common.infoMsg("Creating cloud-config drive")
-            if (fileExists("config-drive/cloudata.iso")) {
-              sh "rm -v config-drive/cloudata.iso"
+        stage("checkout") {
+            if (defaultGitRef && defaultGitUrl) {
+                checkouted = gerrit.gerritPatchsetCheckout(defaultGitUrl, defaultGitRef, "HEAD", job_env.CREDENTIALS_ID)
+            } else {
+                throw new Exception("Cannot checkout gerrit patchset: DEFAULT_GIT_URL is null")
             }
-            sh "cloud-localds config-drive/cloudata.iso  config-drive/user-data.yaml"
-          }
         }
 
-        if (job_env.BUILD_ONLY == "openstack") {
-          dir("${workspace}/${job_env.BUILD_OS}/") {
-            extra_vars_list = MapToList(extra_vars)
-            withEnv(["PATH=${env.PATH}:${workspace}/bin",
-                     "PACKER_LOG_PATH=${_packer_log}",
-                     "PACKER_LOG=1",
-                     "TMPDIR=${workspace}/tmp",
-                     "OS_USERNAME=${creds.username.toString()}",
-                     "OS_PASSWORD=${creds.password.toString()}"] + extra_vars_list) {
-
-              common.infoMsg("Run build with:")
-              sh(script: 'printenv|sort')
-              sh(script: "set -xe; packer build -only='openstack' ${_packer_args} -parallel=false template.json" )
-              _os_private = "${workspace}/${job_env.BUILD_OS}/os_${job_env.BUILD_OS}.pem"
-              if (fileExists(_os_private)) {
-                common.infoMsg("Packer private key:")
-                sh "cat ${_os_private}"
-              }
-              def packerStatus = sh(script: "grep \"Some builds didn't complete successfully and had errors\" ${_packer_log}", returnStatus: true)
-              // grep returns 0 if find something
-              if (packerStatus != 0) {
-                common.infoMsg("Openstack instance build complete")
-              } else {
-                throw new Exception("Openstack Packer build failed")
-              }
-
-              common.retry(3, 5) {
-                common.infoMsg("Attempt download openstack image..")
-                openstack.runOpenstackCommand("openstack image save --file ${_artifact_dir}/${imageName}.qcow2 ${imageName}", rcFile, openstackEnv)
-              }
-            }
-          }
-
-        } else if (job_env.BUILD_ONLY == 'qemu') {
-
-          dir("${workspace}/${job_env.BUILD_OS}/") {
-            extra_vars_list = MapToList(extra_vars)
-            withEnv(["PATH=${env.PATH}:${workspace}/bin",
-                     "PACKER_LOG_PATH=${_packer_log}",
-                     "PACKER_LOG=1",
-                     "TMPDIR=${workspace}/tmp",
-                     "PACKER_IMAGES_CACHE=${ImagesCacheFolder}"] + extra_vars_list) {
-              common.infoMsg("Run build with:")
-              sh(script: 'printenv|sort')
-              sh(script: "set -xe ; packer build -on-error=ask -only='qemu' ${_packer_args} -parallel=false template.json".toString())
-
-              def packerStatus = sh(script: "grep \"Some builds didn't complete successfully and had errors\" ${PACKER_LOG_PATH}", returnStatus: true)
-              // grep returns 0 if find something
-              if (packerStatus != 0) {
-                common.infoMsg("qemu instance build completed successfully")
-              } else {
-                throw new Exception("qemu instance build failed")
-              }
-              // collect artifacts
-              // TODO make it possible, process multiply artifacts by one run.
-              dir('images/') {
-                sh(script: 'find .', returnStdout: true)
-                def _files = findFiles(glob: "*qemu*/${imageName}*")
-                if (_files.size() > 1) {
-                  common.warningMsg("Multiply artifacts detected!Only first one will be processed!")
-                } else if (_files.size() == 0) {
-                  throw new Exception("No artifacts detected!BUILD_ONLY=${env.BUILD_ONLY} failed!")
+        try {
+            def _artifact_dir = "${workspace}/artifacts"
+            def _artifact_list = []
+            def ImagesCacheFolder = "${workspace}/../${env.JOB_NAME}_cache/"
+            stage("Prepare env") {
+                if (!fileExists("${workspace}/tmp")) {
+                    sh "mkdir -p ${workspace}/tmp"
                 }
-                for (String x : _files) {
-                  _file = sh(script: "set -x ; readlink -f ${x}", returnStdout: true)
-                  sh(script: "mv -v ${x} ${_artifact_dir}/${imageName}.qcow2")
-                  // Save filename to list
-                  _artifact_list.add("${imageName}.qcow2")
+                if (!fileExists(ImagesCacheFolder)) {
+                    sh "mkdir -p ${ImagesCacheFolder}"
                 }
-              }
+                if (!fileExists(_artifact_dir)) {
+                    sh "mkdir -p ${_artifact_dir}"
+                }
+                if (!fileExists("bin")) {
+                    common.infoMsg("Downloading packer")
+                    sh "mkdir -p bin"
+                    dir("bin") {
+                        def zipname = sh(script: "basename ${job_env.PACKER_URL}", returnStdout: true).trim()
+                        sh(script: "wget --quiet ${job_env.PACKER_URL}", returnStdout: true)
+                        sh "echo \"${job_env.PACKER_ZIP_MD5} ${zipname}\" >> md5sum"
+                        sh(script: "md5sum -c --status md5sum", returnStdout: true)
+                        sh "unzip ${zipname}"
+                    }
+                }
+                if (!fileExists("${job_env.BUILD_OS}/images")) {
+                    // clean images dir before building
+                    sh(script: "rm -rf ${job_env.BUILD_OS}/images/*", returnStatus: true)
+                }
             }
-          }
 
-        } else {
-          throw new Exception("Unexpected BUILD_ONLY=${env.BUILD_ONLY} target!")
-        }
-      }
+            stage("Build Instance") {
+                def _packer_args = "${job_env.get(PACKER_ARGS, '')}"
+                def _packer_log = "${workspace}/packer.log"
+                // clean old log, for correct status grepping
+                if (fileExists(_packer_log)) {
+                    sh "rm -v ${_packer_log}"
+                }
 
-      stage("Publish artifacts") {
-        dir(_artifact_dir) {
-          common.infoMsg("Processing md5 for artifacts")
-          for (String x : _artifact_list) {
-            _md5 = sh(script: "md5sum ${x} > ${x}.md5; cat ${x}.md5", returnStdout: true).trim()
-            _size = sh(script: "ls -alh ${x}", returnStdout: true).trim()
-            common.infoMsg("Artifact file: ${_size}\n${_md5}")
-          }
-          if (job_env.PUBLISH_BACKEND == 'local') {
-            common.infoMsg("Uploading to: local")
-            common.infoMsg("For local publish target - nothing to do, all files in: ${_artifact_dir}")
+                dir("${workspace}/${job_env.BUILD_OS}/") {
+                    if (fileExists("config-drive")) {
+                        def model = extra_vars.get('CLUSTER_MODEL', '')
+                        if (model != "") {
+                            checkout([
+                                    $class             : 'GitSCM',
+                                    branches           : [[name: 'FETCH_HEAD']],
+                                    recursiveSubmodules: true,
+                                    extensions         : [[$class: 'RelativeTargetDirectory', relativeTargetDir: 'config-drive/model']],
+                                    userRemoteConfigs  : [[url: model, refspec: extra_vars.get('CLUSTER_MODEL_REF', 'master'), credentialsId: gerritCredentials]]
+                            ])
+                        }
+                        def scripts = extra_vars.get('GIT_SALT_FORMULAS_SCRIPTS', '')
+                        if (scripts != "") {
+                            checkout([
+                                    $class           : 'GitSCM',
+                                    branches         : [[name: 'FETCH_HEAD']],
+                                    extensions       : [[$class: 'RelativeTargetDirectory', relativeTargetDir: 'config-drive/salt_scripts']],
+                                    userRemoteConfigs: [[url: scripts, refspec: extra_vars.get('SCRIPTS_REF', 'master'), credentialsId: gerritCredentials]]
+                            ])
+                        }
+
+                        common.infoMsg("Creating cloud-config drive")
+                        def isoFile = "config-drive/cloudata.iso"
+                        if (fileExists(isoFile)) {
+                            sh "rm -v ${isoFile}"
+                        }
+                        // This is left for backward-compatibility
+                        if (fileExists("config-drive/user-data.yaml")) {
+                            sh "mv config-drive/user-data.yaml config-drive/user-data"
+                            if (!fileExists("config-drive/meta-data")) {
+                                sh "echo 'hostname: ubuntu' > config-drive/meta-data"
+                            }
+                        }
+                        sh "mkisofs -o ${isoFile} -V cidata -r -J --quiet config-drive"
+                        archiveArtifacts artifacts: "${isoFile}"
+                    }
+                }
+
+                if (job_env.BUILD_ONLY == "openstack") {
+                    dir("${workspace}/${job_env.BUILD_OS}/") {
+                        extra_vars_list = MapToList(extra_vars)
+                        withEnv(["PATH=${env.PATH}:${workspace}/bin",
+                                 "PACKER_LOG_PATH=${_packer_log}",
+                                 "PACKER_LOG=1",
+                                 "TMPDIR=${workspace}/tmp",
+                                 "OS_USERNAME=${creds.username.toString()}",
+                                 "OS_PASSWORD=${creds.password.toString()}"] + extra_vars_list) {
+
+                            common.infoMsg("Run build with:")
+                            sh(script: 'printenv|sort')
+                            sh(script: "set -xe; packer build -only='openstack' ${_packer_args} -parallel=false template.json")
+                            _os_private = "${workspace}/${job_env.BUILD_OS}/os_${job_env.BUILD_OS}.pem"
+                            if (fileExists(_os_private)) {
+                                common.infoMsg("Packer private key:")
+                                sh "cat ${_os_private}"
+                            }
+                            def packerStatus = sh(script: "grep \"Some builds didn't complete successfully and had errors\" ${_packer_log}", returnStatus: true)
+                            // grep returns 0 if it finds something
+                            if (packerStatus != 0) {
+                                common.infoMsg("Openstack instance build complete")
+                            } else {
+                                throw new Exception("Openstack Packer build failed")
+                            }
+
+                            common.retry(3, 5) {
+                                common.infoMsg("Attempt download openstack image..")
+                                openstack.runOpenstackCommand("openstack image save --file ${_artifact_dir}/${imageName}.qcow2 ${imageName}", rcFile, openstackEnv)
+                            }
+                        }
+                    }
+
+                } else if (job_env.BUILD_ONLY == 'qemu') {
+
+                    dir("${workspace}/${job_env.BUILD_OS}/") {
+                        extra_vars_list = MapToList(extra_vars)
+                        withEnv(["PATH=${env.PATH}:${workspace}/bin",
+                                 "PACKER_LOG_PATH=${_packer_log}",
+                                 "PACKER_LOG=1",
+                                 "TMPDIR=${workspace}/tmp",
+                                 "PACKER_IMAGES_CACHE=${ImagesCacheFolder}"] + extra_vars_list) {
+                            common.infoMsg("Run build with:")
+                            sh(script: 'printenv|sort')
+                            sh(script: "set -xe ; packer build -on-error=ask -only='qemu' ${_packer_args} -parallel=false template.json".toString())
+
+                            def packerStatus = sh(script: "grep \"Some builds didn't complete successfully and had errors\" ${PACKER_LOG_PATH}", returnStatus: true)
+                            // grep returns 0 if it finds something
+                            if (packerStatus != 0) {
+                                common.infoMsg("qemu instance build completed successfully")
+                            } else {
+                                throw new Exception("qemu instance build failed")
+                            }
+                            // collect artifacts
+                            // TODO: make it possible to process multiple artifacts in one run.
+                            dir('images/') {
+                                sh(script: 'find .', returnStdout: true)
+                                def _files = findFiles(glob: "*qemu*/${imageName}*")
+                                if (_files.size() > 1) {
+                                    common.warningMsg("Multiple artifacts detected! Only the first one will be processed!")
+                                } else if (_files.size() == 0) {
+                                    throw new Exception("No artifacts detected! BUILD_ONLY=${env.BUILD_ONLY} failed!")
+                                }
+                                for (String x : _files) {
+                                    _file = sh(script: "set -x ; readlink -f ${x}", returnStdout: true)
+                                    sh(script: "mv -v ${x} ${_artifact_dir}/${imageName}.qcow2")
+                                    // Save filename to list
+                                    _artifact_list.add("${imageName}.qcow2")
+                                }
+                            }
+                        }
+                    }
+
+                } else {
+                    throw new Exception("Unexpected BUILD_ONLY=${env.BUILD_ONLY} target!")
+                }
+            }
+
+            stage("Publish artifacts") {
+                dir(_artifact_dir) {
+                    common.infoMsg("Processing md5 for artifacts")
+                    for (String x : _artifact_list) {
+                        _md5 = sh(script: "md5sum ${x} > ${x}.md5; cat ${x}.md5", returnStdout: true).trim()
+                        _size = sh(script: "ls -alh ${x}", returnStdout: true).trim()
+                        common.infoMsg("Artifact file: ${_size}\n${_md5}")
+                    }
+                    if (job_env.PUBLISH_BACKEND == 'local') {
+                        common.infoMsg("Uploading to: local")
+                        common.infoMsg("For local publish target - nothing to do, all files in: ${_artifact_dir}")
+                        if (job_env.get('CLEANUP_AFTER', false)) {
+                            common.warningMsg("You are trying to use 'local' publish method, along with enabled CLEANUP_AFTER! ")
+                            common.warningMsg("Disabling CLEANUP_AFTER option, to save your data ;) ")
+                            job_env.CLEANUP_AFTER = false
+                        }
+                    } else if (job_env.PUBLISH_BACKEND == 'glance') {
+                        common.infoMsg("Uploading to: glance-openstack")
+                        if (fileExists("${workspace}/venv")) {
+                            common.infoMsg("cleaning virtualenv at:${workspace}/venv")
+                            sh(script: "rm -rf ${workspace}/venv", returnStatus: true)
+                        }
+                        openstack.setupOpenstackVirtualenv(openstackEnv, job_env.OS_VERSION)
+                        for (String x : findFiles(glob: "*.*")) {
+                            if (x.endsWith('.md5')) {
+                                common.warningMsg("Skipping:${x} from openstack upload!")
+                                _md5sum = sh(script: "cat ${x}", returnStdout: true).trim().split()[0]
+                                continue
+                            }
+                            _property = "--property data=${dateTime} --property md5sum=${_md5sum}"
+                            _cmdline = String.format("glance image-create --visibility " +
+                                    "public %s --name '%s' %s --file %s", _property, imageShortName, glanceRunArgs, imageName)
+                            openstack.runOpenstackCommand(_cmdline, rcFile, openstackEnv)
+                        }
+                        // TODO
+                        currentBuild.description = "${imageName}.qcow2 uploaded tenant: "
+
+                    } else if (job_env.PUBLISH_BACKEND == 'http') {
+                        for (String u_file : findFiles(glob: '*.*')) {
+                            common.infoMsg("Uploading image ${imageName}")
+                            def uploadImageStatus = ''
+                            common.retry(3, 5) {
+                                uploadImageStatus = sh(script: "curl -f -T ${u_file} ${job_env.UPLOAD_URL}", returnStatus: true)
+                                if (uploadImageStatus != 0) {
+                                    throw new Exception("Uploading file: ${u_file} failed!")
+                                }
+                            }
+                            // Fixme for correct path ?
+                            currentBuild.description = "<a href='http://images.mcp.mirantis.net/${imageName}.qcow2'>${imageName}.qcow2</a>"
+                        }
+
+                    } else {
+                        throw new Exception("Unsupported publish backend:${job_env.PUBLISH_BACKEND}")
+                    }
+
+                }
+            }
+        } catch (Throwable e) {
+            // If there was an error or exception thrown, the build failed
+            currentBuild.result = "FAILURE"
+            throw e
+        } finally {
             if (job_env.get('CLEANUP_AFTER', false)) {
-              common.warningMsg("You are trying to use 'local' publish method, along with enabled CLEANUP_AFTER! ")
-              common.warningMsg("Disabling CLEANUP_AFTER option, to save you'r data ;) ")
-              job_env.CLEANUP_AFTER = false
-            }
-          } else if (job_env.PUBLISH_BACKEND == 'glance') {
-            common.infoMsg("Uploading to: glance-openstack")
-            if (fileExists("${workspace}/venv")) {
-              common.infoMsg("cleaning virtualenv at:${workspace}/venv")
-              sh(script: "rm -rf ${workspace}/venv", returnStatus: true)
-            }
-            openstack.setupOpenstackVirtualenv(openstackEnv, job_env.OS_VERSION)
-              for (String x : findFiles(glob: "*.*")) {
-                if (x.endsWith('.md5')) {
-                  common.warningMsg("Skipping:${x} from openstack upload!")
-                  _md5sum = sh(script: "cat ${x}", returnStdout: true).trim().split()[0]
-                  continue
+                dir(workspace) {
+                    sh "find . -mindepth 1 -delete || true"
                 }
-                _property = "--property data=${dateTime} --property md5sum=${_md5sum}"
-                _cmdline = String.format("glance image-create --visibility " +
-                    "public %s --name '%s' %s --file %s", _property, imageShortName, glanceRunArgs, imageName)
-                openstack.runOpenstackCommand(_cmdline, rcFile, openstackEnv)
-              }
-            // TODO
-            currentBuild.description = "${imageName}.qcow2 uploaded tenant: "
-
-          } else if (job_env.PUBLISH_BACKEND == 'http') {
-            for (String u_file : findFiles(glob: '*.*')) {
-              common.infoMsg("Uploading image ${imageName}")
-              def uploadImageStatus = ''
-              common.retry(3, 5) {
-                uploadImageStatus = sh(script: "curl -f -T ${u_file} ${job_env.UPLOAD_URL}", returnStatus: true)
-                if (uploadImageStatus != 0) {
-                  throw new Exception("Uploading file: ${u_file} failed!")
+                if (job_env.BUILD_ONLY == 'openstack') {
+                    common.warningMsg("openstack Env cleanup not implemented yet!")
                 }
-              }
-              // Fixme for correct path ?
-              currentBuild.description = "<a href='http://images.mcp.mirantis.net/${imageName}.qcow2'>${imageName}.qcow2</a>"
+            } else {
+                common.warningMsg("Env has not been cleaned! Please clean it up manually!")
             }
-
-          } else {
-            throw new Exception("Unsupported publish backend:${job_env.PUBLISH_BACKEND}")
-          }
-
         }
-      }
-    } catch (Throwable e) {
-      // If there was an error or exception thrown, the build failed
-      currentBuild.result = "FAILURE"
-      throw e
-    } finally {
-      if (job_env.get('CLEANUP_AFTER', false)) {
-        dir(workspace) {
-          sh "find . -mindepth 1 -delete || true"
-        }
-        if (job_env.BUILD_ONLY == 'openstack') {
-          common.warningMsg("openstack Env cleanup not implemented yet!")
-        }
-      } else {
-        common.warningMsg("Env has not been cleaned!Please cleanup it manualy!")
-      }
     }
-  }
 }
diff --git a/trymcp-day01-image/config-drive/meta-data b/trymcp-day01-image/config-drive/meta-data
new file mode 100644
index 0000000..c7ba190
--- /dev/null
+++ b/trymcp-day01-image/config-drive/meta-data
@@ -0,0 +1 @@
+hostname: ubuntu
diff --git a/trymcp-day01-image/config-drive/user-data.yaml b/trymcp-day01-image/config-drive/user-data
similarity index 100%
rename from trymcp-day01-image/config-drive/user-data.yaml
rename to trymcp-day01-image/config-drive/user-data
diff --git a/trymcp-day01-image/files/etc/cloud/cloud.cfg b/trymcp-day01-image/files/etc/cloud/cloud.cfg
index 953ceb8..10290e1 100644
--- a/trymcp-day01-image/files/etc/cloud/cloud.cfg
+++ b/trymcp-day01-image/files/etc/cloud/cloud.cfg
@@ -80,6 +80,8 @@
   - sed -i'.orig' -e's/PermitRootLogin.*/PermitRootLogin yes/g' -e's/PasswordAuthentication.*/PasswordAuthentication yes/g' /etc/ssh/sshd_config
   - service sshd restart
   - salt-call --timeout=120 test.ping
+  - salt-call saltutil.clear_cache
+  - salt-call saltutil.refresh_pillar
   - salt-call saltutil.sync_all
   - systemctl restart docker
   - sleep 20
@@ -96,3 +98,4 @@
   - salt-call state.sls gerrit
   - sleep 25
   - salt-call state.sls jenkins
+  - touch /done_cloud_init_bootstrap
diff --git a/trymcp-day01-image/run.example.sh b/trymcp-day01-image/run.example.sh
index 4fafb55..92ad386 100755
--- a/trymcp-day01-image/run.example.sh
+++ b/trymcp-day01-image/run.example.sh
@@ -17,17 +17,15 @@
 export APT_MIRANTIS_SALT_REPO="http://apt.mirantis.com/xenial/ nightly salt"
 export APT_REPOSITORY="deb [arch=amd64] http://apt.mirantis.com/xenial/ proposed salt"
 export APT_REPOSITORY_GPG="http://apt.mirantis.com/public.gpg"
-export CLUSTER_MODEL="https://github.com/LotharKAtt/trymcp-drivetrain-model.git"
+export CLUSTER_MODEL="https://gerrit.mcp.mirantis.com/trymcp/drivetrain-model"
 export CLUSTER_MODEL_REF="master"
 export CLUSTER_NAME="try-mcp"
-export FORMULA_VERSION="nightly"
-export GIT_SALT_FORMULAS_SCRIPTS="https://github.com/salt-formulas/salt-formulas-scripts.git"
+export GIT_SALT_FORMULAS_SCRIPTS="https://gerrit.mcp.mirantis.com/salt-formulas/salt-formulas-scripts"
 export MCP_VERSION="proposed"
 export SALTSTACK_GPG="https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2017.7/SALTSTACK-GPG-KEY.pub"
 export SALTSTACK_REPO="http://mirror.mirantis.com/proposed/saltstack-2017.7/xenial xenial main"
 export SCRIPTS_REF="master"
 export UBUNTU_BASEURL="http://mirror.mirantis.com/proposed/ubuntu/"
-export BINARY_MCP_VERSION="proposed"
 
 ###
 # Hard-coded folder in template
@@ -35,7 +33,27 @@
 mkdir -p "${PACKER_IMAGES_CACHE}"
 
 export PACKER_LOG=1
+pushd config-drive
+  if [ ! -e model ]; then
+    git clone ${CLUSTER_MODEL} model/
+    pushd model
+      git fetch ${CLUSTER_MODEL} ${CLUSTER_MODEL_REF} && git checkout FETCH_HEAD
+      pushd classes/system
+        git submodule init
+        git submodule update
+      popd
+    popd
+  fi
+  if [ ! -e salt_scripts ]; then
+    git clone ${GIT_SALT_FORMULAS_SCRIPTS} salt_scripts/
+    pushd salt_scripts
+    git fetch ${GIT_SALT_FORMULAS_SCRIPTS} ${SCRIPTS_REF} && git checkout FETCH_HEAD
+    popd
+  fi
+popd
+
+[ -f config-drive/cloudata.iso ] && rm -v config-drive/cloudata.iso
+mkisofs -o config-drive/cloudata.iso -V cidata -r -J --quiet config-drive
 # For qemu test-build:
-cloud-localds  --hostname ubuntu --dsmode local config-drive/cloudata.iso  config-drive/user-data.yaml
 packer build -only=qemu -parallel=false -on-error=ask template.json
 #rm -rf ~/.packer.d/
diff --git a/trymcp-day01-image/scripts/salt.sh b/trymcp-day01-image/scripts/salt.sh
index 51eea35..2161aaf 100644
--- a/trymcp-day01-image/scripts/salt.sh
+++ b/trymcp-day01-image/scripts/salt.sh
@@ -1,17 +1,16 @@
 #!/bin/bash -xe
 
-FORMULA_VERSION=${FORMULA_VERSION:-2018.3.1}
-APT_MIRANTIS_SALT_REPO=${APT_MIRANTIS_SALT_REPO:-"http://apt.mirantis.com/xenial/ $FORMULA_VERSION salt"}
+MCP_VERSION=${MCP_VERSION:-proposed}
+APT_MIRANTIS_SALT_REPO=${APT_MIRANTIS_SALT_REPO:-"http://apt.mirantis.com/xenial/ $MCP_VERSION salt"}
 SALT_OPTS="-t 10 --retcode-passthrough --no-color"
 
 echo "deb [arch=amd64] ${APT_MIRANTIS_SALT_REPO}" > /etc/apt/sources.list.d/mcp_salt.list
 apt-get update
 apt-get install salt-formula* -y
 
-# Basic states
-salt-call saltutil.clear_cache
-salt-call saltutil.refresh_pillar
-salt-call saltutil.sync_all
+salt-call ${SALT_OPTS} saltutil.clear_cache
+salt-call ${SALT_OPTS} saltutil.refresh_pillar
+salt-call ${SALT_OPTS} saltutil.sync_all
 salt-call ${SALT_OPTS} reclass.validate_pillar
 
 salt-call ${SALT_OPTS} state.sls linux.system.repo,linux.system.package,linux.system.user,linux.system.directory,linux.system.config
@@ -20,20 +19,22 @@
 
 salt-call ${SALT_OPTS} state.sls salt
 salt-call ${SALT_OPTS} state.sls docker.host
-salt-call ${SALT_OPTS} saltutil.sync_all
 
-docker pull docker-prod-local.artifactory.mirantis.com/mirantis/cicd/mysql:2018.8.0
-docker pull docker-prod-local.artifactory.mirantis.com/mirantis/cicd/gerrit:2018.8.0
-docker pull docker-prod-local.artifactory.mirantis.com/mirantis/cicd/jenkins:2018.8.0
-# FIXME: workaround for missing packages in jenkins slave image
-#docker pull docker-prod-local.artifactory.mirantis.com/mirantis/cicd/jnlp-slave:2018.8.0
-docker pull quay.io/lotharkatt/jenkins-slave:latest
-docker pull docker-prod-local.artifactory.mirantis.com/mirantis/cicd/phpldapadmin:2018.8.0
+sed -i "s/mcp_version:.*/mcp_version: ${MCP_VERSION}/g" /srv/salt/reclass/classes/cluster/try-mcp/overrides.yml
+
+docker pull "docker-prod-local.artifactory.mirantis.com/mirantis/cicd/mysql:${MCP_VERSION}"
+docker pull "docker-prod-local.artifactory.mirantis.com/mirantis/cicd/gerrit:${MCP_VERSION}"
+docker pull "docker-prod-local.artifactory.mirantis.com/mirantis/cicd/jenkins:${MCP_VERSION}"
+docker pull "docker-prod-local.artifactory.mirantis.com/mirantis/cicd/jnlp-slave:${MCP_VERSION}"
+docker pull "docker-prod-local.artifactory.mirantis.com/mirantis/cicd/phpldapadmin:${MCP_VERSION}"
+docker pull "docker-prod-local.artifactory.mirantis.com/mirantis/model-generator/operations-api:${MCP_VERSION}"
+docker pull "docker-prod-local.artifactory.mirantis.com/mirantis/model-generator/operations-ui:${MCP_VERSION}"
+# Mirror images below into artifactory
 docker pull jboss/keycloak:4.5.0.Final
 docker pull jboss/keycloak-proxy:3.4.2.Final
-docker pull mirantis/python-operations-api:latest
-docker pull cockroachdb/cockroach:latest
-docker pull mirantis/operations-ui:latest
+docker pull cockroachdb/cockroach:v2.1.0
+# FIXME: setup build for images below with proper versions and publish to artifactory
+docker pull atengler/cockroach-init:latest
 echo "---------------------"
 docker images
 echo "---------------------"
diff --git a/trymcp-day01-image/template.json b/trymcp-day01-image/template.json
index 72ff9e0..84f0a8e 100644
--- a/trymcp-day01-image/template.json
+++ b/trymcp-day01-image/template.json
@@ -10,7 +10,6 @@
     "cluster_model_ref": "{{ env `CLUSTER_MODEL_REF` }}",
     "cluster_name": "{{ env `CLUSTER_NAME` }}",
     "bs_hostname": "cfg01",
-    "formula_version": "{{ env `FORMULA_VERSION` }}",
     "mcp_version": "{{ env `MCP_VERSION` }}",
     "ubuntu_baseurl": "{{ env `UBUNTU_BASEURL` }}",
     "saltstack_gpg": "{{ env `SALTSTACK_GPG` }}",
@@ -54,7 +53,6 @@
         "CLUSTER_MODEL_REF={{ user `cluster_model_ref` }}",
         "CLUSTER_NAME={{ user `cluster_name` }}",
         "BS_HOSTNAME={{ user `bs_hostname` }}",
-        "FORMULA_VERSION={{ user `formula_version` }}",
         "SALTSTACK_GPG={{ user `saltstack_gpg` }}",
         "SALTSTACK_REPO={{ user `saltstack_repo` }}",
         "APT_MIRANTIS_GPG={{ user `apt_mirantis_gpg` }}",
@@ -62,9 +60,9 @@
         "GIT_SALT_FORMULAS_SCRIPTS={{ user `git_salt_formulas_scripts` }}",
         "APT_REPOSITORY={{ user `apt_repository` }}",
         "APT_REPOSITORY_GPG={{ user `apt_repository_gpg` }}",
-        "FORMULA_VERSION={{ user `formula_version` }}",
         "APT_MIRANTIS_SALT_REPO={{ user `apt_mirantis_salt_repo` }}",
         "BOOTSTRAP_SALTSTACK_COM=file:///opt/bootstrap.saltstack.com.sh",
+        "MCP_VERSION={{ user `mcp_version` }}",
         "PACKER_OFFLINE_BUILD=true"
       ],
       "type": "shell",
@@ -77,10 +75,6 @@
       ]
     },
     {
-      "environment_vars": [
-        "HOME=/root",
-        "MCP_VERSION={{ user `mcp_version` }}"
-      ],
       "type": "shell",
       "pause_before": "10s",
       "scripts": [
@@ -104,7 +98,7 @@
           "8096M"
         ],
         [
-          "-fda",
+          "-cdrom",
           "config-drive/cloudata.iso"
         ],
         [