create-config-drive refactoring

* What's new:
  - Added new create_config_drive script
  - Switched config drive from CIData to OpenStack native v2
  - Added ability to specify network config via config drive

* Old setup:
  - stays untouched

* New setup:
  - Deprecated network setup in user_data both for master and mirror
  - Added ability to specify instance user and credentials.
  - Deprecated ssh failsafe function
  - Migrated from state.sls to state.apply in new configurations
  - Fixed syntax for directory management
  - Added ability to use FORMULAS env variable if specified

Related-Prod: PROD-28144 (PROD:28144)
Related-Prod: PROD-23902 (PROD:23902)
Change-Id: Ieddc5dfb2969f9e827a3bfcb970feceeb1ca12e1
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..7f25710
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,56 @@
+Config drive creation tool
+==========================
+
+This script builds an ISO config drive image for MCP instances.
+
+It is not intended for VCP instances: their config drives are generated by the
+salt master. This script only builds config drives for the salt master and the
+aptly mirror (for offline deployments).
+
+It supports the features provided by the OpenStack cloud-init config drive
+format. You can supply network_data.json and vendor_data.json, or omit them
+entirely.
+
+Networking is the major part here.
+If you specify the --network-data option with a network_data.json file, it
+takes the highest priority: any further network options are ignored and the
+configuration is taken entirely from the JSON file you provide.
+This is also how you can configure multiple interfaces if you wish; a sample
+file is sketched below.
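+
+A minimal network_data.json sketch for a single statically addressed ens3
+interface (the addresses are placeholders)::
+
+    {
+      "links": [
+        {"type": "phy", "id": "ens3", "name": "ens3"}
+      ],
+      "networks": [
+        {"id": "private-ipv4", "type": "ipv4", "link": "ens3",
+         "ip_address": "172.16.164.15", "netmask": "255.255.255.192",
+         "routes": [
+           {"network": "0.0.0.0", "netmask": "0.0.0.0", "gateway": "172.16.164.1"}
+         ]}
+      ],
+      "services": [
+        {"type": "dns", "address": "8.8.8.8"}
+      ]
+    }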
+
+Without this file the instance still needs its network configured somehow, and
+the basic information is taken from the following arguments:
+
+     --ip,
+     --netmask,
+     --gateway (optional, used for the default route),
+     --interface (optional, defaults to ens3)
+
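+For example, a hypothetical invocation for a salt master node, run from the
+config-drive directory (all addresses and paths are placeholders)::
+
+    ./create_config_drive.py --name cfg01 --hostname cfg01.deploy-name.local \
+        --ip 172.16.164.15 --netmask 255.255.255.192 --gateway 172.16.164.1 \
+        --dns-nameservers 8.8.8.8 --user-data master_config.yaml \
+        --model /path/to/model --clean-up
+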
+So, in order to create the ISO file, the script needs to know these parameters
+and you have to pass them; otherwise you would have to implement your own logic
+based on the write_files: section and on catching the SALT_MASTER_DEPLOY_IP or
+APTLY_DEPLOY_IP parameters, whose names may change over time.
+In that case only basic network configuration is done, and further actions,
+such as setting up MTU, VLANs and bridges, should be performed by the
+configuration management tool (salt).
+
+You may also want to skip network configuration entirely by passing
+--skip-network, so the instance starts with only meta_data.json and user_data.
+
+Other parameters, such as MCP_VERSION, are out of scope for this tool and are
+not calculated. You need to edit the yaml files and specify them yourself
+before creating the ISO.
+
+Vendor metadata can be specified in the native OpenStack JSON format:
+- vendor_data.json (StaticJSON)
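+
+The vendor_data.json content is an arbitrary JSON document, for example::
+
+    {"custom": {"greeting": "hello from vendor_data"}}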
+
+If you want to add an ssh key to your instance, specify it via the --ssh-key
+parameter. To add multiple ssh keys, use the --ssh-keys parameter and pass the
+path to a file in authorized_keys format that contains them. If you specify
+both, the keys are merged and deduplicated.
+
+To access your instance via ssh, you need to know the default username of the
+cloud image. Alternatively, you can specify a user with the --cloud-user-name
+parameter, and the ssh keys will be added to that user. This user has sudo
+privileges.
+
+If you need access to your instance via a serial tty, specify the
+--cloud-user-pass parameter and the user section will be updated with the
+password.
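+
+As another hypothetical sketch, the following adds ssh keys and a sudo-capable
+cloud user with a password for serial console access (all values are
+placeholders)::
+
+    ./create_config_drive.py --hostname apt01.deploy-name.local --skip-network \
+        --user-data mirror_config.yaml --cloud-user-name ubuntu \
+        --cloud-user-pass 'secret' --ssh-key "ssh-rsa AAAA... user@host" \
+        --ssh-keys ~/.ssh/authorized_keys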
diff --git a/config-drive/create_config_drive.py b/config-drive/create_config_drive.py
new file mode 100755
index 0000000..55b96e3
--- /dev/null
+++ b/config-drive/create_config_drive.py
@@ -0,0 +1,235 @@
+#!/usr/bin/env python3
+#
+# Generate config drives v2 for MCP instances.
+#
+# Config Drive v2 links:
+# - structure: https://cloudinit.readthedocs.io/en/latest/topics/datasources/configdrive.html#version-2
+# - network configuration: https://cloudinit.readthedocs.io/en/latest/topics/network-config.html
+#
+# This script uses OpenStack Metadata Service Network format for configuring networking.
+#
+__author__    = "Dzmitry Stremkouski"
+__copyright__ = "Copyright 2019, Mirantis Inc."
+__license__   = "Apache 2.0"
+
+import argparse
+from crypt import crypt, mksalt, METHOD_SHA512
+from json import dump as json_dump
+from os import makedirs, umask
+from shutil import copytree, copyfile, copyfileobj, rmtree
+from subprocess import call as run
+from sys import argv, exit
+from uuid import uuid1 as uuidgen
+from yaml import safe_load
+
+
+def crash_with_error(msg, exit_code=1):
+    print("ERROR: \n" + msg)
+    exit(exit_code)
+
+
+def xprint(msg):
+    if not args.quiet:
+        print(msg)
+
+
+def calculate_hostnames(name, hostname):
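+    """Resolve the final (name, hostname) pair.
+
+    Both default to the generated uuid. If --hostname is not fully
+    qualified, a provided --name replaces it; a fully qualified
+    --hostname supplies a missing name from its short part. A dotted
+    --name is rejected.
+    """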
+
+    if len(name.split('.')) > 1:
+        crash_with_error("instance name should be in short format without domain")
+    else:
+        if len(hostname.split('.')) == 1:
+            if not name == uuid:
+                hostname = name
+            else:
+                name = hostname
+        else:
+            if name == uuid:
+                name = hostname.split('.')[0]
+
+    return [name, hostname]
+
+
+def validate_args(args):
+    if not args.user_data:
+        if args.cloud_user_name or args.cloud_user_pass:
+            crash_with_error("Cloud user setup (--cloud-user-name / --cloud-user-pass) requires a user_data file; please also specify --user-data.")
+
+    if not args.skip_network and not args.network_data:
+        if not args.ip or not args.netmask or not args.interface:
+            crash_with_error("Network configuration requires --ip and --netmask (or a network_data.json file).\nEither pass them, provide --network-data, or use --skip-network.")
+
+    if args.skip_network and args.network_data:
+        crash_with_error("--skip-network and --network-data are mutually exclusive.")
+
+
+def generate_iso(cfg_file_path, cfg_dir_path, quiet=False):
+    xprint("Generating config drive image: %s" % cfg_file_path)
+    cmd = ["mkisofs", "-r", "-J", "-V", "config-2", "-input-charset", "utf-8"]
+    if quiet:
+        cmd.append("-quiet")
+    cmd += ["-o", cfg_file_path, cfg_dir_path]
+    run(cmd)
+
+
+def create_config_drive(args):
+    name, hostname = calculate_hostnames(args.name, args.hostname)
+    username = args.cloud_user_name
+    if args.cloud_user_pass:
+        userpass = args.cloud_user_pass
+    else:
+        userpass = ""
+
+    cfg_file_path = hostname + '-config.iso'
+    cfg_dir_path = '/var/tmp/config-drive-' + uuid
+    mcp_dir_path = cfg_dir_path + '/mcp'
+    model_path = mcp_dir_path + '/model'
+    mk_pipelines_path = mcp_dir_path + '/mk-pipelines'
+    pipeline_lib_path = mcp_dir_path + '/pipeline-library'
+    meta_dir_path = cfg_dir_path + '/openstack/latest'
+    meta_file_path = meta_dir_path + '/meta_data.json'
+    user_file_path = meta_dir_path + '/user_data'
+    net_file_path = meta_dir_path + '/network_data.json'
+    vendor_file_path = meta_dir_path + '/vendor_data.json'
+    gpg_file_path = mcp_dir_path + '/gpg'
+
+    umask(0o0027)
+    makedirs(mcp_dir_path)
+    makedirs(meta_dir_path)
+
+    meta_data = {}
+    meta_data["uuid"] = uuid
+    meta_data["hostname"] = hostname
+    meta_data["name"] = name
+    network_data = {}
+    gateway_ip = ""
+
+    ssh_keys = []
+
+    if args.ssh_key:
+        xprint("Adding authorized key to config drive: %s" % str(args.ssh_key))
+        ssh_keys.append(args.ssh_key)
+
+    if args.ssh_keys:
+        xprint("Adding authorized keys file entries to config drive: %s" % str(args.ssh_keys))
+        with open(args.ssh_keys, 'r') as ssh_keys_file:
+            ssh_keys += ssh_keys_file.readlines()
+        ssh_keys = [x.strip() for x in ssh_keys]
+
+    # Deduplicate keys if any
+    ssh_keys = list(set(ssh_keys))
+
+    # Load keys
+    if len(ssh_keys) > 0:
+        meta_data["public_keys"] = {}
+        for i, ssh_key in enumerate(ssh_keys):
+            meta_data["public_keys"][str(i)] = ssh_key
+
+    if args.model:
+        xprint("Adding cluster model to config drive: %s" % str(args.model))
+        copytree(args.model, model_path)
+
+    if args.pipeline_library:
+        xprint("Adding pipeline-library to config drive: %s" % str(args.pipeline_library))
+        copytree(args.pipeline_library, pipeline_lib_path)
+
+    if args.mk_pipelines:
+        xprint("Adding mk-pipelines to config drive: %s" % str(args.mk_pipelines))
+        copytree(args.mk_pipelines, mk_pipelines_path)
+
+    if args.gpg_key:
+        xprint("Adding gpg keys file to config drive: %s" % str(args.gpg_key))
+        makedirs(gpg_file_path)
+        copyfile(args.gpg_key, gpg_file_path + '/salt_master_pillar.asc')
+
+    if args.vendor_data:
+        xprint("Adding vendor metadata file to config drive: %s" % str(args.vendor_data))
+        copyfile(args.vendor_data, vendor_file_path)
+
+    with open(meta_file_path, 'w') as meta_file:
+        json_dump(meta_data, meta_file)
+
+    if args.user_data:
+        xprint("Adding user data file to config drive: %s" % str(args.user_data))
+        if username:
+            with open(user_file_path, 'a') as user_file:
+                users_data = "#cloud-config\n"
+                users_data += "users:\n"
+                users_data += "  - name: %s\n" % username
+                users_data += "    sudo: ALL=(ALL) NOPASSWD:ALL\n"
+                users_data += "    groups: admin\n"
+                users_data += "    lock_passwd: false\n"
+                if userpass:
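+                    # Store the password as a SHA-512 crypt hash, as expected by the cloud-config 'passwd' field.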
+                    users_data += "    passwd: %s\n" % crypt(userpass, mksalt(METHOD_SHA512))
+                if ssh_keys:
+                    users_data += "    ssh_authorized_keys:\n"
+                    for ssh_key in ssh_keys:
+                        users_data += "    - %s\n" % ssh_key
+                users_data += "\n"
+                user_file.write(users_data)
+                with open(args.user_data, 'r') as user_data_file:
+                    copyfileobj(user_data_file, user_file)
+        else:
+            copyfile(args.user_data, user_file_path)
+
+    if args.network_data:
+        xprint("Adding network metadata file to config drive: %s" % str(args.network_data))
+        copyfile(args.network_data, net_file_path)
+    else:
+        if not args.skip_network:
+            xprint("Configuring network metadata from specified parameters.")
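+            # Build a minimal OpenStack network_data.json structure: one
+            # physical link, one static ipv4 network on it, plus optional
+            # DNS services and a default route via the gateway.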
+            network_data["links"] = []
+            network_data["networks"] = []
+            network_data["links"].append({"type": "phy", "id": args.interface, "name": args.interface})
+            network_data["networks"].append({"type": "ipv4", "netmask": args.netmask, "link": args.interface, "id": "private-ipv4", "ip_address": args.ip})
+            if args.dns_nameservers:
+                network_data["services"] = []
+                for nameserver in args.dns_nameservers.split(','):
+                    network_data["services"].append({"type": "dns", "address": nameserver})
+            if args.gateway:
+                network_data["networks"][0]["routes"] = []
+                network_data["networks"][0]["routes"].append({"netmask": "0.0.0.0", "gateway": args.gateway, "network": "0.0.0.0"})
+
+    # Write network_data.json only if it was generated above (not skipped and not supplied as a file)
+    if len(network_data) > 0:
+        with open(net_file_path, 'w') as net_file:
+            json_dump(network_data, net_file)
+
+    generate_iso(cfg_file_path, cfg_dir_path, args.quiet)
+    if args.clean_up:
+        xprint("Cleaning up working dir.")
+        rmtree(cfg_dir_path)
+
+
+if __name__ == '__main__':
+    uuid = str(uuidgen())
+    parser = argparse.ArgumentParser(description='Config drive generator for MCP instances.', prog=argv[0], usage='%(prog)s [options]')
+    parser.add_argument('--gpg-key', type=str, help='Upload gpg key for salt master. Specify path to file in asc format.', required=False)
+    parser.add_argument('--name', type=str, default=uuid, help='Specify instance name. Hostname in short format, without domain.', required=False)
+    parser.add_argument('--hostname', type=str, default=uuid, help='Specify instance hostname (FQDN, in full format with domain). A short name is treated as the instance name.', required=False)
+    parser.add_argument('--skip-network', action='store_true', help='Do not generate network_data for the instance.', required=False)
+    parser.add_argument('--interface', type=str, default='ens3', help='Specify interface for instance to configure.', required=False)
+    parser.add_argument('--ssh-key', type=str, help='Specify ssh public key to upload to cloud image.', required=False)
+    parser.add_argument('--ssh-keys', type=str, help='Upload authorized_keys to cloud image. Specify path to file in authorized_keys format.', required=False)
+    parser.add_argument('--cloud-user-name', type=str, help='Specify cloud user name.', required=False)
+    parser.add_argument('--cloud-user-pass', type=str, help='Specify cloud user password.', required=False)
+    parser.add_argument('--ip', type=str, help='Specify IP address for instance.', required=False)
+    parser.add_argument('--netmask', type=str, help='Specify netmask for instance.', required=False)
+    parser.add_argument('--gateway', type=str, help='Specify gateway address for instance.', required=False)
+    parser.add_argument('--dns-nameservers', type=str, help='Specify DNS nameservers delimited by comma.', required=False)
+    parser.add_argument('--user-data', type=str, help='Specify path to user_data file in yaml format.', required=False)
+    parser.add_argument('--vendor-data', type=str, help='Specify path to vendor_data.json in openstack vendor metadata format.', required=False)
+    parser.add_argument('--network-data', type=str, help='Specify path to network_data.json in openstack network metadata format.', required=False)
+    parser.add_argument('--model', type=str, help='Specify path to cluster model.', required=False)
+    parser.add_argument('--mk-pipelines', type=str, help='Specify path to mk-pipelines folder.', required=False)
+    parser.add_argument('--pipeline-library', type=str, help='Specify path to pipeline-library folder.', required=False)
+    parser.add_argument('--clean-up', action='store_true', help='Clean-up config-drive dir once ISO is created.', required=False)
+    parser.add_argument('--quiet', action='store_true', help='Be quiet. Do not write any output messages to stdout.', required=False)
+    args = parser.parse_args()
+
+    if len(argv) < 2:
+        parser.print_help()
+        exit(0)
+
+    validate_args(args)
+    create_config_drive(args)
diff --git a/config-drive/master_config.yaml b/config-drive/master_config.yaml
index 6c747fe..c008ed4 100644
--- a/config-drive/master_config.yaml
+++ b/config-drive/master_config.yaml
@@ -9,10 +9,6 @@
       [ -f /etc/cloud/master_environment_override ] && . /etc/cloud/master_environment_override
       export SALT_MASTER_DEPLOY_IP=${SALT_MASTER_DEPLOY_IP:-"172.16.164.15"}
       export SALT_MASTER_MINION_ID=${SALT_MASTER_MINION_ID:-"cfg01.deploy-name.local"}
-      export DEPLOY_NETWORK_GW=${DEPLOY_NETWORK_GW:-"172.16.164.1"}
-      export DEPLOY_NETWORK_NETMASK=${DEPLOY_NETWORK_NETMASK:-"255.255.255.192"}
-      export DEPLOY_NETWORK_MTU=${DEPLOY_NETWORK_MTU:-"1500"}
-      export DNS_SERVERS=${DNS_SERVERS:-"8.8.8.8"}
 
       export http_proxy=${http_proxy:-""}
       export https_proxy=${https_proxy:-""}
@@ -20,7 +16,8 @@
       export MCP_VERSION=${MCP_VERSION:-"stable"}
 
       export PIPELINES_FROM_ISO=${PIPELINES_FROM_ISO:-"true"}
-      export PIPELINE_REPO_URL=${PIPELINE_REPO_URL:-"https://github.com/Mirantis"}
+      export MK_PIPELINES_URL=${MK_PIPELINES_URL:-"https://gerrit.mcp.mirantis.com/mk/mk-pipelines.git"}
+      export PIPELINE_LIB_URL=${PIPELINE_LIB_URL:-"https://gerrit.mcp.mirantis.com/mcp-ci/pipeline-library.git"}
 
       export MCP_SALT_REPO_URL=${MCP_SALT_REPO_URL:-"http://mirror.mirantis.com/"}
       export MCP_SALT_REPO=${MCP_SALT_REPO:-"deb [arch=amd64] $MCP_SALT_REPO_URL/$MCP_VERSION/salt-formulas/xenial xenial main"}
@@ -43,7 +40,7 @@
       maas ${PROFILE} package-repository update ${main_arch_id} "arches=amd64" || true
       # Remove stale notifications, which appear during sources configuration.
       for i in $(maas ${PROFILE} notifications read | jq ".[]| .id"); do
-       maas ${PROFILE} notification delete ${i} || true
+        maas ${PROFILE} notification delete ${i} || true
       done
     }
 
@@ -54,8 +51,7 @@
       curl -s ${MCP_SALT_REPO_KEY} | apt-key add -
       echo "${MCP_SALT_REPO}" > /etc/apt/sources.list.d/mcp_salt.list
       echo "${MCP_SALT_REPO_UPDATES}" >> /etc/apt/sources.list.d/mcp_salt.list
-      apt-get update
-      apt-get install -y salt-formula-*
+      apt update && apt install -y "${FORMULAS}"
 
       [ ! -d ${RECLASS_ROOT}/classes/service ] && mkdir -p ${RECLASS_ROOT}/classes/service
       for formula_service in $(ls /usr/share/salt-formulas/reclass/service/); do
@@ -78,40 +74,27 @@
       done
     }
 
-    function process_network(){
-      echo "Configuring network interfaces"
-      find /etc/network/interfaces.d/ -type f -delete
-      kill $(pidof /sbin/dhclient) || /bin/true
-      envsubst < /root/interfaces > /etc/network/interfaces
-      ip a flush dev ens3
-      rm -f /var/run/network/ifstate.ens3
-      if [[ $(grep -E "^\ *gateway\ " /etc/network/interfaces) ]]; then
-      (ip r s | grep ^default) && ip r d default || /bin/true
-      fi;
-      ifup ens3
-    }
-
     function process_maas(){
       maas_cluster_enabled=$(salt-call --out=text pillar.get maas:cluster:enabled | awk '{print $2}' | tr "[:upper:]" "[:lower:]" )
       _region=$(salt-call --out=text pillar.get maas:region:enabled | awk '{print $2}' | tr "[:upper:]" "[:lower:]" )
       if ([ -f /opt/postgresql_in_docker ] && ([[ "${maas_cluster_enabled}" == "true" ]] || [[ "${_region}" == "true" ]])); then
         systemctl disable postgresql.service
         wait_for_postgresql
-        salt-call ${SALT_OPTS} state.sls postgresql.client
+        salt-call ${SALT_OPTS} state.apply postgresql.client
       else
         postgres_enabled=$(salt-call --out=text pillar.get postgresql:server:enabled | awk '{print $2}' | tr "[:upper:]" "[:lower:]")
         if [[ "${postgres_enabled}" == "true" ]]; then
-          salt-call ${SALT_OPTS} state.sls postgresql.server
+          salt-call ${SALT_OPTS} state.apply postgresql.server
         fi
       fi
 
       if [[ "${maas_cluster_enabled}" == "true" ]]; then
-        salt-call ${SALT_OPTS} state.sls maas.cluster
+        salt-call ${SALT_OPTS} state.apply maas.cluster
       else
         echo "WARNING: maas.cluster skipped!"
       fi
       if [[ "$_region" == "true" ]]; then
-        salt-call ${SALT_OPTS} state.sls maas.region
+        salt-call ${SALT_OPTS} state.apply maas.region
       else
         echo "WARNING: maas.region skipped!"
       fi
@@ -146,13 +129,13 @@
     function process_swarm() {
       _swarm=$(salt-call --out=text pillar.get docker:swarm:advertise_addr | awk '{print $2}')
       if [[ "${_swarm}" != "" ]]; then
-        salt-call ${SALT_OPTS} state.sls docker.swarm
+        salt-call ${SALT_OPTS} state.apply docker.swarm
       fi
       _docker=$(salt-call --out=text pillar.get docker:client:enabled | awk '{print $2}')
       if [[ "${_docker}" != "" ]]; then
-        salt-call ${SALT_OPTS} state.sls docker.client
+        salt-call ${SALT_OPTS} state.apply docker.client
       fi
-      salt-call ${SALT_OPTS} state.sls docker.client.images
+      salt-call ${SALT_OPTS} state.apply docker.client.images
     }
 
     function process_jenkins() {
@@ -162,13 +145,13 @@
         export JENKINS_HOME=/srv/volumes/jenkins
         _nginx=$(salt-call --out=text pillar.get nginx:server:enabled | awk '{print $2}')
         if [[ "${_nginx}" != "" ]]; then
-          salt-call ${SALT_OPTS} state.sls nginx
+          salt-call ${SALT_OPTS} state.apply nginx
         fi
         _jenabled=$(salt-call --out=text pillar.get docker:client:stack:jenkins | awk '{print $2}')
         _jclient=$(salt-call --out=text pillar.get jenkins:client | awk '{print $2}')
         if [[ "${_jenabled}" != "" && "${_jclient}" != "" ]]; then
           wait_for_jenkins
-          salt-call ${SALT_OPTS} state.sls jenkins.client
+          salt-call ${SALT_OPTS} state.apply jenkins.client
         fi
       else
         export JENKINS_HOME=/var/lib/jenkins
@@ -177,7 +160,7 @@
         wait_for_jenkins
         _jjobs=$(salt-call --out=text pillar.get jenkins:client:job | awk '{print $2}')
         if [[ "${_jjobs}" != "" ]]; then
-          salt-call ${SALT_OPTS} state.sls jenkins.client
+          salt-call ${SALT_OPTS} state.apply jenkins.client
         fi
         systemctl stop jenkins
         find ${JENKINS_HOME}/jenkins.model.JenkinsLocationConfiguration.xml -type f -print0 | xargs -0 sed -i -e "s/10.167.4.15/$SALT_MASTER_DEPLOY_IP/g"
@@ -186,40 +169,27 @@
       ssh-keyscan cfg01 > ${JENKINS_HOME}/.ssh/known_hosts && chmod a+r ${JENKINS_HOME}/.ssh/known_hosts || true
     }
 
-    failsafe_ssh_key(){
-      if [ -f /mnt/root_auth_keys ]; then
-        echo "Installing failsafe public ssh key from /mnt/root_auth_keys to /root/.ssh/authorized_keys"
-        install -m 0700 -d /root/.ssh
-        cat /mnt/root_auth_keys >> /root/.ssh/authorized_keys
-        chmod 600 /root/.ssh/authorized_keys
-        sed -i "s/^PermitRootLogin.*/PermitRootLogin yes/g" /etc/ssh/sshd_config
-        sed -i "s/^PasswordAuthentication.*/PasswordAuthentication yes/g" /etc/ssh/sshd_config
-        service ssh restart
-      fi
-    }
-
     function process_salt_base(){
       # PROD-21179| PROD-21792 : To describe such trick's around salt.XX state ordering
-      salt-call ${SALT_OPTS} state.sls salt.master
+      salt-call ${SALT_OPTS} state.apply salt.master
       # Wait for salt-master to wake up after restart
       sleep 5
       salt-call --timeout=120 test.ping
       # Run salt.minion.ca to prepare CA certificate before salt.minion.cert is used
-      salt-call ${SALT_OPTS} state.sls salt.minion.ca
+      salt-call ${SALT_OPTS} state.apply salt.minion.ca
       # Add sleep for completion of postponed restart of salt-minion. PROD-25986
       sleep 15
-      salt-call ${SALT_OPTS} state.sls salt.minion
+      salt-call ${SALT_OPTS} state.apply salt.minion
       # Wait for salt-minion to wake up after restart
       sleep 5
       salt-call --timeout=120 test.ping
-      salt-call ${SALT_OPTS} state.sls salt
-      salt-call ${SALT_OPTS} state.sls reclass
+      salt-call ${SALT_OPTS} state.apply salt
+      salt-call ${SALT_OPTS} state.apply reclass
     }
     #== Body ==================================================================#
 
     . /etc/cloud/master_environment
     printenv | sort -u
-    process_network
 
     echo "Preparing metadata model"
     if [[ -n "${CFG_BOOTSTRAP_DRIVE_URL}" ]]; then
@@ -227,24 +197,21 @@
       _tname="cfg01_${RANDOM}.iso"
       _wget_opts="--progress=dot:mega --waitretry=15 --retry-connrefused"
       wget ${_wget_opts} -O /${_tname} "${CFG_BOOTSTRAP_DRIVE_URL}"
-      mount -o loop /${_tname} /mnt/
+      mount -o loop /${_tname} /mnt
     else
-      mount /dev/cdrom /mnt/
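+      # Locate the config drive by its iso9660 filesystem type instead of assuming /dev/cdrom.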
+      mount $(blkid -t TYPE=iso9660 -o device) /mnt
     fi
-    cp -rT /mnt/model/model /srv/salt/reclass
-    chown -R root:root /srv/salt/reclass/* || true
-    chown -R root:root /srv/salt/reclass/.git* || true
-    chmod -R 644 /srv/salt/reclass/classes/cluster/* || true
-    chmod -R 644 /srv/salt/reclass/classes/system/*  || true
-
-    failsafe_ssh_key
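+    # Copy the cluster model from the config drive and normalize ownership and permissions (0755 dirs, 0644 files).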
+    rsync -av /mnt/mcp/model/ /srv/salt/reclass/
+    chown -R root:root /srv/salt/reclass/ || true
+    find /srv/salt/reclass/classes/ -type d -exec chmod 0755 {} \;
+    find /srv/salt/reclass/classes/ -type f -exec chmod 0644 {} \;
 
     echo "Configuring salt"
     envsubst < /root/minion.conf > /etc/salt/minion.d/minion.conf
-    if [ -f /mnt/gpg/salt_master_pillar.asc ]; then
+    if [ -f /mnt/mcp/gpg/salt_master_pillar.asc ]; then
       mkdir /etc/salt/gpgkeys
-      chmod 700 /etc/salt/gpgkeys
-      GNUPGHOME=/etc/salt/gpgkeys gpg --import /mnt/gpg/salt_master_pillar.asc
+      chmod 0700 /etc/salt/gpgkeys
+      GNUPGHOME=/etc/salt/gpgkeys gpg --import /mnt/mcp/gpg/salt_master_pillar.asc
     fi
     enable_services
 
@@ -252,23 +219,21 @@
     salt-call --timeout=120 test.ping
 
     while true; do
-    salt-key | grep "$SALT_MASTER_MINION_ID" && break
+    salt-key | grep -w "$SALT_MASTER_MINION_ID" && break
       sleep 5
     done
 
     echo "updating local git repos"
     if [[ "$PIPELINES_FROM_ISO" == "true" ]] ; then
-      cp -r /mnt/mk-pipelines/* /home/repo/mk/mk-pipelines/
-      cp -r /mnt/pipeline-library/* /home/repo/mcp-ci/pipeline-library/
+      rsync -av /mnt/mcp/mk-pipelines/ /home/repo/mk/mk-pipelines/
+      rsync -av /mnt/mcp/pipeline-library/ /home/repo/mcp-ci/pipeline-library/
       umount /mnt || true
-      chown -R git:www-data /home/repo/mk/mk-pipelines/*
-      chown -R git:www-data /home/repo/mcp-ci/pipeline-library/*
+      chown -R git:www-data /home/repo/mk/mk-pipelines /home/repo/mcp-ci/pipeline-library
     else
       umount /mnt || true
-      git clone --mirror "${PIPELINE_REPO_URL}/mk-pipelines.git" /home/repo/mk/mk-pipelines/
-      git clone --mirror "${PIPELINE_REPO_URL}/pipeline-library.git" /home/repo/mcp-ci/pipeline-library/
-      chown -R git:www-data /home/repo/mk/mk-pipelines/*
-      chown -R git:www-data /home/repo/mcp-ci/pipeline-library/*
+      git clone --mirror "${MK_PIPELINES_URL}" /home/repo/mk/mk-pipelines/
+      git clone --mirror "${PIPELINE_LIB_URL}" /home/repo/mcp-ci/pipeline-library/
+      chown -R git:www-data /home/repo/mk/mk-pipelines /home/repo/mcp-ci/pipeline-library
     fi
 
     process_formulas
@@ -280,7 +245,7 @@
       exit 1
     fi
 
-    salt-call ${SALT_OPTS} state.sls linux.network,linux,openssh
+    salt-call ${SALT_OPTS} state.apply linux.system.repo,linux.network,linux.system,linux,openssh
     process_salt_base
 
 
@@ -303,6 +268,6 @@
     mkdir -p /var/log/mcp/
     touch /var/log/mcp/.bootstrap_done
     sync
-    reboot
+    shutdown -r now
 runcmd:
    - [bash, -cex, *master_config]
diff --git a/config-drive/mirror_config.yaml b/config-drive/mirror_config.yaml
index 55972b0..c4d24ba 100644
--- a/config-drive/mirror_config.yaml
+++ b/config-drive/mirror_config.yaml
@@ -8,8 +8,6 @@
   content: |
     [ -f /etc/cloud/mirror_environment_override ] && . /etc/cloud/mirror_environment_override
     export SALT_MASTER_DEPLOY_IP=${SALT_MASTER_DEPLOY_IP:-"10.1.0.14"}
-    export APTLY_DEPLOY_IP=${APTLY_DEPLOY_IP:-"10.1.0.14"}
-    export APTLY_DEPLOY_NETMASK=${APTLY_DEPLOY_NETMASK:-"255.255.0.0"}
     export APTLY_MINION_ID=${APTLY_MINION_ID:-"apt01.deploy-name.local"}
 
 mirror_config:
@@ -54,17 +52,6 @@
 
     . /etc/cloud/mirror_environment
 
-    echo "Configuring network interfaces"
-    find /etc/network/interfaces.d/ -type f -delete
-    kill $(pidof /sbin/dhclient) || /bin/true
-    envsubst < /root/interfaces > /etc/network/interfaces
-    ip a flush dev ens3
-    rm -f /var/run/network/ifstate.ens3
-    if [[ $(grep -E '^\ *gateway\ ' /etc/network/interfaces) ]]; then
-      (ip r s | grep ^default) && ip r d default || /bin/true
-    fi;
-    ifup ens3
-
     echo "Configuring salt"
     rm -f /etc/salt/pki/minion/minion_master.pub
     envsubst < /root/minion.conf > /etc/salt/minion.d/minion.conf