Update scripts for TryMCP deployment

* check network configuration before creating virsh networks;
* add a salt-minion boot check
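
Illustrative invocation with the new knobs (the variable names and
script path come from this change; the remaining variables from
env_vars.sh are assumed to be set as usual):

    export RECREATE_NETWORKS_IF_EXISTS=false  # keep pre-existing virsh networks (default)
    export VM_MGM_BRIDGE_DISABLE=false        # bridge mode: the bridge must now exist up front
    ./predefine-vm/define-vm.sh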

Change-Id: I3c1ac0f7e8e3a19e940995551be87d46fed5beda
Related-Prod: PROD-29507
Related-Prod: PROD-29539
diff --git a/predefine-vm/define-slave-vm.sh b/predefine-vm/define-slave-vm.sh
index 1fc494d..246bf6d 100755
--- a/predefine-vm/define-slave-vm.sh
+++ b/predefine-vm/define-slave-vm.sh
@@ -22,27 +22,28 @@
   exit 1
 fi
 
-check_packages "slave"
+prereq_check "slave"
+
+qemu-img resize ${SLAVE_VM_SOURCE_DISK} 80G
+#### Make sure that the disk is saved to a system path which is available to libvirt-qemu:kvm
+export SLAVE_VM_SOURCE_DISK=$(place_file_under_libvirt_owned_dir ${SLAVE_VM_SOURCE_DISK} ${NON_DEFAULT_LIBVIRT_DIR})
+
+### Collect data from the config drive and create a simple ISO file for the slave VM
+networkDataFileBaseName='network_data.json'
+networkDataFile=$(isoinfo -i ${VM_CONFIG_DISK} -J -f | grep -w "${networkDataFileBaseName}")
+contextFilePath=$(isoinfo -i ${VM_CONFIG_DISK} -J -f | grep -w "context_data.yml")
+allocationDataFile=$(isoinfo -i ${VM_CONFIG_DISK} -J -f | grep -w "allocation_data.yml")
+saltMasterIp=$(isoinfo -i ${VM_CONFIG_DISK} -J -x ${allocationDataFile} | grep -w 'infra_config_deploy_address' | cut -f 2 -d ':' | tr -d ' ')
+clusterDomain=$(isoinfo -i ${VM_CONFIG_DISK} -J -x ${contextFilePath} | grep -w 'cluster_domain:' | cut -f 2 -d ':' | tr -d ' ')
+aioIp=$(isoinfo -i ${VM_CONFIG_DISK} -J -x ${allocationDataFile} | grep -w 'aio_node_deploy_address:' | cut -f 2 -d ':' | tr -d ' ')
+aioHostname=$(isoinfo -i ${VM_CONFIG_DISK} -J -x ${allocationDataFile} | grep -w 'aio_node_hostname:' | cut -f 2 -d ':' | tr -d ' ')
+aioFailSafeUserKey=$(isoinfo -i ${VM_CONFIG_DISK} -J -x ${contextFilePath} | grep -w 'cfg_failsafe_ssh_public_key:' | cut -f 2 -d ':' | sed 's/ //')
+aioFailSafeUser=$(isoinfo -i ${VM_CONFIG_DISK} -J -x ${contextFilePath} | grep -w 'cfg_failsafe_user:' | cut -f 2 -d ':' | tr -d ' ')
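+# Re-point network_data.json from the Salt master address to the AIO node address.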
+networkDataForSlave=$(isoinfo -i ${VM_CONFIG_DISK} -J -x ${networkDataFile} | sed -e "s/${saltMasterIp}/${aioIp}/g")
 
 configDriveDir="$(dirname $0)/../config-drive"
 pushd "${configDriveDir}"
-tmpDir=$(mktemp -d -p $(pwd))
-mount ${VM_CONFIG_DISK} ${tmpDir}
-contextFile=$(find ${tmpDir}/mcp -name context_data.yml)
-allocationDataFile=$(find ${tmpDir}/mcp -name allocation_data.yml)
-saltMasterIp=$(grep salt_master_management_address ${contextFile} | cut -f 2 -d ':' | tr -d ' ')
-clusterDomain=$(grep cluster_domain ${contextFile} | cut -f 2 -d ':' | tr -d ' ')
-aioIp=$(grep 'aio_node_deploy_address:' ${allocationDataFile} | cut -f 2 -d ':' | tr -d ' ')
-aioHostname=$(grep 'aio_node_hostname:' ${allocationDataFile} | cut -f 2 -d ':' | tr -d ' ')
-aioFailSafeUserKey=$(grep cfg_failsafe_ssh_public_key ${contextFile} | cut -f 2 -d ':' | tr -d ' ')
-aioFailSafeUser=$(grep cfg_failsafe_user ${contextFile} | cut -f 2 -d ':' | tr -d ' ')
-networkDataFile=$(find ${tmpDir}/openstack -name network_data.json )
-networkDataFileBaseName=$(basename ${networkDataFile})
-cp ${networkDataFile} ./${networkDataFileBaseName}
-sed -i ${networkDataFileBaseName} -e "s/${saltMasterIp}/${aioIp}/g"
-umount ${tmpDir}
-rm -rf ${tmpDir}
-
+echo -e "${networkDataForSlave}" > "${networkDataFileBaseName}"
 cat <<EOF > ./user_data
 #cloud-config
 output : { all : '| tee -a /var/log/cloud-init-output.log' }
@@ -78,6 +79,7 @@
     [ ! -d /etc/salt/minion.d ] && mkdir -p /etc/salt/minion.d
     echo -e "id: ${aioHostname}.${clusterDomain}\nmaster: ${saltMasterIp}" > /etc/salt/minion.d/minion.conf
     cat >> /etc/salt/minion.d/minion.conf << EOF
+    log_level: info
     max_event_size: 100000000
     acceptance_wait_time_max: 60
     acceptance_wait_time: 10
@@ -87,7 +89,13 @@
     recon_randomize: True
     auth_timeout: 60
     EOF
-    service salt-minion restart
+
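+    # Restart the minion, give it time to register with the master, then dump its log into the cloud-init output for debugging.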
+    systemctl restart salt-minion
+    sleep 90
+    cat /var/log/salt/minion
+    sync
+
 runcmd:
   - 'if lvs vg0; then pvresize /dev/vda3; fi'
   - 'if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi'
@@ -96,19 +104,15 @@
 
 isoArgs="--name ${aioHostname} --hostname ${aioHostname}.${clusterDomain} --user-data $(pwd)/user_data --network-data $(pwd)/${networkDataFileBaseName} --quiet --clean-up"
 if [[ -n "${aioFailSafeUser}" ]] && [[ -n "${aioFailSafeUserKey}" ]]; then
-	isoArgs="${isoArgs} --cloud-user-name ${aioFailSafeUser} --ssh-key ${aioFailSafeUserKey}"
+  echo "${aioFailSafeUserKey}" > "failSafeKey.pub"
+  isoArgs="${isoArgs} --cloud-user-name ${aioFailSafeUser} --ssh-keys failSafeKey.pub"
 fi
 python ./create_config_drive.py ${isoArgs}
-qemu-img resize ${SLAVE_VM_SOURCE_DISK} 80G
-if [ -z "${NON_DEFAULT_LIBVIRT_DIR}" ]; then
-  #### Make sure that both files are saved to system path which is available for libvirt-qemu:kvm
-  export SLAVE_VM_SOURCE_DISK=$(place_file_under_libvirt ${SLAVE_VM_SOURCE_DISK})
-  export SLAVE_VM_CONFIG_DISK=$(place_file_under_libvirt ${aioHostname}.${clusterDomain}-config.iso)
-fi
-export CREATE_NETWORKS=${CREATE_NETWORKS:-true}
+#### Make sure that the ISO file is saved to a system path which is available to libvirt-qemu:kvm
+export SLAVE_VM_CONFIG_DISK=$(place_file_under_libvirt_owned_dir ${aioHostname}.${clusterDomain}-config.iso ${NON_DEFAULT_LIBVIRT_DIR})
 popd
 
-render_config "${SLAVE_VM_NAME}" "${SLAVE_VM_MEM_KB}" "${SLAVE_VM_CPUS}" "${SLAVE_VM_SOURCE_DISK}" "${SLAVE_VM_CONFIG_DISK}" "${CREATE_NETWORKS}"
+render_config "${SLAVE_VM_NAME}" "${SLAVE_VM_MEM_KB}" "${SLAVE_VM_CPUS}" "${SLAVE_VM_SOURCE_DISK}" "${SLAVE_VM_CONFIG_DISK}"
 
 virsh define $(pwd)/${SLAVE_VM_NAME}-vm.xml
 virsh autostart ${SLAVE_VM_NAME}
diff --git a/predefine-vm/define-vm.sh b/predefine-vm/define-vm.sh
index a438dbf..5435607 100755
--- a/predefine-vm/define-vm.sh
+++ b/predefine-vm/define-vm.sh
@@ -22,15 +22,13 @@
   exit 1
 fi
 
-check_packages
+prereq_check
 
-if [ -z "${NON_DEFAULT_LIBVIRT_DIR}" ]; then
-  #### Make sure that both files are saved to system path which is available for libvirt-qemu:kvm
-  export VM_SOURCE_DISK=$(place_file_under_libvirt ${VM_SOURCE_DISK})
-  export VM_CONFIG_DISK=$(place_file_under_libvirt ${VM_CONFIG_DISK})
-fi
-export CREATE_NETWORKS=${CREATE_NETWORKS:-true}
-render_config "${VM_NAME}" "${VM_MEM_KB}" "${VM_CPUS}" "${VM_SOURCE_DISK}" "${VM_CONFIG_DISK}" "${CREATE_NETWORKS}"
+#### Make sure that both files are saved to a system path which is available to libvirt-qemu:kvm
+export VM_SOURCE_DISK=$(place_file_under_libvirt_owned_dir ${VM_SOURCE_DISK} ${NON_DEFAULT_LIBVIRT_DIR})
+export VM_CONFIG_DISK=$(place_file_under_libvirt_owned_dir ${VM_CONFIG_DISK} ${NON_DEFAULT_LIBVIRT_DIR})
+
+render_config "${VM_NAME}" "${VM_MEM_KB}" "${VM_CPUS}" "${VM_SOURCE_DISK}" "${VM_CONFIG_DISK}"
 
 virsh define $(pwd)/${VM_NAME}-vm.xml
 virsh autostart ${VM_NAME}
diff --git a/predefine-vm/env_vars.sh b/predefine-vm/env_vars.sh
index 38141a9..8b744ab 100644
--- a/predefine-vm/env_vars.sh
+++ b/predefine-vm/env_vars.sh
@@ -4,6 +4,7 @@
 export VM_CTL_BRIDGE_NAME=${VM_CTL_BRIDGE_NAME:-"br-ctl"}
 export VM_MGM_NETWORK_NAME=${VM_MGM_NETWORK_NAME:-"mgm_network"}
 export VM_CTL_NETWORK_NAME=${VM_CTL_NETWORK_NAME:-"ctl_network"}
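+# When true, existing virsh networks are destroyed and re-created; when false (default), they are reused as-is.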
+export RECREATE_NETWORKS_IF_EXISTS=${RECREATE_NETWORKS_IF_EXISTS:-false}
 export VM_MEM_KB=${VM_MEM_KB:-"12589056"}
 export VM_CPUS=${VM_CPUS:-"4"}
 # optional params if you won't use bridge on host
diff --git a/predefine-vm/functions.sh b/predefine-vm/functions.sh
index 8880872..ec51f6d 100644
--- a/predefine-vm/functions.sh
+++ b/predefine-vm/functions.sh
@@ -19,6 +19,36 @@
     done
 }
 
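+# Verify that the host bridge backing a VM network exists; abort with guidance if it does not.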
+function check_bridge_exists {
+    local bridgeName=${1}
+    local optionName=${2}
+    local bridgeExists=$(brctl show | grep -w "${bridgeName}")
+    if [ -z "${bridgeExists}" ]; then
+        echo "Option ${optionName} is set to False, which means using bridge ${bridgeName}, but it doesn't exist."
+        echo "Consider to switch to ${optionName}=True, which will lead to using local hosted networks."
+        echo "Or create bridge ${bridgeName} manually: https://docs.mirantis.com/mcp/q4-18/mcp-deployment-guide/deploy-mcp-drivetrain/prerequisites-dtrain.html"
+        exit 1
+    fi
+}
+
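+# Pre-flight checks: required packages, host bridges (when bridge mode is requested) and the target libvirt directory.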
+function prereq_check {
+    local slave=${1}
+    check_packages "${slave}"
+    [[ "${VM_MGM_BRIDGE_DISABLE}" =~ [Ff]alse ]] && check_bridge_exists "${VM_MGM_BRIDGE_NAME}" "VM_MGM_BRIDGE_DISABLE"
+    [[ "${VM_CTL_BRIDGE_DISABLE}" =~ [Ff]alse ]] && check_bridge_exists "${VM_CTL_BRIDGE_NAME}" "VM_CTL_BRIDGE_DISABLE"
+    [[ -n "${NON_DEFAULT_LIBVIRT_DIR}" ]] && echo "All files will be saved under the ${NON_DEFAULT_LIBVIRT_DIR} directory. Make sure that libvirt-qemu:kvm has access to that path."
+}
+
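+# Echo 'true' if the network should be (re)created, 'false' if an existing network should be kept (honors RECREATE_NETWORKS_IF_EXISTS).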
+function do_create_new_network {
+    local netName=${1}
+    local netExists=$(virsh net-list | grep -w "${netName}")
+    if [ -n "${netExists}" ] && [[ "${RECREATE_NETWORKS_IF_EXISTS}" =~ [Ff]alse ]]; then
+        echo 'false'
+    else
+        echo 'true'
+    fi
+}
+
 function create_network {
     local network=${1}
     virsh net-destroy ${network} 2> /dev/null || true
@@ -31,14 +61,17 @@
 function create_bridge_network {
     local network=$1
     local bridge_name=$2
-    cat <<EOF > $(pwd)/${network}.xml
+    local createNetwork=$(do_create_new_network "${network}")
+    if [ "${createNetwork}" == 'true' ]; then
+        cat <<EOF > $(pwd)/${network}.xml
 <network>
   <name>${network}</name>
   <forward mode="bridge"/>
   <bridge name="${bridge_name}" />
 </network>
 EOF
-    create_network ${network}
+        create_network ${network}
+    fi
 }
 
 function create_host_network {
@@ -46,29 +79,32 @@
     local gateway=$2
     local netmask=$3
     local nat=${4:-false}
-    cat <<EOF > $(pwd)/${network}.xml
+    local createNetwork=$(do_create_new_network "${network}")
+    if [ "${createNetwork}" == 'true' ]; then
+        cat <<EOF > $(pwd)/${network}.xml
 <network>
   <name>${network}</name>
   <bridge name="${network}" />
   <ip address="${gateway}" netmask="${netmask}"/>
 EOF
-    if [[ "${nat}" =~ [Tt]rue ]]; then
-        cat <<EOF>> $(pwd)/${network}.xml
+        if [[ "${nat}" =~ [Tt]rue ]]; then
+            cat <<EOF>> $(pwd)/${network}.xml
   <forward mode="nat"/>
 EOF
-    fi
-    cat <<EOF>> $(pwd)/${network}.xml
+        fi
+        cat <<EOF>> $(pwd)/${network}.xml
 </network>
 EOF
-    create_network ${network}
+        create_network ${network}
+    fi
 }
 
-function place_file_under_libvirt() {
-  local libvirtPath="/var/lib/libvirt/images"
-  local image=${1}
-  local basenameFile=$(basename ${image})
-  cp "${image}" "${libvirtPath}/${basenameFile}"
-  chown -R libvirt-qemu:kvm "${libvirtPath}"
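+# Copy a file into a directory accessible to libvirt-qemu:kvm (default /var/lib/libvirt/images) and echo the new path.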
+function place_file_under_libvirt_owned_dir() {
+  local file=${1}
+  local libvirtPath=${2:-'/var/lib/libvirt/images'}
+  local basenameFile=$(basename ${file})
+  cp "${file}" "${libvirtPath}/${basenameFile}"
+  chown libvirt-qemu:kvm "${libvirtPath}/${basenameFile}"
   echo "${libvirtPath}/${basenameFile}"
 }
 
@@ -78,7 +114,6 @@
   local vmCPUs=$3
   local vmSourceDisk=$4
   local vmConfigDisk=$5
-  local createNetworks=${6:-true}
   # Template definition
   cat <<EOF > $(pwd)/${vmName}-vm.xml
 <domain type='kvm'>
@@ -128,7 +163,7 @@
   fi
 
   if [[ "${VM_MGM_BRIDGE_DISABLE}" =~ [Ff]alse ]]; then
-      [[ "${createNetworks}" =~ [Tt]rue ]] && create_bridge_network "${VM_MGM_NETWORK_NAME}" "${VM_MGM_BRIDGE_NAME}"
+      create_bridge_network "${VM_MGM_NETWORK_NAME}" "${VM_MGM_BRIDGE_NAME}"
       cat <<EOF >> $(pwd)/${vmName}-vm.xml
     <interface type='bridge'>
       <source bridge='$VM_MGM_BRIDGE_NAME'/>
@@ -137,7 +172,7 @@
     </interface>
 EOF
   else
-      [[ "${createNetworks}" =~ [Tt]rue ]] && create_host_network "${VM_MGM_NETWORK_NAME}" "${VM_MGM_NETWORK_GATEWAY}" "${VM_MGM_NETWORK_MASK}" true
+      create_host_network "${VM_MGM_NETWORK_NAME}" "${VM_MGM_NETWORK_GATEWAY}" "${VM_MGM_NETWORK_MASK}" true
       cat <<EOF >> $(pwd)/${vmName}-vm.xml
     <interface type='network'>
       <source network='$VM_MGM_NETWORK_NAME'/>
@@ -148,7 +183,7 @@
 fi
 
   if [[ "${VM_MGM_BRIDGE_DISABLE}" =~ [Ff]alse ]]; then
-      [[ "${createNetworks}" =~ [Tt]rue ]] && create_bridge_network "${VM_CTL_NETWORK_NAME}" "${VM_CTL_BRIDGE_NAME}"
+      create_bridge_network "${VM_CTL_NETWORK_NAME}" "${VM_CTL_BRIDGE_NAME}"
       cat <<EOF >> $(pwd)/${vmName}-vm.xml
     <interface type='bridge'>
       <source bridge='$VM_CTL_BRIDGE_NAME'/>
@@ -157,7 +192,7 @@
     </interface>
 EOF
   else
-      [[ "${createNetworks}" =~ [Tt]rue ]] && create_host_network "${VM_CTL_NETWORK_NAME}" "${VM_CTL_NETWORK_GATEWAY}" "${VM_CTL_NETWORK_MASK}"
+      create_host_network "${VM_CTL_NETWORK_NAME}" "${VM_CTL_NETWORK_GATEWAY}" "${VM_CTL_NETWORK_MASK}"
       cat <<EOF >> $(pwd)/${vmName}-vm.xml
     <interface type='network'>
       <source network='$VM_CTL_NETWORK_NAME'/>