Update scripts for TryMCP deployment

* check network configuration before creating virsh networks;
* add a salt-minion boot check.

Change-Id: I3c1ac0f7e8e3a19e940995551be87d46fed5beda
Related-Prod: PROD-29507
Related-Prod: PROD-29539
diff --git a/predefine-vm/define-slave-vm.sh b/predefine-vm/define-slave-vm.sh
index 1fc494d..246bf6d 100755
--- a/predefine-vm/define-slave-vm.sh
+++ b/predefine-vm/define-slave-vm.sh
@@ -22,27 +22,28 @@
exit 1
fi
-check_packages "slave"
+prereq_check "slave"
+
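+#### Grow the slave VM source disk to 80G; the guest expands its partitions/LVM on first boot (see runcmd below)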
+qemu-img resize ${SLAVE_VM_SOURCE_DISK} 80G
+#### Make sure the disk is saved to a system path that is accessible to libvirt-qemu:kvm
+export SLAVE_VM_SOURCE_DISK=$(place_file_under_libvirt_owned_dir ${SLAVE_VM_SOURCE_DISK} ${NON_DEFAULT_LIBVIRT_DIR})
+
+### Create a simple ISO file for the slave VM
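+#### Read the deployment metadata directly from the config drive ISO with isoinfo instead of mounting it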
+networkDataFileBaseName='network_data.json'
+networkDataFile=$(isoinfo -i ${VM_CONFIG_DISK} -J -f | grep -w "${networkDataFileBaseName}")
+contextFilePath=$(isoinfo -i ${VM_CONFIG_DISK} -J -f | grep -w "context_data.yml")
+allocationDataFile=$(isoinfo -i ${VM_CONFIG_DISK} -J -f | grep -w "allocation_data.yml")
+saltMasterIp=$(isoinfo -i ${VM_CONFIG_DISK} -J -x ${allocationDataFile} | grep -w 'infra_config_deploy_address' | cut -f 2 -d ':' | tr -d ' ')
+clusterDomain=$(isoinfo -i ${VM_CONFIG_DISK} -J -x ${contextFilePath} | grep -w 'cluster_domain:' | cut -f 2 -d ':' | tr -d ' ')
+aioIp=$(isoinfo -i ${VM_CONFIG_DISK} -J -x ${allocationDataFile} | grep -w 'aio_node_deploy_address:' | cut -f 2 -d ':' | tr -d ' ')
+aioHostname=$(isoinfo -i ${VM_CONFIG_DISK} -J -x ${allocationDataFile} | grep -w 'aio_node_hostname:' | cut -f 2 -d ':' | tr -d ' ')
+aioFailSafeUserKey=$(isoinfo -i ${VM_CONFIG_DISK} -J -x ${contextFilePath} | grep -w 'cfg_failsafe_ssh_public_key:' | cut -f 2 -d ':' | sed 's/ //')
+aioFailSafeUser=$(isoinfo -i ${VM_CONFIG_DISK} -J -x ${contextFilePath} | grep -w 'cfg_failsafe_user:' | cut -f 2 -d ':' | tr -d ' ')
+networkDataForSlave=$(isoinfo -i ${VM_CONFIG_DISK} -J -x ${networkDataFile} | sed -e "s/${saltMasterIp}/${aioIp}/g")
configDriveDir="$(dirname $0)/../config-drive"
pushd "${configDriveDir}"
-tmpDir=$(mktemp -d -p $(pwd))
-mount ${VM_CONFIG_DISK} ${tmpDir}
-contextFile=$(find ${tmpDir}/mcp -name context_data.yml)
-allocationDataFile=$(find ${tmpDir}/mcp -name allocation_data.yml)
-saltMasterIp=$(grep salt_master_management_address ${contextFile} | cut -f 2 -d ':' | tr -d ' ')
-clusterDomain=$(grep cluster_domain ${contextFile} | cut -f 2 -d ':' | tr -d ' ')
-aioIp=$(grep 'aio_node_deploy_address:' ${allocationDataFile} | cut -f 2 -d ':' | tr -d ' ')
-aioHostname=$(grep 'aio_node_hostname:' ${allocationDataFile} | cut -f 2 -d ':' | tr -d ' ')
-aioFailSafeUserKey=$(grep cfg_failsafe_ssh_public_key ${contextFile} | cut -f 2 -d ':' | tr -d ' ')
-aioFailSafeUser=$(grep cfg_failsafe_user ${contextFile} | cut -f 2 -d ':' | tr -d ' ')
-networkDataFile=$(find ${tmpDir}/openstack -name network_data.json )
-networkDataFileBaseName=$(basename ${networkDataFile})
-cp ${networkDataFile} ./${networkDataFileBaseName}
-sed -i ${networkDataFileBaseName} -e "s/${saltMasterIp}/${aioIp}/g"
-umount ${tmpDir}
-rm -rf ${tmpDir}
-
+echo -e "${networkDataForSlave}" > "${networkDataFileBaseName}"
cat <<EOF > ./user_data
#cloud-config
output : { all : '| tee -a /var/log/cloud-init-output.log' }
@@ -78,6 +79,7 @@
[ ! -d /etc/salt/minion.d ] && mkdir -p /etc/salt/minion.d
echo -e "id: ${aioHostname}.${clusterDomain}\nmaster: ${saltMasterIp}" > /etc/salt/minion.d/minion.conf
cat >> /etc/salt/minion.d/minion.conf << EOF
+ log_level: info
max_event_size: 100000000
acceptance_wait_time_max: 60
acceptance_wait_time: 10
@@ -87,7 +89,13 @@
recon_randomize: True
auth_timeout: 60
EOF
- service salt-minion restart
+
+ systemctl restart salt-minion
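+ # give salt-minion time to start, then dump its log so boot issues show up in the cloud-init output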
+ sleep 90
+ cat /var/log/salt/minion
+ sync
+
+
runcmd:
- 'if lvs vg0; then pvresize /dev/vda3; fi'
- 'if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi'
@@ -96,19 +104,15 @@
isoArgs="--name ${aioHostname} --hostname ${aioHostname}.${clusterDomain} --user-data $(pwd)/user_data --network-data $(pwd)/${networkDataFileBaseName} --quiet --clean-up"
if [[ -n "${aioFailSafeUser}" ]] && [[ -n "${aioFailSafeUserKey}" ]]; then
- isoArgs="${isoArgs} --cloud-user-name ${aioFailSafeUser} --ssh-key ${aioFailSafeUserKey}"
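+ # write the failsafe public key to a file and pass its path via --ssh-keys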
+ echo "${aioFailSafeUserKey}" > "failSafeKey.pub"
+ isoArgs="${isoArgs} --cloud-user-name ${aioFailSafeUser} --ssh-keys failSafeKey.pub"
fi
python ./create_config_drive.py ${isoArgs}
-qemu-img resize ${SLAVE_VM_SOURCE_DISK} 80G
-if [ -z "${NON_DEFAULT_LIBVIRT_DIR}" ]; then
- #### Make sure that both files are saved to system path which is available for libvirt-qemu:kvm
- export SLAVE_VM_SOURCE_DISK=$(place_file_under_libvirt ${SLAVE_VM_SOURCE_DISK})
- export SLAVE_VM_CONFIG_DISK=$(place_file_under_libvirt ${aioHostname}.${clusterDomain}-config.iso)
-fi
-export CREATE_NETWORKS=${CREATE_NETWORKS:-true}
+#### Make sure the ISO file is saved to a system path that is accessible to libvirt-qemu:kvm
+export SLAVE_VM_CONFIG_DISK=$(place_file_under_libvirt_owned_dir ${aioHostname}.${clusterDomain}-config.iso ${NON_DEFAULT_LIBVIRT_DIR})
popd
-render_config "${SLAVE_VM_NAME}" "${SLAVE_VM_MEM_KB}" "${SLAVE_VM_CPUS}" "${SLAVE_VM_SOURCE_DISK}" "${SLAVE_VM_CONFIG_DISK}" "${CREATE_NETWORKS}"
+render_config "${SLAVE_VM_NAME}" "${SLAVE_VM_MEM_KB}" "${SLAVE_VM_CPUS}" "${SLAVE_VM_SOURCE_DISK}" "${SLAVE_VM_CONFIG_DISK}"
virsh define $(pwd)/${SLAVE_VM_NAME}-vm.xml
virsh autostart ${SLAVE_VM_NAME}