#!/bin/bash

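# Example invocation (a sketch; paths, sizes, and the script name are illustrative).
# Run from the directory that holds functions.sh, since it is sourced via $(pwd) below.
# NON_DEFAULT_LIBVIRT_DIR may optionally point at an alternative libvirt-owned directory.
#   export SLAVE_VM_NAME=slave01
#   export SLAVE_VM_SOURCE_DISK=/path/to/slave-image.qcow2
#   export VM_CONFIG_DISK=/path/to/cfg01-config.iso
#   export SLAVE_VM_MEM_KB=8388608   # used by render_config below
#   export SLAVE_VM_CPUS=4           # used by render_config below
#   ./define-slave-vm.sh
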
functionsFile="$(pwd)/functions.sh"

if [[ ! -f ${functionsFile} ]]; then
    echo "ERROR: Cannot find 'functions' libfile (${functionsFile}), check your mcp/mcp-common-scripts repo."
    exit 1
else
    source ${functionsFile}
fi
11
12if [[ -z ${SLAVE_VM_NAME} ]]; then
13 echo "ERROR: \$SLAVE_VM_NAME not set!"
14 exit 1
15fi
16if [[ -z ${SLAVE_VM_SOURCE_DISK} ]] || [[ ! -f ${SLAVE_VM_SOURCE_DISK} ]]; then
17 echo "ERROR: \$SLAVE_VM_SOURCE_DISK not set, or file does not exist!"
18 exit 1
19fi
20if [[ -z ${VM_CONFIG_DISK} ]] || [[ ! -f ${VM_CONFIG_DISK} ]]; then
21 echo "ERROR: \$VM_CONFIG_DISK not set, or file does not exist!"
22 exit 1
23fi
24
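# Host prerequisite checks for a slave node (prereq_check comes from functions.sh).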
prereq_check "slave"

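# Grow the slave system disk to 80G; the cloud-init growpart/growlvm config
# below extends the partitions and LVM volumes at first boot.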
qemu-img resize ${SLAVE_VM_SOURCE_DISK} 80G
#### Make sure that the disk is saved to a system path which is available to libvirt-qemu:kvm
export SLAVE_VM_SOURCE_DISK=$(place_file_under_libvirt_owned_dir ${SLAVE_VM_SOURCE_DISK} ${NON_DEFAULT_LIBVIRT_DIR})

### Create a simple ISO file for the slave VM
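# All deployment parameters for the slave are read from the existing cfg01
# config-drive ISO via isoinfo.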
networkDataFileBaseName='network_data.json'
networkDataFile=$(isoinfo -i ${VM_CONFIG_DISK} -J -f | grep -w "${networkDataFileBaseName}")
contextFilePath=$(isoinfo -i ${VM_CONFIG_DISK} -J -f | grep -w "context_data.yml")
allocationDataFile=$(isoinfo -i ${VM_CONFIG_DISK} -J -f | grep -w "allocation_data.yml")
saltMasterIp=$(isoinfo -i ${VM_CONFIG_DISK} -J -x ${allocationDataFile} | grep -w 'infra_config_deploy_address' | cut -f 2 -d ':' | tr -d ' ')
clusterDomain=$(isoinfo -i ${VM_CONFIG_DISK} -J -x ${contextFilePath} | grep -w 'cluster_domain:' | cut -f 2 -d ':' | tr -d ' ')
aioIp=$(isoinfo -i ${VM_CONFIG_DISK} -J -x ${allocationDataFile} | grep -w 'aio_node_deploy_address:' | cut -f 2 -d ':' | tr -d ' ')
aioHostname=$(isoinfo -i ${VM_CONFIG_DISK} -J -x ${allocationDataFile} | grep -w 'aio_node_hostname:' | cut -f 2 -d ':' | tr -d ' ')
aioFailSafeUserKey=$(isoinfo -i ${VM_CONFIG_DISK} -J -x ${contextFilePath} | grep -w 'cfg_failsafe_ssh_public_key:' | cut -f 2 -d ':' | sed 's/ //')
aioFailSafeUser=$(isoinfo -i ${VM_CONFIG_DISK} -J -x ${contextFilePath} | grep -w 'cfg_failsafe_user:' | cut -f 2 -d ':' | tr -d ' ')
networkDataForSlave=$(isoinfo -i ${VM_CONFIG_DISK} -J -x ${networkDataFile} | sed -e "s/${saltMasterIp}/${aioIp}/g")

configDriveDir="$(dirname $0)/../config-drive"
pushd "${configDriveDir}"
echo -e ${networkDataForSlave} > ${networkDataFileBaseName}
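# cloud-init user-data for the slave: grow the LVM layout on first boot and
# enroll the node as a Salt minion of cfg01.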
cat <<EOF > ./user_data
#cloud-config
output : { all : '| tee -a /var/log/cloud-init-output.log' }
growpart:
  mode: auto
  devices:
    - '/'
    - '/dev/vda3'
  ignore_growroot_disabled: false
write_files:
  - content: |
      root:
        size: '70%VG'
      var_log:
        size: '10%VG'
      var_log_audit:
        size: '500M'
      var_tmp:
        size: '3000M'
      tmp:
        size: '500M'
    owner: root:root
    path: /usr/share/growlvm/image-layout.yml
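# The slave_boot script below is stored under a YAML anchor and executed via
# runcmd at the end of this user-data.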
slave_boot:
  - &slave_boot |
    #!/bin/bash

    # Redirect all outputs
    exec > >(tee -i /tmp/cloud-init-bootstrap.log) 2>&1
    set -xe

    echo "Configuring Salt minion ..."
    [ ! -d /etc/salt/minion.d ] && mkdir -p /etc/salt/minion.d
    echo -e "id: ${aioHostname}.${clusterDomain}\nmaster: ${saltMasterIp}" > /etc/salt/minion.d/minion.conf
    cat >> /etc/salt/minion.d/minion.conf << EOF
    log_level: info
    max_event_size: 100000000
    acceptance_wait_time_max: 60
    acceptance_wait_time: 10
    random_reauth_delay: 270
    recon_default: 1000
    recon_max: 60000
    recon_randomize: True
    auth_timeout: 60
    EOF

    systemctl restart salt-minion
    sleep 90
    cat /var/log/salt/minion
    sync

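# Resize the LVM physical volume and layout (when vg0 exists), then run the
# bootstrap script anchored above.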
runcmd:
  - 'if lvs vg0; then pvresize /dev/vda3; fi'
  - 'if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi'
  - [bash, -cex, *slave_boot]
EOF

isoArgs="--name ${aioHostname} --hostname ${aioHostname}.${clusterDomain} --user-data $(pwd)/user_data --network-data $(pwd)/${networkDataFileBaseName} --quiet --clean-up"
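# If a failsafe user is defined in the cluster context, inject its SSH public
# key into the generated config drive as well.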
if [[ -n "${aioFailSafeUser}" ]] && [[ -n "${aioFailSafeUserKey}" ]]; then
    echo "${aioFailSafeUserKey}" > "failSafeKey.pub"
    isoArgs="${isoArgs} --cloud-user-name ${aioFailSafeUser} --ssh-keys failSafeKey.pub"
fi
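# Generate the slave's config-drive ISO (create_config_drive.py lives in the
# config-drive directory entered above).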
python ./create_config_drive.py ${isoArgs}
#### Make sure that the ISO file is saved to a system path which is available to libvirt-qemu:kvm
export SLAVE_VM_CONFIG_DISK=$(place_file_under_libvirt_owned_dir ${aioHostname}.${clusterDomain}-config.iso ${NON_DEFAULT_LIBVIRT_DIR})
popd
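
# Render the libvirt domain XML (${SLAVE_VM_NAME}-vm.xml) from the VM
# parameters (render_config comes from functions.sh).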
render_config "${SLAVE_VM_NAME}" "${SLAVE_VM_MEM_KB}" "${SLAVE_VM_CPUS}" "${SLAVE_VM_SOURCE_DISK}" "${SLAVE_VM_CONFIG_DISK}"

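# Register the domain with libvirt and enable autostart on host boot.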
virsh define $(pwd)/${SLAVE_VM_NAME}-vm.xml
virsh autostart ${SLAVE_VM_NAME}