#!/bin/bash
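#
# Define a "slave" (AIO) VM on the local libvirt host: read deployment
# metadata from the Salt master config drive, render cloud-init user_data,
# build a per-VM config ISO and register the resulting domain.
#
# Required environment variables:
#   SLAVE_VM_NAME         - libvirt domain name for the new VM
#   SLAVE_VM_SOURCE_DISK  - path to the source qcow2 disk image
#   VM_CONFIG_DISK        - path to the Salt master config drive image
#   SLAVE_VM_MEM_KB       - VM memory size (KB, per the variable name)
#   SLAVE_VM_CPUS         - number of vCPUs
# Optional:
#   CREATE_NETWORKS       - also create libvirt networks (default: true)
#
# Example invocation (illustrative values; the script file name may differ):
#   export SLAVE_VM_NAME="slave01"
#   export SLAVE_VM_SOURCE_DISK="/var/lib/images/slave01.qcow2"
#   export VM_CONFIG_DISK="/var/lib/images/cfg01-config.iso"
#   export SLAVE_VM_MEM_KB="8388608"
#   export SLAVE_VM_CPUS="4"
#   ./define-slave-vm.sh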

functionsFile="$(pwd)/functions.sh"

if [[ ! -f ${functionsFile} ]]; then
    echo "ERROR: Cannot find 'functions' libfile (${functionsFile}), check your mcp/mcp-common-scripts repo."
    exit 1
else
    source ${functionsFile}
fi

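# Validate required input; these must be set in the caller's environment.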
if [[ -z ${SLAVE_VM_NAME} ]]; then
    echo "ERROR: \$SLAVE_VM_NAME not set!"
    exit 1
fi
if [[ -z ${SLAVE_VM_SOURCE_DISK} ]] || [[ ! -f ${SLAVE_VM_SOURCE_DISK} ]]; then
    echo "ERROR: \$SLAVE_VM_SOURCE_DISK not set, or file does not exist!"
    exit 1
fi
if [[ -z ${VM_CONFIG_DISK} ]] || [[ ! -f ${VM_CONFIG_DISK} ]]; then
    echo "ERROR: \$VM_CONFIG_DISK not set, or file does not exist!"
    exit 1
fi

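# Verify that the host packages needed for the "slave" VM role are present
# (check_packages is provided by the sourced functions.sh).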
check_packages "slave"

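# Mount the Salt master config drive and pull the deployment metadata
# (context and allocation data) from it.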
configDriveDir="$(dirname $0)/../config-drive"
pushd "${configDriveDir}"
tmpDir=$(mktemp -d -p $(pwd))
mount ${VM_CONFIG_DISK} ${tmpDir}
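# Extract the Salt master address, cluster domain and the AIO node's
# identity/credentials from context_data.yml and allocation_data.yml.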
contextFile=$(find ${tmpDir}/mcp -name context_data.yml)
allocationDataFile=$(find ${tmpDir}/mcp -name allocation_data.yml)
saltMasterIp=$(grep salt_master_management_address ${contextFile} | cut -f 2 -d ':' | tr -d ' ')
clusterDomain=$(grep cluster_domain ${contextFile} | cut -f 2 -d ':' | tr -d ' ')
aioIp=$(grep 'aio_node_deploy_address:' ${allocationDataFile} | cut -f 2 -d ':' | tr -d ' ')
aioHostname=$(grep 'aio_node_hostname:' ${allocationDataFile} | cut -f 2 -d ':' | tr -d ' ')
aioFailSafeUserKey=$(grep cfg_failsafe_ssh_public_key ${contextFile} | cut -f 2 -d ':' | tr -d ' ')
aioFailSafeUser=$(grep cfg_failsafe_user ${contextFile} | cut -f 2 -d ':' | tr -d ' ')
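# Copy network_data.json out of the config drive and re-point it at the AIO
# node by replacing the Salt master IP with the AIO deploy address.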
networkDataFile=$(find ${tmpDir}/openstack -name network_data.json)
networkDataFileBaseName=$(basename ${networkDataFile})
cp ${networkDataFile} ./${networkDataFileBaseName}
sed -i ${networkDataFileBaseName} -e "s/${saltMasterIp}/${aioIp}/g"
umount ${tmpDir}
rm -rf ${tmpDir}

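# Render cloud-init user_data: grow the root volume group, apply the LVM
# image layout and bootstrap a Salt minion pointed at the Salt master.
# The outer heredoc is unquoted, so ${aioHostname}, ${clusterDomain} and
# ${saltMasterIp} are expanded now, while the file is generated.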
cat <<EOF > ./user_data
#cloud-config
output : { all : '| tee -a /var/log/cloud-init-output.log' }
growpart:
  mode: auto
  devices:
    - '/'
    - '/dev/vda3'
  ignore_growroot_disabled: false
write_files:
  - content: |
      root:
        size: '70%VG'
      var_log:
        size: '10%VG'
      var_log_audit:
        size: '500M'
      var_tmp:
        size: '3000M'
      tmp:
        size: '500M'
    owner: root:root
    path: /usr/share/growlvm/image-layout.yml
slave_boot:
  - &slave_boot |
    #!/bin/bash

    # Redirect all outputs
    exec > >(tee -i /tmp/cloud-init-bootstrap.log) 2>&1
    set -xe

    echo "Configuring Salt minion ..."
    [ ! -d /etc/salt/minion.d ] && mkdir -p /etc/salt/minion.d
    echo -e "id: ${aioHostname}.${clusterDomain}\nmaster: ${saltMasterIp}" > /etc/salt/minion.d/minion.conf
    cat >> /etc/salt/minion.d/minion.conf << EOF
    max_event_size: 100000000
    acceptance_wait_time_max: 60
    acceptance_wait_time: 10
    random_reauth_delay: 270
    recon_default: 1000
    recon_max: 60000
    recon_randomize: True
    auth_timeout: 60
    EOF
    service salt-minion restart
runcmd:
  - 'if lvs vg0; then pvresize /dev/vda3; fi'
  - 'if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi'
  - [bash, -cex, *slave_boot]
EOF

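# Build the per-VM config-drive ISO from the rendered user_data and network
# data, then grow the source disk to 80G.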
isoArgs="--name ${aioHostname} --hostname ${aioHostname}.${clusterDomain} --user-data $(pwd)/user_data --network-data $(pwd)/${networkDataFileBaseName} --cloud-user-name ${aioFailSafeUser} --ssh-key ${aioFailSafeUserKey} --quiet --clean-up"
python ./create_config_drive.py ${isoArgs}
qemu-img resize ${SLAVE_VM_SOURCE_DISK} 80G
#### Make sure both files are saved to a system path that is accessible to libvirt-qemu:kvm
export SLAVE_VM_SOURCE_DISK=$(place_file_under_libvirt ${SLAVE_VM_SOURCE_DISK})
export SLAVE_VM_CONFIG_DISK=$(place_file_under_libvirt ${aioHostname}.${clusterDomain}-config.iso)
export CREATE_NETWORKS=${CREATE_NETWORKS:-true}
popd

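# render_config (from functions.sh) is expected to write the libvirt domain
# definition to ${SLAVE_VM_NAME}-vm.xml in the current directory.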
render_config "${SLAVE_VM_NAME}" "${SLAVE_VM_MEM_KB}" "${SLAVE_VM_CPUS}" "${SLAVE_VM_SOURCE_DISK}" "${SLAVE_VM_CONFIG_DISK}" "${CREATE_NETWORKS}"

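# Register the domain with libvirt and have it start automatically on host boot.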
virsh define $(pwd)/${SLAVE_VM_NAME}-vm.xml
virsh autostart ${SLAVE_VM_NAME}