blob: 1f4e8c896ba80f713c1a7896d079a6215f161198 [file] [log] [blame]
#!/bin/bash

set -e

# Shared helper library expected in the current working directory; it provides
# prereq_check, place_file_under_libvirt_owned_dir and render_config used below.
functionsFile="$(pwd)/functions.sh"

# Quote the path everywhere: $(pwd) may legitimately contain spaces.
if [[ ! -f "${functionsFile}" ]]; then
    echo "ERROR: Can not find 'functions' libfile (${functionsFile}), check your mcp/mcp-common-scripts repo."
    exit 1
else
    source "${functionsFile}"
fi
13
# Abort early unless every mandatory input is present and usable.
[[ -n ${SLAVE_VM_NAME} ]] || {
    echo "ERROR: \$SLAVE_VM_NAME not set!"
    exit 1
}
[[ -n ${SLAVE_VM_SOURCE_DISK} && -f ${SLAVE_VM_SOURCE_DISK} ]] || {
    echo "ERROR: \$SLAVE_VM_SOURCE_DISK not set, or file does not exist!"
    exit 1
}
[[ -n ${VM_CONFIG_DISK} && -f ${VM_CONFIG_DISK} ]] || {
    echo "ERROR: \$VM_CONFIG_DISK not set, or file does not exist!"
    exit 1
}
26
# Host prerequisite check for the "slave" role (defined in functions.sh).
prereq_check "slave"

# Grow the slave image before first boot; cloud-init's growpart/growlvm
# (see user_data below) consumes the extra space.
qemu-img resize "${SLAVE_VM_SOURCE_DISK}" 80G
#### Make sure that disk saved to system path which is available for libvirt-qemu:kvm
# Assignment is split from 'export' so a failure of the helper is not masked
# by the always-zero exit status of 'export' (SC2155) and trips 'set -e'.
# NON_DEFAULT_LIBVIRT_DIR stays unquoted on purpose: when it is unset/empty
# the helper must receive a single argument, not an empty second one.
SLAVE_VM_SOURCE_DISK=$(place_file_under_libvirt_owned_dir "${SLAVE_VM_SOURCE_DISK}" ${NON_DEFAULT_LIBVIRT_DIR})
export SLAVE_VM_SOURCE_DISK
32
### Create simple ISO file for a slave vm
networkDataFileBaseName='network_data.json'
# List the config-drive contents once and reuse the listing for every path
# lookup (the previous version ran the identical isoinfo listing three times).
isoFileListing=$(isoinfo -i "${VM_CONFIG_DISK}" -J -f)
networkDataFile=$(grep -w "${networkDataFileBaseName}" <<< "${isoFileListing}")
contextFilePath=$(grep -w "context_data.yml" <<< "${isoFileListing}")
allocationDataFile=$(grep -w "allocation_data.yml" <<< "${isoFileListing}")
# Extract 'key: value' scalars from the YAML files on the config drive.
# NOTE(review): grep/cut parsing assumes one match per key and no extra ':'
# inside values — confirm against the mcp config-drive layout.
saltMasterIp=$(isoinfo -i "${VM_CONFIG_DISK}" -J -x "${allocationDataFile}" | grep -w 'infra_config_deploy_address' | cut -f 2 -d ':' | tr -d ' ')
clusterDomain=$(isoinfo -i "${VM_CONFIG_DISK}" -J -x "${contextFilePath}" | grep -w 'cluster_domain:' | cut -f 2 -d ':' | tr -d ' ')
aioIp=$(isoinfo -i "${VM_CONFIG_DISK}" -J -x "${allocationDataFile}" | grep -w 'aio_node_deploy_address:' | cut -f 2 -d ':' | tr -d ' ')
aioHostname=$(isoinfo -i "${VM_CONFIG_DISK}" -J -x "${allocationDataFile}" | grep -w 'aio_node_hostname:' | cut -f 2 -d ':' | tr -d ' ')
# ssh public keys contain inner spaces, so only the first space after the
# colon is stripped (sed 's/ //'), unlike the tr -d used for plain scalars.
aioFailSafeUserKey=$(isoinfo -i "${VM_CONFIG_DISK}" -J -x "${contextFilePath}" | grep -w 'cfg_failsafe_ssh_public_key:' | cut -f 2 -d ':' | sed 's/ //')
aioFailSafeUser=$(isoinfo -i "${VM_CONFIG_DISK}" -J -x "${contextFilePath}" | grep -w 'cfg_failsafe_user:' | cut -f 2 -d ':' | tr -d ' ')
# Re-point network_data at the AIO node's address instead of the salt master.
networkDataForSlave=$(isoinfo -i "${VM_CONFIG_DISK}" -J -x "${networkDataFile}" | sed -e "s/${saltMasterIp}/${aioIp}/g")
Denis Egorenkoc2656402019-04-19 18:17:50 +040045
# Config-drive templates live next to this script's parent directory.
configDriveDir="$(dirname "$0")/../config-drive"
pushd "${configDriveDir}"
# Quote the expansion: unquoted, the JSON payload would undergo word
# splitting and glob expansion, mangling whitespace and any wildcard chars.
echo -e "${networkDataForSlave}" > "${networkDataFileBaseName}"
# Unquoted EOF delimiter on purpose: ${aioHostname}/${clusterDomain}/
# ${saltMasterIp} must be expanded NOW, while rendering user_data.
# The indented inner 'EOF' is plain text here; it only terminates the
# minion.conf here-doc later, when the slave_boot script runs on the VM.
cat <<EOF > ./user_data
#cloud-config
output : { all : '| tee -a /var/log/cloud-init-output.log' }
growpart:
  mode: auto
  devices:
    - '/'
    - '/dev/vda3'
  ignore_growroot_disabled: false
write_files:
  - content: |
      root:
        size: '70%VG'
      var_log:
        size: '10%VG'
      var_log_audit:
        size: '500M'
      var_tmp:
        size: '3000M'
      tmp:
        size: '500M'
    owner: root:root
    path: /usr/share/growlvm/image-layout.yml
slave_boot:
  - &slave_boot |
    #!/bin/bash

    # Redirect all outputs
    exec > >(tee -i /tmp/cloud-init-bootstrap.log) 2>&1
    set -xe

    echo "Configuring Salt minion ..."
    [ ! -d /etc/salt/minion.d ] && mkdir -p /etc/salt/minion.d
    echo -e "id: ${aioHostname}.${clusterDomain}\nmaster: ${saltMasterIp}" > /etc/salt/minion.d/minion.conf
    cat >> /etc/salt/minion.d/minion.conf << EOF
    log_level: info
    max_event_size: 100000000
    acceptance_wait_time_max: 60
    acceptance_wait_time: 10
    random_reauth_delay: 270
    recon_default: 1000
    recon_max: 60000
    recon_randomize: True
    auth_timeout: 60
    EOF

    systemctl restart salt-minion
    sleep 90
    cat /var/log/salt/minion
    sync


runcmd:
  - 'if lvs vg0; then pvresize /dev/vda3; fi'
  - 'if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi'
  - [bash, -cex, *slave_boot]
EOF
106
# Build the create_config_drive.py argument list as an array so each option
# stays one word without relying on unquoted word splitting of a flat string.
isoArgs=(--name "${aioHostname}" --hostname "${aioHostname}.${clusterDomain}" --user-data "$(pwd)/user_data" --network-data "$(pwd)/${networkDataFileBaseName}" --quiet --clean-up)
if [[ -n "${aioFailSafeUser}" ]] && [[ -n "${aioFailSafeUserKey}" ]]; then
    # Optional fail-safe login baked into the config drive.
    echo "${aioFailSafeUserKey}" > "failSafeKey.pub"
    isoArgs+=(--cloud-user-name "${aioFailSafeUser}" --ssh-keys failSafeKey.pub)
fi
python ./create_config_drive.py "${isoArgs[@]}"
#### Make sure that iso file is saved to system path which is available for libvirt-qemu:kvm
# Split from 'export' so the helper's failure is not masked (SC2155) and
# aborts under 'set -e'. NON_DEFAULT_LIBVIRT_DIR intentionally unquoted:
# when empty it must vanish rather than become an empty argument.
SLAVE_VM_CONFIG_DISK=$(place_file_under_libvirt_owned_dir "${aioHostname}.${clusterDomain}-config.iso" ${NON_DEFAULT_LIBVIRT_DIR})
export SLAVE_VM_CONFIG_DISK
popd

render_config "${SLAVE_VM_NAME}" "${SLAVE_VM_MEM_KB}" "${SLAVE_VM_CPUS}" "${SLAVE_VM_SOURCE_DISK}" "${SLAVE_VM_CONFIG_DISK}"

# Register the domain with libvirt and mark it to start on host boot.
virsh define "$(pwd)/${SLAVE_VM_NAME}-vm.xml"
virsh autostart "${SLAVE_VM_NAME}"
Denis Egorenko0d045ae2019-06-04 18:23:11 +0400121
# Pull OpenStack endpoint details off the config drive purely to print a
# convenience summary for the operator; no state is changed here.
openstackAIOFile=$(isoinfo -i "${VM_CONFIG_DISK}" -J -f | grep -w "setup_aio.yml")
openstackScheme=$(isoinfo -i "${VM_CONFIG_DISK}" -J -x "${openstackAIOFile}" | grep -w 'cluster_public_protocol' | cut -f 2 -d ':' | tr -d ' ')
openstackAddress=$(isoinfo -i "${VM_CONFIG_DISK}" -J -x "${allocationDataFile}" | grep -w 'aio_node_address' | cut -f 2 -d ':' | tr -d ' ')
secretsFile=$(isoinfo -i "${VM_CONFIG_DISK}" -J -f | grep -w "infra/secrets.yml")
openstackPassword=$(isoinfo -i "${VM_CONFIG_DISK}" -J -x "${secretsFile}" | grep -w 'keystone_admin_password_generated' | cut -f 2 -d ':' | tr -d ' ')
echo "Once OpenStack deploy job is finished successfully OpenStack UI will be available on: ${openstackScheme}://${openstackAddress}:8078"
echo "Login creds are: admin / ${openstackPassword}"