#!/bin/bash

# Define a libvirt "slave" (AIO) VM for an MCP deployment: reads deployment
# metadata from the day01 config drive, generates a per-VM cloud-init config
# ISO and registers the domain with libvirt.
#
# Required env vars (validated below): SLAVE_VM_NAME, SLAVE_VM_SOURCE_DISK,
# VM_CONFIG_DISK. Also used: SLAVE_VM_MEM_KB, SLAVE_VM_CPUS, CREATE_NETWORKS.

# Shared helpers (check_packages, place_file_under_libvirt, render_config)
# must live in the current working directory.
functionsFile="$(pwd)/functions.sh"

if [[ ! -f ${functionsFile} ]]; then
    echo "ERROR: Can not find 'functions' libfile (${functionsFile}), check your mcp/mcp-common-scripts repo."
    exit 1
else
    # Quote the path so a working directory containing spaces still works.
    # shellcheck source=/dev/null
    source "${functionsFile}"
fi
| 11 | |
# Fail fast when any mandatory input is missing or does not point at a file.
if [[ -z "${SLAVE_VM_NAME}" ]]; then
    echo "ERROR: \$SLAVE_VM_NAME not set!"
    exit 1
fi
if [[ -z "${SLAVE_VM_SOURCE_DISK}" || ! -f "${SLAVE_VM_SOURCE_DISK}" ]]; then
    echo "ERROR: \$SLAVE_VM_SOURCE_DISK not set, or file does not exist!"
    exit 1
fi
if [[ -z "${VM_CONFIG_DISK}" || ! -f "${VM_CONFIG_DISK}" ]]; then
    echo "ERROR: \$VM_CONFIG_DISK not set, or file does not exist!"
    exit 1
fi
| 24 | |
# Verify the host has the packages needed to define a slave VM
# (helper sourced from functions.sh).
check_packages "slave"

configDriveDir="$(dirname "$0")/../config-drive"
pushd "${configDriveDir}"

# Mount the day01 config drive read from VM_CONFIG_DISK and scrape the
# deployment metadata shipped on it.
tmpDir=$(mktemp -d -p "$(pwd)")
mount "${VM_CONFIG_DISK}" "${tmpDir}"
contextFile=$(find "${tmpDir}/mcp" -name context_data.yml)
allocationDataFile=$(find "${tmpDir}/mcp" -name allocation_data.yml)
# NOTE(review): grep/cut/tr parsing assumes flat 'key: value' YAML lines as
# produced by the model generator; it would break on quoted or nested values.
saltMasterIp=$(grep salt_master_management_address "${contextFile}" | cut -f 2 -d ':' | tr -d ' ')
clusterDomain=$(grep cluster_domain "${contextFile}" | cut -f 2 -d ':' | tr -d ' ')
aioIp=$(grep 'aio_node_deploy_address:' "${allocationDataFile}" | cut -f 2 -d ':' | tr -d ' ')
aioHostname=$(grep 'aio_node_hostname:' "${allocationDataFile}" | cut -f 2 -d ':' | tr -d ' ')
aioFailSafeUserKey=$(grep cfg_failsafe_ssh_public_key "${contextFile}" | cut -f 2 -d ':' | tr -d ' ')
aioFailSafeUser=$(grep cfg_failsafe_user "${contextFile}" | cut -f 2 -d ':' | tr -d ' ')

# Copy network_data.json locally and re-point it from the Salt master
# address to the AIO node deploy address.
networkDataFile=$(find "${tmpDir}/openstack" -name network_data.json)
networkDataFileBaseName=$(basename "${networkDataFile}")
cp "${networkDataFile}" "./${networkDataFileBaseName}"
# Standard option order (expression before operand) — 'sed -i FILE -e EXPR'
# only works on GNU sed's permissive argument parsing.
sed -i -e "s/${saltMasterIp}/${aioIp}/g" "${networkDataFileBaseName}"
umount "${tmpDir}"
rm -rf "${tmpDir}"
| 45 | |
# Generate the cloud-init user_data for the slave VM. Everything up to the
# final column-0 'EOF' is heredoc payload; shell variables (aioHostname,
# clusterDomain, saltMasterIp) are expanded here, at generation time.
# The inner 'cat ... << EOF' / 'EOF' pair is indented, so it does NOT close
# this outer heredoc — it only terminates later, inside the VM, when
# cloud-init dedents the &slave_boot YAML block and runs it as a script.
# Do not add comments inside the heredoc: they would end up in user_data.
cat <<EOF > ./user_data
#cloud-config
output : { all : '| tee -a /var/log/cloud-init-output.log' }
growpart:
  mode: auto
  devices:
    - '/'
    - '/dev/vda3'
  ignore_growroot_disabled: false
write_files:
  - content: |
      root:
        size: '70%VG'
      var_log:
        size: '10%VG'
      var_log_audit:
        size: '500M'
      var_tmp:
        size: '3000M'
      tmp:
        size: '500M'
    owner: root:root
    path: /usr/share/growlvm/image-layout.yml
slave_boot:
  - &slave_boot |
    #!/bin/bash

    # Redirect all outputs
    exec > >(tee -i /tmp/cloud-init-bootstrap.log) 2>&1
    set -xe

    echo "Configuring Salt minion ..."
    [ ! -d /etc/salt/minion.d ] && mkdir -p /etc/salt/minion.d
    echo -e "id: ${aioHostname}.${clusterDomain}\nmaster: ${saltMasterIp}" > /etc/salt/minion.d/minion.conf
    cat >> /etc/salt/minion.d/minion.conf << EOF
    max_event_size: 100000000
    acceptance_wait_time_max: 60
    acceptance_wait_time: 10
    random_reauth_delay: 270
    recon_default: 1000
    recon_max: 60000
    recon_randomize: True
    auth_timeout: 60
    EOF
    service salt-minion restart
runcmd:
  - 'if lvs vg0; then pvresize /dev/vda3; fi'
  - 'if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi'
  - [bash, -cex, *slave_boot]
EOF
| 96 | |
# Build the per-VM cloud-init config ISO. Use an array instead of a
# whitespace-split string so argument values containing spaces (e.g. an
# SSH public key) survive intact.
isoArgs=(
    --name "${aioHostname}"
    --hostname "${aioHostname}.${clusterDomain}"
    --user-data "$(pwd)/user_data"
    --network-data "$(pwd)/${networkDataFileBaseName}"
    --cloud-user-name "${aioFailSafeUser}"
    --ssh-key "${aioFailSafeUserKey}"
    --quiet
    --clean-up
)
python ./create_config_drive.py "${isoArgs[@]}"

# Give the slave image room to grow; cloud-init growpart/growlvm expand it
# inside the guest on first boot.
qemu-img resize "${SLAVE_VM_SOURCE_DISK}" 80G

#### Make sure that both files are saved to system path which is available for libvirt-qemu:kvm
export SLAVE_VM_SOURCE_DISK=$(place_file_under_libvirt "${SLAVE_VM_SOURCE_DISK}")
export SLAVE_VM_CONFIG_DISK=$(place_file_under_libvirt "${aioHostname}.${clusterDomain}-config.iso")
export CREATE_NETWORKS=${CREATE_NETWORKS:-true}
popd

# Render the libvirt domain XML (helper from functions.sh), then register
# the domain and mark it to start on host boot.
render_config "${SLAVE_VM_NAME}" "${SLAVE_VM_MEM_KB}" "${SLAVE_VM_CPUS}" "${SLAVE_VM_SOURCE_DISK}" "${SLAVE_VM_CONFIG_DISK}" "${CREATE_NETWORKS}"

virsh define "$(pwd)/${SLAVE_VM_NAME}-vm.xml"
virsh autostart "${SLAVE_VM_NAME}"