Update deployment scripts for TryMCP

Introduce a third Ethernet controller for the slave AIO node to support
a public floating address for the host and internet access from the VMs.

Change-Id: Idf0c423559d1bf9a13617760295bc0c943800fdb
Related-Prod: TRYMCP-6
diff --git a/predefine-vm/README.rst b/predefine-vm/README.rst
index 2438818..3351bb1 100644
--- a/predefine-vm/README.rst
+++ b/predefine-vm/README.rst
@@ -59,6 +59,11 @@
       You can download and use the following image: http://images.mcp.mirantis.net/ubuntu-16-04-x64-mcp2019.2.0.qcow2
     * SLAVE_VM_MEM_KB - amount of RAM for VM in KB. Default is: 16777216
     * SLAVE_VM_CPUS - amount of CPUs to use. Default is: 4.
+    * VM_PUBLIC_NET_NEEDED - whether to create an additional public interface for the instance, which
+      is used to provide public addresses for OpenStack. Default is: false. For the slave AIO node it
+      cannot be overridden and is always set to true.
+    * VM_PUBLIC_BRIDGE_NAME - Bridge name to use for the public network. Optional, default: 'br-pub'
+    * VM_PUBLIC_BRIDGE_DISABLE - Do not use a host bridge for the public OpenStack network; create a
+      new host-only network instead. Optional, default: false
+    * VM_PUBLIC_NETWORK_NAME - Name of the public OpenStack network. Optional, default: 'pub_network'
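+
+For example, to enable the public interface on top of an existing host bridge
+(the bridge and network names below are the defaults; an illustrative sketch)::
+
+    export VM_PUBLIC_NET_NEEDED=true
+    export VM_PUBLIC_BRIDGE_NAME='br-pub'
+    export VM_PUBLIC_NETWORK_NAME='pub_network'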
 
 The next parameters should be the same as for the cfg01 node:
 
@@ -70,13 +75,19 @@
     * VM_MGM_NETWORK_NAME
     * VM_CTL_NETWORK_NAME
 
-Also once you setup cfg01 setup the next parameter: export CREATE_NEWORKS=false
-This parameter will disable network recreation, which can be needed in case of changing network setup.
+The next parameter:
+
+    * RECREATE_NETWORKS_IF_EXISTS
+
+controls network recreation, which may be needed when the network setup changes.
+By default it is set to `false` to prevent networks from being recreated when
+nodes are rebuilt. If you need to recreate the networks, set it to true or
+manually destroy the existing networks.
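+
+For example, to force a clean rebuild of the networks (an illustrative
+invocation, not the default)::
+
+    export RECREATE_NETWORKS_IF_EXISTS=true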
 
 Also, if you are not going to use the system bridges, set the next parameters to true:
 
-    * VM_MGM_BRIDGE_DISABLE=true
-    * VM_CTL_BRIDGE_DISABLE=true
+    * export VM_MGM_BRIDGE_DISABLE=true
+    * export VM_CTL_BRIDGE_DISABLE=true
+    * export VM_PUBLIC_BRIDGE_DISABLE=true
 
 This will switch to using locally created virsh networks.
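+
+You can check which virsh networks exist with::
+
+    virsh net-list --all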
 
diff --git a/predefine-vm/define-slave-vm.sh b/predefine-vm/define-slave-vm.sh
index 129a140..b9c3df6 100755
--- a/predefine-vm/define-slave-vm.sh
+++ b/predefine-vm/define-slave-vm.sh
@@ -114,14 +114,15 @@
 export SLAVE_VM_CONFIG_DISK=$(place_file_under_libvirt_owned_dir ${aioHostname}.${clusterDomain}-config.iso ${NON_DEFAULT_LIBVIRT_DIR})
 popd
 
+# The slave AIO node always requires the public interface, so force it on
+# regardless of what env_vars.sh exported (see VM_PUBLIC_NET_NEEDED there).
+export VM_PUBLIC_NET_NEEDED=true
 render_config "${SLAVE_VM_NAME}" "${SLAVE_VM_MEM_KB}" "${SLAVE_VM_CPUS}" "${SLAVE_VM_SOURCE_DISK}" "${SLAVE_VM_CONFIG_DISK}"
 
 virsh define $(pwd)/${SLAVE_VM_NAME}-vm.xml
 virsh autostart ${SLAVE_VM_NAME}
 
-openstackAIOFile=$(isoinfo -i ${VM_CONFIG_DISK} -J -f | grep -w "setup_aio.yml")
+openstackAIOFile=$(isoinfo -i ${VM_CONFIG_DISK} -J -f | grep -w "openstack_aio/setup_aio.yml")
 openstackScheme=$(isoinfo -i ${VM_CONFIG_DISK} -J -x ${openstackAIOFile} | grep -w 'cluster_public_protocol' | cut -f 2 -d ':' | tr -d ' ')
-openstackAddress=$(isoinfo -i ${VM_CONFIG_DISK} -J -x ${allocationDataFile} | grep -w 'aio_node_address' | cut -f 2 -d ':' | tr -d ' ')
+openstackAddress=$(isoinfo -i ${VM_CONFIG_DISK} -J -x ${allocationDataFile} | grep -w 'cluster_public_host' | cut -f 2 -d ':' | tr -d ' ')
 secretsFile=$(isoinfo -i ${VM_CONFIG_DISK} -J -f | grep -w "infra/secrets.yml")
 openstackPassword=$(isoinfo -i ${VM_CONFIG_DISK} -J -x ${secretsFile} | grep -E -w 'keystone_admin_password(_generated)?' | cut -f 2 -d ':' | tr -d ' ')
 echo "Once OpenStack deploy job is finished successfully OpenStack UI will be available on: ${openstackScheme}://${openstackAddress}:8078"
diff --git a/predefine-vm/define-vm.sh b/predefine-vm/define-vm.sh
index 05c0a8a..98400dc 100755
--- a/predefine-vm/define-vm.sh
+++ b/predefine-vm/define-vm.sh
@@ -37,6 +37,6 @@
 
 allocationDataFile=$(isoinfo -i ${VM_CONFIG_DISK} -J -f | grep -w "allocation_data.yml")
 secretsFile=$(isoinfo -i ${VM_CONFIG_DISK} -J -f | grep "infra/secrets.yml")
-cfgJenkinsPassword=$(isoinfo -i ${VM_CONFIG_DISK} -J -x ${secretsFile} | grep 'jenkins_cfg_admin_password_generated')
-cfgJenkinsAddress=$(isoinfo -i ${VM_CONFIG_DISK} -J -x ${allocationDataFile} | grep 'infra_config_deploy_address')
+cfgJenkinsPassword=$(isoinfo -i ${VM_CONFIG_DISK} -J -x ${secretsFile} | grep 'jenkins_cfg_admin_password_generated' | cut -f 2 -d ':' | tr -d ' ')
+cfgJenkinsAddress=$(isoinfo -i ${VM_CONFIG_DISK} -J -x ${allocationDataFile} | grep 'infra_config_deploy_address' | cut -f 2 -d ':' | tr -d ' ')
 echo "Once deployed, Jenkins will be available via: http://${cfgJenkinsAddress}:8081 and login creds are: admin / ${cfgJenkinsPassword}"
\ No newline at end of file
diff --git a/predefine-vm/env_vars.sh b/predefine-vm/env_vars.sh
index ff29cbf..d996b6a 100644
--- a/predefine-vm/env_vars.sh
+++ b/predefine-vm/env_vars.sh
@@ -7,7 +7,12 @@
 export RECREATE_NETWORKS_IF_EXISTS=${RECREATE_NETWORKS_IF_EXISTS:-false}
 export VM_MEM_KB=${VM_MEM_KB:-"12589056"}
 export VM_CPUS=${VM_CPUS:-"4"}
-# optional params if you want enable slave vm
+# Params for deploying the slave VM AIO node
 export SLAVE_VM_MEM_KB=${SLAVE_VM_MEM_KB:-"16777216"}
 export SLAVE_VM_CPUS=${SLAVE_VM_CPUS:-"4"}
-export SLAVE_VM_DISK_SIZE=${SLAVE_VM_DISK_SIZE:-"50G"}
\ No newline at end of file
+export SLAVE_VM_DISK_SIZE=${SLAVE_VM_DISK_SIZE:-"50G"}
+export VM_PUBLIC_BRIDGE_DISABLE=${VM_PUBLIC_BRIDGE_DISABLE:-false}
+export VM_PUBLIC_BRIDGE_NAME=${VM_PUBLIC_BRIDGE_NAME:-"br-pub"}
+export VM_PUBLIC_NETWORK_NAME=${VM_PUBLIC_NETWORK_NAME:-"pub_network"}
+# Automatically set to true for the slave AIO deployment (see define-slave-vm.sh)
+export VM_PUBLIC_NET_NEEDED=${VM_PUBLIC_NET_NEEDED:-false}
\ No newline at end of file
diff --git a/predefine-vm/functions.sh b/predefine-vm/functions.sh
index d5b1cec..b0d7976 100644
--- a/predefine-vm/functions.sh
+++ b/predefine-vm/functions.sh
@@ -10,6 +10,20 @@
   source ${envFile}
 fi
 
+function count_netmask {
+    # Convert a CIDR definition (e.g. 10.13.0.0/24) into a dotted-decimal
+    # netmask (e.g. 255.255.255.0).
+    local network=$1
+    local cidr=$(echo ${network} | cut -f 2 -d '/')
+    # Build a 32-bit mask with ${cidr} leading ones, then print it octet by
+    # octet, joining the octets with dots.
+    local mask=$(( 0xffffffff ^ ((1 << (32 - cidr)) - 1) ))
+    local netmask_decimal=""
+    for i in 24 16 8 0; do
+        netmask_decimal+="$(( (mask >> i) & 0xff ))"
+        [[ "${i}" != '0' ]] && netmask_decimal+='.'
+    done
+    echo "${netmask_decimal}"
+}
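+# Example: count_netmask "192.168.10.0/24" prints "255.255.255.0".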
+
 function check_packages {
     local slave=$1
     local packages="libvirt-bin qemu-kvm"
@@ -215,6 +229,31 @@
 EOF
 fi
 
+  if [[ "${VM_PUBLIC_NET_NEEDED}" =~ [Tt]rue ]]; then
+    if [[ "${VM_PUBLIC_BRIDGE_DISABLE}" =~ [Ff]alse ]]; then
+        create_bridge_network "${VM_PUBLIC_NETWORK_NAME}" "${VM_PUBLIC_BRIDGE_NAME}"
+        cat <<EOF >> $(pwd)/${vmName}-vm.xml
+    <interface type='bridge'>
+      <source bridge='$VM_PUBLIC_BRIDGE_NAME'/>
+      <model type='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
+    </interface>
+EOF
+    else
+        local vmPublicNetworkGateway=$(isoinfo -i ${VM_CONFIG_DISK} -J -x ${allocationDataFile} | grep -w 'openstack_public_neutron_subnet_gateway' | cut -f 2 -d ':' | tr -d ' ')
+        local vmPublicNetworkCIDR=$(isoinfo -i ${VM_CONFIG_DISK} -J -x ${allocationDataFile} | grep -w 'openstack_public_neutron_subnet_cidr' | cut -f 2 -d ':' | tr -d ' ')
+        local vmPublicNetworkMask=$(count_netmask "${vmPublicNetworkCIDR}")
+        create_host_network "${VM_PUBLIC_NETWORK_NAME}" "${vmPublicNetworkGateway}" "${vmPublicNetworkMask}" true
+        cat <<EOF >> $(pwd)/${vmName}-vm.xml
+    <interface type='network'>
+      <source network='$VM_PUBLIC_NETWORK_NAME'/>
+      <model type='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
+    </interface>
+EOF
+    fi
+  fi
+
   cat <<EOF >> $(pwd)/${vmName}-vm.xml
     <serial type='pty'>
       <source path='/dev/pts/1'/>