Added .gitreview

Change-Id: I2d40c493ee10ded61e416e79ceec8eb81ba64256
diff --git a/scripts/bootstrap_all.sh b/scripts/bootstrap_all.sh
new file mode 100755
index 0000000..7d9d3ad
--- /dev/null
+++ b/scripts/bootstrap_all.sh
@@ -0,0 +1,12 @@
+#!/bin/bash -x
+
+CWD="$(dirname "$(readlink -f "$0")")"
+
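+# Run the deployment phases in order; each of these scripts logs its own
+# output to a timestamped file under /tmp.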
+"$CWD"/fuel_config_verify.sh
+"$CWD"/fuel_infra_install.sh
+"$CWD"/openstack_infra_install.sh
+"$CWD"/openstack_control_install.sh
+"$CWD"/opencontrail_control_install.sh
+"$CWD"/stacklight_infra_install.sh
+"$CWD"/openstack_compute_install.sh
+"$CWD"/stacklight_monitor_install.sh
diff --git a/scripts/common_functions.sh b/scripts/common_functions.sh
new file mode 100755
index 0000000..66c4b88
--- /dev/null
+++ b/scripts/common_functions.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+# Return the list of node names matching the given pattern
+# (1st parameter, optional; defaults to "[0-9]", i.e. all numbered nodes)
+function get_nodes_names {
+	# Default the match pattern when no parameter is given
+	if [ -z "$1" ]; then
+		match="[0-9]"
+	else
+		match="$1"
+	fi
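+	# The pillar data is printed one entry per line ("name: ..."); strip
+	# everything from the colon onwards and keep only the matching names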
+	salt-call pillar.get linux:network:host --out key | sed 's/:.*//' | grep "$match"
+}
+
+# Wait for nodes to become available
+# (i.e. to answer salt pings)
+# 1st parameter (mandatory) is the number of nodes to wait for
+# 2nd parameter (optional) is the target expression for the nodes to wait for
+#    (default "*" = all nodes)
+function wait_for {
+	# Enforce 1st parameter availability
+	if [ -z "$1" ]; then
+		echo "wait_for function requires at least 1 parameter"
+		return 1
+	fi
+	if [ "$1" -lt "1" ]; then
+		echo "wait_for function requires the 1st parameter to be a number greater than 0 ($1 invalid)"
+		return 1
+	fi
+	wanted=$1
+	nodes=${2:-"*"}
+	# Default max waiting time is 5 minutes
+	MAX_WAIT=${MAX_WAIT:-300}
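+	# Poll every 15 seconds until the expected number of minions answers,
+	# giving up once the remaining time budget is exhausted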
+	while true; do
+		nb_nodes=$(salt "$nodes" test.ping --out txt | grep -c True)
+		if [ -n "$nb_nodes" ] && [ "$nb_nodes" -eq "$wanted" ]; then
+			echo "All nodes are now answering salt pings"
+			break
+		fi
+		MAX_WAIT=$(( MAX_WAIT - 15 ))
+		if [ $MAX_WAIT -le 0 ]; then
+			echo "Only $nb_nodes of $wanted nodes answering salt pings after maximum timeout"
+			return 2
+		fi
+		echo -n "Only $nb_nodes of $wanted nodes answering salt pings. Waiting a bit longer ..."
+		sleep 15
+		echo
+	done
+	return 0
+}
diff --git a/scripts/fuel_config_verify.sh b/scripts/fuel_config_verify.sh
new file mode 100755
index 0000000..dd1d159
--- /dev/null
+++ b/scripts/fuel_config_verify.sh
@@ -0,0 +1,20 @@
+#!/bin/bash -x
+exec > >(tee -i /tmp/"$(basename "$0" .sh)"_"$(date '+%Y-%m-%d_%H-%M-%S')".log) 2>&1
+
+CWD="$(dirname "$(readlink -f "$0")")"
+
+# Import common functions
+COMMONS=$CWD/common_functions.sh
+if [ ! -f "$COMMONS" ]; then
+	echo "File $COMMONS does not exist"
+	exit 1
+fi
+. "$COMMONS"
+
+# Verify that Salt master is correctly bootstrapped
+salt-key
+reclass-salt --top
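+# salt-key lists the accepted/pending minion keys and reclass-salt --top
+# renders the node classification from the reclass metadata model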
+
+# Verify that Salt minions are responding and have the same version as the master
+salt-call --version
+salt '*' test.version
diff --git a/scripts/fuel_infra_install.sh b/scripts/fuel_infra_install.sh
new file mode 100755
index 0000000..6166440
--- /dev/null
+++ b/scripts/fuel_infra_install.sh
@@ -0,0 +1,16 @@
+#!/bin/bash -x
+exec > >(tee -i /tmp/"$(basename "$0" .sh)"_"$(date '+%Y-%m-%d_%H-%M-%S')".log) 2>&1
+
+# Refresh salt master config
+salt -C 'I@salt:master' state.sls salt.master,reclass
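+# Note: -C enables compound matching and the I@<pillar:key> syntax targets
+# minions by pillar data, so only nodes carrying that pillar key are matched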
+
+# Refresh the minions' pillar data
+salt '*' saltutil.refresh_pillar
+
+# Sync all salt resources
+salt '*' saltutil.sync_all
+
+sleep 5
+
+# Bootstrap all nodes
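+# Note: state.sls accepts a comma-separated list of formulas, so the states
+# below are applied to every node in a single run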
+salt "*" state.sls linux,openssh,salt.minion,ntp,rsyslog
diff --git a/scripts/opencontrail_control_install.sh b/scripts/opencontrail_control_install.sh
new file mode 100755
index 0000000..eca1773
--- /dev/null
+++ b/scripts/opencontrail_control_install.sh
@@ -0,0 +1,31 @@
+#!/bin/bash -x
+exec > >(tee -i /tmp/"$(basename "$0" .sh)"_"$(date '+%Y-%m-%d_%H-%M-%S')".log) 2>&1
+
+CWD="$(dirname "$(readlink -f "$0")")"
+
+# Import common functions
+COMMONS=$CWD/common_functions.sh
+if [ ! -f "$COMMONS" ]; then
+	echo "File $COMMONS does not exist"
+	exit 1
+fi
+. "$COMMONS"
+
+# Install opencontrail database services
+salt -C 'I@opencontrail:database' state.sls opencontrail.database -b 1
+# Install opencontrail control services
+salt -C 'I@opencontrail:control' state.sls opencontrail -b 1
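+# The -b 1 option used above runs each state in batches of one minion at a
+# time, so the database and control clusters are formed node by node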
+
+# Provision opencontrail control services
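+# Register every control node with the contrail API (reachable on the cluster
+# VIP, port 8082), using the node name and address taken from pillar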
+hosts=($(get_nodes_names "ctl[0-9]"))
+vip=$(salt-call pillar.get _param:cluster_vip_address | grep '^ ' | sed -e 's/  *//')
+nb=$(( ${#hosts[@]} - 1 ))
+for i in $(seq 0 $nb); do
+	h=${hosts[$i]}
+	ip=$(salt-call pillar.get linux:network:host:"${h}":address | grep '^ ' | sed -e 's/  *//')
+	salt -C 'I@opencontrail:control:id:1' cmd.run "/usr/share/contrail-utils/provision_control.py --api_server_ip $vip --api_server_port 8082 --host_name $h --host_ip $ip --router_asn 64512 --admin_password workshop --admin_user admin --admin_tenant_name admin --oper add"
+done
+
+# Test opencontrail
+salt -C 'I@opencontrail:control' cmd.run "contrail-status"
+salt -C 'I@keystone:server' cmd.run ". /root/keystonerc; neutron net-list; nova net-list"
diff --git a/scripts/openstack_compute_install.sh b/scripts/openstack_compute_install.sh
new file mode 100755
index 0000000..64597ca
--- /dev/null
+++ b/scripts/openstack_compute_install.sh
@@ -0,0 +1,32 @@
+#!/bin/bash -x
+exec > >(tee -i /tmp/"$(basename "$0" .sh)"_"$(date '+%Y-%m-%d_%H-%M-%S')".log) 2>&1
+
+CWD="$(dirname "$(readlink -f "$0")")"
+
+# Import common functions
+COMMONS=$CWD/common_functions.sh
+if [ ! -f "$COMMONS" ]; then
+	echo "File $COMMONS does not exist"
+	exit 1
+fi
+. "$COMMONS"
+
+# Configure compute nodes
+salt "cmp*" state.apply
+salt "cmp*" state.apply
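+# Note: state.apply is run twice above, presumably so that resources which
+# depend on changes made by the first pass converge on the second one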
+
+# Provision opencontrail virtual routers
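+# Register each compute node's vRouter with the contrail API on the cluster
+# VIP, using the node name and address taken from pillar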
+hosts=($(salt-call pillar.get linux:network:host | egrep 'cmp0.*:' | sed -e 's/  *//' -e 's/://'))
+vip=$(salt-call pillar.get _param:cluster_vip_address | grep '^ ' | sed -e 's/  *//')
+nb=$(( ${#hosts[@]} - 1 ))
+for i in $(seq 0 $nb); do
+	h=${hosts[$i]}
+	ip=$(salt-call pillar.get linux:network:host:"${h}":address | grep '^ ' | sed -e 's/  *//')
+	salt -C 'I@opencontrail:control:id:1' cmd.run "/usr/share/contrail-utils/provision_vrouter.py --host_name $h --host_ip $ip --api_server_ip $vip --oper add --admin_user admin --admin_password workshop --admin_tenant_name admin"
+done
+
+# Reboot compute nodes
+salt "cmp*" system.reboot
+
+# Wait for all compute nodes in current deployment to be available
+wait_for "$(get_nodes_names "cmp[0-9]" | wc -l)" "cmp*"
diff --git a/scripts/openstack_control_install.sh b/scripts/openstack_control_install.sh
new file mode 100755
index 0000000..8291844
--- /dev/null
+++ b/scripts/openstack_control_install.sh
@@ -0,0 +1,45 @@
+#!/bin/bash -x
+exec > >(tee -i /tmp/"$(basename "$0" .sh)"_"$(date '+%Y-%m-%d_%H-%M-%S')".log) 2>&1
+
+# Set up the keystone service
+salt -C 'I@keystone:server' state.sls keystone.server -b 1
+# Populate keystone services/tenants/admins
+salt -C 'I@keystone:client' state.sls keystone.client
+salt -C 'I@keystone:server' cmd.run ". /root/keystonerc; keystone service-list"
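+# The ". /root/keystonerc; <command>" pattern sources the admin credentials
+# before each of the CLI smoke tests below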
+
+# Install glance and ensure glusterfs clusters
+salt -C 'I@glance:server' state.sls glance -b 1
+salt -C 'I@glance:server' state.sls glusterfs.client
+# Update fernet tokens before doing request on keystone server. Otherwise
+# you will get an error like:
+# "No encryption keys found; run keystone-manage fernet_setup to bootstrap one"
+salt -C 'I@keystone:server' state.sls keystone.server
+salt -C 'I@keystone:server' cmd.run ". /root/keystonerc; glance image-list"
+
+# Install nova service
+salt -C 'I@nova:controller' state.sls nova -b 1
+salt -C 'I@keystone:server' cmd.run ". /root/keystonerc; nova service-list"
+
+# Install cinder service
+salt -C 'I@cinder:controller' state.sls cinder -b 1
+salt -C 'I@keystone:server' cmd.run ". /root/keystonerc; cinder list"
+
+# Install neutron service
+salt -C 'I@neutron:server' state.sls neutron -b 1
+salt -C 'I@neutron:gateway' state.sls neutron
+salt -C 'I@keystone:server' cmd.run ". /root/keystonerc; neutron agent-list"
+
+# Install heat service
+salt -C 'I@heat:server' state.sls heat -b 1
+salt -C 'I@keystone:server' cmd.run ". /root/keystonerc; heat resource-type-list"
+
+# Install horizon dashboard
+salt -C 'I@horizon:server' state.sls horizon
+salt -C 'I@nginx:server' state.sls nginx
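+# Note: nginx presumably acts as the reverse proxy in front of the dashboard
+# here; check the nginx pillar data for the exact proxy configuration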
+
+# Install ceilometer services
+salt -C 'I@ceilometer:server' state.sls ceilometer -b 1
+salt -C 'I@heka:ceilometer_collector:enabled:True' state.sls heka.ceilometer_collector
+
+# Install aodh services
+salt -C 'I@aodh:server' state.sls aodh -b 1
diff --git a/scripts/openstack_infra_install.sh b/scripts/openstack_infra_install.sh
new file mode 100755
index 0000000..ca56361
--- /dev/null
+++ b/scripts/openstack_infra_install.sh
@@ -0,0 +1,33 @@
+#!/bin/bash -x
+exec > >(tee -i /tmp/"$(basename "$0" .sh)"_"$(date '+%Y-%m-%d_%H-%M-%S')".log) 2>&1
+
+# Install keepalived
+salt -C 'I@keepalived:cluster' state.sls keepalived -b 1
+# Check the VIPs
+salt -C 'I@keepalived:cluster' cmd.run "ip a | grep 172.16.10.2"
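+# 172.16.10.2 is the cluster VIP of this particular environment; the grep
+# confirms that one of the keepalived nodes currently holds it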
+
+# Install gluster
+salt -C 'I@glusterfs:server' state.sls glusterfs.server.service
+salt -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+# Check the gluster status
+salt -C 'I@glusterfs:server' cmd.run "gluster peer status; gluster volume status" -b 1
+
+# Install rabbitmq
+salt -C 'I@rabbitmq:server' state.sls rabbitmq
+# Check the rabbitmq status
+salt -C 'I@rabbitmq:server' cmd.run "rabbitmqctl cluster_status"
+
+# Install galera
+salt -C 'I@galera:master' state.sls galera
+salt -C 'I@galera:slave' state.sls galera
+# Check galera status
+salt -C 'I@galera:master' mysql.status | grep -A1 wsrep_cluster_size
+salt -C 'I@galera:slave' mysql.status | grep -A1 wsrep_cluster_size
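+# wsrep_cluster_size reports how many nodes have joined the galera cluster;
+# it should match the number of galera servers deployed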
+
+# Install haproxy
+salt -C 'I@haproxy:proxy' state.sls haproxy
+salt -C 'I@haproxy:proxy' service.status haproxy
+salt -I 'haproxy:proxy' service.restart rsyslog
+
+# Install memcached
+salt -C 'I@memcached:server' state.sls memcached
diff --git a/scripts/stacklight_infra_install.sh b/scripts/stacklight_infra_install.sh
new file mode 100755
index 0000000..3769aa9
--- /dev/null
+++ b/scripts/stacklight_infra_install.sh
@@ -0,0 +1,15 @@
+#!/bin/bash -x
+exec > >(tee -i /tmp/"$(basename "$0" .sh)"_"$(date '+%Y-%m-%d_%H-%M-%S')".log) 2>&1
+
+# Install the StackLight backends
+salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+salt -C 'I@influxdb:server' state.sls influxdb -b 1
+salt -C 'I@kibana:server' state.sls kibana.server -b 1
+salt -C 'I@grafana:server' state.sls grafana.server -b 1
+salt -C 'I@nagios:server' state.sls nagios.server
+salt -C 'I@elasticsearch:client' state.sls elasticsearch.client.service
+salt -C 'I@kibana:client' state.sls kibana.client.service
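+# The asynchronous salt-minion restart below (presumably needed to load the
+# freshly installed client modules) is followed by a short sleep before the
+# client states are applied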
+salt -C 'I@kibana:client or I@elasticsearch:client' --async service.restart salt-minion
+sleep 10
+salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+salt -C 'I@kibana:client' state.sls kibana.client
diff --git a/scripts/stacklight_monitor_install.sh b/scripts/stacklight_monitor_install.sh
new file mode 100755
index 0000000..159ad04
--- /dev/null
+++ b/scripts/stacklight_monitor_install.sh
@@ -0,0 +1,52 @@
+#!/bin/bash -x
+exec > >(tee -i /tmp/"$(basename "$0" .sh)"_"$(date '+%Y-%m-%d_%H-%M-%S')".log) 2>&1
+
+# Start by flushing the Salt Mine to make sure it is clean
+# Also clean up the grains files to make sure that we start from a clean state
+salt "*" mine.flush
+salt "*" file.remove /etc/salt/grains.d/collectd
+salt "*" file.remove /etc/salt/grains.d/grafana
+salt "*" file.remove /etc/salt/grains.d/heka
+salt "*" file.remove /etc/salt/grains
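+# The grains files removed above are regenerated by the collectd, heka,
+# grafana and salt.minion.grains states that follow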
+
+# Install the collectd and heka services on the nodes; this will also generate
+# the metadata that goes into the grains and eventually into the Salt Mine
+salt "*" state.sls collectd
+salt "*" state.sls heka
+
+# Gather the Grafana metadata as grains
+salt -C 'I@grafana:collector' state.sls grafana.collector
+
+# Update Salt Mine
+salt "*" state.sls salt.minion.grains
+salt "*" saltutil.refresh_modules
+salt "*" mine.update
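+# mine.update publishes the refreshed grains into the Salt Mine so that the
+# aggregator and remote collector states below can pick up the collected metadata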
+
+sleep 5
+
+# Update Heka
+salt -C 'I@heka:aggregator:enabled:True or I@heka:remote_collector:enabled:True' state.sls heka
+
+# Update collectd
+salt -C 'I@collectd:remote_client:enabled:True' state.sls collectd
+
+# Update Nagios
+salt -C 'I@nagios:server' state.sls nagios
+# Stop the Nagios service because the package starts it by default; it will be
+# started later only on the node holding the VIP address
+salt -C 'I@nagios:server' service.stop nagios3
+
+# Finalize the configuration of Grafana (add the dashboards...)
+salt -C 'I@grafana:client' state.sls grafana.client.service
+salt -C 'I@grafana:client' --async service.restart salt-minion; sleep 10
+salt -C 'I@grafana:client' state.sls grafana.client
+
+# Get the StackLight monitoring VIP address
+vip=$(salt-call pillar.data _param:stacklight_monitor_address --out key | grep _param: | awk '{print $2}')
+vip=${vip:=172.16.10.253}
+
+# Manually (re)start the services that are bound to the monitoring VIP
+salt -G "ipv4:$vip" service.restart remote_collectd
+salt -G "ipv4:$vip" service.restart remote_collector
+salt -G "ipv4:$vip" service.restart aggregator
+salt -G "ipv4:$vip" service.restart nagios3