Merge "Sync scripts from stacklight-model"
diff --git a/scripts/bootstrap_all.sh b/scripts/bootstrap_all.sh
index 7d9d3ad..baa7584 100755
--- a/scripts/bootstrap_all.sh
+++ b/scripts/bootstrap_all.sh
@@ -2,11 +2,31 @@
CWD="$(dirname "$(readlink -f "$0")")"
-"$CWD"/fuel_config_verify.sh
-"$CWD"/fuel_infra_install.sh
-"$CWD"/openstack_infra_install.sh
-"$CWD"/openstack_control_install.sh
-"$CWD"/opencontrail_control_install.sh
-"$CWD"/stacklight_infra_install.sh
-"$CWD"/openstack_compute_install.sh
-"$CWD"/stacklight_monitor_install.sh
+K8S_BOOTSTRAP=$(salt -C 'I@kubernetes:master' test.ping 1>/dev/null 2>&1 && echo true)
+OPENSTACK_BOOTSTRAP=$(salt -C 'I@nova' test.ping 1>/dev/null 2>&1 && echo true)
+OPENCONTRAIL_BOOTSTRAP=$(salt -C 'I@opencontrail:control' test.ping 1>/dev/null 2>&1 && echo true)
+STACKLIGHTV2_BOOTSTRAP=$(salt -C 'I@prometheus:server' test.ping 1>/dev/null 2>&1 && echo true)
+
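+# Verify the configuration and install the base infrastructure and core services on every deployment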
+"$CWD"/config_verify.sh
+"$CWD"/infra_install.sh
+"$CWD"/core_services_install.sh
+if [[ "$STACKLIGHTV2_BOOTSTRAP" == "true" ]]; then
+ "$CWD"/docker_swarm_install.sh
+fi
+if [[ "$K8S_BOOTSTRAP" == "true" ]]; then
+ "$CWD"/kubernetes_install.sh
+fi
+if [[ "$STACKLIGHTV2_BOOTSTRAP" == "true" ]]; then
+ "$CWD"/stacklightv2_infra_install.sh
+fi
+if [[ "$OPENSTACK_BOOTSTRAP" == "true" ]]; then
+ "$CWD"/openstack_control_install.sh
+ "$CWD"/stacklight_infra_install.sh
+ if [[ "$OPENCONTRAIL_BOOTSTRAP" == "true" ]]; then
+ "$CWD"/opencontrail_control_install.sh
+ "$CWD"/opencontrail_compute_install.sh
+ else
+ "$CWD"/ovs_compute_install.sh
+ fi
+ "$CWD"/stacklight_monitor_install.sh
+fi
diff --git a/scripts/bootstrap_kubernetes.sh b/scripts/bootstrap_kubernetes.sh
deleted file mode 100755
index 63c5112..0000000
--- a/scripts/bootstrap_kubernetes.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash -x
-
-CWD="$(dirname "$(readlink -f "$0")")"
-
-"$CWD"/fuel_config_verify.sh
-"$CWD"/fuel_infra_install.sh
-"$CWD"/docker_swarm.sh
-"$CWD"/kubernetes_install.sh
diff --git a/scripts/fuel_config_verify.sh b/scripts/config_verify.sh
similarity index 92%
rename from scripts/fuel_config_verify.sh
rename to scripts/config_verify.sh
index dd1d159..cafa9b3 100755
--- a/scripts/fuel_config_verify.sh
+++ b/scripts/config_verify.sh
@@ -4,7 +4,7 @@
CWD="$(dirname "$(readlink -f "$0")")"
# Import common functions
-COMMONS=$CWD/common_functions.sh
+COMMONS="$CWD"/common_functions.sh
if [ ! -f "$COMMONS" ]; then
echo "File $COMMONS does not exist"
exit 1
diff --git a/scripts/core_services_install.sh b/scripts/core_services_install.sh
new file mode 100755
index 0000000..62f32e1
--- /dev/null
+++ b/scripts/core_services_install.sh
@@ -0,0 +1,51 @@
+#!/bin/bash -x
+exec > >(tee -i /tmp/"$(basename "$0" .sh)"_"$(date '+%Y-%m-%d_%H-%M-%S')".log) 2>&1
+
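+# Detect which optional services are defined in the pillar data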
+RABBITMQ_SERVICE=$(salt -C 'I@rabbitmq:server' test.ping 1>/dev/null 2>&1 && echo true)
+GALERA_SERVICE=$(salt -C 'I@galera:master or I@galera:slave' test.ping 1>/dev/null 2>&1 && echo true)
+MEMCACHED_SERVICE=$(salt -C 'I@memcached:server' test.ping 1>/dev/null 2>&1 && echo true)
+NGINX_SERVICE=$(salt -C 'I@nginx:server' test.ping 1>/dev/null 2>&1 && echo true)
+
+# Install keepalived
+salt -C 'I@keepalived:cluster' state.sls keepalived -b 1
+# Check the VIPs
+salt -C 'I@keepalived:cluster' cmd.run "ip a | grep 172.16.10.2"
+
+# Install gluster services
+salt -C 'I@glusterfs:server' state.sls glusterfs.server.service
+salt -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+# Check the gluster status
+salt -C 'I@glusterfs:server' cmd.run "gluster peer status; gluster volume status" -b 1
+# Configure gluster clients
+salt -C 'I@glusterfs:client' state.sls glusterfs.client
+
+if [[ "$RABBITMQ_SERVICE" == "true" ]]; then
+ # Install rabbitmq
+ salt -C 'I@rabbitmq:server' state.sls rabbitmq
+ # Check the rabbitmq status
+ salt -C 'I@rabbitmq:server' cmd.run "rabbitmqctl cluster_status"
+fi
+
+if [[ "$GALERA_SERVICE" == "true" ]]; then
+ # Install galera
+ salt -C 'I@galera:master' state.sls galera
+ salt -C 'I@galera:slave' state.sls galera
+ # Check galera status
+ salt -C 'I@galera:master' mysql.status | grep -A1 wsrep_cluster_size
+ salt -C 'I@galera:slave' mysql.status | grep -A1 wsrep_cluster_size
+fi
+
+# Install haproxy
+salt -C 'I@haproxy:proxy' state.sls haproxy
+salt -C 'I@haproxy:proxy' service.status haproxy
+salt -I 'haproxy:proxy' service.restart rsyslog
+
+if [[ "$MEMCACHED_SERVICE" == "true" ]]; then
+ # Install memcached
+ salt -C 'I@memcached:server' state.sls memcached
+fi
+
+if [[ "$NGINX_SERVICE" == "true" ]]; then
+ # Install nginx
+ salt -C 'I@nginx:server' state.sls nginx
+fi
diff --git a/scripts/docker_swarm.sh b/scripts/docker_swarm_install.sh
similarity index 63%
rename from scripts/docker_swarm.sh
rename to scripts/docker_swarm_install.sh
index 8037590..19a4341 100755
--- a/scripts/docker_swarm.sh
+++ b/scripts/docker_swarm_install.sh
@@ -1,9 +1,17 @@
#!/bin/bash -x
exec > >(tee -i /tmp/"$(basename "$0" .sh)"_"$(date '+%Y-%m-%d_%H-%M-%S')".log) 2>&1
+# Configure base Docker service
salt -C 'I@docker:swarm' state.sls docker.host
+# Configure the Swarm master node
salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+# Send the grains to the Salt mine for the Swarm slave nodes
salt -C 'I@docker:swarm' state.sls salt.minion.grains
salt -C 'I@docker:swarm' mine.update
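+# Reload the Salt modules and give the mine data time to propagate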
+salt -C 'I@docker:swarm' saltutil.refresh_modules
+sleep 5
+# Configure Swarm slave nodes
salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+
+# List registered Docker Swarm nodes
salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
diff --git a/scripts/fuel_infra_install.sh b/scripts/infra_install.sh
similarity index 100%
rename from scripts/fuel_infra_install.sh
rename to scripts/infra_install.sh
diff --git a/scripts/kubernetes_install.sh b/scripts/kubernetes_install.sh
index c1b35c1..092843d 100755
--- a/scripts/kubernetes_install.sh
+++ b/scripts/kubernetes_install.sh
@@ -33,7 +33,3 @@
# Register addons
salt --state-output=terse -C 'I@kubernetes:master' --subset 1 state.sls kubernetes.master.setup
-
-# Nginx needs to be configured
-#salt --state-output=terse -C 'I@nginx:server' state.sls nginx
-# IMHO not related to k8s installation
diff --git a/scripts/opencontrail_compute_install.sh b/scripts/opencontrail_compute_install.sh
new file mode 100755
index 0000000..d4dbcd3
--- /dev/null
+++ b/scripts/opencontrail_compute_install.sh
@@ -0,0 +1,32 @@
+#!/bin/bash -x
+exec > >(tee -i /tmp/"$(basename "$0" .sh)"_"$(date '+%Y-%m-%d_%H-%M-%S')".log) 2>&1
+
+CWD="$(dirname "$(readlink -f "$0")")"
+
+# Import common functions
+COMMONS="$CWD"/common_functions.sh
+if [ ! -f "$COMMONS" ]; then
+ echo "File $COMMONS does not exist"
+ exit 1
+fi
+. "$COMMONS"
+
+# Configure compute nodes
+salt "cmp*" state.apply
+salt "cmp*" state.apply
+
+# Provision opencontrail virtual routers
+hosts=($(salt-call pillar.get linux:network:host | egrep 'cmp0.*:' | sed -e 's/ *//' -e 's/://'))
+vip=$(salt-call pillar.get _param:openstack_control_address | grep '^ ' | sed -e 's/ *//')
+nb=$(( ${#hosts[@]} - 1 ))
+for i in $(seq 0 $nb); do
+ h=${hosts[$i]}
+ ip=$(salt-call pillar.get linux:network:host:"${h}":address | grep '^ ' | sed -e 's/ *//')
+ salt -C 'I@opencontrail:control:id:1' cmd.run "/usr/share/contrail-utils/provision_vrouter.py --host_name $h --host_ip $ip --api_server_ip $vip --oper add --admin_user admin --admin_password workshop --admin_tenant_name admin"
+done
+
+# Reboot compute nodes
+salt "cmp*" system.reboot
+
+# Wait for all compute nodes in current deployment to be available
+wait_for "$(get_nodes_names "cmp[0-9]" | wc -l)" "cmp*"
diff --git a/scripts/opencontrail_control_install.sh b/scripts/opencontrail_control_install.sh
index eca1773..f83053f 100755
--- a/scripts/opencontrail_control_install.sh
+++ b/scripts/opencontrail_control_install.sh
@@ -4,10 +4,10 @@
CWD="$(dirname "$(readlink -f "$0")")"
# Import common functions
-COMMONS=$CWD/common_functions.sh
+COMMONS="$CWD"/common_functions.sh
if [ ! -f "$COMMONS" ]; then
- echo "File $COMMONS does not exist"
- exit 1
+ echo "File $COMMONS does not exist"
+ exit 1
fi
. "$COMMONS"
@@ -18,12 +18,12 @@
# Provision opencontrail control services
hosts=($(get_nodes_names "ctl[0-9]"))
-vip=$(salt-call pillar.get _param:cluster_vip_address | grep '^ ' | sed -e 's/ *//')
+vip=$(salt-call pillar.get _param:openstack_control_address | grep '^ ' | sed -e 's/ *//')
nb=$(( ${#hosts[@]} - 1 ))
for i in $(seq 0 $nb); do
- h=${hosts[$i]}
- ip=$(salt-call pillar.get linux:network:host:"${h}":address | grep '^ ' | sed -e 's/ *//')
- salt -C 'I@opencontrail:control:id:1' cmd.run "/usr/share/contrail-utils/provision_control.py --api_server_ip $vip --api_server_port 8082 --host_name $h --host_ip $ip --router_asn 64512 --admin_password workshop --admin_user admin --admin_tenant_name admin --oper add"
+ h=${hosts[$i]}
+ ip=$(salt-call pillar.get linux:network:host:"${h}":address | grep '^ ' | sed -e 's/ *//')
+ salt -C 'I@opencontrail:control:id:1' cmd.run "/usr/share/contrail-utils/provision_control.py --api_server_ip $vip --api_server_port 8082 --host_name $h --host_ip $ip --router_asn 64512 --admin_password workshop --admin_user admin --admin_tenant_name admin --oper add"
done
# Test opencontrail
diff --git a/scripts/openstack_compute_install.sh b/scripts/openstack_compute_install.sh
deleted file mode 100755
index 64597ca..0000000
--- a/scripts/openstack_compute_install.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/bash -x
-exec > >(tee -i /tmp/"$(basename "$0" .sh)"_"$(date '+%Y-%m-%d_%H-%M-%S')".log) 2>&1
-
-CWD="$(dirname "$(readlink -f "$0")")"
-
-# Import common functions
-COMMONS=$CWD/common_functions.sh
-if [ ! -f "$COMMONS" ]; then
- echo "File $COMMONS does not exist"
- exit 1
-fi
-. "$COMMONS"
-
-# Configure compute nodes
-salt "cmp*" state.apply
-salt "cmp*" state.apply
-
-# Provision opencontrail virtual routers
-hosts=($(salt-call pillar.get linux:network:host | egrep 'cmp0.*:' | sed -e 's/ *//' -e 's/://'))
-vip=$(salt-call pillar.get _param:cluster_vip_address | grep '^ ' | sed -e 's/ *//')
-nb=$(( ${#hosts[@]} - 1 ))
-for i in $(seq 0 $nb); do
- h=${hosts[$i]}
- ip=$(salt-call pillar.get linux:network:host:"${h}":address | grep '^ ' | sed -e 's/ *//')
- salt -C 'I@opencontrail:control:id:1' cmd.run "/usr/share/contrail-utils/provision_vrouter.py --host_name $h --host_ip $ip --api_server_ip $vip --oper add --admin_user admin --admin_password workshop --admin_tenant_name admin"
-done
-
-# Reboot compute nodes
-salt "cmp*" system.reboot
-
-# Wait for all compute nodes in current deployment to be available
-wait_for "$(get_nodes_names "cmp[0-9]" | wc -l)" "cmp*"
diff --git a/scripts/openstack_control_install.sh b/scripts/openstack_control_install.sh
index 8291844..86294b0 100755
--- a/scripts/openstack_control_install.sh
+++ b/scripts/openstack_control_install.sh
@@ -5,11 +5,12 @@
salt -C 'I@keystone:server' state.sls keystone.server -b 1
# populate keystone services/tenants/admins
salt -C 'I@keystone:client' state.sls keystone.client
+# salt-minion should be restarted in case keystone.client has changed the Salt configuration
+salt -C 'I@keystone:client' --async service.restart salt-minion; sleep 5
salt -C 'I@keystone:server' cmd.run ". /root/keystonerc; keystone service-list"
-# Install glance and ensure glusterfs clusters
+# Install glance
salt -C 'I@glance:server' state.sls glance -b 1
-salt -C 'I@glance:server' state.sls glusterfs.client
# Update fernet tokens before doing request on keystone server. Otherwise
# you will get an error like:
# "No encryption keys found; run keystone-manage fernet_setup to bootstrap one"
@@ -39,7 +40,9 @@
# Install ceilometer services
salt -C 'I@ceilometer:server' state.sls ceilometer -b 1
-salt -C 'I@heka:ceilometer_collector:enabled:True' state.sls heka.ceilometer_collector
# Install aodh services
salt -C 'I@aodh:server' state.sls aodh -b 1
+
+# Create the Nova resources (if any)
+salt -C 'I@nova:client' state.sls nova
diff --git a/scripts/openstack_infra_install.sh b/scripts/openstack_infra_install.sh
deleted file mode 100755
index ca56361..0000000
--- a/scripts/openstack_infra_install.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash -x
-exec > >(tee -i /tmp/"$(basename "$0" .sh)"_"$(date '+%Y-%m-%d_%H-%M-%S')".log) 2>&1
-
-# Install keepaliveds
-salt -C 'I@keepalived:cluster' state.sls keepalived -b 1
-# Check the VIPs
-salt -C 'I@keepalived:cluster' cmd.run "ip a | grep 172.16.10.2"
-
-# Install gluster
-salt -C 'I@glusterfs:server' state.sls glusterfs.server.service
-salt -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
-# Check the gluster status
-salt -C 'I@glusterfs:server' cmd.run "gluster peer status; gluster volume status" -b 1
-
-# Install rabbitmq
-salt -C 'I@rabbitmq:server' state.sls rabbitmq
-# Check the rabbitmq status
-salt -C 'I@rabbitmq:server' cmd.run "rabbitmqctl cluster_status"
-
-# Install galera
-salt -C 'I@galera:master' state.sls galera
-salt -C 'I@galera:slave' state.sls galera
-# Check galera status
-salt -C 'I@galera:master' mysql.status | grep -A1 wsrep_cluster_size
-salt -C 'I@galera:slave' mysql.status | grep -A1 wsrep_cluster_size
-
-# Install haproxy
-salt -C 'I@haproxy:proxy' state.sls haproxy
-salt -C 'I@haproxy:proxy' service.status haproxy
-salt -I 'haproxy:proxy' service.restart rsyslog
-
-# Install memcached
-salt -C 'I@memcached:server' state.sls memcached
diff --git a/scripts/ovs_compute_install.sh b/scripts/ovs_compute_install.sh
new file mode 100755
index 0000000..18eaf02
--- /dev/null
+++ b/scripts/ovs_compute_install.sh
@@ -0,0 +1,24 @@
+#!/bin/bash -x
+exec > >(tee -i /tmp/"$(basename "$0" .sh)"_"$(date '+%Y-%m-%d_%H-%M-%S')".log) 2>&1
+
+CWD="$(dirname "$(readlink -f "$0")")"
+
+# Import common functions
+COMMONS="$CWD"/common_functions.sh
+if [ ! -f "$COMMONS" ]; then
+ echo "File $COMMONS does not exist"
+ exit 1
+fi
+. "$COMMONS"
+
+# OVS deployment
+salt -C 'I@nova:compute' state.sls nova
+# If the compute nodes aren't in the default 'nova' AZ, the previous run will
+# fail because adding compute nodes to their AZ requires the compute services
+# to be registered.
+# So wait a bit and run the state once again
+sleep 10
+salt -C 'I@nova:compute' state.sls nova
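+# Configure the remaining services (Cinder volume, Neutron agent, Ceilometer agent)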
+salt -C 'I@cinder:volume' state.sls cinder
+salt -C 'I@neutron:compute' state.sls neutron
+salt -C 'I@ceilometer:agent' state.sls ceilometer
diff --git a/scripts/stacklightv2_infra_install.sh b/scripts/stacklightv2_infra_install.sh
new file mode 100755
index 0000000..35495ae
--- /dev/null
+++ b/scripts/stacklightv2_infra_install.sh
@@ -0,0 +1,30 @@
+#!/bin/bash -x
+exec > >(tee -i /tmp/"$(basename "$0" .sh)"_"$(date '+%Y-%m-%d_%H-%M-%S')".log) 2>&1
+
+# Configure Telegraf
+salt -C 'I@telegraf:agent' state.sls telegraf
+
+# Configure Elasticsearch/Kibana services
+salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+salt -C 'I@kibana:server' state.sls kibana.server -b 1
+salt -C 'I@elasticsearch:client' state.sls elasticsearch.client.service
+salt -C 'I@kibana:client' state.sls kibana.client.service
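+# Restart salt-minion so that the Salt modules installed by the client.service states get loaded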
+salt -C 'I@kibana:client or I@elasticsearch:client' --async service.restart salt-minion
+sleep 10
+salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+salt -C 'I@kibana:client' state.sls kibana.client
+
+# Collect grains needed to configure the services
+salt -C 'I@salt:minion' state.sls salt.minion.grains
+salt -C 'I@salt:minion' saltutil.refresh_modules
+salt -C 'I@salt:minion' mine.update
+sleep 5
+
+# Configure the services running in Docker Swarm
+salt -C 'I@docker:swarm:role:master' state.sls prometheus.server
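+# Pull the Prometheus, Alertmanager and Pushgateway images and tag them locally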
+for img in pushgateway alertmanager prometheus; do
+ salt -C 'I@docker:swarm' dockerng.pull "docker-sandbox.sandbox.mirantis.net/bkupidura/$img"
+ salt -C 'I@docker:swarm' dockerng.tag "docker-sandbox.sandbox.mirantis.net/bkupidura/$img:latest" "$img:latest"
+done
+salt -C 'I@docker:swarm:role:master' state.sls docker
+salt -C 'I@docker:swarm' dockerng.ps