Fix Keepalived priority parameter for StackLight monitor VIP

Prior to this change, all Keepalived instances (except for the OpenStack VIP)
shared the same priority, which led to unpredictable master elections.
Renaming the per-node parameter to keepalived_stacklight_monitor_vip_priority
lets the StackLight monitor VIP carry its own distinct priorities (103/102/101).
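
As a rough illustration only, the renamed parameter would be consumed by a
per-cluster Keepalived VRRP instance definition along these lines (the class
path, instance name, and key layout are assumptions, not part of this change):

    # Hypothetical cluster-level consumer of the renamed parameter
    parameters:
      keepalived:
        cluster:
          enabled: true
          instance:
            stacklight_monitor_vip:
              address: ${_param:stacklight_monitor_address}
              # Distinct per-node values (103/102/101) give node01 the
              # highest priority, so the VRRP master election is deterministic.
              priority: ${_param:keepalived_stacklight_monitor_vip_priority}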

Change-Id: Ifdc468b83f904b36489830678e550fc82c206985
diff --git a/reclass/storage/system/stacklight_server_cluster.yml b/reclass/storage/system/stacklight_server_cluster.yml
index 26b9ebe..818e732 100644
--- a/reclass/storage/system/stacklight_server_cluster.yml
+++ b/reclass/storage/system/stacklight_server_cluster.yml
@@ -20,7 +20,7 @@
             salt_master_host: ${_param:reclass_config_master}
             linux_system_codename: xenial
             single_address: ${_param:stacklight_monitor_node01_address}
-            keepalived_vip_priority: 103
+            keepalived_stacklight_monitor_vip_priority: 103
             rabbitmq_cluster_role: master
         stacklight_server_node02:
           name: ${_param:stacklight_monitor_node02_hostname}
@@ -33,7 +33,7 @@
             salt_master_host: ${_param:reclass_config_master}
             linux_system_codename: xenial
             single_address: ${_param:stacklight_monitor_node02_address}
-            keepalived_vip_priority: 102
+            keepalived_stacklight_monitor_vip_priority: 102
             rabbitmq_cluster_role: slave
         stacklight_server_node03:
           name: ${_param:stacklight_monitor_node03_hostname}
@@ -46,5 +46,5 @@
             salt_master_host: ${_param:reclass_config_master}
             linux_system_codename: xenial
             single_address: ${_param:stacklight_monitor_node03_address}
-            keepalived_vip_priority: 101
+            keepalived_stacklight_monitor_vip_priority: 101
             rabbitmq_cluster_role: slave