Fix modules for mk22-qa-lab01

- Load missing modules after upgrading the kernel to 4.4 on Ubuntu 14.04
- Move workaround for cassandra memory consumption to the proper file
- Update the salt steps to match the upstream scripts
- Switch to the master branch for the mk-lab-salt-models
diff --git a/tcp_tests/templates/common-services/mk22-qa-lab01-common-services.yaml b/tcp_tests/templates/common-services/mk22-qa-lab01-common-services.yaml
index d3980d1..96d38d6 100644
--- a/tcp_tests/templates/common-services/mk22-qa-lab01-common-services.yaml
+++ b/tcp_tests/templates/common-services/mk22-qa-lab01-common-services.yaml
@@ -1,129 +1,110 @@
 # Install support services
-- description: Install keepalived on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' state.sls
-    keepalived
-  node_name: cfg01.mk22-qa-lab01.local
-  retry: {count: 3, delay: 5}
-  skip_fail: true
-- description: Install keepalived on other controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
-    keepalived -b 1
+
+- description: Install keepalived
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster' state.sls keepalived -b 1
   node_name: cfg01.mk22-qa-lab01.local
   retry: {count: 10, delay: 10}
   skip_fail: true
+
 - description: Check the VIP
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run
-    'ip a | grep 172.16.10.254' | grep -B1 172.16.10.254
-  node_name: cfg01.mk22-qa-lab01.local
-  retry: {count: 3, delay: 5}
-  skip_fail: false
-
-- description: Install keepalived on primary database server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'dbs01*' state.sls
-    keepalived
-  node_name: cfg01.mk22-qa-lab01.local
-  retry: {count: 3, delay: 5}
-  skip_fail: true
-- description: Install keepalived on other database servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'dbs*' state.sls
-    keepalived -b 1
-  node_name: cfg01.mk22-qa-lab01.local
-  retry: {count: 10, delay: 10}
-  skip_fail: true
-- description: Check the database VIP
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'dbs*' cmd.run
-    'ip a | grep 172.16.10.252' | grep -B1 172.16.10.252
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@keepalived:cluster' cmd.run 'ip a | grep 172.16.10.2' | grep -B1 172.16.10.2
   node_name: cfg01.mk22-qa-lab01.local
   retry: {count: 3, delay: 5}
   skip_fail: false
 
 
-- description: Install glusterfs on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
-    glusterfs.server.service
+- description: Install glusterfs
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' state.sls glusterfs.server.service
   node_name: cfg01.mk22-qa-lab01.local
   retry: {count: 3, delay: 5}
   skip_fail: false
+
 - description: Setup glusterfs on primary controller
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' state.sls
-    glusterfs.server.setup
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
   node_name: cfg01.mk22-qa-lab01.local
   retry: {count: 3, delay: 5}
   skip_fail: false
-- description: Setup glusterfs on other controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
-    glusterfs.server.setup -b 1
-  node_name: cfg01.mk22-qa-lab01.local
-  retry: {count: 3, delay: 5}
-  skip_fail: false
+
 - description: Check the gluster status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-    'gluster peer status; gluster volume status'
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
   node_name: cfg01.mk22-qa-lab01.local
   retry: {count: 3, delay: 5}
   skip_fail: false
-- description: Install RabbitMQ on all database servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'dbs*' state.sls
-    rabbitmq
+
+
+- description: Install RabbitMQ
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server' state.sls rabbitmq
   node_name: cfg01.mk22-qa-lab01.local
   retry: {count: 3, delay: 5}
   skip_fail: false
+
 - description: Check the rabbitmq status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'dbs*' cmd.run
-    'rabbitmqctl cluster_status'
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
   node_name: cfg01.mk22-qa-lab01.local
   retry: {count: 3, delay: 5}
   skip_fail: false
+
+
 - description: '*Workaround* Update salt-formula-galera on config node to the latest
     version'
   cmd: apt-get -y --force-yes install salt-formula-galera
   node_name: cfg01.mk22-qa-lab01.local
   retry: {count: 3, delay: 5}
   skip_fail: false
-- description: Install Galera on first dbs01 server
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'dbs01*' state.sls
-    galera
+
+- description: Install Galera on first server
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:master' state.sls galera
   node_name: cfg01.mk22-qa-lab01.local
   retry: {count: 3, delay: 5}
   skip_fail: false
-- description: Install Galera on other dbs* servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'dbs*' state.sls
-    galera
+
+- description: Install Galera on other servers
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:slave' state.sls galera
   node_name: cfg01.mk22-qa-lab01.local
   retry: {count: 3, delay: 5}
   skip_fail: false
+
 - description: Check mysql status
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' mysql.status | grep -A1 wsrep_incoming_addresses
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
   node_name: cfg01.mk22-qa-lab01.local
   retry: {count: 3, delay: 5}
   skip_fail: true
-- description: Install haproxy on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
-    haproxy
+
+
+- description: Install haproxy
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' state.sls haproxy
   node_name: cfg01.mk22-qa-lab01.local
   retry: {count: 3, delay: 5}
   skip_fail: false
-- description: Check haproxy status on controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' service.status
-    haproxy
+
+- description: Check haproxy status
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.status haproxy
   node_name: cfg01.mk22-qa-lab01.local
   retry: {count: 3, delay: 5}
   skip_fail: false
-- description: Install haproxy on all database servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'dbs*' state.sls
-    haproxy
+
+- description: Restart rsyslog
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@haproxy:proxy' service.restart rsyslog
   node_name: cfg01.mk22-qa-lab01.local
   retry: {count: 3, delay: 5}
   skip_fail: false
-- description: Check haproxy status on database servers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'dbs*' service.status
-    haproxy
-  node_name: cfg01.mk22-qa-lab01.local
-  retry: {count: 3, delay: 5}
-  skip_fail: false
+
 - description: Install memcached on all controllers
-  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' state.sls
-    memcached
+  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+    -C 'I@memcached:server' state.sls memcached
   node_name: cfg01.mk22-qa-lab01.local
   retry: {count: 3, delay: 5}
   skip_fail: false