Add initial backup support

* Shared backup template with backup feature configuration steps
* Initial structure for the backup manager
* Include backup configuration steps into ceph_rgw
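
The macros are meant to be consumed from deployment templates, for
example (illustrative usage, mirroring the ceph_rgw templates below):

    {% import 'shared-backup-restore.yaml' as BACKUP with context %}
    {{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
    {{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}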

Change-Id: I0b88eef4e0e5e6eab93488ac99d8ea9face1c205
diff --git a/tcp_tests/templates/shared-backup-restore.yaml b/tcp_tests/templates/shared-backup-restore.yaml
new file mode 100644
index 0000000..c85e684
--- /dev/null
+++ b/tcp_tests/templates/shared-backup-restore.yaml
@@ -0,0 +1,136 @@
+{# Collection of common macros shared across different deployments #}
+
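+{# Configures backupninja: reapplies salt.minion on the server and client, #}
+{# refreshes client grains and mine, then applies the backupninja state on both. #}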
+{%- macro MACRO_BACKUP_BACKUPNINJA() %}
+- description: Apply backup state on minions
+  cmd: salt -C 'I@backupninja:server or I@backupninja:client' state.sls salt.minion
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Refresh grains and mine for the backupninja client node
+  cmd: |
+    salt -C 'I@backupninja:client' state.sls salt.minion.grains
+    salt -C 'I@backupninja:client' mine.flush
+    salt -C 'I@backupninja:client' mine.update
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Apply the backupninja state to the backupninja client node
+  cmd: |
+    salt -C 'I@backupninja:client' state.sls backupninja
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Refresh grains for the backupninja server node
+  cmd: |
+    salt -C 'I@backupninja:server' state.sls salt.minion.grains
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Apply the backupninja state to the backupninja server node
+  cmd: |
+    salt -C 'I@backupninja:server' state.sls backupninja
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+{%- endmacro %}
+
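+{# Configures xtrabackup for MySQL: refreshes pillars, reapplies salt.minion, #}
+{# syncs client grains and mine, then applies the client and server states. #}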
+{%- macro MACRO_BACKUP_XTRABACKUP() %}
+
+- description: Refresh pillars
+  cmd: salt '*' saltutil.refresh_pillar
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Apply the salt.minion state
+  cmd: |
+    salt -C 'I@xtrabackup:client or I@xtrabackup:server' state.sls salt.minion
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Refresh grains for the xtrabackup client node
+  cmd: salt -C 'I@xtrabackup:client' saltutil.sync_grains
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Update the mine for the xtrabackup client node
+  cmd: |
+    salt -C 'I@xtrabackup:client' mine.flush
+    salt -C 'I@xtrabackup:client' mine.update
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Apply the xtrabackup client state
+  cmd: |
+    salt -C 'I@xtrabackup:client' state.sls openssh.client,xtrabackup
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Apply the xtrabackup server state
+  cmd: |
+    salt -C 'I@xtrabackup:server' state.sls xtrabackup
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+{%- endmacro %}
+
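+{# Configures Ceph backups: same flow as xtrabackup, targeting the #}
+{# ceph:backup:client and ceph:backup:server pillar roles. #}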
+{%- macro MACRO_BACKUP_CEPH() %}
+
+- description: Refresh pillars
+  cmd: salt '*' saltutil.refresh_pillar
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Apply the salt.minion state
+  cmd: |
+    salt -C 'I@ceph:backup:client or I@ceph:backup:server' state.sls salt.minion
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Refresh grains for the ceph backup client node
+  cmd: salt -C 'I@ceph:backup:client' saltutil.sync_grains
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Update the mine for the ceph backup client node
+  cmd: |
+    salt -C 'I@ceph:backup:client' mine.flush
+    salt -C 'I@ceph:backup:client' mine.update
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Apply the ceph backup client state
+  cmd: |
+    salt -C 'I@ceph:backup:client' state.sls openssh.client,ceph.backup
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Apply the ceph backup server state
+  cmd: |
+    salt -C 'I@ceph:backup:server' state.sls ceph.backup
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+{%- endmacro %}
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index b042a01..f779d06 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -269,6 +269,8 @@
     find ${REPLACE_DIRS} -type f -exec sed -i 's/172\.16\.10/==IPV4_NET_CONTROL_PREFIX==/g' {} +
     find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.1\.0/==IPV4_NET_TENANT_PREFIX==/g' {} +
     find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.16\.0/==IPV4_NET_EXTERNAL_PREFIX==/g' {} +
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.60\.0/==IPV4_NET_CONTROL_PREFIX==/g' {} +
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.70\.0/==IPV4_NET_ADMIN_PREFIX==/g' {} +
 
     find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_ADMIN_PREFIX==/{{ IPV4_NET_ADMIN_PREFIX }}/g' {} +
     find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_CONTROL_PREFIX==/{{ IPV4_NET_CONTROL_PREFIX }}/g' {} +
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
index 6ad7b6e..30b046a 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
@@ -4,6 +4,8 @@
 {% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import DOMAIN_NAME with context %}
 {% import 'shared-salt.yaml' as SHARED with context %}
 
+{% import 'shared-backup-restore.yaml' as BACKUP with context %}
+
 # Install ceph mons
 - description: Update grains
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
@@ -165,4 +167,5 @@
   retry: {count: 2, delay: 5}
   skip_fail: false
 
+{{ BACKUP.MACRO_BACKUP_CEPH() }}
 {{ SHARED.RUN_NEW_TEMPEST() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/common-services.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/common-services.yaml
index 121d4c4..f285934 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/common-services.yaml
@@ -1,5 +1,7 @@
 {% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
+{% import 'shared-backup-restore.yaml' as BACKUP with context %}
+
 # Install support services
 - description: Install keepalived on ctl01
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
@@ -115,3 +117,6 @@
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 3, delay: 10}
   skip_fail: false
+
+{{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
+{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/salt.yaml
index 80e29a1..94e5308 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/salt.yaml
@@ -11,7 +11,7 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "runtest" "backupninja"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "runtest" "backupninja" "glusterfs"') }}
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
@@ -37,4 +37,4 @@
   cmd: salt 'cmp02*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
-  skip_fail: false
\ No newline at end of file
+  skip_fail: false