Add initial backup support

* shared backup template with backup feature configuration steps
* initial structure for the backup restore manager (usage sketch below)
* include backup configuration steps in the ceph_rgw templates

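A minimal usage sketch from a test module (the `underlay` and
`salt_actions` fixture names are illustrative, not part of this
change):

    from tcp_tests.managers import backup_restore_manager

    backup = backup_restore_manager.BackupRestoreManager(
        underlay, salt_actions)
    backup.create_backup(tgt='I@backupninja:client')
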
Change-Id: I0b88eef4e0e5e6eab93488ac99d8ea9face1c205
diff --git a/tcp_tests/managers/backup_restore_manager.py b/tcp_tests/managers/backup_restore_manager.py
new file mode 100644
index 0000000..e3b8c23
--- /dev/null
+++ b/tcp_tests/managers/backup_restore_manager.py
@@ -0,0 +1,161 @@
+#    Copyright 2018 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tcp_tests import logger
+
+
+LOG = logger.logger
+
+
+class BackupRestoreManager(object):
+    """Helper manager for execution backup restore"""
+
+    backup_cmd = 'backupninja -n --run /etc/backup.d/200.backup.rsync'
+
+    def __init__(self, underlay, salt_api, backup_cmd=None):
+        self.underlay = underlay
+        self.__salt_api = salt_api
+        self.backup_cmd = backup_cmd or self.backup_cmd
+
+    @property
+    def salt_api(self):
+        return self.__salt_api
+
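+    # Run the backup command (by default the backupninja rsync handler
+    # configured above) on the targeted minions.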
+    def create_backup(self, tgt, backup_cmd=None):
+        return self.salt_api.enforce_state(
+            tgt, 'cmd.run', backup_cmd or self.backup_cmd)
+
+    def restore_salt_master(self, tgt):
+        return self.salt_api.local(tgt, 'salt.master.restore')
+
+    def restore_salt_minion(self, tgt):
+        return self.salt_api.local(tgt, 'salt.minion.restore')
+
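+    # Backupninja performs the MySQL backup in two steps: the 101.mysql
+    # handler dumps the databases locally, then the 200.backup.rsync
+    # handler ships the dump to the backupninja server.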
+    def create_mysql_backup_backupninja(self, tgt):
+        rets = []
+        res = self.salt_api.enforce_state(
+            tgt, 'cmd.run',
+            'backupninja -n --run /etc/backup.d/101.mysql')
+        rets.append(res)
+        res_rsync = self.salt_api.enforce_state(
+            tgt, 'cmd.run',
+            'backupninja -n --run /etc/backup.d/200.backup.rsync')
+        rets.append(res_rsync)
+        return rets
+
+    def restore_mysql_backupninja(self, tgt):
+        # Running this state restores the databases and creates a file
+        # for every restored database in /root/mysql/flags.
+        return self.salt_api.local(tgt, 'mysql.client')
+
+    def create_mysql_xtrabackup(self, tgt):
+        # Should be run on the mysql master node
+        return self.salt_api.enforce_state(
+            tgt, 'cmd.run', '/usr/local/bin/innobackupex-runner.sh')
+
+    def check_mysql_xtrabackup_rsynced(self, tgt='I@xtrabackup:server'):
+        return self.salt_api.enforce_state(
+            tgt, 'cmd.run', 'ls /var/backups/mysql/xtrabackup/full')
+
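+    # The helpers below follow the Galera restore flow: stop the slaves,
+    # remove stale InnoDB logs, stop the master, move the old datadir
+    # aside, apply the xtrabackup restore state, then start mysql again
+    # and verify the cluster size.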
+    def stop_mysql_slave(self, tgt='I@galera:slave'):
+        return self.salt_api.enforce_state(tgt, 'service.stop', 'mysql')
+
+    def remove_mysql_logs(self, tgt='I@galera:slave'):
+        return self.salt_api.enforce_state(
+            tgt, 'cmd.run', 'rm /var/lib/mysql/ib_logfile*')
+
+    def stop_mysql_master(self, tgt='I@galera:master'):
+        return self.salt_api.enforce_state(tgt, 'service.stop', 'mysql')
+
+    def disconnect_wsrep_master(self, tgt='I@galera:master'):
+        # TODO: find the proper way to update wsrep_cluster_address
+        return self.salt_api.enforce_state(
+            tgt, 'cmd.run', 'wsrep_cluster_address=gcomm://')
+
+    def move_dbs_files_to_new_location(self, tgt='I@galera:master'):
+        cmds = ['mkdir -p /root/mysql/mysql.bak/',
+                'mv /var/lib/mysql/* /root/mysql/mysql.bak',
+                'rm /var/lib/mysql/.galera_bootstrap']
+        rets = []
+        for cmd in cmds:
+            res = self.salt_api.enforce_state(tgt, 'cmd.run', cmd)
+            rets.append(res)
+        return rets
+
+    def check_dbs_files_removed(self, tgt='I@galera:master'):
+        cmds = ['ls /var/lib/mysql/',
+                'ls -ld /var/lib/mysql/.?*']
+        rets = []
+        for cmd in cmds:
+            res = self.salt_api.enforce_state(tgt, 'cmd.run', cmd)
+            rets.append(res)
+        return rets
+
+    # Run the xtrabackup state on the node where the backup is located
+    def run_xtrabackup(self, tgt):
+        return self.salt_api.local(tgt, 'xtrabackup')
+
+    def start_mysql(self):
+        tgts = ['I@galera:master', 'I@galera:slave']
+        ret = []
+        for tgt in tgts:
+            res = self.salt_api.enforce_state(tgt, 'service.start', 'mysql')
+            ret.append(res)
+        return ret
+
+    def check_galera_cluster(self, tgt='I@galera:master'):
+        return self.salt_api.enforce_state(
+            tgt, 'cmd.run',
+            'salt-call mysql.status | grep -A1 wsrep_cluster_size')
+
+    # ################ Backup/Restore: Glance ################
+
+    def copy_glance_images_to_backup(self, path_to_backup,
+                                     tgt="I@glance:server and *01*"):
+        cmd = 'cp -a /var/lib/glance/images/. {}'.format(path_to_backup)
+        return self.salt_api.enforce_state(
+            tgt, 'cmd.run', cmd)
+
+    def copy_glance_images_from_backup(self, path_to_backup,
+                                       tgt="I@glance:server and *01*"):
+        cmd = 'cp -a {}/. /var/lib/glance/images/'.format(path_to_backup)
+        return self.salt_api.enforce_state(
+            tgt, 'cmd.run', cmd)
+
+    def check_images_after_backup(self, tgt="I@keystone:client"):
+        # TODO: if the ownership of the Glance image
+        # files is lost, run the following commands:
+        # salt -C 'I@glance:server' cmd.run
+        # "chown glance:glance <IMAGE_FILE_NAME>"
+        # salt -C 'I@glance:server' cmd.run "chmod 640 <IMAGE_FILE_NAME>"
+        cmd = '. /root/keystonercv3; openstack image list'
+        return self.salt_api.enforce_state(tgt, 'cmd.run', cmd)
+
+    # ###### Backup/Restore: Cinder volumes and snapshots ######
+    # TODO: verify that a complete backup was created on
+    # the MySQL Galera Database Master node:
+    # ls /var/backups/mysql/xtrabackup/full
+    # TODO(tleontovich): add method to check needed configs
+    # TODO(tleontovich): check pillars
+    # TODO(tleontovich): check that the backup is created
+    # and that restore actually restores it
diff --git a/tcp_tests/templates/shared-backup-restore.yaml b/tcp_tests/templates/shared-backup-restore.yaml
new file mode 100644
index 0000000..c85e684
--- /dev/null
+++ b/tcp_tests/templates/shared-backup-restore.yaml
@@ -0,0 +1,136 @@
+{# Collection of common macros shared across different deployments #}
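+{#
+  Usage from a deployment template (as wired up in the
+  virtual-mcp-pike-dvr-ceph-rgw templates):
+    {% import 'shared-backup-restore.yaml' as BACKUP with context %}
+    {{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
+#}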
+
+{%- macro MACRO_BACKUP_BACKUPNINJA() %}
+- description: Apply the salt.minion state on the backupninja nodes
+  cmd: salt -C 'I@backupninja:server or I@backupninja:client' state.sls salt.minion
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Refresh grains and mine for the backupninja client node
+  cmd: |
+    salt -C 'I@backupninja:client' state.sls salt.minion.grains
+    salt -C 'I@backupninja:client' mine.flush
+    salt -C 'I@backupninja:client' mine.update
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Apply the backupninja state to the backupninja client node
+  cmd: |
+    salt -C 'I@backupninja:client' state.sls backupninja
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Refresh grains for the backupninja server node
+  cmd: |
+    salt -C 'I@backupninja:server' state.sls salt.minion.grains
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Apply the backupninja state to the backupninja server node
+  cmd: |
+    salt -C 'I@backupninja:server' state.sls backupninja
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+{%- endmacro %}
+
+{%- macro MACRO_BACKUP_XTRABACKUP() %}
+
+- description: Refresh pillars
+  cmd: salt '*' saltutil.refresh_pillar
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Apply the salt.minion state
+  cmd: |
+    salt -C 'I@xtrabackup:client or I@xtrabackup:server' state.sls salt.minion
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Refresh grains for the xtrabackup client node
+  cmd: salt -C 'I@xtrabackup:client' saltutil.sync_grains
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Update the mine for the xtrabackup client node
+  cmd: |
+    salt -C 'I@xtrabackup:client' mine.flush
+    salt -C 'I@xtrabackup:client' mine.update
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Apply the xtrabackup client state
+  cmd: |
+    salt -C 'I@xtrabackup:client' state.sls openssh.client,xtrabackup
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Apply the xtrabackup server state
+  cmd: |
+    salt -C 'I@xtrabackup:server' state.sls xtrabackup
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+{%- endmacro %}
+
+{%- macro MACRO_BACKUP_CEPH() %}
+
+- description: Refresh pillars
+  cmd: salt '*' saltutil.refresh_pillar
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Apply the salt.minion state
+  cmd: |
+    salt -C 'I@ceph:backup:client or I@ceph:backup:server' state.sls salt.minion
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Refresh grains for the ceph backup client node
+  cmd: salt -C 'I@ceph:backup:client' saltutil.sync_grains
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Update the mine for the ceph backup client node
+  cmd: |
+    salt -C 'I@ceph:backup:client' mine.flush
+    salt -C 'I@ceph:backup:client' mine.update
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Apply the backup client state
+  cmd: |
+    salt -C 'I@ceph:backup:client' state.sls openssh.client,ceph.backup
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+- description: Apply the backup server state
+  cmd: |
+    salt -C 'I@ceph:backup:server' state.sls ceph.backup
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 1}
+  skip_fail: false
+
+{%- endmacro %}
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index b042a01..f779d06 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -269,6 +269,10 @@
     find ${REPLACE_DIRS} -type f -exec sed -i 's/172\.16\.10/==IPV4_NET_CONTROL_PREFIX==/g' {} +
     find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.1\.0/==IPV4_NET_TENANT_PREFIX==/g' {} +
     find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.16\.0/==IPV4_NET_EXTERNAL_PREFIX==/g' {} +
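+    # Also map the hardcoded 10.60.0.x (control) and 10.70.0.x (admin)
+    # prefixes to the templated networks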
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.60\.0/==IPV4_NET_CONTROL_PREFIX==/g' {} +
+    find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.70\.0/==IPV4_NET_ADMIN_PREFIX==/g' {} +
 
     find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_ADMIN_PREFIX==/{{ IPV4_NET_ADMIN_PREFIX }}/g' {} +
     find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_CONTROL_PREFIX==/{{ IPV4_NET_CONTROL_PREFIX }}/g' {} +
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
index 6ad7b6e..30b046a 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
@@ -4,6 +4,8 @@
 {% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import DOMAIN_NAME with context %}
 {% import 'shared-salt.yaml' as SHARED with context %}
 
+{% import 'shared-backup-restore.yaml' as BACKUP with context %}
+
 # Install ceph mons
 - description: Update grains
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
@@ -165,4 +167,5 @@
   retry: {count: 2, delay: 5}
   skip_fail: false
 
+{{ BACKUP.MACRO_BACKUP_CEPH() }}
 {{ SHARED.RUN_NEW_TEMPEST() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/common-services.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/common-services.yaml
index 121d4c4..f285934 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/common-services.yaml
@@ -1,5 +1,7 @@
 {% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CFG01 with context %}
 
+{% import 'shared-backup-restore.yaml' as BACKUP with context %}
+
 # Install support services
 - description: Install keepalived on ctl01
   cmd: salt --hard-crash --state-output=mixed --state-verbose=False
@@ -115,3 +117,7 @@
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 3, delay: 10}
   skip_fail: false
+
+{{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
+{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
+
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/salt.yaml
index 80e29a1..94e5308 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/salt.yaml
@@ -11,7 +11,7 @@
 
 {{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
 
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "runtest" "backupninja"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "runtest" "backupninja" "glusterfs"') }}
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
 
@@ -37,4 +37,4 @@
   cmd: salt 'cmp02*' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
-  skip_fail: false
\ No newline at end of file
+  skip_fail: false