Fix for SaltMaster backup test procedure

Add job to test adding the RGW node
Add maxfail parameter to fail test scenarios immediately after the first failure
Clarify parameters for testing the SaltMaster backup with/without MAAS

PROD-36273


Change-Id: I3d459ece4d969888313060f86a91bb6a997d6ad2
diff --git a/jobs/templates/test-scenarios.yml b/jobs/templates/test-scenarios.yml
index e6a8f06..af90602 100644
--- a/jobs/templates/test-scenarios.yml
+++ b/jobs/templates/test-scenarios.yml
@@ -63,27 +63,27 @@
     test_scenario:
 
       - backup-saltmaster:
-          run-test-opts: '-k TestBackupRestoreMaster'
+          run-test-opts: '--maxfail=1 -k "TestBackupRestoreMaster and not maas"'
           deployment: heat-cicd-queens-dvr-sl
           display-name: Backup/Restore SaltMaster
 
       - backup-saltmaster-with-maas:
-         run-test-opts: '-k TestBackupRestoreMaster'
+         run-test-opts: '--maxfail=1 -k "TestBackupRestoreMaster and maas"'
          deployment: bm-cicd-queens-ovs-maas
          display-name: Backup/Restore SaltMaster (with MAAS)
 
       - backup-cassandra-queens-contrail-sl:
-         run-test-opts: '-k TestBackupRestoreCassandra'
+         run-test-opts: '--maxfail=1 -k TestBackupRestoreCassandra'
          deployment: heat-cicd-queens-contrail41-sl
          display-name: Backup/restore Cassandra
 
       - backup-galera-queens-sl:
-          run-test-opts: '-k TestBackupRestoreGalera'
+          run-test-opts: '--maxfail=1 -k TestBackupRestoreGalera'
           deployment: heat-cicd-queens-dvr-sl
           display-name: Backup/restore Galera
 
       - backup-zookeeper-queens-sl:
-          run-test-opts: '-k TestBackupRestoreZooKeeper'
+          run-test-opts: '--maxfail=1 -k TestBackupRestoreZooKeeper'
           deployment: heat-cicd-queens-contrail41-sl
           display-name: Backup/restore Zookeeper
 
@@ -94,15 +94,19 @@
     name: 'ceph-tests'
     test_scenario:
       - ceph_osd-queens-dvr-sl:
-          run-test-opts: '-k TestCephOsd'
+          run-test-opts: '--maxfail=1 -k TestCephOsd'
           deployment: heat-cicd-queens-dvr-sl
           display-name: Add/Remove OSD node
       - ceph_cmn-queens-dvr-sl:
-          run-test-opts: '-k TestCephMon'
+          run-test-opts: '--maxfail=1 -k TestCephMon'
           deployment: heat-cicd-queens-dvr-sl
           display-name: Add/Remove CMN node
       - ceph_mgr-queens-dvr-sl:
-          run-test-opts: '-k TestCephMgr'
+          run-test-opts: '--maxfail=1 -k TestCephMgr'
+          deployment: heat-cicd-queens-dvr-sl
+          display-name: Add/Remove MGR node
+      - ceph_rgw-queens-dvr-sl:
+          run-test-opts: '--maxfail=1 -k TestCephRgw'
           deployment: heat-cicd-queens-dvr-sl
-          display-name: Add/Remove MGR node
+          display-name: Add/Remove RGW node
     jobs:
diff --git a/tcp_tests/tests/system/test_backup_restore.py b/tcp_tests/tests/system/test_backup_restore.py
index 7d29e4f..275f00c 100644
--- a/tcp_tests/tests/system/test_backup_restore.py
+++ b/tcp_tests/tests/system/test_backup_restore.py
@@ -97,11 +97,11 @@
         for path in (self.MAAS_YAML, self.SM_YAML):
             reclass_actions.add_key(
                 "parameters.backupninja.client.backup_times.hour",
-                "\"'1'\"",
+                "1",
                 path)
             reclass_actions.add_key(
                 "parameters.backupninja.client.backup_times.minute",
-                "\"'0'\"",
+                "0",
                 path)
 
     def check_backup(self, ssh, server, path, client_name, dirs):
@@ -171,7 +171,7 @@
             enabled: True
             client:
               backup_times:
-                hour: *
+                hour: '*'
                 minute: 10
         """