Review tests

Add salt-minion restart in add_ceph_node tests
Bring the Ceph health check back in ceph_failover tests
Wait for healthy Ceph after each node restart in ceph_failover tests
Change cvp-sanity and tempest parameters for ceph_failover tests
Add JJB template for Ceph Failover tests
Fix parameters to start SaltMaster backup/restore tests

PROD-36643

Change-Id: I52017158d07373d7cb90846e42edb4276e385552
diff --git a/jobs/templates/test-scenarios.yml b/jobs/templates/test-scenarios.yml
index 70a384a..9543e36 100644
--- a/jobs/templates/test-scenarios.yml
+++ b/jobs/templates/test-scenarios.yml
@@ -63,12 +63,16 @@
     test_scenario:
 
       - backup-saltmaster:
-          run-test-opts: '--maxfail=1 -k TestBackupRestoreMaster and not maas'
+          run-test-opts: |-
+            tcp_tests/tests/system/test_backup_restore.py::TestBackupRestoreMaster::test_salt_master_manual_backup_restore_pipeline \
+            tcp_tests/tests/system/test_backup_restore.py::TestBackupRestoreMaster::test_salt_master_manual_backup_restore
           deployment: heat-cicd-queens-dvr-sl
           display-name: Backup/Restore SaltMaster
 
       - backup-saltmaster-with-maas:
-         run-test-opts: '--maxfail=1 -k TestBackupRestoreMaster and maas'
+         run-test-opts: |-
+           tcp_tests/tests/system/test_backup_restore.py::TestBackupRestoreMaster::test_maas_manual_backup_restore_pipeline \
+           tcp_tests/tests/system/test_backup_restore.py::TestBackupRestoreMaster::test_maas_backup_restore_manual
          deployment: bm-cicd-queens-ovs-maas
          display-name: Backup/Restore SaltMaster (with MAAS)
 
@@ -111,6 +115,10 @@
           run-test-opts: '--maxfail=1 -k TestCephRgw'
           deployment: heat-cicd-queens-dvr-sl
           display-name: Add/Remove RGW node
+      - ceph_failover:
+          run-test-opts: '--maxfail=1 -k TestFailoverCeph'
+          deployment: heat-cicd-queens-dvr-sl
+          display-name: Failover tests for Ceph nodes
     jobs:
       - '{test_scenario}'
     logrotate: