Merge "Optimize checking SSH connectivity PROD-37096"
diff --git a/jobs/pipelines/deploy-cicd-and-run-tests.groovy b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
index 3c0b328..c01a820 100644
--- a/jobs/pipelines/deploy-cicd-and-run-tests.groovy
+++ b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
@@ -222,6 +222,20 @@
                     } // node
                 }
             }
+            // Run the pike-to-queens upgrade job only for pike proposed
+            if (env.AUTO_UPGRADE_TO_QUEENS == "true" && currentBuild.result == 'SUCCESS') {
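+                // Trigger the downstream upgrade job asynchronously: wait: false
+                // does not block this build, and propagate: false keeps its
+                // result from affecting the current build status.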
+                def deploy = build job: "os-update-pike-to-queens",
+                    parameters: [
+                        string(name: 'PARENT_NODE_NAME', value: "openstack_slave_${env.ENV_NAME}"),
+                        string(name: 'TCP_QA_REFS', value: env.TCP_QA_REFS),
+                        string(name: 'PASSED_STEPS', value: steps),
+                        string(name: 'TEMPEST_TEST_SUITE_NAME', value: env.TEMPEST_TEST_SUITE_NAME),
+                        string(name: 'NODE', value: "openstack_slave_${env.ENV_NAME}"),
+                        string(name: 'RUN_TEST_OPTS', value: '-k TestUpdatePikeToQueens')
+                    ],
+                    wait: false,
+                    propagate: false
+            }
             //run upgrade env to proposed
             if (env.RUN_UPGRADE_AFTER_JOB == "true" && currentBuild.result == 'SUCCESS') {
                 network_backend = env.PLATFORM_STACK_INSTALL.contains("contrail") ? 'contrail' : 'dvr'
diff --git a/jobs/pipelines/self-deploy-jobs.groovy b/jobs/pipelines/self-deploy-jobs.groovy
index 57f1c9e..2d048be 100644
--- a/jobs/pipelines/self-deploy-jobs.groovy
+++ b/jobs/pipelines/self-deploy-jobs.groovy
@@ -12,7 +12,7 @@
                   virtualenv -p python3 .venv
                 fi
                 if [ -z "\$(./.venv/bin/pip freeze | grep  jenkins-job-builder)" ]; then
-                   ./.venv/bin/pip install 'jenkins-job-builder>=2.0.0.0b2'
+                   ./.venv/bin/pip install 'jenkins-job-builder>=2.0.0.0b2' dataclasses
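+                   # NOTE: 'dataclasses' is assumed to be the stdlib backport
+                   # needed when running under Python 3.6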
                 fi
                 """
             }
diff --git a/jobs/pipelines/swarm-run-pytest.groovy b/jobs/pipelines/swarm-run-pytest.groovy
index 21fdb9e..3b1ee17 100644
--- a/jobs/pipelines/swarm-run-pytest.groovy
+++ b/jobs/pipelines/swarm-run-pytest.groovy
@@ -66,6 +66,7 @@
 
                         shared.run_sh(sources + installed + """
                             mkdir -p tmp
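+                            # keep a copy of the pytest options used for this run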
+                            echo "${RUN_TEST_OPTS}" > tmp/test-run.txt
                             export PYTHONHTTPSVERIFY=0
                             export TESTS_CONFIGS=${ENV_NAME}_salt_deployed.ini
                             export ENV_MANAGER=$ENV_MANAGER  # use 'hardware' fixture to manage fuel-devops environment
diff --git a/jobs/templates/heat-cicd-pike-dvr-sl.yml b/jobs/templates/heat-cicd-pike-dvr-sl.yml
index 04a28b7..3c1fd06 100644
--- a/jobs/templates/heat-cicd-pike-dvr-sl.yml
+++ b/jobs/templates/heat-cicd-pike-dvr-sl.yml
@@ -210,6 +210,10 @@
         default: true
         description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
         name: UPGRADE_SALTSTACK
+    - bool:
+        default: false
+        description: 'Run the pike-to-queens upgrade job after deploy and tests'
+        name: AUTO_UPGRADE_TO_QUEENS
     - text:
         default: ''
         description: |-
diff --git a/tcp_tests/tests/system/test_backup_restore_cassandra.py b/tcp_tests/tests/system/test_backup_restore_cassandra.py
index a6170dd..b14e9b1 100644
--- a/tcp_tests/tests/system/test_backup_restore_cassandra.py
+++ b/tcp_tests/tests/system/test_backup_restore_cassandra.py
@@ -48,6 +48,32 @@
                                   " No Network found for " +\
                                   "{}".format(network_name)
 
+    def wait_cassandra(self, salt_actions, target, time_sec=180):
+        """Wait until Cassandra accepts connections on the configdb port.
+
+        Polls the nodes matched by 'target' with nc every 10 seconds until
+        the connection succeeds or 'time_sec' seconds have passed.
+        Returns True on success, False on timeout.
+        """
+        salt = salt_actions
+        tgt = 'I@opencontrail:control:role:primary'
+
+        db_ip = salt.get_single_pillar(
+            tgt=tgt, pillar="opencontrail:database:bind:host")
+        db_port = salt.get_single_pillar(
+            tgt=tgt, pillar="opencontrail:database:bind:port_configdb")
+        LOG.info("Cassandra configdb: {}:{}".format(db_ip, db_port))
+        cmd = 'nc -v -z -w2 {} {}'.format(db_ip, db_port)
+
+        start_time = time.time()
+        while time.time() - start_time < time_sec:
+            connection = salt.run_state(target, "cmd.run", cmd)
+            LOG.info(connection)
+            if 'succeeded' in str(connection):
+                return True
+            LOG.info("Retrying connection to cassandra")
+            time.sleep(10)
+
+        return False
+
     @pytest.fixture()
     def handle_restore_params(self, reclass_actions):
         reclass_actions.add_key(
@@ -193,16 +219,20 @@
             0. Prepare restore parameters
             1. Create network to be backuped
             2. Create an instant backup
-            3. Restore from the backup. Stop the supervisor-database service
+            3. Stop neutron-server
+            4. Restore from the backup. Stop the supervisor-database service
                 on the OpenContrail control nodes
-            4. Restore: Remove the Cassandra files on control nodes
-            5. Restore: Start the supervisor-database service on the
+            5. Restore: Remove the Cassandra files on control nodes
+            6. Restore: Start the supervisor-database service on the
                 Cassandra client backup node
-            6. Restore: Apply the cassandra state
-            7. Restore: Reboot the Cassandra backup client role node
-            8. Restore: Reboot the other OpenContrail control nodes
-            9. Restore: Restart the supervisor-database service
-            10. Restore: verify that OpenContrail is in correct state
+            7. Restore: remove the dbrestored restore marker file if present
+            8. Restore: Apply the cassandra state
+            9. Restore: Start the supervisor-database service on the
+                remaining control nodes
+            10. Restore: Start the analytics containers on collector nodes
+            11. Restore: Restart contrail-control services on control nodes
+            12. Start neutron-server
+            13. Check that the network is restored and services are OK
         """
         salt = salt_actions
         fixture_network_name = "backuptest2"
@@ -211,40 +241,73 @@
 
         show_step(1)
         self.create_network(underlay_actions, fixture_network_name, ctl_node)
+
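+        # give the new network time to settle before taking the backup (assumed)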
+        time.sleep(30)
         show_step(2)
         create_instant_backup(salt)
+
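+        # allow the instant backup to complete before starting the restore (assumed)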
+        time.sleep(30)
         show_step(3)
-        salt.run_state("I@opencontrail:control",
+        salt.run_state("I@neutron:server",
                        "cmd.run",
-                       "doctrail controller systemctl stop contrail-database")
+                       "service neutron-server stop")
+
         show_step(4)
-        salt.run_state("I@opencontrail:control",
+        salt.run_state("I@opencontrail:control or "
+                       "I@opencontrail:collector",
                        "cmd.run",
-                       "rm -rf /var/lib/configdb/*")
+                       "cd /etc/docker/compose/opencontrail/ "
+                       "&& docker-compose down")
         show_step(5)
         salt.run_state("I@opencontrail:control",
                        "cmd.run",
-                       "doctrail controller systemctl start contrail-database")
+                       "rm -rf /var/lib/configdb/*")
         show_step(6)
         salt.run_state("I@cassandra:backup:client",
                        "cmd.run",
-                       "/var/backups/cassandra/dbrestored")
-        salt.run_state("I@cassandra:backup:client", "state.sls", "cassandra")
+                       "cd /etc/docker/compose/opencontrail/ "
+                       "&& docker-compose up -d")
+
+        # wait for cassandra to be online
+        self.wait_cassandra(salt_actions, 'I@cassandra:backup:client')
+
         show_step(7)
-        salt.run_state("I@cassandra:backup:client", "system.reboot")
+        salt.run_state("I@cassandra:backup:client",
+                       "cmd.run",
+                       "rm /var/backups/cassandra/dbrestored")
         show_step(8)
-        salt.run_state(
-            "I@opencontrail:control and not I@cassandra:backup:client",
-            "system.reboot")
+        salt.run_state("I@cassandra:backup:client", "state.sls", "cassandra")
         show_step(9)
+        salt.run_state("I@opencontrail:control "
+                       "and not I@cassandra:backup:client",
+                       "cmd.run",
+                       "cd /etc/docker/compose/opencontrail/ "
+                       "&& docker-compose up -d")
+
+        # wait for cassandra to be online
+        self.wait_cassandra(salt_actions, 'I@opencontrail:control '
+                                          'and not I@cassandra:backup:client')
+        show_step(10)
+        salt.run_state("I@opencontrail:collector",
+                       "cmd.run",
+                       "cd /etc/docker/compose/opencontrail/ "
+                       "&& docker-compose up -d")
         time.sleep(60)
+        show_step(11)
         salt.run_state(
             "I@opencontrail:control",
             "cmd.run",
-            "doctrail controller systemctl restart contrail-database")
+            "doctrail controller service contrail-control restart")
 
-        show_step(10)
+        show_step(12)
+
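+        # give the restarted contrail services time to come up
+        # before starting neutron-server again (assumed delay)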
+        time.sleep(60)
+        salt.run_state("I@neutron:server",
+                       "cmd.run",
+                       "service neutron-server start")
+
         time.sleep(80)
+        show_step(13)
         network_presented = self.is_network_restored(
             underlay_actions,
             fixture_network_name,
@@ -270,4 +333,5 @@
                                     " {} \n".format(node_name,
                                                     service.strip(),
                                                     status)
+
         assert statuses_ok, failures