Add swarm-run-pytest job

- add the swarm-run-pytest job to run pytest tests
- add a workaround for time sync after resuming the environment
- use a common Groovy script (deploy-cicd-and-run-tests.groovy) for k8s and OpenStack deployments

Change-Id: Icfe13371fbed231bb4f5fd794d0a43edf347c06a
diff --git a/jobs/pipelines/deploy-cicd-and-run-tests.groovy b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
new file mode 100644
index 0000000..d44ce62
--- /dev/null
+++ b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
@@ -0,0 +1,56 @@
+@Library('tcp-qa')_
+
+def common = new com.mirantis.mk.Common()
+def shared = new com.mirantis.system_qa.SharedPipeline()
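+// full list of deployment steps; used for result reporting and passed to the swarm-run-pytest job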
+def steps = "hardware,create_model,salt," + env.DRIVETRAIN_STACK_INSTALL + "," + env.PLATFORM_STACK_INSTALL
+
+node ("${NODE_NAME}") {
+  try {
+
+    stage("Clean the environment and clone tcp-qa") {
+        shared.prepare_working_dir()
+    }
+
+    stage("Create environment, generate model, bootstrap the salt-cluster") {
+        // steps: "hardware,create_model,salt"
+        shared.swarm_bootstrap_salt_cluster_devops()
+    }
+
+    stage("Install core infrastructure and deploy CICD nodes") {
+        // steps: env.DRIVETRAIN_STACK_INSTALL
+        shared.swarm_deploy_cicd(env.DRIVETRAIN_STACK_INSTALL)
+    }
+
+    stage("Install core infrastructure and deploy CICD nodes") {
+        // steps: env.PLATFORM_STACK_INSTALL
+        shared.swarm_deploy_platform(env.PLATFORM_STACK_INSTALL)
+    }
+
+    stage("Run tests") {
+        shared.swarm_run_pytest(steps)
+    }
+
+  } catch (e) {
+      common.printMsg("Job failed", "red")
+      shared.run_cmd("""\
+          dos.py suspend ${ENV_NAME} || true
+          dos.py snapshot ${ENV_NAME} test_failed || true
+          """)
+      throw e
+  } finally {
+    // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+    // and report appropriate data to TestRail
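+    // when SHUTDOWN_ENV_ON_TEARDOWN is "false", keep the environment: resume it and re-sync time (timesync workaround); otherwise destroy it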
+    if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
+        shared.run_cmd("""\
+            dos.py resume ${ENV_NAME} || true
+            dos.py time-sync ${ENV_NAME} || true
+        """)
+    } else {
+        shared.run_cmd("""\
+            dos.py destroy ${ENV_NAME} || true
+        """)
+    }
+    shared.report_deploy_result(steps)
+    shared.report_test_result()
+  }
+}
diff --git a/jobs/pipelines/deploy-cicd-and-test-k8s.groovy b/jobs/pipelines/deploy-cicd-and-test-k8s.groovy
deleted file mode 100644
index baa9853..0000000
--- a/jobs/pipelines/deploy-cicd-and-test-k8s.groovy
+++ /dev/null
@@ -1,87 +0,0 @@
-@Library('tcp-qa')_
-
-common = new com.mirantis.mk.Common()
-shared = new com.mirantis.system_qa.SharedPipeline()
-
-
-node ("${NODE_NAME}") {
-  try {
-
-    stage("Clean the environment and clone tcp-qa") {
-        shared.prepare_working_dir()
-    }
-
-    stage("Create environment, generate model, bootstrap the salt-cluster") {
-        shared.swarm_bootstrap_salt_cluster_devops()
-    }
-
-    stage("Install core infrastructure and deploy CICD nodes") {
-        shared.swarm_deploy_cicd(env.DRIVETRAIN_STACK_INSTALL)
-    }
-
-    stage("Install core infrastructure and deploy CICD nodes") {
-        shared.swarm_deploy_platform(env.PLATFORM_STACK_INSTALL)
-    }
-
-    stage("Run tests") {
-        shared.run_cmd("""\
-            export ENV_NAME=${ENV_NAME}
-            . ./tcp_tests/utils/env_salt
-            . ./tcp_tests/utils/env_k8s
-
-            # Prepare snapshots that may be used in tests if MANAGER=devops
-            cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_k8s_deployed.ini
-            cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_stacklight_deployed.ini
-            #dos.py suspend ${ENV_NAME}
-            #dos.py snapshot ${ENV_NAME} k8s_deployed
-            #dos.py snapshot ${ENV_NAME} stacklight_deployed
-            #dos.py resume ${ENV_NAME}
-            #dos.py time-sync ${ENV_NAME}
-
-            # Initialize variables used in tcp-qa tests
-            export CURRENT_SNAPSHOT=stacklight_deployed  # provide the snapshot name required by the test
-            export TESTS_CONFIGS=\$(pwd)/${ENV_NAME}_salt_deployed.ini  # some SSH data may be filled separatelly
-
-            #export MANAGER=empty  # skip 'hardware' fixture, disable snapshot/revert features
-            export MANAGER=devops  # use 'hardware' fixture to manage fuel-devops environment
-            export MAKE_SNAPSHOT_STAGES=false  # skip 'hardware' fixture, disable snapshot/revert features
-            # export SSH='{...}'  # non-empty SSH required to skip 'underlay' fixture. It is filled from TESTS_CONFIGS now
-            export salt_master_host=\$SALT_MASTER_IP  # skip salt_deployed fixture
-            export salt_master_port=6969
-            export SALT_USER=\$SALTAPI_USER
-            export SALT_PASSWORD=\$SALTAPI_PASS
-            export CORE_INSTALLED=true  # skip core_deployed fixture
-            export K8S_INSTALLED=true              # skip k8s_deployed fixture
-            export sl_installed=true              # skip stacklight_deployed fixture
-
-            py.test --junit-xml=nosetests.xml ${RUN_TEST_OPTS}
-
-            dos.py suspend ${ENV_NAME}
-            dos.py snapshot ${ENV_NAME} test_completed
-            """)
-    }
-
-  } catch (e) {
-      common.printMsg("Job failed", "red")
-    shared.run_cmd("""\
-        dos.py suspend ${ENV_NAME} || true
-        dos.py snapshot ${ENV_NAME} test_failed || true
-        """)
-      throw e
-  } finally {
-    // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
-    // and report appropriate data to TestRail
-    if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
-        shared.run_cmd("""\
-            dos.py destroy ${ENV_NAME} || true
-        """)
-    } else {
-        shared.run_cmd("""\
-            dos.py resume ${ENV_NAME} || true
-            dos.py time-sync ${ENV_NAME} || true
-        """)
-    }
-    shared.report_deploy_result("hardware,create_model,salt," + env.DRIVETRAIN_STACK_INSTALL + "," + env.PLATFORM_STACK_INSTALL)
-    shared.report_test_result()
-  }
-}
\ No newline at end of file
diff --git a/jobs/pipelines/deploy-cicd-and-test-openstack.groovy b/jobs/pipelines/deploy-cicd-and-test-openstack.groovy
deleted file mode 100644
index 585ad83..0000000
--- a/jobs/pipelines/deploy-cicd-and-test-openstack.groovy
+++ /dev/null
@@ -1,89 +0,0 @@
-@Library('tcp-qa')_
-
-common = new com.mirantis.mk.Common()
-shared = new com.mirantis.system_qa.SharedPipeline()
-
-
-node ("${NODE_NAME}") {
-  try {
-
-    stage("Clean the environment and clone tcp-qa") {
-        shared.prepare_working_dir()
-    }
-
-    stage("Create environment, generate model, bootstrap the salt-cluster") {
-        shared.swarm_bootstrap_salt_cluster_devops()
-    }
-
-    stage("Install core infrastructure and deploy CICD nodes") {
-        shared.swarm_deploy_cicd(env.DRIVETRAIN_STACK_INSTALL)
-    }
-
-    stage("Install core infrastructure and deploy CICD nodes") {
-        shared.swarm_deploy_platform(env.PLATFORM_STACK_INSTALL)
-    }
-
-    stage("Run tests") {
-        shared.run_cmd("""\
-            export ENV_NAME=${ENV_NAME}
-            . ./tcp_tests/utils/env_salt
-            # TODO: . ./tcp_tests/utils/env_keystonercv3
-
-            # Prepare snapshots that may be used in tests if MANAGER=devops
-            cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_openstack_deployed.ini
-            cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_stacklight_deployed.ini
-            cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_sl_os_deployed.ini
-            #dos.py suspend ${ENV_NAME}
-            #dos.py snapshot ${ENV_NAME} openstack_deployed
-            #dos.py snapshot ${ENV_NAME} stacklight_deployed
-            #dos.py snapshot ${ENV_NAME} sl_os_deployed
-            #dos.py resume ${ENV_NAME}
-            #dos.py time-sync ${ENV_NAME}
-
-            # Initialize variables used in tcp-qa tests
-            export CURRENT_SNAPSHOT=stacklight_deployed  # provide the snapshot name required by the test
-            export TESTS_CONFIGS=\$(pwd)/${ENV_NAME}_salt_deployed.ini  # some SSH data may be filled separatelly
-
-            #export MANAGER=empty  # skip 'hardware' fixture, disable snapshot/revert features
-            export MANAGER=devops  # use 'hardware' fixture to manage fuel-devops environment
-            export MAKE_SNAPSHOT_STAGES=false  # skip 'hardware' fixture, disable snapshot/revert features
-            # export SSH='{...}'  # non-empty SSH required to skip 'underlay' fixture. It is filled from TESTS_CONFIGS now
-            export salt_master_host=\$SALT_MASTER_IP  # skip salt_deployed fixture
-            export salt_master_port=6969
-            export SALT_USER=\$SALTAPI_USER
-            export SALT_PASSWORD=\$SALTAPI_PASS
-            export CORE_INSTALLED=true  # skip core_deployed fixture
-            export OPENSTACK_INSTALLED=true              # skip k8s_deployed fixture
-            export sl_installed=true              # skip stacklight_deployed fixture
-
-            py.test --junit-xml=nosetests.xml ${RUN_TEST_OPTS}
-
-            dos.py suspend ${ENV_NAME}
-            dos.py snapshot ${ENV_NAME} test_completed
-            """)
-    }
-
-  } catch (e) {
-      common.printMsg("Job failed", "red")
-      shared.run_cmd("""\
-          dos.py suspend ${ENV_NAME} || true
-          dos.py snapshot ${ENV_NAME} test_failed || true
-          """)
-      throw e
-  } finally {
-    // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
-    // and report appropriate data to TestRail
-    if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
-        shared.run_cmd("""\
-            dos.py destroy ${ENV_NAME} || true
-        """)
-    } else {
-        shared.run_cmd("""\
-            dos.py resume ${ENV_NAME} || true
-            dos.py time-sync ${ENV_NAME} || true
-        """)
-    }
-    shared.report_deploy_result("hardware,create_model,salt," + env.DRIVETRAIN_STACK_INSTALL + "," + env.PLATFORM_STACK_INSTALL)
-    shared.report_test_result()
-  }
-}
\ No newline at end of file
diff --git a/jobs/pipelines/swarm-run-pytest.groovy b/jobs/pipelines/swarm-run-pytest.groovy
new file mode 100644
index 0000000..d0422fd
--- /dev/null
+++ b/jobs/pipelines/swarm-run-pytest.groovy
@@ -0,0 +1,80 @@
+/**
+ *
+ * Run pytest tests on the environment deployed by the parent job
+ *
+ * Expected parameters:
+ *
+ *   ENV_NAME                      Fuel-devops environment name
+ *   PASSED_STEPS                  Steps passed to install components using Jenkins on the CICD cluster, e.g. "salt,core,cicd,openstack:3200,stacklight:2400",
+ *                                 where 3200 and 2400 are optional timeouts (not used in this testing pipeline)
+ *   RUN_TEST_OPTS                 Pytest options such as -k or -m with an expression to select the tests to run; additional pytest options are allowed
+ *   PARENT_NODE_NAME              Name of the jenkins slave where the parent job created the environment
+ *   PARENT_WORKSPACE              Path to the parent job workspace that contains the tcp-qa repo
+ *   TCP_QA_REFS                   Reference to the tcp-qa change on review.gerrithub.io, like refs/changes/46/418546/41
+ *   SHUTDOWN_ENV_ON_TEARDOWN      optional; shut down the fuel-devops environment at the end of the job
+ *   LAB_CONFIG_NAME               Not used (backward compatibility, for manual deployment steps only)
+ *   REPOSITORY_SUITE              Not used (backward compatibility, for manual deployment steps only)
+ *   MCP_IMAGE_PATH1604            Not used (backward compatibility, for manual deployment steps only)
+ *   IMAGE_PATH_CFG01_DAY01        Not used (backward compatibility, for manual deployment steps only)
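+ *
+ *   Example values (hypothetical, for illustration only):
+ *     PASSED_STEPS=hardware,create_model,salt,core,cicd,k8s
+ *     RUN_TEST_OPTS=-m k8s_calico --maxfail=1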
+ */
+
+@Library('tcp-qa')_
+
+common = new com.mirantis.mk.Common()
+shared = new com.mirantis.system_qa.SharedPipeline()
+
+if (! env.PARENT_NODE_NAME) {
+    error "'PARENT_NODE_NAME' must be set from the parent deployment job!"
+}
+
+node ("${PARENT_NODE_NAME}") {
+    if (! fileExists("${PARENT_WORKSPACE}")) {
+        error "'PARENT_WORKSPACE' contains path to non-existing directory ${PARENT_WORKSPACE} on the node '${PARENT_NODE_NAME}'."
+    }
+    dir("${PARENT_WORKSPACE}") {
+        try {
+
+            stage("Run tests") {
+                def steps = shared.get_steps_list(PASSED_STEPS)
+                def sources = """\
+                    export ENV_NAME=${ENV_NAME}
+                    . ./tcp_tests/utils/env_salt"""
+                if (steps.contains('k8s')) {
+                    sources += """
+                    . ./tcp_tests/utils/env_k8s\n"""
+                }
+                if (steps.contains('openstack')) {
+                    sources += """
+                    # TODO: . ./tcp_tests/utils/env_keystonercv3\n"""
+                }
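+                // export <step>_installed=true for each passed step so the matching *_deployed fixtures are skipped in tests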
+                def installed = steps.collect {"""\
+                    export ${it}_installed=true"""}.join("\n")
+
+                shared.run_cmd(sources + installed + """
+                    export MANAGER=devops  # use 'hardware' fixture to manage fuel-devops environment
+                    export salt_master_host=\$SALT_MASTER_IP  # skip salt_deployed fixture
+                    export salt_master_port=6969
+                    export SALT_USER=\$SALTAPI_USER
+                    export SALT_PASSWORD=\$SALTAPI_PASS
+
+                    py.test --junit-xml=nosetests.xml ${RUN_TEST_OPTS}
+
+                    dos.py suspend ${ENV_NAME}
+                    dos.py snapshot ${ENV_NAME} test_completed
+                    """)
+            }
+
+        } catch (e) {
+            common.printMsg("Job failed", "red")
+            throw e
+        } finally {
+            // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+            // and report appropriate data to TestRail
+            if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+                shared.run_cmd("""\
+                    dos.py destroy ${ENV_NAME}
+                """)
+            }
+        }
+    }
+}