Improve cicd test runner and reporter

- run pytest without capturing stderr, so that the full test output
  lands directly in the Jenkins log (see the run_sh sketch after
  this list)
- do not print the error message when something fails; it is already
  shown in the exception message
- add retries while getting the job results: Jenkins may return
  'None' for the job workflow right after the job has finished
  (see the retry sketch after this list)
- add retrieval of the k8s version to utils/env_k8s
- add reporting of the k8s_conformance suite to TestRail
- increase the number of cmp nodes to 4 on k8s cicd envs
- increase memory on k8s cmp nodes from 2 GB to 4 GB
- increase memory on k8s ctl nodes from 2 GB to 8 GB
- enable verbose output in the k8s_conformance tests
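
A minimal sketch of the run_cmd/run_sh split assumed by the
swarm-run-pytest change: run_cmd captures output for later processing,
while run_sh streams everything, stderr included, straight to the
Jenkins console. The SharedPipeline internals are not part of this
diff, so the helper bodies below are illustrative assumptions only.

    // Hypothetical Groovy helpers modelled on the calls used in the
    // pipelines; the real SharedPipeline implementation may differ.
    def run_cmd(String cmds, Boolean returnStdout = false) {
        // capture-oriented wrapper: the caller may parse the result
        return sh(script: "set -ex; ${cmds}", returnStdout: returnStdout)
    }

    def run_sh(String cmds) {
        // stream-oriented wrapper: nothing is captured, so pytest
        // stdout/stderr shows up directly in the Jenkins log
        sh(script: "set -ex; ${cmds}")
    }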
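
A sketch of the retry idea for fetching downstream job results: Jenkins
may briefly return null for a build right after it finishes, so poll a
few times before giving up. This is an assumed helper, not the actual
shared-library code.

    // Hypothetical retry wrapper around the Jenkins API; accessing
    // Jenkins.instance needs the usual script-security approvals.
    def get_job_result_with_retries(String job_name, int build_number,
                                    int attempts = 10, int delay_sec = 15) {
        for (int i = 0; i < attempts; i++) {
            def job = Jenkins.instance.getItemByFullName(job_name)
            def build = (job != null) ? job.getBuildByNumber(build_number) : null
            def result = (build != null) ? build.getResult() : null
            if (result != null) {
                return result.toString()
            }
            sleep(delay_sec)  // pipeline 'sleep' step: seconds
        }
        error("No result for ${job_name} #${build_number} after ${attempts} attempts")
    }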

Change-Id: I672279007fe4d7e3d684f0e49d1bcb7ff42a430f
diff --git a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
index f4c8765..6b9370e 100644
--- a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
+++ b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
@@ -50,6 +50,12 @@
                 """)
             }
 
+            if (env.TCP_QA_REFS) {
+                stage("Update working dir to patch ${TCP_QA_REFS}") {
+                    shared.update_working_dir()
+                }
+            }
+
             stage("Create an environment ${ENV_NAME} in disabled state") {
                 // deploy_hardware.xml
                 shared.run_cmd("""\
@@ -97,7 +103,7 @@
             }
 
           } catch (e) {
-              common.printMsg("Job is failed: " + e.message, "red")
+              common.printMsg("Job is failed", "red")
               throw e
           } finally {
             // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-deploy-cicd.groovy b/jobs/pipelines/swarm-deploy-cicd.groovy
index d067e07..538f5ea 100644
--- a/jobs/pipelines/swarm-deploy-cicd.groovy
+++ b/jobs/pipelines/swarm-deploy-cicd.groovy
@@ -35,6 +35,12 @@
                 error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
             }
 
+            if (env.TCP_QA_REFS) {
+                stage("Update working dir to patch ${TCP_QA_REFS}") {
+                    shared.update_working_dir()
+                }
+            }
+
             // Install core and cicd
             def stack
             def timeout
@@ -60,7 +66,7 @@
             }
 
         } catch (e) {
-            common.printMsg("Job is failed: " + e.message, "red")
+            common.printMsg("Job is failed", "red")
             throw e
         } finally {
             // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-deploy-platform.groovy b/jobs/pipelines/swarm-deploy-platform.groovy
index 54bc43d..78e363f 100644
--- a/jobs/pipelines/swarm-deploy-platform.groovy
+++ b/jobs/pipelines/swarm-deploy-platform.groovy
@@ -35,6 +35,12 @@
                 error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
             }
 
+            if (env.TCP_QA_REFS) {
+                stage("Update working dir to patch ${TCP_QA_REFS}") {
+                    shared.update_working_dir()
+                }
+            }
+
             // Install the cluster
             def stack
             def timeout
@@ -60,7 +66,7 @@
             }
 
         } catch (e) {
-            common.printMsg("Job is failed:" + e.message, "red")
+            common.printMsg("Job is failed", "red")
             throw e
         } finally {
             // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-run-pytest.groovy b/jobs/pipelines/swarm-run-pytest.groovy
index 553b8a2..5d7bd8d 100644
--- a/jobs/pipelines/swarm-run-pytest.groovy
+++ b/jobs/pipelines/swarm-run-pytest.groovy
@@ -36,9 +36,16 @@
     dir("${PARENT_WORKSPACE}") {
         try {
 
+            if (env.TCP_QA_REFS) {
+                stage("Update working dir to patch ${TCP_QA_REFS}") {
+                    shared.update_working_dir()
+                }
+            }
+
             stage("Run tests") {
                 def steps = shared.get_steps_list(PASSED_STEPS)
                 def sources = """\
+                    cd ${PARENT_WORKSPACE}
                     export ENV_NAME=${ENV_NAME}
                     . ./tcp_tests/utils/env_salt"""
                 if (steps.contains('k8s')) {
@@ -52,7 +59,7 @@
                 def installed = steps.collect {"""\
                     export ${it}_installed=true"""}.join("\n")
 
-                shared.run_cmd(sources + installed + """
+                shared.run_sh(sources + installed + """
                     export TESTS_CONFIGS=${ENV_NAME}_salt_deployed.ini
                     export MANAGER=devops  # use 'hardware' fixture to manage fuel-devops environment
                     export salt_master_host=\$SALT_MASTER_IP  # skip salt_deployed fixture
@@ -68,7 +75,7 @@
             }
 
         } catch (e) {
-            common.printMsg("Job is failed" + e.message, "red")
+            common.printMsg("Job is failed", "red")
             throw e
         } finally {
             // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-testrail-report.groovy b/jobs/pipelines/swarm-testrail-report.groovy
index 333547a..4b19a5e 100644
--- a/jobs/pipelines/swarm-testrail-report.groovy
+++ b/jobs/pipelines/swarm-testrail-report.groovy
@@ -31,6 +31,13 @@
     }
     dir("${PARENT_WORKSPACE}") {
         try {
+
+            if (env.TCP_QA_REFS) {
+                stage("Update working dir to patch ${TCP_QA_REFS}") {
+                    shared.update_working_dir()
+                }
+            }
+
             def report_name = ''
             def testSuiteName = ''
             def methodname = ''
@@ -70,7 +77,7 @@
 
             if (tcpqa_report_name) {
                 stage("tcp-qa cases report") {
-//                    report_name = "nosetests.xml"
+                    // tcpqa_report_name =~ "nosetests.xml"
                     testSuiteName = "[MCP_X] integration cases"
                     methodname = "{methodname}"
                     testrail_name_template = "{title}"
@@ -85,7 +92,7 @@
 
             if ('openstack' in stacks && tempest_report_name) {
                 stage("Tempest report") {
-//                    report_name = "report_*.xml"
+                    // tempest_report_name =~ "report_*.xml"
                     testSuiteName = "[MCP1.1_PIKE]Tempest"
                     methodname = "{classname}.{methodname}"
                     testrail_name_template = "{title}"
@@ -95,14 +102,30 @@
 
             if ('k8s' in stacks && k8s_conformance_report_name) {
                 stage("K8s conformance report") {
-                    println "TBD"
-                    // K8s conformance report
+                    // k8s_conformance_report_name =~ "conformance_result.xml"
+                    // TODO(ddmitriev): it's better to get the k8s version right after deployment
+                    // and store in some artifact that can be re-used here.
+                    def k8s_version = shared.run_cmd_stdout("""\
+                        export ENV_NAME=${ENV_NAME}
+                        . ./tcp_tests/utils/env_salt
+                        . ./tcp_tests/utils/env_k8s
+                        echo "\$kubernetes_version_major.\$kubernetes_version_minor"
+                    """).trim().split().last()
+                    testSuiteName = "[MCP][k8s]Hyperkube ${k8s_version}.x"
+                    methodname = "{methodname}"
+                    testrail_name_template = "{title}"
+                    reporter_extra_options = [
+                      "--testrail-add-missing-cases",
+                      "--testrail-case-custom-fields {\\\"custom_qa_team\\\":\\\"9\\\"}",
+                      "--testrail-case-section-name \'Conformance\'",
+                    ]
+                    shared.upload_results_to_testrail(k8s_conformance_report_name, testSuiteName, methodname, testrail_name_template, reporter_extra_options)
                 }
             }
 
             if ('stacklight' in stacks && stacklight_report_name) {
                 stage("stacklight-pytest report") {
-//                    report_name = "stacklight_report.xml"
+                    // stacklight_report_name =~ "stacklight_report.xml"
                     testSuiteName = "LMA2.0_Automated"
                     methodname = "{methodname}"
                     testrail_name_template = "{title}"
@@ -111,7 +134,7 @@
             }
 
         } catch (e) {
-            common.printMsg("Job is failed: " + e.message, "red")
+            common.printMsg("Job is failed", "red")
             throw e
         } finally {
             // reporting is failed for some reason