Merge "New cutted volume from flavors"
diff --git a/jobs/pipelines/deploy-cicd-and-run-tests.groovy b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
index 1e35c9e..10536c7 100644
--- a/jobs/pipelines/deploy-cicd-and-run-tests.groovy
+++ b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
@@ -32,18 +32,6 @@
             shared.prepare_working_dir(env_manager)
         }
 
-        // Reboot Hardware before the BM deployments
-        if ("$ENV_NAME".contains("bm-")){
-            reboot_hw_nodes = env.REBOOT_HW_NODES ?: false
-            stage("Reboot HW nodes") {
-                if (reboot_hw_nodes) {
-                    shared.reboot_hardware_nodes()
-                } else {
-                    common.printMsg("REBOOT_HW_NODES is disabled. Skipping this stage...", "blue")
-                }
-            }
-        }
-
         stage("Create environment, generate model, bootstrap the salt-cluster") {
             // steps: "hardware,create_model,salt"
             if (env_manager == 'devops') {
diff --git a/jobs/templates/bm-cicd-pike-ovs-maas.yml b/jobs/templates/bm-cicd-pike-ovs-maas.yml
index 6b30a2a..4f4311a 100644
--- a/jobs/templates/bm-cicd-pike-ovs-maas.yml
+++ b/jobs/templates/bm-cicd-pike-ovs-maas.yml
@@ -199,10 +199,6 @@
         default: true
         description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
         name: UPGRADE_SALTSTACK
-    - bool:
-        default: true
-        description: Reboot hardware servers and set boot to PXE before the deployment
-        name: REBOOT_HW_NODES
     pipeline-scm:
       lightweight-checkout: false
       scm:
diff --git a/jobs/templates/bm-cicd-queens-ovs-maas.yml b/jobs/templates/bm-cicd-queens-ovs-maas.yml
index 95b0343..ad40bdf 100644
--- a/jobs/templates/bm-cicd-queens-ovs-maas.yml
+++ b/jobs/templates/bm-cicd-queens-ovs-maas.yml
@@ -212,10 +212,6 @@
         default: false
         description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
         name: UPGRADE_SALTSTACK
-    - bool:
-        default: true
-        description: Reboot hardware servers and set boot to PXE before the deployment
-        name: REBOOT_HW_NODES
     pipeline-scm:
       lightweight-checkout: false
       scm:
diff --git a/jobs/templates/heat-bm-cicd-pike-contrail-sl.yml b/jobs/templates/heat-bm-cicd-pike-contrail-sl.yml
index 531b1e9..3d9c175 100644
--- a/jobs/templates/heat-bm-cicd-pike-contrail-sl.yml
+++ b/jobs/templates/heat-bm-cicd-pike-contrail-sl.yml
@@ -225,10 +225,6 @@
         default: true
         description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
         name: UPGRADE_SALTSTACK
-    - bool:
-        default: true
-        description: Reboot hardware servers and set boot to PXE before the deployment
-        name: REBOOT_HW_NODES
     pipeline-scm:
       lightweight-checkout: false
       scm:
diff --git a/jobs/templates/heat-bm-cicd-queens-contrail-sl.yml b/jobs/templates/heat-bm-cicd-queens-contrail-sl.yml
index 09cd0fb..2c0de61 100644
--- a/jobs/templates/heat-bm-cicd-queens-contrail-sl.yml
+++ b/jobs/templates/heat-bm-cicd-queens-contrail-sl.yml
@@ -226,10 +226,6 @@
         default: false
         description: 'Works starting from MCP 2019.2.10 or master. Whether to apply saltstack updates on all nodes in cluster before deployment'
         name: UPGRADE_SALTSTACK
-    - bool:
-        default: true
-        description: Reboot hardware servers and set boot to PXE before the deployment
-        name: REBOOT_HW_NODES
     pipeline-scm:
       lightweight-checkout: false
       scm:
diff --git a/src/com/mirantis/system_qa/SharedPipeline.groovy b/src/com/mirantis/system_qa/SharedPipeline.groovy
index c42d43f..72ff052 100644
--- a/src/com/mirantis/system_qa/SharedPipeline.groovy
+++ b/src/com/mirantis/system_qa/SharedPipeline.groovy
@@ -780,33 +780,3 @@
     return ret
   }
 }
-
-def reboot_hardware_nodes() {
-    bm_ips = [
-        "185.8.59.227",
-        "185.8.59.229",
-        "5.43.225.88",
-        "5.43.225.112",
-        "5.43.225.208",
-        "5.43.227.118",
-        "185.8.58.248",
-        "185.8.59.222",
-        "5.43.225.228",
-        "5.43.229.28",
-        "5.43.225.23",
-        "185.8.58.9",
-        "185.8.58.246",
-        "185.8.58.243",
-        "185.8.58.244"
-        ]
-
-    withCredentials([
-       [$class          : 'UsernamePasswordMultiBinding',
-       credentialsId   : 'lab_engineer',
-       passwordVariable: 'lab_pass',
-       usernameVariable: 'lab_user']
-    ]) {
-        for (ip in bm_ips) { sh ("ipmitool -H ${ip} -U ${lab_user} -P ${lab_pass} chassis power off")}
-        for (ip in bm_ips) { sh ("ipmitool -H ${ip} -U ${lab_user} -P ${lab_pass} chassis bootdev pxe")}
-    }
-}
\ No newline at end of file
diff --git a/tcp_tests/managers/jenkins/client.py b/tcp_tests/managers/jenkins/client.py
index 79b248c..a31bbc8 100644
--- a/tcp_tests/managers/jenkins/client.py
+++ b/tcp_tests/managers/jenkins/client.py
@@ -4,6 +4,7 @@
 
 import jenkins
 import json
+import yaml
 import requests
 
 from devops.helpers import helpers
@@ -99,16 +100,47 @@
         job_params = job_params['parameterDefinitions']
         return job_params
 
-    def make_defults_params(self, name):
+    def make_defaults_params(self, name):
         job_params = self.job_params(name)
         def_params = dict(
             [(j['name'], j['defaultParameterValue']['value'])
              for j in job_params])
         return def_params
 
+    def _correct_yaml_params(self, job_name, params):
+        """
+        Params can be defined as a nested dict.
+        In that case 2nd-layer dict will be translated to YAML text and
+        added to default parameter value
+
+        :param job_name: Job name
+        :param params: dict of JenkinsJobs parameters
+        :return: nothing
+        """
+        for param_name, param_value in params.items():
+            if not isinstance(param_value, dict):
+                continue
+            default_param = self.make_defaults_params(job_name).get(param_name)
+            if default_param is None:
+                print("{param} param of {job} job doesn't exist. "
+                      "Ignoring enriching it with {value}".format(
+                        param=param_name,
+                        job=job_name,
+                        value=param_value
+                        ))
+                continue
+            yaml_param = yaml.safe_load(default_param)
+            yaml_param.update(param_value)
+            params[param_name] = yaml.dump(yaml_param,
+                                           default_flow_style=False)
+        return params
+
     @retry()
     def run_build(self, name, params=None, timeout=600, verbose=False):
-        params = params or self.make_defults_params(name)
+        params = params or self.make_defaults_params(name)
+        params = self._correct_yaml_params(job_name=name,
+                                           params=params)
+
         num = self.__client.build_job(name, params)
         time.sleep(2)  # wait while job is started
 
diff --git a/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt.yaml b/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt.yaml
index a31051f..f0b727e 100644
--- a/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt.yaml
+++ b/tcp_tests/templates/bm-cicd-pike-ovs-maas/salt.yaml
@@ -5,6 +5,7 @@
 {% import 'shared-salt.yaml' as SHARED with context %}
 {% import 'shared-test-tools.yaml' as SHARED_TEST_TOOLS with context %}
 {% import 'shared-workarounds.yaml' as SHARED_WORKAROUNDS with context %}
+{% import 'shared-maas.yaml' as SHARED_MAAS with context %}
 
 - description: Wait for salt-master is ready after configdrive user-data
   cmd: |
@@ -14,6 +15,24 @@
   skip_fail: false
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+{{ SHARED_MAAS.REBOOT_HW_NODES('engineer', 'KBwdcRqwed3w2',
+  [
+      "185.8.59.227",
+      "185.8.59.229",
+      "5.43.225.88",
+      "5.43.225.112",
+      "5.43.225.208",
+      "5.43.227.118",
+      "185.8.58.248",
+      "185.8.59.222",
+      "5.43.225.228",
+      "5.43.229.28",
+      "5.43.225.23",
+      "185.8.58.9",
+      "185.8.58.246",
+      "185.8.58.243",
+      "185.8.58.244"
+  ]) }}
 
 {{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
 
diff --git a/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt.yaml b/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt.yaml
index 9fc0dfe..f008071 100644
--- a/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt.yaml
+++ b/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt.yaml
@@ -5,6 +5,7 @@
 {% import 'shared-salt.yaml' as SHARED with context %}
 {% import 'shared-test-tools.yaml' as SHARED_TEST_TOOLS with context %}
 {% import 'shared-workarounds.yaml' as SHARED_WORKAROUNDS with context %}
+{% import 'shared-maas.yaml' as SHARED_MAAS with context %}
 
 - description: Wait for salt-master is ready after configdrive user-data
   cmd: |
@@ -14,6 +15,24 @@
   skip_fail: false
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+{{ SHARED_MAAS.REBOOT_HW_NODES('engineer', 'KBwdcRqwed3w2',
+  [
+      "185.8.59.227",
+      "185.8.59.229",
+      "5.43.225.88",
+      "5.43.225.112",
+      "5.43.225.208",
+      "5.43.227.118",
+      "185.8.58.248",
+      "185.8.59.222",
+      "5.43.225.228",
+      "5.43.229.28",
+      "5.43.225.23",
+      "185.8.58.9",
+      "185.8.58.246",
+      "185.8.58.243",
+      "185.8.58.244"
+  ]) }}
 
 {{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
 
diff --git a/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt.yaml b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt.yaml
index 46393c3..d069df0 100644
--- a/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt.yaml
+++ b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt.yaml
@@ -7,6 +7,7 @@
 {% import 'shared-salt.yaml' as SHARED with context %}
 {% import 'shared-test-tools.yaml' as SHARED_TEST_TOOLS with context %}
 {% import 'shared-workarounds.yaml' as SHARED_WORKAROUNDS with context %}
+{% import 'shared-maas.yaml' as SHARED_MAAS with context %}
 
 - description: Wait for salt-master is ready after configdrive user-data
   cmd: |
@@ -16,6 +17,24 @@
   skip_fail: false
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+{{ SHARED_MAAS.REBOOT_HW_NODES('engineer', 'KBwdcRqwed3w2',
+  [
+      "185.8.59.227",
+      "185.8.59.229",
+      "5.43.225.88",
+      "5.43.225.112",
+      "5.43.225.208",
+      "5.43.227.118",
+      "185.8.58.248",
+      "185.8.59.222",
+      "5.43.225.228",
+      "5.43.229.28",
+      "5.43.225.23",
+      "185.8.58.9",
+      "185.8.58.246",
+      "185.8.58.243",
+      "185.8.58.244"
+  ]) }}
 
 {{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
 
diff --git a/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt.yaml b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt.yaml
index 8dbf61a..06b692c 100644
--- a/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt.yaml
+++ b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt.yaml
@@ -7,6 +7,7 @@
 {% import 'shared-salt.yaml' as SHARED with context %}
 {% import 'shared-test-tools.yaml' as SHARED_TEST_TOOLS with context %}
 {% import 'shared-workarounds.yaml' as SHARED_WORKAROUNDS with context %}
+{% import 'shared-maas.yaml' as SHARED_MAAS with context %}
 
 - description: Wait for salt-master is ready after configdrive user-data
   cmd: |
@@ -16,6 +17,24 @@
   skip_fail: false
 
 {{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+{{ SHARED_MAAS.REBOOT_HW_NODES('engineer', 'KBwdcRqwed3w2',
+  [
+      "185.8.59.227",
+      "185.8.59.229",
+      "5.43.225.88",
+      "5.43.225.112",
+      "5.43.225.208",
+      "5.43.227.118",
+      "185.8.58.248",
+      "185.8.59.222",
+      "5.43.225.228",
+      "5.43.229.28",
+      "5.43.225.23",
+      "185.8.58.9",
+      "185.8.58.246",
+      "185.8.58.243",
+      "185.8.58.244"
+  ]) }}
 
 {{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
 
diff --git a/tcp_tests/templates/shared-maas.yaml b/tcp_tests/templates/shared-maas.yaml
index 0becf80..2066f4f 100644
--- a/tcp_tests/templates/shared-maas.yaml
+++ b/tcp_tests/templates/shared-maas.yaml
@@ -1,5 +1,23 @@
 {# Collection of common macroses shared across MaaS #}
 
+
+{%- macro REBOOT_HW_NODES(ipmi_user, ipmi_password, ipmi_devices=['']) %}
+
+- description: Reboot hardware nodes and set PXE boot; needed for a clean deployment
+  cmd: |
+    {%- for device in ipmi_devices %}
+      ipmitool -H {{ device }} -U {{ ipmi_user }} -P {{ ipmi_password }} chassis power off
+    {%- endfor %}
+    {%- for device in ipmi_devices %}
+      ipmitool -H {{ device }} -U {{ ipmi_user }} -P {{ ipmi_password }} chassis bootdev pxe
+    {%- endfor %}
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
+
+{%- endmacro %}
+
+
 {%- macro AUTH_IN_MAAS() %}
 
 - description: MaaS auth
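
As a side note, the following standalone sketch renders a trimmed copy of the REBOOT_HW_NODES
macro body with jinja2 to show the shell commands it expands to; the credentials and device
addresses below are placeholders, not the real lab values.

import jinja2

MACRO_BODY = """
{%- for device in ipmi_devices %}
ipmitool -H {{ device }} -U {{ ipmi_user }} -P {{ ipmi_password }} chassis power off
{%- endfor %}
{%- for device in ipmi_devices %}
ipmitool -H {{ device }} -U {{ ipmi_user }} -P {{ ipmi_password }} chassis bootdev pxe
{%- endfor %}
"""

print(jinja2.Template(MACRO_BODY).render(
    ipmi_user='ipmi-user',                   # placeholder credentials
    ipmi_password='ipmi-password',
    ipmi_devices=['10.0.0.1', '10.0.0.2']))  # placeholder device addresses
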
diff --git a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
index 0c74d20..f3de4eb 100644
--- a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
+++ b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
@@ -331,7 +331,7 @@
             host='http://{}:8081'.format(config.salt.salt_master_host),
             username='admin',
             password='r00tme')
-        params = jenkins.make_defults_params('deploy_openstack')
+        params = jenkins.make_defaults_params('deploy_openstack')
         params['SALT_MASTER_URL'] = salt_api
         params['STACK_INSTALL'] = 'core,cicd'
 
@@ -432,7 +432,7 @@
             host='http://{}:8081'.format(config.salt.salt_master_host),
             username='admin',
             password='r00tme')
-        params = jenkins.make_defults_params('deploy_openstack')
+        params = jenkins.make_defaults_params('deploy_openstack')
         params['SALT_MASTER_URL'] = salt_api
         params['STACK_INSTALL'] = 'core,cicd'
 
diff --git a/tcp_tests/tests/system/test_k8s_actions.py b/tcp_tests/tests/system/test_k8s_actions.py
index c6d61dd..e265af5 100644
--- a/tcp_tests/tests/system/test_k8s_actions.py
+++ b/tcp_tests/tests/system/test_k8s_actions.py
@@ -490,7 +490,7 @@
             username=jenkins_info['username'],
             password=jenkins_info['password'])
 
-        params = jenkins.make_defults_params('deploy-k8s-upgrade')
+        params = jenkins.make_defaults_params('deploy-k8s-upgrade')
         params['SALT_MASTER_URL'] = salt_api
         params['SALT_MASTER_CREDENTIALS'] = 'salt'
         params['CONFORMANCE_RUN_AFTER'] = True
diff --git a/tcp_tests/tests/system/test_mcp_update.py b/tcp_tests/tests/system/test_mcp_update.py
index 240b481..c1456be 100644
--- a/tcp_tests/tests/system/test_mcp_update.py
+++ b/tcp_tests/tests/system/test_mcp_update.py
@@ -234,7 +234,12 @@
         job_parameters = {
             'GIT_REFSPEC': 'release/proposed/2019.2.0',
             'MK_PIPELINES_REFSPEC': 'release/proposed/2019.2.0',
-            'TARGET_MCP_VERSION': '2019.2.0'
+            'TARGET_MCP_VERSION': '2019.2.0',
+            "DRIVE_TRAIN_PARAMS": {
+                        "OS_DIST_UPGRADE": True,
+                        "OS_UPGRADE": True,
+                        "BATCH_SIZE": 10
+                        }
         }
         job_result, job_description = dt.start_job_on_jenkins(
             job_name=job_name,
@@ -346,7 +351,9 @@
 
         job_name = 'deploy-upgrade-galera'
         job_parameters = {
-            'INTERACTIVE': 'false'
+            'INTERACTIVE': 'false',
+            'OS_DIST_UPGRADE': 'true',
+            'OS_UPGRADE': 'true'
         }
 
         job_result, job_description = dt.start_job_on_jenkins(
@@ -480,7 +487,9 @@
         # ########### Start Deploy - upgrade RabbitMQ pipeline  ############
         show_step(3)
         job_parameters = {
-            'INTERACTIVE': 'false'
+            'INTERACTIVE': 'false',
+            'OS_DIST_UPGRADE': 'true',
+            'OS_UPGRADE': 'true'
         }
 
         job_result, job_description = dt.start_job_on_jenkins(
@@ -542,7 +551,9 @@
         job_parameters = {
             "STAGE_UPGRADE_DOCKER_COMPONENTS": True,
             "STAGE_UPGRADE_ES_KIBANA": True,
-            "STAGE_UPGRADE_SYSTEM_PART": True
+            "STAGE_UPGRADE_SYSTEM_PART": True,
+            'OS_DIST_UPGRADE': 'true',
+            'OS_UPGRADE': 'true'
         }
         job_result, job_description = drivetrain.start_job_on_jenkins(
             job_name="stacklight-upgrade",
diff --git a/tcp_tests/tests/system/test_offline.py b/tcp_tests/tests/system/test_offline.py
index d19316b..7ea9c11 100644
--- a/tcp_tests/tests/system/test_offline.py
+++ b/tcp_tests/tests/system/test_offline.py
@@ -150,7 +150,7 @@
                 host=day1_cfg_config.config.salt.salt_master_host),
             username='admin',
             password='r00tme')
-        params = jenkins.make_defults_params('deploy_openstack')
+        params = jenkins.make_defaults_params('deploy_openstack')
         params['SALT_MASTER_URL'] = result
         params['STACK_INSTALL'] = "core,openstack,ovs"
         build = jenkins.run_build('deploy_openstack', params)
@@ -212,7 +212,7 @@
             host='http://172.16.44.33:8081',
             username='admin',
             password='r00tme')
-        params = jenkins.make_defults_params('deploy_openstack')
+        params = jenkins.make_defaults_params('deploy_openstack')
         params['SALT_MASTER_URL'] = salt_api
         if settings.STACK_INSTALL:
             params['STACK_INSTALL'] = settings.STACK_INSTALL
@@ -271,7 +271,7 @@
         except LookupError:
             ntp_skipped_nodes = ''
 
-        params = jenkins.make_defults_params('cvp-sanity')
+        params = jenkins.make_defaults_params('cvp-sanity')
         params['TESTS_SETTINGS'] = (
             'drivetrain_version={0};{1}'
             .format(settings.MCP_VERSION, ntp_skipped_nodes))
diff --git a/tcp_tests/tests/system/test_pipeline_deploy.py b/tcp_tests/tests/system/test_pipeline_deploy.py
index 9852f5f..bfc7d8c 100644
--- a/tcp_tests/tests/system/test_pipeline_deploy.py
+++ b/tcp_tests/tests/system/test_pipeline_deploy.py
@@ -49,7 +49,7 @@
             password='r00tme')
 
         # Creating param list for openstack deploy
-        params = jenkins.make_defults_params('deploy_openstack')
+        params = jenkins.make_defaults_params('deploy_openstack')
         params['SALT_MASTER_URL'] = salt_api
         params['STACK_INSTALL'] = 'core,kvm,openstack,ovs'
         show_step(4)
@@ -99,7 +99,7 @@
             host='http://172.16.49.2:8081',
             username='admin',
             password='r00tme')
-        params = jenkins.make_defults_params('deploy_openstack')
+        params = jenkins.make_defaults_params('deploy_openstack')
         params['SALT_MASTER_URL'] = salt_api
         params['STACK_INSTALL'] = 'core,kvm,cicd'
 
diff --git a/tcp_tests/utils/run_jenkins_job.py b/tcp_tests/utils/run_jenkins_job.py
index e0e7f69..bf3bb01 100755
--- a/tcp_tests/utils/run_jenkins_job.py
+++ b/tcp_tests/utils/run_jenkins_job.py
@@ -123,7 +123,7 @@
         username=username,
         password=password)
 
-    job_params = jenkins.make_defults_params(job_name)
+    job_params = jenkins.make_defaults_params(job_name)
     if job_parameters is not None:  # job_parameters = {}
         job_params.update(job_parameters)