Merge "Switch to deploy from proposed PROD-37154"
diff --git a/jobs/project.yaml b/jobs/project.yaml
index d47c7c3..f40d31b 100644
--- a/jobs/project.yaml
+++ b/jobs/project.yaml
@@ -69,4 +69,6 @@
       # - testrail-reporter-ci - can't moved to JJB, unsupported parameters
       # - testrail-reporter-gerrit-mcp-ci - can't moved to JJB, unsupported parameters
       - release-artifact-checker
+      - cc_table_jjb
+      - packer-image-system.foundation
 ...
\ No newline at end of file
diff --git a/jobs/scripts/cc_table.sh b/jobs/scripts/cc_table.sh
new file mode 100644
index 0000000..4c5564f
--- /dev/null
+++ b/jobs/scripts/cc_table.sh
@@ -0,0 +1,197 @@
#!/bin/bash
# cc_table.sh - generate an HTML table comparing cookiecutter
# parameters across tcp-qa deployment templates.
# Output: report.html in the current working directory.
set -x

TITLE="Comparison of tcp-qa deployments"
RIGHT_NOW="$(date '+%x %r %Z')"
TIME_STAMP="Updated on ${RIGHT_NOW} by ${USER}"
+
# Deployment environments to compare; each name is a directory under
# tcp_tests/templates/ holding a salt-context-cookiecutter*.yaml file.
list_of_envs=(
    "heat-cicd-pike-dvr-sl"
    "heat-cicd-queens-contrail41-sl"
    "heat-cicd-queens-dvr-sl"
    "heat-cicd-pike-contrail-stb-sl"
    "released-heat-cicd-pike-contrail41-sl"
    "released-heat-cicd-pike-dvr-sl"
    "released-heat-cicd-queens-contrail41-sl"
    "released-heat-cicd-queens-dvr-sl"
    "bm-cicd-pike-ovs-maas"
    "bm-cicd-queens-ovs-maas"
    "heat-bm-cicd-pike-contrail-sl"
    "heat-bm-cicd-queens-contrail-sl"
    "bm-e7-cicd-pike-ovs-maas"
    "bm-e7-cicd-pike-odl-maas"
    "bm-b300-cicd-queens-ovs-maas"
    "bm-b300-e7-cicd-pike-ovs-maas"
)
+
# Cookiecutter context keys to look up in every environment; one table
# row is produced per key.
all_parameters=(
  "auditd_enabled"
  "barbican_enabled"
  "barbican_backend"
  "barbican_integration_enabled"
  "jenkins_slave_type"
  "updates_mirantis_version"
  "bmk_enabled"
  "ceph_enabled"
  "ceph_version"
  "cicd_enabled"
  "ceph_osd_backend"
  "ceph_osd_mode"
  "ceph_osd_data_disks"
  "ceph_osd_journal_or_block_db_disks"
  "ceph_osds_per_device"
  "ceph_osd_data_size"
  "ceph_osd_journal_size"
  "ceph_osd_block_db_size"
  "ceph_osd_dmcrypt"
  "ceph_osd_node_count"
  "ceph_osd_bond_mode"
  "ceph_hyper_converged"
  "rbd_monitoring_enabled"
  "cinder_backup_engine"
  "compute_padding_with_zeros"
  "designate_enabled"
  "designate_backend"
  "fluentd_enabled"
  "rsync_fernet_rotation"
  "gainsight_service_enabled"
  "galera_ssl_enabled"
  "internal_proxy_enabled"
  "ironic_enabled"
  "kqueen_custom_mail_enabled"
  "kqueen_enabled"
  "kubernetes_enabled"
  "maas_dhcp_enabled"
  "maas_enabled"
  "manila_enabled"
  "mcp_version"
  "nova_vnc_tls_enabled"
  "octavia_manager_cluster"
  "octavia_amphora_topology"
  "octavia_spare_amphora_pool_size"
  "opencontrail_api_ssl_enabled"
  "opencontrail_enabled"
  "opencontrail_version"
  "openldap_enabled"
  "openscap_enabled"
  "openstack_enabled"
  "openstack_cluster_size"
  "openstack_mysql_x509_enabled"
  "openstack_nfv_dpdk_enabled"
  "openstack_nfv_sriov_enabled"
  "openstack_network_engine"
  "openstack_nova_compute_nfv_req_enabled"
  "openstack_octavia_enabled"
  "openstack_ovs_dvr_enabled"
  "openstack_rabbitmq_x509_enabled"
  "openstack_version"
  "platform"
  "rabbitmq_ssl_enabled"
  "openstack_rabbitmq_standalone_mode"
  "secrets_encryption_enabled"
  "stacklight_enabled"
  "stacklight_ssl_enabled"
  "stacklight_version"
  "static_ips_on_deploy_network_enabled"
  "tenant_telemetry_enabled"
  "upstream_proxy_enabled"
  "version"
)
+
function split_string
{
  # Print the part after the first ':' in $1 ("key: value" -> "value").
  # $1 - string of the form "<key>:<value>"
  local str=$1
  local id value

  # Scope IFS to this read only; the original set the global IFS and
  # then unset it, clobbering any IFS the caller had configured.
  IFS=':' read -r id value <<< "$str"
  # Intentionally unquoted: default word splitting trims the leading
  # space left behind after "key: value".
  # shellcheck disable=SC2086
  echo ${value}
}
+
+
function collect_parameter
{
  # Look up one cookiecutter parameter value for one environment.
  # $1 - environment name (directory under tcp_tests/templates/)
  # $2 - parameter key to search for
  # Prints the value part of the matched "key: value" line (empty when
  # the key is absent).
  local env=$1
  local parameter_key=$2
  local key_value

  # The glob stays unquoted so it expands to the context file(s);
  # a missing key just yields an empty string.
  key_value=$(grep -w "${parameter_key}:" tcp_tests/templates/"${env}"/salt-context-cookiecutter*.yaml)
  split_string "$key_value"
}
+
function clean
{
  # Sanitize a raw parameter value: drop every character that is NOT
  # alphanumeric or one of '_' '.' '/' '{' '}' ':', then lowercase.
  # (The old comment claimed underscores were stripped - they are kept.)
  # $1 - raw string
  local str=$1
  local cleaned

  # Remove disallowed characters via pattern substitution.
  cleaned=${str//[^a-zA-Z0-9_.\/\{\}:]/}
  # Lowercase with a bash expansion instead of forking `tr` in backticks.
  cleaned=${cleaned,,}
  echo "$cleaned"
}
+
function html_color
{
  # Print an HTML bgcolor attribute for boolean-looking values:
  # red for "false", green for "true", nothing for anything else.
  # $1 - raw value (sanitized through clean, which also lowercases)
  local str
  str=$(clean "$1")

  # clean() lowercases its output, so only the lowercase forms can ever
  # match; the original's extra "False"/"True" comparisons were dead code.
  if [ "$str" == "false" ]; then
    echo "bgcolor=\"#ff9999\""
  elif [ "$str" == "true" ]; then
    echo "bgcolor=\"#99cc99\""
  fi
}
+
function unpack_parameters
{
  # Emit one <tr> per parameter: the first cell holds the parameter
  # name, followed by one (optionally colored) cell per environment.
  # Reads globals: all_parameters, list_of_envs.
  local param env value

  for param in "${all_parameters[@]}"; do
    echo "<tr>"
    echo "<td>"
    echo "$param"
    echo "</td>"
    for env in "${list_of_envs[@]}"; do
      value=$(collect_parameter "$env" "$param")
      # $value stays unquoted on purpose: only its first word reaches
      # html_color/clean, matching the original multi-word behavior.
      # shellcheck disable=SC2086
      echo "<td $(html_color $value)>"
      # shellcheck disable=SC2086,SC2116
      echo $(clean $value)
      echo "</td>"
    done
    echo "</tr>"
  done
}
+
function unpack_envs
{
  # Emit the table header row: one empty corner cell, then one <th>
  # per environment name. Reads global: list_of_envs.
  local env

  echo "<tr>"
  echo "<th>"
  echo "</th>"
  for env in "${list_of_envs[@]}"; do
    echo "<th>"
    echo "$env"
    echo "</th>"
  done
  echo "</tr>"
}
+
function write_page
{
  # Render the complete HTML report to stdout.
  # Reads globals TITLE and TIME_STAMP; delegates the table rows to
  # unpack_envs (header row) and unpack_parameters (data rows).
  # Plain << is used: the body is space-indented, so the tab-stripping
  # <<- variant had no effect anyway.
  cat << _EOF_
      <html>
          <head>
              <title>$TITLE</title>
          </head>
        <body>
          <h1>$TITLE</h1>
          <p>$TIME_STAMP</p>
          <table border=1 style="border-collapse:collapse;border-spacing:0">
            $(unpack_envs)
          $(unpack_parameters)
          </table>
        </body>
      </html>
_EOF_
}
+
+
# Name of the generated artifact; the Jenkins job archives report.html
# as a build artifact.
filename="report.html"

write_page > "$filename"
\ No newline at end of file
diff --git a/jobs/templates/cc_table.yml b/jobs/templates/cc_table.yml
new file mode 100644
index 0000000..d385fa5
--- /dev/null
+++ b/jobs/templates/cc_table.yml
@@ -0,0 +1,22 @@
+- job-template:

+    project-type: freestyle

+    description: '{job-description}'

+    concurrent: false

+    disabled: false

+    name: cc_table_jjb

+

+    publishers:

+      - archive:

+          artifacts: 'report.html'

+          allow-empty: false

+    scm:

+    - git:

+        branches:

+        - '*/master'

+        url: https://gerrit.mcp.mirantis.com/mcp/tcp-qa

+    triggers: []

+    builders:

+    - shell:

+        !include-raw-escape: '../scripts/cc_table.sh'

+

+

diff --git a/jobs/templates/packer-image-system.foundation.yml b/jobs/templates/packer-image-system.foundation.yml
new file mode 100644
index 0000000..8cd2af0
--- /dev/null
+++ b/jobs/templates/packer-image-system.foundation.yml
@@ -0,0 +1,86 @@
+- job-template:

+    project-type: pipeline

+    description: '{job-description}'

+    concurrent: false

+    disabled: false

+    name: packer-image-system.foundation

+    parameters:

+    - string:

+        name: IMAGE_NAME

+        default: system.foundation

+        description: 'Name of the resulting qcow2 image'

+        trim: 'false'

+    - string:

+        name: BUILD_CONFIG_DRIVE_PATH

+        default: tcp_tests/templates/_packer/foundation/config-drive

+        description: 'Relative path in tcp-qa to the directory with meta-data and user-data files'

+        trim: 'false'

+    - string:

+        name: BUILD_PACKER_CONFIG_PATH

+        default: tcp_tests/templates/_packer/foundation/packer.json

+        description: 'Relative path in tcp-qa to the file with packer config (JSON)'

+        trim: 'false'

+    - string:

+        name: BASE_IMAGE_URL

+        default: http://cloud-images.ubuntu.com/releases/xenial/release-20180306/ubuntu-16.04-server-cloudimg-amd64-disk1.img

+        description: 'Base image to build a new image'

+        trim: 'false'

+    - string:

+        name: BASE_IMAGE_MD5

+        default: 566efef1d6f12e7d3a994c2405bdb642

+        description: 'Base image MD5 checksum'

+        trim: 'false'

+    - string:

+        name: PACKER_URL

+        default: https://releases.hashicorp.com/packer/1.4.1/packer_1.4.1_linux_amd64.zip

+        description: 'URL to the zip archive with packer binary, see https://releases.hashicorp.com/packer/'

+        trim: 'false'

+    - string:

+        name: PACKER_ZIP_MD5

+        default: 35cc6dd2a2b2e50e76090197d7c12a90

+        description: 'MD5 of the zip archive with packer binary'

+        trim: 'false'

+    - string:

+        name: OS_AUTH_URL

+        default: https://keystone.ic-eu.ssl.mirantis.net/v3

+        description: 'Openstack keystone catalog endpoint, for example https://10.90.0.80:5000/v3'

+        trim: 'false'

+    - string:

+        name: OS_PROJECT_NAME

+        default: sre-team

+        description: ''

+        trim: 'false'

+    - string:

+        name: OS_USER_DOMAIN_NAME

+        default: ldap_mirantis

+        description: 'OpenStack user domain name'

+        trim: 'false'

+    - string:

+        name: OS_CREDENTIALS

+        default: sre-qa-ci-eu

+        description: 'Jenkins credentials ID with username and password to create a heat stack in OpenStack'

+        trim: 'false'

+    - bool:

+        name: UPLOAD_IMAGE_TO_GLANCE

+        default: True

+        description: 'If True: upload image to glance; if False: store as an artifact'

+        trim: 'false'

+    - string:

+        name: TCP_QA_REFS

+        default:

+        description: |

+          Example: refs/changes/89/411189/36

+          (for now - only one reference allowed)

+        trim: 'false'

+

+    pipeline-scm:

+      lightweight-checkout: false

+      scm:

+      - git:

+          branches:

+          - FETCH_HEAD

+          refspec: ${{TCP_QA_REFS}}

+          url: https://gerrit.mcp.mirantis.com/mcp/tcp-qa

+      script-path: jobs/pipelines/packer-image-create.groovy

+    logrotate:

+      daysToKeep: 30
\ No newline at end of file
diff --git a/jobs/templates/self-deploy-jobs.yaml b/jobs/templates/self-deploy-jobs.yaml
index 5660557..894ec3c 100644
--- a/jobs/templates/self-deploy-jobs.yaml
+++ b/jobs/templates/self-deploy-jobs.yaml
@@ -7,9 +7,9 @@
     name: self-deploy-jobs
     parameters:
     - string:
-        default: 'master'
+        default: 'refs/heads/master'
         description: 'tcp-qa review refspec'
-        name: TCP_QA_REFS
+        name: GERRIT_REFSPEC
         trim: 'true'
     - bool:
         name: DRY_RUN
@@ -34,7 +34,7 @@
       - git:
           branches:
           - FETCH_HEAD
-          refspec: '${{TCP_QA_REFS}}'
+          refspec: '${{GERRIT_REFSPEC}}'
           url: https://gerrit.mcp.mirantis.com/mcp/tcp-qa
       script-path: jobs/pipelines/self-deploy-jobs.groovy
     logrotate:
diff --git a/tcp_tests/helpers/utils.py b/tcp_tests/helpers/utils.py
index e0ef723..46fc90d 100644
--- a/tcp_tests/helpers/utils.py
+++ b/tcp_tests/helpers/utils.py
@@ -23,8 +23,6 @@
 import jinja2
 import paramiko
 import yaml
-import logging
-from multiprocessing import Process, BoundedSemaphore
 from devops.helpers import ssh_client
 
 from tcp_tests import logger
@@ -487,73 +485,3 @@
 
 class TimeoutException(Exception):
     pass
-
-
-pool = list()
-LOG_FORMAT = '%(asctime)s - %(levelname)s %(filename)s:%(lineno)d ' \
-             '/%(processName)s/ -- %(message)s'
-
-
-class Worker:
-    def __init__(self, limit=4, timeout=None):
-        """
-        limit of parallel thread to execute
-        timeout of waiting threads in seconds
-        """
-        LOG.debug("Created multithreading Worker limited by {} "
-                  "threads".format(limit))
-        self._sema = BoundedSemaphore(limit)
-        self.timeout = timeout
-        pass
-
-    @property
-    def pool(self):
-        global pool
-        return pool
-
-    def _worker(self, func, args):
-        try:
-            # FIXME: logging doesn't work
-            memory_handler = logging.handlers.MemoryHandler(
-                50,
-                target=logger.console)
-            formatter = logging.Formatter(fmt=LOG_FORMAT)
-
-            LOG = logging.getLogger("{}{}".format(func, args))
-            LOG.setLevel(logging.DEBUG)
-            memory_handler.setFormatter(formatter)
-            LOG.addHandler(memory_handler)
-            # #######
-            func(*args)
-            # #######
-            memory_handler.close()
-        finally:
-            # allow a new process to be started now that this one is exiting
-            self._sema.release()
-
-    def start(self, func, args, name=None):
-        self._sema.acquire()  # wait to start until another process is finished
-        p = Process(target=self._worker,
-                    args=(func, args),
-                    name=name
-                    )
-        self.pool.append(p)
-        p.start()
-
-    def are_completed(self):
-        for t in self.pool:
-            LOG.info("Joining {}....".format(t))
-            t.join(timeout=self.timeout)
-        return all([not (task.is_alive()) for task in self.pool])
-
-    def clean_pool(self):
-        for i in range(self.pool.__len__()):
-            del self.pool[0]
-
-    def all_tasks_successfully_completed(self):
-        return all([task.exitcode == 0 for task in self.pool])
-
-    def print_failed_tasks(self):
-        return "\n".join([str(task)
-                          for task in self.pool
-                          if task.exitcode != 0])
diff --git a/tcp_tests/managers/execute_commands.py b/tcp_tests/managers/execute_commands.py
index 314c641..6dcf615 100644
--- a/tcp_tests/managers/execute_commands.py
+++ b/tcp_tests/managers/execute_commands.py
@@ -1,9 +1,8 @@
 
 import time
 
-from tcp_tests import logger, settings
+from tcp_tests import logger
 from tcp_tests.helpers.log_helpers import pretty_repr
-from tcp_tests.helpers.utils import Worker
 
 LOG = logger.logger
 
@@ -37,8 +36,6 @@
                 'node_name': 'name of the node to run the command(s)',
                 # Optional:
                 'description': 'string with a readable command description',
-                'parallel': 'bool (True of False) to enable executing these
-                            type of command in multithreading'
                 'retry': {
                     'count': int,  # How many times should be run the command
                                    # until success
@@ -52,7 +49,6 @@
             ...
         ]
         """
-        worker = Worker(limit=settings.THREADS, timeout=3*60)
         for n, step in enumerate(commands):
             # Required fields
             action_cmd = step.get('cmd')
@@ -71,19 +67,7 @@
             log_msg = "\n\n{0}\n{1}".format(msg, '=' * len(msg))
 
             if action_cmd:
-                if step.get('parallel'):
-                    name = description + " on " + step.get("node_name")
-                    worker.start(func=self.execute_command,
-                                 args=(step, msg),
-                                 name=name
-                                 )
-                else:
-                    while not worker.are_completed():
-                        LOG.info("Waiting {}".format(worker.pool))
-                    if worker.all_tasks_successfully_completed():
-                        worker.clean_pool()
-                    self.execute_command(step, msg)
-
+                self.execute_command(step, msg)
             elif action_do:
                 self.command2(step, msg)
             elif action_upload:
@@ -93,12 +77,6 @@
                 LOG.info(log_msg)
                 self.action_download(step)
 
-        while not worker.are_completed():
-            LOG.info("Waiting {}".format(worker.pool))
-
-        assert worker.all_tasks_successfully_completed(), \
-            worker.print_failed_tasks()
-
     def execute_command(self, step, msg, return_res=None):
         # Required fields
         cmd = step.get('cmd')
@@ -112,6 +90,7 @@
         timeout = step.get('timeout', None)
 
         with self.__underlay.remote(node_name=node_name) as remote:
+
             for x in range(retry_count, 0, -1):
                 time.sleep(3)
 
diff --git a/tcp_tests/settings.py b/tcp_tests/settings.py
index f4ff7e0..18548fb 100644
--- a/tcp_tests/settings.py
+++ b/tcp_tests/settings.py
@@ -72,7 +72,6 @@
 DOCKER_NAME = os.environ.get('DOCKER_NAME',
                              'mirantis/oscore/rally-tempest:latest')
 DOCKER_IMAGES_SL_TAG = os.environ.get('DOCKER_IMAGES_SL_TAG', 'latest')
-THREADS = os.environ.get("THREADS", 10)
 
 PATTERN = os.environ.get('PATTERN', None)
 RUN_TEMPEST = get_var_as_bool('RUN_TEMPEST', False)
diff --git a/tcp_tests/templates/bm-e7-cicd-pike-ovs-maas/env_add.yml b/tcp_tests/templates/bm-e7-cicd-pike-ovs-maas/env_add.yml
index 5408fb4..77c518a 100644
--- a/tcp_tests/templates/bm-e7-cicd-pike-ovs-maas/env_add.yml
+++ b/tcp_tests/templates/bm-e7-cicd-pike-ovs-maas/env_add.yml
@@ -68,3 +68,25 @@
                 - bond0
                 require_interfaces:
                 - bond0
+        openstack_compute_rack01:
+          params:
+            linux_network_interfaces:
+              br_mesh:
+                address: ${_param:_esc}{_param:tenant_address}
+                enabled: true
+                netmask: ${_param:_esc}{_param:tenant_network_netmask}
+                proto: static
+                type: bridge
+                use_interfaces:
+                  - bond0.${_param:_esc}{_param:tenant_vlan}
+                require_interfaces:
+                  - bond0.${_param:_esc}{_param:tenant_vlan}
+              bond0.tenant_vlan:
+                name: bond0.${_param:_esc}{_param:tenant_vlan}
+                enabled: true
+                proto: manual
+                type: vlan
+                use_interfaces:
+                  - bond0
+                require_interfaces:
+                  - bond0
diff --git a/tcp_tests/templates/cookied-model-generator/salt_bm-e7-cicd-pike-ovs-maas.yaml b/tcp_tests/templates/cookied-model-generator/salt_bm-e7-cicd-pike-ovs-maas.yaml
index 4cefd30..e8dec92 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_bm-e7-cicd-pike-ovs-maas.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_bm-e7-cicd-pike-ovs-maas.yaml
@@ -77,3 +77,32 @@
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 5}
   skip_fail: false
+
+- description: "Upload env_add.yml to cfg01.{{ DOMAIN_NAME }}"
+  upload:
+    local_path:  {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+    local_filename: env_add.yml
+    remote_path: /root/environment/
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: "Upload infra_config_nodes_add.yml to cfg01.{{ DOMAIN_NAME }}"
+  upload:
+    local_path:  {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+    local_filename: infra_config_nodes_add.yml
+    remote_path: /root/environment/
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 10}
+  skip_fail: false
+
+- description: "Modify model to add interfaces to kvm nodes"
+  cmd: |
+    set -e;
+    set -x;
+    . /root/venv-reclass-tools/bin/activate;
+    reclass-tools merge-context /root/environment/env_add.yml /srv/salt/reclass/classes/environment/{{ LAB_CONFIG_NAME }}/init.yml;
+    reclass-tools merge-context /root/environment/infra_config_nodes_add.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config/nodes.yml;
+  node_name: {{ HOSTNAME_CFG01 }}
+  retry: {count: 1, delay: 5}
+  skip_fail: false
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index cc38df3..1f43386 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -808,7 +808,6 @@
 
   {%- if salt_roles %}
 - description: Configure salt-minion on {{ ssh['node_name'] }}
-  parallel: True
   cmd: |
     set -ex;
     [ ! -d /etc/salt/minion.d ] && mkdir -p /etc/salt/minion.d;