Merge "Upgrade k8s addons after control plane upgrade"
diff --git a/cvp-func.groovy b/cvp-func.groovy
index 120bb9d..0c657a5 100644
--- a/cvp-func.groovy
+++ b/cvp-func.groovy
@@ -26,7 +26,6 @@
 def saltMaster
 def artifacts_dir = 'validation_artifacts/'
 def remote_artifacts_dir = '/root/qa_results/'
-def container_name = "${env.JOB_NAME}"
 
 node() {
     try{
@@ -39,7 +38,7 @@
             if (!keystone_creds) {
                 keystone_creds = validate._get_keystone_creds_v2(saltMaster)
             }
-            validate.runContainer(saltMaster, TARGET_NODE, TEST_IMAGE, container_name, keystone_creds)
+            validate.runContainer(saltMaster, TARGET_NODE, TEST_IMAGE, 'cvp', keystone_creds)
             validate.configureContainer(saltMaster, TARGET_NODE, PROXY, TOOLS_REPO, TEMPEST_REPO, TEMPEST_ENDPOINT_TYPE)
         }
 
diff --git a/cvp-ha.groovy b/cvp-ha.groovy
index 649ac6a..414ab46 100644
--- a/cvp-ha.groovy
+++ b/cvp-ha.groovy
@@ -28,7 +28,6 @@
 def saltMaster
 def artifacts_dir = 'validation_artifacts/'
 def remote_artifacts_dir = '/root/qa_results/'
-def container_name = "${env.JOB_NAME}"
 def current_target_node = null
 def first_node = null
 def tempest_result = ''
@@ -45,7 +44,7 @@
                 if (!keystone_creds) {
                     keystone_creds = validate._get_keystone_creds_v2(saltMaster)
                 }
-                validate.runContainer(saltMaster, TARGET_NODE, TEST_IMAGE, container_name, keystone_creds)
+                validate.runContainer(saltMaster, TARGET_NODE, TEST_IMAGE, 'cvp', keystone_creds)
                 validate.configureContainer(saltMaster, TEMPEST_TARGET_NODE, PROXY, TOOLS_REPO, TEMPEST_REPO)
             }
 
diff --git a/cvp-perf.groovy b/cvp-perf.groovy
index 7938572..74c9a63 100644
--- a/cvp-perf.groovy
+++ b/cvp-perf.groovy
@@ -21,7 +21,6 @@
 
 def artifacts_dir = 'validation_artifacts/'
 def remote_artifacts_dir = '/root/qa_results/'
-def container_name = "${env.JOB_NAME}"
 def saltMaster
 
 node() {
@@ -35,7 +34,7 @@
             if (!keystone_creds) {
                 keystone_creds = validate._get_keystone_creds_v2(saltMaster)
             }
-            validate.runContainer(saltMaster, TARGET_NODE, TEST_IMAGE, container_name, keystone_creds)
+            validate.runContainer(saltMaster, TARGET_NODE, TEST_IMAGE, 'cvp', keystone_creds)
             validate.configureContainer(saltMaster, TARGET_NODE, PROXY, TOOLS_REPO, "")
         }
 
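
Note: all three CVP pipelines above now pass the fixed container name 'cvp' instead of deriving it from env.JOB_NAME. A minimal sketch of the effect this is assumed to have inside the shared validate.runContainer helper (the docker commands are illustrative, not the library's actual implementation):

// Hypothetical sketch: a fixed name keeps container cleanup deterministic
// no matter which job runs the suite ('salt' is com.mirantis.mk.Salt).
def runContainerSketch(master, target, image, name) {
    salt.cmdRun(master, target, "docker rm -f ${name} || true")   // drop any stale 'cvp' container
    salt.cmdRun(master, target, "docker run -id --name ${name} ${image}")
}
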
diff --git a/generate-cookiecutter-products.groovy b/generate-cookiecutter-products.groovy
index 7609103..25473fb 100644
--- a/generate-cookiecutter-products.groovy
+++ b/generate-cookiecutter-products.groovy
@@ -29,8 +29,6 @@
             def clusterDomain = templateContext.default_context.cluster_domain
             def clusterName = templateContext.default_context.cluster_name
             def saltMaster = templateContext.default_context.salt_master_hostname
-            def localRepositories = templateContext.default_context.local_repositories.toBoolean()
-            def offlineDeployment = templateContext.default_context.offline_deployment.toBoolean()
             def cutterEnv = "${env.WORKSPACE}/cutter"
             def jinjaEnv = "${env.WORKSPACE}/jinja"
             def outputDestination = "${modelEnv}/classes/cluster/${clusterName}"
@@ -99,63 +97,9 @@
                 git.commitGitChanges(modelEnv, "Added new shared reclass submodule", "${user}@localhost", "${user}")
             }
 
-            def productList = ["infra", "cicd", "opencontrail", "kubernetes", "openstack", "oss", "stacklight", "ceph"]
-            for (product in productList) {
-
-                // get templateOutputDir and productDir
-                templateOutputDir = "${env.WORKSPACE}/output/${product}"
-                productDir = product
-                templateDir = "${templateEnv}/cluster_product/${productDir}"
-                // Bw for 2018.8.1 and older releases
-                if (product.startsWith("stacklight") && (!fileExists(templateDir))) {
-                    common.warningMsg("Old release detected! productDir => 'stacklight2' ")
-                    productDir = "stacklight2"
-                    templateDir = "${templateEnv}/cluster_product/${productDir}"
-                }
-
-                if (product == "infra" || (templateContext.default_context["${product}_enabled"]
-                    && templateContext.default_context["${product}_enabled"].toBoolean())) {
-
-                    common.infoMsg("Generating product " + product + " from " + templateDir + " to " + templateOutputDir)
-
-                    sh "rm -rf ${templateOutputDir} || true"
-                    sh "mkdir -p ${templateOutputDir}"
-                    sh "mkdir -p ${outputDestination}"
-
-                    python.setupCookiecutterVirtualenv(cutterEnv)
-                    python.buildCookiecutterTemplate(templateDir, COOKIECUTTER_TEMPLATE_CONTEXT, templateOutputDir, cutterEnv, templateBaseDir)
-                    sh "mv -v ${templateOutputDir}/${clusterName}/* ${outputDestination}"
-                } else {
-                    common.warningMsg("Product " + product + " is disabled")
-                }
-            }
-
-            if (localRepositories && !offlineDeployment) {
-                def aptlyModelUrl = templateContext.default_context.local_model_url
-                dir(path: modelEnv) {
-                    ssh.agentSh "git submodule add \"${aptlyModelUrl}\" \"classes/cluster/${clusterName}/cicd/aptly\""
-                    if (!(mcpVersion in ["nightly", "testing", "stable"])) {
-                        ssh.agentSh "cd \"classes/cluster/${clusterName}/cicd/aptly\";git fetch --tags;git checkout ${mcpVersion}"
-                    }
-                }
-            }
-
-            stage('Generate new SaltMaster node') {
-                def nodeFile = "${modelEnv}/nodes/${saltMaster}.${clusterDomain}.yml"
-                def nodeString = """classes:
-- cluster.${clusterName}.infra.config
-parameters:
-  _param:
-    linux_system_codename: xenial
-    reclass_data_revision: master
-  linux:
-    system:
-      name: ${saltMaster}
-      domain: ${clusterDomain}
-    """
-                sh "mkdir -p ${modelEnv}/nodes/"
-                writeFile(file: nodeFile, text: nodeString)
-
+            stage('Generate model') {
+                python.setupCookiecutterVirtualenv(cutterEnv)
+                python.generateModel(COOKIECUTTER_TEMPLATE_CONTEXT, 'default_context', saltMaster, cutterEnv, modelEnv, templateEnv, false)
                 git.commitGitChanges(modelEnv, "Create model ${clusterName}", "${user}@localhost", "${user}")
             }
 
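
Note: the per-product cookiecutter loop and the 'Generate new SaltMaster node' stage are folded into the shared python.generateModel helper. For reference, the node definition the removed stage wrote, and which the helper is assumed to reproduce:

// Sketch reconstructed from the removed stage; generateModel is assumed to
// emit an equivalent nodes/<master>.<domain>.yml.
def nodeString = """classes:
- cluster.${clusterName}.infra.config
parameters:
  _param:
    linux_system_codename: xenial
    reclass_data_revision: master
  linux:
    system:
      name: ${saltMaster}
      domain: ${clusterDomain}
"""
sh "mkdir -p ${modelEnv}/nodes/"
writeFile(file: "${modelEnv}/nodes/${saltMaster}.${clusterDomain}.yml", text: nodeString)
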
diff --git a/restore-cassandra.groovy b/restore-cassandra.groovy
index c98ff17..c20c3a0 100644
--- a/restore-cassandra.groovy
+++ b/restore-cassandra.groovy
@@ -20,70 +20,110 @@
         }
 
         stage('Restore') {
-            try {
-                salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.stop', ['neutron-server'], null, true)
-            } catch (Exception er) {
-                common.warningMsg('neutron-server service already stopped')
+            // get opencontrail version
+            def _pillar = salt.getPillar(pepperEnv, "I@opencontrail:control", '_param:opencontrail_version')
+            def contrailVersion = _pillar['return'][0].values()[0]
+            common.infoMsg("Contrail version is ${contrailVersion}")
+            if (contrailVersion >= 4) {
+                common.infoMsg("There will be steps for OC4.0 restore")
+                try {
+                    salt.cmdRun(pepperEnv, 'I@opencontrail:control', 'doctrail controller systemctl stop contrail-database' )
+                } catch (Exception err) {
+                    common.warningMsg('contrail-database already stopped? ' + err.getMessage())
+                }
+                try {
+                    salt.cmdRun(pepperEnv, 'I@opencontrail:control', 'doctrail controller bash -c "for f in $(ls /var/lib/cassandra/); do rm -r /var/lib/cassandra/$f; done"')
+                } catch (Exception err) {
+                    common.warningMsg('cassandra data already removed? ' + err.getMessage())
+                }
+                try {
+                    salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', 'doctrail controller systemctl start contrail-database' )
+                } catch (Exception err) {
+                    common.warningMsg('contrail-database already started? ' + err.getMessage())
+                }
+                // remove the restore-already-happened flag file if present
+                try {
+                    salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', 'rm  /var/backups/cassandra/dbrestored')
+                } catch (Exception err) {
+                    common.warningMsg('/var/backups/cassandra/dbrestored not present? ' + err.getMessage())
+                }
+                // perform the actual restore
+                salt.enforceState(pepperEnv, 'I@cassandra:backup:client', "cassandra")
+                salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'system.reboot', null, [], true, 5)
+                sleep(5)
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'system.reboot', null, [], true, 5)
+                // wait 60 seconds before restarting the contrail-database service
+                sleep(60)
+                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "doctrail controller systemctl restart contrail-database")
+                // wait another 60 seconds for the database to come back up
+                sleep(60)
+                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "doctrail controller contrail-status")
             }
-            try {
-                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-config'], null, true)
-            } catch (Exception er) {
-                common.warningMsg('Supervisor-config service already stopped')
+            else {
+                try {
+                    salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.stop', ['neutron-server'], null, true)
+                } catch (Exception er) {
+                    common.warningMsg('neutron-server service already stopped')
+                }
+                try {
+                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-config'], null, true)
+                } catch (Exception er) {
+                    common.warningMsg('Supervisor-config service already stopped')
+                }
+                // Cassandra restore section
+                try {
+                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-database'], null, true)
+                } catch (Exception er) {
+                    common.warningMsg('Supervisor-database service already stopped')
+                }
+                try {
+                    salt.cmdRun(pepperEnv, 'I@opencontrail:control', "mkdir -p /root/cassandra/cassandra.bak")
+                } catch (Exception er) {
+                    common.warningMsg('Directory already exists')
+                }
+
+                try {
+                    salt.cmdRun(pepperEnv, 'I@opencontrail:control', "mv /var/lib/cassandra/* /root/cassandra/cassandra.bak")
+                } catch (Exception er) {
+                    common.warningMsg('Files were already moved')
+                }
+                try {
+                    salt.cmdRun(pepperEnv, 'I@opencontrail:control', "rm -rf /var/lib/cassandra/*")
+                } catch (Exception er) {
+                    common.warningMsg('Directory already empty')
+                }
+
+                _pillar = salt.getPillar(pepperEnv, "I@cassandra:backup:client", 'cassandra:backup:backup_dir')
+                def backupDir = _pillar['return'][0].values()[0] ?: '/var/backups/cassandra'
+                common.infoMsg("Backup directory is ${backupDir}")
+                salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'file.remove', ["${backupDir}/dbrestored"], null, true)
+
+                salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'service.start', ['supervisor-database'], null, true)
+
+                // wait until supervisor-database service is up
+                salt.commandStatus(pepperEnv, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
+                sleep(60)
+
+                // performs restore
+                salt.enforceState(pepperEnv, 'I@cassandra:backup:client', "cassandra.backup")
+                salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
+                sleep(5)
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
+
+                // wait until supervisor-database service is up
+                salt.commandStatus(pepperEnv, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
+                salt.commandStatus(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'service supervisor-database status', 'running')
+                sleep(5)
+
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.restart', ['supervisor-database'], null, true)
+                salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.start', ['neutron-server'], null, true)
+
+                // wait until contrail-status is up
+                salt.commandStatus(pepperEnv, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
+
+                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "nodetool status")
+                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "contrail-status")
             }
-            // Cassandra restore section
-            try {
-                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-database'], null, true)
-            } catch (Exception er) {
-                common.warningMsg('Supervisor-database service already stopped')
-            }
-            try {
-                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "mkdir -p /root/cassandra/cassandra.bak")
-            } catch (Exception er) {
-                common.warningMsg('Directory already exists')
-            }
-
-            try {
-                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "mv /var/lib/cassandra/* /root/cassandra/cassandra.bak")
-            } catch (Exception er) {
-                common.warningMsg('Files were already moved')
-            }
-            try {
-                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "rm -rf /var/lib/cassandra/*")
-            } catch (Exception er) {
-                common.warningMsg('Directory already empty')
-            }
-
-            _pillar = salt.getPillar(pepperEnv, "I@cassandra:backup:client", 'cassandra:backup:backup_dir')
-            backup_dir = _pillar['return'][0].values()[0]
-            if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/cassandra' }
-            print(backup_dir)
-            salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
-
-            salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'service.start', ['supervisor-database'], null, true)
-
-            // wait until supervisor-database service is up
-            salt.commandStatus(pepperEnv, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
-            sleep(60)
-
-            // performs restore
-            salt.enforceState(pepperEnv, 'I@cassandra:backup:client', "cassandra.backup")
-            salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
-            sleep(5)
-            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
-
-            // wait until supervisor-database service is up
-            salt.commandStatus(pepperEnv, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
-            salt.commandStatus(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'service supervisor-database status', 'running')
-            sleep(5)
-
-            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.restart', ['supervisor-database'], null, true)
-            salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.start', ['neutron-server'], null, true)
-
-            // wait until contrail-status is up
-            salt.commandStatus(pepperEnv, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
-            
-            salt.cmdRun(pepperEnv, 'I@opencontrail:control', "nodetool status")
-            salt.cmdRun(pepperEnv, 'I@opencontrail:control', "contrail-status")
         }
     }
 }
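
Note: in the OpenContrail 4.x branch every database operation runs inside the controller container via the doctrail wrapper. A small helper capturing that pattern, sketched from the calls above (doctrail is the wrapper the 4.x packaging provides on the control nodes):

// Sketch: wrap the repeated 'doctrail controller <cmd>' invocations.
def doctrailController(pepperEnv, cmd) {
    def salt = new com.mirantis.mk.Salt()
    return salt.cmdRun(pepperEnv, 'I@opencontrail:control', "doctrail controller ${cmd}")
}
// usage: doctrailController(pepperEnv, 'systemctl restart contrail-database')
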
diff --git a/test-cookiecutter-reclass.groovy b/test-cookiecutter-reclass.groovy
index c09c572..0bab394 100644
--- a/test-cookiecutter-reclass.groovy
+++ b/test-cookiecutter-reclass.groovy
@@ -17,7 +17,7 @@
 git = new com.mirantis.mk.Git()
 python = new com.mirantis.mk.Python()
 
-def extraVarsYAML = env.EXTRA_VARIABLES_YAML ?: false
+extraVarsYAML = env.EXTRA_VARIABLES_YAML ? env.EXTRA_VARIABLES_YAML.trim() : ''
 if (extraVarsYAML) {
     common.mergeEnv(env, extraVarsYAML)
 }
@@ -59,79 +59,6 @@
 chunkJobName = "test-mk-cookiecutter-templates-chunk"
 testModelBuildsData = [:]
 
-def generateSaltMaster(modEnv, clusterDomain, clusterName) {
-    def nodeFile = "${modEnv}/nodes/cfg01.${clusterDomain}.yml"
-    def nodeString = """classes:
-- cluster.${clusterName}.infra.config
-parameters:
-    _param:
-        linux_system_codename: xenial
-        reclass_data_revision: master
-    linux:
-        system:
-            name: cfg01
-            domain: ${clusterDomain}
-"""
-    sh "mkdir -p ${modEnv}/nodes/"
-    println "Create file ${nodeFile}"
-    writeFile(file: nodeFile, text: nodeString)
-}
-
-/**
- *
- * @param contextFile - path to `contexts/XXX.yaml file`
- * @param virtualenv - pyvenv with CC and dep's
- * @param templateEnvDir - root of CookieCutter
- * @return
- */
-
-def generateModel(contextFile, virtualenv, templateEnvDir) {
-    def modelEnv = "${templateEnvDir}/model"
-    def basename = common.GetBaseName(contextFile, '.yml')
-    def generatedModel = "${modelEnv}/${basename}"
-    def content = readFile(file: "${templateEnvDir}/contexts/${contextFile}")
-    def templateContext = readYaml text: content
-    def clusterDomain = templateContext.default_context.cluster_domain
-    def clusterName = templateContext.default_context.cluster_name
-    def outputDestination = "${generatedModel}/classes/cluster/${clusterName}"
-    def templateBaseDir = templateEnvDir
-    def templateDir = "${templateEnvDir}/dir"
-    def templateOutputDir = templateBaseDir
-    dir(templateEnvDir) {
-        sh(script: "rm -rf ${generatedModel} || true")
-        common.infoMsg("Generating model from context ${contextFile}")
-        def productList = ["infra", "cicd", "opencontrail", "kubernetes", "openstack", "oss", "stacklight", "ceph"]
-        for (product in productList) {
-
-            // get templateOutputDir and productDir
-            templateOutputDir = "${templateEnvDir}/output/${product}"
-            productDir = product
-            templateDir = "${templateEnvDir}/cluster_product/${productDir}"
-            // Bw for 2018.8.1 and older releases
-            if (product.startsWith("stacklight") && (!fileExists(templateDir))) {
-                common.warningMsg("Old release detected! productDir => 'stacklight2' ")
-                productDir = "stacklight2"
-                templateDir = "${templateEnvDir}/cluster_product/${productDir}"
-            }
-            if (product == "infra" || (templateContext.default_context["${product}_enabled"]
-                && templateContext.default_context["${product}_enabled"].toBoolean())) {
-
-                common.infoMsg("Generating product " + product + " from " + templateDir + " to " + templateOutputDir)
-
-                sh "rm -rf ${templateOutputDir} || true"
-                sh "mkdir -p ${templateOutputDir}"
-                sh "mkdir -p ${outputDestination}"
-
-                python.buildCookiecutterTemplate(templateDir, content, templateOutputDir, virtualenv, templateBaseDir)
-                sh "mv -v ${templateOutputDir}/${clusterName}/* ${outputDestination}"
-            } else {
-                common.warningMsg("Product " + product + " is disabled")
-            }
-        }
-        generateSaltMaster(generatedModel, clusterDomain, clusterName)
-    }
-}
-
 def getAndUnpackNodesInfoArtifact(jobName, copyTo, build) {
     return {
         dir(copyTo) {
@@ -212,7 +139,9 @@
 def StepGenerateModels(_contextFileList, _virtualenv, _templateEnvDir) {
     return {
         for (contextFile in _contextFileList) {
-            generateModel(contextFile, _virtualenv, _templateEnvDir)
+            def basename = common.GetBaseName(contextFile, '.yml')
+            def context = readFile(file: "${_templateEnvDir}/contexts/${contextFile}")
+            python.generateModel(context, basename, 'cfg01', _virtualenv, "${_templateEnvDir}/model", _templateEnvDir)
         }
     }
 }
@@ -221,8 +150,13 @@
     // Simple function, to check and define branch-around variables
     // In general, simply make transition updates for non-master branch
     // based on magic logic
-    def message = '<br/>'
+    def newline = '<br/>'
+    def messages = []
     if (env.GERRIT_PROJECT) {
+        messages.add("<font color='red'>GerritTrigger detected! We are in auto-mode:</font>")
+        messages.add("Test env variables has been changed:")
+        messages.add("COOKIECUTTER_TEMPLATE_BRANCH => ${gerritDataCC['gerritBranch']}")
+        messages.add("RECLASS_MODEL_BRANCH => ${gerritDataRS['gerritBranch']}")
         // TODO are we going to have such branches?
         if (!['nightly', 'testing', 'stable', 'proposed', 'master'].contains(env.GERRIT_BRANCH)) {
             gerritDataCC['gerritBranch'] = env.GERRIT_BRANCH
@@ -233,20 +167,16 @@
         if (env.GERRIT_PROJECT == 'salt-models/reclass-system') {
             gerritDataRS['gerritRefSpec'] = env.GERRIT_REFSPEC
             gerritDataRS['GERRIT_CHANGE_NUMBER'] = env.GERRIT_CHANGE_NUMBER
-            message = message + "<br/>RECLASS_SYSTEM_GIT_REF =>${gerritDataRS['gerritRefSpec']}"
+            messages.add("RECLASS_SYSTEM_GIT_REF => ${gerritDataRS['gerritRefSpec']}")
         } else if (env.GERRIT_PROJECT == 'mk/cookiecutter-templates') {
             gerritDataCC['gerritRefSpec'] = env.GERRIT_REFSPEC
             gerritDataCC['GERRIT_CHANGE_NUMBER'] = env.GERRIT_CHANGE_NUMBER
-            message = message + "<br/>COOKIECUTTER_TEMPLATE_REF =>${gerritDataCC['gerritRefSpec']}"
+            messages.add("COOKIECUTTER_TEMPLATE_REF => ${gerritDataCC['gerritRefSpec']}")
         } else {
             error("Unsuported gerrit-project triggered:${env.GERRIT_PROJECT}")
         }
-        message = "<font color='red'>GerritTrigger detected! We are in auto-mode:</font>" +
-            "<br/>Test env variables has been changed:" +
-            "<br/>COOKIECUTTER_TEMPLATE_BRANCH => ${gerritDataCC['gerritBranch']}" +
-            "<br/>RECLASS_MODEL_BRANCH=> ${gerritDataRS['gerritBranch']}" + message
     } else {
-        message = "<font color='red'>Non-gerrit trigger run detected!</font>" + message
+        messages.add("<font color='red'>Non-gerrit trigger run detected!</font>")
     }
     gerritDataCCHEAD << gerritDataCC
     gerritDataCCHEAD['gerritRefSpec'] = null
@@ -262,8 +192,9 @@
     if (!common.checkRemoteBinary([apt_mk_version: testDistribRevision]).linux_system_repo_url) {
         common.errorMsg("Binary release: ${testDistribRevision} not exist. Fallback to 'proposed'! ")
         testDistribRevision = 'proposed'
-        message = "<br/>DISTRIB_REVISION =>${testDistribRevision}" + message
+        messages.add("DISTRIB_REVISION => ${testDistribRevision}")
     }
+    def message = messages.join(newline) + newline
     currentBuild.description = currentBuild.description ? message + currentBuild.description : message
 }
 
@@ -293,25 +224,29 @@
     // tar.gz
     // ├── contexts
     // │   └── ceph.yml
-    // ├── ${reclassDirName} <<< reclass system
+    // ├── classes-system <<< reclass system
     // ├── model
     // │   └── ceph       <<< from `context basename`
     // │       ├── classes
     // │       │   ├── cluster
-    // │       │   └── system -> ../../../${reclassDirName}
+    // │       │   └── system -> ../../../classes-system
     // │       └── nodes
     // │           └── cfg01.ceph-cluster-domain.local.yml
+    def archiveBaseName = common.GetBaseName(archiveName, '.tar.gz')
+    def classesSystemDir = 'classes-system'
+    // copy the reclass system under envPath; -R with the trailing slash copies the tree with its symlinks intact
+    sh("cp -R ${archiveBaseName}/ ${envPath}/${classesSystemDir}")
     dir(envPath) {
         for (String context : contextList) {
             def basename = common.GetBaseName(context, '.yml')
-            dir("${envPath}/model/${basename}") {
-                sh(script: "mkdir -p classes/; ln -sfv ../../../../${common.GetBaseName(archiveName, '.tar.gz')} classes/system ")
+            dir("${envPath}/model/${basename}/classes") {
+                sh(script: "ln -sfv ../../../${classesSystemDir} system ")
             }
         }
         // replace all generated passwords/secrets/keys with hardcoded values in infra/secrets.yaml
         replaceGeneratedValues("${envPath}/model")
         // Save all models and all contexts. Warning! `h` flag must be used!
-        sh(script: "set -ex; tar -czhf ${env.WORKSPACE}/${archiveName} --exclude='*@tmp' model contexts", returnStatus: true)
+        sh(script: "set -ex; tar -czhf ${env.WORKSPACE}/${archiveName} --exclude='*@tmp' contexts model ${classesSystemDir}", returnStatus: true)
     }
     archiveArtifacts artifacts: archiveName
 }
@@ -402,10 +337,12 @@
             stage("Compare cluster lvl Head/Patched") {
                 // Compare patched and HEAD reclass pillars
                 compareRoot = "${env.WORKSPACE}/cluster_compare/"
+                // extract both archives and drop all copied classes/system dirs before comparing
                 sh(script: """
                    mkdir -pv ${compareRoot}/new ${compareRoot}/old
                    tar -xzf ${patchedReclassArtifactName}  --directory ${compareRoot}/new
                    tar -xzf ${headReclassArtifactName}  --directory ${compareRoot}/old
+                   find ${compareRoot} -name classes -type d -exec rm -rf '{}/system' \\;
                    """)
                 common.warningMsg('infra/secrets.yml has been excluded from the comparison!')
                 result = '\n' + common.comparePillars(compareRoot, env.BUILD_URL, "-Ev \'infra/secrets.yml|\\.git\'")
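
Note: the reworked archive keeps a single classes-system copy with per-model symlinks; tar's 'h' flag dereferences those links so every extracted model remains self-contained. A standalone sketch of the mechanic (paths are illustrative only):

sh '''
    set -ex
    mkdir -p demo/model/ceph/classes demo/classes-system demo/contexts
    cd demo
    # the same relative link the pipeline creates per model
    ln -sfv ../../../classes-system model/ceph/classes/system
    # -h dereferences symlinks, so classes/system lands in the tar as real files
    tar -czhf model.tar.gz --exclude='*@tmp' contexts model classes-system
'''
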
diff --git a/test-openscap-pipeline.groovy b/test-openscap-pipeline.groovy
index c57e67d..b886467 100644
--- a/test-openscap-pipeline.groovy
+++ b/test-openscap-pipeline.groovy
@@ -3,13 +3,16 @@
  * Run openscap xccdf evaluation on given nodes
  *
  * Expected parameters:
+ *  OPENSCAP_TEST_TYPE          Type of OpenSCAP evaluation to run, either 'xccdf' or 'oval'
  *  SALT_MASTER_URL             Full Salt API address.
  *  SALT_MASTER_CREDENTIALS     Credentials to the Salt API.
  *
- *  XCCDF_BENCHMARKS_DIR        The XCCDF benchmarks base directory (default /usr/share/xccdf-benchmarks/mirantis/)
+ *  XCCDF_BENCHMARKS_DIR        Base directory for XCCDF benchmarks (default /usr/share/xccdf-benchmarks/mirantis/)
+ *                              or OVAL definitions (default /usr/share/oval-definitions/mirantis/)
  *  XCCDF_BENCHMARKS            List of pairs XCCDF benchmark filename and corresponding profile separated with ','
- *                                  these pairs are separated with semicolon.
- *                                  (e.g. manila/openstack_manila-xccdf.xml,profilename;horizon/openstack_horizon-xccdf.xml,profile)
+ *                                  these pairs are separated with a semicolon
+ *                                  (e.g. manila/openstack_manila-xccdf.xml,profilename;horizon/openstack_horizon-xccdf.xml,profile).
+ *                              For OVAL, a semicolon-separated list of paths to OVAL definition files; the profile part is ignored.
  *  XCCDF_VERSION               The XCCDF version (default 1.2)
  *  XCCDF_TAILORING_ID          The tailoring id (default None)
  *
@@ -118,19 +121,35 @@
 
 
 node('python') {
-    def pepperEnv = 'pepperEnv'
-
-    // XCCDF related variables
-    def benchmarksAndProfilesArray = XCCDF_BENCHMARKS.tokenize(';')
-    def benchmarksDir = XCCDF_BENCHMARKS_DIR ?: '/usr/share/xccdf-benchmarks/mirantis/'
-    def xccdfVersion = XCCDF_VERSION ?: '1.2'
-    def xccdfTailoringId = XCCDF_TAILORING_ID ?: 'None'
-    def targetServers = TARGET_SERVERS ?: '*'
-
     def salt = new com.mirantis.mk.Salt()
     def python = new com.mirantis.mk.Python()
     def common = new com.mirantis.mk.Common()
     def http = new com.mirantis.mk.Http()
+    def validate = new com.mirantis.mcp.Validate()
+
+    def pepperEnv = 'pepperEnv'
+
+    def benchmarkType = OPENSCAP_TEST_TYPE ?: 'xccdf'
+    def reportType
+    def benchmarksDir
+
+    switch (benchmarkType) {
+        case 'xccdf':
+            reportType = 'openscap';
+            benchmarksDir = XCCDF_BENCHMARKS_DIR ?: '/usr/share/xccdf-benchmarks/mirantis/';
+            break;
+        case 'oval':
+            reportType = 'cve';
+            benchmarksDir = XCCDF_BENCHMARKS_DIR ?: '/usr/share/oval-definitions/mirantis/';
+            break;
+        default:
+            throw new Exception('Unsupported value for OPENSCAP_TEST_TYPE, must be "oval" or "xccdf".')
+    }
+    // XCCDF related variables
+    def benchmarksAndProfilesArray = XCCDF_BENCHMARKS.tokenize(';')
+    def xccdfVersion = XCCDF_VERSION ?: '1.2'
+    def xccdfTailoringId = XCCDF_TAILORING_ID ?: 'None'
+    def targetServers = TARGET_SERVERS ?: '*'
 
     // To have an ability to work in heavy concurrency conditions
     def scanUUID = UUID.randomUUID().toString()
@@ -146,7 +165,7 @@
         python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
     }
 
-    stage ('Run openscap xccdf evaluation and attempt to upload the results to a dashboard') {
+    stage ('Run openscap evaluation and attempt to upload the results to a dashboard') {
         liveMinions = salt.getMinions(pepperEnv, targetServers)
 
         if (liveMinions.isEmpty()) {
@@ -161,49 +180,69 @@
         }
 
         def reportId
-        for (minion in liveMinions) {
+        def lastError
+        // Iterate oscap evaluation over the benchmarks
+        for (benchmark in benchmarksAndProfilesArray) {
+            def (benchmarkFilePath, profileName) = benchmark.tokenize(',').collect({it.trim()})
 
-            // Iterate oscap evaluation over the benchmarks
-            for (benchmark in benchmarksAndProfilesArray) {
-                def (benchmarkFilePath, profile) = benchmark.tokenize(',').collect({it.trim()})
+            // Remove extension from the benchmark name
+            def benchmarkPathWithoutExtension = benchmarkFilePath.replaceFirst('[.][^.]+$', '')
 
-                // Remove extension from the benchmark name
-                def benchmarkPathWithoutExtension = benchmarkFilePath.replaceFirst('[.][^.]+$', '')
+            // Get benchmark name
+            def benchmarkName = benchmarkPathWithoutExtension.tokenize('/')[-1]
 
-                // Get benchmark name
-                def benchmarkName = benchmarkPathWithoutExtension.tokenize('/')[-1]
+            // And build resultsDir based on this path
+            def resultsDir = "${resultsBaseDir}/${benchmarkName}"
+            if (profileName) {
+                resultsDir = "${resultsDir}/${profileName}"
+            }
 
-                // And build resultsDir based on this path
-                def resultsDir = "${resultsBaseDir}/${benchmarkPathWithoutExtension}"
+            def benchmarkFile = "${benchmarksDir}${benchmarkFilePath}"
 
-                def benchmarkFile = "${benchmarksDir}${benchmarkFilePath}"
+            // Evaluate the benchmark on all minions at once
+            salt.runSaltProcessStep(pepperEnv, targetServers, 'oscap.eval', [
+                benchmarkType, benchmarkFile, "results_dir=${resultsDir}",
+                "profile=${profileName}", "xccdf_version=${xccdfVersion}",
+                "tailoring_id=${xccdfTailoringId}"
+            ])
 
+            salt.cmdRun(pepperEnv, targetServers, "rm -f /tmp/${scanUUID}.tar.xz; tar -cJf /tmp/${scanUUID}.tar.xz -C ${resultsBaseDir} .")
+
+            // fetch and store results one by one
+            for (minion in liveMinions) {
                 def nodeShortName = minion.tokenize('.')[0]
+                def localResultsDir = "${artifactsDir}/${scanUUID}/${nodeShortName}"
 
-                def archiveName = "${scanUUID}_${nodeShortName}_${benchmarkName}.tar"
+                fileContentBase64 = validate.getFileContentEncoded(pepperEnv, minion, "/tmp/${scanUUID}.tar.xz")
+                writeFile file: "${scanUUID}.base64", text: fileContentBase64
 
-                // Evaluate the benchmark
-                salt.runSaltProcessStep(pepperEnv, minion, 'oscap.eval', [
-                    'xccdf', benchmarkFile, "results_dir=${resultsDir}",
-                    "profile=${profile}", "xccdf_version=${xccdfVersion}",
-                    "tailoring_id=${xccdfTailoringId}"
-                ])
+                sh "mkdir -p ${localResultsDir}"
+                sh "base64 -d ${scanUUID}.base64 | tar -xJ --strip-components 1 --directory ${localResultsDir}"
+                sh "rm -f ${scanUUID}.base64"
+            }
 
-                salt.cmdRun(pepperEnv, minion, "tar -cf /tmp/${archiveName} -C ${resultsBaseDir} .")
-                fileContents = salt.cmdRun(pepperEnv, minion, "cat /tmp/${archiveName}", true, null, false)['return'][0].values()[0].replaceAll('Salt command execution success', '')
+            // Remove archives which are no longer needed
+            salt.runSaltProcessStep(pepperEnv, targetServers, 'file.remove', "/tmp/${scanUUID}.tar.xz")
 
-                sh "mkdir -p ${artifactsDir}/${scanUUID}/${nodeShortName}"
-                writeFile file: "${archiveName}", text: fileContents
-                sh "tar --strip-components 1 -xf ${archiveName} --directory ${artifactsDir}/${scanUUID}/${nodeShortName}; rm -f ${archiveName}"
-
-                // Remove archive which is not needed anymore
-                salt.runSaltProcessStep(pepperEnv, minion, 'file.remove', "/tmp/${archiveName}")
+            // publish results one by one
+            for (minion in liveMinions) {
+                def nodeShortName = minion.tokenize('.')[0]
+                def benchmarkResultsDir = "${artifactsDir}/${scanUUID}/${nodeShortName}/${benchmarkName}"
+                if (profileName) {
+                    benchmarkResultsDir = "${benchmarkResultsDir}/${profileName}"
+                }
 
                 // Attempt to upload the scanning results to the dashboard
                 if (UPLOAD_TO_DASHBOARD.toBoolean()) {
                     if (common.validInputParam('DASHBOARD_API_URL')) {
                         def cloudName = salt.getGrain(pepperEnv, minion, 'domain')['return'][0].values()[0].values()[0]
-                        reportId = uploadResultToDashboard(DASHBOARD_API_URL, cloudName, minion, "openscap", reportId, salt.getFileContent(pepperEnv, minion, "${resultsDir}/results.json"))
+                        try {
+                            def nodeResults = readFile "${benchmarkResultsDir}/results.json"
+                            reportId = uploadResultToDashboard(DASHBOARD_API_URL, cloudName, minion, reportType, reportId, nodeResults)
+                            common.infoMsg("Report ID is ${reportId}.")
+                        } catch (Exception e) {
+                            lastError = e
+                        }
                     } else {
                         throw new Exception('Uploading to the dashboard is enabled but the DASHBOARD_API_URL was not set')
                     }
@@ -216,6 +255,10 @@
 
         // Archive the build output artifacts
         archiveArtifacts artifacts: "*.xz"
+        if (lastError) {
+            common.infoMsg("Uploading some results to dashboard report ${reportId} failed. Raising the last error.")
+            throw lastError
+        }
     }
 
 /*  // Will be implemented later
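
Note: the reworked fetch path archives results once per minion and pulls them through the Salt API as base64 text instead of cat-ing raw tar bytes. A condensed sketch of that pattern (getFileContentEncoded is assumed to return the remote file base64-encoded, as used above):

def fetchResults(pepperEnv, minion, remoteArchive, localDir) {
    def validate = new com.mirantis.mcp.Validate()
    // base64 keeps the binary archive intact across the JSON transport
    def b64 = validate.getFileContentEncoded(pepperEnv, minion, remoteArchive)
    writeFile file: 'results.base64', text: b64
    sh "mkdir -p ${localDir}"
    sh "base64 -d results.base64 | tar -xJ --strip-components 1 --directory ${localDir}"
    sh 'rm -f results.base64'
}
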