Merge "Fix issue with wrong container name"
diff --git a/restore-cassandra.groovy b/restore-cassandra.groovy
index c98ff17..c20c3a0 100644
--- a/restore-cassandra.groovy
+++ b/restore-cassandra.groovy
@@ -20,70 +20,110 @@
         }
 
         stage('Restore') {
-            try {
-                salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.stop', ['neutron-server'], null, true)
-            } catch (Exception er) {
-                common.warningMsg('neutron-server service already stopped')
+            // get opencontrail version
+            def _pillar = salt.getPillar(pepperEnv, "I@opencontrail:control", '_param:opencontrail_version')
+            def contrailVersion = _pillar['return'][0].values()[0]
+            common.infoMsg("Contrail version is ${contrailVersion}")
+            if (contrailVersion >= 4) {
+                common.infoMsg("There will be steps for OC4.0 restore")
+                try {
+                    salt.cmdRun(pepperEnv, 'I@opencontrail:control', 'doctrail controller systemctl stop contrail-database' )
+                } catch (Exception err) {
+                    common.warningMsg('contrail-database already stopped? ' + err.getMessage())
+                }
+                try {
+                    salt.cmdRun(pepperEnv, 'I@opencontrail:control', 'doctrail controller bash -c "for f in $(ls /var/lib/cassandra/); do rm -r /var/lib/cassandra/$f; done"')
+                } catch (Exception err) {
+                    common.warningMsg('cassandra data already removed? ' + err.getMessage())
+                }
+                try {
+                    salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', 'doctrail controller systemctl start contrail-database' )
+                } catch (Exception err) {
+                    common.warningMsg('contrail-database already started? ' + err.getMessage())
+                }
+                // remove the restore-already-happened marker file, if present
+                try {
+                    salt.cmdRun(pepperEnv, 'I@cassandra:backup:client', 'rm  /var/backups/cassandra/dbrestored')
+                } catch (Exception err) {
+                    common.warningMsg('/var/backups/cassandra/dbrestored not present? ' + err.getMessage())
+                }
+                // perform the actual restore
+                salt.enforceState(pepperEnv, 'I@cassandra:backup:client', "cassandra")
+                salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'system.reboot', null, [], true, 5)
+                sleep(5)
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'system.reboot', null, [], true, 5)
+                // the lovely wait-60-seconds mantra before restarting supervisor-database service
+                sleep(60)
+                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "doctrail controller systemctl restart contrail-database")
+                // another mantra
+                sleep(60)
+                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "doctrail controller contrail-status")
             }
-            try {
-                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-config'], null, true)
-            } catch (Exception er) {
-                common.warningMsg('Supervisor-config service already stopped')
+            else {
+                try {
+                    salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.stop', ['neutron-server'], null, true)
+                } catch (Exception er) {
+                    common.warningMsg('neutron-server service already stopped')
+                }
+                try {
+                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-config'], null, true)
+                } catch (Exception er) {
+                    common.warningMsg('Supervisor-config service already stopped')
+                }
+                // Cassandra restore section
+                try {
+                    salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-database'], null, true)
+                } catch (Exception er) {
+                    common.warningMsg('Supervisor-database service already stopped')
+                }
+                try {
+                    salt.cmdRun(pepperEnv, 'I@opencontrail:control', "mkdir -p /root/cassandra/cassandra.bak")
+                } catch (Exception er) {
+                    common.warningMsg('Directory already exists')
+                }
+
+                try {
+                    salt.cmdRun(pepperEnv, 'I@opencontrail:control', "mv /var/lib/cassandra/* /root/cassandra/cassandra.bak")
+                } catch (Exception er) {
+                    common.warningMsg('Files were already moved')
+                }
+                try {
+                    salt.cmdRun(pepperEnv, 'I@opencontrail:control', "rm -rf /var/lib/cassandra/*")
+                } catch (Exception er) {
+                    common.warningMsg('Directory already empty')
+                }
+
+                _pillar = salt.getPillar(pepperEnv, "I@cassandra:backup:client", 'cassandra:backup:backup_dir')
+                def backupDir = _pillar['return'][0].values()[0] ?: '/var/backups/cassandra'
+                common.infoMsg("Backup directory is ${backupDir}")
+                salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'file.remove', ["${backupDir}/dbrestored"], null, true)
+
+                salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'service.start', ['supervisor-database'], null, true)
+
+                // wait until supervisor-database service is up
+                salt.commandStatus(pepperEnv, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
+                sleep(60)
+
+                // performs restore
+                salt.enforceState(pepperEnv, 'I@cassandra:backup:client', "cassandra.backup")
+                salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
+                sleep(5)
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
+
+                // wait until supervisor-database service is up
+                salt.commandStatus(pepperEnv, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
+                salt.commandStatus(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'service supervisor-database status', 'running')
+                sleep(5)
+
+                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.restart', ['supervisor-database'], null, true)
+                salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.start', ['neutron-server'], null, true)
+
+                // wait until contrail-status is up
+                salt.commandStatus(pepperEnv, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
+
+                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "nodetool status")
+                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "contrail-status")
             }
-            // Cassandra restore section
-            try {
-                salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.stop', ['supervisor-database'], null, true)
-            } catch (Exception er) {
-                common.warningMsg('Supervisor-database service already stopped')
-            }
-            try {
-                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "mkdir -p /root/cassandra/cassandra.bak")
-            } catch (Exception er) {
-                common.warningMsg('Directory already exists')
-            }
-
-            try {
-                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "mv /var/lib/cassandra/* /root/cassandra/cassandra.bak")
-            } catch (Exception er) {
-                common.warningMsg('Files were already moved')
-            }
-            try {
-                salt.cmdRun(pepperEnv, 'I@opencontrail:control', "rm -rf /var/lib/cassandra/*")
-            } catch (Exception er) {
-                common.warningMsg('Directory already empty')
-            }
-
-            _pillar = salt.getPillar(pepperEnv, "I@cassandra:backup:client", 'cassandra:backup:backup_dir')
-            backup_dir = _pillar['return'][0].values()[0]
-            if(backup_dir == null || backup_dir.isEmpty()) { backup_dir='/var/backups/cassandra' }
-            print(backup_dir)
-            salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'file.remove', ["${backup_dir}/dbrestored"], null, true)
-
-            salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'service.start', ['supervisor-database'], null, true)
-
-            // wait until supervisor-database service is up
-            salt.commandStatus(pepperEnv, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
-            sleep(60)
-
-            // performs restore
-            salt.enforceState(pepperEnv, 'I@cassandra:backup:client', "cassandra.backup")
-            salt.runSaltProcessStep(pepperEnv, 'I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
-            sleep(5)
-            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'system.reboot', null, null, true, 5)
-
-            // wait until supervisor-database service is up
-            salt.commandStatus(pepperEnv, 'I@cassandra:backup:client', 'service supervisor-database status', 'running')
-            salt.commandStatus(pepperEnv, 'I@opencontrail:control and not I@cassandra:backup:client', 'service supervisor-database status', 'running')
-            sleep(5)
-
-            salt.runSaltProcessStep(pepperEnv, 'I@opencontrail:control', 'service.restart', ['supervisor-database'], null, true)
-            salt.runSaltProcessStep(pepperEnv, 'I@neutron:server', 'service.start', ['neutron-server'], null, true)
-
-            // wait until contrail-status is up
-            salt.commandStatus(pepperEnv, 'I@opencontrail:control', "contrail-status | grep -v == | grep -v \'disabled on boot\' | grep -v nodemgr | grep -v active | grep -v backup", null, false)
-            
-            salt.cmdRun(pepperEnv, 'I@opencontrail:control', "nodetool status")
-            salt.cmdRun(pepperEnv, 'I@opencontrail:control', "contrail-status")
         }
     }
 }
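
Note on the pillar lookup used in the Restore stage above: salt.getPillar() returns a nested structure, and the version value is pulled out with ['return'][0].values()[0]. A minimal sketch of that unpacking as plain Groovy, with a made-up minion id and version value:

    // Hypothetical shape of a salt.getPillar() reply; minion id and value are invented.
    def _pillar = [
        'return': [
            ['ntw01.example.local': 4.0]   // one map entry per targeted minion
        ]
    ]
    // ['return'][0] is the minion -> value map; values()[0] takes the first minion's value
    def contrailVersion = _pillar['return'][0].values()[0]
    assert contrailVersion >= 4
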
diff --git a/test-cookiecutter-reclass.groovy b/test-cookiecutter-reclass.groovy
index c09c572..8826bc1 100644
--- a/test-cookiecutter-reclass.groovy
+++ b/test-cookiecutter-reclass.groovy
@@ -221,8 +221,13 @@
     // Simple function, to check and define branch-around variables
     // In general, simply make transition updates for non-master branch
     // based on magic logic
-    def message = '<br/>'
+    def newline = '<br/>'
+    def messages = []
     if (env.GERRIT_PROJECT) {
+        messages.add("<font color='red'>GerritTrigger detected! We are in auto-mode:</font>")
+        messages.add("Test env variables has been changed:")
+        messages.add("COOKIECUTTER_TEMPLATE_BRANCH => ${gerritDataCC['gerritBranch']}")
+        messages.add("RECLASS_MODEL_BRANCH => ${gerritDataRS['gerritBranch']}")
         // TODO are we going to have such branches?
         if (!['nightly', 'testing', 'stable', 'proposed', 'master'].contains(env.GERRIT_BRANCH)) {
             gerritDataCC['gerritBranch'] = env.GERRIT_BRANCH
@@ -233,20 +238,16 @@
         if (env.GERRIT_PROJECT == 'salt-models/reclass-system') {
             gerritDataRS['gerritRefSpec'] = env.GERRIT_REFSPEC
             gerritDataRS['GERRIT_CHANGE_NUMBER'] = env.GERRIT_CHANGE_NUMBER
-            message = message + "<br/>RECLASS_SYSTEM_GIT_REF =>${gerritDataRS['gerritRefSpec']}"
+            messages.add("RECLASS_SYSTEM_GIT_REF => ${gerritDataRS['gerritRefSpec']}")
         } else if (env.GERRIT_PROJECT == 'mk/cookiecutter-templates') {
             gerritDataCC['gerritRefSpec'] = env.GERRIT_REFSPEC
             gerritDataCC['GERRIT_CHANGE_NUMBER'] = env.GERRIT_CHANGE_NUMBER
-            message = message + "<br/>COOKIECUTTER_TEMPLATE_REF =>${gerritDataCC['gerritRefSpec']}"
+            messages.add("COOKIECUTTER_TEMPLATE_REF => ${gerritDataCC['gerritRefSpec']}")
         } else {
             error("Unsuported gerrit-project triggered:${env.GERRIT_PROJECT}")
         }
-        message = "<font color='red'>GerritTrigger detected! We are in auto-mode:</font>" +
-            "<br/>Test env variables has been changed:" +
-            "<br/>COOKIECUTTER_TEMPLATE_BRANCH => ${gerritDataCC['gerritBranch']}" +
-            "<br/>RECLASS_MODEL_BRANCH=> ${gerritDataRS['gerritBranch']}" + message
     } else {
-        message = "<font color='red'>Non-gerrit trigger run detected!</font>" + message
+        messages.add("<font color='red'>Non-gerrit trigger run detected!</font>")
     }
     gerritDataCCHEAD << gerritDataCC
     gerritDataCCHEAD['gerritRefSpec'] = null
@@ -262,8 +263,9 @@
     if (!common.checkRemoteBinary([apt_mk_version: testDistribRevision]).linux_system_repo_url) {
         common.errorMsg("Binary release: ${testDistribRevision} not exist. Fallback to 'proposed'! ")
         testDistribRevision = 'proposed'
-        message = "<br/>DISTRIB_REVISION =>${testDistribRevision}" + message
+        messages.add("DISTRIB_REVISION => ${testDistribRevision}")
     }
+    def message = messages.join(newline) + newline
     currentBuild.description = currentBuild.description ? message + currentBuild.description : message
 }
 
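The change above replaces incremental string concatenation with a messages list that is joined once at the end. A small sketch of the resulting build description, using hypothetical values:

    // Sketch of the list-then-join pattern; the values are made up.
    def newline = '<br/>'
    def messages = []
    messages.add("<font color='red'>GerritTrigger detected! We are in auto-mode:</font>")
    messages.add("COOKIECUTTER_TEMPLATE_BRANCH => master")
    def message = messages.join(newline) + newline
    assert message == "<font color='red'>GerritTrigger detected! We are in auto-mode:</font>" +
                      "<br/>COOKIECUTTER_TEMPLATE_BRANCH => master<br/>"
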
@@ -293,25 +295,29 @@
     // tar.gz
     // ├── contexts
     // │   └── ceph.yml
-    // ├── ${reclassDirName} <<< reclass system
+    // ├── classes-system <<< reclass system
     // ├── model
     // │   └── ceph       <<< from `context basename`
     // │       ├── classes
     // │       │   ├── cluster
-    // │       │   └── system -> ../../../${reclassDirName}
+    // │       │   └── system -> ../../../classes-system
     // │       └── nodes
     // │           └── cfg01.ceph-cluster-domain.local.yml
+    def archiveBaseName = common.GetBaseName(archiveName, '.tar.gz')
+    def classesSystemDir = 'classes-system'
+    // copy the reclass system under envPath with -R and a trailing / so symlinks are copied directly
+    sh("cp -R ${archiveBaseName}/ ${envPath}/${classesSystemDir}")
     dir(envPath) {
         for (String context : contextList) {
             def basename = common.GetBaseName(context, '.yml')
-            dir("${envPath}/model/${basename}") {
-                sh(script: "mkdir -p classes/; ln -sfv ../../../../${common.GetBaseName(archiveName, '.tar.gz')} classes/system ")
+            dir("${envPath}/model/${basename}/classes") {
+                sh(script: "ln -sfv ../../../${classesSystemDir} system ")
             }
         }
         // replace all generated passwords/secrets/keys with hardcode value for infra/secrets.yaml
         replaceGeneratedValues("${envPath}/model")
         // Save all models and all contexts. Warning! `h` flag must be used!
-        sh(script: "set -ex; tar -czhf ${env.WORKSPACE}/${archiveName} --exclude='*@tmp' model contexts", returnStatus: true)
+        sh(script: "set -ex; tar -czhf ${env.WORKSPACE}/${archiveName} --exclude='*@tmp' contexts model ${classesSystemDir}", returnStatus: true)
     }
     archiveArtifacts artifacts: archiveName
 }
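
With the reclass system now copied to classes-system directly under envPath, each model's classes/system link only has to climb three directory levels instead of four, since the link is created at model/<basename>/classes/system. A sketch of the path arithmetic, using a hypothetical workspace path:

    // Hypothetical env path; shows where the relative symlink target resolves.
    def envPath  = '/var/lib/jenkins/workspace/test-env'
    def basename = 'ceph'
    def linkDir  = "${envPath}/model/${basename}/classes"          // where 'system' is created
    def resolved = new File(linkDir, '../../../classes-system').getCanonicalPath()
    assert resolved == "${envPath}/classes-system".toString()
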
@@ -402,10 +408,12 @@
             stage("Compare cluster lvl Head/Patched") {
                 // Compare patched and HEAD reclass pillars
                 compareRoot = "${env.WORKSPACE}/cluster_compare/"
+                // extract archives and drop all copied classes/system dirs before comparing
                 sh(script: """
                    mkdir -pv ${compareRoot}/new ${compareRoot}/old
                    tar -xzf ${patchedReclassArtifactName}  --directory ${compareRoot}/new
                    tar -xzf ${headReclassArtifactName}  --directory ${compareRoot}/old
+                   find ${compareRoot} -name classes -type d -exec rm -rf '{}/system' \\;
                    """)
                 common.warningMsg('infra/secrets.yml has been skipped from compare!')
                 result = '\n' + common.comparePillars(compareRoot, env.BUILD_URL, "-Ev \'infra/secrets.yml|\\.git\'")
diff --git a/test-openscap-pipeline.groovy b/test-openscap-pipeline.groovy
index c57e67d..b886467 100644
--- a/test-openscap-pipeline.groovy
+++ b/test-openscap-pipeline.groovy
@@ -3,13 +3,16 @@
  * Run openscap xccdf evaluation on given nodes
  *
  * Expected parametes:
+ *  OPENSCAP_TEST_TYPE          Type of OpenSCAP evaluation to run, either 'xccdf' or 'oval'
  *  SALT_MASTER_URL             Full Salt API address.
  *  SALT_MASTER_CREDENTIALS     Credentials to the Salt API.
  *
- *  XCCDF_BENCHMARKS_DIR        The XCCDF benchmarks base directory (default /usr/share/xccdf-benchmarks/mirantis/)
+ *  XCCDF_BENCHMARKS_DIR        Base directory for XCCDF benchmarks (default /usr/share/xccdf-benchmarks/mirantis/)
+ *                              or OVAL definitions (default /usr/share/oval-definitions/mirantis/)
  *  XCCDF_BENCHMARKS            List of pairs XCCDF benchmark filename and corresponding profile separated with ','
- *                                  these pairs are separated with semicolon.
- *                                  (e.g. manila/openstack_manila-xccdf.xml,profilename;horizon/openstack_horizon-xccdf.xml,profile)
+ *                                  these pairs are separated with a semicolon
+ *                                  (e.g. manila/openstack_manila-xccdf.xml,profilename;horizon/openstack_horizon-xccdf.xml,profile).
+ *                              For OVAL definitions, provide paths to the OVAL definition files separated by semicolons; the profile part is ignored.
  *  XCCDF_VERSION               The XCCDF version (default 1.2)
  *  XCCDF_TAILORING_ID          The tailoring id (default None)
  *
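
For reference, a sketch of how the XCCDF_BENCHMARKS string documented above is split further down in the pipeline; the example value is taken from the comment and the variable names mirror the code below:

    // Hypothetical parameter value; tokenized the same way as in the evaluation loop.
    def XCCDF_BENCHMARKS = 'manila/openstack_manila-xccdf.xml,profilename;horizon/openstack_horizon-xccdf.xml,profile'
    def benchmarksAndProfilesArray = XCCDF_BENCHMARKS.tokenize(';')
    def (benchmarkFilePath, profileName) = benchmarksAndProfilesArray[0].tokenize(',').collect({ it.trim() })
    assert benchmarkFilePath == 'manila/openstack_manila-xccdf.xml'
    assert profileName == 'profilename'
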
@@ -118,19 +121,35 @@
 
 
 node('python') {
-    def pepperEnv = 'pepperEnv'
-
-    // XCCDF related variables
-    def benchmarksAndProfilesArray = XCCDF_BENCHMARKS.tokenize(';')
-    def benchmarksDir = XCCDF_BENCHMARKS_DIR ?: '/usr/share/xccdf-benchmarks/mirantis/'
-    def xccdfVersion = XCCDF_VERSION ?: '1.2'
-    def xccdfTailoringId = XCCDF_TAILORING_ID ?: 'None'
-    def targetServers = TARGET_SERVERS ?: '*'
-
     def salt = new com.mirantis.mk.Salt()
     def python = new com.mirantis.mk.Python()
     def common = new com.mirantis.mk.Common()
     def http = new com.mirantis.mk.Http()
+    def validate = new com.mirantis.mcp.Validate()
+
+    def pepperEnv = 'pepperEnv'
+
+    def benchmarkType = OPENSCAP_TEST_TYPE ?: 'xccdf'
+    def reportType
+    def benchmarksDir
+
+    switch (benchmarkType) {
+        case 'xccdf':
+            reportType = 'openscap';
+            benchmarksDir = XCCDF_BENCHMARKS_DIR ?: '/usr/share/xccdf-benchmarks/mirantis/';
+            break;
+        case 'oval':
+            reportType = 'cve';
+            benchmarksDir = XCCDF_BENCHMARKS_DIR ?: '/usr/share/oval-definitions/mirantis/';
+            break;
+        default:
+            throw new Exception('Unsupported value for OPENSCAP_TEST_TYPE, must be "oval" or "xccdf".')
+    }
+    // XCCDF related variables
+    def benchmarksAndProfilesArray = XCCDF_BENCHMARKS.tokenize(';')
+    def xccdfVersion = XCCDF_VERSION ?: '1.2'
+    def xccdfTailoringId = XCCDF_TAILORING_ID ?: 'None'
+    def targetServers = TARGET_SERVERS ?: '*'
 
     // To have an ability to work in heavy concurrency conditions
     def scanUUID = UUID.randomUUID().toString()
@@ -146,7 +165,7 @@
         python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
     }
 
-    stage ('Run openscap xccdf evaluation and attempt to upload the results to a dashboard') {
+    stage ('Run openscap evaluation and attempt to upload the results to a dashboard') {
         liveMinions = salt.getMinions(pepperEnv, targetServers)
 
         if (liveMinions.isEmpty()) {
@@ -161,49 +180,69 @@
         }
 
         def reportId
-        for (minion in liveMinions) {
+        def lastError
+        // Iterate oscap evaluation over the benchmarks
+        for (benchmark in benchmarksAndProfilesArray) {
+            def (benchmarkFilePath, profileName) = benchmark.tokenize(',').collect({it.trim()})
 
-            // Iterate oscap evaluation over the benchmarks
-            for (benchmark in benchmarksAndProfilesArray) {
-                def (benchmarkFilePath, profile) = benchmark.tokenize(',').collect({it.trim()})
+            // Remove extension from the benchmark name
+            def benchmarkPathWithoutExtension = benchmarkFilePath.replaceFirst('[.][^.]+$', '')
 
-                // Remove extension from the benchmark name
-                def benchmarkPathWithoutExtension = benchmarkFilePath.replaceFirst('[.][^.]+$', '')
+            // Get benchmark name
+            def benchmarkName = benchmarkPathWithoutExtension.tokenize('/')[-1]
 
-                // Get benchmark name
-                def benchmarkName = benchmarkPathWithoutExtension.tokenize('/')[-1]
+            // And build resultsDir based on this path
+            def resultsDir = "${resultsBaseDir}/${benchmarkName}"
+            if (profileName) {
+                resultsDir = "${resultsDir}/${profileName}"
+            }
 
-                // And build resultsDir based on this path
-                def resultsDir = "${resultsBaseDir}/${benchmarkPathWithoutExtension}"
+            def benchmarkFile = "${benchmarksDir}${benchmarkFilePath}"
 
-                def benchmarkFile = "${benchmarksDir}${benchmarkFilePath}"
+            // Evaluate the benchmark on all minions at once
+            salt.runSaltProcessStep(pepperEnv, targetServers, 'oscap.eval', [
+                benchmarkType, benchmarkFile, "results_dir=${resultsDir}",
+                "profile=${profileName}", "xccdf_version=${xccdfVersion}",
+                "tailoring_id=${xccdfTailoringId}"
+            ])
 
+            salt.cmdRun(pepperEnv, targetServers, "rm -f /tmp/${scanUUID}.tar.xz; tar -cJf /tmp/${scanUUID}.tar.xz -C ${resultsBaseDir} .")
+
+            // fetch and store results one by one
+            for (minion in liveMinions) {
                 def nodeShortName = minion.tokenize('.')[0]
+                def localResultsDir = "${artifactsDir}/${scanUUID}/${nodeShortName}"
 
-                def archiveName = "${scanUUID}_${nodeShortName}_${benchmarkName}.tar"
+                fileContentBase64 = validate.getFileContentEncoded(pepperEnv, minion, "/tmp/${scanUUID}.tar.xz")
+                writeFile file: "${scanUUID}.base64", text: fileContentBase64
 
-                // Evaluate the benchmark
-                salt.runSaltProcessStep(pepperEnv, minion, 'oscap.eval', [
-                    'xccdf', benchmarkFile, "results_dir=${resultsDir}",
-                    "profile=${profile}", "xccdf_version=${xccdfVersion}",
-                    "tailoring_id=${xccdfTailoringId}"
-                ])
+                sh "mkdir -p ${localResultsDir}"
+                sh "base64 -d ${scanUUID}.base64 | tar -xJ --strip-components 1 --directory ${localResultsDir}"
+                sh "rm -f ${scanUUID}.base64"
+            }
 
-                salt.cmdRun(pepperEnv, minion, "tar -cf /tmp/${archiveName} -C ${resultsBaseDir} .")
-                fileContents = salt.cmdRun(pepperEnv, minion, "cat /tmp/${archiveName}", true, null, false)['return'][0].values()[0].replaceAll('Salt command execution success', '')
+            // Remove archives which are no longer needed
+            salt.runSaltProcessStep(pepperEnv, targetServers, 'file.remove', "/tmp/${scanUUID}.tar.xz")
 
-                sh "mkdir -p ${artifactsDir}/${scanUUID}/${nodeShortName}"
-                writeFile file: "${archiveName}", text: fileContents
-                sh "tar --strip-components 1 -xf ${archiveName} --directory ${artifactsDir}/${scanUUID}/${nodeShortName}; rm -f ${archiveName}"
-
-                // Remove archive which is not needed anymore
-                salt.runSaltProcessStep(pepperEnv, minion, 'file.remove', "/tmp/${archiveName}")
+            // publish results one by one
+            for (minion in liveMinions) {
+                def nodeShortName = minion.tokenize('.')[0]
+                def benchmarkResultsDir = "${artifactsDir}/${scanUUID}/${nodeShortName}/${benchmarkName}"
+                if (profileName) {
+                    benchmarkResultsDir = "${benchmarkResultsDir}/${profileName}"
+                }
 
                 // Attempt to upload the scanning results to the dashboard
                 if (UPLOAD_TO_DASHBOARD.toBoolean()) {
                     if (common.validInputParam('DASHBOARD_API_URL')) {
                         def cloudName = salt.getGrain(pepperEnv, minion, 'domain')['return'][0].values()[0].values()[0]
-                        reportId = uploadResultToDashboard(DASHBOARD_API_URL, cloudName, minion, "openscap", reportId, salt.getFileContent(pepperEnv, minion, "${resultsDir}/results.json"))
+                        try {
+                            def nodeResults = readFile "${benchmarkResultsDir}/results.json"
+                            reportId = uploadResultToDashboard(DASHBOARD_API_URL, cloudName, minion, reportType, reportId, nodeResults)
+                            common.infoMsg("Report ID is ${reportId}.")
+                        } catch (Exception e) {
+                            lastError = e
+                        }
                     } else {
                         throw new Exception('Uploading to the dashboard is enabled but the DASHBOARD_API_URL was not set')
                     }
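
One note on the new result handling above: results are now archived per benchmark on the minion (tar -cJf ... -C ${resultsBaseDir} .), shipped to the master base64-encoded, and unpacked with --strip-components 1, so the local layout mirrors the remote one. A sketch of how the local paths line up, with made-up names:

    // All values hypothetical; shows how benchmarkResultsDir matches the unpacked layout.
    def artifactsDir   = 'openscap'
    def scanUUID       = '1234-abcd'
    def nodeShortName  = 'ctl01'
    def benchmarkName  = 'openstack_manila-xccdf'
    def profileName    = 'profilename'

    // Remote: oscap.eval writes under <resultsBaseDir>/<benchmarkName>/<profileName>, and the
    // archive entries start with './' because of 'tar ... -C <resultsBaseDir> .'.
    // Local: 'tar -xJ --strip-components 1' drops that './', so the same relative path
    // reappears under localResultsDir.
    def localResultsDir     = "${artifactsDir}/${scanUUID}/${nodeShortName}"
    def benchmarkResultsDir = "${localResultsDir}/${benchmarkName}/${profileName}"
    assert benchmarkResultsDir.toString() == 'openscap/1234-abcd/ctl01/openstack_manila-xccdf/profilename'
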
@@ -216,6 +255,10 @@
 
         // Archive the build output artifacts
         archiveArtifacts artifacts: "*.xz"
+        if (lastError) {
+            common.infoMsg("Uploading some results to the dashboard report ${reportId} failed. Raising the last error.")
+            throw lastError
+        }
     }
 
 /*  // Will be implemented later