Merge "Add Keystone v3 envs to k8s-simple-deploy pipeline"
diff --git a/build-debian-packages-libvirt-exporter.groovy b/build-debian-packages-libvirt-exporter.groovy
new file mode 100644
index 0000000..01e2f10
--- /dev/null
+++ b/build-debian-packages-libvirt-exporter.groovy
@@ -0,0 +1,72 @@
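+/**
+ * Build the libvirt-exporter Debian package and optionally upload it to Aptly.
+ *
+ * Expected parameters:
+ *   SOURCE_URL          source git repository URL
+ *   SOURCE_BRANCH       source git branch to build from
+ *   SOURCE_CREDENTIALS  credentials ID used to access the source repository
+ *   UPLOAD_APTLY        upload the built packages to Aptly when true
+ *   APTLY_URL           Aptly API URL
+ *   APTLY_REPO          target Aptly repository name
+ */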
+def common = new com.mirantis.mk.Common()
+def git = new com.mirantis.mk.Git()
+def aptly = new com.mirantis.mk.Aptly()
+
+def timestamp = common.getDatetime()
+def version = "0.1~${timestamp}"
+
+node('docker') {
+    try {
+
+        stage("cleanup") {
+            sh("rm -rf * || true")
+        }
+
+        stage("checkout") {
+            git.checkoutGitRepository(
+                "libvirt-exporter-${version}",
+                "${SOURCE_URL}",
+                SOURCE_BRANCH,
+                SOURCE_CREDENTIALS,
+                true,
+                30,
+                1
+            )
+        }
+
+        stage("build binary") {
+            dir("libvirt-exporter-${version}") {
+                sh("sed -i 's/VERSION/${version}/g' debian/changelog")
+                sh("debuild -us -uc")
+            }
+
+            archiveArtifacts artifacts: "*.deb"
+        }
+
+        if (UPLOAD_APTLY.toBoolean()) {
+            lock("aptly-api") {
+                stage("upload") {
+                    def buildSteps = [:]
+                    def debFiles = sh script: "ls *.deb", returnStdout: true
+                    def debFilesArray = debFiles.trim().tokenize()
+                    def workspace = common.getWorkspace()
+                    for (int i = 0; i < debFilesArray.size(); i++) {
+                        def debFile = debFilesArray[i]
+                        buildSteps[debFile] = aptly.uploadPackageStep(
+                            "${workspace}/" + debFile,
+                            APTLY_URL,
+                            APTLY_REPO,
+                            true
+                        )
+                    }
+                    parallel buildSteps
+                }
+                stage("publish") {
+                    aptly.snapshotRepo(APTLY_URL, APTLY_REPO, timestamp)
+                    aptly.publish(APTLY_URL)
+                }
+            }
+        }
+
+    } catch (Throwable e) {
+        // If an exception was thrown, the build failed
+        currentBuild.result = "FAILURE"
+        throw e
+    } finally {
+        common.sendNotification(currentBuild.result, "", ["slack"])
+
+        if (currentBuild.result != 'FAILURE') {
+            sh("rm -rf *")
+        }
+    }
+}
diff --git a/lab-pipeline.groovy b/lab-pipeline.groovy
index ddfe37a..da04f62 100644
--- a/lab-pipeline.groovy
+++ b/lab-pipeline.groovy
@@ -163,6 +163,14 @@
 
                     orchestrate.installKubernetesControl(saltMaster)
                 }
+
+                if (common.checkContains('INSTALL', 'contrail')) {
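+                    // Deploy OpenContrail for Kubernetes: networking services first, then the compute nodes.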
+                    stage('Install Contrail for Kubernetes') {
+                        orchestrate.installContrailNetwork(saltMaster)
+                        orchestrate.installContrailCompute(saltMaster)
+                    }
+                }
             }
 
             // install openstack
diff --git a/openstack-control-upgrade.groovy b/openstack-control-upgrade.groovy
index 1273d6e..9e6fd01 100644
--- a/openstack-control-upgrade.groovy
+++ b/openstack-control-upgrade.groovy
@@ -123,23 +123,54 @@
                             salt.runSaltProcessStep(saltMaster, 'I@mysql:client', 'file.remove', ["/root/mysql/flags/${databasesList[i]}-installed"], null, true)
                         }
                     }
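+                    // Re-apply mysql.client only when _upgrade databases were actually found and dropped.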
+                    salt.enforceState(saltMaster, 'I@mysql:client', 'mysql.client')
                 }else{
                     common.errorMsg("No _upgrade databases were returned")
                 }
 
-                salt.enforceState(saltMaster, 'I@mysql:client', 'mysql.client')
-
                 try {
                     salt.enforceState(saltMaster, 'upg*', 'keystone.server')
                 } catch (Exception e) {
-                    salt.runSaltProcessStep(saltMaster, 'upg*', 'service.reload', ['apache2'], null, true)
-                    common.warningMsg('reload of apache2. We should continue to run')
+                    common.warningMsg('Restarting Apache2 and enforcing keystone.server state again')
+                    salt.runSaltProcessStep(saltMaster, 'upg*', 'service.restart', ['apache2'], null, true)
                 }
-                salt.enforceState(saltMaster, 'upg*', ['keystone.client', 'glance', 'keystone.server'])
-                salt.enforceState(saltMaster, 'upg*', 'nova')
-                salt.enforceState(saltMaster, 'upg*', 'nova')
-                salt.enforceState(saltMaster, 'upg*', ['cinder', 'neutron', 'heat'])
-
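+                // Some states occasionally fail on the first apply during the upgrade, so each one is retried once after a warning.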
+                try {
+                    salt.enforceState(saltMaster, 'upg*', 'keystone.client')
+                } catch (Exception e) {
+                    common.warningMsg('Running keystone.client state again')
+                    salt.enforceState(saltMaster, 'upg*', 'keystone.client')
+                }
+                try {
+                    salt.enforceState(saltMaster, 'upg*', 'glance')
+                } catch (Exception e) {
+                    common.warningMsg('Running glance state again')
+                    salt.enforceState(saltMaster, 'upg*', 'glance')
+                }
+                salt.enforceState(saltMaster, 'upg*', 'keystone.server')
+                try {
+                    salt.enforceState(saltMaster, 'upg*', 'nova')
+                } catch (Exception e) {
+                    common.warningMsg('Running nova state again')
+                    salt.enforceState(saltMaster, 'upg*', 'nova')
+                }
+                try {
+                    salt.enforceState(saltMaster, 'upg*', 'cinder')
+                } catch (Exception e) {
+                    common.warningMsg('Running cinder state again')
+                    salt.enforceState(saltMaster, 'upg*', 'cinder')
+                }
+                try {
+                    salt.enforceState(saltMaster, 'upg*', 'neutron')
+                } catch (Exception e) {
+                    common.warningMsg('Running neutron state again')
+                    salt.enforceState(saltMaster, 'upg*', 'neutron')
+                }
+                try {
+                    salt.enforceState(saltMaster, 'upg*', 'heat')
+                } catch (Exception e) {
+                    common.warningMsg('Running heat state again')
+                    salt.enforceState(saltMaster, 'upg*', 'heat')
+                }
                 salt.cmdRun(saltMaster, 'upg01*', '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
             }
         }
@@ -150,7 +181,6 @@
             }
         }
 
-
         if (STAGE_REAL_UPGRADE.toBoolean() == true) {
             stage('Real upgrade') {
                 // # actual upgrade
@@ -228,7 +258,6 @@
 
                 salt.cmdRun(saltMaster, 'I@backupninja:client', 'backupninja -n --run /etc/backup.d/101.mysql')
                 salt.cmdRun(saltMaster, 'I@backupninja:client', 'backupninja -n --run /etc/backup.d/200.backup.rsync')
-                
 
                 try {
                     salt.cmdRun(saltMaster, 'I@salt:master', "salt-key -d ctl01.${domain},ctl02.${domain},ctl03.${domain},prx01.${domain},prx02.${domain} -y")
@@ -261,28 +290,76 @@
                 // salt "ctl*" state.sls memcached
                 // salt "ctl*" state.sls keystone.server
                 try {
-                    salt.enforceState(saltMaster, 'ctl*', ['memcached', 'keystone.server'])
-                } catch (Exception e) {
-                    salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.reload', ['apache2'], null, true)
-                    common.warningMsg('reload of apache2. We should continue to run')
-                }
-                // salt 'ctl01*' state.sls keystone.client
-                salt.enforceState(saltMaster, 'I@keystone:client and ctl*', 'keystone.client')
-                // salt 'ctl*' state.sls glance
-                salt.enforceState(saltMaster, 'ctl*', 'glance')
-                // salt 'ctl*' state.sls glusterfs.client
-                salt.enforceState(saltMaster, 'ctl*', 'glusterfs.client')
-                // salt 'ctl*' state.sls keystone.server
-                salt.enforceState(saltMaster, 'ctl*', 'keystone.server')
-                // salt 'ctl*' state.sls nova
-                salt.enforceState(saltMaster, 'ctl*', 'nova')
-                // salt 'ctl*' state.sls cinder
-                salt.enforceState(saltMaster, 'ctl*', 'cinder')
-                // salt 'ctl*' state.sls neutron
-                salt.enforceState(saltMaster, 'ctl*', 'neutron')
-                // salt 'ctl*' state.sls heat
-                salt.enforceState(saltMaster, 'ctl*', 'heat')
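+                    // Same retry-once pattern as on the upg* nodes; if a state still fails, the outer catch below restores the production databases.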
+                    try {
+                        salt.enforceState(saltMaster, 'ctl*', ['memcached', 'keystone.server'])
+                    } catch (Exception e) {
+                        common.warningMsg('Restarting Apache2 and enforcing keystone.server state again')
+                        salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['apache2'], null, true)
+                        salt.enforceState(saltMaster, 'ctl*', 'keystone.server')
+                    }
+                    // salt 'ctl01*' state.sls keystone.client
+                    try {
+                        salt.enforceState(saltMaster, 'I@keystone:client and ctl*', 'keystone.client')
+                    } catch (Exception e) {
+                        common.warningMsg('Running keystone.client state again')
+                        salt.enforceState(saltMaster, 'I@keystone:client and ctl*', 'keystone.client')
+                    }
+                    // salt 'ctl*' state.sls glance
+                    try {
+                        salt.enforceState(saltMaster, 'ctl*', 'glance')
+                    } catch (Exception e) {
+                        common.warningMsg('Running glance state again')
+                        salt.enforceState(saltMaster, 'ctl*', 'glance')
+                    }
+                    // salt 'ctl*' state.sls glusterfs.client
+                    salt.enforceState(saltMaster, 'ctl*', 'glusterfs.client')
+                    // salt 'ctl*' state.sls keystone.server
+                    salt.enforceState(saltMaster, 'ctl*', 'keystone.server')
+                    // salt 'ctl*' state.sls nova
+                    try {
+                        salt.enforceState(saltMaster, 'ctl*', 'nova')
+                    } catch (Exception e) {
+                        common.warningMsg('Running nova state again')
+                        salt.enforceState(saltMaster, 'ctl*', 'nova')
+                    }
+                    // salt 'ctl*' state.sls cinder
+                    try {
+                        salt.enforceState(saltMaster, 'ctl*', 'cinder')
+                    } catch (Exception e) {
+                        common.warningMsg('Running cinder state again')
+                        salt.enforceState(saltMaster, 'ctl*', 'cinder')
+                    }
+                    // salt 'ctl*' state.sls neutron
+                    try {
+                        salt.enforceState(saltMaster, 'ctl*', 'neutron')
+                    } catch (Exception e) {
+                        common.warningMsg('Running neutron state again')
+                        salt.enforceState(saltMaster, 'ctl*', 'neutron')
+                    }
+                    // salt 'ctl*' state.sls heat
+                    try {
+                        salt.enforceState(saltMaster, 'ctl*', 'heat')
+                    } catch (Exception e) {
+                        common.warningMsg('Running heat state again')
+                        salt.enforceState(saltMaster, 'ctl*', 'heat')
+                    }
 
+                } catch (Exception e) {
+                    common.warningMsg('Some states that require syncdb failed. Restoring production databases')
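+                    // List the non-upgrade databases so the partially migrated production ones can be dropped and restored from backup.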
+                    databases = salt.cmdRun(saltMaster, 'I@mysql:client', 'salt-call mysql.db_list | grep -v \'upgrade\' | grep -v \'schema\' | awk \'/-/ {print \$2}\'')
+                    if (databases && databases != "") {
+                        databasesList = databases['return'][0].values()[0].trim().tokenize("\n")
+                        for (i = 0; i < databasesList.size(); i++) {
+                            if (!databasesList[i].toLowerCase().contains('upgrade') && !databasesList[i].toLowerCase().contains('command execution')) {
+                                common.warningMsg("Removing database ${databasesList[i]}")
+                                salt.runSaltProcessStep(saltMaster, 'I@mysql:client', 'mysql.db_remove', ["${databasesList[i]}"], null, true)
+                                salt.runSaltProcessStep(saltMaster, 'I@mysql:client', 'file.remove', ["/root/mysql/flags/${databasesList[i]}-installed"], null, true)
+                            }
+                        }
+                        salt.enforceState(saltMaster, 'I@mysql:client', 'mysql.client')
+                    } else {
+                        common.errorMsg("No non-_upgrade databases were returned. You have to restore the production databases before running the real control upgrade again, because the database schema migration for some services has already happened. To do that, delete the production databases and run salt 'I@mysql:client' state.sls mysql.client on the salt-master node")
+                    }
+                    common.errorMsg("Stage Real control upgrade failed")
+                }
+
                 // salt 'cmp*' cmd.run 'service nova-compute restart'
                 salt.runSaltProcessStep(saltMaster, 'cmp*', 'service.restart', ['nova-compute'], null, true)
 
@@ -318,6 +395,30 @@
                 print(_pillar)
                 print(domain)
 
+                // Find out which KVM node hosts each control-plane VM so virt.destroy/virt.start target the right hypervisor.
+                _pillar = salt.getGrain(saltMaster, 'I@salt:control', 'id')
+                kvm01 = _pillar['return'][0].values()[0].values()[0]
+                kvm02 = _pillar['return'][0].values()[1].values()[0]
+                kvm03 = _pillar['return'][0].values()[2].values()[0]
+                print(_pillar)
+                print(kvm01)
+                print(kvm02)
+                print(kvm03)
+
+                _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl01:provider')
+                def ctl01NodeProvider = _pillar['return'][0].values()[0]
+
+                _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl02:provider')
+                def ctl02NodeProvider = _pillar['return'][0].values()[0]
+
+                _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl03:provider')
+                def ctl03NodeProvider = _pillar['return'][0].values()[0]
+
+                _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:prx01:provider')
+                def prx01NodeProvider = _pillar['return'][0].values()[0]
+
+                _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:prx02:provider')
+                def prx02NodeProvider = _pillar['return'][0].values()[0]
+
                 salt.runSaltProcessStep(saltMaster, "${prx01NodeProvider}", 'virt.destroy', ["prx01.${domain}"], null, true)
                 salt.runSaltProcessStep(saltMaster, "${prx02NodeProvider}", 'virt.destroy', ["prx02.${domain}"], null, true)
                 salt.runSaltProcessStep(saltMaster, "${ctl01NodeProvider}", 'virt.destroy', ["ctl01.${domain}"], null, true)
@@ -346,12 +447,11 @@
                             salt.runSaltProcessStep(saltMaster, 'I@mysql:client', 'file.remove', ["/root/mysql/flags/${databasesList[i]}-installed"], null, true)
                         }
                     }
+                    salt.enforceState(saltMaster, 'I@mysql:client', 'mysql.client')
                 }else{
                     common.errorMsg("No none _upgrade databases were returned")
                 }
 
-                salt.enforceState(saltMaster, 'I@mysql:client', 'mysql.client')
-
                 salt.runSaltProcessStep(saltMaster, "${prx01NodeProvider}", 'virt.start', ["prx01.${domain}"], null, true)
                 salt.runSaltProcessStep(saltMaster, "${prx02NodeProvider}", 'virt.start', ["prx02.${domain}"], null, true)
                 salt.runSaltProcessStep(saltMaster, "${ctl01NodeProvider}", 'virt.start', ["ctl01.${domain}"], null, true)
diff --git a/test-nodejs-pipeline.groovy b/test-nodejs-pipeline.groovy
index 9e033fd..d659b7e 100644
--- a/test-nodejs-pipeline.groovy
+++ b/test-nodejs-pipeline.groovy
@@ -58,15 +58,9 @@
              }
         }
         stage('Generate config file for devops portal') {
-            def builder = new groovy.json.JsonBuilder()
-            def config = builder.services {
-                elasticsearch {
-                    endpoint 'http://elasticsearch:9200'
-                }
-            }
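+            // The test config is now taken verbatim from the JSON_CONFIG job parameter instead of being generated in the pipeline.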
             writeFile (
                 file: "${workspace}/test_config.json",
-                text: config.toString()
+                text: "${JSON_CONFIG}"
             )
        }
        stage('Start container') {
@@ -95,7 +89,7 @@
         common.sendNotification(currentBuild.result, "" ,["slack"])
         stage('Cleanup') {
             if (containerId != null) {
-                dockerCleanupCommands = ['stop', 'rm']
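+                // 'rm -f' removes the stopped containers without prompting for confirmation.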
+                dockerCleanupCommands = ['stop', 'rm -f']
                 for (int i = 0; i < dockerCleanupCommands.size(); i++) {
                     sh("docker-compose -f ${COMPOSE_PATH} -p ${uniqId} ${dockerCleanupCommands[i]} || true")
                 }