Merge "Add libvirt-exporter pipeline"
diff --git a/lab-pipeline.groovy b/lab-pipeline.groovy
index ddfe37a..da04f62 100644
--- a/lab-pipeline.groovy
+++ b/lab-pipeline.groovy
@@ -163,6 +163,14 @@
 
                     orchestrate.installKubernetesControl(saltMaster)
                 }
+
+
+                if (common.checkContains('INSTALL', 'contrail')) {
+                    stage('Install Contrail for Kubernetes') {
+                        orchestrate.installContrailNetwork(saltMaster)
+                        orchestrate.installContrailCompute(saltMaster)
+                    }
+                }
             }
 
             // install openstack
diff --git a/openstack-control-upgrade.groovy b/openstack-control-upgrade.groovy
index 4defcd5..9e6fd01 100644
--- a/openstack-control-upgrade.groovy
+++ b/openstack-control-upgrade.groovy
@@ -4,6 +4,9 @@
  * Expected parameters:
  *   SALT_MASTER_CREDENTIALS    Credentials to the Salt API.
  *   SALT_MASTER_URL            Full Salt API address [http://10.10.10.1:8000].
+ *   STAGE_TEST_UPGRADE         Run test upgrade stage (bool)
+ *   STAGE_REAL_UPGRADE         Run real upgrade stage (bool)
+ *   STAGE_ROLLBACK_UPGRADE     Run rollback upgrade stage (bool)
  *
 **/
 
@@ -20,206 +23,448 @@
             saltMaster = salt.connection(SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
         }
 
-        state('Prepare upgrade') {
-            // #ignorovat no response a chyby - nene, chyby opravíme
+        if (STAGE_TEST_UPGRADE.toBoolean() == true) {
+            stage('Test upgrade') {
+
+                //salt.enforceState(saltMaster, 'I@salt:master', 'reclass')
+
+                // salt.runSaltProcessStep(saltMaster, '*', 'saltutil.refresh_pillar', [], null, true)
+                // salt '*' saltutil.sync_all
+                // salt.runSaltProcessStep(saltMaster, '*', 'saltutil.sync_all', [], null, true)
+                
+
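+                // Read the DNS domain from the Salt master's 'domain' grain; it is used to build the VM FQDNs below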
+                def _pillar = salt.getGrain(saltMaster, 'I@salt:master', 'domain')
+                def domain = _pillar['return'][0].values()[0].values()[0]
+                print(_pillar)
+                print(domain)
+
+                // read backupninja variable
+                _pillar = salt.getPillar(saltMaster, 'I@backupninja:client', '_param:backupninja_backup_host')
+                def backupninja_backup_host = _pillar['return'][0].values()[0]
+                print(_pillar)
+                print(backupninja_backup_host)
+
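+                // Resolve the KVM hypervisor minion ids (kvm01-03) from the 'id' grain of the salt:control nodes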
+                _pillar = salt.getGrain(saltMaster, 'I@salt:control', 'id')
+                def kvm01 = _pillar['return'][0].values()[0].values()[0]
+                def kvm03 = _pillar['return'][0].values()[2].values()[0]
+                def kvm02 = _pillar['return'][0].values()[1].values()[0]
+                print(_pillar)
+                print(kvm01)
+                print(kvm02)
+                print(kvm03)
+
+                _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:upg01:provider')
+                def upgNodeProvider = _pillar['return'][0].values()[0]
+                print(_pillar)
+                print(upgNodeProvider)
 
 
-            salt.enforceState(master, 'I@salt:master', 'reclass')
+                salt.runSaltProcessStep(saltMaster, "${upgNodeProvider}", 'virt.destroy', ["upg01.${domain}"], null, true)
+                salt.runSaltProcessStep(saltMaster, "${upgNodeProvider}", 'virt.undefine', ["upg01.${domain}"], null, true)
 
-            salt.runSaltProcessStep(master, '*', 'saltutil.refresh_pillar', [], null, true)
-            // salt '*' saltutil.sync_all
-            salt.runSaltProcessStep(master, '*', 'saltutil.sync_all', [], null, true)
-            
-
-            def _pillar = salt.getPillar(master, 'I@salt:master', 'grains.item domain')
-            def domain = _pillar['return'][0].values()[0]
-            println _pillar
-            println domain
-
-            // read backupninja variable
-            _pillar = salt.getPillar(master, 'I@backupninja:server', '_param:single_address')
-            def backupninja_backup_host = _pillar['return'][0].values()[0]
-            println _pillar
-            println backupninja_backup_host
-
-            _pillar = salt.getPillar(master, 'I@salt:control', 'grains.item id')
-            def kvm01 = _pillar['return'][0].values()[0]
-            def kvm02 = _pillar['return'][0].values()[1]
-            def kvm03 = _pillar['return'][0].values()[2]
-            println _pillar
-            println kvm01
-            println kvm02
-            println kvm03
-
-            _pillar = salt.getPillar(master, '${kvm01}', 'salt:control:cluster:internal:node:upg01:provider')
-            def upgNodeProvider = _pillar['return'][0].values()[0]
-            println _pillar
-            println upgNodeProvider
+                
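+                // Remove the old upg01 salt-key, if one exists; a missing key is not treated as an error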
+                try {
+                    salt.cmdRun(saltMaster, 'I@salt:master', "salt-key -d upg01.${domain} -y")
+                } catch (Exception e) {
+                    common.warningMsg("upg01.${domain} does not match any accepted, unaccepted or rejected keys. The key did not exist yet or was already removed. We should continue to run")
+                }
 
 
-            salt.runSaltProcessStep(master, '${upgNodeProvider}', 'virt.destroy upg01.${domain}', [], null, true)
-            salt.runSaltProcessStep(master, '${upgNodeProvider}', 'virt.undefine upg01.${domain}', [], null, true)
+                // salt 'kvm02*' state.sls salt.control
+                salt.enforceState(saltMaster, "${upgNodeProvider}", 'salt.control')
 
-            // salt-key -d upg01.${domain} -y
-            salt.runSaltProcessStep(saltMaster, 'I@salt:master', 'cmd.run', "salt-key -d upg01.${domain} -y", [], null, true)
-            // salt 'kvm02*' state.sls salt.control
-            salt.enforceState(saltMaster, '${upgNodeProvider}', 'salt.control')
+                sleep(60)
 
-            sleep(60)
+                // salt '*' saltutil.refresh_pillar
+                salt.runSaltProcessStep(saltMaster, 'upg*', 'saltutil.refresh_pillar', [], null, true)
+                // salt '*' saltutil.sync_all
+                salt.runSaltProcessStep(saltMaster, 'upg*', 'saltutil.sync_all', [], null, true)
 
-            // salt '*' saltutil.refresh_pillar
-            salt.runSaltProcessStep(master, 'upg*', 'saltutil.refresh_pillar', [], null, true)
-            // salt '*' saltutil.sync_all
-            salt.runSaltProcessStep(master, 'upg*', 'saltutil.sync_all', [], null, true)
+                // salt "upg*" state.sls linux,openssh,salt.minion,ntp,rsyslog
+                try {
+                    salt.enforceState(saltMaster, 'upg*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
+                } catch (Exception e) {
+                    common.warningMsg('Received no response because salt-minion was restarted. We should continue to run')
+                }
+                salt.enforceState(saltMaster, 'upg*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
 
-            // salt "upg*" state.sls linux,openssh,salt.minion,ntp,rsyslog
-            salt.enforceState(master, 'upg*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
-            // salt '*' state.apply salt.minion.grains
-            salt.enforceState(master, '*', 'salt.minion.grains')
-            // salt "upg*" state.sls linux,openssh,salt.minion,ntp,rsyslog
-            salt.enforceState(master, 'upg*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
-
-            // salt "upg*" state.sls rabbitmq
-            salt.enforceState(master, 'upg*', 'rabbitmq')
-            // salt "upg*" state.sls memcached
-            salt.enforceState(master, 'upg*', 'memcached')
-            salt.enforceState(master, 'I@backupninja:client', 'openssh.client')
-            // salt -C 'I@backupninja:server' state.sls backupninja
-            salt.enforceState(master, 'I@backupninja:server', 'backupninja')
-            // salt -C 'I@backupninja:client' state.sls backupninja
-            salt.enforceState(master, 'I@backupninja:client', 'backupninja')
-            salt.runSaltProcessStep(master, 'I@backupninja:client', 'ssh.rm_known_host root ${backupninja_backup_host}', [], null, true)
-            salt.runSaltProcessStep(master, 'I@backupninja:client', 'cmd.run', "arp -d ${backupninja_backup_host}", [], null, true)
-            salt.runSaltProcessStep(master, 'I@backupninja:client', 'ssh.set_known_host root ${backupninja_backup_host}', [], null, true)
-            salt.runSaltProcessStep(master, 'I@backupninja:client', 'cmd.run', "backupninja -n --run /etc/backup.d/101.mysql", [], null, true)
-            salt.runSaltProcessStep(master, 'I@backupninja:client', 'cmd.run', "backupninja -n --run /etc/backup.d/200.backup.rsync", [], null, true)
-            
-
-            def databases = salt.runSaltProcessStep(master, 'I@mysql:client', 'mysql.db_list | grep upgrade | awk "/-/ {print $2}"', [], null, true)
-            for (String database : databases) { System.out.println(database) }
-            for (String database : databases) { salt.runSaltProcessStep(master, 'I@mysql:client', 'mysql.db_remove ${database}', [], null, true) }
-            for (String database : databases) { salt.runSaltProcessStep(master, 'I@mysql:client', 'file.remove /root/mysql/flags/${database}-installed', [], null, true) }
+                // salt "upg*" state.sls rabbitmq
+                salt.enforceState(saltMaster, 'upg*', ['rabbitmq', 'memcached'])
+                try {
+                    salt.enforceState(saltMaster, 'I@backupninja:client', ['openssh.client', 'salt.minion'])
+                } catch (Exception e) {
+                    common.warningMsg('salt-minion was restarted. We should continue to run')
+                }
+                try {
+                    salt.enforceState(saltMaster, 'I@backupninja:server', ['salt.minion'])
+                } catch (Exception e) {
+                    common.warningMsg('salt-minion was restarted. We should continue to run')
+                }
+                // salt '*' state.apply salt.minion.grains
+                //salt.enforceState(saltMaster, '*', 'salt.minion.grains')
+                // salt -C 'I@backupninja:server' state.sls backupninja
+                salt.enforceState(saltMaster, 'I@backupninja:server', 'backupninja')
+                // salt -C 'I@backupninja:client' state.sls backupninja
+                salt.enforceState(saltMaster, 'I@backupninja:client', 'backupninja')
+                salt.runSaltProcessStep(saltMaster, 'I@backupninja:client', 'ssh.rm_known_host', ["root", "${backupninja_backup_host}"], null, true)
+                salt.cmdRun(saltMaster, 'I@backupninja:client', "arp -d ${backupninja_backup_host}")
+                salt.runSaltProcessStep(saltMaster, 'I@backupninja:client', 'ssh.set_known_host', ["root", "${backupninja_backup_host}"], null, true)
+                salt.cmdRun(saltMaster, 'I@backupninja:client', 'backupninja -n --run /etc/backup.d/101.mysql')
+                salt.cmdRun(saltMaster, 'I@backupninja:client', 'backupninja -n --run /etc/backup.d/200.backup.rsync')
 
 
-            salt.enforceState(master, 'I@mysql:client', 'mysql.client')
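+                // Remove any leftover upgrade databases and their installed-flag files from a previous run, then re-apply the mysql.client state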
+                def databases = salt.cmdRun(saltMaster, 'I@mysql:client','salt-call mysql.db_list | grep upgrade | awk \'/-/ {print \$2}\'')
+                if(databases && databases != ""){
+                    def databasesList = databases['return'][0].values()[0].trim().tokenize("\n")
+                    for( i = 0; i < databasesList.size(); i++){ 
+                        if(databasesList[i].toLowerCase().contains('upgrade')){
+                            salt.runSaltProcessStep(saltMaster, 'I@mysql:client', 'mysql.db_remove', ["${databasesList[i]}"], null, true)
+                            common.warningMsg("removing database ${databasesList[i]}")
+                            salt.runSaltProcessStep(saltMaster, 'I@mysql:client', 'file.remove', ["/root/mysql/flags/${databasesList[i]}-installed"], null, true)
+                        }
+                    }
+                    salt.enforceState(saltMaster, 'I@mysql:client', 'mysql.client')
+                }else{
+                    common.errorMsg("No _upgrade databases were returned")
+                }
 
-            salt.enforceState(master, 'upg*', ['keystone.server', 'keystone.client'])
-
-            salt.enforceState(master, 'upg*', ['glance', 'keystone.server', 'nova', 'cinder', 'neutron', 'heat'])
-
-            salt.runSaltProcessStep(master, 'upg01*', 'cmd.run', '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list', null, true)
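+                // Apply the OpenStack control-plane states on the upg* node one at a time, retrying each state (or restarting Apache2 for keystone.server) if the first attempt fails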
+                try {
+                    salt.enforceState(saltMaster, 'upg*', 'keystone.server')
+                } catch (Exception e) {
+                    common.warningMsg('Reloading Apache2 and enforcing keystone.server state again')
+                    salt.runSaltProcessStep(saltMaster, 'upg*', 'service.restart', ['apache2'], null, true)
+                }
+                try {
+                    salt.enforceState(saltMaster, 'upg*', 'keystone.client')
+                } catch (Exception e) {
+                    common.warningMsg('running keystone.client state again')
+                    salt.enforceState(saltMaster, 'upg*', 'keystone.client')
+                }
+                try {
+                    salt.enforceState(saltMaster, 'upg*', 'glance')
+                } catch (Exception e) {
+                    common.warningMsg('running glance state again')
+                    salt.enforceState(saltMaster, 'upg*', 'glance')
+                }
+                salt.enforceState(saltMaster, 'upg*', 'keystone.server')
+                try {
+                    salt.enforceState(saltMaster, 'upg*', 'nova')
+                } catch (Exception e) {
+                    common.warningMsg('running nova state again')
+                    salt.enforceState(saltMaster, 'upg*', 'nova')
+                }
+                try {
+                    salt.enforceState(saltMaster, 'upg*', 'cinder')
+                } catch (Exception e) {
+                    common.warningMsg('running cinder state again')
+                    salt.enforceState(saltMaster, 'upg*', 'cinder')
+                }                
+                try {
+                    salt.enforceState(saltMaster, 'upg*', 'neutron')
+                } catch (Exception e) {
+                    common.warningMsg('running neutron state again')
+                    salt.enforceState(saltMaster, 'upg*', 'neutron')
+                }
+                try {
+                    salt.enforceState(saltMaster, 'upg*', 'heat')
+                } catch (Exception e) {
+                    common.warningMsg('running heat state again')
+                    salt.enforceState(saltMaster, 'upg*', 'heat')
+                }
+                salt.cmdRun(saltMaster, 'upg01*', '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
+            }
         }
 
-        stage('Ask for manual confirmation') {
-            input message: "Do you want to continue with upgrade?"
-        }
-        state('Start upgrade') {
-            // # actual upgrade
-
-            _pillar = salt.getPillar(master, '${kvm01}', 'salt:control:cluster:internal:node:ctl01:provider')
-            def ctl01NodeProvider = _pillar['return'][0].values()[0]
-
-            _pillar = salt.getPillar(master, '${kvm01}', 'salt:control:cluster:internal:node:ctl02:provider')
-            def ctl02NodeProvider = _pillar['return'][0].values()[0]
-
-            _pillar = salt.getPillar(master, '${kvm01}', 'salt:control:cluster:internal:node:ctl03:provider')
-            def ctl03NodeProvider = _pillar['return'][0].values()[0]
-
-            _pillar = salt.getPillar(master, '${kvm01}', 'salt:control:cluster:internal:node:prx01:provider')
-            def prx01NodeProvider = _pillar['return'][0].values()[0]
-
-            _pillar = salt.getPillar(master, '${kvm01}', 'salt:control:cluster:internal:node:prx02:provider')
-            def prx02NodeProvider = _pillar['return'][0].values()[0]
-
-
-            salt.runSaltProcessStep(master, '${prx01NodeProvider}', 'virt.destroy prx01.${domain}', [], null, true)
-            salt.runSaltProcessStep(master, '${prx02NodeProvider}', 'virt.destroy prx01.${domain}', [], null, true)
-            salt.runSaltProcessStep(master, '${ctl01NodeProvider}', 'virt.destroy ctl01.${domain}', [], null, true)
-            salt.runSaltProcessStep(master, '${ctl02NodeProvider}', 'virt.destroy ctl02.${domain}', [], null, true)
-            salt.runSaltProcessStep(master, '${ctl03NodeProvider}', 'virt.destroy ctl03.${domain}', [], null, true)
-
-
-            // salt 'kvm01*' cmd.run '[ ! -f ./prx01.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/prx01.${domain}/system.qcow2 ./prx01.${domain}.qcow2.bak'
-            salt.runSaltProcessStep(master, '${prx01NodeProvider}', 'cmd.run', "[ ! -f ./prx01.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/prx01.${domain}/system.qcow2 ./prx01.${domain}.qcow2.bak", null, true)
-            // salt 'kvm03*' cmd.run '[ ! -f ./prx02.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/prx02.${domain}/system.qcow2 ./prx02.${domain}.qcow2.bak'
-            salt.runSaltProcessStep(master, '${prx02NodeProvider}', 'cmd.run', "[ ! -f ./prx02.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/prx02.${domain}/system.qcow2 ./prx02.${domain}.qcow2.bak", null, true)
-            // salt 'kvm01*' cmd.run '[ ! -f ./ctl01.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/ctl01.${domain}/system.qcow2 ./ctl01.${domain}.qcow2.bak'
-            salt.runSaltProcessStep(master, '${ctl01NodeProvider}', 'cmd.run', "[ ! -f ./ctl01.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/ctl01.${domain}/system.qcow2 ./ctl01.${domain}.qcow2.bak", null, true)
-            // salt 'kvm02*' cmd.run '[ ! -f ./ctl02.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/ctl02.${domain}/system.qcow2 ./ctl02.${domain}.qcow2.bak'
-            salt.runSaltProcessStep(master, '${ctl02NodeProvider}', 'cmd.run', "[ ! -f ./ctl02.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/ctl02.${domain}/system.qcow2 ./ctl02.${domain}.qcow2.bak", null, true)
-            // salt 'kvm03*' cmd.run '[ ! -f ./ctl03.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/ctl03.${domain}/system.qcow2 ./ctl03.${domain}.qcow2.bak'
-            salt.runSaltProcessStep(master, '${ctl03NodeProvider}', 'cmd.run', "[ ! -f ./ctl03.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/ctl03.${domain}/system.qcow2 ./ctl03.${domain}.qcow2.bak", null, true)
-
-
-            salt.runSaltProcessStep(master, '${prx01NodeProvider}', 'virt.undefine prx01.${domain}', [], null, true)
-            salt.runSaltProcessStep(master, '${prx02NodeProvider}', 'virt.undefine prx02.${domain}', [], null, true)
-            salt.runSaltProcessStep(master, '${ctl01NodeProvider}', 'virt.undefine ctl01.${domain}', [], null, true)
-            salt.runSaltProcessStep(master, '${ctl02NodeProvider}', 'virt.undefine ctl02.${domain}', [], null, true)
-            salt.runSaltProcessStep(master, '${ctl03NodeProvider}', 'virt.undefine ctl03.${domain}', [], null, true)
-
-
-            salt.runSaltProcessStep(master, 'I@backupninja:client', 'cmd.run', "backupninja -n --run /etc/backup.d/101.mysql", [], null, true)
-            salt.runSaltProcessStep(master, 'I@backupninja:client', 'cmd.run', "backupninja -n --run /etc/backup.d/200.backup.rsync", [], null, true)
-            
-
-            salt.runSaltProcessStep(master, 'I@salt', 'cmd.run', "salt-key -d ctl01.${domain},ctl01.${domain},ctl03.${domain},prx01.${domain},prx02.${domain}", null, true)
-
-            // salt 'kvm*' state.sls salt.control
-            salt.enforceState(master, 'I@salt:control', 'salt.control')
-
-            sleep(60)
-
-            // salt '*' saltutil.refresh_pillar
-            salt.runSaltProcessStep(master, '*', 'saltutil.refresh_pillar', [], null, true)
-            // salt '*' saltutil.sync_all
-            salt.runSaltProcessStep(master, '*', 'saltutil.sync_all', [], null, true)
-
-            // salt "ctl*" state.sls linux,openssh,salt.minion,ntp,rsyslog
-            // salt 'prx*' state.sls linux,openssh,salt.minion,ntp,rsyslog
-            // salt "ctl*" state.sls linux,openssh,salt.minion,ntp,rsyslog
-            salt.enforceState(master, 'ctl* or prx* or ctl*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
-            // salt 'ctl*' state.sls keepalived
-            // salt 'ctl*' state.sls haproxy
-            salt.enforceState(master, 'ctl*', ['keepalived', 'haproxy'])
-            // salt 'ctl*' service.restart rsyslog
-            salt.runSaltProcessStep(master, 'ctl*', 'service.restart', ['rsyslog'], null, true)
-            // salt "ctl*" state.sls memcached
-            // salt "ctl*" state.sls keystone.server
-            salt.enforceState(master, 'ctl*', ['memcached', 'keystone.server'])
-            // salt 'ctl01*' state.sls keystone.client
-            salt.enforceState(master, 'I@keystone:client and ctl*', 'keystone.client')
-            // salt 'ctl*' state.sls glance
-            salt.enforceState(master, 'ctl*', 'glance')
-            // salt 'ctl*' state.sls glusterfs.client
-            salt.enforceState(master, 'ctl*', 'glusterfs.client')
-            // salt 'ctl*' state.sls keystone.server
-            salt.enforceState(master, 'ctl*', 'keystone.server')
-            // salt 'ctl*' state.sls nova
-            salt.enforceState(master, 'ctl*', 'nova')
-            // salt 'ctl*' state.sls cinder
-            salt.enforceState(master, 'ctl*', 'cinder')
-            // salt 'ctl*' state.sls neutron
-            salt.enforceState(master, 'ctl*', 'neutron')
-            // salt 'ctl*' state.sls heat
-            salt.enforceState(master, 'ctl*', 'heat')
-
-            // salt 'cmp*' cmd.run 'service nova-compute restart'
-            salt.runSaltProcessStep(master, 'cmp*', 'service.restart', ['nova-compute'], null, true)
-
-            // salt 'prx*' state.sls linux,openssh,salt.minion,ntp,rsyslog - TODO: proč? už to jednou projelo
-            // salt 'ctl*' state.sls keepalived
-            // salt 'prx*' state.sls keepalived
-            salt.enforceState(master, 'prx*', 'keepalived')
-            // salt 'prx*' state.sls horizon
-            salt.enforceState(master, 'prx*', 'horizon')
-            // salt 'prx*' state.sls nginx
-            salt.enforceState(master, 'prx*', 'nginx')
-
-            salt.runSaltProcessStep(master, 'ctl01*', 'cmd.run', '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list', null, true)
+        if (STAGE_TEST_UPGRADE.toBoolean() == true && STAGE_REAL_UPGRADE.toBoolean() == true) {
+            stage('Ask for manual confirmation') {
+                input message: "Do you want to continue with upgrade?"
+            }
         }
 
-        stage('Verification') {
-            input message: "Please verify the control upgrade and if was not successful, in the worst scenario, you can use the openstack-control-upgrade-rollover pipeline"
+        if (STAGE_REAL_UPGRADE.toBoolean() == true) {
+            stage('Real upgrade') {
+                // # actual upgrade
+
+                _pillar = salt.getGrain(saltMaster, 'I@salt:master', 'domain')
+                domain = _pillar['return'][0].values()[0].values()[0]
+                print(_pillar)
+                print(domain)
+
+                _pillar = salt.getGrain(saltMaster, 'I@salt:control', 'id')
+                kvm01 = _pillar['return'][0].values()[0].values()[0]
+                kvm03 = _pillar['return'][0].values()[2].values()[0]
+                kvm02 = _pillar['return'][0].values()[1].values()[0]
+                print(_pillar)
+                print(kvm01)
+                print(kvm02)
+                print(kvm03)
+
+                _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl01:provider')
+                def ctl01NodeProvider = _pillar['return'][0].values()[0]
+
+                _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl02:provider')
+                def ctl02NodeProvider = _pillar['return'][0].values()[0]
+
+                _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl03:provider')
+                def ctl03NodeProvider = _pillar['return'][0].values()[0]
+
+                _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:prx01:provider')
+                def prx01NodeProvider = _pillar['return'][0].values()[0]
+
+                _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:prx02:provider')
+                def prx02NodeProvider = _pillar['return'][0].values()[0]
+
+
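+                // Power off the production proxy and controller VMs before backing up their disks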
+                salt.runSaltProcessStep(saltMaster, "${prx01NodeProvider}", 'virt.destroy', ["prx01.${domain}"], null, true)
+                salt.runSaltProcessStep(saltMaster, "${prx02NodeProvider}", 'virt.destroy', ["prx02.${domain}"], null, true)
+                salt.runSaltProcessStep(saltMaster, "${ctl01NodeProvider}", 'virt.destroy', ["ctl01.${domain}"], null, true)
+                salt.runSaltProcessStep(saltMaster, "${ctl02NodeProvider}", 'virt.destroy', ["ctl02.${domain}"], null, true)
+                salt.runSaltProcessStep(saltMaster, "${ctl03NodeProvider}", 'virt.destroy', ["ctl03.${domain}"], null, true)
+
+
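+                // Take a one-time backup of each VM's system disk; if a backup already exists the check fails and the error is ignored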
+                try {
+                    salt.cmdRun(saltMaster, "${prx01NodeProvider}", "[ ! -f /root/prx01.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/prx01.${domain}/system.qcow2 ./prx01.${domain}.qcow2.bak")
+                } catch (Exception e) {
+                    common.warningMsg('File already exists')
+                }
+                try {
+                    salt.cmdRun(saltMaster, "${prx02NodeProvider}", "[ ! -f /root/prx02.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/prx02.${domain}/system.qcow2 ./prx02.${domain}.qcow2.bak")
+                } catch (Exception e) {
+                    common.warningMsg('File already exists')
+                }
+                try {
+                    salt.cmdRun(saltMaster, "${ctl01NodeProvider}", "[ ! -f /root/ctl01.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/ctl01.${domain}/system.qcow2 ./ctl01.${domain}.qcow2.bak")
+                } catch (Exception e) {
+                    common.warningMsg('File already exists')
+                }
+                try {
+                    salt.cmdRun(saltMaster, "${ctl02NodeProvider}", "[ ! -f /root/ctl02.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/ctl02.${domain}/system.qcow2 ./ctl02.${domain}.qcow2.bak")
+                } catch (Exception e) {
+                    common.warningMsg('File already exists')
+                }
+                try {
+                    salt.cmdRun(saltMaster, "${ctl03NodeProvider}", "[ ! -f /root/ctl03.${domain}.qcow2.bak ] && cp /var/lib/libvirt/images/ctl03.${domain}/system.qcow2 ./ctl03.${domain}.qcow2.bak")
+                } catch (Exception e) {
+                    common.warningMsg('File already exists')
+                }
+
+
+                salt.runSaltProcessStep(saltMaster, "${prx01NodeProvider}", 'virt.undefine', ["prx01.${domain}"], null, true)
+                salt.runSaltProcessStep(saltMaster, "${prx02NodeProvider}", 'virt.undefine', ["prx02.${domain}"], null, true)
+                salt.runSaltProcessStep(saltMaster, "${ctl01NodeProvider}", 'virt.undefine', ["ctl01.${domain}"], null, true)
+                salt.runSaltProcessStep(saltMaster, "${ctl02NodeProvider}", 'virt.undefine', ["ctl02.${domain}"], null, true)
+                salt.runSaltProcessStep(saltMaster, "${ctl03NodeProvider}", 'virt.undefine', ["ctl03.${domain}"], null, true)
+
+
+                salt.cmdRun(saltMaster, 'I@backupninja:client', 'backupninja -n --run /etc/backup.d/101.mysql')
+                salt.cmdRun(saltMaster, 'I@backupninja:client', 'backupninja -n --run /etc/backup.d/200.backup.rsync')
+
+                try {
+                    salt.cmdRun(saltMaster, 'I@salt:master', "salt-key -d ctl01.${domain},ctl02.${domain},ctl03.${domain},prx01.${domain},prx02.${domain} -y")
+                } catch (Exception e) {
+                    common.warningMsg('The requested keys do not match any accepted, unaccepted or rejected keys. They were probably already removed. We should continue to run')
+                }
+
+                // salt 'kvm*' state.sls salt.control
+                salt.enforceState(saltMaster, 'I@salt:control', 'salt.control')
+
+                sleep(60)
+
+                // salt '*' saltutil.refresh_pillar
+                salt.runSaltProcessStep(saltMaster, '*', 'saltutil.refresh_pillar', [], null, true)
+                // salt '*' saltutil.sync_all
+                salt.runSaltProcessStep(saltMaster, '*', 'saltutil.sync_all', [], null, true)
+
+                try {
+                    salt.enforceState(saltMaster, 'ctl* or prx* or ctl*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
+                } catch (Exception e) {
+                    common.warningMsg('Received no response because salt-minion was restarted. We should continue to run')
+                }
+                salt.enforceState(saltMaster, 'ctl* or prx* or ctl*', ['linux', 'openssh', 'salt.minion', 'ntp', 'rsyslog'])
+
+                // salt 'ctl*' state.sls keepalived
+                // salt 'ctl*' state.sls haproxy
+                salt.enforceState(saltMaster, 'ctl*', ['keepalived', 'haproxy'])
+                // salt 'ctl*' service.restart rsyslog
+                salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['rsyslog'], null, true)
+                // salt "ctl*" state.sls memcached
+                // salt "ctl*" state.sls keystone.server
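+                // Failures in the syncdb-dependent states below are handled by the outer catch, which removes the production databases and re-applies the mysql.client state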
+                try {
+                    try {
+                        salt.enforceState(saltMaster, 'ctl*', ['memcached', 'keystone.server'])
+                    } catch (Exception e) {
+                        common.warningMsg('Reloading Apache2 and enforcing keystone.server state again')
+                        salt.runSaltProcessStep(saltMaster, 'ctl*', 'service.restart', ['apache2'], null, true)
+                        salt.enforceState(saltMaster, 'ctl*', 'keystone.server')
+                    }
+                    // salt 'ctl01*' state.sls keystone.client
+                    try {
+                        salt.enforceState(saltMaster, 'I@keystone:client and ctl*', 'keystone.client')
+                    } catch (Exception e) {
+                        common.warningMsg('running keystone.client state again')
+                        salt.enforceState(saltMaster, 'I@keystone:client and ctl*', 'keystone.client')
+                    } 
+                    try {
+                        salt.enforceState(saltMaster, 'ctl*', 'glance')
+                    } catch (Exception e) {
+                        common.warningMsg('running glance state again')
+                        salt.enforceState(saltMaster, 'ctl*', 'glance')
+                    }
+                    // salt 'ctl*' state.sls glusterfs.client
+                    salt.enforceState(saltMaster, 'ctl*', 'glusterfs.client')
+                    // salt 'ctl*' state.sls keystone.server
+                    salt.enforceState(saltMaster, 'ctl*', 'keystone.server')
+                    // salt 'ctl*' state.sls nova
+                    try {
+                        salt.enforceState(saltMaster, 'ctl*', 'nova')
+                    } catch (Exception e) {
+                        common.warningMsg('running nova state again')
+                        salt.enforceState(saltMaster, 'ctl*', 'nova')
+                    }
+                    // salt 'ctl*' state.sls cinder
+                    try {
+                        salt.enforceState(saltMaster, 'ctl*', 'cinder')
+                    } catch (Exception e) {
+                        common.warningMsg('running cinder state again')
+                        salt.enforceState(saltMaster, 'ctl*', 'cinder')
+                    }                
+                    try {
+                        salt.enforceState(saltMaster, 'ctl*', 'neutron')
+                    } catch (Exception e) {
+                        common.warningMsg('running neutron state again')
+                        salt.enforceState(saltMaster, 'ctl*', 'neutron')
+                    }
+                    // salt 'ctl*' state.sls heat
+                    try {
+                        salt.enforceState(saltMaster, 'ctl*', 'heat')
+                    } catch (Exception e) {
+                        common.warningMsg('running heat state again')
+                        salt.enforceState(saltMaster, 'ctl*', 'heat')
+                    }
+
+                } catch (Exception e) {
+                    common.warningMsg('Some states that require syncdb failed. Restoring production databases')
+                    databases = salt.cmdRun(saltMaster, 'I@mysql:client','salt-call mysql.db_list | grep -v \'upgrade\' | grep -v \'schema\' | awk \'/-/ {print \$2}\'')
+                    if(databases && databases != ""){
+                        databasesList = databases['return'][0].values()[0].trim().tokenize("\n")
+                        for( i = 0; i < databasesList.size(); i++){ 
+                            if(!databasesList[i].toLowerCase().contains('upgrade') && !databasesList[i].toLowerCase().contains('command execution')){
+                                salt.runSaltProcessStep(saltMaster, 'I@mysql:client', 'mysql.db_remove', ["${databasesList[i]}"], null, true)
+                                common.warningMsg("removing database ${databasesList[i]}")
+                                salt.runSaltProcessStep(saltMaster, 'I@mysql:client', 'file.remove', ["/root/mysql/flags/${databasesList[i]}-installed"], null, true)
+                            }
+                        }
+                        salt.enforceState(saltMaster, 'I@mysql:client', 'mysql.client')
+                    }else{
+                        common.errorMsg("No non-upgrade databases were returned. You have to restore the production databases before running the real control upgrade again, because the database schema migration for some services has already happened. To do that, delete the production databases and run salt 'I@mysql:client' state.sls mysql.client on the salt-master node")
+                    }
+                    common.errorMsg("Stage Real control upgrade failed")
+                }
+                    
+                // salt 'cmp*' cmd.run 'service nova-compute restart'
+                salt.runSaltProcessStep(saltMaster, 'cmp*', 'service.restart', ['nova-compute'], null, true)
+
+                // salt 'prx*' state.sls linux,openssh,salt.minion,ntp,rsyslog - TODO: proč? už to jednou projelo
+                // salt 'ctl*' state.sls keepalived
+                // salt 'prx*' state.sls keepalived
+                salt.enforceState(saltMaster, 'prx*', 'keepalived')
+                // salt 'prx*' state.sls horizon
+                salt.enforceState(saltMaster, 'prx*', 'horizon')
+                // salt 'prx*' state.sls nginx
+                salt.enforceState(saltMaster, 'prx*', 'nginx')
+
+                salt.cmdRun(saltMaster, 'ctl01*', '. /root/keystonercv3; openstack service list; openstack image list; openstack flavor list; openstack compute service list; openstack server list; openstack network list; openstack volume list; openstack orchestration service list')
+            }
+
+        }
+
+
+        if (STAGE_REAL_UPGRADE.toBoolean() == true && STAGE_ROLLBACK_UPGRADE.toBoolean() == true) {
+            stage('Ask for manual confirmation') {
+                input message: "Please verify that the control upgrade was successful. If it was not, you can, as a last resort, click YES to continue with the control upgrade rollback. Do you want to continue with the rollback?"
+            }
+            stage('Ask for final confirmation') {
+                input message: "Do you really want to continue with the rollback?"
+            }
+        }
+
+        if (STAGE_ROLLBACK_UPGRADE.toBoolean() == true) {
+            stage('Rollback upgrade') {
+
+                _pillar = salt.getGrain(saltMaster, 'I@salt:master', 'domain')
+                domain = _pillar['return'][0].values()[0].values()[0]
+                print(_pillar)
+                print(domain)
+
+                _pillar = salt.getGrain(saltMaster, 'I@salt:control', 'id')
+                kvm01 = _pillar['return'][0].values()[0].values()[0]
+                kvm03 = _pillar['return'][0].values()[2].values()[0]
+                kvm02 = _pillar['return'][0].values()[1].values()[0]
+                print(_pillar)
+                print(kvm01)
+                print(kvm02)
+                print(kvm03)
+
+                _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl01:provider')
+                def ctl01NodeProvider = _pillar['return'][0].values()[0]
+
+                _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl02:provider')
+                def ctl02NodeProvider = _pillar['return'][0].values()[0]
+
+                _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:ctl03:provider')
+                def ctl03NodeProvider = _pillar['return'][0].values()[0]
+
+                _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:prx01:provider')
+                def prx01NodeProvider = _pillar['return'][0].values()[0]
+
+                _pillar = salt.getPillar(saltMaster, "${kvm01}", 'salt:control:cluster:internal:node:prx02:provider')
+                def prx02NodeProvider = _pillar['return'][0].values()[0]
+
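+                // Power off the upgraded control VMs and copy the pre-upgrade disk backups back into place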
+                salt.runSaltProcessStep(saltMaster, "${prx01NodeProvider}", 'virt.destroy', ["prx01.${domain}"], null, true)
+                salt.runSaltProcessStep(saltMaster, "${prx02NodeProvider}", 'virt.destroy', ["prx02.${domain}"], null, true)
+                salt.runSaltProcessStep(saltMaster, "${ctl01NodeProvider}", 'virt.destroy', ["ctl01.${domain}"], null, true)
+                salt.runSaltProcessStep(saltMaster, "${ctl02NodeProvider}", 'virt.destroy', ["ctl02.${domain}"], null, true)
+                salt.runSaltProcessStep(saltMaster, "${ctl03NodeProvider}", 'virt.destroy', ["ctl03.${domain}"], null, true)
+
+                salt.runSaltProcessStep(saltMaster, "${prx01NodeProvider}", 'file.copy', ["/root/prx01.${domain}.qcow2.bak", "/var/lib/libvirt/images/prx01.${domain}/system.qcow2"], null, true)
+                salt.runSaltProcessStep(saltMaster, "${prx02NodeProvider}", 'file.copy', ["/root/prx02.${domain}.qcow2.bak", "/var/lib/libvirt/images/prx02.${domain}/system.qcow2"], null, true)
+                salt.runSaltProcessStep(saltMaster, "${ctl01NodeProvider}", 'file.copy', ["/root/ctl01.${domain}.qcow2.bak", "/var/lib/libvirt/images/ctl01.${domain}/system.qcow2"], null, true)
+                salt.runSaltProcessStep(saltMaster, "${ctl02NodeProvider}", 'file.copy', ["/root/ctl02.${domain}.qcow2.bak", "/var/lib/libvirt/images/ctl02.${domain}/system.qcow2"], null, true)
+                salt.runSaltProcessStep(saltMaster, "${ctl03NodeProvider}", 'file.copy', ["/root/ctl03.${domain}.qcow2.bak", "/var/lib/libvirt/images/ctl03.${domain}/system.qcow2"], null, true)
+
+                try {
+                    salt.cmdRun(saltMaster, 'I@salt:master', "salt-key -d ctl01.${domain},ctl02.${domain},ctl03.${domain},prx01.${domain},prx02.${domain} -y")
+                } catch (Exception e) {
+                    common.warningMsg('The requested keys do not match any accepted, unaccepted or rejected keys. They were probably already removed. We should continue to run')
+                }
+
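+                // List the remaining non-upgrade databases, drop them together with their installed-flag files, and re-apply the mysql.client state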
+                databases = salt.cmdRun(saltMaster, 'I@mysql:client','salt-call mysql.db_list | grep -v \'upgrade\' | grep -v \'schema\' | awk \'/-/ {print \$2}\'')
+                if(databases && databases != ""){
+                    databasesList = databases['return'][0].values()[0].trim().tokenize("\n")
+                    for( i = 0; i < databasesList.size(); i++){ 
+                        if(!databasesList[i].toLowerCase().contains('upgrade') && !databasesList[i].toLowerCase().contains('command execution')){
+                            salt.runSaltProcessStep(saltMaster, 'I@mysql:client', 'mysql.db_remove', ["${databasesList[i]}"], null, true)
+                            common.warningMsg("removing database ${databasesList[i]}")
+                            salt.runSaltProcessStep(saltMaster, 'I@mysql:client', 'file.remove', ["/root/mysql/flags/${databasesList[i]}-installed"], null, true)
+                        }
+                    }
+                    salt.enforceState(saltMaster, 'I@mysql:client', 'mysql.client')
+                }else{
+                    common.errorMsg("No non-upgrade databases were returned")
+                }
+
+                salt.runSaltProcessStep(saltMaster, "${prx01NodeProvider}", 'virt.start', ["prx01.${domain}"], null, true)
+                salt.runSaltProcessStep(saltMaster, "${prx02NodeProvider}", 'virt.start', ["prx02.${domain}"], null, true)
+                salt.runSaltProcessStep(saltMaster, "${ctl01NodeProvider}", 'virt.start', ["ctl01.${domain}"], null, true)
+                salt.runSaltProcessStep(saltMaster, "${ctl02NodeProvider}", 'virt.start', ["ctl02.${domain}"], null, true)
+                salt.runSaltProcessStep(saltMaster, "${ctl03NodeProvider}", 'virt.start', ["ctl03.${domain}"], null, true)
+
+                // salt 'cmp*' cmd.run 'service nova-compute restart'
+                salt.runSaltProcessStep(saltMaster, 'cmp*', 'service.restart', ['nova-compute'], null, true)
+
+                sleep(60)
+
+                salt.cmdRun(saltMaster, 'ctl01*', '. /root/keystonerc; nova service-list; glance image-list; nova flavor-list; nova hypervisor-list; nova list; neutron net-list; cinder list; heat service-list')
+            }
         }
     }
 }
diff --git a/test-nodejs-pipeline.groovy b/test-nodejs-pipeline.groovy
index d0365a4..d659b7e 100644
--- a/test-nodejs-pipeline.groovy
+++ b/test-nodejs-pipeline.groovy
@@ -1,6 +1,7 @@
 /**
 * JS testing pipeline
 * CREDENTIALS_ID - gerrit credentials id
+* COMPOSE_PATH - path to compose file in repository
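+* JSON_CONFIG - JSON configuration for the devops portal tests (written to test_config.json)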
 * NODE_IMAGE - NodeJS with NPM Docker image name
 * COMMANDS - a list of command(s) to run
 **/
@@ -21,7 +22,6 @@
     }
 }
 
-
 def gerritRef
 try {
     gerritRef = GERRIT_REFSPEC
@@ -41,6 +41,7 @@
 
 node("docker") {
     def containerId
+    def uniqId
     try {
         stage('Checkout source code') {
             if (gerritRef) {
@@ -56,12 +57,22 @@
                  throw new Exception("Cannot checkout gerrit patchset, GERRIT_REFSPEC and DEFAULT_GIT_REF is null")
              }
         }
-        stage('Start container') {
+        stage('Generate config file for devops portal') {
+            def workspace = common.getWorkspace()
+            writeFile(
+                file: "${workspace}/test_config.json",
+                text: "${JSON_CONFIG}"
+            )
+        }
+        stage('Start container') {
             def workspace = common.getWorkspace()
-            containerId = sh(
-                script: "docker run -d ${NODE_IMAGE}",
-                returnStdout: true,
-            ).trim()
+            def timeStamp = new Date().format("HHmmss", TimeZone.getTimeZone('UTC'))
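+            // Derive a unique compose project name from the git ref and a UTC timestamp so parallel builds do not collide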
+            if (gerritRef) {
+                uniqId = gerritRef.tokenize('/').takeRight(2).join('') + timeStamp
+            } else {
+                uniqId = defaultGitRef.tokenize('/').takeRight(2).join('') + timeStamp
+            }
+            sh("docker-compose -f ${COMPOSE_PATH} -p ${uniqId} up -d")
+            containerId = "${uniqId}_devopsportal_1"
             common.successMsg("Container with id ${containerId} started.")
             sh("docker cp ${workspace}/. ${containerId}:/opt/workspace/")
         }
@@ -78,8 +89,12 @@
         common.sendNotification(currentBuild.result, "" ,["slack"])
         stage('Cleanup') {
             if (containerId != null) {
-                sh("docker stop -t 0 ${containerId}")
-                sh("docker rm ${containerId}")
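+                // Tear down the compose services, network, and generated config; ignore failures so cleanup never breaks the build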
+                def dockerCleanupCommands = ['stop', 'rm -f']
+                for (int i = 0; i < dockerCleanupCommands.size(); i++) {
+                    sh("docker-compose -f ${COMPOSE_PATH} -p ${uniqId} ${dockerCleanupCommands[i]} || true")
+                }
+                sh("docker network rm ${uniqId}_default || true")
+                sh("rm -f ${workspace}/test_config.json || true")
                 common.infoMsg("Container with id ${containerId} was removed.")
             }
         }
diff --git a/test-salt-models-pipeline.groovy b/test-salt-models-pipeline.groovy
index 1fafac9..82685dd 100644
--- a/test-salt-models-pipeline.groovy
+++ b/test-salt-models-pipeline.groovy
@@ -61,18 +61,34 @@
     }
 
     stage("test-nodes") {
-      def nodes = sh script: "find ./nodes -type f -name 'cfg*.yml'", returnStdout: true
+      def nodes = sh(script: "find ./nodes -type f -name 'cfg*.yml'", returnStdout: true).tokenize()
       def buildSteps = [:]
-      def partitions = common.partitionList(nodes.tokenize(), PARALLEL_NODE_GROUP_SIZE.toInteger())
-      for (int i=0; i< partitions.size();i++) {
-        def partition = partitions[i]
-        buildSteps.put("partition-${i}", new HashMap<String,org.jenkinsci.plugins.workflow.cps.CpsClosure2>())
-        for(int k=0; k < partition.size;k++){
-            def basename = sh(script: "basename ${partition[k]} .yml", returnStdout: true).trim()
-            buildSteps.get("partition-${i}").put(basename, { setupAndTestNode(basename) })
-        }
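+      // Run tests in parallel when there are two or three cfg nodes, in serial batches of three when there are more, or as a single direct run for one node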
+      if(nodes.size() > 1){
+          if(nodes.size() <= 3){
+            common.infoMsg("Found <=3 cfg nodes, running parallel test")
+             for(int i=0; i < nodes.size();i++){
+               def basename = sh(script: "basename ${nodes[i]} .yml", returnStdout: true).trim()
+               buildSteps.put("node-${basename}", { setupAndTestNode(basename) })
+             }
+             parallel buildSteps
+          }else{
+            common.infoMsg("Found more than 3 cfg nodes, running parallel group test with 3 nodes")
+            def partitions = common.partitionList(nodes, 3)
+            for (int i=0; i < partitions.size();i++) {
+              def partition = partitions[i]
+              buildSteps.put("partition-${i}", new HashMap<String,org.jenkinsci.plugins.workflow.cps.CpsClosure2>())
+              for(int k=0; k < partition.size;k++){
+                  def basename = sh(script: "basename ${partition[k]} .yml", returnStdout: true).trim()
+                  buildSteps.get("partition-${i}").put(basename, { setupAndTestNode(basename) })
+              }
+            }
+            common.serial(buildSteps)
+          }
+      }else{
+          common.infoMsg("Found one cfg node, running single test")
+          def basename = sh(script: "basename ${nodes[0]} .yml", returnStdout: true).trim()
+          setupAndTestNode(basename)
       }
-      common.serial(buildSteps)
     }
 
   } catch (Throwable e) {