Merge "Add check for user choosing of stages Related-Prod: #PROD-23318 (PROD:23318)" into release/2019.2.0
diff --git a/ceph-add-osd-upmap.groovy b/ceph-add-osd-upmap.groovy
new file mode 100644
index 0000000..96ca29d
--- /dev/null
+++ b/ceph-add-osd-upmap.groovy
@@ -0,0 +1,133 @@
+/**
+ *
+ * Add Ceph node to existing cluster using upmap mechanism
+ *
+ * Required parameters:
+ *  SALT_MASTER_URL             URL of Salt master
+ *  SALT_MASTER_CREDENTIALS     Credentials to the Salt API
+ *  HOST                        Host (minion id) to be added
+ *
+ */
+
+// Shared pipeline-library helpers. Note: common/salt/orchestrate are bound to
+// the script (no 'def') so the helper functions below can reference them;
+// 'python' is a local and is only used inside the node block.
+common = new com.mirantis.mk.Common()
+salt = new com.mirantis.mk.Salt()
+def python = new com.mirantis.mk.Python()
+orchestrate = new com.mirantis.mk.Orchestrate()
+
+/**
+ * Poll 'ceph health' every 10s until the cluster reports HEALTH_OK.
+ *
+ * @param master   unused here; kept for signature compatibility with the other
+ *                 ceph pipelines (runCephCommand targets the mon via salt)
+ * @param count    starting attempt counter (default 0)
+ * @param attempts maximum number of polls (default 100 -> ~17 minutes)
+ * @throws Exception if HEALTH_OK is not reached within the attempt budget
+ */
+def waitForHealthy(master, count=0, attempts=100) {
+    // wait for healthy cluster
+    while (count<attempts) {
+        def health = runCephCommand('ceph health')['return'][0].values()[0]
+        if (health.contains('HEALTH_OK')) {
+            common.infoMsg('Cluster is healthy')
+            return
+        }
+        count++
+        sleep(10)
+    }
+    // Previously the loop fell through silently on timeout, letting the
+    // pipeline finish "green" while the cluster was still degraded.
+    throw new Exception("Ceph cluster did not reach HEALTH_OK after " + attempts + " attempts")
+}
+
+/**
+ * Execute an arbitrary shell command on the Ceph mon/admin-keyring node via
+ * Salt and return the raw cmdRun result map (callers index ['return'][0]).
+ * NOTE(review): the pepper env name "pepperEnv" is hard-coded here rather than
+ * reusing the script-level pepperEnv variable; the values happen to match.
+ */
+def runCephCommand(cmd) {
+  return salt.cmdRun("pepperEnv", "I@ceph:mon and I@ceph:common:keyring:admin", cmd, checkResponse=true, batch=null, output=false)
+}
+
+/**
+ * Fetch the JSON listing of currently remapped placement groups.
+ * @param master unused; kept for call-site symmetry with the other helpers
+ * @return raw JSON string output of 'ceph pg ls remapped --format=json'
+ */
+def getpgmap(master) {
+  return runCephCommand('ceph pg ls remapped --format=json')['return'][0].values()[0]
+}
+
+/**
+ * Build "ceph osd pg-upmap-items" commands that pin each remapped PG back to
+ * its currently acting OSDs, so data only moves when the upmap balancer
+ * decides to move it.
+ *
+ * @param master unused; kept for signature compatibility
+ * @param pgmap  parsed JSON of 'ceph pg ls remapped --format=json'
+ * @param map    list the generated command strings are appended to
+ */
+def generatemapping(master,pgmap,map) {
+  def pg_new
+  def pg_old
+
+  for ( pg in pgmap )
+  {
+    // OSDs newly present in the up set vs OSDs leaving the acting set
+    pg_new = pg["up"].minus(pg["acting"])
+    pg_old = pg["acting"].minus(pg["up"])
+
+    // Pair entries positionally, but only over the shorter list: in Groovy an
+    // out-of-range list index yields null, so an uneven up/acting difference
+    // previously produced broken commands like "... pg-upmap-items 1.2 5 null;"
+    def pairs = Math.min(pg_new.size(), pg_old.size())
+    for ( i = 0; i < pairs; i++ )
+    {
+      def string = "ceph osd pg-upmap-items " + pg["pgid"].toString() + " " + pg_new[i] + " " + pg_old[i] + ";"
+      map.add(string)
+    }
+
+  }
+}
+
+def pepperEnv = "pepperEnv"
+
+timeout(time: 12, unit: 'HOURS') {
+    node("python") {
+
+        // create connection to salt master
+        python.setupPepperVirtualenv(pepperEnv, SALT_MASTER_URL, SALT_MASTER_CREDENTIALS)
+
+        // pg-upmap is ignored by pre-luminous clients, so refuse to proceed
+        // if any non-mon ceph client still reports an older release.
+        stage ("verify client versions")
+        {
+          def nodes = salt.getMinions(pepperEnv, "I@ceph:common and not E@mon*")
+          for ( node in nodes )
+          {
+            def versions = salt.cmdRun(pepperEnv, node, "ceph features --format json", checkResponse=true, batch=null, output=false).values()[0]
+            versions = new groovy.json.JsonSlurperClassic().parseText(versions[0][node])
+            if ( versions['client']['group']['release'] != 'luminous' )
+            {
+              throw new Exception("client installed on " + node + " is not luminous. Update all clients to luminous before using this pipeline")
+            }
+          }
+        }
+
+        stage ("enable luminous compat")
+        {
+          runCephCommand('ceph osd set-require-min-compat-client luminous')['return'][0].values()[0]
+        }
+
+        stage ("enable upmap balancer")
+        {
+          runCephCommand('ceph balancer on')['return'][0].values()[0]
+          runCephCommand('ceph balancer mode upmap')['return'][0].values()[0]
+        }
+
+        // Block automatic rebalancing while the new OSD is brought in; the
+        // upmap mappings applied below keep data on the acting OSDs instead.
+        stage ("set norebalance")
+        {
+          runCephCommand('ceph osd set norebalance')['return'][0].values()[0]
+        }
+
+        stage('Install Ceph OSD') {
+            orchestrate.installCephOsd(pepperEnv, HOST)
+        }
+
+        stage ("update mappings")
+        {
+          // BUG FIX: re-fetch AND re-parse the remapped-PG list on every pass.
+          // Previously the JSON was parsed once before the loop, so all three
+          // passes applied the same stale mapping even though getpgmap() was
+          // called again. The command list is also rebuilt per pass so the
+          // commands from earlier passes are not re-executed cumulatively.
+          for (int x = 1; x <= 3; x++) {
+            def pgmapRaw = getpgmap(pepperEnv)
+            if (pgmapRaw == '') {
+              return 1
+            }
+            def pgmap = new groovy.json.JsonSlurperClassic().parseText(pgmapRaw)
+            def mapping = []
+            generatemapping(pepperEnv, pgmap, mapping)
+            mapping.each(this.&runCephCommand)
+            sleep(30)
+          }
+        }
+
+        stage ("unset norebalance")
+        {
+          runCephCommand('ceph osd unset norebalance')['return'][0].values()[0]
+        }
+
+        stage ("wait for healthy cluster")
+        {
+          waitForHealthy(pepperEnv)
+        }
+
+    }
+}
diff --git a/cloud-deploy-pipeline.groovy b/cloud-deploy-pipeline.groovy
index 3084f4b..6b6ec4e 100644
--- a/cloud-deploy-pipeline.groovy
+++ b/cloud-deploy-pipeline.groovy
@@ -433,6 +433,7 @@
                     // Setup kubernetes addons for opencontrail. More info in the definition of the func.
                     orchestrate.setupKubeAddonForContrail(venvPepper, extra_tgt)
                 }
+                orchestrate.installKubernetesClient(venvPepper, extra_tgt)
             }
 
             // install ceph
diff --git a/k8s-upgrade-pipeline.groovy b/k8s-upgrade-pipeline.groovy
index bd963eb..fe7d189 100644
--- a/k8s-upgrade-pipeline.groovy
+++ b/k8s-upgrade-pipeline.groovy
@@ -28,6 +28,8 @@
 *   KUBERNETES_CALICO_CNI_IPAM_SOURCE_HASH    Calico/ipam binary hash. Should be null if update rolling via reclass-system level
  *   KUBERNETES_CALICO_KUBE_CONTROLLERS_IMAGE  Target calico/kube-controllers image. May be null in case of reclass-system rollout.
  *   CALICO_UPGRADE_VERSION                    Version of "calico-upgrade" utility to be used ("v1.0.5" for Calico v3.1.3 target).
+ *   KUBERNETES_ETCD_SOURCE                    Target etcd binary. May be null in case of reclass-system rollout.
+ *   KUBERNETES_ETCD_SOURCE_HASH               Target etcd binary checksum. May be null in case of reclass-system rollout.
  *
 **/
 import groovy.json.JsonSlurper
@@ -87,6 +89,27 @@
     }
 }
 
+/**
+ * Override the etcd binary source and checksum in salt pillar data so the
+ * subsequent etcd state run installs the requested version.
+ * Reads the KUBERNETES_ETCD_SOURCE / KUBERNETES_ETCD_SOURCE_HASH pipeline
+ * parameters from the script binding; callers gate on validInputParam first.
+ */
+def overrideEtcdSource(pepperEnv) {
+    def salt = new com.mirantis.mk.Salt()
+
+    def k8sSaltOverrides = """
+        kubernetes_etcd_source: ${KUBERNETES_ETCD_SOURCE}
+        kubernetes_etcd_source_hash: ${KUBERNETES_ETCD_SOURCE_HASH}
+    """
+    stage("Override etcd binaries to target version") {
+        salt.setSaltOverrides(pepperEnv,  k8sSaltOverrides)
+    }
+}
+
+/**
+ * Apply the etcd.server.service salt state on the target node (installing the
+ * overridden etcd binary and restarting the service), then check cluster
+ * health via etcdctl.
+ *
+ * @param pepperEnv pepper/salt connection environment name
+ * @param target    salt minion target (a single etcd server node)
+ */
+def performEtcdUpdateAndServicesRestart(pepperEnv, target) {
+    def salt = new com.mirantis.mk.Salt()
+
+    stage("Performing etcd update and services restart on ${target}") {
+        salt.enforceState(pepperEnv, target, "etcd.server.service")
+        salt.cmdRun(pepperEnv, target, ". /var/lib/etcd/configenv && etcdctl cluster-health")
+    }
+}
+
 def performKubernetesComputeUpdate(pepperEnv, target) {
     def salt = new com.mirantis.mk.Salt()
 
@@ -711,6 +734,17 @@
             }
 
             /*
+                * Execute etcd update
+            */
+            if ((common.validInputParam('KUBERNETES_ETCD_SOURCE')) && (common.validInputParam('KUBERNETES_ETCD_SOURCE_HASH'))) {
+                overrideEtcdSource(pepperEnv)
+            }
+            def targetHostsEtcd = salt.getMinionsSorted(pepperEnv, "I@etcd:server")
+            for (t in targetHostsEtcd) {
+                performEtcdUpdateAndServicesRestart(pepperEnv, t)
+            }
+
+            /*
                 * Execute k8s update
             */
             if (updates.contains("ctl")) {
diff --git a/opencontrail40-upgrade.groovy b/opencontrail40-upgrade.groovy
index 180ed85..2f89659 100644
--- a/opencontrail40-upgrade.groovy
+++ b/opencontrail40-upgrade.groovy
@@ -26,9 +26,9 @@
 def probe = 1
 def command = 'cmd.shell'
 
-def controlPkgs = 'contrail-config,contrail-config-openstack,contrail-control,contrail-dns,contrail-lib,contrail-nodemgr,contrail-utils,contrail-web-controller,contrail-web-core,neutron-plugin-contrail,python-contrail,contrail-database'
+def controlPkgs = 'contrail-config,contrail-config-openstack,contrail-control,contrail-dns,contrail-lib,contrail-nodemgr,contrail-utils,contrail-web-controller,contrail-web-core,neutron-plugin-contrail,contrail-database'
 def thirdPartyControlPkgsToRemove = 'zookeeper,libzookeeper-java,kafka,cassandra,redis-server,ifmap-server,supervisor'
-def analyticsPkgs = 'contrail-analytics,contrail-lib,contrail-nodemgr,contrail-utils,python-contrail,contrail-database'
+def analyticsPkgs = 'contrail-analytics,contrail-lib,contrail-nodemgr,contrail-utils,contrail-database'
 def thirdPartyAnalyticsPkgsToRemove = 'zookeeper,libzookeeper-java,kafka,cassandra,python-cassandra,cassandra-cpp-driver,redis-server,supervisor'
 def cmpPkgs = 'contrail-lib contrail-nodemgr contrail-utils contrail-vrouter-agent contrail-vrouter-utils python-contrail python-contrail-vrouter-api python-opencontrail-vrouter-netns contrail-vrouter-dkms'
 def neutronServerPkgs = 'neutron-plugin-contrail,contrail-heat,python-contrail'