Merge "Dashboard rationalization for Kubernetes"
diff --git a/.kitchen.yml b/.kitchen.yml
index f939ef3..5bca6a9 100644
--- a/.kitchen.yml
+++ b/.kitchen.yml
@@ -53,7 +53,7 @@
 platforms:
   - name: <%= ENV['PLATFORM'] || 'saltstack-ubuntu-xenial-salt-stable' %>
     driver_config:
-      image: <%= ENV['PLATFORM'] || 'epcim/salt-formulas:saltstack-ubuntu-xenial-salt-stable' %>
+      image: <%= ENV['PLATFORM'] || 'epcim/salt:saltstack-ubuntu-xenial-salt-stable' %>
       platform: ubuntu
 
 suites:
diff --git a/.travis.yml b/.travis.yml
index 1c5139d..45062b7 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -18,22 +18,33 @@
   - bundle install
 
 env:
-  - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2016.3 SUITE=master-cluster
-  - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2017.7 SUITE=master-cluster
-  - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2016.3 SUITE=pool-cluster
-  - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2017.7 SUITE=pool-cluster
-  - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2016.3 SUITE=master-contrail
-  - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2017.7 SUITE=master-contrail
-  - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2016.3 SUITE=master-contrail4-0
-  - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2017.7 SUITE=master-contrail4-0
-  - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2016.3 SUITE=pool-contrail4-0
-  - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2017.7 SUITE=pool-contrail4-0
-  - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2016.3 SUITE=common-storageclass
-#  - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-bionic-salt-2017.7 SUITE=master-cluster
-#  - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-bionic-salt-2017.7 SUITE=pool-cluster
-#  - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-bionic-salt-2017.7 SUITE=master-contrail
-#  - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-bionic-salt-2017.7 SUITE=master-contrail4-0
-#  - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-bionic-salt-2017.7 SUITE=pool-contrail4-0
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2016.3 SUITE=master-cluster
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2016.3 SUITE=pool-cluster
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2016.3 SUITE=master-contrail
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2016.3 SUITE=master-contrail4-0
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2016.3 SUITE=pool-contrail4-0
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2016.3 SUITE=common-storageclass
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2017.7 SUITE=master-cluster
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2017.7 SUITE=pool-cluster
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2017.7 SUITE=master-contrail
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2017.7 SUITE=master-contrail4-0
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2017.7 SUITE=pool-contrail4-0
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2018.3 SUITE=master-cluster
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2018.3 SUITE=pool-cluster
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2018.3 SUITE=master-contrail
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2018.3 SUITE=master-contrail4-0
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2018.3 SUITE=pool-contrail4-0
+  - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2018.3 SUITE=common-storageclass
+#  - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2017.7 SUITE=master-cluster
+#  - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2017.7 SUITE=pool-cluster
+#  - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2017.7 SUITE=master-contrail
+#  - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2017.7 SUITE=master-contrail4-0
+#  - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2017.7 SUITE=pool-contrail4-0
+#  - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2018.3 SUITE=master-cluster
+#  - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2018.3 SUITE=pool-cluster
+#  - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2018.3 SUITE=master-contrail
+#  - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2018.3 SUITE=master-contrail4-0
+#  - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2018.3 SUITE=pool-contrail4-0
 
 before_script:
   - set -o pipefail
diff --git a/README.rst b/README.rst
index 3b50609..c9dc10a 100644
--- a/README.rst
+++ b/README.rst
@@ -70,7 +70,7 @@
             virtlet:
               enabled: true
               namespace: kube-system
-              image: mirantis/virtlet:v1.0.0
+              image: mirantis/virtlet:v1.0.3
               hosts:
               - cmp01
               - cmp02
@@ -476,6 +476,22 @@
         apiserver:
           secure_port: 8081
 
+Kubernetes with MetalLB
+-----------------------
+
+On Master:
+
+.. code-block:: yaml
+
+    kubernetes:
+      common:
+        addons:
+          metallb:
+            enabled: true
+            addresses:
+            - 172.16.10.150-172.16.10.180
+            - 172.16.10.192/26
+
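+The ``addresses`` list accepts both plain IP ranges and CIDR blocks, as shown
+above. The speaker and controller images can be pinned explicitly as well
+(these pillar keys are read by the metallb addon template; when unset it
+falls back to the v0.6.2 images):
+
+.. code-block:: yaml
+
+    kubernetes:
+      common:
+        addons:
+          metallb:
+            speaker_image: metallb/speaker:v0.6.2
+            controller_image: metallb/controller:v0.6.2
+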
 Kubernetes with Flannel
 -----------------------
 
diff --git a/kubernetes/files/calico/calico-node.service.master b/kubernetes/files/calico/calico-node.service.master
index c91dd5c..e79fc65 100644
--- a/kubernetes/files/calico/calico-node.service.master
+++ b/kubernetes/files/calico/calico-node.service.master
@@ -29,6 +29,7 @@
  -p {{ pool.network.calico.prometheus.get('address', '0.0.0.0') }}:{{ master.network.calico.get('prometheus', {}).get('port', 9091) }}:9091 \
 {%- endif %}
  -v /var/log/calico:/var/log/calico \
+ -v /var/lib/calico:/var/lib/calico \
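+{#- /var/lib/calico holds the nodename file that calico/node writes and the CNI plugin reads #}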
  -v /run/docker/plugins:/run/docker/plugins \
  -v /lib/modules:/lib/modules \
  -v /var/run/calico:/var/run/calico \
diff --git a/kubernetes/files/calico/calico-node.service.pool b/kubernetes/files/calico/calico-node.service.pool
index 034a900..0797fa3 100644
--- a/kubernetes/files/calico/calico-node.service.pool
+++ b/kubernetes/files/calico/calico-node.service.pool
@@ -30,6 +30,7 @@
  -p {{ pool.network.calico.prometheus.get('address', '0.0.0.0') }}:{{ pool.network.calico.prometheus.get('port', 9091) }}:9091 \
 {%- endif %}
  -v /var/log/calico:/var/log/calico \
+ -v /var/lib/calico:/var/lib/calico \
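+{#- /var/lib/calico holds the nodename file that calico/node writes and the CNI plugin reads #}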
  -v /run/docker/plugins:/run/docker/plugins \
  -v /lib/modules:/lib/modules \
  -v /var/run/calico:/var/run/calico \
diff --git a/kubernetes/files/dockershim/default.master b/kubernetes/files/dockershim/default.master
index c5f3174..f224475 100644
--- a/kubernetes/files/dockershim/default.master
+++ b/kubernetes/files/dockershim/default.master
@@ -14,6 +14,9 @@
 --hostname-override={{ master.host.name }} \
 --v={{ master.get('verbosity', 2) }} \
 --node-labels=node-role.kubernetes.io/master=true \
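+{#- pod-infra-container-image overrides the "pause" sandbox image pulled by kubelet #}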
+{%- if common.hyperkube.pause_image is defined %}
+--pod-infra-container-image={{ common.hyperkube.pause_image }} \
+{%- endif %}
 {%- if master.get('unschedulable', 'false') %}
 --register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
 {%- endif %}
diff --git a/kubernetes/files/dockershim/default.pool b/kubernetes/files/dockershim/default.pool
index 308b5d6..1cbbbd7 100644
--- a/kubernetes/files/dockershim/default.pool
+++ b/kubernetes/files/dockershim/default.pool
@@ -13,6 +13,9 @@
 --cluster_domain={{ common.addons.dns.domain|replace('_', '-') }} \
 --hostname-override={{ pool.host.name }} \
 --v={{ pool.get('verbosity', 2) }} \
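+{#- pod-infra-container-image overrides the "pause" sandbox image pulled by kubelet #}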
+{%- if common.hyperkube.pause_image is defined %}
+--pod-infra-container-image={{ common.hyperkube.pause_image }} \
+{%- endif %}
 {%- if pillar.kubernetes.master is defined %}
 --node-labels=node-role.kubernetes.io/master=true \
 {%-   if pillar.kubernetes.get('master', {}).get('unschedulable', 'false') %}
diff --git a/kubernetes/files/kube-addons/calico-policy/calico-policy-controller.yml b/kubernetes/files/kube-addons/calico-policy/calico-policy-controller.yml
index 19611f2..52d1b26 100644
--- a/kubernetes/files/kube-addons/calico-policy/calico-policy-controller.yml
+++ b/kubernetes/files/kube-addons/calico-policy/calico-policy-controller.yml
@@ -1,36 +1,58 @@
 {%- from "kubernetes/map.jinja" import common with context -%}
 {%- from "kubernetes/map.jinja" import master with context -%}
+# This manifest deploys the Calico Kubernetes controllers.
+# See https://github.com/projectcalico/kube-controllers
 apiVersion: extensions/v1beta1
-kind: ReplicaSet
+kind: Deployment
 metadata:
-  name: calico-policy-controller
+  name: calico-kube-controllers
   namespace: {{ common.addons.calico_policy.namespace }}
   labels:
-    k8s-app: calico-policy
-    kubernetes.io/cluster-service: "true"
+    k8s-app: calico-kube-controllers
     addonmanager.kubernetes.io/mode: Reconcile
 spec:
+  # The controllers can only have a single active instance.
   replicas: 1
   selector:
     matchLabels:
-      k8s-app: calico-policy
+      k8s-app: calico-kube-controllers
+  strategy:
+    type: Recreate
   template:
     metadata:
-      name: calico-policy-controller
+      name: calico-kube-controllers
       namespace: {{ common.addons.calico_policy.namespace }}
       labels:
-        k8s-app: calico-policy
+        k8s-app: calico-kube-controllers
       annotations:
+        # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
+        # reserves resources for critical add-on pods so that they can be rescheduled after
+        # a failure. This annotation works in tandem with the toleration below.
+        # Note. Rescheduler is deprecated in k8s v1.10 and is to be removed in k8s v1.11.
+        scheduler.alpha.kubernetes.io/critical-pod: ''
 {%- if common.addons.calico_policy.cni is defined %}
         cni: {{ common.addons.calico_policy.cni }}
 {%- endif %}
     spec:
+      # The controllers must run in the host network namespace so that
+      # they aren't governed by policy that would prevent them from working.
       hostNetwork: true
       tolerations:
-        - key: node-role.kubernetes.io/master
-          effect: NoSchedule
+      # this taint is set by all kubelets running `--cloud-provider=external`
+      # so we should tolerate it to schedule the calico pods
+      - key: node.cloudprovider.kubernetes.io/uninitialized
+        value: "true"
+        effect: NoSchedule
+      - key: node-role.kubernetes.io/master
+        effect: NoSchedule
+      # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
+      # This, along with the annotation above marks this pod as a critical add-on.
+      # Note. Rescheduler is deprecated in k8s v1.10 and is to be removed in k8s v1.11.
+      - key: CriticalAddonsOnly
+        operator: Exists
+      serviceAccountName: calico-kube-controllers
       containers:
-        - name: calico-policy-controller
+        - name: calico-kube-controllers
           image: {{ common.addons.calico_policy.image }}
           imagePullPolicy: IfNotPresent
           resources:
@@ -41,25 +63,25 @@
               cpu: 30m
               memory: 64M
           env:
+            # The list of etcd nodes in the cluster.
             - name: ETCD_ENDPOINTS
               value: "{% for member in master.network.calico.etcd.members %}http{% if master.network.calico.etcd.get('ssl', {}).get('enabled') %}s{% endif %}://{{ member.host }}:{{ member.port }}{% if not loop.last %},{% endif %}{% endfor %}"
+            # CA certificate, client certificate, client key files for accessing the etcd cluster.
             - name: ETCD_CA_CERT_FILE
               value: "/var/lib/etcd/ca.pem"
             - name: ETCD_CERT_FILE
               value: "/var/lib/etcd/etcd-client.pem"
             - name: ETCD_KEY_FILE
               value: "/var/lib/etcd/etcd-client.pem"
-            # Location of the Kubernetes API - this shouldn't need to be
-            # changed so long as it is used in conjunction with
-            # CONFIGURE_ETC_HOSTS="true".
-            - name: K8S_API
-              value: "https://kubernetes.default"
-            # Configure /etc/hosts within the container to resolve
-            # the kubernetes.default Service to the correct clusterIP
-            # using the environment provided by the kubelet.
-            # This removes the need for KubeDNS to resolve the Service.
-            - name: CONFIGURE_ETC_HOSTS
-              value: "true"
+            # Which controllers to run.
+            - name: ENABLED_CONTROLLERS
+              value: "policy,profile,workloadendpoint,node"
+            # Minimum log level to be displayed.
+            - name: LOG_LEVEL
+              value: "info"
+            # Period to perform reconciliation with the Calico datastore. Default is 5m.
+            - name: RECONCILER_PERIOD
+              value: "1m"
           volumeMounts:
           - mountPath: /var/lib/etcd/
             name: etcd-certs
@@ -69,3 +91,57 @@
           path: /var/lib/etcd
         name: etcd-certs
 
+---
+
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: calico-kube-controllers
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: calico-kube-controllers
+subjects:
+- kind: ServiceAccount
+  name: calico-kube-controllers
+  namespace: {{ common.addons.calico_policy.namespace }}
+
+---
+
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: calico-kube-controllers
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
+rules:
+  - apiGroups:
+    - ""
+    - extensions
+    resources:
+      - pods
+      - namespaces
+      - networkpolicies
+      - nodes
+    verbs:
+      - watch
+      - list
+  - apiGroups:
+    - networking.k8s.io
+    resources:
+      - networkpolicies
+    verbs:
+      - watch
+      - list
+
+---
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: calico-kube-controllers
+  namespace: {{ common.addons.calico_policy.namespace }}
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
diff --git a/kubernetes/files/kube-addons/contrail/contrail.yaml b/kubernetes/files/kube-addons/contrail/contrail.yaml
index 18cd5cf..ebee90f 100644
--- a/kubernetes/files/kube-addons/contrail/contrail.yaml
+++ b/kubernetes/files/kube-addons/contrail/contrail.yaml
@@ -1,5 +1,8 @@
+{%- from "kubernetes/map.jinja" import common with context -%}
+---
+
 apiVersion: apps/v1beta2
-kind: Deployment
+kind: DaemonSet
 metadata:
   name: opencontrail
   namespace: kube-system
@@ -8,7 +11,6 @@
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
 spec:
-  replicas: 3
   selector:
     matchLabels:
       app: opencontrail
@@ -19,10 +21,40 @@
     spec:
       hostNetwork: true
       containers:
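+      # RabbitMQ runs as a sidecar in every pod of this DaemonSet; the postStart
+      # hook below joins each node to the cluster on ctl01 (hostname hardcoded)
+      # and sets a queue mirroring policy across 3 nodes.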
+      - name: rabbitmq
+        image: rabbitmq:{{ common.addons.get('contrail',{}).get('rabbitmq_version',"3.6.6") }}-management-alpine
+        lifecycle:
+          postStart:
+            exec:
+              command:
+              - /bin/sh
+              - -c
+              - >
+                if [ -z "$(grep rabbitmq /etc/resolv.conf)" ]; then
+                  sed "s/^search \([^ ]\+\)/search rabbitmq.\1 \1/" /etc/resolv.conf > /etc/resolv.conf.new;
+                  cat /etc/resolv.conf.new > /etc/resolv.conf;
+                  rm /etc/resolv.conf.new;
+                fi;
+                until rabbitmqctl node_health_check; do sleep 1; done;
+                if [[ "$HOSTNAME" != "ctl01" && -z "$(rabbitmqctl cluster_status | grep ctl01)" ]]; then
+                  rabbitmqctl stop_app;
+                  rabbitmqctl join_cluster rabbit@ctl01;
+                  rabbitmqctl start_app;
+                fi;
+                rabbitmqctl set_policy ha-all "." '{"ha-mode":"exactly","ha-params":3,"ha-sync-mode":"automatic"}'
+        env:
+        - name: RABBITMQ_ERLANG_COOKIE
+          value: {{ common.addons.get('contrail',{}).get('rabbitmq_erlang_cookie',"YTQMGYEHFATZPDKPOCXX") }}
+
       - name: opencontrail-controller
-        image: docker-prod-local.artifactory.mirantis.com/opencontrail-oc40/opencontrail-controller
+        image: docker-prod-local.artifactory.mirantis.com/opencontrail-oc40/opencontrail-controller:{{ common.addons.get('contrail',{}).get('contrail_version',"latest") }}
         securityContext:
           privileged: true
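+        # pass the host's contrail user/group ids into the container (0 if the user is absent)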
+        env:
+        - name: CONTRAIL_UID
+          value: "{{ salt['user.info']('contrail').get('uid', 0) }}"
+        - name: CONTRAIL_GID
+          value: "{{ salt['group.info']('contrail').get('gid', 0) }}"
         lifecycle:
           postStart:
             exec:
@@ -54,13 +86,21 @@
           mountPath: /etc/zookeeper/conf/zoo.cfg
         - name: etc-zookeeper-conf-log4j-properties
           mountPath: /etc/zookeeper/conf/log4j.properties
-        - name: var-lib-rabbitmq-erlang-cookie
-          mountPath: /var/lib/rabbitmq/.erlang.cookie
+        - name: contrail-logs
+          mountPath: /var/log/contrail
+        - name: journal-controller
+          mountPath: /var/log/journal
+
 
       - name: opencontrail-analyticsdb
-        image: docker-prod-local.artifactory.mirantis.com/opencontrail-oc40/opencontrail-analyticsdb
+        image: docker-prod-local.artifactory.mirantis.com/opencontrail-oc40/opencontrail-analyticsdb:{{ common.addons.get('contrail',{}).get('contrail_version',"latest") }}
         securityContext:
           privileged: true
+        env:
+        - name: CONTRAIL_UID
+          value: "{{ salt['user.info']('contrail').get('uid', 0) }}"
+        - name: CONTRAIL_GID
+          value: "{{ salt['group.info']('contrail').get('gid', 0) }}"
         volumeMounts:
         - name: etc-contrail
           mountPath: /etc/contrail
@@ -88,41 +128,29 @@
           mountPath: /etc/zookeeper/conf/zoo.cfg
         - name: etc-zookeeper-conf-log4j-properties
           mountPath: /etc/zookeeper/conf/log4j.properties
+        - name: contrail-logs
+          mountPath: /var/log/contrail
+        - name: journal-analyticsdb
+          mountPath: /var/log/journal
 
       - name: opencontrail-analytics
-        image: docker-prod-local.artifactory.mirantis.com/opencontrail-oc40/opencontrail-analytics
+        image: docker-prod-local.artifactory.mirantis.com/opencontrail-oc40/opencontrail-analytics:{{ common.addons.get('contrail',{}).get('contrail_version',"latest") }}
         volumeMounts:
         - name: etc-contrail
           mountPath: /etc/contrail
         - name: etc-redis-redis-conf
           mountPath: /etc/redis/redis.conf
+        - name: contrail-logs
+          mountPath: /var/log/contrail
+        - name: journal-analytics
+          mountPath: /var/log/journal
         securityContext:
           privileged: true
-
-      - name: rabbitmq
-        image: rabbitmq:3.6.6-management-alpine
-        lifecycle:
-          postStart:
-            exec:
-              command:
-              - /bin/sh
-              - -c
-              - >
-                if [ -z "$(grep rabbitmq /etc/resolv.conf)" ]; then
-                  sed "s/^search \([^ ]\+\)/search rabbitmq.\1 \1/" /etc/resolv.conf > /etc/resolv.conf.new;
-                  cat /etc/resolv.conf.new > /etc/resolv.conf;
-                  rm /etc/resolv.conf.new;
-                fi;
-                until rabbitmqctl node_health_check; do sleep 1; done;
-                if [[ "$HOSTNAME" != "ctl01" && -z "$(rabbitmqctl cluster_status | grep ctl01)" ]]; then
-                  rabbitmqctl stop_app;
-                  rabbitmqctl join_cluster rabbit@ctl01;
-                  rabbitmqctl start_app;
-                fi;
-                rabbitmqctl set_policy ha-all "." '{"ha-mode":"exactly","ha-params":3,"ha-sync-mode":"automatic"}'
         env:
-        - name: RABBITMQ_ERLANG_COOKIE
-          value: YTQMGYEHFATZPDKPOCXX
+        - name: CONTRAIL_UID
+          value: "{{ salt['user.info']('contrail').get('uid', 0) }}"
+        - name: CONTRAIL_GID
+          value: "{{ salt['group.info']('contrail').get('gid', 0) }}"
 
       volumes:
       # analytics / analyticsdb / controller /
@@ -130,6 +158,10 @@
         hostPath:
           path: /etc/contrail
           type: Directory
+      - name: contrail-logs
+        hostPath:
+          path: /var/log/contrail
+          type: Directory
 
       # controller
       - name: var-lib-configdb
@@ -152,11 +184,10 @@
         hostPath:
           path: /etc/zookeeper/conf/zoo.cfg
           type: File
-      - name: var-lib-rabbitmq-erlang-cookie
+      - name: journal-controller
         hostPath:
-          path: /var/lib/rabbitmq/.erlang.cookie
-          type: File
-
+          path: /var/log/journal/contrail-controller
+          type: DirectoryOrCreate
 
       # analyticsdb
       - name: etc-cassandra-cassandra-env-analytics-sh
@@ -207,12 +238,20 @@
         hostPath:
           path: /etc/zookeeper/conf/log4j.properties
           type: File
+      - name: journal-analyticsdb
+        hostPath:
+          path: /var/log/journal/contrail-analyticsdb
+          type: DirectoryOrCreate
 
       # analytics
       - name: etc-redis-redis-conf
         hostPath:
           path: /etc/redis/redis.conf
           type: File
+      - name: journal-analytics
+        hostPath:
+          path: /var/log/journal/contrail-analytics
+          type: DirectoryOrCreate
 
       nodeSelector:
         "node-role.kubernetes.io/master": "true"
diff --git a/kubernetes/files/kube-addons/contrail/kube-manager.yaml b/kubernetes/files/kube-addons/contrail/kube-manager.yaml
index 0d8162e..00e8d61 100644
--- a/kubernetes/files/kube-addons/contrail/kube-manager.yaml
+++ b/kubernetes/files/kube-addons/contrail/kube-manager.yaml
@@ -1,5 +1,7 @@
+{%- from "kubernetes/map.jinja" import common with context -%}
+---
 apiVersion: apps/v1beta2
-kind: Deployment
+kind: DaemonSet
 metadata:
   name: kube-manager
   namespace: kube-system
@@ -8,7 +10,6 @@
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
 spec:
-  replicas: 3
   selector:
     matchLabels:
       app: kube-manager
@@ -20,9 +21,14 @@
       hostNetwork: true
       containers:
       - name: opencontrail-kube-manager
-        image: docker-prod-local.artifactory.mirantis.com/opencontrail-oc40/opencontrail-kube-manager
+        image: docker-prod-local.artifactory.mirantis.com/opencontrail-oc40/opencontrail-kube-manager:{{ common.addons.get('contrail',{}).get('contrail_version',"latest") }}
         securityContext:
           privileged: true
+        env:
+        - name: CONTRAIL_UID
+          value: "{{ salt['user.info']('contrail').get('uid', 0) }}"
+        - name: CONTRAIL_GID
+          value: "{{ salt['group.info']('contrail').get('gid', 0) }}"
         lifecycle:
           postStart:
             exec:
@@ -34,12 +40,15 @@
                 apt install python-eventlet;
                 service contrail-kube-manager restart;
 
-
         volumeMounts:
         - name: etc-kubernetes
           mountPath: /etc/kubernetes
         - name: etc-contrail
           mountPath: /etc/contrail
+        - name: contrail-logs
+          mountPath: /var/log/contrail
+        - name: journal-kube-manager
+          mountPath: /var/log/journal
 
       volumes:
       - name: etc-kubernetes
@@ -50,6 +59,13 @@
         hostPath:
           path: /etc/contrail
           type: Directory
+      - name: contrail-logs
+        hostPath:
+          path: /var/log/contrail
+      - name: journal-kube-manager
+        hostPath:
+          path: /var/log/journal/contrail-kube-manager
+          type: DirectoryOrCreate
 
       nodeSelector:
         "node-role.kubernetes.io/master": "true"
diff --git a/kubernetes/files/kube-addons/dns/kubedns-rc.yaml b/kubernetes/files/kube-addons/dns/kubedns-rc.yaml
index 8090402..febb3cf 100644
--- a/kubernetes/files/kube-addons/dns/kubedns-rc.yaml
+++ b/kubernetes/files/kube-addons/dns/kubedns-rc.yaml
@@ -156,3 +156,8 @@
             cpu: 10m
       dnsPolicy: Default  # Don't use cluster DNS.
       serviceAccountName: kube-dns
+      {%- if salt['pillar.get']('kubernetes').get('master', {}).get('network', {}).get('opencontrail', {}).get('enabled', false) %}
+      tolerations:
+      - effect: NoSchedule
+        key: node-role.kubernetes.io/node
+      {%- endif %}
diff --git a/kubernetes/files/kube-addons/metallb/metallb.yaml b/kubernetes/files/kube-addons/metallb/metallb.yaml
new file mode 100644
index 0000000..e64afdb
--- /dev/null
+++ b/kubernetes/files/kube-addons/metallb/metallb.yaml
@@ -0,0 +1,280 @@
+{%- from "kubernetes/map.jinja" import common with context -%}
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: metallb-system
+  labels:
+    app: metallb
+    addonmanager.kubernetes.io/mode: Reconcile
+---
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  namespace: metallb-system
+  name: controller
+  labels:
+    app: metallb
+    addonmanager.kubernetes.io/mode: Reconcile
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  namespace: metallb-system
+  name: speaker
+  labels:
+    app: metallb
+    addonmanager.kubernetes.io/mode: Reconcile
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: metallb-system:controller
+  labels:
+    app: metallb
+    addonmanager.kubernetes.io/mode: Reconcile
+rules:
+- apiGroups: [""]
+  resources: ["services"]
+  verbs: ["get", "list", "watch", "update"]
+- apiGroups: [""]
+  resources: ["services/status"]
+  verbs: ["update"]
+- apiGroups: [""]
+  resources: ["events"]
+  verbs: ["create", "patch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: metallb-system:speaker
+  labels:
+    app: metallb
+    addonmanager.kubernetes.io/mode: Reconcile
+rules:
+- apiGroups: [""]
+  resources: ["services", "endpoints", "nodes"]
+  verbs: ["get", "list", "watch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  namespace: metallb-system
+  name: leader-election
+  labels:
+    app: metallb
+    addonmanager.kubernetes.io/mode: Reconcile
+rules:
+- apiGroups: [""]
+  resources: ["endpoints"]
+  resourceNames: ["metallb-speaker"]
+  verbs: ["get", "update"]
+- apiGroups: [""]
+  resources: ["endpoints"]
+  verbs: ["create"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  namespace: metallb-system
+  name: config-watcher
+  labels:
+    app: metallb
+    addonmanager.kubernetes.io/mode: Reconcile
+rules:
+- apiGroups: [""]
+  resources: ["configmaps"]
+  verbs: ["get", "list", "watch"]
+- apiGroups: [""]
+  resources: ["events"]
+  verbs: ["create"]
+---
+
+## Role bindings
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: metallb-system:controller
+  labels:
+    app: metallb
+    addonmanager.kubernetes.io/mode: Reconcile
+subjects:
+- kind: ServiceAccount
+  name: controller
+  namespace: metallb-system
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: metallb-system:controller
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: metallb-system:speaker
+  labels:
+    app: metallb
+    addonmanager.kubernetes.io/mode: Reconcile
+subjects:
+- kind: ServiceAccount
+  name: speaker
+  namespace: metallb-system
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: metallb-system:speaker
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  namespace: metallb-system
+  name: config-watcher
+  labels:
+    app: metallb
+    addonmanager.kubernetes.io/mode: Reconcile
+subjects:
+- kind: ServiceAccount
+  name: controller
+- kind: ServiceAccount
+  name: speaker
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: config-watcher
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  namespace: metallb-system
+  name: leader-election
+  labels:
+    app: metallb
+    addonmanager.kubernetes.io/mode: Reconcile
+subjects:
+- kind: ServiceAccount
+  name: speaker
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: leader-election
+---
+apiVersion: apps/v1beta2
+kind: DaemonSet
+metadata:
+  namespace: metallb-system
+  name: speaker
+  labels:
+    app: metallb
+    component: speaker
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  selector:
+    matchLabels:
+      app: metallb
+      component: speaker
+  template:
+    metadata:
+      labels:
+        app: metallb
+        component: speaker
+      annotations:
+        prometheus.io/scrape: "true"
+        prometheus.io/port: "7472"
+    spec:
+      serviceAccountName: speaker
+      terminationGracePeriodSeconds: 0
+      hostNetwork: true
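+      # The speaker announces service IPs from each node (layer2 mode per the
+      # ConfigMap below); it needs the host network namespace and the net_raw
+      # capability to answer ARP requests.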
+      containers:
+      - name: speaker
+        image: {{ common.addons.get('metallb', {}).get('speaker_image', 'metallb/speaker:v0.6.2') }}
+        imagePullPolicy: IfNotPresent
+        args:
+        - --port=7472
+        - --config=config
+        env:
+        - name: METALLB_NODE_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        ports:
+        - name: monitoring
+          containerPort: 7472
+        resources:
+          limits:
+            cpu: 100m
+            memory: 100Mi
+        securityContext:
+          allowPrivilegeEscalation: false
+          readOnlyRootFilesystem: true
+          capabilities:
+            drop:
+            - all
+            add:
+            - net_raw
+
+---
+apiVersion: apps/v1beta2
+kind: Deployment
+metadata:
+  namespace: metallb-system
+  name: controller
+  labels:
+    app: metallb
+    component: controller
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  revisionHistoryLimit: 3
+  selector:
+    matchLabels:
+      app: metallb
+      component: controller
+  template:
+    metadata:
+      labels:
+        app: metallb
+        component: controller
+      annotations:
+        prometheus.io/scrape: "true"
+        prometheus.io/port: "7472"
+    spec:
+      serviceAccountName: controller
+      terminationGracePeriodSeconds: 0
+      securityContext:
+        runAsNonRoot: true
+        runAsUser: 65534 # nobody
+      containers:
+      - name: controller
+        image: {{ common.addons.get('metallb', {}).get('controller_image', 'metallb/controller:v0.6.2') }}
+        imagePullPolicy: IfNotPresent
+        args:
+        - --port=7472
+        - --config=config
+        ports:
+        - name: monitoring
+          containerPort: 7472
+        resources:
+          limits:
+            cpu: 100m
+            memory: 100Mi
+        securityContext:
+          allowPrivilegeEscalation: false
+          capabilities:
+            drop:
+            - all
+          readOnlyRootFilesystem: true
+---
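+# Address pools are rendered from the kubernetes:common:addons:metallb:addresses
+# pillar list; MetalLB accepts both IP ranges and CIDR notation here.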
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  namespace: metallb-system
+  name: config
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
+data:
+  config: |
+    address-pools:
+    - name: metallb-ip-space
+      protocol: layer2
+      addresses:
+      {%- for address in common.addons.get('metallb', {}).get('addresses', []) %}
+      - {{ address }}
+      {%- endfor %}
diff --git a/kubernetes/files/kube-addons/virtlet/virtlet-ds.yml b/kubernetes/files/kube-addons/virtlet/virtlet-ds.yml
index 45d352f..7a2372e 100644
--- a/kubernetes/files/kube-addons/virtlet/virtlet-ds.yml
+++ b/kubernetes/files/kube-addons/virtlet/virtlet-ds.yml
@@ -5,6 +5,7 @@
 apiVersion: extensions/v1beta1
 kind: DaemonSet
 metadata:
+  creationTimestamp: null
   name: virtlet
   namespace: {{ common.addons.virtlet.namespace }}
   labels:
@@ -12,10 +13,20 @@
 spec:
   template:
     metadata:
+      creationTimestamp: null
       labels:
         runtime: virtlet
       name: virtlet
     spec:
+      hostNetwork: true
+      dnsPolicy: ClusterFirstWithHostNet
+      # hostPID is true to (1) enable VMs to survive a virtlet container restart
+      # (to be checked) and (2) to enable the use of nsenter in the init container
+      hostPID: true
+      # the bootstrap procedure needs to create a ConfigMap in the kube-system namespace
+      serviceAccountName: virtlet
+
+      # only run Virtlet pods on nodes with the extraRuntime=virtlet label
       affinity:
         nodeAffinity:
           requiredDuringSchedulingIgnoredDuringExecution:
@@ -25,19 +36,97 @@
                 operator: In
                 values:
                 - virtlet
-      containers:
-      - command:
-        - /libvirt.sh
+      initContainers:
+      # The init container copies virtlet's flexvolume driver
+      # to the default kubelet plugin dir and ensures that
+      # the directories needed by libvirt & virtlet exist on the host
+      - name: prepare-node
+        image: {{ common.addons.virtlet.image }}
+        imagePullPolicy: IfNotPresent
+        command:
+        - /prepare-node.sh
+        volumeMounts:
+        - name: k8s-flexvolume-plugins-dir
+          mountPath: /kubelet-volume-plugins
+        - name: run
+          mountPropagation: Bidirectional
+          mountPath: /run
+        - name: dockersock
+          mountPath: /var/run/docker.sock
+        - name: log
+          mountPath: /hostlog
+        # for ensuring that /var/lib/libvirt/images exists on node
+        - name: var-lib
+          mountPath: /host-var-lib
+        - name: dev
+          mountPath: /dev
+        - mountPath: /var/lib/virtlet
+          name: virtlet
+        securityContext:
+          privileged: true
         env:
+        - name: KUBE_NODE_NAME
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: spec.nodeName
         - name: VIRTLET_DISABLE_KVM
           valueFrom:
             configMapKeyRef:
-              key: disable_kvm
               name: virtlet-config
+              key: disable_kvm
               optional: true
+        - name: VIRTLET_SRIOV_SUPPORT
+          valueFrom:
+            configMapKeyRef:
+              name: virtlet-config
+              key: sriov_support
+              optional: true
+        - name: VIRTLET_DOWNLOAD_PROTOCOL
+          valueFrom:
+            configMapKeyRef:
+              name: virtlet-config
+              key: download_protocol
+              optional: true
+        - name: VIRTLET_LOGLEVEL
+          valueFrom:
+            configMapKeyRef:
+              name: virtlet-config
+              key: loglevel
+              optional: true
+        - name: VIRTLET_CALICO_SUBNET
+          valueFrom:
+            configMapKeyRef:
+              name: virtlet-config
+              key: calico-subnet
+              optional: true
+        - name: IMAGE_REGEXP_TRANSLATION
+          valueFrom:
+            configMapKeyRef:
+              name: virtlet-config
+              key: image_regexp_translation
+              optional: true
+        - name: VIRTLET_RAW_DEVICES
+          valueFrom:
+            configMapKeyRef:
+              name: virtlet-config
+              key: raw_devices
+              optional: true
+        - name: VIRTLET_DISABLE_LOGGING
+          valueFrom:
+            configMapKeyRef:
+              name: virtlet-config
+              key: disable_logging
+              optional: true
+        - name: VIRTLET_IMAGE_TRANSLATIONS_DIR
+          value: /etc/virtlet/images
+
+      containers:
+      - name: libvirt
         image: {{ common.addons.virtlet.image }}
         imagePullPolicy: IfNotPresent
-        name: libvirt
+        command:
+        - /libvirt.sh
         readinessProbe:
           exec:
             command:
@@ -57,89 +146,40 @@
           name: boot
           readOnly: true
         - mountPath: /run
-          mountPropagation: Bidirectional
           name: run
+          mountPropagation: Bidirectional
         - mountPath: /var/lib/virtlet
           name: virtlet
         - mountPath: /var/lib/libvirt
           name: libvirt
         - mountPath: /var/run/libvirt
           name: libvirt-sockets
-        - mountPath: /var/log/vms
-          name: vms-log
-        - mountPath: /var/log/libvirt
-          name: libvirt-log
-        - mountPath: /dev
-          name: dev
-      - env:
-        - name: VIRTLET_DISABLE_KVM
-          valueFrom:
-            configMapKeyRef:
-              key: disable_kvm
-              name: virtlet-config
-              optional: true
-        - name: VIRTLET_DOWNLOAD_PROTOCOL
-          valueFrom:
-            configMapKeyRef:
-              key: download_protocol
-              name: virtlet-config
-              optional: true
-        - name: VIRTLET_LOGLEVEL
-          valueFrom:
-            configMapKeyRef:
-              key: loglevel
-              name: virtlet-config
-              optional: true
-        - name: VIRTLET_CALICO_SUBNET
-          valueFrom:
-            configMapKeyRef:
-              key: calico-subnet
-              name: virtlet-config
-              optional: true
-        - name: IMAGE_REGEXP_TRANSLATION
-          valueFrom:
-            configMapKeyRef:
-              key: image_regexp_translation
-              name: virtlet-config
-              optional: true
-        - name: VIRTLET_DISABLE_LOGGING
-          valueFrom:
-            configMapKeyRef:
-              key: disable_logging
-              name: virtlet-config
-              optional: true
-        - name: VIRTLET_SRIOV_SUPPORT
-          valueFrom:
-            configMapKeyRef:
-              key: sriov_support
-              name: virtlet-config
-              optional: true
-        - name: VIRTLET_RAW_DEVICES
-          valueFrom:
-            configMapKeyRef:
-              key: raw_devices
-              name: virtlet-config
-              optional: true
-        - name: IMAGE_TRANSLATIONS_DIR
-          value: /etc/virtlet/images
-        - name: KUBERNETES_POD_LOGS
-          value: /kubernetes-log
-        image: {{ common.addons.virtlet.image }}
-        imagePullPolicy: IfNotPresent
-        name: virtlet
+        # the log dir is needed here because otherwise libvirt will produce errors
+        # like this:
+        # Unable to pre-create chardev file '/var/log/vms/afd75bbb-8e97-11e7-9561-02420ac00002/cirros-vm_0.log': No such file or directory
+        - name: vms-log
+          mountPath: /var/log/vms
+        - name: libvirt-log
+          mountPath: /var/log/libvirt
+        - name: dev
+          mountPath: /dev
+        securityContext:
+          privileged: true
         readinessProbe:
           exec:
             command:
             - /bin/sh
             - -c
-            - socat - UNIX:/run/virtlet.sock </dev/null
-        resources: {}
-        securityContext:
-          privileged: true
+            - socat - UNIX:/var/run/libvirt/libvirt-sock-ro </dev/null
+      - name: virtlet
+        image: {{ common.addons.virtlet.image }}
+        # If a local virtlet image has been injected, use it instead of the officially published one
+        imagePullPolicy: IfNotPresent
         volumeMounts:
         - mountPath: /run
-          mountPropagation: Bidirectional
           name: run
+          mountPropagation: Bidirectional
+        # /boot and /lib/modules are required by supermin
         - mountPath: /lib/modules
           name: modules
           readOnly: true
@@ -155,74 +195,40 @@
         - mountPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
           name: k8s-flexvolume-plugins-dir
         - mountPath: /var/lib/kubelet/pods
-          mountPropagation: Bidirectional
           name: k8s-pods-dir
-        - mountPath: /var/log/vms
-          name: vms-log
-        {%- if master.network.get('opencontrail', {}).get('enabled', False) %}
-        - name: contrail-log
-          mountPath: /var/log/contrail
-        - name: contrail-data
-          mountPath: /var/lib/contrail
-        {%- endif %}
+          mountPropagation: Bidirectional
+        - name: vms-log
+          mountPath: /var/log/vms
         - mountPath: /etc/virtlet/images
           name: image-name-translations
-        - mountPath: /kubernetes-log
-          name: pods-log
-      - command:
-        - /vms.sh
+        - name: pods-log
+          mountPath: /var/log/pods
+        securityContext:
+          privileged: true
+        readinessProbe:
+          exec:
+            command:
+            - /bin/sh
+            - -c
+            - socat - UNIX:/run/virtlet.sock </dev/null
+      - name: vms
         image: {{ common.addons.virtlet.image }}
         imagePullPolicy: IfNotPresent
-        name: vms
-        resources: {}
+        command:
+        - /vms.sh
         volumeMounts:
         - mountPath: /var/lib/virtlet
           name: virtlet
         - mountPath: /var/lib/libvirt
           name: libvirt
-        - mountPath: /var/log/vms
-          name: vms-log
-        - mountPath: /dev
-          name: dev
-        - mountPath: /lib/modules
-          name: modules
-      {%- if master.network.get('opencontrail', {}).get('enabled', False) %}
-      dnsPolicy: ClusterFirstWithHostNet
-      {%- endif %}
-      hostNetwork: true
-      hostPID: true
-      initContainers:
-      - command:
-        - /prepare-node.sh
-        env:
-        - name: VIRTLET_DISABLE_KVM
-          valueFrom:
-            configMapKeyRef:
-              key: disable_kvm
-              name: virtlet-config
-              optional: true
-        image: {{ common.addons.virtlet.image }}
-        imagePullPolicy: IfNotPresent
-        name: prepare-node
-        resources: {}
-        securityContext:
-          privileged: true
-        volumeMounts:
-        - mountPath: /kubelet-volume-plugins
-          name: k8s-flexvolume-plugins-dir
-        - mountPath: /run
-          mountPropagation: Bidirectional
-          name: run
-        - mountPath: /var/run/docker.sock
-          name: dockersock
-        - mountPath: /hostlog
-          name: log
-        - mountPath: /host-var-lib
-          name: var-lib
-        - mountPath: /dev
-          name: dev
-      serviceAccountName: virtlet
+        - name: vms-log
+          mountPath: /var/log/vms
+        - name: dev
+          mountPath: /dev
+        - name: modules
+          mountPath: /lib/modules
       volumes:
+      # /dev is needed for host raw device access
       - hostPath:
           path: /dev
         name: dev
@@ -238,6 +244,9 @@
       - hostPath:
           path: /run
         name: run
+      # TODO: don't hardcode docker socket location here
+      # This will require CRI proxy installation to run
+      # in host mount namespace.
       - hostPath:
           path: /var/run/docker.sock
         name: dockersock
@@ -277,28 +286,11 @@
       - configMap:
           name: virtlet-image-translations
         name: image-name-translations
-      {%- if master.network.get('opencontrail', {}).get('enabled', False) %}
-      - hostPath:
-          path: /var/log/contrail
-        name: contrail-log
-      - hostPath:
-          path: /var/lib/contrail
-        name: contrail-data
-      - hostPath:
-          path: /virtlet
-        name: virtlet-bin
-      {%- endif %}
-  updateStrategy: {}
-status:
-  currentNumberScheduled: 0
-  desiredNumberScheduled: 0
-  numberMisscheduled: 0
-  numberReady: 0
-
 ---
 apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRoleBinding
 metadata:
+  creationTimestamp: null
   name: virtlet
   labels:
     addonmanager.kubernetes.io/mode: Reconcile
@@ -310,27 +302,29 @@
 - kind: ServiceAccount
   name: virtlet
   namespace: {{ common.addons.virtlet.namespace }}
-
 ---
 apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRole
 metadata:
+  creationTimestamp: null
   name: virtlet
   namespace: {{ common.addons.virtlet.namespace }}
   labels:
     addonmanager.kubernetes.io/mode: Reconcile
 rules:
-- apiGroups:
-  - ""
-  resources:
-  - configmaps
-  verbs:
-  - create
-
+  - apiGroups:
+    - ""
+    resources:
+    - configmaps
+    - nodes
+    verbs:
+    - create
+    - get
 ---
 apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRole
 metadata:
+  creationTimestamp: null
   name: configmap-reader
   labels:
     addonmanager.kubernetes.io/mode: Reconcile
@@ -343,11 +337,11 @@
   - get
   - list
   - watch
-
 ---
 apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRole
 metadata:
+  creationTimestamp: null
   name: virtlet-userdata-reader
   labels:
     addonmanager.kubernetes.io/mode: Reconcile
@@ -359,11 +353,11 @@
   - secrets
   verbs:
   - get
-
 ---
 apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRoleBinding
 metadata:
+  creationTimestamp: null
   name: kubelet-node-binding
   labels:
     addonmanager.kubernetes.io/mode: Reconcile
@@ -375,11 +369,11 @@
 - apiGroup: rbac.authorization.k8s.io
   kind: Group
   name: system:nodes
-
 ---
 apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRoleBinding
 metadata:
+  creationTimestamp: null
   name: vm-userdata-binding
   labels:
     addonmanager.kubernetes.io/mode: Reconcile
@@ -391,11 +385,11 @@
 - kind: ServiceAccount
   name: virtlet
   namespace: {{ common.addons.virtlet.namespace }}
-
 ---
 apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRole
 metadata:
+  creationTimestamp: null
   name: virtlet-crd
   labels:
     addonmanager.kubernetes.io/mode: Reconcile
@@ -410,14 +404,15 @@
   - virtlet.k8s
   resources:
   - virtletimagemappings
+  - virtletconfigmappings
   verbs:
   - list
   - get
-
 ---
 apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRoleBinding
 metadata:
+  creationTimestamp: null
   name: virtlet-crd
   labels:
     addonmanager.kubernetes.io/mode: Reconcile
@@ -429,16 +424,119 @@
 - kind: ServiceAccount
   name: virtlet
   namespace: {{ common.addons.virtlet.namespace }}
-
 ---
 apiVersion: v1
 kind: ServiceAccount
 metadata:
+  creationTimestamp: null
   name: virtlet
   namespace: {{ common.addons.virtlet.namespace }}
   labels:
     addonmanager.kubernetes.io/mode: Reconcile
 ---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  creationTimestamp: null
+  name: virtletimagemappings.virtlet.k8s
+  namespace: {{ common.addons.virtlet.namespace }}
+  labels:
+    virtlet.cloud: ""
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  group: virtlet.k8s
+  names:
+    kind: VirtletImageMapping
+    plural: virtletimagemappings
+    shortNames:
+    - vim
+    singular: virtletimagemapping
+  scope: Namespaced
+  version: v1
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: null
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  creationTimestamp: null
+  name: virtletconfigmappings.virtlet.k8s
+  namespace: {{ common.addons.virtlet.namespace }}
+  labels:
+    virtlet.cloud: ""
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  group: virtlet.k8s
+  names:
+    kind: VirtletConfigMapping
+    plural: virtletconfigmappings
+    shortNames:
+    - vcm
+    singular: virtletconfigmapping
+  scope: Namespaced
+  validation:
+    openAPIV3Schema:
+      properties:
+        spec:
+          properties:
+            config:
+              properties:
+                calicoSubnetSize:
+                  maximum: 32
+                  minimum: 0
+                  type: integer
+                cniConfigDir:
+                  type: string
+                cniPluginDir:
+                  type: string
+                criSocketPath:
+                  type: string
+                databasePath:
+                  type: string
+                disableKVM:
+                  type: boolean
+                disableLogging:
+                  type: boolean
+                downloadProtocol:
+                  pattern: ^https?$
+                  type: string
+                enableRegexpImageTranslation:
+                  type: boolean
+                enableSriov:
+                  type: boolean
+                fdServerSocketPath:
+                  type: string
+                imageDir:
+                  type: string
+                imageTranslationConfigsDir:
+                  type: string
+                libvirtURI:
+                  type: string
+                logLevel:
+                  maximum: 2147483647
+                  minimum: 0
+                  type: integer
+                rawDevices:
+                  type: string
+                skipImageTranslation:
+                  type: boolean
+            nodeName:
+              type: string
+            nodeSelector:
+              type: object
+            priority:
+              type: integer
+  version: v1
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: null
+---
 apiVersion: v1
 data:
   images.yml: |
diff --git a/kubernetes/files/kubelet/default.master b/kubernetes/files/kubelet/default.master
index adf0f64..b3f0e41 100644
--- a/kubernetes/files/kubelet/default.master
+++ b/kubernetes/files/kubelet/default.master
@@ -13,6 +13,9 @@
 --hostname-override={{ master.host.name }} \
 --v={{ master.get('verbosity', 2) }} \
 --node-labels=node-role.kubernetes.io/master=true \
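+{#- pod-infra-container-image overrides the "pause" sandbox image pulled by kubelet #}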
+{%- if common.hyperkube.pause_image is defined %}
+--pod-infra-container-image={{ common.hyperkube.pause_image }} \
+{%- endif %}
 {%- if salt['pkg.version_cmp'](version,'1.8') >= 0 %}
 --fail-swap-on={{ master.kubelet.fail_on_swap }} \
 {%- if common.addons.get('virtlet', {}).get('enabled') %}
diff --git a/kubernetes/files/kubelet/default.pool b/kubernetes/files/kubelet/default.pool
index 19bb8f6..06f2cf4 100644
--- a/kubernetes/files/kubelet/default.pool
+++ b/kubernetes/files/kubelet/default.pool
@@ -12,6 +12,9 @@
 --cluster_domain={{ common.addons.dns.domain|replace('_', '-') }} \
 --hostname-override={{ pool.host.name }} \
 --v={{ pool.get('verbosity', 2) }} \
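+{#- pod-infra-container-image overrides the "pause" sandbox image pulled by kubelet #}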
+{%- if common.hyperkube.pause_image is defined %}
+--pod-infra-container-image={{ common.hyperkube.pause_image }} \
+{%- endif %}
 {%- if salt['pkg.version_cmp'](version,'1.8') >= 0 %}
 --fail-swap-on={{ pool.kubelet.fail_on_swap }} \
 {%- if common.addons.get('virtlet', {}).get('enabled') %}
diff --git a/kubernetes/files/manifest/kube-apiserver.manifest b/kubernetes/files/manifest/kube-apiserver.manifest
index dddf336..afa0c4c 100644
--- a/kubernetes/files/manifest/kube-apiserver.manifest
+++ b/kubernetes/files/manifest/kube-apiserver.manifest
@@ -42,7 +42,6 @@
       {%- if master.auth.get('token', {}).enabled|default(True) %}
       --token-auth-file={{ master.auth.token.file|default("/srv/kubernetes/known_tokens.csv") }}
       {%- endif %}
-      --apiserver-count={{ master.apiserver.get('count', 1) }}
       --etcd-quorum-read=true
       --v={{ master.get('verbosity', 2) }}
       --allow-privileged=True
@@ -50,6 +49,11 @@
       {%- if salt['pkg.version_cmp'](version,'1.8') >= 0 %}
         --feature-gates=MountPropagation=true
       {%- endif %}
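+      {#- k8s >= 1.9: the lease-based endpoint reconciler supersedes the static --apiserver-count flag #}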
+      {%- if salt['pkg.version_cmp'](version,'1.9') >= 0 %}
+      --endpoint-reconciler-type={{ master.apiserver.get('endpoint-reconciler', 'lease') }}
+      {%- else %}
+      --apiserver-count={{ master.apiserver.get('count', 1) }}
+      {%- endif %}
       {%- endif %}
       {%- if master.auth.get('mode') %}
       --authorization-mode={{ master.auth.mode }}
diff --git a/kubernetes/map.jinja b/kubernetes/map.jinja
index 74f204e..b89cdad 100644
--- a/kubernetes/map.jinja
+++ b/kubernetes/map.jinja
@@ -1,4 +1,5 @@
 {% set version = salt['cmd.shell']("(hyperkube --version kubelet 2> /dev/null || echo '0.0') | sed -e 's/-.*//g' -e 's/v//g' -e 's/Kubernetes //g' | awk -F'.' '{print $1 \".\" $2}'") %}
+{% set full_version = salt['cmd.shell']("(hyperkube --version kubelet 2> /dev/null || echo '0.0') | sed -e 's/-.*//g' -e 's/v//g' -e 's/Kubernetes //g'") %}
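+{#- full_version keeps the full x.y.z release; the "version" variable above is truncated to x.y #}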
 
 {% set common = salt['grains.filter_by']({
     'Debian': {
diff --git a/kubernetes/master/controller.sls b/kubernetes/master/controller.sls
index 720302d..83f752d 100644
--- a/kubernetes/master/controller.sls
+++ b/kubernetes/master/controller.sls
@@ -1,6 +1,7 @@
 {%- from "kubernetes/map.jinja" import master with context %}
 {%- from "kubernetes/map.jinja" import common with context %}
 {%- from "kubernetes/map.jinja" import version %}
+{%- from "kubernetes/map.jinja" import full_version %}
 {%- if master.enabled %}
 
 {%- if master.auth.get('token', {}).enabled|default(True) %}
@@ -93,6 +94,8 @@
     - group: root
     - mode: 644
     - contents: >-
+        # Using hyperkube version v{{ full_version }}
+
         DAEMON_ARGS="
         --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,DefaultStorageClass
         --allow-privileged=True
@@ -123,7 +126,6 @@
         {%- if master.auth.get('token', {}).enabled|default(True) %}
         --token-auth-file={{ master.auth.token.file|default("/srv/kubernetes/known_tokens.csv") }}
         {%- endif %}
-        --apiserver-count={{ master.apiserver.get('count', 1) }}
         --v={{ master.get('verbosity', 2) }}
         --advertise-address={{ master.apiserver.address }}
         --etcd-servers=
@@ -148,6 +150,12 @@
 {%- if salt['pkg.version_cmp'](version,'1.8') >= 0 %}
         --feature-gates=MountPropagation=true
 {%- endif %}
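+{#- Same 1.9+ switch as the kube-apiserver manifest: lease endpoint reconciler replaces --apiserver-count #}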
+{%- if salt['pkg.version_cmp'](version,'1.9') >= 0 %}
+        --endpoint-reconciler-type={{ master.apiserver.get('endpoint-reconciler', 'lease') }}
+{%- else %}
+        --apiserver-count={{ master.apiserver.get('count', 1) }}
+{%- endif %}
+
 {%- endif %}
 {%- for key, value in master.get('apiserver', {}).get('daemon_opts', {}).items() %}
         --{{ key }}={{ value }}
@@ -174,6 +182,8 @@
     - group: root
     - mode: 644
     - contents: >-
+        # Using hyperkube version v{{ full_version }}
+
         DAEMON_ARGS="
         --cluster-name=kubernetes
         --kubeconfig /etc/kubernetes/controller-manager.kubeconfig
@@ -202,6 +212,8 @@
     - group: root
     - mode: 644
     - contents: >-
+        # Using hyperkube version v{{ full_version }}
+
         DAEMON_ARGS="
         --kubeconfig /etc/kubernetes/scheduler.kubeconfig
         --leader-elect=true
diff --git a/kubernetes/master/kube-addons.sls b/kubernetes/master/kube-addons.sls
index bdd1c90..e25979e 100644
--- a/kubernetes/master/kube-addons.sls
+++ b/kubernetes/master/kube-addons.sls
@@ -9,6 +9,16 @@
     - group: root
     - mode: 0755
 
+{%- if common.addons.get('metallb', {}).get('enabled', False) %}
+/etc/kubernetes/addons/metallb/metallb.yaml:
+  file.managed:
+    - source: salt://kubernetes/files/kube-addons/metallb/metallb.yaml
+    - template: jinja
+    - group: root
+    - dir_mode: 755
+    - makedirs: True
{%- endif %}
+
 {%- if master.network.get('flannel', {}).get('enabled', False) %}
 /etc/kubernetes/addons/flannel/flannel.yml:
   file.managed:
diff --git a/kubernetes/meta/prometheus.yml b/kubernetes/meta/prometheus.yml
index 3ca5453..e873d38 100644
--- a/kubernetes/meta/prometheus.yml
+++ b/kubernetes/meta/prometheus.yml
@@ -155,8 +155,8 @@
         severity: warning
         service: kubernetes
       annotations:
-        summary: "Failed to get the container metrics"
-        description: "Prometheus was not able to scrape metrics from the container on the {{ $labels.instance }} instance."
+        summary: "Failed to get Kubernetes container metrics"
+        description: "Prometheus was not able to scrape metrics from the container on the {{ $labels.instance }} Kubernetes instance."
     {% endraw %}
     KubernetesProcessDown:
       if: >-
@@ -168,7 +168,7 @@
         service: kubernetes
       annotations:
         summary: "Kubernetes {{ $labels.process_name }} process is down"
-        description: "Kubernetes {{ $labels.process_name }} process on the {{ $labels.host }} node is down for at least 2 minutes."
+        description: "Kubernetes {{ $labels.process_name }} process on the {{ $labels.host }} node is down for 2 minutes."
     {% endraw %}
     KubernetesProcessDownMinor:
       if: >-
@@ -179,9 +179,9 @@
         severity: minor
         service: kubernetes
       annotations:
-        summary: "{% endraw %}{{ instance_minor_threshold_percent * 100 }}%{% raw %} of Kubernetes {{ $labels.process_name }} process instances are down"
+        summary: "{% endraw %}{{ instance_minor_threshold_percent * 100 }}%{% raw %} of Kubernetes {{ $labels.process_name }} processes are down"
         description: >-
-          {{ $value }} of Kubernetes {{ $labels.process_name }} process instances are down {% endraw %}(at least {{ instance_minor_threshold_percent * 100 }}%) for at least 2 minutes.
+          {{ $value }} of Kubernetes {{ $labels.process_name }} processes (>= {% endraw %}{{ instance_minor_threshold_percent * 100 }}%) are down for 2 minutes.
     KubernetesProcessDownMajor:
       if: >-
         count(procstat_running{process_name=~"hyperkube-.*"} == 0) by (process_name) > count(procstat_running{process_name=~"hyperkube-.*"}) by (process_name) * {{ instance_major_threshold_percent }}
@@ -190,9 +190,9 @@
         severity: major
         service: kubernetes
       annotations:
-        summary: "{{ instance_major_threshold_percent * 100 }}%{% raw %} of Kubernetes {{ $labels.process_name }} process instances are down"
+        summary: "{{ instance_major_threshold_percent * 100 }}%{% raw %} of Kubernetes {{ $labels.process_name }} processes are down"
         description: >-
-          {{ $value }} of Kubernetes {{ $labels.process_name }} process instances are down {% endraw %}(at least {{ instance_major_threshold_percent * 100 }}%) for at least 2 minutes.
+          {{ $value }} of Kubernetes {{ $labels.process_name }} processes (>= {% endraw %}{{ instance_major_threshold_percent * 100 }}%) are down for 2 minutes.
     KubernetesProcessOutage:
       if: >-
         count(procstat_running{process_name=~"hyperkube-.*"}) by (process_name) == count(procstat_running{process_name=~"hyperkube-.*"} == 0) by (process_name)
@@ -203,7 +203,7 @@
         service: kubernetes
       annotations:
         summary: "Kubernetes {{ $labels.process_name }} cluster outage"
-        description: "All Kubernetes {{ $labels.process_name }} process instances are down for at least 2 minutes."
+        description: "All Kubernetes {{ $labels.process_name }} processes are down for 2 minutes."
     {% endraw %}
 {%- if network.get('calico', {}).get('enabled', False) %}
     CalicoProcessDown:
@@ -216,7 +216,7 @@
         service: calico
       annotations:
         summary: "Calico {{ $labels.process_name }} process is down"
-        description: "Calico {{ $labels.process_name }} process on the {{ $labels.host }} node is down for at least 2 minutes."
+        description: "Calico {{ $labels.process_name }} process on the {{ $labels.host }} node is down for 2 minutes."
     {% endraw %}
     CalicoProcessDownMinor:
       if: >-
@@ -226,9 +226,9 @@
         severity: minor
         service: calico
       annotations:
-        summary: "{{ instance_minor_threshold_percent * 100 }}%{% raw %} of Calico {{ $labels.process_name }} process instances are down"
+        summary: "{{ instance_minor_threshold_percent * 100 }}%{% raw %} of Calico {{ $labels.process_name }} processes are down"
         description: >-
-          {{ $value }} of Calico {{ $labels.process_name }} process instances are down {% endraw %}(at least {{ instance_minor_threshold_percent * 100 }}%) for at least 2 minutes.
+          {{ $value }} of Calico {{ $labels.process_name }} processes (>= {% endraw %}{{ instance_minor_threshold_percent * 100 }}%) are down for 2 minutes.
     CalicoProcessDownMajor:
       if: >-
         count(procstat_running{process_name=~"calico-felix|bird|bird6|confd"} == 0) by (process_name) > count(procstat_running{process_name=~"calico-felix|bird|bird6|confd"}) by (process_name) * {{ instance_major_threshold_percent }}
@@ -237,9 +237,9 @@
         severity: major
         service: calico
       annotations:
-        summary: "{{ instance_major_threshold_percent * 100 }}%{% raw %} of Calico {{ $labels.process_name }} process instances are down"
+        summary: "{{ instance_major_threshold_percent * 100 }}%{% raw %} of Calico {{ $labels.process_name }} processes are down"
         description: >-
-          {{ $value }} of Calico {{ $labels.process_name }} process instances are down {% endraw %}(at least {{ instance_major_threshold_percent * 100 }}%) for at least 2 minutes.
+          {{ $value }} of Calico {{ $labels.process_name }} processes (>= {% endraw %}{{ instance_major_threshold_percent * 100 }}%) are down for 2 minutes.
     CalicoProcessOutage:
       if: >-
         count(procstat_running{process_name=~"calico-felix|bird|bird6|confd"}) by (process_name) == count(procstat_running{process_name=~"calico-felix|bird|bird6|confd"} == 0) by (process_name)
@@ -250,6 +250,6 @@
         service: calico
       annotations:
         summary: "Calico {{ $labels.process_name }} cluster outage"
-        description: "All Calico {{ $labels.process_name }} process instances are down for at least 2 minutes."
+        description: "All Calico {{ $labels.process_name }} processes are down for 2 minutes."
     {% endraw %}
 {% endif %}
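
For reference, the {% raw %}/{% endraw %} split leaves the threshold math to Salt while Prometheus expands $value and $labels at alert time; with instance_minor_threshold_percent set to 0.3 (an illustrative value), the minor description above reaches Prometheus as:

    {{ $value }} of Calico {{ $labels.process_name }} processes (>= 30.0%) are down for 2 minutes.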
diff --git a/kubernetes/pool/init.sls b/kubernetes/pool/init.sls
index c4b1967..1270cc2 100644
--- a/kubernetes/pool/init.sls
+++ b/kubernetes/pool/init.sls
@@ -13,4 +13,6 @@
 {%- if pool.network.get('genie', {}).get('enabled', False) %}
 - kubernetes.pool.genie
 {%- endif %}
+{%- if pool.get('kube_proxy', {}).get('enabled', True) %}
 - kubernetes.pool.kube-proxy
+{%- endif %}
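
kube-proxy stays included by default, since the lookup falls back to True; pool nodes that run an alternative service proxy can opt out with:

    kubernetes:
      pool:
        kube_proxy:
          enabled: false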
diff --git a/metadata/service/common.yml b/metadata/service/common.yml
index 161e025..0fa49df 100644
--- a/metadata/service/common.yml
+++ b/metadata/service/common.yml
@@ -35,7 +35,7 @@
           server_image: mirantis/k8s-netchecker-server:stable
         calico_policy:
           enabled: False
-          image: calico/kube-policy-controller:v0.5.4
+          image: docker-prod-virtual.docker.mirantis.net/mirantis/projectcalico/calico/kube-controllers:v1.0.4
           namespace: kube-system
         contrail_network_controller:
           enabled: False
@@ -58,7 +58,7 @@
         virtlet:
           enabled: False
           namespace: kube-system
-          image: mirantis/virtlet:v1.0.0
+          image: mirantis/virtlet:v1.0.3
           criproxy_version: v0.10.0
           criproxy_source: md5=52717b1f70f15558ef4bdb0e4d4948da
       cni:
diff --git a/metadata/service/control/cluster-admin.yml b/metadata/service/control/roles/cluster-admin.yml
similarity index 90%
rename from metadata/service/control/cluster-admin.yml
rename to metadata/service/control/roles/cluster-admin.yml
index 90abb23..f35b6a5 100644
--- a/metadata/service/control/cluster-admin.yml
+++ b/metadata/service/control/roles/cluster-admin.yml
@@ -1,5 +1,3 @@
-applications:
-- kubernetes
 parameters:
   kubernetes:
     control:
diff --git a/metadata/service/control/roles/fluentd-view.yml b/metadata/service/control/roles/fluentd-view.yml
new file mode 100644
index 0000000..18ef3f0
--- /dev/null
+++ b/metadata/service/control/roles/fluentd-view.yml
@@ -0,0 +1,12 @@
+parameters:
+  kubernetes:
+    control:
+      role:
+        view:
+          enabled: true
+          kind: ClusterRole
+          binding:
+            fluentd-cluster-watch:
+              subject:
+                system:nodes:
+                  kind: Group
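
The new role file is consumed like any other service-level class; a hedged reclass sketch pulling the fluentd binding into a cluster model (the class path mirrors the file location under metadata/service):

    classes:
    - service.kubernetes.control.roles.fluentd-view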
diff --git a/metadata/service/master/single.yml b/metadata/service/master/single.yml
index 8eb6b63..223b4f0 100644
--- a/metadata/service/master/single.yml
+++ b/metadata/service/master/single.yml
@@ -53,7 +53,7 @@
           tiller_image: gcr.io/kubernetes-helm/tiller:v2.4.2
         calico_policy:
           enabled: False
-          image: calico/kube-policy-controller:v0.5.4
+          image: docker-prod-virtual.docker.mirantis.net/mirantis/projectcalico/calico/kube-controllers:v1.0.4
           namespace: kube-system
         contrail_network_controller:
           enabled: False
@@ -62,7 +62,7 @@
         virtlet:
           enabled: False
           namespace: kube-system
-          image: mirantis/virtlet:v1.0.0
+          image: mirantis/virtlet:v1.0.3
       token:
         admin: ${_param:kubernetes_admin_token}
         kubelet: ${_param:kubernetes_kubelet_token}
diff --git a/tests/pillar/master_cluster.sls b/tests/pillar/master_cluster.sls
index 9e8afa2..91c1ff3 100644
--- a/tests/pillar/master_cluster.sls
+++ b/tests/pillar/master_cluster.sls
@@ -40,7 +40,7 @@
         hosts:
         - cmp01
         - cmp02
-        image: mirantis/virtlet:v1.0.0
+        image: mirantis/virtlet:v1.0.3
     monitoring:
       backend: prometheus
   master:
diff --git a/tests/pillar/master_contrail.sls b/tests/pillar/master_contrail.sls
index e86a293..32478f7 100644
--- a/tests/pillar/master_contrail.sls
+++ b/tests/pillar/master_contrail.sls
@@ -37,7 +37,7 @@
       virtlet:
         enabled: true
         namespace: kube-system
-        image: mirantis/virtlet:v1.0.0
+        image: mirantis/virtlet:v1.0.3
         hosts:
         - cmp01
         - cmp02
diff --git a/tests/pillar/master_contrail4_0.sls b/tests/pillar/master_contrail4_0.sls
index ec48f54..e6c6085 100644
--- a/tests/pillar/master_contrail4_0.sls
+++ b/tests/pillar/master_contrail4_0.sls
@@ -37,7 +37,7 @@
       virtlet:
         enabled: true
         namespace: kube-system
-        image: mirantis/virtlet:v1.0.0
+        image: mirantis/virtlet:v1.0.3
         hosts:
         - cmp01
         - cmp02
diff --git a/tests/pillar/pool_cluster.sls b/tests/pillar/pool_cluster.sls
index 4de3614..c75b87b 100644
--- a/tests/pillar/pool_cluster.sls
+++ b/tests/pillar/pool_cluster.sls
@@ -16,7 +16,7 @@
       virtlet:
         enabled: true
         namespace: kube-system
-        image: mirantis/virtlet:v1.0.0
+        image: mirantis/virtlet:v1.0.3
         hosts:
         - cmp01
         - cmp02
diff --git a/tests/pillar/pool_cluster_with_domain.sls b/tests/pillar/pool_cluster_with_domain.sls
index 271d762..4fea3dc 100644
--- a/tests/pillar/pool_cluster_with_domain.sls
+++ b/tests/pillar/pool_cluster_with_domain.sls
@@ -16,7 +16,7 @@
       virtlet:
         enabled: true
         namespace: kube-system
-        image: mirantis/virtlet:v1.0.0
+        image: mirantis/virtlet:v1.0.3
         hosts:
         - cmp01
         - cmp02
diff --git a/tests/pillar/pool_contrail4_0.sls b/tests/pillar/pool_contrail4_0.sls
index f396906..98c1cf7 100644
--- a/tests/pillar/pool_contrail4_0.sls
+++ b/tests/pillar/pool_contrail4_0.sls
@@ -16,7 +16,7 @@
       virtlet:
         enabled: true
         namespace: kube-system
-        image: mirantis/virtlet:v1.0.0
+        image: mirantis/virtlet:v1.0.3
         hosts:
         - cmp01
         - cmp02