Merge "Update .travis.yml and .kitchen.yml files for parallel testing"
diff --git a/.kitchen.yml b/.kitchen.yml
index c227138..bdcae80 100644
--- a/.kitchen.yml
+++ b/.kitchen.yml
@@ -65,6 +65,11 @@
       pillars-from-files:
         kubernetes.sls: tests/pillar/pool_cluster.sls
 
+  - name: pool_cluster_with_domain
+    provisioner:
+      pillars-from-files:
+        kubernetes.sls: tests/pillar/pool_cluster_with_domain.sls
+
   - name: master_contrail
     provisioner:
       pillars-from-files:
diff --git a/README.rst b/README.rst
index 393fe1d..0ec06d2 100644
--- a/README.rst
+++ b/README.rst
@@ -1,4 +1,3 @@
-
 ==================
 Kubernetes Formula
 ==================
@@ -31,7 +30,7 @@
       kubernetes:
         common:
           hyperkube:
-            image: gcr.io/google_containers/hyperkube:v1.5.2
+            image: gcr.io/google_containers/hyperkube:v1.6.5
         pool:
           network:
             calicoctl:
@@ -45,7 +44,7 @@
 
     parameters:
       kubernetes:
-        master:
+        common:
           addons:
             helm:
               enabled: true
@@ -56,7 +55,7 @@
 
     parameters:
       kubernetes:
-        master:
+        common:
           addons:
             calico_policy:
               enabled: true
@@ -67,15 +66,15 @@
 
     parameters:
       kubernetes:
-        master:
+        common:
           addons:
             virtlet:
               enabled: true
               namespace: kube-system
+              image: mirantis/virtlet:v0.7.0
               hosts:
               - cmp01
               - cmp02
-              image: mirantis/virtlet:latest
 
 Enable netchecker addon
 
@@ -83,13 +82,49 @@
 
     parameters:
       kubernetes:
+        common:
+          addons:
+            netchecker:
+              enabled: true
         master:
           namespace:
             netchecker:
               enabled: true
+
+Enable Kubernetes Federation control plane
+
+.. code-block:: yaml
+
+    parameters:
+      kubernetes:
+        master:
+          federation:
+            enabled: True
+            name: federation
+            namespace: federation-system
+            source: https://dl.k8s.io/v1.6.6/kubernetes-client-linux-amd64.tar.gz
+            hash: 94b2c9cd29981a8e150c187193bab0d8c0b6e906260f837367feff99860a6376
+            service_type: NodePort
+            dns_provider: coredns
+            childclusters:
+              - secondcluster.mydomain
+              - thirdcluster.mydomain
+
+Enable external DNS addon with CoreDNS provider
+
+.. code-block:: yaml
+
+    parameters:
+      kubernetes:
+        common:
           addons:
-            netchecker:
-              enabled: true
+            externaldns:
+              coredns:
+                enabled: True
+              externaldns:
+                enabled: True
+                domain: company.mydomain
+                provider: coredns
 
 Configure service verbosity
 
@@ -102,7 +137,7 @@
         pool:
           verbosity: 2
 
-Set cluster domain
+Set cluster name and domain
 
 .. code-block:: yaml
 
@@ -110,13 +145,14 @@
       kubernetes:
         common:
           kubernetes_cluster_domain: mycluster.domain
+          cluster_name: mycluster
 
 Enable autoscaler for dns addon. Poll period can be skipped.
 
 .. code-block:: yaml
 
     kubernetes:
-        master:
+        common:
           addons:
             dns:
               domain: cluster.local
@@ -181,13 +217,15 @@
 .. code-block:: yaml
 
     kubernetes:
-        master:
+        common:
+          cluster_name: cluster
           addons:
             dns:
               domain: cluster.local
               enabled: true
               replicas: 1
               server: 10.254.0.10
+        master:
           admin:
             password: password
             username: admin
@@ -275,12 +313,13 @@
 .. code-block:: yaml
 
     kubernetes:
-      master:
+      common:
         addons:
           contrail_network_controller:
             enabled: true
             namespace: kube-system
             image: yashulyak/contrail-controller:latest
+      master:
         network:
           engine: opencontrail
           default_domain: default-domain
@@ -310,7 +349,7 @@
 .. code-block:: yaml
 
     kubernetes:
-      master:
+      common:
         addons:
           public_ip: 1.1.1.1
 
diff --git a/kubernetes/_common.sls b/kubernetes/_common.sls
index 5171517..39a17dc 100644
--- a/kubernetes/_common.sls
+++ b/kubernetes/_common.sls
@@ -55,6 +55,7 @@
     - onlyif: /bin/false
     {%- endif %}
 
+{%- if common.addons.get('virtlet', {}).get('enabled') %}
 /tmp/criproxy:
   file.directory:
     - user: root
@@ -62,7 +63,7 @@
 
 copy-criproxy-bin:
   cmd.run:
-    - name: docker run --rm -v /tmp/criproxy/:/tmp/criproxy/ --entrypoint cp mirantis/virtlet -vr /criproxy /tmp/criproxy
+    - name: docker run --rm -v /tmp/criproxy/:/tmp/criproxy/ --entrypoint cp {{ common.addons.virtlet.image }} -vr /criproxy /tmp/criproxy
     - require:
       - file: /tmp/criproxy
     {%- if grains.get('noservices') %}
@@ -117,6 +118,8 @@
   - onlyif: /bin/false
   {%- endif %}
 
+{%- endif %}
+
 /etc/systemd/system/kubelet.service:
   file.managed:
     - source: salt://kubernetes/files/systemd/kubelet.service
diff --git a/kubernetes/files/calico/calico-node.service.master b/kubernetes/files/calico/calico-node.service.master
index 924364d..995f517 100644
--- a/kubernetes/files/calico/calico-node.service.master
+++ b/kubernetes/files/calico/calico-node.service.master
@@ -8,7 +8,7 @@
 ExecStartPre=-/usr/bin/docker rm -f calico-node
 ExecStart=/usr/bin/docker run --net=host --privileged \
  --name=calico-node \
- -e HOSTNAME={{ master.host.name }} \
+ -e NODENAME={{ master.host.name }}{% if master.host.get('domain') %}.{{ master.host.domain }}{%- endif %} \
  -e IP={{ master.apiserver.address }} \
  -e IP6={{ master.get('ipv6_address', '') }} \
 {%- if master.network.calico_network_backend is defined %}
diff --git a/kubernetes/files/calico/calico-node.service.pool b/kubernetes/files/calico/calico-node.service.pool
index dbc60e1..305518f 100644
--- a/kubernetes/files/calico/calico-node.service.pool
+++ b/kubernetes/files/calico/calico-node.service.pool
@@ -8,7 +8,7 @@
 ExecStartPre=-/usr/bin/docker rm -f calico-node
 ExecStart=/usr/bin/docker run --net=host --privileged \
  --name=calico-node \
- -e HOSTNAME={{ pool.host.name }} \
+ -e NODENAME={{ pool.host.name }}{% if pool.host.get('domain') %}.{{ pool.host.domain }}{%- endif %} \
  -e IP={{ pool.address }} \
  -e IP6={{ pool.get('ipv6_address', '') }} \
 {%- if pool.network.calico_network_backend is defined %}
diff --git a/kubernetes/files/calico/calico.conf b/kubernetes/files/calico/calico.conf
index 8b48f73..7952945 100644
--- a/kubernetes/files/calico/calico.conf
+++ b/kubernetes/files/calico/calico.conf
@@ -1,6 +1,7 @@
-{%- from "kubernetes/map.jinja" import pool with context %}
-{%- from "kubernetes/map.jinja" import master with context %}
+{%- from "kubernetes/map.jinja" import common with context -%}
+{%- from "kubernetes/map.jinja" import pool with context -%}
 {
+    "nodeName": "{{ pool.host.name }}{% if pool.host.get('domain') %}.{{ pool.host.domain }}{%- endif %}",
     "name": "calico-k8s-network",
     "type": "calico",
     "etcd_endpoints": "{% for member in pool.network.etcd.members %}http{% if pool.network.etcd.get('ssl', {}).get('enabled') %}s{% endif %}://{{ member.host }}:{{ member.port }}{% if not loop.last %},{% endif %}{% endfor %}",
@@ -9,7 +10,7 @@
     "etcd_cert_file": "/var/lib/etcd/etcd-client.pem",
     "etcd_ca_cert_file": "/var/lib/etcd/ca.pem",
 {%- endif %}
-{%- if  master.get('addons', {}).get('calico_policy', {}).get('enabled', False) %}
+{%- if  common.get('addons', {}).get('calico_policy', {}).get('enabled', False) %}
     "policy": {
       "type": "k8s"
     },
diff --git a/kubernetes/files/calico/network-environment.pool b/kubernetes/files/calico/network-environment.pool
index 7746947..5607e65 100644
--- a/kubernetes/files/calico/network-environment.pool
+++ b/kubernetes/files/calico/network-environment.pool
@@ -4,7 +4,7 @@
 DEFAULT_IPV4={{ pool.address }}
 
 # The Kubernetes master IP
-KUBERNETES_MASTER={{ pool.apiserver.host }}
+KUBERNETES_MASTER={{ pool.apiserver.host }}:{{ pool.apiserver.secure_port }}
 
 # IP and port of etcd instance used by Calico
-ETCD_ENDPOINTS={% for member in pool.network.etcd.members %}http://{{ member.host }}:{{ member.port }}{% if not loop.last %},{% endif %}{% endfor %}
\ No newline at end of file
+ETCD_ENDPOINTS={% for member in pool.network.etcd.members %}http://{{ member.host }}:{{ member.port }}{% if not loop.last %},{% endif %}{% endfor %}
diff --git a/kubernetes/files/federation/coredns.conf b/kubernetes/files/federation/coredns.conf
new file mode 100644
index 0000000..1999965
--- /dev/null
+++ b/kubernetes/files/federation/coredns.conf
@@ -0,0 +1,6 @@
+{%- from "kubernetes/map.jinja" import master with context %}
+{%- from "kubernetes/map.jinja" import common with context %}
+[Global]
+etcd-endpoints = http://coredns-etcd.{{ common.addons.coredns.namespace }}:2379
+zones = {{ master.federation.name }}
+
diff --git a/kubernetes/files/grafana_dashboards/kubernetes_cluster_prometheus.json b/kubernetes/files/grafana_dashboards/kubernetes_cluster_prometheus.json
index ee49457..b34e210 100644
--- a/kubernetes/files/grafana_dashboards/kubernetes_cluster_prometheus.json
+++ b/kubernetes/files/grafana_dashboards/kubernetes_cluster_prometheus.json
@@ -1799,7 +1799,7 @@
         "allValue": ".*",
         "current": {},
         "hide": 0,
-        "includeAll": true,
+        "includeAll": false,
         "label": null,
         "multi": false,
         "name": "Node",
@@ -1808,7 +1808,7 @@
         "refresh": 1,
         "refresh_on_load": true,
         "regex": "",
-        "sort": 0,
+        "sort": 1,
         "tagValuesQuery": "",
         "tags": [],
         "tagsQuery": "",
diff --git a/kubernetes/files/known_tokens.csv b/kubernetes/files/known_tokens.csv
index c17d03e..caea56a 100644
--- a/kubernetes/files/known_tokens.csv
+++ b/kubernetes/files/known_tokens.csv
@@ -1,13 +1,14 @@
-{%- from "kubernetes/map.jinja" import master with context %}
+{%- from "kubernetes/map.jinja" import master with context -%}
+{%- from "kubernetes/map.jinja" import common with context -%}
 {{ master.token.admin }},admin,admin
 {{ master.token.kubelet }},kubelet,kubelet
 {{ master.token.kube_proxy }},kube_proxy,kube_proxy
 {{ master.token.scheduler }},system:scheduler,system:scheduler
 {{ master.token.controller_manager }},system:controller_manager,system:controller_manager
-{%- if master.addons.logging is defined %}
+{%- if common.addons.logging is defined %}
 {{ master.token.logging }},system:logging,system:logging
 {%- endif %}
-{%- if master.addons.monitoring is defined %}
+{%- if common.addons.monitoring is defined %}
 {{ master.token.monitoring }},system:monitoring,system:monitoring
 {%- endif %}
-{{ master.token.dns }},system:dns,system:dns
\ No newline at end of file
+{{ master.token.dns }},system:dns,system:dns
diff --git a/kubernetes/files/kube-addon-manager/kube-addons.config b/kubernetes/files/kube-addon-manager/kube-addons.config
new file mode 100644
index 0000000..c98d034
--- /dev/null
+++ b/kubernetes/files/kube-addon-manager/kube-addons.config
@@ -0,0 +1,3 @@
+KUBECTL_BIN=/usr/bin/kubectl
+ADDON_PATH=/etc/kubernetes/addons
+TEST_ADDON_CHECK_INTERVAL_SEC=30
diff --git a/kubernetes/files/kube-addon-manager/kube-addons.sh b/kubernetes/files/kube-addon-manager/kube-addons.sh
new file mode 100644
index 0000000..2ee28c4
--- /dev/null
+++ b/kubernetes/files/kube-addon-manager/kube-addons.sh
@@ -0,0 +1,226 @@
+#!/bin/bash
+
+# Copyright 2014 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# LIMITATIONS
+# 1. Exit code is probably not always correct.
+# 2. There are no unittests.
+# 3. Will not work if the total length of paths to addons is greater than
+#    bash can handle. Probably it is not a problem: ARG_MAX=2097152 on GCE.
+
+# cosmetic improvements to be done
+# 1. Improve the log function; add timestamp, file name, etc.
+# 2. Logging doesn't work from files that print things out.
+# 3. Kubectl prints the output to stderr (the output should be captured and then
+#    logged)
+
+# The business logic for whether a given object should be created
+# was already enforced by salt, and /etc/kubernetes/addons is the
+# managed result of that. Start everything below that directory.
+KUBECTL=${KUBECTL_BIN:-/usr/bin/kubectl}
+KUBECTL_OPTS=${KUBECTL_OPTS:-}
+
+ADDON_CHECK_INTERVAL_SEC=${TEST_ADDON_CHECK_INTERVAL_SEC:-60}
+ADDON_PATH=${ADDON_PATH:-/etc/kubernetes/addons}
+
+SYSTEM_NAMESPACE=kube-system
+
+# Addons could use this label with two modes:
+# - ADDON_MANAGER_LABEL=Reconcile
+# - ADDON_MANAGER_LABEL=EnsureExists
+ADDON_MANAGER_LABEL="addonmanager.kubernetes.io/mode"
+# This label is deprecated (only for Addon Manager). In future release
+# addon-manager may not respect it anymore. Addons with
+# CLUSTER_SERVICE_LABEL=true and without ADDON_MANAGER_LABEL=EnsureExists
+# will be reconciled for now.
+CLUSTER_SERVICE_LABEL="kubernetes.io/cluster-service"
+
+# Remember that you can't log from functions that print some output (because
+# logs are also printed on stdout).
+# $1 level
+# $2 message
+function log() {
+  # manage log levels manually here
+
+  # add the timestamp if you find it useful
+  case $1 in
+    DB3 )
+#        echo "$1: $2"
+        ;;
+    DB2 )
+#        echo "$1: $2"
+        ;;
+    DBG )
+#        echo "$1: $2"
+        ;;
+    INFO )
+        echo "$1: $2"
+        ;;
+    WRN )
+        echo "$1: $2"
+        ;;
+    ERR )
+        echo "$1: $2"
+        ;;
+    * )
+        echo "INVALID_LOG_LEVEL $1: $2"
+        ;;
+  esac
+}
+
+# $1 filename of addon to start.
+# $2 count of tries to start the addon.
+# $3 delay in seconds between two consecutive tries
+function start_addon() {
+  local -r addon_filename=$1;
+  local -r tries=$2;
+  local -r delay=$3;
+
+  create_resource_from_string "$(cat ${addon_filename})" "${tries}" "${delay}" "${addon_filename}"
+}
+
+# $1 string with json or yaml.
+# $2 count of tries to start the addon.
+# $3 delay in seconds between two consecutive tries
+# $4 name of this object to use when logging about it.
+function create_resource_from_string() {
+  local -r config_string=$1;
+  local tries=$2;
+  local -r delay=$3;
+  local -r config_name=$4;
+  while [ ${tries} -gt 0 ]; do
+    echo "${config_string}" | ${KUBECTL} ${KUBECTL_OPTS} apply -f - && \
+      log INFO "== Successfully started ${config_name} at $(date -Is)" && \
+      return 0;
+    let tries=tries-1;
+    log WRN "== Failed to start ${config_name} at $(date -Is). ${tries} tries remaining. =="
+    sleep ${delay};
+  done
+  return 1;
+}
+
+# $1 resource type.
+function annotate_addons() {
+  local -r obj_type=$1;
+
+  # Annotating objects that already have this annotation should fail.
+  # Only try once for now.
+  ${KUBECTL} ${KUBECTL_OPTS} annotate ${obj_type} -l ${CLUSTER_SERVICE_LABEL}=true \
+    kubectl.kubernetes.io/last-applied-configuration='' --overwrite=false
+
+  if [[ $? -eq 0 ]]; then
+    log INFO "== Annotate resources completed successfully at $(date -Is) =="
+  else
+    log WRN "== Annotate resources completed with errors at $(date -Is) =="
+  fi
+}
+
+# $1 enable --prune or not.
+function reconcile_addons() {
+  local -r enable_prune=$1;
+
+  # TODO: Remove the first command in future release.
+  # Adding this for backward compatibility. Old addons have CLUSTER_SERVICE_LABEL=true and don't have
+  # ADDON_MANAGER_LABEL=EnsureExists will still be reconciled.
+  # Filter out `configured` message to not noisily log.
+  # `created`, `pruned` and errors will be logged.
+  log INFO "== Reconciling with deprecated label =="
+  ${KUBECTL} ${KUBECTL_OPTS} apply -f ${ADDON_PATH} \
+    -l ${CLUSTER_SERVICE_LABEL}=true,${ADDON_MANAGER_LABEL}!=EnsureExists \
+    --prune=${enable_prune} --recursive | grep -v configured
+
+  log INFO "== Reconciling with addon-manager label =="
+  ${KUBECTL} ${KUBECTL_OPTS} apply -f ${ADDON_PATH} \
+    -l ${CLUSTER_SERVICE_LABEL}!=true,${ADDON_MANAGER_LABEL}=Reconcile \
+    --prune=${enable_prune} --recursive | grep -v configured
+
+  log INFO "== Kubernetes addon reconcile completed at $(date -Is) =="
+}
+
+function ensure_addons() {
+  # Creating objects that already exist should fail.
+  # Filter out `AlreadyExists` message to not noisily log.
+  ${KUBECTL} ${KUBECTL_OPTS} create -f ${ADDON_PATH} \
+    -l ${ADDON_MANAGER_LABEL}=EnsureExists --recursive 2>&1 | grep -v AlreadyExists
+
+  log INFO "== Kubernetes addon ensure completed at $(date -Is) =="
+}
+
+# The business logic for whether a given object should be created
+# was already enforced by salt, and /etc/kubernetes/addons is the
+# managed result of that. Start everything below that directory.
+log INFO "== Kubernetes addon manager started at $(date -Is) with ADDON_CHECK_INTERVAL_SEC=${ADDON_CHECK_INTERVAL_SEC} =="
+
+## Wait for the default service account to be created in the kube-system namespace.
+#token_found=""
+#while [ -z "${token_found}" ]; do
+#  sleep .5
+#  token_found=$(${KUBECTL} ${KUBECTL_OPTS} get --namespace="${SYSTEM_NAMESPACE}" serviceaccount default -o go-template="{{with index .secrets 0}}{{.name}}{{end}}")
+#  if [[ $? -ne 0 ]]; then
+#    token_found="";
+#    log WRN "== Error getting default service account, retry in 0.5 second =="
+#  fi
+#done
+#
+#log INFO "== Default service account in the ${SYSTEM_NAMESPACE} namespace has token ${token_found} =="
+#
+# Create admission_control objects if defined before any other addon services. If the limits
+# are defined in a namespace other than default, we should still create the limits for the
+# default namespace.
+for obj in $(find /etc/kubernetes/admission-controls \( -name \*.yaml -o -name \*.json \)); do
+  start_addon "${obj}" 100 10 default &
+  log INFO "++ obj ${obj} is created ++"
+done
+
+# TODO: The annotate and spin up parts should be removed after 1.6 is released.
+
+# Fake the "kubectl.kubernetes.io/last-applied-configuration" annotation on old resources
+# in order to clean them up by `kubectl apply --prune`.
+# RCs have to be annotated for 1.4->1.5+ upgrade, because we migrated from RCs to deployments for all default addons in 1.5.
+# Other types resources will also need this fake annotation if their names are changed,
+# otherwise they would be leaked during upgrade.
+log INFO "== Annotating the old addon resources at $(date -Is) =="
+annotate_addons ReplicationController
+annotate_addons Deployment
+
+# Create new addon resources by apply (with --prune=false).
+# The old RCs will not fight for pods created by new Deployments with the same label because the `controllerRef` feature.
+# The new Deployments will not fight for pods created by old RCs with the same label because the additional `pod-template-hash` label.
+# Apply will fail if some fields are modified but not are allowed, in that case should bump up addon version and name (e.g. handle externally).
+log INFO "== Executing apply to spin up new addon resources at $(date -Is) =="
+ensure_addons
+reconcile_addons false
+
+# Wait for new addons to be spun up before deleting old resources
+log INFO "== Wait for addons to be spinned up at $(date -Is) =="
+sleep ${ADDON_CHECK_INTERVAL_SEC}
+
+# Start the apply loop.
+# Check if the configuration has changed recently - in case the user
+# created/updated/deleted the files on the master.
+log INFO "== Entering periodical apply loop at $(date -Is) =="
+while true; do
+  start_sec=$(date +"%s")
+  # Only print stderr for the readability of logging
+  ensure_addons
+  reconcile_addons true
+  end_sec=$(date +"%s")
+  len_sec=$((${end_sec}-${start_sec}))
+  # subtract the time passed from the sleep time
+  if [[ ${len_sec} -lt ${ADDON_CHECK_INTERVAL_SEC} ]]; then
+    sleep_time=$((${ADDON_CHECK_INTERVAL_SEC}-${len_sec}))
+    sleep ${sleep_time}
+  fi
+done
diff --git a/kubernetes/files/kube-addons/calico-policy/calico-policy-controller.yml b/kubernetes/files/kube-addons/calico-policy/calico-policy-controller.yml
index 670506c..3234421 100644
--- a/kubernetes/files/kube-addons/calico-policy/calico-policy-controller.yml
+++ b/kubernetes/files/kube-addons/calico-policy/calico-policy-controller.yml
@@ -1,24 +1,24 @@
-{%- from "kubernetes/map.jinja" import master with context %}
+{%- from "kubernetes/map.jinja" import common with context -%}
+{%- from "kubernetes/map.jinja" import master with context -%}
 apiVersion: extensions/v1beta1
 kind: ReplicaSet
 metadata:
   name: calico-policy-controller
-  namespace: {{ master.addons.calico_policy.namespace }}
+  namespace: {{ common.addons.calico_policy.namespace }}
   labels:
     k8s-app: calico-policy
     kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
 spec:
   replicas: 1
   selector:
     matchLabels:
-      kubernetes.io/cluster-service: "true"
       k8s-app: calico-policy
   template:
     metadata:
       name: calico-policy-controller
-      namespace: {{ master.addons.calico_policy.namespace }}
+      namespace: {{ common.addons.calico_policy.namespace }}
       labels:
-        kubernetes.io/cluster-service: "true"
         k8s-app: calico-policy
     spec:
       hostNetwork: true
@@ -27,7 +27,7 @@
           effect: NoSchedule
       containers:
         - name: calico-policy-controller
-          image: {{ master.addons.calico_policy.image }}
+          image: {{ common.addons.calico_policy.image }}
           imagePullPolicy: IfNotPresent
           resources:
             limits:
diff --git a/kubernetes/files/kube-addons/contrail_network_controller/contrail-network-controller-configmap.yml b/kubernetes/files/kube-addons/contrail-network-controller/contrail-network-controller-configmap.yml
similarity index 86%
rename from kubernetes/files/kube-addons/contrail_network_controller/contrail-network-controller-configmap.yml
rename to kubernetes/files/kube-addons/contrail-network-controller/contrail-network-controller-configmap.yml
index fbf12de..4252e5a 100644
--- a/kubernetes/files/kube-addons/contrail_network_controller/contrail-network-controller-configmap.yml
+++ b/kubernetes/files/kube-addons/contrail-network-controller/contrail-network-controller-configmap.yml
@@ -1,9 +1,12 @@
+{%- from "kubernetes/map.jinja" import common with context -%}
 {%- from "kubernetes/map.jinja" import master with context -%}
 apiVersion: v1
 kind: ConfigMap
 metadata:
   name: contrail-kube-manager
-  namespace: {{ master.addons.contrail_network_controller.get('namespace', 'kube-system') }}
+  namespace: {{ common.addons.contrail_network_controller.get('namespace', 'kube-system') }}
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
 data:
   contrail.conf: |
     [DEFAULT]
diff --git a/kubernetes/files/kube-addons/contrail_network_controller/contrail-network-controller-deploy.yml b/kubernetes/files/kube-addons/contrail-network-controller/contrail-network-controller-deploy.yml
similarity index 66%
rename from kubernetes/files/kube-addons/contrail_network_controller/contrail-network-controller-deploy.yml
rename to kubernetes/files/kube-addons/contrail-network-controller/contrail-network-controller-deploy.yml
index ebe981f..18ddc78 100644
--- a/kubernetes/files/kube-addons/contrail_network_controller/contrail-network-controller-deploy.yml
+++ b/kubernetes/files/kube-addons/contrail-network-controller/contrail-network-controller-deploy.yml
@@ -1,14 +1,18 @@
-{%- from "kubernetes/map.jinja" import master with context -%}
-apiVersion: apps/v1beta1
+{%- from "kubernetes/map.jinja" import common with context -%}
+apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: contrail-network-controller
-  namespace: {{ master.addons.contrail_network_controller.get('namespace', 'kube-system') }}
+  namespace: {{ common.addons.contrail_network_controller.get('namespace', 'kube-system') }}
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
 spec:
+  replicas: 1
   template:
     metadata:
       labels:
         app: contrail-network-controller
+        addonmanager.kubernetes.io/mode: Reconcile
     spec:
       hostNetwork: true
       tolerations:
@@ -18,7 +22,7 @@
         node-role.kubernetes.io: master
       containers:
         - name: contrail-kube-manager
-          image: {{ master.addons.contrail_network_controller.get('image', 'docker-prod-virtual.docker.mirantis.net/mirantis/kubernetes/contrail-integration/contrail-network-controller:v1.0.0') }}
+          image: {{ common.addons.contrail_network_controller.get('image', 'yashulyak/contrail-controller:latest') }}
           imagePullPolicy: Always
           args: ["--config-file", "/etc/kube-manager/contrail.conf", "--alsologtostderr"]
           volumeMounts:
diff --git a/kubernetes/files/kube-addons/coredns/coredns-cm.yml b/kubernetes/files/kube-addons/coredns/coredns-cm.yml
new file mode 100644
index 0000000..0cd3a48
--- /dev/null
+++ b/kubernetes/files/kube-addons/coredns/coredns-cm.yml
@@ -0,0 +1,33 @@
+{%- from "kubernetes/map.jinja" import common with context %}
+{%- from "kubernetes/map.jinja" import master with context %}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: coredns
+  namespace: {{ common.addons.coredns.namespace }}
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
+data:
+  Corefile: |
+    .:53 {
+{% if master.federation.enabled %}
+        etcd {{ master.federation.name }} {
+          stubzones
+          path /skydns
+          endpoint http://coredns-etcd:2379
+        }
+{% endif %}
+{% if common.addons.externaldns.enabled %}
+        etcd {{ common.addons.externaldns.domain }} {
+          stubzones
+          path /skydns
+          endpoint http://coredns-etcd:2379
+        }
+{% endif %}
+        errors
+        log stdout
+        health
+        proxy . /etc/resolv.conf
+        cache 30
+    }
diff --git a/kubernetes/files/kube-addons/coredns/coredns-deploy.yml b/kubernetes/files/kube-addons/coredns/coredns-deploy.yml
new file mode 100644
index 0000000..f225af5
--- /dev/null
+++ b/kubernetes/files/kube-addons/coredns/coredns-deploy.yml
@@ -0,0 +1,60 @@
+{%- from "kubernetes/map.jinja" import common with context %}
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: coredns
+  namespace: {{ common.addons.coredns.namespace }}
+  labels:
+    k8s-app: coredns
+    kubernetes.io/cluster-service: "true"
+    kubernetes.io/name: "CoreDNS"
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      k8s-app: coredns
+  template:
+    metadata:
+      labels:
+        k8s-app: coredns
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
+    spec:
+      tolerations:
+        - key: node-role.kubernetes.io/master
+          effect: NoSchedule
+      containers:
+      - name: coredns
+        image: {{ common.addons.coredns.image }}
+        imagePullPolicy: Always
+        args: [ "-conf", "/etc/coredns/Corefile" ]
+        volumeMounts:
+        - name: config-volume
+          mountPath: /etc/coredns
+        ports:
+        - containerPort: 53
+          name: dns
+          protocol: UDP
+        - containerPort: 53
+          name: dns-tcp
+          protocol: TCP
+        livenessProbe:
+          httpGet:
+            path: /health
+            port: 8080
+            scheme: HTTP
+          initialDelaySeconds: 60
+          timeoutSeconds: 5
+          successThreshold: 1
+          failureThreshold: 5
+      dnsPolicy: ClusterFirst
+      volumes:
+        - name: config-volume
+          configMap:
+            name: coredns
+            items:
+            - key: Corefile
+              path: Corefile
diff --git a/kubernetes/files/kube-addons/coredns/coredns-svc.yml b/kubernetes/files/kube-addons/coredns/coredns-svc.yml
new file mode 100644
index 0000000..be49e94
--- /dev/null
+++ b/kubernetes/files/kube-addons/coredns/coredns-svc.yml
@@ -0,0 +1,22 @@
+{%- from "kubernetes/map.jinja" import common with context %}
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: coredns
+  namespace: {{ common.addons.coredns.namespace }}
+  labels:
+    k8s-app: coredns
+    kubernetes.io/cluster-service: "true"
+    kubernetes.io/name: "coredns"
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  selector:
+    k8s-app: coredns
+  ports:
+  - name: dns
+    port: 53
+    protocol: UDP
+  - name: dns-tcp
+    port: 53
+    protocol: TCP
diff --git a/kubernetes/files/kube-addons/coredns/etcd-deploy.yml b/kubernetes/files/kube-addons/coredns/etcd-deploy.yml
new file mode 100644
index 0000000..937ae69
--- /dev/null
+++ b/kubernetes/files/kube-addons/coredns/etcd-deploy.yml
@@ -0,0 +1,48 @@
+{%- from "kubernetes/map.jinja" import common with context %}
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  namespace: {{ common.addons.coredns.namespace }}
+  labels:
+    app: coredns-etcd
+    addonmanager.kubernetes.io/mode: Reconcile
+  name: coredns-etcd
+spec:
+  strategy:
+    type: Recreate
+  replicas: 1
+  selector:
+    matchLabels:
+      name: coredns-etcd
+  template:
+    metadata:
+      labels:
+        name: coredns-etcd
+    spec:
+      tolerations:
+        - key: node-role.kubernetes.io/master
+          effect: NoSchedule
+      containers:
+      - command:
+        - /usr/local/bin/etcd
+        - --name
+        - coredns-etcd
+        - --listen-peer-urls
+        - http://0.0.0.0:2380
+        - --listen-client-urls
+        - http://0.0.0.0:2379
+        - --advertise-client-urls
+        - http://coredns-etcd:2379
+        - --initial-cluster-state
+        - new
+        image: {{ common.addons.coredns.etcd_image }}
+        name: coredns-etcd
+        ports:
+        - containerPort: 2379
+          name: client
+          protocol: TCP
+        - containerPort: 2380
+          name: server
+          protocol: TCP
+      restartPolicy: Always
diff --git a/kubernetes/files/kube-addons/coredns/etcd-svc.yml b/kubernetes/files/kube-addons/coredns/etcd-svc.yml
new file mode 100644
index 0000000..4272275
--- /dev/null
+++ b/kubernetes/files/kube-addons/coredns/etcd-svc.yml
@@ -0,0 +1,24 @@
+{%- from "kubernetes/map.jinja" import common with context %}
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: coredns-etcd
+  namespace: {{ common.addons.coredns.namespace }}
+  labels:
+    k8s-app: coredns-etcd
+    kubernetes.io/cluster-service: "true"
+    kubernetes.io/name: "coredns-etcd"
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  selector:
+    k8s-app: coredns-etcd
+  ports:
+  - name: client
+    port: 2379
+    protocol: TCP
+    targetPort: 2379
+  - name: server
+    port: 2380
+    protocol: TCP
+    targetPort: 2380
diff --git a/kubernetes/files/kube-addons/dashboard/dashboard-address.yaml b/kubernetes/files/kube-addons/dashboard/dashboard-address.yaml
index 92c63a7..763b98f 100644
--- a/kubernetes/files/kube-addons/dashboard/dashboard-address.yaml
+++ b/kubernetes/files/kube-addons/dashboard/dashboard-address.yaml
@@ -1,4 +1,4 @@
-{%- from "kubernetes/map.jinja" import master with context %}
+{%- from "kubernetes/map.jinja" import common with context -%}
 apiVersion: v1
 kind: Service
 metadata:
@@ -7,10 +7,11 @@
   labels:
     k8s-app: kubernetes-dashboard
     kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
 spec:
   selector:
     k8s-app: kubernetes-dashboard
-  deprecatedPublicIPs: ["{{ master.addons.dashboard.public_ip }}"]
+  deprecatedPublicIPs: ["{{ common.addons.dashboard.public_ip }}"]
   type: LoadBalancer
   ports:
   - port: 80
diff --git a/kubernetes/files/kube-addons/dashboard/dashboard-controller.yaml b/kubernetes/files/kube-addons/dashboard/dashboard-controller.yaml
index 1135f29..46517de 100644
--- a/kubernetes/files/kube-addons/dashboard/dashboard-controller.yaml
+++ b/kubernetes/files/kube-addons/dashboard/dashboard-controller.yaml
@@ -6,6 +6,7 @@
   labels:
     k8s-app: kubernetes-dashboard
     kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
 spec:
   selector:
     matchLabels:
diff --git a/kubernetes/files/kube-addons/dashboard/dashboard-endpoint.yaml b/kubernetes/files/kube-addons/dashboard/dashboard-endpoint.yaml
index c35fad0..6e971b2 100644
--- a/kubernetes/files/kube-addons/dashboard/dashboard-endpoint.yaml
+++ b/kubernetes/files/kube-addons/dashboard/dashboard-endpoint.yaml
@@ -1,4 +1,4 @@
-{%- from "kubernetes/map.jinja" import master with context %}
+{%- from "kubernetes/map.jinja" import common with context -%}
 apiVersion: v1
 kind: Endpoints
 metadata:
@@ -7,9 +7,10 @@
   labels:
     k8s-app: kubernetes-dashboard
     kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
 subsets:
   - addresses:
-    - ip: {{ master.addons.dashboard.public_ip }}
+    - ip: {{ common.addons.dashboard.public_ip }}
 
     ports:
     - port: 9090
diff --git a/kubernetes/files/kube-addons/dashboard/dashboard-service.yaml b/kubernetes/files/kube-addons/dashboard/dashboard-service.yaml
index 2c2ce3f..26deb7b 100644
--- a/kubernetes/files/kube-addons/dashboard/dashboard-service.yaml
+++ b/kubernetes/files/kube-addons/dashboard/dashboard-service.yaml
@@ -7,6 +7,7 @@
   labels:
     k8s-app: kubernetes-dashboard
     kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
 spec:
 {%- if master.network.engine != 'opencontrail' %}
   selector:
diff --git a/kubernetes/files/kube-addons/dns/kubedns-autoscaler.yaml b/kubernetes/files/kube-addons/dns/kubedns-autoscaler.yaml
index 2260a7b..917f162 100644
--- a/kubernetes/files/kube-addons/dns/kubedns-autoscaler.yaml
+++ b/kubernetes/files/kube-addons/dns/kubedns-autoscaler.yaml
@@ -1,4 +1,4 @@
-{%- from "kubernetes/map.jinja" import master with context %}
+{%- from "kubernetes/map.jinja" import common with context -%}
 apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
@@ -6,6 +6,7 @@
   namespace: kube-system
   labels:
     k8s-app: kube-dns-autoscaler
+    addonmanager.kubernetes.io/mode: Reconcile
 spec:
   template:
     metadata:
@@ -32,8 +33,8 @@
           # If using small nodes, "nodesPerReplica" should dominate.
           - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":10,"min":2}}
           - --logtostderr=true
-          {%- if master.addons.dns.autoscaler.get('poll-period-seconds')  %}
-          - --poll-period-seconds={{ master.addons.dns.autoscaler.get('poll-period-seconds') }}
+          {%- if common.addons.dns.autoscaler.get('poll-period-seconds') %}
+          - --poll-period-seconds={{ common.addons.dns.autoscaler.get('poll-period-seconds') }}
           {%- endif %}
           - --v=2
 
diff --git a/kubernetes/files/kube-addons/dns/kubedns-rc.yaml b/kubernetes/files/kube-addons/dns/kubedns-rc.yaml
index 514bc26..c34a1a1 100644
--- a/kubernetes/files/kube-addons/dns/kubedns-rc.yaml
+++ b/kubernetes/files/kube-addons/dns/kubedns-rc.yaml
@@ -1,4 +1,4 @@
-{%- from "kubernetes/map.jinja" import master with context %}
+{%- from "kubernetes/map.jinja" import common with context -%}
 # Copyright 2016 The Kubernetes Authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -21,14 +21,15 @@
   labels:
     k8s-app: kube-dns
     kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
 spec:
   # replicas: not specified here:
   # 1. In order to make Addon Manager do not reconcile this replicas parameter.
   # 2. Default is 1.
   # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
   # replicas set only when required other than 1
-{%- if master.addons.dns.get('replicas', 1) != 1 %}
-  replicas: {{ master.addons.dns.replicas }}
+{%- if common.addons.dns.get('replicas', 1) != 1 %}
+  replicas: {{ common.addons.dns.replicas }}
 {%- endif %}
   strategy:
     rollingUpdate:
@@ -80,7 +81,7 @@
           initialDelaySeconds: 3
           timeoutSeconds: 5
         args:
-        - --domain={{ master.addons.dns.domain }}.
+        - --domain={{ common.addons.dns.domain|replace('_', '-') }}.
         - --dns-port=10053
         - --config-map=kube-dns
         - -v=2
@@ -109,8 +110,8 @@
           successThreshold: 1
           failureThreshold: 5
         args:
-{%- if master.addons.dns.get('dnsmasq', {}) %}
-{%- for option_name, option_value in master.addons.dns.dnsmasq.iteritems() %}
+{%- if common.addons.dns.get('dnsmasq', {}) %}
+{%- for option_name, option_value in common.addons.dns.dnsmasq.iteritems() %}
         - --{{ option_name }}{% if option_value %}={{ option_value }}{% endif %}
 {%- endfor %}
 {%- endif %}
@@ -140,8 +141,8 @@
         args:
         - --v=2
         - --logtostderr
-        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{ master.addons.dns.domain }},5,A
-        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{ master.addons.dns.domain }},5,A
+        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{ common.addons.dns.domain|replace('_', '-') }},5,A
+        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{ common.addons.dns.domain|replace('_', '-') }},5,A
         ports:
         - containerPort: 10054
           name: metrics
diff --git a/kubernetes/files/kube-addons/dns/kubedns-svc.yaml b/kubernetes/files/kube-addons/dns/kubedns-svc.yaml
index 6585954..ab0c65b 100644
--- a/kubernetes/files/kube-addons/dns/kubedns-svc.yaml
+++ b/kubernetes/files/kube-addons/dns/kubedns-svc.yaml
@@ -1,4 +1,4 @@
-{%- from "kubernetes/map.jinja" import master with context %}
+{%- from "kubernetes/map.jinja" import common with context -%}
 # Copyright 2016 The Kubernetes Authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -22,10 +22,11 @@
     k8s-app: kube-dns
     kubernetes.io/cluster-service: "true"
     kubernetes.io/name: "KubeDNS"
+    addonmanager.kubernetes.io/mode: Reconcile
 spec:
   selector:
     k8s-app: kube-dns
-  clusterIP: {{ master.addons.dns.server }}
+  clusterIP: {{ common.addons.dns.server }}
   ports:
   - name: dns
     port: 53
diff --git a/kubernetes/files/kube-addons/externaldns/externaldns-deploy.yml b/kubernetes/files/kube-addons/externaldns/externaldns-deploy.yml
new file mode 100644
index 0000000..39723b1
--- /dev/null
+++ b/kubernetes/files/kube-addons/externaldns/externaldns-deploy.yml
@@ -0,0 +1,36 @@
+{%- from "kubernetes/map.jinja" import common with context -%}
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  namespace: {{ common.addons.externaldns.namespace }}
+  name: external-dns
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      labels:
+        app: external-dns
+    spec:
+      tolerations:
+        - key: node-role.kubernetes.io/master
+          effect: NoSchedule
+      containers:
+      - name: external-dns
+        image: {{ common.addons.externaldns.image }}
+        args:
+        - --source=service
+        - --source=ingress
+        - --domain-filter={{ common.addons.externaldns.domain }} # will make ExternalDNS see only the hosted zones matching provided domain, omit to process all available hosted zones
+        - --provider={{ common.addons.externaldns.provider }}
+        #- --policy=upsert-only # would prevent ExternalDNS from deleting any records, omit to enable full synchronization
+        - --registry=txt
+        - --txt-owner-id=my-identifier
+        {% if common.addons.externaldns.provider == "coredns" %}
+        env:
+        - name: ETCD_URLS
+          value: "http://coredns-etcd:2379"
+        {% endif %}
diff --git a/kubernetes/files/kube-addons/heapster-influxdb/heapster-address.yaml b/kubernetes/files/kube-addons/heapster-influxdb/heapster-address.yaml
index 28f2c24..5fa86f3 100644
--- a/kubernetes/files/kube-addons/heapster-influxdb/heapster-address.yaml
+++ b/kubernetes/files/kube-addons/heapster-influxdb/heapster-address.yaml
@@ -1,4 +1,4 @@
-{%- from "kubernetes/map.jinja" import master with context %}
+{%- from "kubernetes/map.jinja" import common with context -%}
 apiVersion: v1
 kind: Service
 metadata:
@@ -6,6 +6,7 @@
     k8s-app: heapster
     kubernetes.io/cluster-service: 'true'
     kubernetes.io/name: 'Heapster'
+    addonmanager.kubernetes.io/mode: Reconcile
   name: heapster-address
   namespace: kube-system
 spec:
@@ -14,5 +15,5 @@
     targetPort: 8082
   selector:
     k8s-app: heapster
-  deprecatedPublicIPs: ['{{ master.addons.heapster_influxdb.public_ip }}']
-  type: LoadBalancer
\ No newline at end of file
+  deprecatedPublicIPs: ['{{ common.addons.heapster_influxdb.public_ip }}']
+  type: LoadBalancer
diff --git a/kubernetes/files/kube-addons/heapster-influxdb/heapster-controller.yaml b/kubernetes/files/kube-addons/heapster-influxdb/heapster-controller.yaml
index 8b3f251..6f26727 100644
--- a/kubernetes/files/kube-addons/heapster-influxdb/heapster-controller.yaml
+++ b/kubernetes/files/kube-addons/heapster-influxdb/heapster-controller.yaml
@@ -4,6 +4,7 @@
   labels:
     k8s-app: heapster
     version: v6
+    addonmanager.kubernetes.io/mode: Reconcile
   name: heapster
   namespace: kube-system
 spec:
@@ -27,4 +28,4 @@
         command:
         - /heapster
         - --source=kubernetes:https://kubernetes.default
-        - --sink=influxdb:http://monitoring-influxdb:8086
\ No newline at end of file
+        - --sink=influxdb:http://monitoring-influxdb:8086
diff --git a/kubernetes/files/kube-addons/heapster-influxdb/heapster-endpoint.yaml b/kubernetes/files/kube-addons/heapster-influxdb/heapster-endpoint.yaml
index 35a140c..8d915b5 100644
--- a/kubernetes/files/kube-addons/heapster-influxdb/heapster-endpoint.yaml
+++ b/kubernetes/files/kube-addons/heapster-influxdb/heapster-endpoint.yaml
@@ -1,4 +1,4 @@
-{%- from "kubernetes/map.jinja" import master with context %}
+{%- from "kubernetes/map.jinja" import common with context -%}
 apiVersion: v1
 kind: Endpoints
 metadata:
@@ -8,10 +8,11 @@
     k8s-app: heapster
     kubernetes.io/cluster-service: "true"
     kubernetes.io/name: "Heapster"
+    addonmanager.kubernetes.io/mode: Reconcile
 subsets:
   - addresses:
-    - ip: {{ master.addons.heapster_influxdb.public_ip }}
+    - ip: {{ common.addons.heapster_influxdb.public_ip }}
 
     ports:
     - port: 8082
-      protocol: TCP
\ No newline at end of file
+      protocol: TCP
diff --git a/kubernetes/files/kube-addons/heapster-influxdb/heapster-service.yaml b/kubernetes/files/kube-addons/heapster-influxdb/heapster-service.yaml
index f04cf83..d487f2e 100644
--- a/kubernetes/files/kube-addons/heapster-influxdb/heapster-service.yaml
+++ b/kubernetes/files/kube-addons/heapster-influxdb/heapster-service.yaml
@@ -5,9 +5,10 @@
     k8s-app: heapster
     kubernetes.io/cluster-service: 'true'
     kubernetes.io/name: 'Heapster'
+    addonmanager.kubernetes.io/mode: Reconcile
   name: heapster
   namespace: kube-system
 spec:
   ports:
   - port: 80
-    targetPort: 8082
\ No newline at end of file
+    targetPort: 8082
diff --git a/kubernetes/files/kube-addons/heapster-influxdb/influxdb-controller.yaml b/kubernetes/files/kube-addons/heapster-influxdb/influxdb-controller.yaml
index 6235c18..695f592 100644
--- a/kubernetes/files/kube-addons/heapster-influxdb/influxdb-controller.yaml
+++ b/kubernetes/files/kube-addons/heapster-influxdb/influxdb-controller.yaml
@@ -3,6 +3,7 @@
 metadata:
   labels:
     name: influxGrafana
+    addonmanager.kubernetes.io/mode: Reconcile
   name: influxdb-grafana
   namespace: kube-system
 spec:
@@ -22,4 +23,4 @@
           name: influxdb-storage
       volumes:
       - name: influxdb-storage
-        emptyDir: {}
\ No newline at end of file
+        emptyDir: {}
diff --git a/kubernetes/files/kube-addons/heapster-influxdb/influxdb-service.yaml b/kubernetes/files/kube-addons/heapster-influxdb/influxdb-service.yaml
index 64bed1e..f4565eb 100644
--- a/kubernetes/files/kube-addons/heapster-influxdb/influxdb-service.yaml
+++ b/kubernetes/files/kube-addons/heapster-influxdb/influxdb-service.yaml
@@ -3,6 +3,7 @@
 metadata:
   labels:
     name: monitoring-influxdb
+    addonmanager.kubernetes.io/mode: Reconcile
   name: monitoring-influxdb
   namespace: kube-system
 spec:
@@ -14,4 +15,4 @@
     port: 8086
     targetPort: 8086
   selector:
-    name: influxGrafana
\ No newline at end of file
+    name: influxGrafana
diff --git a/kubernetes/files/kube-addons/helm/helm-tiller-deploy.yml b/kubernetes/files/kube-addons/helm/helm-tiller-deploy.yml
index b1828ce..4c87aa8 100644
--- a/kubernetes/files/kube-addons/helm/helm-tiller-deploy.yml
+++ b/kubernetes/files/kube-addons/helm/helm-tiller-deploy.yml
@@ -1,4 +1,4 @@
-{%- from "kubernetes/map.jinja" import master with context %}
+{%- from "kubernetes/map.jinja" import common with context -%}
 apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
@@ -6,6 +6,7 @@
   labels:
     app: helm
     name: tiller
+    addonmanager.kubernetes.io/mode: Reconcile
   name: tiller-deploy
   namespace: kube-system
 spec:
@@ -24,7 +25,7 @@
         - key: node-role.kubernetes.io/master
           effect: NoSchedule
       containers:
-      - image: {{ master.addons.helm.tiller_image }}
+      - image: {{ common.addons.helm.tiller_image }}
         imagePullPolicy: IfNotPresent
         resources:
           requests:
diff --git a/kubernetes/files/kube-addons/netchecker/netchecker-agent.yml b/kubernetes/files/kube-addons/netchecker/netchecker-agent.yml
index 855884a..5ee722b 100644
--- a/kubernetes/files/kube-addons/netchecker/netchecker-agent.yml
+++ b/kubernetes/files/kube-addons/netchecker/netchecker-agent.yml
@@ -1,11 +1,12 @@
-{%- from "kubernetes/map.jinja" import master with context %}
+{%- from "kubernetes/map.jinja" import common with context -%}
 apiVersion: extensions/v1beta1
 kind: DaemonSet
 metadata:
+  name: netchecker-agent
+  namespace: {{ common.addons.netchecker.namespace }}
   labels:
     app: netchecker-agent
-  name: netchecker-agent
-  namespace: {{ master.addons.netchecker.namespace }}
+    addonmanager.kubernetes.io/mode: Reconcile
 spec:
   template:
     metadata:
@@ -18,7 +19,7 @@
           effect: NoSchedule
       containers:
         - name: netchecker-agent
-          image: {{ master.addons.netchecker.agent_image }}
+          image: {{ common.addons.netchecker.agent_image }}
           env:
             - name: MY_NODE_NAME
               valueFrom:
@@ -31,8 +32,8 @@
           args:
             - "-v=5"
             - "-alsologtostderr=true"
-            - "-serverendpoint=netchecker:{{ master.addons.netchecker.port }}"
-            - "-reportinterval={{ master.addons.netchecker.interval }}"
-{%- if master.addons.netchecker.get('agent_probeurls') %}
-            - "-probeurls={{ master.addons.netchecker.agent_probeurls }}"
+            - "-serverendpoint=netchecker:{{ common.addons.netchecker.port }}"
+            - "-reportinterval={{ common.addons.netchecker.interval }}"
+{%- if common.addons.netchecker.get('agent_probeurls') %}
+            - "-probeurls={{ common.addons.netchecker.agent_probeurls }}"
 {%- endif %}
diff --git a/kubernetes/files/kube-addons/netchecker/netchecker-server.yml b/kubernetes/files/kube-addons/netchecker/netchecker-server.yml
index dc5195c..6cfac36 100644
--- a/kubernetes/files/kube-addons/netchecker/netchecker-server.yml
+++ b/kubernetes/files/kube-addons/netchecker/netchecker-server.yml
@@ -1,29 +1,48 @@
 {%- from "kubernetes/map.jinja" import master with context %}
+{%- from "kubernetes/map.jinja" import common with context %}
 apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: netchecker-server
-  namespace: {{ master.addons.netchecker.namespace }}
+  namespace: {{ common.addons.netchecker.namespace }}
+  labels:
+    app: netchecker-server
+    addonmanager.kubernetes.io/mode: Reconcile
 spec:
   replicas: 1
   template:
     metadata:
       labels:
         app: netchecker-server
+        addonmanager.kubernetes.io/mode: Reconcile
       annotations:
         prometheus.io/scrape: "true"
-        prometheus.io/port: "{{ master.addons.netchecker.port }}"
+        prometheus.io/port: "{{ common.addons.netchecker.port }}"
     spec:
       tolerations:
         - key: node-role.kubernetes.io/master
           effect: NoSchedule
       containers:
         - name: netchecker-server
-          image: {{ master.addons.netchecker.server_image }}
+          image: {{ common.addons.netchecker.server_image }}
           ports:
-            - containerPort: {{ master.addons.netchecker.port }}
+            - containerPort: {{ common.addons.netchecker.port }}
           args:
             - "-v=5"
             - "-logtostderr"
-            - "-kubeproxyinit"
-            - "-endpoint=0.0.0.0:{{ master.addons.netchecker.port }}"
+            - "-kubeproxyinit=false"
+            - "-endpoint=0.0.0.0:{{ common.addons.netchecker.port }}"
+            - "-etcd-endpoints={% for member in master.etcd.members %}https://{{ member.host }}:4001{% if not loop.last %},{% endif %}{% endfor %}"
+{%- if master.etcd.get('ssl', {}).get('enabled') %}
+            - "-etcd-key=/var/lib/etcd/etcd-client.key"
+            - "-etcd-cert=/var/lib/etcd/etcd-client.pem"
+            - "-etcd-ca=/var/lib/etcd/ca.pem"
+{%- endif %}
+          volumeMounts:
+          - mountPath: /var/lib/etcd/
+            name: etcd-certs
+            readOnly: true
+      volumes:
+      - hostPath:
+          path: /var/lib/etcd
+        name: etcd-certs
diff --git a/kubernetes/files/kube-addons/netchecker/netchecker-svc.yml b/kubernetes/files/kube-addons/netchecker/netchecker-svc.yml
index 309d61f..e880ed5 100644
--- a/kubernetes/files/kube-addons/netchecker/netchecker-svc.yml
+++ b/kubernetes/files/kube-addons/netchecker/netchecker-svc.yml
@@ -1,14 +1,16 @@
-{%- from "kubernetes/map.jinja" import master with context %}
+{%- from "kubernetes/map.jinja" import common with context -%}
 kind: Service
 apiVersion: "v1"
 metadata:
   name: netchecker
-  namespace: {{ master.addons.netchecker.namespace }}
+  namespace: {{ common.addons.netchecker.namespace }}
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
 spec:
   selector:
     app: netchecker-server
   ports:
     - protocol: "TCP"
-      port: {{ master.addons.netchecker.port }}
-      targetPort: {{ master.addons.netchecker.port }}
+      port: {{ common.addons.netchecker.port }}
+      targetPort: {{ common.addons.netchecker.port }}
   type: NodePort
diff --git a/kubernetes/files/kube-addons/registry/registry-rc.yaml b/kubernetes/files/kube-addons/registry/registry-rc.yaml
index 055e596..bd2f50f 100644
--- a/kubernetes/files/kube-addons/registry/registry-rc.yaml
+++ b/kubernetes/files/kube-addons/registry/registry-rc.yaml
@@ -1,4 +1,4 @@
-{%- from "kubernetes/map.jinja" import master with context %}
+{%- from "kubernetes/map.jinja" import common with context -%}
 apiVersion: v1
 kind: ReplicationController
 metadata:
@@ -8,6 +8,7 @@
     k8s-app: kube-registry
     version: v0
     kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
 spec:
   replicas: 1
   selector:
@@ -18,7 +19,6 @@
       labels:
         k8s-app: kube-registry
         version: v0
-        kubernetes.io/cluster-service: "true"
     spec:
       tolerations:
         - key: node-role.kubernetes.io/master
@@ -35,28 +35,28 @@
             memory: 100Mi
         env:
         - name: REGISTRY_HTTP_ADDR
-          value: {{ master.addons.registry.bind.get('host', '0.0.0.0'}}:{{ master.addons.registry.bind.get('port', '5000'}}
+          value: {{ common.addons.registry.bind.get('host', '0.0.0.0') }}:{{ common.addons.registry.bind.get('port', '5000') }}
         - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
           value: /var/lib/registry
         ports:
-        - containerPort: {{ master.addons.registry.bind.get('port', '5000') }}
+        - containerPort: {{ common.addons.registry.bind.get('port', '5000') }}
           name: registry
           protocol: TCP
-      {%- if master.addons.registry.volume is defined %}
+      {%- if common.addons.registry.volume is defined %}
         volumeMounts:
         - name: image-store
           mountPath: /var/lib/registry
       volumes:
         - name: image-store
-          {%- if master.addons.registry.volume.get('type', 'emptyDir') %}
+          {%- if common.addons.registry.volume.get('type', 'emptyDir') == 'emptyDir' %}
           emptyDir: {}
-          {%- elif master.addons.registry.volume.type == 'hostPath' %}
+          {%- elif common.addons.registry.volume.type == 'hostPath' %}
           hostPath:
-            path: {{ master.addons.registry.volume.path }}
-          {%- elif master.addons.registry.volume.type == 'glusterfs' %}
+            path: {{ common.addons.registry.volume.path }}
+          {%- elif common.addons.registry.volume.type == 'glusterfs' %}
           glusterfs:
-            endpoints: {{ master.addons.registry.volume.endpoints }}
-            path: {{ master.addons.registry.volume.path }}
-            readOnly: {{ master.addons.registry.volume.read_only }}
+            endpoints: {{ common.addons.registry.volume.endpoints }}
+            path: {{ common.addons.registry.volume.path }}
+            readOnly: {{ common.addons.registry.volume.read_only }}
           {%- endif %}
       {%- endif %}
diff --git a/kubernetes/files/kube-addons/registry/registry.svc b/kubernetes/files/kube-addons/registry/registry.svc
index 708a1ba..ad78a96 100644
--- a/kubernetes/files/kube-addons/registry/registry.svc
+++ b/kubernetes/files/kube-addons/registry/registry.svc
@@ -1,4 +1,4 @@
-{%- from "kubernetes/map.jinja" import master with context %}
+{%- from "kubernetes/map.jinja" import common with context -%}
 apiVersion: v1
 kind: Service
 metadata:
@@ -8,10 +8,11 @@
     k8s-app: kube-registry
     kubernetes.io/cluster-service: "true"
     kubernetes.io/name: "KubeRegistry"
+    addonmanager.kubernetes.io/mode: Reconcile
 spec:
   selector:
     k8s-app: kube-registry
   ports:
   - name: registry
-    port: {{ master.addons.registry.bind.get('port', '5000') }}
-    protocol: TCP
\ No newline at end of file
+    port: {{ common.addons.registry.bind.get('port', '5000') }}
+    protocol: TCP
diff --git a/kubernetes/files/kube-addons/virtlet/virtlet-ds.yml b/kubernetes/files/kube-addons/virtlet/virtlet-ds.yml
index 511580c..b1c75a3 100644
--- a/kubernetes/files/kube-addons/virtlet/virtlet-ds.yml
+++ b/kubernetes/files/kube-addons/virtlet/virtlet-ds.yml
@@ -1,10 +1,13 @@
-{%- from "kubernetes/map.jinja" import master with context %}
+{%- from "kubernetes/map.jinja" import common with context -%}
+{%- from "kubernetes/map.jinja" import master with context -%}
 ---
 apiVersion: extensions/v1beta1
 kind: DaemonSet
 metadata:
   name: virtlet
-  namespace: {{ master.addons.virtlet.namespace }}
+  namespace: {{ common.addons.virtlet.namespace }}
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
 spec:
   template:
     metadata:
@@ -46,7 +49,7 @@
       # debugging criproxy
       # At the end it ensures that /var/lib/libvirt/images exists on node.
       - name: prepare-node
-        image: {{ master.addons.virtlet.image }}
+        image: {{ common.addons.virtlet.image }}
         imagePullPolicy: IfNotPresent
         command:
         - /prepare-node.sh
@@ -69,7 +72,7 @@
 
       containers:
       - name: virtlet
-        image: {{ master.addons.virtlet.image }}
+        image: {{ common.addons.virtlet.image }}
         # In case we inject local virtlet image we want to use it not officially available one
         imagePullPolicy: IfNotPresent
         volumeMounts:
@@ -122,7 +125,7 @@
         - name: VIRTLET_VM_LOG_LOCATION
           value: "/var/log/vms"
       - name: virtlet-log
-        image: {{ master.addons.virtlet.image }}
+        image: {{ common.addons.virtlet.image }}
         imagePullPolicy: IfNotPresent
         command:
           - /virtlet_log
@@ -210,6 +213,8 @@
 kind: ClusterRoleBinding
 metadata:
   name: virtlet
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
@@ -217,13 +222,15 @@
 subjects:
 - kind: ServiceAccount
   name: virtlet
-  namespace: {{ master.addons.virtlet.namespace }}
+  namespace: {{ common.addons.virtlet.namespace }}
 ---
 kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: virtlet
-  namespace: {{ master.addons.virtlet.namespace }}
+  namespace: {{ common.addons.virtlet.namespace }}
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
 rules:
   - apiGroups:
       - ""
@@ -236,4 +243,6 @@
 kind: ServiceAccount
 metadata:
   name: virtlet
-  namespace: {{ master.addons.virtlet.namespace }}
+  namespace: {{ common.addons.virtlet.namespace }}
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
diff --git a/kubernetes/files/kube-controller-manager/controller-manager.kubeconfig b/kubernetes/files/kube-controller-manager/controller-manager.kubeconfig
index 91206bc..9ec6761 100644
--- a/kubernetes/files/kube-controller-manager/controller-manager.kubeconfig
+++ b/kubernetes/files/kube-controller-manager/controller-manager.kubeconfig
@@ -1,21 +1,22 @@
 {%- from "kubernetes/map.jinja" import pool with context %}
+{%- from "kubernetes/map.jinja" import common with context %}
 
 apiVersion: v1
 kind: Config
-current-context: {{ pool.cluster_domain }}
+current-context: {{ common.cluster_name }}
 preferences: {}
 clusters:
 - cluster:
     certificate-authority: /etc/kubernetes/ssl/ca-kubernetes.crt
-    server: https://{{ pool.apiserver.host }}:443
-  name: {{ pool.cluster_domain }}
+    server: https://{{ pool.apiserver.host }}:{{ pool.apiserver.secure_port }}
+  name: {{ common.cluster_name }}
 contexts:
 - context:
-    cluster: {{ pool.cluster_domain }}
-    user: controller_manager-{{ pool.cluster_domain }}
-  name: {{ pool.cluster_domain }}
+    cluster: {{ common.cluster_name }}
+    user: controller_manager-{{ common.cluster_name }}
+  name: {{ common.cluster_name }}
 users:
-- name: controller_manager-{{ pool.cluster_domain }}
+- name: controller_manager-{{ common.cluster_name }}
   user:
     client-certificate: /etc/kubernetes/ssl/kubelet-client.crt
     client-key: /etc/kubernetes/ssl/kubelet-client.key
diff --git a/kubernetes/files/kube-proxy/proxy.kubeconfig b/kubernetes/files/kube-proxy/proxy.kubeconfig
index 868ecb5..307daf8 100644
--- a/kubernetes/files/kube-proxy/proxy.kubeconfig
+++ b/kubernetes/files/kube-proxy/proxy.kubeconfig
@@ -1,21 +1,22 @@
 {%- from "kubernetes/map.jinja" import pool with context %}
+{%- from "kubernetes/map.jinja" import common with context %}
 
 apiVersion: v1
 kind: Config
-current-context: {{ pool.cluster_domain }}
+current-context: {{ common.cluster_name }}
 preferences: {}
 clusters:
 - cluster:
     certificate-authority: /etc/kubernetes/ssl/ca-kubernetes.crt
-    server: https://{{ pool.apiserver.host }}:443
-  name: {{ pool.cluster_domain }}
+    server: https://{{ pool.apiserver.host }}:{{ pool.apiserver.secure_port }}
+  name: {{ common.cluster_name }}
 contexts:
 - context:
-    cluster: {{ pool.cluster_domain }}
-    user: kube_proxy-{{ pool.cluster_domain }}
-  name: {{ pool.cluster_domain }}
+    cluster: {{ common.cluster_name }}
+    user: kube_proxy-{{ common.cluster_name }}
+  name: {{ common.cluster_name }}
 users:
-- name: kube_proxy-{{ pool.cluster_domain }}
+- name: kube_proxy-{{ common.cluster_name }}
   user:
     client-certificate: /etc/kubernetes/ssl/kubelet-client.crt
     client-key: /etc/kubernetes/ssl/kubelet-client.key
diff --git a/kubernetes/files/kube-scheduler/scheduler.kubeconfig b/kubernetes/files/kube-scheduler/scheduler.kubeconfig
index 8441a2e..8a87e39 100644
--- a/kubernetes/files/kube-scheduler/scheduler.kubeconfig
+++ b/kubernetes/files/kube-scheduler/scheduler.kubeconfig
@@ -1,20 +1,21 @@
 {%- from "kubernetes/map.jinja" import pool with context %}
+{%- from "kubernetes/map.jinja" import common with context %}
 apiVersion: v1
 kind: Config
-current-context: {{ pool.cluster_domain }}
+current-context: {{ common.cluster_name }}
 preferences: {}
 clusters:
 - cluster:
     certificate-authority: /etc/kubernetes/ssl/ca-kubernetes.crt
-    server: https://{{ pool.apiserver.host }}:443
-  name: {{ pool.cluster_domain }}
+    server: https://{{ pool.apiserver.host }}:{{ pool.apiserver.secure_port }}
+  name: {{ common.cluster_name }}
 contexts:
 - context:
-    cluster: {{ pool.cluster_domain }}
-    user: scheduler-{{ pool.cluster_domain }}
-  name: {{ pool.cluster_domain }}
+    cluster: {{ common.cluster_name }}
+    user: scheduler-{{ common.cluster_name }}
+  name: {{ common.cluster_name }}
 users:
-- name: scheduler-{{ pool.cluster_domain }}
+- name: scheduler-{{ common.cluster_name }}
   user:
     client-certificate: /etc/kubernetes/ssl/kubelet-client.crt
     client-key: /etc/kubernetes/ssl/kubelet-client.key
diff --git a/kubernetes/files/kubeconfig.sh b/kubernetes/files/kubeconfig.sh
index 0b029fc..b05e907 100644
--- a/kubernetes/files/kubeconfig.sh
+++ b/kubernetes/files/kubeconfig.sh
@@ -1,14 +1,14 @@
-{%- from "kubernetes/map.jinja" import master with context %}
+{%- from "kubernetes/map.jinja" import common with context -%}
 #!/bin/bash
 
 # server url
-server="$(cat /etc/kubernetes/kubelet.kubeconfig  | grep server | awk '{ print $2 }')"
+server="$(awk '/server/ { print $2 }' /etc/kubernetes/kubelet.kubeconfig)"
 
 # certificates
-cert="$(cat /etc/kubernetes/ssl/kubelet-client.crt | base64 | sed 's/^/      /g')"
-key="$(cat /etc/kubernetes/ssl/kubelet-client.key | base64 | sed 's/^/      /g')"
-ca="$(cat /etc/kubernetes/ssl/ca-kubernetes.crt | base64 | sed 's/^/      /g')"
-cluster="{{ master.addons.dns.domain }}"
+cert="$(base64 /etc/kubernetes/ssl/kubelet-client.crt | sed 's/^/      /g')"
+key="$(base64 /etc/kubernetes/ssl/kubelet-client.key | sed 's/^/      /g')"
+ca="$(base64 /etc/kubernetes/ssl/ca-kubernetes.crt | sed 's/^/      /g')"
+cluster="{{ common.cluster_name }}"
 
 echo "apiVersion: v1
 clusters:
diff --git a/kubernetes/files/kubelet/default.master b/kubernetes/files/kubelet/default.master
index b388671..b4cf09e 100644
--- a/kubernetes/files/kubelet/default.master
+++ b/kubernetes/files/kubelet/default.master
@@ -1,12 +1,14 @@
-{%- from "kubernetes/map.jinja" import master with context %}
+{%- from "kubernetes/map.jinja" import common with context -%}
+{%- from "kubernetes/map.jinja" import master with context -%}
 
 # test_args has to be kept at the end, so they'll overwrite any prior configuration
 DAEMON_ARGS="\
 --pod-manifest-path=/etc/kubernetes/manifests \
 --allow-privileged={{ master.kubelet.allow_privileged }} \
---cluster_dns={{ master.addons.dns.server }} \
---cluster_domain={{ master.addons.dns.domain }} \
+--cluster_dns={{ common.addons.dns.server }} \
+--cluster_domain={{ common.addons.dns.domain|replace('_', '-') }} \
 --cni-bin-dir={{ master.apiserver.get('cni_bin_dir', '/opt/cni/bin') }} \
+--hostname-override={{ master.host.name }} \
 --v={{ master.get('verbosity', 2) }} \
 --node-labels=node-role.kubernetes.io/master=true \
 {%- for key, value in master.get('kubelet', {}).get('daemon_opts', {}).iteritems() %}
diff --git a/kubernetes/files/kubelet/default.pool b/kubernetes/files/kubelet/default.pool
index df49d4c..73c9171 100644
--- a/kubernetes/files/kubelet/default.pool
+++ b/kubernetes/files/kubelet/default.pool
@@ -1,4 +1,5 @@
-{%- from "kubernetes/map.jinja" import pool with context %}
+{%- from "kubernetes/map.jinja" import common with context -%}
+{%- from "kubernetes/map.jinja" import pool with context -%}
 
 # test_args has to be kept at the end, so they'll overwrite any prior configuration
 DAEMON_ARGS="\
@@ -6,9 +7,10 @@
 --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
 --pod-manifest-path=/etc/kubernetes/manifests \
 --allow-privileged={{ pool.kubelet.allow_privileged }} \
---cluster_dns={{ pool.cluster_dns }} \
---cluster_domain={{ pool.cluster_domain }} \
+--cluster_dns={{ common.addons.dns.server }} \
+--cluster_domain={{ common.addons.dns.domain|replace('_', '-') }} \
 --cni-bin-dir={{ pool.apiserver.get('cni_bin_dir', '/opt/cni/bin') }} \
+--hostname-override={{ pool.host.name }} \
 --v={{ pool.get('verbosity', 2) }} \
 --node-labels=node-role.kubernetes.io/node=true \
 {%- if pool.network.engine in ['calico', 'opencontrail'] %}
@@ -16,11 +18,13 @@
 --network-plugin-dir=/etc/cni/net.d \
 {%- endif %}
 --file-check-frequency={{ pool.kubelet.frequency }} \
---container-runtime=remote \
---container-runtime-endpoint=/var/run/criproxy.sock \
---image-service-endpoint=/var/run/criproxy.sock \
---enable-controller-attach-detach=false \
+{%- if common.addons.get('virtlet', {}).get('enabled') %}
+--container-runtime={{ pool.get('container-runtime', 'remote') }} \
+--container-runtime-endpoint={{ pool.get('container-runtime-endpoint', '/var/run/criproxy.sock') }} \
+--image-service-endpoint={{ pool.get('image-service-endpoint', '/var/run/criproxy.sock') }} \
+--enable-controller-attach-detach={{ pool.get('enable-controller-attach-detach', 'false') }} \
+{%- endif %}
 {%- for key, value in pool.get('kubelet', {}).get('daemon_opts', {}).iteritems() %}
 --{{ key }}="{{ value }}" \
-{% endfor %}
+{%- endfor %}
 "
diff --git a/kubernetes/files/kubelet/kubelet.kubeconfig.master b/kubernetes/files/kubelet/kubelet.kubeconfig.master
index 7514b6d..3c70ded 100644
--- a/kubernetes/files/kubelet/kubelet.kubeconfig.master
+++ b/kubernetes/files/kubelet/kubelet.kubeconfig.master
@@ -1,21 +1,21 @@
-{%- from "kubernetes/map.jinja" import master with context %}
-
+{%- from "kubernetes/map.jinja" import common with context -%}
+{%- from "kubernetes/map.jinja" import master with context -%}
 apiVersion: v1
 kind: Config
-current-context: {{ master.addons.dns.domain }}
+current-context: {{ common.cluster_name }}
 preferences: {}
 clusters:
 - cluster:
     certificate-authority: /etc/kubernetes/ssl/ca-kubernetes.crt
-    server: https://{{ master.apiserver.address }}:443
-  name: {{ master.addons.dns.domain }}
+    server: https://{{ master.apiserver.address }}:{{ master.apiserver.secure_port }}
+  name: {{ common.cluster_name }}
 contexts:
 - context:
-    cluster: {{ master.addons.dns.domain }}
-    user: kubelet-{{ master.addons.dns.domain }}
-  name: {{ master.addons.dns.domain }}
+    cluster: {{ common.cluster_name }}
+    user: kubelet-{{ common.cluster_name }}
+  name: {{ common.cluster_name }}
 users:
-- name: kubelet-{{ master.addons.dns.domain }}
+- name: kubelet-{{ common.cluster_name }}
   user:
     client-certificate: /etc/kubernetes/ssl/kubelet-client.crt
     client-key: /etc/kubernetes/ssl/kubelet-client.key
diff --git a/kubernetes/files/kubelet/kubelet.kubeconfig.pool b/kubernetes/files/kubelet/kubelet.kubeconfig.pool
index 494c038..3228ea6 100644
--- a/kubernetes/files/kubelet/kubelet.kubeconfig.pool
+++ b/kubernetes/files/kubelet/kubelet.kubeconfig.pool
@@ -1,21 +1,21 @@
-{%- from "kubernetes/map.jinja" import pool with context %}
-
+{%- from "kubernetes/map.jinja" import pool with context -%}
+{%- from "kubernetes/map.jinja" import common with context -%}
 apiVersion: v1
 kind: Config
-current-context: {{ pool.cluster_domain }}
+current-context: {{ common.cluster_name }}
 preferences: {}
 clusters:
 - cluster:
     certificate-authority: /etc/kubernetes/ssl/ca-kubernetes.crt
-    server: https://{{ pool.apiserver.host }}:443
-  name: {{ pool.cluster_domain }}
+    server: https://{{ pool.apiserver.host }}:{{ pool.apiserver.secure_port }}
+  name: {{ common.cluster_name }}
 contexts:
 - context:
-    cluster: {{ pool.cluster_domain }}
-    user: kubelet-{{ pool.cluster_domain }}
-  name: {{ pool.cluster_domain }}
+    cluster: {{ common.cluster_name }}
+    user: kubelet-{{ common.cluster_name }}
+  name: {{ common.cluster_name }}
 users:
-- name: kubelet-{{ pool.cluster_domain }}
+- name: kubelet-{{ common.cluster_name }}
   user:
     client-certificate: /etc/kubernetes/ssl/kubelet-client.crt
     client-key: /etc/kubernetes/ssl/kubelet-client.key
diff --git a/kubernetes/files/manifest/kube-apiserver.manifest b/kubernetes/files/manifest/kube-apiserver.manifest
index 353cea1..b363766 100644
--- a/kubernetes/files/manifest/kube-apiserver.manifest
+++ b/kubernetes/files/manifest/kube-apiserver.manifest
@@ -24,7 +24,7 @@
       --basic-auth-file=/srv/kubernetes/basic_auth.csv
       --tls-cert-file=/etc/kubernetes/ssl/kubernetes-server.crt
       --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-server.key
-      --secure-port={{ master.apiserver.get('secure_port', '443') }}
+      --secure-port={{ master.apiserver.secure_port }}
       --bind-address={{ master.apiserver.address }}
       --token-auth-file=/srv/kubernetes/known_tokens.csv
       --apiserver-count={{ master.apiserver.get('count', 1) }}
@@ -43,17 +43,17 @@
       httpGet:
         host: 127.0.0.1
         path: /healthz
-        port: {{ master.apiserver.get('insecure_port', '8080') }}
+        port: {{ master.apiserver.insecure_port }}
         scheme: HTTP
       initialDelaySeconds: 15
       timeoutSeconds: 15
     ports:
-    - containerPort: {{ master.apiserver.get('secure_port', '443') }}
-      hostPort: {{ master.apiserver.get('secure_port', '443') }}
+    - containerPort: {{ master.apiserver.secure_port }}
+      hostPort: {{ master.apiserver.secure_port }}
       name: https
       protocol: TCP
-    - containerPort: {{ master.apiserver.get('insecure_port', '8080') }}
-      hostPort: {{ master.apiserver.get('insecure_port', '8080') }}
+    - containerPort: {{ master.apiserver.insecure_port }}
+      hostPort: {{ master.apiserver.insecure_port }}
       name: local
       protocol: TCP
     resources:
diff --git a/kubernetes/files/manifest/kube-proxy.manifest.pool b/kubernetes/files/manifest/kube-proxy.manifest.pool
index 2d563e9..2fb1118 100644
--- a/kubernetes/files/manifest/kube-proxy.manifest.pool
+++ b/kubernetes/files/manifest/kube-proxy.manifest.pool
@@ -19,7 +19,8 @@
       --logtostderr=true
       --v={{ pool.get('verbosity', 2) }}
       --kubeconfig=/etc/kubernetes/proxy.kubeconfig
-      --master={%- if pool.apiserver.insecure.enabled %}http://{{ pool.apiserver.host }}:8080{%- else %}https://{{ pool.apiserver.host }}{%- endif %}
+      --master={%- if pool.apiserver.insecure.enabled %}http://{{
+pool.apiserver.host }}:{{ pool.apiserver.insecure_port }}{%- else %}https://{{ pool.apiserver.host }}:{{ pool.apiserver.secure_port }}{%- endif %}
 {%- if pool.network.engine == 'calico' %}
       --proxy-mode=iptables
 {%- endif %}
diff --git a/kubernetes/files/opencontrail/4.0/contrail-kubernetes.conf b/kubernetes/files/opencontrail/4.0/contrail-kubernetes.conf
index 16a68d5..14aafbe 100644
--- a/kubernetes/files/opencontrail/4.0/contrail-kubernetes.conf
+++ b/kubernetes/files/opencontrail/4.0/contrail-kubernetes.conf
@@ -2,7 +2,7 @@
 [KUBERNETES]
 kubernetes_api_server={{ master.apiserver.insecure_address }}
 kubernetes_api_port={{ master.apiserver.insecure_port }}
-kubernetes_api_secure_port=443
+kubernetes_api_secure_port={{ master.apiserver.secure_port }}
 service_subnets={{ master.network.get('service_subnets', '10.96.0.0/12') }}
 pod_subnets={{ master.network.get('pod_subnets', '10.32.0.0/12') }}
 cluster_name={{ master.network.get('cluster_name', 'default') }}
@@ -56,4 +56,4 @@
 auth_token_url={{ master.network.identity.get('auth_token_url', None) }}
 auth_user={{ master.network.identity.get('auth_user', 'admin') }}
 auth_password={{ master.network.identity.get('auth_token_url', 'admin') }}
-auth_tenant={{ master.network.identity.get('auth_token_url', 'admin') }}
\ No newline at end of file
+auth_tenant={{ master.network.identity.get('auth_tenant', 'admin') }}
diff --git a/kubernetes/files/systemd/kube-addon-manager.service b/kubernetes/files/systemd/kube-addon-manager.service
new file mode 100644
index 0000000..1785096
--- /dev/null
+++ b/kubernetes/files/systemd/kube-addon-manager.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=Kubernetes Addon Manager
+Documentation=https://github.com/kubernetes/kubernetes
+After=kube-apiserver.service
+
+[Service]
+SyslogIdentifier=kube-addon-manager
+EnvironmentFile=-/etc/default/%p
+User=root
+ExecStart=/usr/bin/kube-addons.sh
+Restart=on-failure
+LimitNOFILE=65536
+
+[Install]
+WantedBy=multi-user.target
diff --git a/kubernetes/files/virtlet/kubelet.conf b/kubernetes/files/virtlet/kubelet.conf
index db0baa4..f8cf16d 100644
--- a/kubernetes/files/virtlet/kubelet.conf
+++ b/kubernetes/files/virtlet/kubelet.conf
@@ -32,7 +32,7 @@
     "clusterDNS": [
         "10.254.0.10"
     ],
-    "clusterDomain": "{{ common.cluster_domain }}",
+    "clusterDomain": "{{ common.cluster_domain|replace('_', '-') }}",
     "cniBinDir": "/opt/cni/bin",
     "cniConfDir": "",
     "containerRuntime": "docker",
diff --git a/kubernetes/map.jinja b/kubernetes/map.jinja
index 5668958..daa7fe7 100644
--- a/kubernetes/map.jinja
+++ b/kubernetes/map.jinja
@@ -1,4 +1,4 @@
-{% set _version = salt['cmd.run']("hyperkube --version | sed -e 's/-.*//g' -e 's/v//g' -e 's/Kubernetes //g' | awk -F '.' '{ print $1 \".\" $2 }'") %}
+{% set _version = salt['cmd.shell']("(hyperkube --version 2> /dev/null || echo '0.0') | sed -e 's/-.*//g' -e 's/v//g' -e 's/Kubernetes //g' | awk -F'.' '{print $1 \".\" $2}'") %}
 
 {% set common = salt['grains.filter_by']({
     'Debian': {
diff --git a/kubernetes/master/controller.sls b/kubernetes/master/controller.sls
index d52190d..d57434e 100644
--- a/kubernetes/master/controller.sls
+++ b/kubernetes/master/controller.sls
@@ -82,12 +82,12 @@
         --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota
         --allow-privileged=True
         --basic-auth-file=/srv/kubernetes/basic_auth.csv
-        --bind-address={{ master.apiserver.address }}
+        --bind-address={{ master.apiserver.get('bind_address', master.apiserver.address) }}
         --client-ca-file=/etc/kubernetes/ssl/ca-{{ master.ca }}.crt
         --etcd-quorum-read=true
         --insecure-bind-address={{ master.apiserver.insecure_address }}
-        --insecure-port={{ master.apiserver.get('insecure_port', '8080') }}
-        --secure-port={{ master.apiserver.get('secure_port', '443') }}
+        --insecure-port={{ master.apiserver.insecure_port }}
+        --secure-port={{ master.apiserver.secure_port }}
         --service-cluster-ip-range={{ master.service_addresses }}
         --tls-cert-file=/etc/kubernetes/ssl/kubernetes-server.crt
         --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-server.key
@@ -186,7 +186,11 @@
   file.managed:
     - source: salt://{{ master.get('cert_source','_certs/kubernetes') }}/{{ filename }}
     - user: root
+    {%- if pillar.get('haproxy', {}).get('proxy', {}).get('enabled') %}
     - group: haproxy
+    {%- else %}
+    - group: root
+    {%- endif %}
     - mode: 640
     - watch_in:
       - service: master_services
@@ -224,7 +228,7 @@
 
 kubernetes_namespace_delete_{{ name }}:
   cmd.run:
-    - name: kubectl get ns -o=custom-columns=NAME:.metadata.name | grep -v NAME | grep "{{ name }}" > /dev/null && kubectl delete ns "{{ name }}"
+    - name: kubectl get ns -o=custom-columns=NAME:.metadata.name | grep -v NAME | grep "{{ name }}" > /dev/null && kubectl delete ns "{{ name }}" || true
 
 {%- endif %}
 
diff --git a/kubernetes/master/federation.sls b/kubernetes/master/federation.sls
new file mode 100644
index 0000000..2becd61
--- /dev/null
+++ b/kubernetes/master/federation.sls
@@ -0,0 +1,122 @@
+{%- from "kubernetes/map.jinja" import master with context %}
+{%- from "kubernetes/map.jinja" import common with context %}
+{%- if master.enabled %}
+
+extract_kubernetes_client:
+  archive.extracted:
+    - name: /tmp/kubernetes-client
+    - source: {{ master.federation.source }}
+    {%- if master.federation.get('hash') %}
+    - source_hash: sha256={{ master.federation.hash }}
+    {%- endif %}
+    - tar_options: xzf
+    - archive_format: tar
+    - keep: true
+    {%- if grains.get('noservices') %}
+    - onlyif: /bin/false
+    {%- endif %}
+
+/usr/bin/kubefed:
+  file.managed:
+  - source: /tmp/kubernetes-client/kubernetes/client/bin/kubefed
+  - mode: 755
+  - owner: root
+  - group: root
+  - require:
+    - archive: extract_kubernetes_client
+
+/etc/kubernetes/federation/federation.kubeconfig:
+  file.copy:
+  - source: /etc/kubernetes/admin-kube-config
+  - force: false
+  - mode: 0700
+  - owner: root
+  - group: root
+  - dir_mode: 755
+  - makedirs: True
+
+#Set server to apiserver VIP instead of localhost to be reached from pod net
+federation_kubeconfig_replace_server:
+  file.replace:
+  - name: /etc/kubernetes/federation/federation.kubeconfig
+  - repl: "server: https://{{ master.apiserver.vip_address }}:{{ master.apiserver.secure_port }}"
+  - pattern: "server: http://127.0.0.1:{{ master.apiserver.insecure_port }}"
+  - count: 1
+  - show_changes: True
+
+/etc/kubernetes/federation/dns.conf:
+  file.managed:
+  - source: salt://kubernetes/files/federation/{{ master.federation.dns_provider }}.conf
+  - template: jinja
+  - user: root
+  - group: root
+  - mode: 644
+  - makedirs: true
+  - dir_mode: 755
+
+kubefed_init:
+  cmd.run:
+  - name: kubefed init {{ master.federation.name }} --host-cluster-context=local --kubeconfig=/etc/kubernetes/federation/federation.kubeconfig --federation-system-namespace={{ master.federation.namespace }} --api-server-service-type={{ master.federation.service_type }} --etcd-persistent-storage=false  --dns-provider={{ master.federation.dns_provider }} --dns-provider-config=/etc/kubernetes/federation/dns.conf --dns-zone-name={{ master.federation.name }} --image={{ common.hyperkube.image }}
+  - require:
+    - file: /usr/bin/kubefed
+    - file: /etc/kubernetes/federation/federation.kubeconfig
+  - unless: kubectl get namespace {{ master.federation.namespace }}
+  {%- if grains.get('noservices') %}
+  - onlyif: /bin/false
+  {%- endif %}
+
+federation_kubeconfig_set_context:
+  cmd.run:
+  - name: kubectl config use-context {{ master.federation.name }}
+  - env:
+    - KUBECONFIG: /etc/kubernetes/federation/federation.kubeconfig
+  - require:
+    - cmd: kubefed_init
+  - unless: kubectl config current-context | grep {{ master.federation.name }}
+  {%- if grains.get('noservices') %}
+  - onlyif: /bin/false
+  {%- endif %}
+
+kubefed_join_host_cluster:
+  cmd.run:
+  - name: kubefed join {{ common.cluster_name }} --host-cluster-context=local --context={{ master.federation.name }}
+  - env:
+    - KUBECONFIG: /etc/kubernetes/federation/federation.kubeconfig
+  - require:
+    - cmd: kubefed_init
+  - unless: kubectl --context={{ master.federation.name }} get clusters | grep {{ common.cluster_name }}
+  {%- if grains.get('noservices') %}
+  - onlyif: /bin/false
+  {%- endif %}
+
+# Assumes the following:
+# * Pillar data master.federation.childclusters is populated
+# * kubeconfig data for each cluster exists in /etc/kubernetes/federation/federation.kubeconfig
+{%- if master.federation.get('childclusters') %}
+{%- for childcluster in master.federation.childclusters %}
+
+federation_verify_kubeconfig_{{ childcluster }}:
+  cmd.run:
+  - name: kubectl config get-contexts -o name | grep {{ childcluster }}
+  - env:
+    - KUBECONFIG: /etc/kubernetes/federation/childclusters.kubeconfig
+  - require:
+    - cmd: kubefed_init
+  {%- if grains.get('noservices') %}
+  - onlyif: /bin/false
+  {%- endif %}
+
+federation_join_cluster_{{ childcluster }}:
+  cmd.run:
+  - name: kubefed join {{ childcluster }} --host-cluster-context=local --context={{ master.federation.name }}
+  - env:
+    - KUBECONFIG: /etc/kubernetes/federation/federation.kubeconfig
+  - require:
+    - cmd: federation_verify_kubeconfig_{{ childcluster }}
+  - unless: kubectl get clusters | grep {{ childcluster }}
+
+{%- endfor %}
+{%- endif %}
+
+{%- endif %}
+
diff --git a/kubernetes/master/kube-addons.sls b/kubernetes/master/kube-addons.sls
index 61eda90..812c84b 100644
--- a/kubernetes/master/kube-addons.sls
+++ b/kubernetes/master/kube-addons.sls
@@ -1,4 +1,5 @@
-{%- from "kubernetes/map.jinja" import master with context %}
+{%- from "kubernetes/map.jinja" import common with context -%}
+{%- from "kubernetes/map.jinja" import master with context -%}
 {%- if master.enabled %}
 
 addon-dir-create:
@@ -9,17 +10,17 @@
     - mode: 0755
 
 {%- if master.network.engine == "opencontrail" and master.network.get('version', 3.0) < 4.0 %}
-/etc/kubernetes/addons/contrail_network_controller/contrail-network-controller-configmap.yml:
+/etc/kubernetes/addons/contrail-network-controller/contrail-network-controller-configmap.yml:
   file.managed:
-    - source: salt://kubernetes/files/kube-addons/contrail_network_controller/contrail-network-controller-configmap.yml
+    - source: salt://kubernetes/files/kube-addons/contrail-network-controller/contrail-network-controller-configmap.yml
     - template: jinja
     - group: root
     - dir_mode: 755
     - makedirs: True
 
-/etc/kubernetes/addons/contrail_network_controller/contrail-network-controller-deploy.yml:
+/etc/kubernetes/addons/contrail-network-controller/contrail-network-controller-deploy.yml:
   file.managed:
-    - source: salt://kubernetes/files/kube-addons/contrail_network_controller/contrail-network-controller-deploy.yml
+    - source: salt://kubernetes/files/kube-addons/contrail-network-controller/contrail-network-controller-deploy.yml
     - template: jinja
     - group: root
     - dir_mode: 755
@@ -27,7 +28,7 @@
 
 {% endif %}
 
-{%- if master.addons.get('virtlet', {}).get('enabled') %}
+{%- if common.addons.get('virtlet', {}).get('enabled') %}
 /etc/kubernetes/addons/virtlet/virtlet-ds.yml:
   file.managed:
     - source: salt://kubernetes/files/kube-addons/virtlet/virtlet-ds.yml
@@ -38,7 +39,7 @@
 
 {% endif %}
 
-{%- if master.addons.get('calico_policy', {}).get('enabled', False) and master.network.engine == "calico" %}
+{%- if common.addons.get('calico_policy', {}).get('enabled', False) and master.network.engine == "calico" %}
 /etc/kubernetes/addons/calico_policy/calico-policy-controller.yml:
   file.managed:
     - source: salt://kubernetes/files/kube-addons/calico-policy/calico-policy-controller.yml
@@ -50,7 +51,7 @@
 {% endif %}
 
 
-{%- if master.addons.helm.enabled %}
+{%- if common.addons.helm.enabled %}
 /etc/kubernetes/addons/helm/helm-tiller-deploy.yml:
   file.managed:
     - source: salt://kubernetes/files/kube-addons/helm/helm-tiller-deploy.yml
@@ -61,7 +62,7 @@
 
 {% endif %}
 
-{%- if master.addons.netchecker.enabled %}
+{%- if common.addons.netchecker.enabled %}
 
 {%- for resource in ['svc', 'server', 'agent'] %}
 
@@ -77,8 +78,7 @@
 
 {% endif %}
 
-
-{%- if master.addons.dns.enabled %}
+{%- if common.addons.dns.enabled %}
 
 /etc/kubernetes/addons/dns/kubedns-svc.yaml:
   file.managed:
@@ -96,7 +96,7 @@
     - dir_mode: 755
     - makedirs: True
 
-{% if master.addons.dns.get('autoscaler', {}).get('enabled', True) %}
+{% if common.addons.dns.get('autoscaler', {}).get('enabled', True) %}
 
 /etc/kubernetes/addons/dns/kubedns-autoscaler.yaml:
   file.managed:
@@ -108,9 +108,62 @@
 
 {% endif %}
 
+{%- if common.addons.coredns.enabled or master.federation.enabled %}
+
+/etc/kubernetes/addons/coredns/coredns-cm.yml:
+  file.managed:
+    - source: salt://kubernetes/files/kube-addons/coredns/coredns-cm.yml
+    - template: jinja
+    - group: root
+    - dir_mode: 755
+    - makedirs: True
+
+/etc/kubernetes/addons/coredns/coredns-deploy.yml:
+  file.managed:
+    - source: salt://kubernetes/files/kube-addons/coredns/coredns-deploy.yml
+    - template: jinja
+    - group: root
+    - dir_mode: 755
+    - makedirs: True
+
+/etc/kubernetes/addons/coredns/coredns-svc.yml:
+  file.managed:
+    - source: salt://kubernetes/files/kube-addons/coredns/coredns-svc.yml
+    - template: jinja
+    - group: root
+    - dir_mode: 755
+    - makedirs: True
+
+/etc/kubernetes/addons/coredns/etcd-svc.yml:
+  file.managed:
+    - source: salt://kubernetes/files/kube-addons/coredns/etcd-svc.yml
+    - template: jinja
+    - group: root
+    - dir_mode: 755
+    - makedirs: True
+
+/etc/kubernetes/addons/coredns/etcd-deploy.yml:
+  file.managed:
+    - source: salt://kubernetes/files/kube-addons/coredns/etcd-deploy.yml
+    - template: jinja
+    - group: root
+    - dir_mode: 755
+    - makedirs: True
 {% endif %}
 
-{%- if master.addons.dashboard.enabled %}
+{% endif %}
+
+{%- if common.addons.get('externaldns', {}).get('enabled') %}
+/etc/kubernetes/addons/externaldns/externaldns-deploy.yaml:
+  file.managed:
+    - source: salt://kubernetes/files/kube-addons/externaldns/externaldns-deploy.yml
+    - template: jinja
+    - group: root
+    - dir_mode: 755
+    - makedirs: True
+{% endif %}
+
+{%- if common.addons.dashboard.enabled %}
 
 /etc/kubernetes/addons/dashboard/dashboard-service.yaml:
   file.managed:
@@ -130,7 +183,7 @@
 
 {% endif %}
 
-{%- if master.addons.heapster_influxdb.enabled %}
+{%- if common.addons.heapster_influxdb.enabled %}
 
 /etc/kubernetes/addons/heapster-influxdb/heapster-address.yaml:
   file.managed:
diff --git a/kubernetes/master/setup.sls b/kubernetes/master/setup.sls
index b7d3806..82af347 100644
--- a/kubernetes/master/setup.sls
+++ b/kubernetes/master/setup.sls
@@ -1,4 +1,5 @@
-{%- from "kubernetes/map.jinja" import master with context %}
+{%- from "kubernetes/map.jinja" import common with context -%}
+{%- from "kubernetes/map.jinja" import master with context -%}
 {%- if master.enabled %}
 
 /etc/kubernetes/kubeconfig.sh:
@@ -16,19 +17,41 @@
     - watch:
       - file: /etc/kubernetes/kubeconfig.sh
 
-{%- for addon_name, addon in master.addons.iteritems() %}
-{%- if addon.enabled %}
+/etc/default/kube-addon-manager:
+  file.managed:
+    - source: salt://kubernetes/files/kube-addon-manager/kube-addons.config
+    - user: root
+    - group: root
+    - mode: 755
+    - makedirs: True
 
-kubernetes_addons_{{ addon_name }}:
-  cmd.run:
-    - name: "hyperkube kubectl apply -f /etc/kubernetes/addons/{{ addon_name }}"
-    - unless: "hyperkube kubectl get {{ addon.get('creates', 'service') }} kube-{{ addon.get('name', addon_name) }} --namespace={{ addon.get('namespace', 'kube-system') }}"
-    {%- if grains.get('noservices') %}
-    - onlyif: /bin/false
-    {%- endif %}
+/usr/bin/kube-addons.sh:
+  file.managed:
+    - source: salt://kubernetes/files/kube-addon-manager/kube-addons.sh
+    - user: root
+    - group: root
+    - mode: 755
+    - makedirs: True
 
-{%- endif %}
-{%- endfor %}
+/etc/systemd/system/kube-addon-manager.service:
+  file.managed:
+    - source: salt://kubernetes/files/systemd/kube-addon-manager.service
+    - user: root
+    - group: root
+    - mode: 644
+    - makedirs: True
+
+kube-addon-manager_service:
+  service.running:
+  - name: kube-addon-manager
+  - enable: True
+  - watch:
+    - file: /etc/default/kube-addon-manager
+    - file: /usr/bin/kube-addons.sh
+    - file: /etc/systemd/system/kube-addon-manager.service
+  {%- if grains.get('noservices') %}
+  - onlyif: /bin/false
+  {%- endif %}
 
 {%- if master.label is defined %}
 
@@ -41,7 +64,7 @@
     - name: {{ label.key }}
     - value: {{ label.value }}
     - node: {{ label.node }}
-    - apiserver: http://{{ master.apiserver.insecure_address }}:{{ master.apiserver.get('insecure_port', '8080') }}
+    - apiserver: http://{{ master.apiserver.insecure_address }}:{{ master.apiserver.insecure_port }}
     {%- if grains.get('noservices') %}
     - onlyif: /bin/false
     {%- endif %}
@@ -52,7 +75,7 @@
   k8s.label_absent:
     - name: {{ label.key }}
     - node: {{ label.node }}
-    - apiserver: http://{{ master.apiserver.insecure_address }}:{{ master.apiserver.get('insecure_port', '8080') }}
+    - apiserver: http://{{ master.apiserver.insecure_address }}:{{ master.apiserver.insecure_port }}
     {%- if grains.get('noservices') %}
     - onlyif: /bin/false
     {%- endif %}
@@ -63,8 +86,8 @@
 
 {%- endif %}
 
-{%- if master.addons.get('virtlet', {}).get('enabled') %}
-{% for host in master.addons.virtlet.hosts %}
+{%- if common.addons.get('virtlet', {}).get('enabled') %}
+{% for host in common.addons.virtlet.hosts %}
 
 label_virtlet_{{ host }}:
   cmd.run:
diff --git a/kubernetes/meta/collectd.yml b/kubernetes/meta/collectd.yml
index c1f7f04..50ebbe9 100644
--- a/kubernetes/meta/collectd.yml
+++ b/kubernetes/meta/collectd.yml
@@ -11,6 +11,7 @@
 
 {%- if master.get('enabled', False) or pool.get('enabled', False) %}
 local_plugin:
+  {%- if network is defined and network.get('engine', None) == 'calico' %}
   collectd_calico_felix:
    plugin: python
    template: kubernetes/files/collectd_calico_felix.conf
@@ -21,6 +22,7 @@
    endpoints:
      ipv4_socket: /var/run/calico/bird.ctl
      ipv6_socket: /var/run/calico/bird6.ctl
+  {%- endif %}
 
   collectd_http_check:
    interval: 30
@@ -29,7 +31,7 @@
      apiserver:
        expected_code: 200
        expected_content: ok
-       url: http://{{ master.apiserver.insecure_address }}:{{ master.apiserver.get('insecure_port', '8080') }}/healthz
+       url: http://{{ master.apiserver.insecure_address }}:{{ master.apiserver.insecure_port }}/healthz
        metric_name: k8s_service_health
      scheduler:
        expected_code: 200
@@ -99,7 +101,7 @@
        verify: false
        client_cert: /etc/kubernetes/ssl/kubelet-client.crt
        client_key: /etc/kubernetes/ssl/kubelet-client.key
-       url: https://{{ pool.apiserver.host }}:{{ pool.apiserver.port|default('443') }}/healthz
+       url: https://{{ pool.apiserver.host }}:{{ pool.apiserver.secure_port }}/healthz
        metric_name: k8s_service_health_vip
   collectd_k8s_get:
    plugin: python
@@ -109,5 +111,5 @@
    verify: false
    client_cert: /etc/kubernetes/ssl/kubelet-client.crt
    client_key: /etc/kubernetes/ssl/kubelet-client.key
-   endpoint: https://{{ pool.apiserver.host }}:{{ pool.apiserver.port|default('443') }}
+   endpoint: https://{{ pool.apiserver.host }}:{{ pool.apiserver.secure_port }}
 {%- endif %}
diff --git a/metadata/service/common.yml b/metadata/service/common.yml
index 12f782f..8c921db 100644
--- a/metadata/service/common.yml
+++ b/metadata/service/common.yml
@@ -6,7 +6,58 @@
     kubernetes_cluster_domain: ${_param:cluster_domain}
   kubernetes:
     common:
+      addons:
+        dns:
+          enabled: true
+          replicas: 1
+          autoscaler:
+            enabled: true
+          domain: ${_param:kubernetes_cluster_domain}
+          server: 10.254.0.10
+          dnsmasq:
+            cache-size: 1000
+            no-resolv:
+            server: 127.0.0.1#10053
+            log-facility: "-"
+        dashboard:
+          enabled: True
+        heapster_influxdb:
+          enabled: False
+        helm:
+          enabled: False
+          tiller_image: gcr.io/kubernetes-helm/tiller:v2.4.2
+        netchecker:
+          enabled: False
+          interval: 60
+          namespace: netchecker
+          port: 80
+          agent_image: mirantis/k8s-netchecker-agent:stable
+          server_image: mirantis/k8s-netchecker-server:stable
+        calico_policy:
+          enabled: False
+          image: calico/kube-policy-controller:v0.5.4
+          namespace: kube-system
+        contrail_network_controller:
+          enabled: False
+          namespace: kube-system
+          image: yashulyak/contrail-controller:latest
+        coredns:
+          enabled: False
+          namespace: kube-system
+          image: coredns/coredns:latest
+          etcd_image: quay.io/coreos/etcd:v3.1.0
+        externaldns:
+          enabled: True
+          namespace: kube-system
+          image: mirantis/external-dns:latest
+          domain: ${_param:kubernetes_cluster_domain}
+          provider: coredns
+        virtlet:
+          enabled: False
+          namespace: kube-system
+          image: mirantis/virtlet:v0.7.0
       cluster_domain: ${_param:kubernetes_cluster_domain}
+      cluster_name: ${_param:cluster_name}
       network:
         engine: none
         mtu: 1500
diff --git a/metadata/service/master/cluster.yml b/metadata/service/master/cluster.yml
index f755401..f267e5e 100644
--- a/metadata/service/master/cluster.yml
+++ b/metadata/service/master/cluster.yml
@@ -20,6 +20,8 @@
         allow_privileged: True
       apiserver:
         address: ${_param:cluster_local_address}
+        vip_address: ${_param:cluster_vip_address}
+        secure_port: 6443
         internal_address: ${_param:kubernetes_internal_api_address}
         insecure_address: 127.0.0.1
         insecure_port: 8080
@@ -34,45 +36,6 @@
             name: ${_param:cluster_node02_hostname}
           - host: ${_param:cluster_node03_address}
             name: ${_param:cluster_node03_hostname}
-      addons:
-        dns:
-          enabled: true
-          replicas: 1
-          autoscaler:
-            enabled: true
-          domain: ${_param:kubernetes_cluster_domain}
-          server: 10.254.0.10
-          dnsmasq:
-            cache-size: 1000
-            no-resolv:
-            server: 127.0.0.1#10053
-            log-facility: "-"
-        dashboard:
-          enabled: True
-        heapster_influxdb:
-          enabled: False
-        helm:
-          enabled: False
-          tiller_image: gcr.io/kubernetes-helm/tiller:v2.3.0
-        netchecker:
-          enabled: False
-          interval: 60
-          namespace: netchecker
-          port: 80
-          agent_image: mirantis/k8s-netchecker-agent:v1.1.0
-          server_image: mirantis/k8s-netchecker-server:v1.1.0
-        calico_policy:
-          enabled: False
-          image: calico/kube-policy-controller:v0.5.4
-          namespace: kube-system
-        contrail_network_controller:
-          enabled: False
-          namespace: kube-system
-          image: yashulyak/contrail-controller:latest
-        virtlet:
-          enabled: False
-          namespace: kube-system
-          image: mirantis/virtlet:latest
       token:
         admin: ${_param:kubernetes_admin_token}
         kubelet: ${_param:kubernetes_kubelet_token}
@@ -95,3 +58,11 @@
             port: 4001
           - host: ${_param:cluster_node03_address}
             port: 4001
+      federation:
+        enabled: False
+        name: federation
+        namespace: federation-system
+        source: https://dl.k8s.io/v1.6.6/kubernetes-client-linux-amd64.tar.gz
+        hash: 94b2c9cd29981a8e150c187193bab0d8c0b6e906260f837367feff99860a6376
+        service_type: NodePort
+        dns_provider: coredns
diff --git a/metadata/service/master/single.yml b/metadata/service/master/single.yml
index 00dff34..61464f7 100644
--- a/metadata/service/master/single.yml
+++ b/metadata/service/master/single.yml
@@ -20,6 +20,7 @@
         allow_privileged: True
       apiserver:
         address: ${_param:single_address}
+        secure_port: 443
         internal_address: ${_param:kubernetes_internal_api_address}
         insecure_address: 127.0.0.1
         insecure_port: 8080
@@ -48,7 +49,7 @@
           enabled: False
         helm:
           enabled: False
-          tiller_image: gcr.io/kubernetes-helm/tiller:v2.3.0
+          tiller_image: gcr.io/kubernetes-helm/tiller:v2.4.2
         calico_policy:
           enabled: False
           image: calico/kube-policy-controller:v0.5.4
@@ -60,7 +61,7 @@
         virtlet:
           enabled: False
           namespace: kube-system
-          image: mirantis/virtlet:latest
+          image: mirantis/virtlet:v0.7.0
       token:
         admin: ${_param:kubernetes_admin_token}
         kubelet: ${_param:kubernetes_kubelet_token}
@@ -79,3 +80,11 @@
           members:
             - host: ${_param:single_address}
               port: 4001
+      federation:
+        enabled: False
+        name: federation
+        namespace: federation-system
+        source: https://dl.k8s.io/v1.6.6/kubernetes-client-linux-amd64.tar.gz
+        hash: 94b2c9cd29981a8e150c187193bab0d8c0b6e906260f837367feff99860a6376
+        service_type: NodePort
+        dns_provider: coredns
diff --git a/metadata/service/pool/cluster.yml b/metadata/service/pool/cluster.yml
index 41c8ffb..b529623 100644
--- a/metadata/service/pool/cluster.yml
+++ b/metadata/service/pool/cluster.yml
@@ -13,8 +13,10 @@
         name: ${linux:system:name}
       apiserver:
         host: ${_param:cluster_vip_address}
+        secure_port: 443
         insecure:
           enabled: True
+        insecure_port: 8080
         members:
            - host: ${_param:cluster_vip_address}
 # Temporary disabled until kubelet HA would be fixed
diff --git a/metadata/service/pool/single.yml b/metadata/service/pool/single.yml
index e5826c3..0d4085b 100644
--- a/metadata/service/pool/single.yml
+++ b/metadata/service/pool/single.yml
@@ -13,14 +13,16 @@
         name: ${linux:system:name}
       apiserver:
         host: ${_param:master_address}
+        secure_port: 443
         insecure:
           enabled: True
+        insecure_port: 8080
         members:
           - host: ${_param:master_address}
       address: 0.0.0.0
       cluster_dns: 10.254.0.10
       allow_privileged: True
-      cluster_domain: ${param:kubernetes_cluster_domain}
+      cluster_domain: ${_param:kubernetes_cluster_domain}
       kubelet:
         config: /etc/kubernetes/manifests
         allow_privileged: True
diff --git a/tests/pillar/master_cluster.sls b/tests/pillar/master_cluster.sls
index 2d03b69..513d6b1 100644
--- a/tests/pillar/master_cluster.sls
+++ b/tests/pillar/master_cluster.sls
@@ -1,11 +1,12 @@
 kubernetes:
   common:
     cluster_domain: cluster.local
+    cluster_name: cluster
     network:
       engine: none
     hyperkube:
-      image: hyperkube-amd64:v1.5.0-beta.3-1
-  master:
+      image: hyperkube-amd64:v1.6.4-3
+      hash: hnsj0XqABgrSww7Nqo7UVTSZLJUt2XRd
     addons:
       dns:
         domain: cluster.local
@@ -41,7 +42,8 @@
         hosts:
         - cmp01
         - cmp02
-        image: mirantis/virtlet:latest
+        image: mirantis/virtlet:v0.7.0
+  master:
     admin:
       password: password
       username: admin
@@ -51,6 +53,7 @@
       name: node040
     apiserver:
       address: 10.0.175.100
+      secure_port: 6443
       internal_address: 182.22.97.1
       insecure_address: 127.0.0.1
       insecure_port: 8080
@@ -102,5 +105,3 @@
         enabled: true
       netchecker:
         enabled: true
-    hyperkube:
-      hash: hnsj0XqABgrSww7Nqo7UVTSZLJUt2XRd
diff --git a/tests/pillar/master_contrail.sls b/tests/pillar/master_contrail.sls
index 7cf9d68..862bb9a 100644
--- a/tests/pillar/master_contrail.sls
+++ b/tests/pillar/master_contrail.sls
@@ -1,11 +1,12 @@
 kubernetes:
   common:
     cluster_domain: cluster.local
+    cluster_name: cluster
     network:
       engine: opencontrail
     hyperkube:
-      image: hyperkube-amd64:v1.5.0-beta.3-1
-  master:
+      image: hyperkube-amd64:v1.6.4-3
+      hash: hnsj0XqABgrSww7Nqo7UVTSZLJUt2XRd
     addons:
       dns:
         domain: cluster.local
@@ -35,10 +36,18 @@
         enabled: true
         namespace: kube-system
         image: image
+      virtlet:
+        enabled: true
+        namespace: kube-system
+        image: mirantis/virtlet:v0.7.0
+        hosts:
+        - cmp01
+        - cmp02
       contrail_network_controller:
         enabled: true
         namespace: kube-system
         image: yashulyak/contrail-controller:latest
+  master:
     admin:
       password: password
       username: admin
@@ -48,6 +57,7 @@
       name: node040
     apiserver:
       address: 10.0.175.100
+      secure_port: 6443
       internal_address: 185.22.97.1
       insecure_address: 127.0.0.1
       insecure_port: 8080
@@ -102,5 +112,3 @@
         enabled: true
       netchecker:
         enabled: true
-    hyperkube:
-      hash: hnsj0XqABgrSww7Nqo7UVTSZLJUt2XRd
diff --git a/tests/pillar/master_contrail4_0.sls b/tests/pillar/master_contrail4_0.sls
index b303623..d948e7c 100644
--- a/tests/pillar/master_contrail4_0.sls
+++ b/tests/pillar/master_contrail4_0.sls
@@ -1,12 +1,12 @@
 kubernetes:
   common:
     cluster_domain: cluster.local
+    cluster_name: cluster
     network:
       engine: opencontrail
     hyperkube:
-      image: hyperkube-amd64:v1.5.0-beta.3-1
-  master:
-    service_addresses: 127.0.0.1/24
+      image: hyperkube-amd64:v1.6.4-3
+      hash: hnsj0XqABgrSww7Nqo7UVTSZLJUt2XRd
     addons:
       dns:
         domain: cluster.local
@@ -32,6 +32,19 @@
         server_image: image
         agent_image: image
         agent_probeurls: "http://ipinfo.io"
+      calico_policy:
+        enabled: true
+        namespace: kube-system
+        image: image
+      virtlet:
+        enabled: true
+        namespace: kube-system
+        image: mirantis/virtlet:v0.7.0
+        hosts:
+        - cmp01
+        - cmp02
+  master:
+    service_addresses: 127.0.0.1/24
     admin:
       password: password
       username: admin
@@ -41,6 +54,7 @@
       name: node040
     apiserver:
       address: 10.0.175.100
+      secure_port: 6443
       internal_address: 185.22.97.1
       insecure_address: 127.0.0.1
       insecure_port: 8080
@@ -99,5 +113,3 @@
         enabled: true
       netchecker:
         enabled: true
-    hyperkube:
-      hash: hnsj0XqABgrSww7Nqo7UVTSZLJUt2XRd
diff --git a/tests/pillar/pool_cluster.sls b/tests/pillar/pool_cluster.sls
index f9d06f4..b9e7840 100644
--- a/tests/pillar/pool_cluster.sls
+++ b/tests/pillar/pool_cluster.sls
@@ -1,10 +1,27 @@
 kubernetes:
   common:
     cluster_domain: cluster.local
+    cluster_name: cluster
     network:
       engine: none
     hyperkube:
-      image: hyperkube-amd64:v1.5.0-beta.3-1
+      image: hyperkube-amd64:v1.6.4-3
+      hash: hnsj0XqABgrSww7Nqo7UVTSZLJUt2XRd
+    addons:
+      dns:
+        domain: cluster.local
+        enabled: false
+        replicas: 1
+        server: 10.254.0.10
+        autoscaler:
+          enabled: true
+      virtlet:
+        enabled: true
+        namespace: kube-system
+        image: mirantis/virtlet:v0.7.0
+        hosts:
+        - cmp01
+        - cmp02
   pool:
     enabled: true
     version: v1.2.0
@@ -12,15 +29,15 @@
       name: ${linux:system:name}
     apiserver:
       host: 127.0.0.1
+      secure_port: 443
       insecure:
         enabled: True
+      insecure_port: 8080
       members:
         - host: 127.0.0.1
         - host: 127.0.0.1
         - host: 127.0.0.1
     address: 0.0.0.0
-    cluster_dns: 10.254.0.10
-    cluster_domain: cluster.local
     kubelet:
       config: /etc/kubernetes/manifests
       allow_privileged: True
@@ -48,5 +65,3 @@
           port: 4001
         - host: 127.0.0.1
           port: 4001
-    hyperkube:
-      hash: hnsj0XqABgrSww7Nqo7UVTSZLJUt2XRd
diff --git a/tests/pillar/pool_cluster_with_domain.sls b/tests/pillar/pool_cluster_with_domain.sls
new file mode 100644
index 0000000..6201348
--- /dev/null
+++ b/tests/pillar/pool_cluster_with_domain.sls
@@ -0,0 +1,68 @@
+kubernetes:
+  common:
+    cluster_domain: cluster.local
+    cluster_name: cluster
+    network:
+      engine: none
+    hyperkube:
+      image: hyperkube-amd64:v1.6.4-3
+      hash: hnsj0XqABgrSww7Nqo7UVTSZLJUt2XRd
+    addons:
+      dns:
+        domain: cluster.local
+        enabled: false
+        replicas: 1
+        server: 10.254.0.10
+        autoscaler:
+          enabled: true
+      virtlet:
+        enabled: true
+        namespace: kube-system
+        image: mirantis/virtlet:v0.7.0
+        hosts:
+        - cmp01
+        - cmp02
+  pool:
+    enabled: true
+    version: v1.2.0
+    host:
+      name: ${linux:system:name}
+      domain: ${linux:system:domain}
+    apiserver:
+      host: 127.0.0.1
+      secure_port: 443
+      insecure:
+        enabled: True
+      insecure_port: 8080
+      members:
+        - host: 127.0.0.1
+        - host: 127.0.0.1
+        - host: 127.0.0.1
+    address: 0.0.0.0
+    kubelet:
+      config: /etc/kubernetes/manifests
+      allow_privileged: True
+      frequency: 5s
+    token:
+      kubelet: 7bN5hJ9JD4fKjnFTkUKsvVNfuyEddw3r
+      kube_proxy: DFvQ8GelB7afH3wClC9romaMPhquyyEe
+    ca: kubernetes
+    network:
+      calicoctl:
+        image: calico/ctl
+      cni:
+        image: calico/cni
+      engine: calico
+      hash: c15ae251b633109e63bf128c2fbbc34a
+      ipam:
+        hash: 6e6d7fac0567a8d90a334dcbfd019a99
+        version: v1.3.1
+      version: v0.20.0
+      etcd:
+        members:
+        - host: 127.0.0.1
+          port: 4001
+        - host: 127.0.0.1
+          port: 4001
+        - host: 127.0.0.1
+          port: 4001
diff --git a/tests/pillar/pool_contrail4_0.sls b/tests/pillar/pool_contrail4_0.sls
index 6cce55d..ea4426a 100644
--- a/tests/pillar/pool_contrail4_0.sls
+++ b/tests/pillar/pool_contrail4_0.sls
@@ -1,10 +1,27 @@
 kubernetes:
   common:
     cluster_domain: cluster.local
+    cluster_name: cluster
     network:
       engine: none
     hyperkube:
-      image: hyperkube-amd64:v1.5.0-beta.3-1
+      image: hyperkube-amd64:v1.6.4-3
+      hash: hnsj0XqABgrSww7Nqo7UVTSZLJUt2XRd
+    addons:
+      dns:
+        domain: cluster.local
+        enabled: false
+        replicas: 1
+        server: 10.254.0.10
+        autoscaler:
+          enabled: true
+      virtlet:
+        enabled: true
+        namespace: kube-system
+        image: mirantis/virtlet:v0.7.0
+        hosts:
+        - cmp01
+        - cmp02
   pool:
     enabled: true
     version: v1.2.0
@@ -12,15 +29,15 @@
       name: ${linux:system:name}
     apiserver:
       host: 127.0.0.1
+      secure_port: 443
       insecure:
         enabled: True
+      insecure_port: 8080
       members:
         - host: 127.0.0.1
         - host: 127.0.0.1
         - host: 127.0.0.1
     address: 0.0.0.0
-    cluster_dns: 10.254.0.10
-    cluster_domain: cluster.local
     kubelet:
       config: /etc/kubernetes/manifests
       allow_privileged: True
@@ -35,5 +52,3 @@
       config:
         api:
           host: 127.0.0.1
-    hyperkube:
-      hash: hnsj0XqABgrSww7Nqo7UVTSZLJUt2XRd