Merge "Optimize kitchen tests for Travis CI"
diff --git a/README.rst b/README.rst
index 7f4dedc..37fa482 100644
--- a/README.rst
+++ b/README.rst
@@ -1083,7 +1083,7 @@
     kubernetes:
       master:
         auth:
-          mode: RBAC
+          mode: Node,RBAC
 
 Then you can use ``kubernetes.control.role`` state to orchestrate role and
 rolebindings. Following example shows how to create brand new role and binding
diff --git a/kubernetes/_common.sls b/kubernetes/_common.sls
index 283956e..0e5b5ca 100644
--- a/kubernetes/_common.sls
+++ b/kubernetes/_common.sls
@@ -56,39 +56,19 @@
     {%- endif %}
 
 {%- if common.addons.get('virtlet', {}).get('enabled') %}
-/tmp/criproxy:
-  file.directory:
-    - user: root
-    - group: root
-
-copy-criproxy-bin:
-  cmd.run:
-    - name: docker run --rm -v /tmp/criproxy/:/tmp/criproxy/ --entrypoint cp {{ common.addons.virtlet.image }} -vr /criproxy /tmp/criproxy
-    - require:
-      - file: /tmp/criproxy
-    {%- if grains.get('noservices') %}
-    - onlyif: /bin/false
-    {%- endif %}
 
 /usr/bin/criproxy:
   file.managed:
-    - source: /tmp/criproxy/criproxy
+    - source: https://github.com/mirantis/criproxy/releases/download/{{ common.addons.virtlet.get('criproxy_version', 'v0.9.2') }}/criproxy
     - mode: 750
     - makedirs: true
     - user: root
     - group: root
-    - require:
-      - cmd: copy-criproxy-bin
+    - source_hash: {{ common.addons.virtlet.get('criproxy_source', 'md5=c52d3c4e457144c6523570c847a442b2') }}
     {%- if grains.get('noservices') %}
     - onlyif: /bin/false
     {%- endif %}
 
-/usr/bin/dockershim:
-  file.symlink:
-    - target: /usr/bin/criproxy
-    - require:
-      - file: /usr/bin/criproxy
-
 {%- if not pillar.kubernetes.pool is defined %}
 
 /etc/default/dockershim:
@@ -146,7 +126,7 @@
   - enable: True
   - watch:
     - file: /etc/default/dockershim
-    - file: /usr/bin/dockershim
+    - file: /usr/bin/hyperkube
   {%- if grains.get('noservices') %}
   - onlyif: /bin/false
   {%- endif %}
diff --git a/kubernetes/files/calico/calicoctl.cfg.master b/kubernetes/files/calico/calicoctl.cfg.master
index d005e5a..144c9a8 100644
--- a/kubernetes/files/calico/calicoctl.cfg.master
+++ b/kubernetes/files/calico/calicoctl.cfg.master
@@ -1,9 +1,9 @@
 {%- from "kubernetes/map.jinja" import master with context %}
-apiVersion: v1
-kind: calicoApiConfig
+apiVersion: projectcalico.org/v3
+kind: CalicoAPIConfig
 metadata:
 spec:
-  datastoreType: "etcdv2"
+  datastoreType: "etcdv3"
   etcdEndpoints: {% for member in master.network.etcd.members %}http{% if master.network.etcd.get('ssl', {}).get('enabled') %}s{% endif %}://{{ member.host }}:{{ member.port }}{% if not loop.last %},{% endif %}{% endfor %}
 {%- if master.network.etcd.get('ssl', {}).get('enabled') %}
   etcdKeyFile: /var/lib/etcd/etcd-client.pem
diff --git a/kubernetes/files/calico/calicoctl.cfg.pool b/kubernetes/files/calico/calicoctl.cfg.pool
index 4d3c786..25e95ad 100644
--- a/kubernetes/files/calico/calicoctl.cfg.pool
+++ b/kubernetes/files/calico/calicoctl.cfg.pool
@@ -1,9 +1,9 @@
 {%- from "kubernetes/map.jinja" import pool with context %}
-apiVersion: v1
-kind: calicoApiConfig
+apiVersion: projectcalico.org/v3
+kind: CalicoAPIConfig
 metadata:
 spec:
-  datastoreType: "etcdv2"
+  datastoreType: "etcdv3"
   etcdEndpoints: {% for member in pool.network.etcd.members %}http{% if pool.network.etcd.get('ssl', {}).get('enabled') %}s{% endif %}://{{ member.host }}:{{ member.port }}{% if not loop.last %},{% endif %}{% endfor %}
 {%- if pool.network.etcd.get('ssl', {}).get('enabled') %}
   etcdKeyFile: /var/lib/etcd/etcd-client.pem
diff --git a/kubernetes/files/dockershim/default.master b/kubernetes/files/dockershim/default.master
index 1e30e39..e0af9f5 100644
--- a/kubernetes/files/dockershim/default.master
+++ b/kubernetes/files/dockershim/default.master
@@ -3,6 +3,8 @@
 
 # test_args has to be kept at the end, so they'll overwrite any prior configuration
 DAEMON_ARGS="\
+--experimental-dockershim \
+--port 11250 \
 --pod-manifest-path=/etc/kubernetes/manifests \
 --address={{ master.kubelet.address }} \
 --allow-privileged={{ master.kubelet.allow_privileged }} \
diff --git a/kubernetes/files/dockershim/default.pool b/kubernetes/files/dockershim/default.pool
index cd717c4..a9d2839 100644
--- a/kubernetes/files/dockershim/default.pool
+++ b/kubernetes/files/dockershim/default.pool
@@ -3,14 +3,14 @@
 
 # test_args has to be kept at the end, so they'll overwrite any prior configuration
 DAEMON_ARGS="\
---require-kubeconfig \
+--experimental-dockershim \
+--port 11250 \
 --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
 --pod-manifest-path=/etc/kubernetes/manifests \
 --address={{ pool.kubelet.address }} \
 --allow-privileged={{ pool.kubelet.allow_privileged }} \
 --cluster_dns={{ common.addons.dns.server }} \
 --cluster_domain={{ common.addons.dns.domain|replace('_', '-') }} \
---cni-bin-dir={{ pool.apiserver.get('cni_bin_dir', '/opt/cni/bin') }} \
 --hostname-override={{ pool.host.name }} \
 --v={{ pool.get('verbosity', 2) }} \
 {%- if pillar.kubernetes.master is defined %}
@@ -23,6 +23,8 @@
 {%- endif %}
 {%- if pool.network.engine in ['calico', 'opencontrail'] %}
 --network-plugin=cni \
+--cni-bin-dir={{ pool.apiserver.get('cni_bin_dir', '/opt/cni/bin') }} \
+{%- else %}
 --network-plugin-dir=/etc/cni/net.d \
 {%- endif %}
 --file-check-frequency={{ pool.kubelet.frequency }} \
@@ -32,10 +34,7 @@
 --cloud-config=/etc/kubernetes/cloud-config.conf \
 {%- endif %}
 {%- endif %}
-{%- if common.addons.get('virtlet', {}).get('enabled') %}
---container-runtime={{ pool.get('container-runtime', 'remote') }} \
 --enable-controller-attach-detach={{ pool.get('enable-controller-attach-detach', 'false') }} \
-{%- endif %}
 {%- for key, value in pool.get('kubelet', {}).get('daemon_opts', {}).iteritems() %}
 --{{ key }}={{ value }} \
 {%- endfor %}
diff --git a/kubernetes/files/kube-addons/dns/kubedns-autoscaler-rbac.yaml b/kubernetes/files/kube-addons/dns/kubedns-autoscaler-rbac.yaml
new file mode 100644
index 0000000..c718560
--- /dev/null
+++ b/kubernetes/files/kube-addons/dns/kubedns-autoscaler-rbac.yaml
@@ -0,0 +1,44 @@
+kind: ServiceAccount
+apiVersion: v1
+metadata:
+  name: kube-dns-autoscaler
+  namespace: kube-system
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: system:kube-dns-autoscaler
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
+rules:
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["list"]
+  - apiGroups: [""]
+    resources: ["replicationcontrollers/scale"]
+    verbs: ["get", "update"]
+  - apiGroups: ["extensions"]
+    resources: ["deployments/scale", "replicasets/scale"]
+    verbs: ["get", "update"]
+# Remove the configmaps rule once below issue is fixed:
+# kubernetes-incubator/cluster-proportional-autoscaler#16
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "create"]
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: system:kube-dns-autoscaler
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
+subjects:
+  - kind: ServiceAccount
+    name: kube-dns-autoscaler
+    namespace: kube-system
+roleRef:
+  kind: ClusterRole
+  name: system:kube-dns-autoscaler
+  apiGroup: rbac.authorization.k8s.io
diff --git a/kubernetes/files/kube-addons/dns/kubedns-autoscaler.yaml b/kubernetes/files/kube-addons/dns/kubedns-autoscaler.yaml
index f8928df..d07f47b 100644
--- a/kubernetes/files/kube-addons/dns/kubedns-autoscaler.yaml
+++ b/kubernetes/files/kube-addons/dns/kubedns-autoscaler.yaml
@@ -37,4 +37,4 @@
           - --poll-period-seconds={{ common.addons.dns.autoscaler.get('poll-period-seconds') }}
           {%- endif %}
           - --v=2
-
+      serviceAccountName: kube-dns-autoscaler
diff --git a/kubernetes/files/kube-addons/dns/kubedns-rc.yaml b/kubernetes/files/kube-addons/dns/kubedns-rc.yaml
index e4cf120..1857aeb 100644
--- a/kubernetes/files/kube-addons/dns/kubedns-rc.yaml
+++ b/kubernetes/files/kube-addons/dns/kubedns-rc.yaml
@@ -152,3 +152,4 @@
             memory: 20Mi
             cpu: 10m
       dnsPolicy: Default  # Don't use cluster DNS.
+      serviceAccountName: kube-dns
diff --git a/kubernetes/files/kube-addons/dns/kubedns-sa.yaml b/kubernetes/files/kube-addons/dns/kubedns-sa.yaml
new file mode 100644
index 0000000..7455b2e
--- /dev/null
+++ b/kubernetes/files/kube-addons/dns/kubedns-sa.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: kube-dns
+  namespace: kube-system
+  labels:
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
diff --git a/kubernetes/files/kube-addons/netchecker/netchecker-roles.yml b/kubernetes/files/kube-addons/netchecker/netchecker-roles.yml
new file mode 100644
index 0000000..3dfe75e
--- /dev/null
+++ b/kubernetes/files/kube-addons/netchecker/netchecker-roles.yml
@@ -0,0 +1,46 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: netchecker-server
+rules:
+  - apiGroups:
+      - apiextensions.k8s.io
+    resources:
+      - customresourcedefinitions
+    verbs:
+      - create
+      - get
+      - list
+      - update
+      - watch
+  - apiGroups:
+      - network-checker.ext
+    resources:
+      - agents
+    verbs:
+      - create
+      - get
+      - list
+      - update
+      - watch
+  - apiGroups:
+      - ''
+    resources:
+      - pods
+    verbs:
+      - get
+      - list
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: netchecker
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: netchecker-server
+subjects:
+- apiGroup: rbac.authorization.k8s.io
+  kind: Group
+  name: system:serviceaccounts
\ No newline at end of file
diff --git a/kubernetes/files/kube-addons/virtlet/virtlet-ds.yml b/kubernetes/files/kube-addons/virtlet/virtlet-ds.yml
index eaced34..13a12f2 100644
--- a/kubernetes/files/kube-addons/virtlet/virtlet-ds.yml
+++ b/kubernetes/files/kube-addons/virtlet/virtlet-ds.yml
@@ -56,19 +56,19 @@
         volumeMounts:
         - name: k8s-flexvolume-plugins-dir
           mountPath: /kubelet-volume-plugins
-        - name: criproxybin
-          mountPath: /opt/criproxy/bin
         - name: run
           mountPath: /run
         - name: dockersock
           mountPath: /var/run/docker.sock
-        - name: criproxyconf
-          mountPath: /etc/criproxy
         - name: log
           mountPath: /hostlog
         # for ensuring that /var/lib/libvirt/images exists on node
         - name: var-lib
           mountPath: /host-var-lib
+        - name: criproxybin
+          mountPath: /opt/criproxy/bin
+        - name: criproxyconf
+          mountPath: /etc/criproxy
         securityContext:
           privileged: true
 
@@ -136,7 +136,7 @@
           name: kubernetes
         - mountPath: /etc/cni
           name: cniconf
-        - mountPath: /opt/cni/bin
+        - mountPath: /opt/cni/bin.orig
           name: cnibin
         - mountPath: /var/run/libvirt
           name: libvirt-sockets
diff --git a/kubernetes/files/kube-controller-manager/controller-manager.kubeconfig b/kubernetes/files/kube-controller-manager/controller-manager.kubeconfig
index 9ec6761..85721af 100644
--- a/kubernetes/files/kube-controller-manager/controller-manager.kubeconfig
+++ b/kubernetes/files/kube-controller-manager/controller-manager.kubeconfig
@@ -18,5 +18,5 @@
 users:
 - name: controller_manager-{{ common.cluster_name }}
   user:
-    client-certificate: /etc/kubernetes/ssl/kubelet-client.crt
-    client-key: /etc/kubernetes/ssl/kubelet-client.key
+    client-certificate: /etc/kubernetes/ssl/kube-controller-manager-client.crt
+    client-key: /etc/kubernetes/ssl/kube-controller-manager-client.key
diff --git a/kubernetes/files/kube-proxy/proxy.kubeconfig b/kubernetes/files/kube-proxy/proxy.kubeconfig
index 307daf8..e6755d4 100644
--- a/kubernetes/files/kube-proxy/proxy.kubeconfig
+++ b/kubernetes/files/kube-proxy/proxy.kubeconfig
@@ -18,5 +18,5 @@
 users:
 - name: kube_proxy-{{ common.cluster_name }}
   user:
-    client-certificate: /etc/kubernetes/ssl/kubelet-client.crt
-    client-key: /etc/kubernetes/ssl/kubelet-client.key
+    client-certificate: /etc/kubernetes/ssl/kube-proxy-client.crt
+    client-key: /etc/kubernetes/ssl/kube-proxy-client.key
diff --git a/kubernetes/files/kube-scheduler/scheduler.kubeconfig b/kubernetes/files/kube-scheduler/scheduler.kubeconfig
index 8a87e39..439bd05 100644
--- a/kubernetes/files/kube-scheduler/scheduler.kubeconfig
+++ b/kubernetes/files/kube-scheduler/scheduler.kubeconfig
@@ -17,5 +17,5 @@
 users:
 - name: scheduler-{{ common.cluster_name }}
   user:
-    client-certificate: /etc/kubernetes/ssl/kubelet-client.crt
-    client-key: /etc/kubernetes/ssl/kubelet-client.key
+    client-certificate: /etc/kubernetes/ssl/kube-scheduler-client.crt
+    client-key: /etc/kubernetes/ssl/kube-scheduler-client.key
diff --git a/kubernetes/files/kubeconfig.sh b/kubernetes/files/kubeconfig.sh
index b05e907..cfdc79b 100644
--- a/kubernetes/files/kubeconfig.sh
+++ b/kubernetes/files/kubeconfig.sh
@@ -5,16 +5,15 @@
 server="$(awk '/server/ { print $2 }' /etc/kubernetes/kubelet.kubeconfig)"
 
 # certificates
-cert="$(base64 /etc/kubernetes/ssl/kubelet-client.crt | sed 's/^/      /g')"
-key="$(base64 /etc/kubernetes/ssl/kubelet-client.key | sed 's/^/      /g')"
-ca="$(base64 /etc/kubernetes/ssl/ca-kubernetes.crt | sed 's/^/      /g')"
+cert="$(base64 --wrap=0 /etc/kubernetes/ssl/kubelet-client.crt)"
+key="$(base64 --wrap=0 /etc/kubernetes/ssl/kubelet-client.key)"
+ca="$(base64 --wrap=0 /etc/kubernetes/ssl/ca-kubernetes.crt)"
 cluster="{{ common.cluster_name }}"
 
 echo "apiVersion: v1
 clusters:
 - cluster:
-    certificate-authority-data: |
-${ca}
+    certificate-authority-data: ${ca}
     server: ${server}
   name: ${cluster}
 - cluster:
@@ -34,9 +33,7 @@
 users:
 - name: admin-${cluster}
   user:
-    client-certificate-data: |
-${cert}
-    client-key-data: |
-${key}
+    client-certificate-data: ${cert}
+    client-key-data: ${key}
 kind: Config
 preferences: {}"
diff --git a/kubernetes/files/kubelet/default.master b/kubernetes/files/kubelet/default.master
index 1e30e39..7374f41 100644
--- a/kubernetes/files/kubelet/default.master
+++ b/kubernetes/files/kubelet/default.master
@@ -1,5 +1,6 @@
 {%- from "kubernetes/map.jinja" import common with context -%}
 {%- from "kubernetes/map.jinja" import master with context -%}
+{%- from "kubernetes/map.jinja" import version %}
 
 # test_args has to be kept at the end, so they'll overwrite any prior configuration
 DAEMON_ARGS="\
@@ -12,6 +13,9 @@
 --hostname-override={{ master.host.name }} \
 --v={{ master.get('verbosity', 2) }} \
 --node-labels=node-role.kubernetes.io/master=true \
+{%- if version|float >= 1.8 %}
+--fail-swap-on={{ master.kubelet.fail_on_swap }} \
+{%- endif %}
 {%- if master.get('unschedulable', 'false') %}
 --register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
 {%- endif %}
diff --git a/kubernetes/files/kubelet/default.pool b/kubernetes/files/kubelet/default.pool
index 8207a7d..5d343b8 100644
--- a/kubernetes/files/kubelet/default.pool
+++ b/kubernetes/files/kubelet/default.pool
@@ -4,16 +4,17 @@
 
 # test_args has to be kept at the end, so they'll overwrite any prior configuration
 DAEMON_ARGS="\
---require-kubeconfig \
 --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
 --pod-manifest-path=/etc/kubernetes/manifests \
 --address={{ pool.kubelet.address }} \
 --allow-privileged={{ pool.kubelet.allow_privileged }} \
 --cluster_dns={{ common.addons.dns.server }} \
 --cluster_domain={{ common.addons.dns.domain|replace('_', '-') }} \
---cni-bin-dir={{ pool.apiserver.get('cni_bin_dir', '/opt/cni/bin') }} \
 --hostname-override={{ pool.host.name }} \
 --v={{ pool.get('verbosity', 2) }} \
+{%- if version|float >= 1.8 %}
+--fail-swap-on={{ pool.kubelet.fail_on_swap }} \
+{%- endif %}
 {%- if pillar.kubernetes.master is defined %}
 --node-labels=node-role.kubernetes.io/master=true \
 {%-   if pillar.kubernetes.get('master', {}).get('unschedulable', 'false') %}
@@ -24,6 +25,8 @@
 {%- endif %}
 {%- if pool.network.engine in ['calico', 'opencontrail'] %}
 --network-plugin=cni \
+--cni-bin-dir={{ pool.apiserver.get('cni_bin_dir', '/opt/cni/bin') }} \
+{%- else %}
 --network-plugin-dir=/etc/cni/net.d \
 {%- endif %}
 --file-check-frequency={{ pool.kubelet.frequency }} \
diff --git a/kubernetes/files/systemd/criproxy.service b/kubernetes/files/systemd/criproxy.service
index 1c0318c..cc6ac41 100644
--- a/kubernetes/files/systemd/criproxy.service
+++ b/kubernetes/files/systemd/criproxy.service
@@ -1,3 +1,7 @@
+{%- from "kubernetes/map.jinja" import pool with context -%}
+{%- from "kubernetes/map.jinja" import master with context -%}
+{%- from "kubernetes/map.jinja" import version %}
+
 [Unit]
 Description=CRI Proxy
 After=dockershim.service
@@ -7,8 +11,18 @@
 SyslogIdentifier=criproxy
 User=root
 ExecStart=/usr/bin/criproxy -alsologtostderr \
-          -connect /var/run/dockershim.sock,virtlet:/var/run/virtlet.sock \
-          -listen /var/run/criproxy.sock
+          -connect /var/run/dockershim.sock,virtlet.cloud:/run/virtlet.sock \
+          -listen /var/run/criproxy.sock \
+          -v 3 \
+          -alsologtostderr \
+          -apiVersion {{ version }} \
+{%- if pool.get('enabled', False) %}
+          -streamUrl http://{{ pool.kubelet.address }}:11250
+{%- else %}
+{%- if master.get('enabled', False) %}
+          -streamUrl http://{{ master.kubelet.address }}:11250
+{%- endif %}
+{%- endif %}
 Restart=on-failure
 
 [Install]
diff --git a/kubernetes/files/systemd/dockershim.service b/kubernetes/files/systemd/dockershim.service
index eb2eb99..84b8c0c 100644
--- a/kubernetes/files/systemd/dockershim.service
+++ b/kubernetes/files/systemd/dockershim.service
@@ -10,7 +10,8 @@
 EnvironmentFile=-/etc/kubernetes/config
 EnvironmentFile=-/etc/default/%p
 User=root
-ExecStart=/usr/bin/dockershim \
+ExecStart=/usr/bin/hyperkube \
+    kubelet \
     $KUBE_LOGTOSTDERR \
     $KUBE_LOG_LEVEL \
     $KUBE_ALLOW_PRIV \
@@ -24,4 +25,5 @@
 Restart=on-failure
 
 [Install]
-WantedBy=kubelet.service
\ No newline at end of file
+WantedBy=kubelet.service
+RequiredBy=criproxy.service
\ No newline at end of file
diff --git a/kubernetes/files/systemd/kubelet.service b/kubernetes/files/systemd/kubelet.service
index c1f2522..c353ece 100644
--- a/kubernetes/files/systemd/kubelet.service
+++ b/kubernetes/files/systemd/kubelet.service
@@ -25,7 +25,8 @@
         $CADVISOR_PORT \
         $DAEMON_ARGS
 Restart=on-failure
+RestartSec=5
 
 [Install]
 WantedBy=multi-user.target
-Alias=cadvisor.service
\ No newline at end of file
+Alias=cadvisor.service
diff --git a/kubernetes/master/controller.sls b/kubernetes/master/controller.sls
index 282fd36..38b170e 100644
--- a/kubernetes/master/controller.sls
+++ b/kubernetes/master/controller.sls
@@ -176,6 +176,7 @@
         --leader-elect=true
         --root-ca-file=/etc/kubernetes/ssl/ca-{{ master.ca }}.crt
         --service-account-private-key-file=/etc/kubernetes/ssl/kubernetes-server.key
+        --use-service-account-credentials
 {%- if common.get('cloudprovider', {}).get('enabled') %}
         --cloud-provider={{ common.cloudprovider.provider }}
 {%- if common.get('cloudprovider', {}).get('provider') == 'openstack' %}
diff --git a/kubernetes/meta/fluentd.yml b/kubernetes/meta/fluentd.yml
new file mode 100644
index 0000000..6db50b3
--- /dev/null
+++ b/kubernetes/meta/fluentd.yml
@@ -0,0 +1,148 @@
+{%- if pillar.get('fluentd', {}).get('agent', {}).get('enabled', False) %}
+{%- from "kubernetes/map.jinja" import pool, master %}
+{%- if pool.get('enabled', False) %}
+{% set network = pool.get('network', []) %}
+{%- else %}
+{%- if master.get('enabled', False) %}
+{% set network = master.get('network', []) %}
+{% endif %}
+{% endif %}
+
+{%- set positiondb = pillar.fluentd.agent.dir.positiondb %}
+agent:
+  plugin:
+    fluent-plugin-kubernetes_metadata_filter:
+      gem: ['fluent-plugin-kubernetes_metadata_filter']
+  config:
+    label:
+      docker:
+        filter:
+          add_drop_tag:
+            tag: 'temp.docker.container.**'
+            type: record_transformer
+            enable_ruby: true
+            record:
+              - name: drop_event
+                value: ${ record.fetch('attrs', {}).fetch('io.kubernetes.pod.name', '') }
+      kubernetes:
+        input:
+          container:
+            type: tail
+            tag: temp.kubernetes.container.*
+            path: /var/log/containers/*.log
+            path_key: log_path
+            pos_file: {{ positiondb }}/kubernetes.pos
+            parser:
+              type: json
+              time_format: '%Y-%m-%dT%H:%M:%S.%NZ'
+              keep_time_key: false
+          {%- if network is defined and network.get('engine', None) == 'calico' %}
+          bird:
+            type: tail
+            tag: kubernetes.calico.bird
+            path: /var/log/calico/bird/current, /var/log/calico/bird6/current
+            path_key: log_path
+            pos_file: {{ positiondb }}/kubernetes.calico.bird.pos
+            parser:
+              type: regexp
+              time_format: '%Y-%m-%d_%H:%M:%S.%N'
+              time_key: Timestamp
+              keep_time_key: false
+              format: '/^(?<Timestamp>[^ ]+) (?<programname>[^ ]+): (?<Payload>.*)$/'
+          confd:
+            type: tail
+            tag: kubernetes.calico.confd
+            path: /var/log/calico/confd/current
+            path_key: log_path
+            pos_file: {{ positiondb }}/kubernetes.calico.confd.pos
+            parser:
+              type: regexp
+              time_format: '%Y-%m-%dT%H:%M:%SZ'
+              time_key: Timestamp
+              keep_time_key: false
+              format: '/^(?<Timestamp>[^ ]+) (?<Hostname>[^ ]+) (?<programname>[^ ]+)\[\d+?\]: (?<orig_severity_label>[^ ]+) (?<Payload>.*)$/'
+          felix:
+            type: tail
+            tag: kubernetes.calico.felix
+            path: /var/log/calico/felix/current
+            path_key: log_path
+            pos_file: {{ positiondb }}/kubernetes.calico.felix.pos
+            parser:
+              type: regexp
+              time_format: '%Y-%m-%d %H:%M:%S.%L'
+              time_key: Timestamp
+              keep_time_key: false
+              format: '/^(?<Timestamp>[^ ]+ [^ ]+) \[(?<orig_severity_label>[^ ]+)\]\[\d+?\] (?<Payload>.*)$/'
+          {%- endif %}
+        filter:
+          add_kubernetes_meta:
+            tag: 'temp.kubernetes.container.**'
+            type: kubernetes_metadata
+            kubernetes_url: https://{{ pool.apiserver.host }}:{{ pool.apiserver.secure_port }}
+            client_cert: /etc/kubernetes/ssl/kubelet-client.crt
+            client_key: /etc/kubernetes/ssl/kubelet-client.key
+            ca_file: /etc/kubernetes/ssl/ca-kubernetes.crt
+            verify_ssl: True
+          enrich_container:
+            require:
+              - add_kubernetes_meta
+            tag: 'temp.kubernetes.container.**'
+            type: record_transformer
+            enable_ruby: true
+            record:
+              - name: severity_label
+                value: INFO
+              - name: Severity
+                value: 6
+              - name: programname
+                value: ${ record['kubernetes']['container_name'] }
+          {%- if network is defined and network.get('engine', None) == 'calico' %}
+          enrich_bird:
+            tag: 'kubernetes.calico.bird'
+            type: record_transformer
+            enable_ruby: true
+            record:
+              - name: severity_label
+                value: INFO
+              - name: Severity
+                value: 6
+              - name: programname
+                value: calico-${ record["programname"] }
+          enrich_confd:
+            tag: 'kubernetes.calico.confd'
+            type: record_transformer
+            enable_ruby: true
+            remove_keys: orig_severity_label
+            record:
+              - name: severity_label
+                value: ${ {'DEBUG'=>'DEBUG','INFO'=>'INFO','WARNING'=>'WARNING','ERROR'=>'ERROR','FATAL'=>'CRITICAL','PANIC'=>'ALERT'}[record['orig_severity_label']] }
+              - name: Severity
+                value: ${ {'DEBUG'=>7,'INFO'=>6,'WARNING'=>4,'ERROR'=>3,'FATAL'=>2,'PANIC'=>1}[record['orig_severity_label']].to_i }
+              - name: programname
+                value: calico-${ record["programname"] }
+          enrich_felix:
+            tag: 'kubernetes.calico.felix'
+            type: record_transformer
+            enable_ruby: true
+            remove_keys: orig_severity_label
+            record:
+              - name: severity_label
+                value: ${ {'DEBUG'=>'DEBUG','INFO'=>'INFO','WARNING'=>'WARNING','ERROR'=>'ERROR','FATAL'=>'CRITICAL','PANIC'=>'ALERT'}[record['orig_severity_label']] }
+              - name: Severity
+                value: ${ {'DEBUG'=>7,'INFO'=>6,'WARNING'=>4,'ERROR'=>3,'FATAL'=>2,'PANIC'=>1}[record['orig_severity_label']].to_i }
+              - name: programname
+                value: calico-felix
+          {%- endif %}
+        match:
+          cast_service_tag:
+            tag: 'temp.kubernetes.container.**'
+            type: rewrite_tag_filter
+            rule:
+              - name: log_path
+                regexp: '^.*\/(.*)\.log$'
+                result: kubernetes.container.$1
+          push_to_default:
+            tag: 'kubernetes.**'
+            type: relabel
+            label: default_output
+{%- endif %}
diff --git a/metadata/service/common.yml b/metadata/service/common.yml
index ad61b89..bedb3b3 100644
--- a/metadata/service/common.yml
+++ b/metadata/service/common.yml
@@ -58,7 +58,9 @@
         virtlet:
           enabled: False
           namespace: kube-system
-          image: mirantis/virtlet:v0.8.0
+          image: mirantis/virtlet:v0.9.2
+          criproxy_version: v0.9.2
+          criproxy_source: md5=c52d3c4e457144c6523570c847a442b2
       cloudprovider:
         enabled: False
         provider: openstack
diff --git a/metadata/service/master/cluster.yml b/metadata/service/master/cluster.yml
index 50e6d81..8088d33 100644
--- a/metadata/service/master/cluster.yml
+++ b/metadata/service/master/cluster.yml
@@ -19,6 +19,7 @@
       kubelet:
         address: ${_param:cluster_local_address}
         allow_privileged: True
+        fail_on_swap: True
       apiserver:
         address: ${_param:cluster_local_address}
         bind_address: 0.0.0.0
diff --git a/metadata/service/pool/cluster.yml b/metadata/service/pool/cluster.yml
index 826a78b..173690c 100644
--- a/metadata/service/pool/cluster.yml
+++ b/metadata/service/pool/cluster.yml
@@ -31,6 +31,7 @@
         config: /etc/kubernetes/manifests
         allow_privileged: True
         frequency: 5s
+        fail_on_swap: True
       token:
         kubelet: ${_param:kubernetes_kubelet_token}
         kube_proxy: ${_param:kubernetes_kube-proxy_token}
diff --git a/metadata/service/support.yml b/metadata/service/support.yml
index 655bb27..8bed1d0 100644
--- a/metadata/service/support.yml
+++ b/metadata/service/support.yml
@@ -1,6 +1,8 @@
 parameters:
   kubernetes:
     _support:
+      fluentd:
+        enabled: true
       prometheus:
         enabled: true
       telegraf:
diff --git a/tests/run_tests.sh b/tests/run_tests.sh
index 8c07e58..ea1a363 100755
--- a/tests/run_tests.sh
+++ b/tests/run_tests.sh
@@ -111,7 +111,7 @@
 }
 
 salt_run() {
-    [ -e ${VEN_DIR}/bin/activate ] && source ${VENV_DIR}/bin/activate
+    [ -e ${VENV_DIR}/bin/activate ] && source ${VENV_DIR}/bin/activate
     salt-call ${SALT_OPTS} $*
 }