Merge "Bump metallb version to 0.7.3"
diff --git a/README.rst b/README.rst
index 22df406..5eab3ee 100644
--- a/README.rst
+++ b/README.rst
@@ -22,7 +22,7 @@
Sample Pillars
==============
-**REQUIRED:** Define image to use for hyperkube, CNIs and calicoctl image
+**REQUIRED:** Define images to use for hyperkube and Calico
.. code-block:: yaml
@@ -36,6 +36,9 @@
calico:
calicoctl_image: calico/ctl
cni_image: calico/cni
+ image: calico/node
+ kube_controllers_image: calico/kube-controllers
+
Enable helm-tiller addon
@@ -48,16 +51,17 @@
helm:
enabled: true
-Enable calico-policy addon
+Enable calico-policy
.. code-block:: yaml
parameters:
kubernetes:
- common:
- addons:
- calico_policy:
- enabled: true
+ pool:
+ network:
+ calico:
+ policy:
+ enabled: true
Enable virtlet addon
@@ -604,7 +608,7 @@
ssl:
enabled: true
-Running with calico-policy controller:
+Running with calico-policy:
.. code-block:: yaml
@@ -613,16 +617,14 @@
network:
calico:
enabled: true
- addons:
- calico_policy:
+ policy:
enabled: true
master:
network:
calico:
enabled: true
- addons:
- calico_policy:
+ policy:
enabled: true
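
Taken together, the required image keys and the relocated policy switch can be expressed in one pillar. A minimal sketch, using the placeholder image names from this change's test pillars (pin real registries and tags in production):

.. code-block:: yaml

  parameters:
    kubernetes:
      pool:
        network:
          calico:
            enabled: true
            calicoctl_image: calico/ctl
            cni_image: calico/cni
            image: calico/node
            kube_controllers_image: calico/kube-controllers
            policy:
              enabled: true
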
diff --git a/kubernetes/files/calico/calico-node.service.master b/kubernetes/files/calico/calico-node.service.master
index e79fc65..7cf05b7 100644
--- a/kubernetes/files/calico/calico-node.service.master
+++ b/kubernetes/files/calico/calico-node.service.master
@@ -15,8 +15,10 @@
- -e CALICO_NETWORKING_BACKEND="{{ master.network.calico.network_backend }}"
+ -e CALICO_NETWORKING_BACKEND="{{ master.network.calico.network_backend }}" \
{%- endif %}
-e AS={{ master.network.calico.get('as', '64512') }} \
- -e NO_DEFAULT_MASTERS={{ master.network.calico.get('no_default_masters') }} \
- -e CALICO_LIBNETWORK_ENABLED={{ master.network.calico.get('libnetwork_enabled', true ) }} \
+ -e NO_DEFAULT_POOLS={{ master.network.calico.get('no_default_pools', false ) }} \
+ -e CALICO_STARTUP_LOGLEVEL={{ master.network.calico.get('log_level', 'INFO') }} \
+ -e CLUSTER_TYPE='k8s,bgp' \
+ -e CALICO_LIBNETWORK_ENABLED={{ master.network.calico.get('libnetwork_enabled', false ) }} \
-e ETCD_ENDPOINTS={% for member in master.network.calico.etcd.members %}http{% if master.network.calico.etcd.get('ssl', {}).get('enabled') %}s{% endif %}://{{ member.host }}:{{ member.port }}{% if not loop.last %},{% endif %}{% endfor %} \
{%- if master.network.calico.etcd.get('ssl', {}).get('enabled') %}
-e ETCD_CA_CERT_FILE=/var/lib/etcd/ca.pem \
@@ -26,7 +28,8 @@
{%- endif %}
{%- if master.network.calico.get('prometheus', {}).get('enabled') %}
-e FELIX_PROMETHEUSMETRICSENABLED=true \
- -p {{ pool.network.calico.prometheus.get('address', '0.0.0.0') }}:{{ master.network.calico.get('prometheus', {}).get('port', 9091) }}:9091 \
+ -e FELIX_PROMETHEUSMETRICSPORT={{ master.network.calico.prometheus.get('port', 9091) }} \
+ -p {{ master.network.calico.prometheus.get('address', '0.0.0.0') }}:{{ master.network.calico.get('prometheus', {}).get('port', 9091) }}:9091 \
{%- endif %}
-v /var/log/calico:/var/log/calico \
-v /var/lib/calico:/var/lib/calico \
@@ -38,12 +41,12 @@
-v {{ volume }} \
{%- endfor %}
{%- endif %}
- {{ master.network.calico.get('image', 'calico/node') }}
+ {{ master.network.calico.image }}
Restart=always
RestartSec=10s
-ExecStop=-/usr/bin/docker stop calico-node
+ExecStop=-/usr/bin/docker rm -f calico-node
[Install]
WantedBy=multi-user.target
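
The pillar keys behind the new environment variables are all optional; a sketch of explicit overrides matching the template defaults above (the prometheus values only apply when that block is enabled):

.. code-block:: yaml

  parameters:
    kubernetes:
      master:
        network:
          calico:
            log_level: INFO            # CALICO_STARTUP_LOGLEVEL
            no_default_pools: false    # NO_DEFAULT_POOLS
            libnetwork_enabled: false  # CALICO_LIBNETWORK_ENABLED
            prometheus:
              enabled: true
              address: 0.0.0.0
              port: 9091               # FELIX_PROMETHEUSMETRICSPORT
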
diff --git a/kubernetes/files/calico/calico-node.service.pool b/kubernetes/files/calico/calico-node.service.pool
index 0797fa3..7b76921 100644
--- a/kubernetes/files/calico/calico-node.service.pool
+++ b/kubernetes/files/calico/calico-node.service.pool
@@ -12,11 +12,13 @@
-e IP={{ pool.address }} \
-e IP6={{ pool.get('ipv6_address', '') }} \
{%- if pool.network.calico.network_backend is defined %}
- -e CALICO_NETWORKING_BACKEND="{{ pool.network.calico.network_backend }}"
+ -e CALICO_NETWORKING_BACKEND="{{ pool.network.calico.network_backend }}" \
{%- endif %}
-e AS={{ pool.network.calico.get('as', '64512') }} \
- -e NO_DEFAULT_POOLS={{ pool.network.calico.get('no_default_pools') }} \
- -e CALICO_LIBNETWORK_ENABLED={{ pool.network.calico.get('libnetwork_enabled', true ) }} \
+ -e NO_DEFAULT_POOLS={{ pool.network.calico.get('no_default_pools', false ) }} \
+ -e CALICO_STARTUP_LOGLEVEL={{ pool.network.calico.get('log_level', 'INFO') }} \
+ -e CLUSTER_TYPE='k8s,bgp' \
+ -e CALICO_LIBNETWORK_ENABLED={{ pool.network.calico.get('libnetwork_enabled', false ) }} \
-e ETCD_ENDPOINTS={% for member in pool.network.calico.etcd.members %}http{% if pool.network.calico.etcd.get('ssl', {}).get('enabled') %}s{% endif %}://{{ member.host }}:{{ member.port }}{% if not loop.last %},{% endif %}{% endfor %} \
{%- if pool.network.calico.etcd.get('ssl', {}).get('enabled') %}
-e ETCD_CA_CERT_FILE=/var/lib/etcd/ca.pem \
@@ -39,12 +41,12 @@
-v {{ volume }} \
{%- endfor %}
{%- endif %}
- {{ pool.network.calico.get('image', 'calico/node') }}
+ {{ pool.network.calico.image }}
Restart=always
RestartSec=10s
-ExecStop=-/usr/bin/docker stop calico-node
+ExecStop=-/usr/bin/docker rm -f calico-node
[Install]
WantedBy=multi-user.target
diff --git a/kubernetes/files/calico/calico.conf b/kubernetes/files/calico/calico.conf
index 0a231aa..29c85b7 100644
--- a/kubernetes/files/calico/calico.conf
+++ b/kubernetes/files/calico/calico.conf
@@ -3,11 +3,7 @@
{
"nodeName": "{{ pool.host.name }}{% if pool.host.get('domain') %}.{{ pool.host.domain }}{%- endif %}",
"name": "calico-k8s-network",
- {%- if pool.network.get('genie', {}).get('enabled', False) %}
"cniVersion": "0.3.0",
- {%- else %}
- "cniVersion": "0.2.0",
- {%- endif %}
"type": "calico",
"etcd_endpoints": "{% for member in pool.network.calico.etcd.members %}http{% if pool.network.calico.etcd.get('ssl', {}).get('enabled') %}s{% endif %}://{{ member.host }}:{{ member.port }}{% if not loop.last %},{% endif %}{% endfor %}",
{%- if pool.network.calico.etcd.get('ssl', {}).get('enabled') %}
@@ -15,7 +11,7 @@
"etcd_cert_file": "/var/lib/etcd/etcd-client.pem",
"etcd_ca_cert_file": "/var/lib/etcd/ca.pem",
{%- endif %}
-{%- if common.get('addons', {}).get('calico_policy', {}).get('enabled', False) %}
+{%- if pool.network.calico.get('policy', {}).get('enabled', False) %}
"policy": {
"type": "k8s"
},
@@ -23,7 +19,7 @@
{%- if pool.network.calico.get('mtu') %}
"mtu": {{ pool.network.calico.mtu }},
{%- endif %}
- "log_level": "info",
+ "log_level": "{{ pool.network.calico.get('log_level', 'INFO') }}",
"ipam": {
"type": "calico-ipam"
},
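
For illustration, with a single etcd member at 127.0.0.1:4001 (as in the test pillars), policy enabled, and a hypothetical node name node01, the template above renders approximately this CNI config (optional fields such as mtu omitted):

.. code-block:: json

  {
    "nodeName": "node01",
    "name": "calico-k8s-network",
    "cniVersion": "0.3.0",
    "type": "calico",
    "etcd_endpoints": "http://127.0.0.1:4001",
    "policy": {
      "type": "k8s"
    },
    "log_level": "INFO",
    "ipam": {
      "type": "calico-ipam"
    }
  }
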
diff --git a/kubernetes/files/calico/calicoctl.cfg.master b/kubernetes/files/calico/calicoctl.cfg.master
index c0fd0ad..39e40d2 100644
--- a/kubernetes/files/calico/calicoctl.cfg.master
+++ b/kubernetes/files/calico/calicoctl.cfg.master
@@ -1,9 +1,9 @@
{%- from "kubernetes/map.jinja" import master with context %}
-apiVersion: v1
-kind: calicoApiConfig
+apiVersion: projectcalico.org/v3
+kind: CalicoAPIConfig
metadata:
spec:
- datastoreType: "etcdv2"
+ datastoreType: "etcdv3"
etcdEndpoints: {% for member in master.network.calico.etcd.members %}http{% if master.network.calico.etcd.get('ssl', {}).get('enabled') %}s{% endif %}://{{ member.host }}:{{ member.port }}{% if not loop.last %},{% endif %}{% endfor %}
{%- if master.network.calico.etcd.get('ssl', {}).get('enabled') %}
etcdKeyFile: /var/lib/etcd/etcd-client.pem
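
With SSL disabled and the same single etcd member, the updated template renders a v3 client config along these lines:

.. code-block:: yaml

  apiVersion: projectcalico.org/v3
  kind: CalicoAPIConfig
  metadata:
  spec:
    datastoreType: "etcdv3"
    etcdEndpoints: http://127.0.0.1:4001
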
diff --git a/kubernetes/files/calico/calicoctl.cfg.pool b/kubernetes/files/calico/calicoctl.cfg.pool
index 1d5f9f8..b31ba68 100644
--- a/kubernetes/files/calico/calicoctl.cfg.pool
+++ b/kubernetes/files/calico/calicoctl.cfg.pool
@@ -1,9 +1,9 @@
{%- from "kubernetes/map.jinja" import pool with context %}
-apiVersion: v1
-kind: calicoApiConfig
+apiVersion: projectcalico.org/v3
+kind: CalicoAPIConfig
metadata:
spec:
- datastoreType: "etcdv2"
+ datastoreType: "etcdv3"
etcdEndpoints: {% for member in pool.network.calico.etcd.members %}http{% if pool.network.calico.etcd.get('ssl', {}).get('enabled') %}s{% endif %}://{{ member.host }}:{{ member.port }}{% if not loop.last %},{% endif %}{% endfor %}
{%- if pool.network.calico.etcd.get('ssl', {}).get('enabled') %}
etcdKeyFile: /var/lib/etcd/etcd-client.pem
diff --git a/kubernetes/files/kube-addons/calico-policy/calico-policy-controller.yml b/kubernetes/files/kube-addons/calico-policy/calico-policy-controller.yml
deleted file mode 100644
index 52d1b26..0000000
--- a/kubernetes/files/kube-addons/calico-policy/calico-policy-controller.yml
+++ /dev/null
@@ -1,147 +0,0 @@
-{%- from "kubernetes/map.jinja" import common with context -%}
-{%- from "kubernetes/map.jinja" import master with context -%}
-# This manifest deploys the Calico Kubernetes controllers.
-# See https://github.com/projectcalico/kube-controllers
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- name: calico-kube-controllers
- namespace: {{ common.addons.calico_policy.namespace }}
- labels:
- k8s-app: calico-kube-controllers
- addonmanager.kubernetes.io/mode: Reconcile
-spec:
- # The controllers can only have a single active instance.
- replicas: 1
- selector:
- matchLabels:
- k8s-app: calico-kube-controllers
- strategy:
- type: Recreate
- template:
- metadata:
- name: calico-kube-controllers
- namespace: {{ common.addons.calico_policy.namespace }}
- labels:
- k8s-app: calico-kube-controllers
- annotations:
- # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
- # reserves resources for critical add-on pods so that they can be rescheduled after
- # a failure. This annotation works in tandem with the toleration below.
- # Note. Rescheduler is deprecated in k8s v1.10 and is to be removed in k8s v1.11.
- scheduler.alpha.kubernetes.io/critical-pod: ''
-{%- if common.addons.calico_policy.cni is defined %}
- cni: {{ common.addons.calico_policy.cni }}
-{%- endif %}
- spec:
- # The controllers must run in the host network namespace so that
- # it isn't governed by policy that would prevent it from working.
- hostNetwork: true
- tolerations:
- # this taint is set by all kubelets running `--cloud-provider=external`
- # so we should tolerate it to schedule the calico pods
- - key: node.cloudprovider.kubernetes.io/uninitialized
- value: "true"
- effect: NoSchedule
- - key: node-role.kubernetes.io/master
- effect: NoSchedule
- # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
- # This, along with the annotation above marks this pod as a critical add-on.
- # Note. Rescheduler is deprecated in k8s v1.10 and is to be removed in k8s v1.11.
- - key: CriticalAddonsOnly
- operator: Exists
- serviceAccountName: calico-kube-controllers
- containers:
- - name: calico-kube-controllers
- image: {{ common.addons.calico_policy.image }}
- imagePullPolicy: IfNotPresent
- resources:
- limits:
- cpu: 100m
- memory: 256M
- requests:
- cpu: 30m
- memory: 64M
- env:
- # The list of etcd nodes in the cluster.
- - name: ETCD_ENDPOINTS
- value: "{% for member in master.network.calico.etcd.members %}http{% if master.network.calico.etcd.get('ssl', {}).get('enabled') %}s{% endif %}://{{ member.host }}:{{ member.port }}{% if not loop.last %},{% endif %}{% endfor %}"
- # CA certificate, client certificate, client key files for accessing the etcd cluster.
- - name: ETCD_CA_CERT_FILE
- value: "/var/lib/etcd/ca.pem"
- - name: ETCD_CERT_FILE
- value: "/var/lib/etcd/etcd-client.pem"
- - name: ETCD_KEY_FILE
- value: "/var/lib/etcd/etcd-client.pem"
- # Which controllers to run.
- - name: ENABLED_CONTROLLERS
- value: "policy,profile,workloadendpoint,node"
- # Minimum log level to be displayed.
- - name: LOG_LEVEL
- value: "info"
- # Period to perform reconciliation with the Calico datastore. Default is 5m.
- - name: RECONCILER_PERIOD
- value: "1m"
- volumeMounts:
- - mountPath: /var/lib/etcd/
- name: etcd-certs
- readOnly: true
- volumes:
- - hostPath:
- path: /var/lib/etcd
- name: etcd-certs
-
----
-
-apiVersion: rbac.authorization.k8s.io/v1beta1
-kind: ClusterRoleBinding
-metadata:
- name: calico-kube-controllers
- labels:
- addonmanager.kubernetes.io/mode: Reconcile
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: calico-kube-controllers
-subjects:
-- kind: ServiceAccount
- name: calico-kube-controllers
- namespace: {{ common.addons.calico_policy.namespace }}
-
----
-
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
- name: calico-kube-controllers
- labels:
- addonmanager.kubernetes.io/mode: Reconcile
-rules:
- - apiGroups:
- - ""
- - extensions
- resources:
- - pods
- - namespaces
- - networkpolicies
- - nodes
- verbs:
- - watch
- - list
- - apiGroups:
- - networking.k8s.io
- resources:
- - networkpolicies
- verbs:
- - watch
- - list
-
----
-
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: calico-kube-controllers
- namespace: {{ common.addons.calico_policy.namespace }}
- labels:
- addonmanager.kubernetes.io/mode: Reconcile
diff --git a/kubernetes/files/kube-addons/calico/calico-kube-controllers.yml b/kubernetes/files/kube-addons/calico/calico-kube-controllers.yml
new file mode 100644
index 0000000..cf90c9e
--- /dev/null
+++ b/kubernetes/files/kube-addons/calico/calico-kube-controllers.yml
@@ -0,0 +1,90 @@
+{%- from "kubernetes/map.jinja" import master with context %}
+# Calico Version v3.1.3
+# https://docs.projectcalico.org/v3.1/releases#v3.1.3
+# This manifest includes the following component versions:
+# calico/kube-controllers:v3.1.3
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+ labels:
+ k8s-app: calico-kube-controllers
+ addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  # Only a single instance of this pod should be
+ # active at a time. Since this pod is run as a Deployment,
+ # Kubernetes will ensure the pod is recreated in case of failure,
+ # removing the need for passive backups.
+ replicas: 1
+ strategy:
+ type: Recreate
+ selector:
+ matchLabels:
+ k8s-app: calico-kube-controllers
+ template:
+ metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+ labels:
+ k8s-app: calico-kube-controllers
+ spec:
+    # The controllers must run in the host network namespace so that
+    # they aren't governed by policy that would prevent them from working.
+ hostNetwork: true
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ serviceAccountName: calico-kube-controllers
+ containers:
+ - name: calico-kube-controllers
+ # Make sure to pin this to your desired version.
+ image: {{ master.network.calico.kube_controllers_image }}
+ imagePullPolicy: IfNotPresent
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256M
+ requests:
+ cpu: 30m
+ memory: 64M
+ env:
+ # The list of etcd nodes in the cluster.
+ - name: ETCD_ENDPOINTS
+ value: "{% for member in master.network.calico.etcd.members %}http{% if master.network.calico.etcd.get('ssl', {}).get('enabled') %}s{% endif %}://{{ member.host }}:{{ member.port }}{% if not loop.last %},{% endif %}{% endfor %}"
+{%- if master.network.calico.etcd.get('ssl', {}).get('enabled') %}
+ # CA certificate, client certificate, client key files for accessing the etcd cluster.
+ - name: ETCD_CA_CERT_FILE
+ value: "/var/lib/etcd/ca.pem"
+ - name: ETCD_CERT_FILE
+ value: "/var/lib/etcd/etcd-client.pem"
+ - name: ETCD_KEY_FILE
+ value: "/var/lib/etcd/etcd-client.pem"
+{%- endif %}
+ # Which controllers to run.
+ - name: ENABLED_CONTROLLERS
+ value: "policy,profile,workloadendpoint,node"
+ # Minimum log level to be displayed.
+ - name: LOG_LEVEL
+ value: "info"
+ # Period to perform reconciliation with the Calico datastore. Default is 5m.
+ - name: RECONCILER_PERIOD
+ value: "1m"
+ volumeMounts:
+ - mountPath: /var/lib/etcd/
+ name: etcd-certs
+ readOnly: true
+ volumes:
+ - hostPath:
+ path: /var/lib/etcd
+ name: etcd-certs
+
+---
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
diff --git a/kubernetes/files/kube-addons/calico/calico-rbac.yml b/kubernetes/files/kube-addons/calico/calico-rbac.yml
new file mode 100644
index 0000000..ea3de58
--- /dev/null
+++ b/kubernetes/files/kube-addons/calico/calico-rbac.yml
@@ -0,0 +1,78 @@
+# Calico Version v3.1.3
+# https://docs.projectcalico.org/v3.1/releases#v3.1.3
+
+---
+
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: calico-kube-controllers
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
+rules:
+ - apiGroups:
+ - ""
+ - extensions
+ resources:
+ - pods
+ - namespaces
+ - networkpolicies
+ - nodes
+ verbs:
+ - watch
+ - list
+ - apiGroups:
+ - networking.k8s.io
+ resources:
+ - networkpolicies
+ verbs:
+ - watch
+ - list
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: calico-kube-controllers
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: calico-kube-controllers
+subjects:
+- kind: ServiceAccount
+ name: calico-kube-controllers
+ namespace: kube-system
+
+---
+
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: calico-node
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
+rules:
+ - apiGroups: [""]
+ resources:
+ - pods
+ - nodes
+ verbs:
+ - get
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: calico-node
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: calico-node
+subjects:
+- kind: ServiceAccount
+ name: calico-node
+ namespace: kube-system
diff --git a/kubernetes/files/kube-addons/coredns/coredns-cm.yml b/kubernetes/files/kube-addons/coredns/coredns-cm.yml
index 54de711..4fed36c 100644
--- a/kubernetes/files/kube-addons/coredns/coredns-cm.yml
+++ b/kubernetes/files/kube-addons/coredns/coredns-cm.yml
@@ -30,4 +30,11 @@
health
proxy . /etc/resolv.conf
cache 30
+ reload
+ loadbalance
+ kubernetes {{ common.addons.coredns.domain|replace('_', '-') }}. in-addr.arpa ip6.arpa {
+ pods insecure
+ upstream
+ fallthrough in-addr.arpa ip6.arpa
+ }
}
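
The new kubernetes plugin block depends on the domain and server keys added to metadata/service/common.yml below (underscores in the domain are rewritten to dashes); a pillar sketch with the shipped defaults:

.. code-block:: yaml

  parameters:
    kubernetes:
      common:
        addons:
          coredns:
            enabled: true
            domain: cluster.local
            server: 10.254.0.10
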
diff --git a/kubernetes/files/kube-addons/coredns/coredns-deploy.yml b/kubernetes/files/kube-addons/coredns/coredns-deploy.yml
index 065b48c..6696f08 100644
--- a/kubernetes/files/kube-addons/coredns/coredns-deploy.yml
+++ b/kubernetes/files/kube-addons/coredns/coredns-deploy.yml
@@ -11,32 +11,40 @@
kubernetes.io/name: "CoreDNS"
addonmanager.kubernetes.io/mode: Reconcile
spec:
- replicas: 1
+ replicas: 2
selector:
matchLabels:
k8s-app: coredns
+ strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
template:
metadata:
labels:
k8s-app: coredns
- annotations:
- scheduler.alpha.kubernetes.io/critical-pod: ''
- scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
-{%- if common.addons.coredns.cni is defined %}
- cni: {{ common.addons.coredns.cni }}
-{%- endif %}
spec:
+ serviceAccountName: coredns
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
+ - key: "CriticalAddonsOnly"
+ operator: "Exists"
containers:
- name: coredns
image: {{ common.addons.coredns.image }}
- imagePullPolicy: Always
+ imagePullPolicy: IfNotPresent
+ resources:
+ limits:
+ memory: 170Mi
+ requests:
+ cpu: 100m
+ memory: 70Mi
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
+ readOnly: true
ports:
- containerPort: 53
name: dns
@@ -44,6 +52,17 @@
- containerPort: 53
name: dns-tcp
protocol: TCP
+ - containerPort: 9153
+ name: metrics
+ protocol: TCP
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ add:
+ - NET_BIND_SERVICE
+ drop:
+ - all
+ readOnlyRootFilesystem: true
livenessProbe:
httpGet:
path: /health
@@ -53,7 +72,7 @@
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
- dnsPolicy: ClusterFirst
+ dnsPolicy: Default
volumes:
- name: config-volume
configMap:
diff --git a/kubernetes/files/kube-addons/coredns/coredns-rbac.yml b/kubernetes/files/kube-addons/coredns/coredns-rbac.yml
new file mode 100644
index 0000000..19262c3
--- /dev/null
+++ b/kubernetes/files/kube-addons/coredns/coredns-rbac.yml
@@ -0,0 +1,46 @@
+{%- from "kubernetes/map.jinja" import common with context -%}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: coredns
+ namespace: {{ common.addons.coredns.namespace }}
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ labels:
+ kubernetes.io/bootstrapping: rbac-defaults
+ addonmanager.kubernetes.io/mode: Reconcile
+ name: system:coredns
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - endpoints
+ - services
+ - pods
+ - namespaces
+ verbs:
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ annotations:
+ rbac.authorization.kubernetes.io/autoupdate: "true"
+ labels:
+ kubernetes.io/bootstrapping: rbac-defaults
+ addonmanager.kubernetes.io/mode: Reconcile
+ name: system:coredns
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:coredns
+subjects:
+- kind: ServiceAccount
+ name: coredns
+ namespace: kube-system
diff --git a/kubernetes/files/kube-addons/coredns/coredns-svc.yml b/kubernetes/files/kube-addons/coredns/coredns-svc.yml
index be49e94..0ea083c 100644
--- a/kubernetes/files/kube-addons/coredns/coredns-svc.yml
+++ b/kubernetes/files/kube-addons/coredns/coredns-svc.yml
@@ -8,11 +8,12 @@
labels:
k8s-app: coredns
kubernetes.io/cluster-service: "true"
- kubernetes.io/name: "coredns"
+ kubernetes.io/name: "CoreDNS"
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
k8s-app: coredns
+ clusterIP: {{ common.addons.coredns.server }}
ports:
- name: dns
port: 53
diff --git a/kubernetes/files/kube-addons/fluentd/fluentd-aggregator-deploy.yaml b/kubernetes/files/kube-addons/fluentd/fluentd-aggregator-deploy.yaml
index 7cb8a38..03bd870 100644
--- a/kubernetes/files/kube-addons/fluentd/fluentd-aggregator-deploy.yaml
+++ b/kubernetes/files/kube-addons/fluentd/fluentd-aggregator-deploy.yaml
@@ -4,7 +4,7 @@
kind: Deployment
metadata:
name: fluentd-aggregator
- namespace: stacklight
+ namespace: {{ common.addons.fluentd.get('namespace', 'stacklight') }}
labels:
k8s-app: fluentd-aggregator
version: v1
@@ -30,20 +30,18 @@
effect: NoSchedule
containers:
- name: fluentd-aggregator
- image: fluent/fluentd-kubernetes-daemonset:v1.2-debian-elasticsearch
+ image: {{ common.addons.fluentd.aggregator.get('image', 'fluent/fluentd-kubernetes-daemonset:v1.2-debian-elasticsearch') }}
env:
- name: FLUENTD_ELASTICSEARCH_HOST
- value: "{{ common.addons.fluentd.aggregator.es.get('host', '127.0.0.1') }}"
+ value: "{{ common.addons.fluentd.aggregator.config.output.es.get('host', '127.0.0.1') }}"
- name: FLUENTD_ELASTICSEARCH_PORT
- value: "{{ common.addons.fluentd.aggregator.es.get('port', '9200') }}"
+ value: "{{ common.addons.fluentd.aggregator.config.output.es.get('port', '9200') }}"
- name: FLUENTD_ELASTICSEARCH_SCHEME
- value: "{{ common.addons.fluentd.aggregator.es.get('scheme', 'http') }}"
+ value: "{{ common.addons.fluentd.aggregator.config.output.es.get('scheme', 'http') }}"
- name: FLUENTD_AGGREGATOR_BIND_PORT
- value: "{{ common.addons.fluentd.aggregator.bind.get('port', '24224') }}"
+ value: "{{ common.addons.fluentd.aggregator.config.forward_input.bind.get('port', '24224') }}"
- name: ENVIRONMENT_LABEL
value: "{{ grains.domain }}"
- # - name: FLUENTD_OPT
- # value: "--use-v1-config"
# TODO: a hack to pass the broken entrypoint in upstream docker image for k8s fluent when configmap is used
- name: FLUENT_ELASTICSEARCH_USER
value: "null"
@@ -51,11 +49,11 @@
value: "null"
resources:
limits:
- memory: 500Mi
+ memory: {{ common.addons.fluentd.aggregator.resources.limits.get('memory', '500Mi') }}
requests:
- memory: 500Mi
+ memory: {{ common.addons.fluentd.aggregator.resources.requests.get('memory', '500Mi') }}
ports:
- - containerPort: {{ common.addons.fluentd.aggregator.bind.get('port', '24224') }}
+ - containerPort: {{ common.addons.fluentd.aggregator.config.forward_input.bind.get('port', '24224') }}
name: main-input
protocol: TCP
- containerPort: 9880
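
The aggregator's tunables now live under a nested config section. A pillar sketch inferred from the lookups above, with the template defaults spelled out (the resources keys have no fallback in the template, so they must be present):

.. code-block:: yaml

  parameters:
    kubernetes:
      common:
        addons:
          fluentd:
            namespace: stacklight
            aggregator:
              image: fluent/fluentd-kubernetes-daemonset:v1.2-debian-elasticsearch
              config:
                forward_input:
                  bind:
                    port: 24224
                output:
                  log_level: info
                  es:
                    host: 127.0.0.1
                    port: 9200
                    scheme: http
              resources:
                limits:
                  memory: 500Mi
                requests:
                  memory: 500Mi
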
diff --git a/kubernetes/files/kube-addons/fluentd/fluentd-aggregator-fluent-conf.yaml b/kubernetes/files/kube-addons/fluentd/fluentd-aggregator-fluent-conf.yaml
index 34fb625..d76f290 100644
--- a/kubernetes/files/kube-addons/fluentd/fluentd-aggregator-fluent-conf.yaml
+++ b/kubernetes/files/kube-addons/fluentd/fluentd-aggregator-fluent-conf.yaml
@@ -1,9 +1,10 @@
+{%- from "kubernetes/map.jinja" import common with context -%}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: fluentd-aggregator-cfg
- namespace: stacklight
+ namespace: {{ common.addons.fluentd.get('namespace', 'stacklight') }}
labels:
addonmanager.kubernetes.io/mode: Reconcile
data:
@@ -37,7 +38,7 @@
Type log
environment_label "#{ENV['ENVIRONMENT_LABEL']}"
Hostname ${ hostname }
- Timestamp ${ time.strftime('%Y-%m-%dT%H:%M:%S.%N%z') }
+ Timestamp ${ time.strftime("{{ common.addons.fluentd.aggregator.config.general.get('time_format', '%Y-%m-%dT%H:%M:%S.%N%z') }}") }
</record>
</filter>
@@ -47,33 +48,52 @@
merge_json_log true
preserve_json_log true
</filter>
- <filter temp.kubernetes.**>
+ <match temp.kubernetes.container>
+ @type rewrite_tag_filter
+ <rule>
+ key
+ pattern ^(.*)$
+ tag __TAG__.$1
+ </rule>
+ </match>
+ <match temp.kubernetes.service>
+ @type rewrite_tag_filter
+ <rule>
+ key service
+ pattern (.*)
+ tag __TAG__.$1
+ </rule>
+ </match>
+ <filter temp.kubernetes.service.**>
@type record_transformer
- enable_ruby true
- remove_keys log
+ enable_ruby
+ remove_keys severity
<record>
- kubernetes_namespace_container_name ${record["kubernetes"]["namespace_name"]}.${record["kubernetes"]["container_name"]}
- Payload ${record['log']}
+ severity_label ${ {"ERROR"=>"E","INFO"=>"I","WARNING"=>"W","NOTICE"=>"N"}.key(record["severity"]) }
+ Severity ${ {3=>"E",4=>"W",5=>"N",6=>"I",}.key(record["severity"]) }
+ programname ${ record['service'] }
+ service ${record['service']}.service
</record>
</filter>
- <filter temp.kubernetes.kube-system.**>
- @type parser
- format kubernetes
- reserve_data true
- key_name log
- suppress_parse_error_log true
- </filter>
<filter temp.kubernetes.container.**>
@type record_transformer
enable_ruby
<record>
- severity_label INFO
- Severity 6
+ severity_label ${ {"ERROR"=>"stderr","INFO"=>"stdout"}.key(record["stream"]) }
+ Severity ${ {3=>"stderr",6=>"stdout"}.key(record["stream"]) }
programname ${ record['kubernetes']['container_name'] }
+ kubernetes_namespace_container_name ${record["kubernetes"]["namespace_name"]}.${record["kubernetes"]["container_name"]}
</record>
</filter>
systemd-filter.conf: |
+ <filter systemd.source>
+ @type record_transformer
+ enable_ruby
+ <record>
+ severity_label ${ {"TRACE"=>8,"DEBUG"=>7,"INFO"=>6,"NOTICE"=>5,"WARNING"=>4,"ERROR"=>3,"CRITICAL"=>2,"ALERT"=>1,"EMERGENCY"=>0}.key(record["Severity"].to_i) }
+ </record>
+ </filter>
<match systemd.source>
@type rewrite_tag_filter
<rule>
@@ -82,19 +102,9 @@
tag __TAG__.$1
</rule>
</match>
- <filter systemd.source.kubelet>
- type parser
- format kubernetes
- reserve_data true
- key_name MESSAGE
- suppress_parse_error_log true
- </filter>
- <filter systemd.source.docker>
- type parser
- format /^time="(?<time>[^)]*)" level=(?<severity>[^ ]*) msg="(?<message>[^"]*)"( err="(?<error>[^"]*)")?( statusCode=($<status_code>\d+))?/
- reserve_data true
- key_name MESSAGE
- suppress_parse_error_log true
+ <filter systemd.source.**>
+ @type record_transformer
+ remove_keys message, severity
</filter>
output.conf: |
@@ -106,27 +116,36 @@
tag kubernetes.container.$1
</rule>
</match>
+ <match temp.kubernetes.service.**>
+ @type rewrite_tag_filter
+ <rule>
+ key programname
+ pattern (.*)
+ tag kubernetes.service.$1
+ </rule>
+ </match>
<match **>
@type elasticsearch
- log_level debug
+ @log_level {{ common.addons.fluentd.aggregator.config.output.get('log_level', 'info') }}
host "#{ENV['FLUENTD_ELASTICSEARCH_HOST']}"
port "#{ENV['FLUENTD_ELASTICSEARCH_PORT']}"
scheme "#{ENV['FLUENTD_ELASTICSEARCH_SCHEME'] || 'http'}"
ssl_verify "#{ENV['FLUENTD_ELASTICSEARCH_SSL_VERIFY'] || 'true'}"
reload_connections "#{ENV['FLUENTD_ELASTICSEARCH_RELOAD_CONNECTIONS'] || 'true'}"
type_name message
+ tag_key Logger
include_tag_key true
time_key Timestamp
time_key_exclude_timestamp true
- logstash_format true
- logstash_prefix k8s
- logstash_dateformat %Y.%m.%d
- request_timeout 10s
- buffer_chunk_limit 2M
- buffer_queue_limit 32
- flush_interval 10s
- max_retry_wait 30
+ logstash_format {{ common.addons.fluentd.aggregator.config.output.get('logstash_format', 'true') | lower }}
+ logstash_prefix {{ common.addons.fluentd.aggregator.config.output.get('logstash_prefix', 'log') }}
+ logstash_dateformat {{ common.addons.fluentd.aggregator.config.output.get('logstash_dateformat', '%Y.%m.%d') }}
+ request_timeout {{ common.addons.fluentd.aggregator.config.output.get('request_timeout', '10s') }}
+ buffer_chunk_limit {{ common.addons.fluentd.aggregator.config.output.get('buffer_chunk_limit', '2m') }}
+ buffer_queue_limit {{ common.addons.fluentd.aggregator.config.output.get('buffer_queue_limit', '32') }}
+ flush_interval {{ common.addons.fluentd.aggregator.config.output.get('flush_interval', '10s') }}
+ max_retry_wait {{ common.addons.fluentd.aggregator.config.output.get('max_retry_wait', '30') }}
disable_retry_limit
- num_threads 8
+ num_threads {{ common.addons.fluentd.aggregator.config.output.get('num_threads', '8') }}
</match>
diff --git a/kubernetes/files/kube-addons/fluentd/fluentd-aggregator-svc.yaml b/kubernetes/files/kube-addons/fluentd/fluentd-aggregator-svc.yaml
index 7c58fd5..f6569b5 100644
--- a/kubernetes/files/kube-addons/fluentd/fluentd-aggregator-svc.yaml
+++ b/kubernetes/files/kube-addons/fluentd/fluentd-aggregator-svc.yaml
@@ -4,7 +4,7 @@
kind: Service
metadata:
name: fluentd-aggregator
- namespace: stacklight
+ namespace: {{ common.addons.fluentd.get('namespace', 'stacklight') }}
labels:
k8s-app: fluentd-aggregator
kubernetes.io/cluster-service: "true"
@@ -15,5 +15,5 @@
k8s-app: fluentd-aggregator
ports:
- name: fluentd-aggregator
- port: {{ common.addons.fluentd.aggregator.bind.get('port', '24224') }}
+ port: {{ common.addons.fluentd.aggregator.config.forward_input.bind.get('port', '24224') }}
protocol: TCP
diff --git a/kubernetes/files/kube-addons/fluentd/fluentd-logger-ds.yaml b/kubernetes/files/kube-addons/fluentd/fluentd-logger-ds.yaml
index a5c5764..685ade4 100644
--- a/kubernetes/files/kube-addons/fluentd/fluentd-logger-ds.yaml
+++ b/kubernetes/files/kube-addons/fluentd/fluentd-logger-ds.yaml
@@ -4,7 +4,7 @@
kind: DaemonSet
metadata:
name: fluentd-logger
- namespace: stacklight
+ namespace: {{ common.addons.fluentd.get('namespace', 'stacklight') }}
labels:
k8s-app: fluentd-logger
version: v1
@@ -30,17 +30,17 @@
- key: node-role.kubernetes.io/master
containers:
- name: fluentd-logger
- image: fluent/fluentd-kubernetes-daemonset:v1.2-debian-stackdriver
+ image: {{ common.addons.fluentd.logger.get('image', 'fluent/fluentd-kubernetes-daemonset:v1.2-debian-stackdriver') }}
env:
- name: FLUENTD_AGGREGATOR_HOST
value: "fluentd-aggregator"
- name: FLUENTD_AGGREGATOR_PORT
- value: "{{ common.addons.fluentd.aggregator.bind.get('port', '24224') }}"
+ value: "{{ common.addons.fluentd.aggregator.config.forward_input.bind.get('port', '24224') }}"
resources:
limits:
- memory: 500Mi
+ memory: {{ common.addons.fluentd.logger.resources.limits.get('memory', '500Mi') }}
requests:
- memory: 500Mi
+ memory: {{ common.addons.fluentd.logger.resources.requests.get('memory', '500Mi') }}
volumeMounts:
- name: varlog
mountPath: /var/log
diff --git a/kubernetes/files/kube-addons/fluentd/fluentd-logger-fluent-conf.yaml b/kubernetes/files/kube-addons/fluentd/fluentd-logger-fluent-conf.yaml
index 3003504..621c0f1 100644
--- a/kubernetes/files/kube-addons/fluentd/fluentd-logger-fluent-conf.yaml
+++ b/kubernetes/files/kube-addons/fluentd/fluentd-logger-fluent-conf.yaml
@@ -1,9 +1,10 @@
+{%- from "kubernetes/map.jinja" import common with context -%}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: fluentd-logger-cfg
- namespace: stacklight
+ namespace: {{ common.addons.fluentd.get('namespace', 'stacklight') }}
labels:
addonmanager.kubernetes.io/mode: Reconcile
data:
@@ -28,16 +29,32 @@
@type tail
path /var/log/containers/*.log
pos_file /var/log/fluentd-containers.log.pos
- time_format %Y-%m-%dT%H:%M:%S.%NZ
+      time_format {{ common.addons.fluentd.logger.config.kubernetes_input.get('time_format', '%Y-%m-%dT%H:%M:%S.%NZ') }}
tag temp.kubernetes.container.*
format json
read_from_head true
path_key log_path
</source>
+ <source>
+ @type tail
+ path /var/log/kubernetes.log
+ pos_file /var/log/fluentd-kubernetes.log.pos
+      time_format {{ common.addons.fluentd.logger.config.kubernetes_input.get('time_format', '%Y-%m-%dT%H:%M:%S.%NZ') }}
+ tag temp.kubernetes.service
+ <parse>
+ @type regexp
+ expression /^(?<service>[^\[]*)\[.*\]:\s(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<log>.*)/
+ time_key time
+ </parse>
+ read_from_head true
+ path_key log_path
+ </source>
systemd-input.conf: |
<source>
@type systemd
+ matches [{"_SYSTEMD_UNIT": "docker.service"}]
path /run/log/journal
pos_file /var/log/fluentd-journald-systemd.pos
tag systemd.source
@@ -52,21 +69,21 @@
forward-output.conf: |
<match **>
@type forward
- require_ack_response true
- ack_response_timeout 30
- recover_wait 10s
- heartbeat_interval 1s
- phi_threshold 16
- send_timeout 10s
- hard_timeout 10s
- expire_dns_cache 15
- heartbeat_type tcp
- buffer_chunk_limit 2M
- buffer_queue_limit 32
- flush_interval 5s
- max_retry_wait 15
+ require_ack_response {{ common.addons.fluentd.logger.config.forward_output.get('require_ack_response', 'true') | lower }}
+ ack_response_timeout {{ common.addons.fluentd.logger.config.forward_output.get('ack_response_timeout', '30') }}
+ recover_wait {{ common.addons.fluentd.logger.config.forward_output.get('recover_wait', '10s') }}
+ heartbeat_interval {{ common.addons.fluentd.logger.config.forward_output.get('heartbeat_interval', '1s') }}
+ phi_threshold {{ common.addons.fluentd.logger.config.forward_output.get('phi_threshold', '16') }}
+ send_timeout {{ common.addons.fluentd.logger.config.forward_output.get('send_timeout', '10s') }}
+ hard_timeout {{ common.addons.fluentd.logger.config.forward_output.get('hard_timeout', '10s') }}
+ expire_dns_cache {{ common.addons.fluentd.logger.config.forward_output.get('expire_dns_cache', '15') }}
+ heartbeat_type {{ common.addons.fluentd.logger.config.forward_output.get('heartbeat_type', 'tcp') }}
+ buffer_chunk_limit {{ common.addons.fluentd.logger.config.forward_output.get('buffer_chunk_limit', '2M') }}
+ buffer_queue_limit {{ common.addons.fluentd.logger.config.forward_output.get('buffer_queue_limit', '32') }}
+ flush_interval {{ common.addons.fluentd.logger.config.forward_output.get('flush_interval', '5s') }}
+ max_retry_wait {{ common.addons.fluentd.logger.config.forward_output.get('max_retry_wait', '15') }}
disable_retry_limit
- num_threads 8
+ num_threads {{ common.addons.fluentd.logger.config.forward_output.get('num_threads', '8') }}
<server>
name fluentd-aggregator
host fluentd-aggregator
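
The logger side follows the same pattern; a sketch with a few of the forward_output defaults from the template (the remaining keys read by the template are overridden the same way) and the previously hard-coded tail time format:

.. code-block:: yaml

  parameters:
    kubernetes:
      common:
        addons:
          fluentd:
            logger:
              image: fluent/fluentd-kubernetes-daemonset:v1.2-debian-stackdriver
              config:
                kubernetes_input:
                  time_format: '%Y-%m-%dT%H:%M:%S.%NZ'
                forward_output:
                  require_ack_response: true
                  heartbeat_type: tcp
                  flush_interval: 5s
                  num_threads: 8
              resources:
                limits:
                  memory: 500Mi
                requests:
                  memory: 500Mi
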
diff --git a/kubernetes/files/kube-addons/fluentd/fluentd-ns.yaml b/kubernetes/files/kube-addons/fluentd/fluentd-ns.yaml
index 1d454eb..5c6a50d 100644
--- a/kubernetes/files/kube-addons/fluentd/fluentd-ns.yaml
+++ b/kubernetes/files/kube-addons/fluentd/fluentd-ns.yaml
@@ -1,8 +1,9 @@
+{%- from "kubernetes/map.jinja" import common with context -%}
---
kind: Namespace
apiVersion: v1
metadata:
- name: stacklight
+ name: {{ common.addons.fluentd.get('namespace', 'stacklight') }}
labels:
k8s-app: fluentd
addonmanager.kubernetes.io/mode: Reconcile
diff --git a/kubernetes/files/kube-addons/fluentd/fluentd-sa.yaml b/kubernetes/files/kube-addons/fluentd/fluentd-sa.yaml
index 5d0b262..d3ce2ef 100644
--- a/kubernetes/files/kube-addons/fluentd/fluentd-sa.yaml
+++ b/kubernetes/files/kube-addons/fluentd/fluentd-sa.yaml
@@ -1,3 +1,4 @@
+{%- from "kubernetes/map.jinja" import common with context -%}
---
apiVersion: v1
kind: ServiceAccount
@@ -5,7 +6,7 @@
labels:
addonmanager.kubernetes.io/mode: Reconcile
name: fluentd
- namespace: stacklight
+ namespace: {{ common.addons.fluentd.get('namespace', 'stacklight') }}
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
@@ -39,4 +40,4 @@
subjects:
- kind: ServiceAccount
name: fluentd
- namespace: stacklight
+ namespace: {{ common.addons.fluentd.get('namespace', 'stacklight') }}
diff --git a/kubernetes/files/kube-addons/telegraf/telegraf-conf.yaml b/kubernetes/files/kube-addons/telegraf/telegraf-conf.yaml
new file mode 100644
index 0000000..4e33a7e
--- /dev/null
+++ b/kubernetes/files/kube-addons/telegraf/telegraf-conf.yaml
@@ -0,0 +1,82 @@
+{%- from "kubernetes/map.jinja" import common with context -%}
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: telegraf-cfg
+ namespace: {{ common.addons.telegraf.get('namespace', 'stacklight') }}
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
+data:
+ telegraf.conf: |+
+ [global_tags]
+
+ {%- if common.addons.telegraf.global_tags is defined %}
+ {%- for tag_name, tag_value in common.addons.telegraf.global_tags.iteritems() %}
+ {{ tag_name }} = "{{ tag_value }}"
+ {%- endfor %}
+ {%- endif %}
+
+ [agent]
+ {%- if common.addons.telegraf.agent.interval is defined %}
+ interval = "{{ common.addons.telegraf.agent.interval }}s"
+ {%- endif %}
+
+ {%- if common.addons.telegraf.agent.round_interval is defined %}
+ round_interval = {{ common.addons.telegraf.agent.round_interval | lower }}
+ {%- endif %}
+
+ {%- if common.addons.telegraf.agent.metric_batch_size is defined %}
+ metric_batch_size = {{ common.addons.telegraf.agent.metric_batch_size }}
+ {%- endif %}
+
+ {%- if common.addons.telegraf.agent.metric_buffer_limit is defined %}
+ metric_buffer_limit = {{ common.addons.telegraf.agent.metric_buffer_limit }}
+ {%- endif %}
+
+ {%- if common.addons.telegraf.agent.collection_jitter is defined %}
+ collection_jitter = "{{ common.addons.telegraf.agent.collection_jitter }}s"
+ {%- endif %}
+
+ {%- if common.addons.telegraf.agent.flush_interval is defined %}
+ flush_interval = "{{ common.addons.telegraf.agent.flush_interval }}s"
+ {%- endif %}
+
+ {%- if common.addons.telegraf.agent.flush_jitter is defined %}
+ flush_jitter = "{{ common.addons.telegraf.agent.flush_jitter }}s"
+ {%- endif %}
+
+ {%- if common.addons.telegraf.agent.precision is defined %}
+ precision = "{{ common.addons.telegraf.agent.precision }}"
+ {%- endif %}
+
+ {%- if common.addons.telegraf.agent.logfile is defined %}
+ logfile = "{{ common.addons.telegraf.agent.logfile }}"
+ {%- endif %}
+
+ {%- if common.addons.telegraf.agent.debug is defined %}
+ debug = {{ common.addons.telegraf.agent.debug | lower }}
+ {%- endif %}
+
+ {%- if common.addons.telegraf.agent.quiet is defined %}
+ quiet = {{ common.addons.telegraf.agent.quiet | lower }}
+ {%- endif %}
+
+ {%- if common.addons.telegraf.agent.hostname is defined %}
+ hostname = "{{ common.addons.telegraf.agent.hostname }}"
+ {%- endif %}
+
+ {%- if common.addons.telegraf.agent.omit_hostname is defined %}
+ omit_hostname = {{ common.addons.telegraf.agent.omit_hostname | lower }}
+ {%- endif %}
+
+ [[inputs.disk]]
+ ignore_fs = ["tmpfs", "devtmpfs"]
+ [[inputs.diskio]]
+ [[inputs.kernel]]
+ [[inputs.mem]]
+ [[inputs.processes]]
+ [[inputs.swap]]
+ [[inputs.system]]
+ [[outputs.prometheus_client]]
+ listen = "0.0.0.0:9126"
diff --git a/kubernetes/files/kube-addons/telegraf/telegraf-ds.yaml b/kubernetes/files/kube-addons/telegraf/telegraf-ds.yaml
new file mode 100644
index 0000000..3614124
--- /dev/null
+++ b/kubernetes/files/kube-addons/telegraf/telegraf-ds.yaml
@@ -0,0 +1,76 @@
+{%- from "kubernetes/map.jinja" import common with context -%}
+---
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: telegraf
+ namespace: {{ common.addons.telegraf.get('namespace', 'stacklight') }}
+ labels:
+ k8s-app: telegraf
+ version: v1
+ kubernetes.io/cluster-service: "true"
+ beta.kubernetes.io/telegraf-ds-ready: "true"
+spec:
+ template:
+ metadata:
+ labels:
+ k8s-app: telegraf
+ version: v1
+ kubernetes.io/cluster-service: "true"
+ annotations:
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
+ spec:
+ priorityClassName: system-node-critical
+ serviceAccountName: telegraf
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ containers:
+ - name: telegraf
+ image: {{ common.addons.telegraf.image }}
+ env:
+ - name: HOSTNAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: "HOST_PROC"
+ value: "/rootfs/proc"
+ - name: "HOST_SYS"
+ value: "/rootfs/sys"
+ resources:
+ limits:
+ memory: {{ common.addons.telegraf.resources.limits.memory }}
+ requests:
+ memory: {{ common.addons.telegraf.resources.requests.memory }}
+ volumeMounts:
+ - name: sys
+ mountPath: /rootfs/sys
+ readOnly: true
+ - name: docker-socket
+ mountPath: /var/run/docker.sock
+ readOnly: true
+ - name: proc
+ mountPath: /rootfs/proc
+ readOnly: true
+ - name: utmp
+ mountPath: /var/run/utmp
+ readOnly: true
+ - name: telegraf-cfg
+ mountPath: /etc/telegraf
+ terminationGracePeriodSeconds: 30
+ volumes:
+ - name: sys
+ hostPath:
+ path: /sys
+ - name: docker-socket
+ hostPath:
+ path: /var/run/docker.sock
+ - name: proc
+ hostPath:
+ path: /proc
+ - name: utmp
+ hostPath:
+ path: /var/run/utmp
+ - name: telegraf-cfg
+ configMap:
+ name: telegraf-cfg
diff --git a/kubernetes/files/kube-addons/telegraf/telegraf-ns.yaml b/kubernetes/files/kube-addons/telegraf/telegraf-ns.yaml
new file mode 100644
index 0000000..473feab
--- /dev/null
+++ b/kubernetes/files/kube-addons/telegraf/telegraf-ns.yaml
@@ -0,0 +1,9 @@
+{%- from "kubernetes/map.jinja" import common with context -%}
+---
+kind: Namespace
+apiVersion: v1
+metadata:
+ name: {{ common.addons.telegraf.get('namespace', 'stacklight') }}
+ labels:
+ k8s-app: telegraf
+ addonmanager.kubernetes.io/mode: Reconcile
diff --git a/kubernetes/files/kube-addons/telegraf/telegraf-sa.yaml b/kubernetes/files/kube-addons/telegraf/telegraf-sa.yaml
new file mode 100644
index 0000000..e342827
--- /dev/null
+++ b/kubernetes/files/kube-addons/telegraf/telegraf-sa.yaml
@@ -0,0 +1,43 @@
+{%- from "kubernetes/map.jinja" import common with context -%}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
+ name: telegraf
+ namespace: {{ common.addons.telegraf.get('namespace', 'stacklight') }}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: telegraf
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ - namespaces
+ verbs:
+ - "get"
+ - "watch"
+ - "list"
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: telegraf
+ labels:
+ k8s-app: telegraf
+ kubernetes.io/cluster-service: "true"
+ addonmanager.kubernetes.io/mode: Reconcile
+roleRef:
+ kind: ClusterRole
+ name: telegraf
+ apiGroup: rbac.authorization.k8s.io
+subjects:
+- kind: ServiceAccount
+ name: telegraf
+ namespace: {{ common.addons.telegraf.get('namespace', 'stacklight') }}
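
All four telegraf files are parameterized from common.addons.telegraf. A minimal pillar sketch covering the keys the templates require — image and resources have no defaults, and the image tag shown is a hypothetical placeholder:

.. code-block:: yaml

  parameters:
    kubernetes:
      common:
        addons:
          telegraf:
            enabled: true
            namespace: stacklight
            image: telegraf:latest   # hypothetical; set your own registry/tag
            agent:
              interval: 15           # rendered as "15s"
              round_interval: false
            resources:
              limits:
                memory: 32Mi
              requests:
                memory: 16Mi
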
diff --git a/kubernetes/master/kube-addons.sls b/kubernetes/master/kube-addons.sls
index 86a6c4f..5bc5acc 100644
--- a/kubernetes/master/kube-addons.sls
+++ b/kubernetes/master/kube-addons.sls
@@ -77,10 +77,18 @@
{% endif %}
-{%- if common.addons.get('calico_policy', {}).get('enabled', False) and master.network.get('calico', {}).get('enabled', False) %}
-/etc/kubernetes/addons/calico_policy/calico-policy-controller.yml:
+{%- if master.network.get('calico', {}).get('enabled', False) %}
+/etc/kubernetes/addons/calico/calico-kube-controllers.yml:
file.managed:
- - source: salt://kubernetes/files/kube-addons/calico-policy/calico-policy-controller.yml
+ - source: salt://kubernetes/files/kube-addons/calico/calico-kube-controllers.yml
+ - template: jinja
+ - group: root
+ - dir_mode: 755
+ - makedirs: True
+
+/etc/kubernetes/addons/calico/calico-rbac.yml:
+ file.managed:
+ - source: salt://kubernetes/files/kube-addons/calico/calico-rbac.yml
- template: jinja
- group: root
- dir_mode: 755
@@ -238,7 +246,11 @@
{% endif %}
-{%- if common.addons.coredns.enabled or master.federation.enabled %}
+{% endif %}
+
+{%- if common.addons.get('coredns', {}).get('enabled') %}
+
+{%- if master.get('federation', {}).get('enabled') or (common.addons.get('externaldns', {}).get('enabled') and common.addons.get('externaldns', {}).get('provider') == "coredns") %}
/etc/kubernetes/addons/coredns/coredns-etcd-operator-deployment.yaml:
file.managed:
- source: salt://kubernetes/files/kube-addons/coredns/coredns-etcd-operator-deployment.yaml
@@ -254,6 +266,7 @@
- group: root
- dir_mode: 755
- makedirs: True
+{% endif %}
/etc/kubernetes/addons/coredns/coredns-cm.yml:
file.managed:
@@ -278,8 +291,14 @@
- group: root
- dir_mode: 755
- makedirs: True
-{% endif %}
+/etc/kubernetes/addons/coredns/coredns-rbac.yml:
+ file.managed:
+ - source: salt://kubernetes/files/kube-addons/coredns/coredns-rbac.yml
+ - template: jinja
+ - group: root
+ - dir_mode: 755
+ - makedirs: True
{% endif %}
{%- if common.addons.get('externaldns', {}).get('enabled') %}
@@ -367,6 +386,21 @@
{% endif %}
+{%- if common.addons.get('telegraf', {}).get('enabled') %}
+{%- set telegraf_resources = ['conf', 'ns', 'sa', 'ds'] %}
+
+{%- for resource in telegraf_resources %}
+/etc/kubernetes/addons/telegraf/telegraf-{{ resource }}.yaml:
+ file.managed:
+ - source: salt://kubernetes/files/kube-addons/telegraf/telegraf-{{ resource }}.yaml
+ - template: jinja
+ - group: root
+ - dir_mode: 755
+ - makedirs: True
+{%- endfor %}
+
+{% endif %}
+
{%- if common.addons.get('dashboard', {'enabled': False}).enabled %}
{%- set dashboard_resources = ['deployment', 'secret', 'service', 'serviceaccount'] %}
diff --git a/kubernetes/pool/calico.sls b/kubernetes/pool/calico.sls
index ef47e2a..30f4956 100644
--- a/kubernetes/pool/calico.sls
+++ b/kubernetes/pool/calico.sls
@@ -29,7 +29,7 @@
copy-calico-node:
cmd.run:
- - name: docker run --rm -v /tmp/calico/:/tmp/calico/ --entrypoint cp {{ pool.network.calico.get('image', 'calico/node') }} -v /bin/birdcl /tmp/calico/
+ - name: docker run --rm -v /tmp/calico/:/tmp/calico/ --entrypoint cp {{ pool.network.calico.image }} -v /bin/birdcl /tmp/calico/
- require:
- file: /tmp/calico/
{%- if grains.get('noservices') %}
diff --git a/metadata/service/common.yml b/metadata/service/common.yml
index 0fa49df..5cdb2a0 100644
--- a/metadata/service/common.yml
+++ b/metadata/service/common.yml
@@ -33,10 +33,6 @@
port: 80
agent_image: mirantis/k8s-netchecker-agent:stable
server_image: mirantis/k8s-netchecker-server:stable
- calico_policy:
- enabled: False
- image: docker-prod-virtual.docker.mirantis.net/mirantis/projectcalico/calico/kube-controllers:v1.0.4
- namespace: kube-system
contrail_network_controller:
enabled: False
namespace: kube-system
@@ -45,6 +41,8 @@
enabled: False
namespace: kube-system
image: coredns/coredns:latest
+ domain: cluster.local
+ server: 10.254.0.10
etcd:
operator_image: quay.io/coreos/etcd-operator:v0.5.2
version: 3.1.8
diff --git a/metadata/service/master/single.yml b/metadata/service/master/single.yml
index 223b4f0..f00bd2b 100644
--- a/metadata/service/master/single.yml
+++ b/metadata/service/master/single.yml
@@ -51,10 +51,6 @@
helm:
enabled: False
tiller_image: gcr.io/kubernetes-helm/tiller:v2.4.2
- calico_policy:
- enabled: False
- image: docker-prod-virtual.docker.mirantis.net/mirantis/projectcalico/calico/kube-controllers:v1.0.4
- namespace: kube-system
contrail_network_controller:
enabled: False
namespace: kube-system
diff --git a/tests/pillar/common_storageclass.sls b/tests/pillar/common_storageclass.sls
index 11bae00..5cb7e30 100644
--- a/tests/pillar/common_storageclass.sls
+++ b/tests/pillar/common_storageclass.sls
@@ -19,6 +19,8 @@
enabled: true
calicoctl_image: calico/ctl
cni_image: calico/cni
+ image: calico/node
+ kube_controllers_image: calico/kube-controllers
etcd:
members:
- host: 127.0.0.1
diff --git a/tests/pillar/master_cluster.sls b/tests/pillar/master_cluster.sls
index 1093758..a61ce7e 100644
--- a/tests/pillar/master_cluster.sls
+++ b/tests/pillar/master_cluster.sls
@@ -30,10 +30,6 @@
server_image: image
agent_image: image
agent_probeurls: "http://ipinfo.io"
- calico_policy:
- enabled: true
- namespace: kube-system
- image: image
virtlet:
enabled: true
namespace: kube-system
@@ -69,6 +65,8 @@
enabled: true
calicoctl_image: calico/ctl
cni_image: calico/cni
+ image: calico/node
+ kube_controllers_image: calico/kube-controllers
etcd:
members:
- host: 127.0.0.1
diff --git a/tests/pillar/master_contrail.sls b/tests/pillar/master_contrail.sls
index 8affa7d..d506d2f 100644
--- a/tests/pillar/master_contrail.sls
+++ b/tests/pillar/master_contrail.sls
@@ -30,10 +30,6 @@
server_image: image
agent_image: image
agent_probeurls: "http://ipinfo.io"
- calico_policy:
- enabled: true
- namespace: kube-system
- image: image
virtlet:
enabled: true
namespace: kube-system
diff --git a/tests/pillar/master_contrail4_0.sls b/tests/pillar/master_contrail4_0.sls
index 4b7bb31..60b6cb0 100644
--- a/tests/pillar/master_contrail4_0.sls
+++ b/tests/pillar/master_contrail4_0.sls
@@ -30,10 +30,6 @@
server_image: image
agent_image: image
agent_probeurls: "http://ipinfo.io"
- calico_policy:
- enabled: true
- namespace: kube-system
- image: image
virtlet:
enabled: true
namespace: kube-system
diff --git a/tests/pillar/pool_cluster.sls b/tests/pillar/pool_cluster.sls
index 1b7d364..5f34de4 100644
--- a/tests/pillar/pool_cluster.sls
+++ b/tests/pillar/pool_cluster.sls
@@ -49,6 +49,8 @@
enabled: true
calicoctl_image: calico/ctl
cni_image: calico/cni
+ image: calico/node
+ kube_controllers_image: calico/kube-controllers
etcd:
members:
- host: 127.0.0.1
diff --git a/tests/pillar/pool_cluster_with_domain.sls b/tests/pillar/pool_cluster_with_domain.sls
index 276c80f..4bfc733 100644
--- a/tests/pillar/pool_cluster_with_domain.sls
+++ b/tests/pillar/pool_cluster_with_domain.sls
@@ -50,6 +50,8 @@
enabled: true
calicoctl_image: calico/ctl
cni_image: calico/cni
+ image: calico/node
+ kube_controllers_image: calico/kube-controllers
etcd:
members:
- host: 127.0.0.1