Merge "Add link for OpenContrail 4.0 -> 4.1"
diff --git a/kubernetes/_common.sls b/kubernetes/_common.sls
index 88a9fcd..ae8a851 100644
--- a/kubernetes/_common.sls
+++ b/kubernetes/_common.sls
@@ -192,6 +192,7 @@
file.absent
{%- if common.get('cloudprovider', {}).get('enabled') and common.get('cloudprovider', {}).get('provider') == 'openstack' %}
+{%- set cloudconfig_type = 'external' %}
/etc/kubernetes/cloud-config:
file.managed:
- source: salt://kubernetes/files/cloudprovider/cloud-config-openstack.conf
@@ -199,6 +200,21 @@
- user: root
- group: root
- mode: 600
+ - defaults:
+ cloudconfig_type: {{ cloudconfig_type }}
+
+{%- if pillar.kubernetes.master is defined %}
+{%- set cloudconfig_type = 'intree' %}
+/etc/kubernetes/cloud-config.intree:
+ file.managed:
+ - source: salt://kubernetes/files/cloudprovider/cloud-config-openstack.conf
+ - template: jinja
+ - user: root
+ - group: root
+ - mode: 600
+ - defaults:
+ cloudconfig_type: {{ cloudconfig_type }}
+{% endif %}
{% endif %}
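
Both cloud-config files above render only when the cloud provider is enabled for OpenStack in the common pillar. A minimal pillar sketch follows, assuming the settings live under kubernetes:common:cloudprovider (as the common.cloudprovider.params.* lookups in the template imply); every value below is a placeholder, not a shipped default:

parameters:
  kubernetes:
    common:
      cloudprovider:
        enabled: true
        provider: openstack
        params:
          subnet_id: 00000000-0000-0000-0000-000000000000        # placeholder UUID
          lb_method: ROUND_ROBIN
          floating_net_id: 00000000-0000-0000-0000-000000000001  # placeholder UUID
          internal_net_name: k8s-internal-net                    # placeholder network name
          public_net_name: public                                # placeholder network name
          ipv6_support_disabled: true

The .intree copy is rendered only where pillar.kubernetes.master is defined, i.e. on nodes that run the controller manager.
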
diff --git a/kubernetes/control/endpoint.sls b/kubernetes/control/endpoint.sls
new file mode 100644
index 0000000..4bb4df3
--- /dev/null
+++ b/kubernetes/control/endpoint.sls
@@ -0,0 +1,60 @@
+{% from "kubernetes/map.jinja" import control with context %}
+include:
+ - kubernetes.control
+
+{%- for endpoint_name, endpoint in control.endpoints.items() %}
+ {%- if endpoint.get('service_enabled', false) %}
+
+/srv/kubernetes/services/{{ endpoint.cluster }}/{{ endpoint.service }}-svc.yml:
+ file.managed:
+ - source: salt://kubernetes/files/svc.yml
+ - user: root
+ - group: root
+ - template: jinja
+ - makedirs: true
+ - require:
+ - file: /srv/kubernetes
+ - defaults:
+ service: {{ endpoint|yaml }}
+
+ {%- if endpoint.get('create', false) %}
+ {%- set service_name = endpoint.service + '-' + endpoint.role if endpoint.role is defined else endpoint.service %}
+kubernetes_service_create_{{ endpoint.service }}:
+ cmd.run:
+ - name: kubectl apply -f /srv/kubernetes/services/{{ endpoint.cluster }}/{{ endpoint.service }}-svc.yml
+ - unless: kubectl get service -o=custom-columns=NAME:.metadata.name --namespace {{ endpoint.namespace }} | grep -xq {{ endpoint.service }}
+ {%- if grains.get('noservices') %}
+ - onlyif: /bin/false
+ {%- endif %}
+ - watch:
+ - file: /srv/kubernetes/services/{{ endpoint.cluster }}/{{ endpoint.service }}-svc.yml
+ {%- endif %}
+
+ {%- endif %}
+
+/srv/kubernetes/endpoints/{{ endpoint.cluster }}/{{ endpoint_name }}.yml:
+ file.managed:
+ - source: salt://kubernetes/files/endpoint.yml
+ - user: root
+ - group: root
+ - template: jinja
+ - makedirs: true
+ - require:
+ - file: /srv/kubernetes
+ - defaults:
+ endpoint: {{ endpoint|yaml }}
+ endpoint_name: {{ endpoint_name }}
+
+ {%- if endpoint.get('create', false) %}
+kubernetes_endpoint_create_{{ endpoint_name }}:
+ cmd.run:
+ - name: kubectl apply -f /srv/kubernetes/endpoints/{{ endpoint.cluster }}/{{ endpoint_name }}.yml
+ - unless: kubectl get endpoint -o=custom-columns=NAME:.metadata.name --namespace {{ endpoint.namespace }} | grep -xq {{ endpoint_name }}
+ {%- if grains.get('noservices') %}
+ - onlyif: /bin/false
+ {%- endif %}
+ - watch:
+ - file: /srv/kubernetes/endpoints/{{ endpoint.cluster }}/{{ endpoint_name }}.yml
+ {%- endif %}
+
+{%- endfor %}
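
The new endpoint state iterates kubernetes:control:endpoints and reads cluster, service, namespace, an optional role, the service_enabled/create flags, and a list of subsets with an ip plus a port number and name. A minimal pillar sketch under those assumptions; all names and addresses are placeholders:

parameters:
  kubernetes:
    control:
      endpoints:
        external-mysql:
          cluster: kubernetes
          service: mysql
          namespace: default
          service_enabled: true
          create: true
          subsets:
            - ip: 192.0.2.10
              port:
                number: 3306
                name: mysql

With service_enabled and create set, the state renders both the Service and Endpoints manifests under /srv/kubernetes and applies them with kubectl, skipping the apply when the object already exists or when the noservices grain is set.
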
diff --git a/kubernetes/control/init.sls b/kubernetes/control/init.sls
index be31c21..508f167 100644
--- a/kubernetes/control/init.sls
+++ b/kubernetes/control/init.sls
@@ -12,6 +12,12 @@
{%- if control.role is defined %}
- kubernetes.control.role
{%- endif %}
+ {%- if control.priorityclass is defined %}
+ - kubernetes.control.priorityclass
+ {%- endif %}
+ {%- if control.endpoints is defined %}
+ - kubernetes.control.endpoint
+ {%- endif %}
/srv/kubernetes:
file.directory:
diff --git a/kubernetes/control/priorityclass.sls b/kubernetes/control/priorityclass.sls
new file mode 100644
index 0000000..247cc4e
--- /dev/null
+++ b/kubernetes/control/priorityclass.sls
@@ -0,0 +1,35 @@
+{% from "kubernetes/map.jinja" import control with context %}
+include:
+ - kubernetes.control
+
+{%- for priorityclass_name, priorityclass in control.priorityclass.items() %}
+ {%- set priorityclass_name = priorityclass.name|default(priorityclass_name) %}
+ {%- set priorityclass_value = priorityclass.priority_value %}
+ {%- set is_default_priorityclass = priorityclass.is_default|default(False) %}
+ {%- set priorityclass_description = priorityclass.description|default(priorityclass_name) %}
+
+/srv/kubernetes/priorityclasses/{{ priorityclass_name }}.yml:
+ file.managed:
+ - source: salt://kubernetes/files/priorityclass.yml
+ - template: jinja
+ - makedirs: true
+ - require:
+ - file: /srv/kubernetes
+ - defaults:
+ priorityclass: {{ priorityclass|yaml }}
+ priorityclass_name: {{ priorityclass_name }}
+ priorityclass_value: {{ priorityclass_value }}
+ is_default_priorityclass: {{ is_default_priorityclass }}
+ priorityclass_description: {{ priorityclass_description }}
+
+kubernetes_priorityclass_create_{{ priorityclass_name }}:
+ cmd.run:
+ - name: kubectl apply -f /srv/kubernetes/priorityclasses/{{ priorityclass_name }}.yml
+ - unless: kubectl get priorityclass -o=custom-columns=NAME:.metadata.name | grep -xq {{ priorityclass_name }}
+ {%- if grains.get('noservices') %}
+ - onlyif: /bin/false
+ {%- endif %}
+ - require:
+ - file: /srv/kubernetes/priorityclasses/{{ priorityclass_name }}.yml
+
+{%- endfor %}
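
The priorityclass state reads kubernetes:control:priorityclass, where only priority_value is required; name, is_default, and description fall back to the pillar key, False, and the class name respectively. An illustrative pillar sketch with placeholder values:

parameters:
  kubernetes:
    control:
      priorityclass:
        high-priority:
          priority_value: 100000
          is_default: false
          description: "Placeholder class for latency-sensitive workloads"
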
diff --git a/kubernetes/control/service.sls b/kubernetes/control/service.sls
index e7e9330..3e28bf7 100644
--- a/kubernetes/control/service.sls
+++ b/kubernetes/control/service.sls
@@ -3,7 +3,7 @@
- kubernetes.control
{%- for service_name, service in control.service.items() %}
- {%- if service.enabled %}
+ {%- if service.get('enabled', false) %}
/srv/kubernetes/services/{{ service.cluster }}/{{ service_name }}-svc.yml:
file.managed:
@@ -17,6 +17,19 @@
- defaults:
service: {{ service|yaml }}
+ {%- if service.get('create', false) %}
+ {%- set service_real_name = service.service + '-' + service.role if service.role is defined else service.service %}
+kubernetes_service_create_{{ service_name }}:
+ cmd.run:
+ - name: kubectl apply -f /srv/kubernetes/services/{{ service.cluster }}/{{ service_name }}-svc.yml
+ - unless: kubectl get service -o=custom-columns=NAME:.metadata.name --namespace {{ service.namespace }} | grep -xq {{ service_real_name }}
+ {%- if grains.get('noservices') %}
+ - onlyif: /bin/false
+ {%- endif %}
+ - watch:
+ - file: /srv/kubernetes/services/{{ service.cluster }}/{{ service_name }}-svc.yml
+ {%- endif %}
+
{%- endif %}
/srv/kubernetes/{{ service.kind|lower }}/{{ service_name }}-{{ service.kind }}.yml:
@@ -31,6 +44,19 @@
- defaults:
service: {{ service|yaml }}
+ {%- if service.get('create', false) %}
+ {%- set service_real_name = service.service + '-' + service.role if service.role is defined else service.service %}
+kubernetes_{{ service.kind|lower }}_create_{{ service_name }}:
+ cmd.run:
+ - name: kubectl apply -f /srv/kubernetes/{{ service.kind|lower }}/{{ service_name }}-{{ service.kind }}.yml
+ - unless: kubectl get {{ service.kind|lower }} -o=custom-columns=NAME:.metadata.name --namespace {{ service.namespace }} | grep -xq {{ service_real_name }}
+ {%- if grains.get('noservices') %}
+ - onlyif: /bin/false
+ {%- endif %}
+ - watch:
+ - file: /srv/kubernetes/{{ service.kind|lower }}/{{ service_name }}-{{ service.kind }}.yml
+ {%- endif %}
+
{%- endfor %}
{%- for node_name, node_grains in salt['mine.get']('*', 'grains.items').items() %}
@@ -51,6 +77,19 @@
- defaults:
service: {{ service|yaml }}
+ {%- if service.get('create', false) %}
+ {%- set service_real_name = service.service + '-' + service.role if service.role is defined else service.service %}
+kubernetes_service_create_{{ node_name }}:
+ cmd.run:
+ - name: kubectl apply -f /srv/kubernetes/services/{{ node_name }}-svc.yml
+ - unless: kubectl get service -o=custom-columns=NAME:.metadata.name --namespace {{ service.namespace }} | grep -xq {{ service_real_name }}
+ {%- if grains.get('noservices') %}
+ - onlyif: /bin/false
+ {%- endif %}
+ - watch:
+ - file: /srv/kubernetes/services/{{ node_name }}-svc.yml
+ {%- endif %}
+
{%- endif %}
/srv/kubernetes/{{ service.kind|lower }}/{{ node_name }}-{{ service.kind }}.yml:
file.managed:
@@ -64,6 +103,19 @@
- defaults:
service: {{ service|yaml }}
+ {%- if service.get('create', false) %}
+ {%- set service_real_name = service.service + '-' + service.role if service.role is defined else service.service %}
+kubernetes_{{ service.kind|lower }}_create_{{ node_name }}:
+ cmd.run:
+ - name: kubectl apply -f /srv/kubernetes/{{ service.kind|lower }}/{{ node_name }}-{{ service.kind }}.yml
+ - unless: kubectl get {{ service.kind|lower }} -o=custom-columns=NAME:.metadata.name --namespace {{ service.namespace }} | grep -xq {{ service_real_name }}
+ {%- if grains.get('noservices') %}
+ - onlyif: /bin/false
+ {%- endif %}
+ - watch:
+ - file: /srv/kubernetes/{{ service.kind|lower }}/{{ node_name }}-{{ service.kind }}.yml
+ {%- endif %}
+
{%- endif %}
{%- endfor %}
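
With the new create flag, a definition under kubernetes:control:service is no longer only rendered to /srv/kubernetes but also applied through kubectl, guarded by the same unless/noservices checks as above. A minimal pillar sketch limited to the keys visible in this diff (ports and container fields follow the unchanged parts of svc.yml and rc.yml and are omitted); all values are placeholders:

parameters:
  kubernetes:
    control:
      service:
        mysql:
          enabled: true
          create: true
          cluster: kubernetes
          namespace: default
          service: mysql
          role: server
          kind: ReplicationController
          apiVersion: v1
          replicas: 1
          type: ClusterIP

Because role is defined here, the rendered objects are named mysql-server, which is what the unless check greps for via service_real_name.
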
diff --git a/kubernetes/files/cloudprovider/cloud-config-openstack.conf b/kubernetes/files/cloudprovider/cloud-config-openstack.conf
index 92a1cdd..83bdd8d 100644
--- a/kubernetes/files/cloudprovider/cloud-config-openstack.conf
+++ b/kubernetes/files/cloudprovider/cloud-config-openstack.conf
@@ -22,15 +22,15 @@
[LoadBalancer]
-{%- if common.cloudprovider.params.subnet_id is defined %}
use-octavia=true
+{%- if common.cloudprovider.params.subnet_id is defined %}
subnet-id={{ common.cloudprovider.params.subnet_id }}
{%- endif %}
{%- if common.cloudprovider.params.lb_method is defined %}
lb-method={{ common.cloudprovider.params.lb_method }}
{%- endif %}
-{%- if common.cloudprovider.params.floating_network_id is defined %}
-floating-network-id={{ common.cloudprovider.params.floating_network_id }}
+{%- if common.cloudprovider.params.floating_net_id is defined %}
+floating-network-id={{ common.cloudprovider.params.floating_net_id }}
{%- endif %}
{%- if common.cloudprovider.params.create_monitor is defined %}
create-monitor={{ common.cloudprovider.params.create_monitor }}
@@ -46,3 +46,17 @@
[BlockStorage]
ignore-volume-az=true
+
+
+{%- if pillar.kubernetes.master is defined and cloudconfig_type == "external" %}
+[Networking]
+{%- if common.cloudprovider.params.internal_net_name is defined %}
+internal-network-name={{ common.cloudprovider.params.internal_net_name }}
+{%- endif %}
+{%- if common.cloudprovider.params.public_net_name is defined %}
+public-network-name={{ common.cloudprovider.params.public_net_name }}
+{%- endif %}
+{%- if common.cloudprovider.params.ipv6_support_disabled is defined %}
+ipv6-support-disabled={{ common.cloudprovider.params.ipv6_support_disabled }}
+{%- endif %}
+{%- endif %}
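
For reference, with placeholder parameters like the pillar sketch above, the sections touched here would render roughly as follows in the external variant of the file:

[LoadBalancer]
use-octavia=true
subnet-id=00000000-0000-0000-0000-000000000000
lb-method=ROUND_ROBIN
floating-network-id=00000000-0000-0000-0000-000000000001

[Networking]
internal-network-name=k8s-internal-net
public-network-name=public
ipv6-support-disabled=true

The copy written to /etc/kubernetes/cloud-config.intree is rendered from the same template but omits the [Networking] block, since cloudconfig_type is set to 'intree' there.
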
diff --git a/kubernetes/files/endpoint.yml b/kubernetes/files/endpoint.yml
new file mode 100644
index 0000000..6109bf8
--- /dev/null
+++ b/kubernetes/files/endpoint.yml
@@ -0,0 +1,18 @@
+{% from "kubernetes/map.jinja" import control with context %}
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: {{ endpoint_name }}
+ {%- if endpoint.namespace is defined %}
+ namespace: {{ endpoint.namespace }}
+ {%- endif %}
+subsets:
+{%- for subset in endpoint.subsets %}
+ - addresses:
+ - ip: {{ subset.ip }}
+ ports:
+ - port: {{ subset.port.number }}
+ {%- if subset.port.name is defined %}
+ name: {{ subset.port.name }}
+ {%- endif %}
+{%- endfor %}
diff --git a/kubernetes/files/kube-addons/alertmanager/alertmanager-deploy.yml b/kubernetes/files/kube-addons/alertmanager/alertmanager-deploy.yml
index eeb192d..7732239 100644
--- a/kubernetes/files/kube-addons/alertmanager/alertmanager-deploy.yml
+++ b/kubernetes/files/kube-addons/alertmanager/alertmanager-deploy.yml
@@ -19,10 +19,10 @@
labels:
k8s-app: alertmanager
annotations:
- scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
spec:
serviceAccountName: alertmanager
+ priorityClassName: system-cluster-critical
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
diff --git a/kubernetes/files/kube-addons/dns/kubedns-rc.yaml b/kubernetes/files/kube-addons/dns/kubedns-rc.yaml
index dcc2eb9..718f7c5 100644
--- a/kubernetes/files/kube-addons/dns/kubedns-rc.yaml
+++ b/kubernetes/files/kube-addons/dns/kubedns-rc.yaml
@@ -42,7 +42,6 @@
labels:
k8s-app: kube-dns
annotations:
- scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
{%- if common.addons.dns.cni is defined %}
cni: {{ common.addons.dns.cni }}
@@ -51,6 +50,7 @@
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
+ priorityClassName: system-cluster-critical
containers:
- name: kubedns
image: {{ common.addons.dns.get('kubedns_image', 'gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.5') }}
diff --git a/kubernetes/files/kube-addons/fluentd/fluentd-aggregator-deploy.yaml b/kubernetes/files/kube-addons/fluentd/fluentd-aggregator-deploy.yaml
index cae9584..5cdeb08 100644
--- a/kubernetes/files/kube-addons/fluentd/fluentd-aggregator-deploy.yaml
+++ b/kubernetes/files/kube-addons/fluentd/fluentd-aggregator-deploy.yaml
@@ -22,7 +22,6 @@
k8s-app: fluentd-aggregator
version: v1
annotations:
- scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
spec:
serviceAccountName: fluentd
@@ -30,6 +29,7 @@
- key: "node-role.kubernetes.io/master"
effect: "NoSchedule"
operator: "Exists"
+ priorityClassName: system-cluster-critical
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
diff --git a/kubernetes/files/kube-addons/helm/helm-tiller-deploy.yml b/kubernetes/files/kube-addons/helm/helm-tiller-deploy.yml
index ef8df02..ffd159e 100644
--- a/kubernetes/files/kube-addons/helm/helm-tiller-deploy.yml
+++ b/kubernetes/files/kube-addons/helm/helm-tiller-deploy.yml
@@ -3,7 +3,6 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
- annotations:
labels:
app: helm
name: tiller
@@ -12,10 +11,7 @@
namespace: kube-system
spec:
replicas: 1
- selector:
- matchLabels:
- app: helm
- name: tiller
+ strategy: {}
template:
metadata:
labels:
@@ -27,14 +23,17 @@
{%- endif %}
spec:
{%- if 'RBAC' in master.auth.get('mode', "") %}
+ automountServiceAccountToken: true
serviceAccountName: tiller
{%- endif %}
- tolerations:
- - key: node-role.kubernetes.io/master
- effect: NoSchedule
containers:
- image: {{ common.addons.helm.tiller_image }}
imagePullPolicy: IfNotPresent
+ env:
+ - name: TILLER_NAMESPACE
+ value: kube-system
+ - name: TILLER_HISTORY_MAX
+ value: "0"
resources:
requests:
memory: "128Mi"
@@ -57,6 +56,9 @@
- containerPort: 44134
name: tiller
protocol: TCP
+ - containerPort: 44135
+ name: http
+ protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
@@ -68,7 +70,6 @@
successThreshold: 1
timeoutSeconds: 1
resources: {}
- terminationMessagePath: /dev/termination-log
dnsPolicy: ClusterFirst
restartPolicy: Always
securityContext: {}
diff --git a/kubernetes/files/kube-addons/prometheus/prometheus-server-deploy.yaml b/kubernetes/files/kube-addons/prometheus/prometheus-server-deploy.yaml
index 4d232c7..79d8f25 100644
--- a/kubernetes/files/kube-addons/prometheus/prometheus-server-deploy.yaml
+++ b/kubernetes/files/kube-addons/prometheus/prometheus-server-deploy.yaml
@@ -22,10 +22,10 @@
k8s-app: prometheus-server
version: v1
annotations:
- scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
spec:
serviceAccountName: prometheus-server
+ priorityClassName: system-cluster-critical
tolerations:
- key: "node-role.kubernetes.io/master"
effect: "NoSchedule"
diff --git a/kubernetes/files/kube-addons/telegraf/telegraf-ds.yaml b/kubernetes/files/kube-addons/telegraf/telegraf-ds.yaml
index 425e536..d0c33ea 100644
--- a/kubernetes/files/kube-addons/telegraf/telegraf-ds.yaml
+++ b/kubernetes/files/kube-addons/telegraf/telegraf-ds.yaml
@@ -16,7 +16,6 @@
k8s-app: telegraf
version: v1
annotations:
- scheduler.alpha.kubernetes.io/critical-pod: ''
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
priorityClassName: system-node-critical
diff --git a/kubernetes/files/kubelet/default.pool b/kubernetes/files/kubelet/default.pool
index 8e2a1e9..4ba5328 100644
--- a/kubernetes/files/kubelet/default.pool
+++ b/kubernetes/files/kubelet/default.pool
@@ -56,7 +56,7 @@
{%- endif %}
--file-check-frequency={{ pool.kubelet.frequency }} \
{%- if common.get('cloudprovider', {}).get('enabled') and common.get('cloudprovider', {}).get('provider') == 'openstack' %}
---cloud-provider=openstack \
+--cloud-provider=external \
--cloud-config=/etc/kubernetes/cloud-config \
{%- endif %}
{%- if common.addons.get('virtlet', {}).get('enabled') %}
diff --git a/kubernetes/files/manifest/kube-addon-manager.yml b/kubernetes/files/manifest/kube-addon-manager.yml
index 93211b1..b4acb46 100644
--- a/kubernetes/files/manifest/kube-addon-manager.yml
+++ b/kubernetes/files/manifest/kube-addon-manager.yml
@@ -6,12 +6,12 @@
name: kube-addon-manager
namespace: kube-system
annotations:
- scheduler.alpha.kubernetes.io/critical-pod: ''
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
labels:
component: kube-addon-manager
spec:
hostNetwork: true
+ priorityClassName: system-cluster-critical
containers:
- name: kube-addon-manager
image: {{ common.get('addonmanager', {}).get('image', 'k8s.gcr.io/kube-addon-manager:v8.9') }}
diff --git a/kubernetes/files/manifest/kube-apiserver.manifest b/kubernetes/files/manifest/kube-apiserver.manifest
index afa0c4c..f948fce 100644
--- a/kubernetes/files/manifest/kube-apiserver.manifest
+++ b/kubernetes/files/manifest/kube-apiserver.manifest
@@ -42,7 +42,6 @@
{%- if master.auth.get('token', {}).enabled|default(True) %}
--token-auth-file={{ master.auth.token.file|default("/srv/kubernetes/known_tokens.csv") }}
{%- endif %}
- --etcd-quorum-read=true
--v={{ master.get('verbosity', 2) }}
--allow-privileged=True
{%- if common.addons.get('virtlet', {}).get('enabled') %}
diff --git a/kubernetes/files/priorityclass.yml b/kubernetes/files/priorityclass.yml
new file mode 100644
index 0000000..a017972
--- /dev/null
+++ b/kubernetes/files/priorityclass.yml
@@ -0,0 +1,11 @@
+kind: PriorityClass
+apiVersion: scheduling.k8s.io/v1beta1
+metadata:
+ name: {{ priorityclass_name }}
+value: {{ priorityclass_value }}
+globalDefault: {{ is_default_priorityclass }}
+description: "{{ priorityclass_description }}"
+
+{#-
+vim: syntax=jinja
+-#}
diff --git a/kubernetes/files/rc.yml b/kubernetes/files/rc.yml
index 0a31b6c..d793510 100644
--- a/kubernetes/files/rc.yml
+++ b/kubernetes/files/rc.yml
@@ -2,10 +2,18 @@
apiVersion: {{ service.apiVersion }}
kind: {{ service.kind }}
metadata:
- name: {{ service.service }}-{{ service.role }}
- namespace: {{ service.namespace }}
+ {%- if service.role is defined %}
labels:
+ name: {{ service.service }}-{{ service.role }}
app: {{ service.service }}-{{ service.role }}
+ name: {{ service.service }}-{{ service.role }}
+ {%- else %}
+ labels:
+ name: {{ service.service }}
+ app: {{ service.service }}
+ name: {{ service.service }}
+ {%- endif %}
+ namespace: {{ service.namespace }}
spec:
replicas: {{ service.replicas }}
{%- if service.kind == 'PetSet' %}
@@ -14,7 +22,11 @@
template:
metadata:
labels:
+ {%- if service.role is defined %}
app: {{ service.service }}-{{ service.role }}
+ {%- else %}
+ app: {{ service.service }}
+ {%- endif %}
annotations:
{%- if service.hostname is defined %}
pod.beta.kubernetes.io/hostname: {{ service.hostname }}
diff --git a/kubernetes/files/svc.yml b/kubernetes/files/svc.yml
index b236417..d730639 100644
--- a/kubernetes/files/svc.yml
+++ b/kubernetes/files/svc.yml
@@ -2,10 +2,17 @@
apiVersion: v1
kind: Service
metadata:
+ {%- if service.role is defined %}
labels:
name: {{ service.service }}-{{ service.role }}
app: {{ service.service }}-{{ service.role }}
name: {{ service.service }}-{{ service.role }}
+ {%- else %}
+ labels:
+ name: {{ service.service }}
+ app: {{ service.service }}
+ name: {{ service.service }}
+ {%- endif %}
namespace: {{ service.namespace }}
spec:
ports:
@@ -15,7 +22,11 @@
{%- endfor %}
type: {{ service.type }}
selector:
+ {%- if service.role is defined %}
app: {{ service.service }}-{{ service.role }}
+ {%- else %}
+ app: {{ service.service }}
+ {%- endif %}
{%- if service.cluster_ip is defined %}
clusterIP: {{ service.cluster_ip }}
{%- endif %}
diff --git a/kubernetes/master/controller.sls b/kubernetes/master/controller.sls
index 9d92467..ce31e34 100644
--- a/kubernetes/master/controller.sls
+++ b/kubernetes/master/controller.sls
@@ -163,7 +163,6 @@
--proxy-client-key-file={{ master.auth.proxy.client_key|default("/etc/kubernetes/ssl/kube-aggregator-proxy-client.key") }}
{%- endif %}
--anonymous-auth={{ master.auth.get('anonymous', {}).enabled|default(False) }}
- --etcd-quorum-read=true
--insecure-bind-address={{ master.apiserver.insecure_address }}
--insecure-port={{ master.apiserver.insecure_port }}
--secure-port={{ master.apiserver.secure_port }}
@@ -232,6 +231,11 @@
--root-ca-file=/etc/kubernetes/ssl/ca-{{ master.ca }}.crt
--service-account-private-key-file=/etc/kubernetes/ssl/kubernetes-server.key
--use-service-account-credentials
+{%- if common.get('cloudprovider', {}).get('enabled') and common.get('cloudprovider', {}).get('provider') == 'openstack' %}
+ --external-cloud-volume-plugin=openstack
+ --cloud-config /etc/kubernetes/cloud-config.intree
+ --cloud-provider external
+{%- endif %}
--v={{ master.get('verbosity', 2) }}
{%- if master.network.get('flannel', {}).get('enabled', False) %}
--allocate-node-cidrs=true
@@ -318,6 +322,13 @@
- file: /etc/kubernetes/cloud-config
- file: /etc/default/openstack-cloud-controller-manager
- file: /etc/kubernetes/controller-manager.kubeconfig
+ - file: /usr/bin/openstack-cloud-controller-manager
+
+kube_controller_manager_service:
+ service.running:
+ - name: kube-controller-manager
+ - watch:
+ - file: /etc/kubernetes/cloud-config.intree
{%- endif %}
{%- endif %}
diff --git a/kubernetes/master/kube-addons.sls b/kubernetes/master/kube-addons.sls
index 33140e0..8b22d57 100644
--- a/kubernetes/master/kube-addons.sls
+++ b/kubernetes/master/kube-addons.sls
@@ -375,6 +375,8 @@
- makedirs: True
{% endif %}
+{% endif %}
+
{%- if common.addons.get('metrics-server', {}).get('enabled', False) %}
{%- set metrics_server_resources = ['aggregated-metrics-reader.yaml','auth-delegator.yaml','auth-reader.yaml','metrics-apiservice.yaml','metrics-server-deployment.yaml','metrics-server-service.yaml','resource-reader.yaml'] %}
@@ -398,8 +400,6 @@
{% endif %}
-{% endif %}
-
{%- if common.addons.get('fluentd', {}).get('enabled') %}
/etc/kubernetes/addons/fluentd/fluentd-ns.yaml:
diff --git a/kubernetes/meta/fluentd.yml b/kubernetes/meta/fluentd.yml
index 623fcea..3a88acf 100644
--- a/kubernetes/meta/fluentd.yml
+++ b/kubernetes/meta/fluentd.yml
@@ -1,22 +1,21 @@
-{%- from "kubernetes/map.jinja" import common with context -%}
+{%- from "kubernetes/map.jinja" import common, master, pool with context -%}
{%- if pillar.get('fluentd', {}).get('agent', {}).get('enabled', False) %}
-{%- from "kubernetes/map.jinja" import pool, master %}
-{%- if pool.get('enabled', False) %}
-{% set network = pool.get('network', {}) %}
-{%- else %}
-{%- if master.get('enabled', False) %}
-{% set network = master.get('network', {}) %}
-{% endif %}
-{% endif %}
+ {%- if pool.get('enabled', False) %}
+ {% set network = pool.get('network', {}) %}
+ {%- else %}
+ {%- if master.get('enabled', False) %}
+ {% set network = master.get('network', {}) %}
+ {% endif %}
+ {% endif %}
-{%- set positiondb = pillar.fluentd.agent.dir.positiondb %}
+ {%- set positiondb = pillar.fluentd.agent.dir.positiondb %}
agent:
plugin:
fluent-plugin-kubernetes_metadata_filter:
deb: ['td-agent-additional-plugins']
config:
label:
- {%- if pillar.docker is defined %}
+ {%- if pillar.docker is defined %}
docker:
filter:
add_drop_tag:
@@ -26,7 +25,7 @@
record:
- name: drop_event
value: ${ record.fetch('attrs', {}).fetch('io.kubernetes.pod.name', '') }
- {%- endif %}
+ {%- endif %}
kubernetes:
input:
container:
@@ -36,10 +35,16 @@
path_key: log_path
pos_file: {{ positiondb }}/kubernetes.pos
parser:
+ {%- if pillar.docker is defined %}
type: json
+ {%- else %}
+ # Containerd CRI log format https://regex101.com/r/BAw3bQ/1
+ type: regexp
+ format: /^(?<time>.+) (?<stream>stdout|stderr)( (?<logtag>.))? (?<Payload>.*)$/
+ {%- endif %}
time_format: '%Y-%m-%dT%H:%M:%S.%NZ'
keep_time_key: false
- {%- if network is defined and network.get('calico', {}).get('enabled', False) %}
+ {%- if network is defined and network.get('calico', {}).get('enabled', False) %}
bird:
type: tail
tag: kubernetes.calico.bird
@@ -76,19 +81,19 @@
time_key: Timestamp
keep_time_key: false
format: '/^(?<Timestamp>[^ ]+ [^ ]+) \[(?<orig_severity_label>[^ ]+)\]\[\d+?\] (?<Payload>.*)$/'
- {%- endif %}
+ {%- endif %}
filter:
add_kubernetes_meta:
tag: 'temp.kubernetes.container.**'
type: kubernetes_metadata
kubernetes_url: https://{{ pool.apiserver.host }}:{{ pool.apiserver.secure_port }}
- {%- if common.get('cloudprovider', {}).get('enabled') and common.get('cloudprovider', {}).get('provider') == 'openstack' %}
+ {%- if common.get('cloudprovider', {}).get('enabled') and common.get('cloudprovider', {}).get('provider') == 'openstack' %}
client_cert: /etc/kubernetes/ssl/kubelet-client-fqdn.crt
client_key: /etc/kubernetes/ssl/kubelet-client-fqdn.key
- {%- else %}
+ {%- else %}
client_cert: /etc/kubernetes/ssl/kubelet-client.crt
client_key: /etc/kubernetes/ssl/kubelet-client.key
- {%- endif %}
+ {%- endif %}
ca_file: /etc/kubernetes/ssl/ca-kubernetes.crt
verify_ssl: True
enrich_container:
@@ -104,7 +109,7 @@
value: 6
- name: programname
value: ${ record['kubernetes']['container_name'] }
- {%- if network is defined and network.get('calico', {}).get('enabled', False) %}
+ {%- if network is defined and network.get('calico', {}).get('enabled', False) %}
enrich_bird:
tag: 'kubernetes.calico.bird'
type: record_transformer
@@ -140,7 +145,7 @@
value: ${ {'DEBUG'=>7,'INFO'=>6,'WARNING'=>4,'ERROR'=>3,'FATAL'=>2,'PANIC'=>1}[record['orig_severity_label']].to_i }
- name: programname
value: calico-felix
- {%- endif %}
+ {%- endif %}
match:
cast_service_tag:
tag: 'temp.kubernetes.container.**'
diff --git a/metadata/service/control/priorityclasses/critical-priority.yml b/metadata/service/control/priorityclasses/critical-priority.yml
new file mode 100644
index 0000000..b24d213
--- /dev/null
+++ b/metadata/service/control/priorityclasses/critical-priority.yml
@@ -0,0 +1,6 @@
+parameters:
+ kubernetes:
+ control:
+ priorityclass:
+ critical-priority:
+ priority_value: 1000000000
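
Given this default pillar and the priorityclass.yml template added above, the class that kubernetes.control.priorityclass applies should render roughly as follows (globalDefault and description take their template defaults, since only priority_value is set here):

kind: PriorityClass
apiVersion: scheduling.k8s.io/v1beta1
metadata:
  name: critical-priority
value: 1000000000
globalDefault: False
description: "critical-priority"

The state then runs kubectl apply -f /srv/kubernetes/priorityclasses/critical-priority.yml, guarded by the same noservices check as the other control states.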