{%- from "kubernetes/map.jinja" import common with context -%}
{%- from "kubernetes/map.jinja" import pool with context -%}
{%- from "kubernetes/map.jinja" import version %}

{# CNI plugin configuration is off by default; the branches below enable it where needed #}
{%- set add_networking_plugin_conf = False %}
{%- if pillar.kubernetes.get('master', {}).get('network', {}).get('opencontrail', {}).get('enabled', false) %}
{%- if pillar.kubernetes.get('master', {}).get('network', {}).get('opencontrail', {}).get('version', 3.0) >= 4.0 %}
{%- if pillar.kubernetes.get('master', {}).get('enabled', false) %}
{# In case of OC4 the CNI plugin must not be used on the master #}
{%- set add_networking_plugin_conf = False %}
{%- endif %}
{%- else %}
{# In case of OC3 the CNI plugin is used on every node #}
{%- set add_networking_plugin_conf = True %}
{%- endif %}
{%- else %}
{# In case of Calico, or on any compute node, the CNI plugin is used everywhere #}
{%- set add_networking_plugin_conf = True %}
{%- endif %}

# daemon_opts has to be kept at the end, so they'll overwrite any prior configuration
DAEMON_ARGS="\
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
--pod-manifest-path=/etc/kubernetes/manifests \
--address={{ pool.kubelet.address }} \
--allow-privileged={{ pool.kubelet.allow_privileged }} \
--node-ip={{ pool.kubelet.address }} \
--cluster-dns={{ common.addons.dns.server }} \
--cluster-domain={{ common.addons.dns.domain|replace('_', '-') }} \
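{#- With the OpenStack cloud provider the node is registered under its fully qualified name #}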
{%- if common.get('cloudprovider', {}).get('enabled') and common.get('cloudprovider', {}).get('provider') == 'openstack' %}
--hostname-override={{ pool.host.name }}.{{ common.cluster_domain }} \
{%- else %}
--hostname-override={{ pool.host.name }} \
{%- endif %}
--v={{ pool.get('verbosity', 2) }} \
{%- if common.hyperkube.pause_image is defined %}
--pod-infra-container-image={{ common.hyperkube.pause_image }} \
{%- endif %}
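{#- --fail-swap-on is only available since Kubernetes 1.8 #}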
{%- if salt['pkg.version_cmp'](version, '1.8') >= 0 %}
--fail-swap-on={{ pool.kubelet.fail_on_swap }} \
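{#- Virtlet relies on mount propagation, so enable the feature gate explicitly when it is deployed #}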
{%- if common.addons.get('virtlet', {}).get('enabled') %}
--feature-gates=MountPropagation=true \
{%- endif %}
{%- endif %}
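{#- Node role labels; masters can additionally be tainted as unschedulable #}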
{%- if pillar.kubernetes.master is defined %}
--node-labels=node-role.kubernetes.io/master=true \
{%- if pillar.kubernetes.get('master', {}).get('unschedulable', false) %}
--register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
{%- endif %}
{%- else %}
--node-labels=node-role.kubernetes.io/node=true \
{%- if common.addons.get('virtlet', {}).get('enabled') %}
--node-labels=extraRuntime=virtlet \
{%- endif %}
{%- endif %}
{%- if add_networking_plugin_conf %}
--network-plugin=cni \
--cni-bin-dir={{ pool.apiserver.get('cni_bin_dir', '/opt/cni/bin') }} \
{%- endif %}
--file-check-frequency={{ pool.kubelet.frequency }} \
{%- if common.get('cloudprovider', {}).get('enabled') %}
--cloud-provider=external \
{%- if common.get('cloudprovider', {}).get('provider') == 'openstack' %}
--cloud-config=/etc/kubernetes/cloud-config \
{%- endif %}
{%- endif %}
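{#- Virtlet: the kubelet talks to the CRI proxy socket; kubelets older than 1.7 expect a bare path, newer ones need the unix:// scheme #}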
{%- if common.addons.get('virtlet', {}).get('enabled') %}
--container-runtime={{ pool.get('container-runtime', 'remote') }} \
{%- if salt['pkg.version_cmp'](version, '1.7') < 0 %}
--container-runtime-endpoint={{ pool.get('container-runtime-endpoint', '/var/run/criproxy.sock') }} \
--image-service-endpoint={{ pool.get('image-service-endpoint', '/var/run/criproxy.sock') }} \
{%- else %}
--container-runtime-endpoint={{ pool.get('container-runtime-endpoint', 'unix:///var/run/criproxy.sock') }} \
--image-service-endpoint={{ pool.get('image-service-endpoint', 'unix:///var/run/criproxy.sock') }} \
{%- endif %}
--enable-controller-attach-detach={{ pool.get('enable-controller-attach-detach', 'false') }} \
{%- endif %}
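{#- containerd as the container runtime (only when virtlet is not enabled) #}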
{%- if common.get('containerd', {}).get('enabled') and not common.addons.get('virtlet', {}).get('enabled') %}
--container-runtime={{ pool.get('container-runtime', 'remote') }} \
--container-runtime-endpoint={{ pool.get('container-runtime-endpoint', 'unix:///run/containerd/containerd.sock') }} \
{%- endif %}
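{#- Extra kubelet flags from pillar; rendered last so they override anything set above #}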
{%- for key, value in pool.get('kubelet', {}).get('daemon_opts', {}).items() %}
--{{ key }}={{ value }} \
{%- endfor %}
"