Merge pull request #14 from tcpcloud/systemd
enable systemd for kubernetes controller
diff --git a/README.rst b/README.rst
index adfa3cd..f366287 100644
--- a/README.rst
+++ b/README.rst
@@ -174,6 +174,22 @@
network:
engine: opencontrail
+Kubernetes control plane running in systemd
+-------------------------------------------
+
+By default kube-apiserver, kube-scheduler, kube-controller-manager, kube-proxy and etcd run in Docker containers defined by manifests. For a stable production environment these services should be run under systemd.
+
+.. code-block:: yaml
+
+ kubernetes:
+ master:
+ container: false
+
+ kubernetes:
+ pool:
+ container: false
+
+
Kubernetes with Flannel
-----------------------
diff --git a/kubernetes/files/etcd/default b/kubernetes/files/etcd/default
new file mode 100644
index 0000000..78e57ea
--- /dev/null
+++ b/kubernetes/files/etcd/default
@@ -0,0 +1,264 @@
+{%- from "kubernetes/map.jinja" import master with context %}
+## etcd(1) daemon options
+## See "/usr/share/doc/etcd/Documentation/configuration.md.gz".
+
+### Member Flags
+
+##### -name
+## Human-readable name for this member.
+## default: host name returned by `hostname`.
+## This value is referenced as this node's own entries listed in the `-initial-cluster`
+## flag (Ex: `default=http://localhost:2380` or `default=http://localhost:2380,default=http://localhost:7001`).
+## This needs to match the key used in the flag if you're using [static bootstrapping](clustering.md#static).
+# ETCD_NAME="hostname"
+ETCD_NAME="{{ master.etcd.name }}"
+
+##### -data-dir
+## Path to the data directory.
+# ETCD_DATA_DIR="/var/lib/etcd/default"
+ETCD_DATA_DIR="/var/lib/etcd/default"
+
+##### -wal-dir
+## Path to the dedicated wal directory. If this flag is set, etcd will write the
+## WAL files to the walDir rather than the dataDir. This allows a dedicated disk
+## to be used, and helps avoid io competition between logging and other IO operations.
+## default: ""
+# ETCD_WAL_DIR
+
+##### -snapshot-count
+## Number of committed transactions to trigger a snapshot to disk.
+## default: "10000"
+# ETCD_SNAPSHOT_COUNT="10000"
+
+##### -heartbeat-interval
+## Time (in milliseconds) of a heartbeat interval.
+## default: "100"
+# ETCD_HEARTBEAT_INTERVAL="100"
+
+##### -election-timeout
+## Time (in milliseconds) for an election to timeout.
+## See /usr/share/doc/etcd/Documentation/tuning.md
+## default: "1000"
+# ETCD_ELECTION_TIMEOUT="1000"
+
+##### -listen-peer-urls
+## List of URLs to listen on for peer traffic. This flag tells the etcd to accept
+## incoming requests from its peers on the specified scheme://IP:port combinations.
+## Scheme can be either http or https. If 0.0.0.0 is specified as the IP, etcd
+## listens to the given port on all interfaces. If an IP address is given as
+## well as a port, etcd will listen on the given port and interface.
+## Multiple URLs may be used to specify a number of addresses and ports to listen on.
+## The etcd will respond to requests from any of the listed addresses and ports.
+## example: "http://10.0.0.1:2380"
+## invalid example: "http://example.com:2380" (domain name is invalid for binding)
+## default: "http://localhost:2380,http://localhost:7001"
+# ETCD_LISTEN_PEER_URLS="http://localhost:2380,http://localhost:7001"
+ETCD_LISTEN_PEER_URLS="http://{{ master.etcd.host }}:2380"
+
+##### -listen-client-urls
+## List of URLs to listen on for client traffic. This flag tells the etcd to accept
+## incoming requests from the clients on the specified scheme://IP:port combinations.
+## Scheme can be either http or https. If 0.0.0.0 is specified as the IP, etcd
+## listens to the given port on all interfaces. If an IP address is given as
+## well as a port, etcd will listen on the given port and interface.
+## Multiple URLs may be used to specify a number of addresses and ports to listen on.
+## The etcd will respond to requests from any of the listed addresses and ports.
+## (ADVERTISE_CLIENT_URLS is required when LISTEN_CLIENT_URLS is set explicitly).
+## example: "http://10.0.0.1:2379"
+## invalid example: "http://example.com:2379" (domain name is invalid for binding)
+## default: "http://localhost:2379,http://localhost:4001"
+# ETCD_LISTEN_CLIENT_URLS="http://localhost:2379,http://localhost:4001"
+ETCD_LISTEN_CLIENT_URLS="{%- if master.etcd.host == '127.0.0.1' %}{% for member in master.etcd.members %}http://{{ member.host }}:4001,{% endfor %}{% else %}http://{{ master.etcd.host }}:4001,{% endif %}http://127.0.0.1:4001"
+##### -max-snapshots
+## Maximum number of snapshot files to retain (0 is unlimited)
+## default: 5
+# ETCD_MAX_SNAPSHOTS="5"
+
+##### -max-wals
+## Maximum number of wal files to retain (0 is unlimited)
+## default: 5
+# ETCD_MAX_WALS="5"
+
+##### -cors
+## Comma-separated whitelist of origins for CORS (cross-origin resource sharing).
+## default: none
+# ETCD_CORS
+
+### Clustering Flags
+## For an explanation of the various ways to do cluster setup, see:
+## /usr/share/doc/etcd/Documentation/clustering.md.gz
+##
+## The command line parameters starting with -initial-cluster will be
+## ignored on subsequent runs of etcd as they are used only during initial
+## bootstrap process.
+
+##### -initial-advertise-peer-urls
+## List of this member's peer URLs to advertise to the rest of the cluster.
+## These addresses are used for communicating etcd data around the cluster.
+## At least one must be routable to all cluster members.
+## These URLs can contain domain names.
+## example: "http://example.com:2380, http://10.0.0.1:2380"
+## default: "http://localhost:2380,http://localhost:7001"
+# ETCD_INITIAL_ADVERTISE_PEER_URLS="http://localhost:2380,http://localhost:7001"
+ETCD_INITIAL_ADVERTISE_PEER_URLS="http://{{ master.etcd.host }}:2380"
+
+##### -initial-cluster
+## initial cluster configuration for bootstrapping.
+## The key is the value of the `-name` flag for each node provided.
+## The default uses `default` for the key because this is the default for the `-name` flag.
+## default: "default=http://localhost:2380,default=http://localhost:7001"
+# ETCD_INITIAL_CLUSTER="default=http://localhost:2380,default=http://localhost:7001"
+ETCD_INITIAL_CLUSTER="{% for member in master.etcd.members %}{{ member.name }}={%- if master.etcd.host == '127.0.0.1' %}http://127.0.0.1:2380{% else %}http://{{ member.host }}:2380{% endif %}{% if not loop.last %},{% endif %}{% endfor %}"
+##### -initial-cluster-state
+## Initial cluster state ("new" or "existing"). Set to `new` for all members
+## present during initial static or DNS bootstrapping. If this option is set to
+## `existing`, etcd will attempt to join the existing cluster. If the wrong
+## value is set, etcd will attempt to start but fail safely.
+## default: "new"
+# ETCD_INITIAL_CLUSTER_STATE="existing"
+ETCD_INITIAL_CLUSTER_STATE="new"
+
+##### -initial-cluster-token
+## Initial cluster token for the etcd cluster during bootstrap.
+## If you are spinning up multiple clusters (or creating and destroying a
+## single cluster) with same configuration for testing purpose, it is highly
+## recommended that you specify a unique initial-cluster-token for the
+## different clusters.
+## default: "etcd-cluster"
+# ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
+ETCD_INITIAL_CLUSTER_TOKEN="{{ master.etcd.token }}"
+
+##### -advertise-client-urls
+## List of this member's client URLs to advertise to the rest of the cluster.
+## These URLs can contain domain names.
+## example: "http://example.com:2379, http://10.0.0.1:2379"
+## Be careful if you are advertising URLs such as http://localhost:2379 from a
+## cluster member and are using the proxy feature of etcd. This will cause loops,
+## because the proxy will be forwarding requests to itself until its resources
+## (memory, file descriptors) are eventually depleted.
+## default: "http://localhost:2379,http://localhost:4001"
+# ETCD_ADVERTISE_CLIENT_URLS="http://localhost:2379,http://localhost:4001"
+ETCD_ADVERTISE_CLIENT_URLS="http://{{ master.etcd.host }}:4001"
+
+##### -discovery
+## Discovery URL used to bootstrap the cluster.
+## default: none
+# ETCD_DISCOVERY
+
+##### -discovery-srv
+## DNS srv domain used to bootstrap the cluster.
+## default: none
+# ETCD_DISCOVERY_SRV
+
+##### -discovery-fallback
+## Expected behavior ("exit" or "proxy") when discovery services fails.
+## default: "proxy"
+# ETCD_DISCOVERY_FALLBACK="proxy"
+
+##### -discovery-proxy
+## HTTP proxy to use for traffic to discovery service.
+## default: none
+# ETCD_DISCOVERY_PROXY
+
+### Proxy Flags
+
+##### -proxy
+## Proxy mode setting ("off", "readonly" or "on").
+## default: "off"
+# ETCD_PROXY="on"
+
+##### -proxy-failure-wait
+## Time (in milliseconds) an endpoint will be held in a failed state before being
+## reconsidered for proxied requests.
+## default: 5000
+# ETCD_PROXY_FAILURE_WAIT="5000"
+
+##### -proxy-refresh-interval
+## Time (in milliseconds) of the endpoints refresh interval.
+## default: 30000
+# ETCD_PROXY_REFRESH_INTERVAL="30000"
+
+##### -proxy-dial-timeout
+## Time (in milliseconds) for a dial to timeout or 0 to disable the timeout
+## default: 1000
+# ETCD_PROXY_DIAL_TIMEOUT="1000"
+
+##### -proxy-write-timeout
+## Time (in milliseconds) for a write to timeout or 0 to disable the timeout.
+## default: 5000
+# ETCD_PROXY_WRITE_TIMEOUT="5000"
+
+##### -proxy-read-timeout
+## Time (in milliseconds) for a read to timeout or 0 to disable the timeout.
+## Don't change this value if you use watches because they are using long polling requests.
+## default: 0
+# ETCD_PROXY_READ_TIMEOUT="0"
+
+### Security Flags
+
+##### -ca-file [DEPRECATED]
+## Path to the client server TLS CA file.
+## default: none
+# ETCD_CA_FILE=""
+
+##### -cert-file
+## Path to the client server TLS cert file.
+## default: none
+# ETCD_CERT_FILE=""
+
+##### -key-file
+## Path to the client server TLS key file.
+## default: none
+# ETCD_KEY_FILE=""
+
+##### -client-cert-auth
+## Enable client cert authentication.
+## default: false
+# ETCD_CLIENT_CERT_AUTH
+
+##### -trusted-ca-file
+## Path to the client server TLS trusted CA key file.
+## default: none
+# ETCD_TRUSTED_CA_FILE
+
+##### -peer-ca-file [DEPRECATED]
+## Path to the peer server TLS CA file. `-peer-ca-file ca.crt` could be replaced
+## by `-peer-trusted-ca-file ca.crt -peer-client-cert-auth` and etcd will perform the same.
+## default: none
+# ETCD_PEER_CA_FILE
+
+##### -peer-cert-file
+## Path to the peer server TLS cert file.
+## default: none
+# ETCD_PEER_CERT_FILE
+
+##### -peer-key-file
+## Path to the peer server TLS key file.
+## default: none
+# ETCD_PEER_KEY_FILE
+
+##### -peer-client-cert-auth
+## Enable peer client cert authentication.
+## default: false
+# ETCD_PEER_CLIENT_CERT_AUTH
+
+##### -peer-trusted-ca-file
+## Path to the peer server TLS trusted CA file.
+## default: none
+# ETCD_PEER_TRUSTED_CA_FILE
+
+### Logging Flags
+##### -debug
+## Drop the default log level to DEBUG for all subpackages.
+## default: false (INFO for all packages)
+# ETCD_DEBUG
+
+##### -log-package-levels
+## Set individual etcd subpackages to specific log levels.
+## An example being `etcdserver=WARNING,security=DEBUG`
+## default: none (INFO for all packages)
+# ETCD_LOG_PACKAGE_LEVELS
+
+
+#### Daemon parameters:
+# DAEMON_ARGS=""
diff --git a/kubernetes/files/manifest/etcd.manifest b/kubernetes/files/manifest/etcd.manifest
index 8984806..b3bca45 100644
--- a/kubernetes/files/manifest/etcd.manifest
+++ b/kubernetes/files/manifest/etcd.manifest
@@ -19,7 +19,7 @@
"command": [
"/bin/sh",
"-c",
- "/usr/local/bin/etcd --name {{ master.etcd.name }} --initial-cluster-state new --initial-advertise-peer-urls http://{{ master.etcd.host }}:2380 --listen-peer-urls http://{{ master.etcd.host }}:2380 --advertise-client-urls http://{{ master.etcd.host }}:4001 --listen-client-urls {%- if master.etcd.host == '127.0.0.1' %}{% for member in master.etcd.members %} http://{{ member.host }}:4001{% endfor %}{% else %} http://{{ master.etcd.host }}:4001{% endif %},http://127.0.0.1:4001 --initial-cluster {% for member in master.etcd.members %}{{ member.name }}={%- if master.etcd.host == '127.0.0.1' %}http://127.0.0.1:2380{% else %}http://{{ member.host }}:2380{% if not loop.last %},{% endif %}{% endif %}{% endfor %} --initial-cluster-token {{ master.etcd.token }} --data-dir /var/etcd/data 1>>/var/log/etcd.log 2>&1"
+ "/usr/local/bin/etcd --name {{ master.etcd.name }} --initial-cluster-state new --initial-advertise-peer-urls http://{{ master.etcd.host }}:2380 --listen-peer-urls http://{{ master.etcd.host }}:2380 --advertise-client-urls http://{{ master.etcd.host }}:4001 --listen-client-urls {%- if master.etcd.host == '127.0.0.1' %}{% for member in master.etcd.members %} http://{{ member.host }}:4001{% endfor %}{% else %} http://{{ master.etcd.host }}:4001{% endif %},http://127.0.0.1:4001 --initial-cluster {% for member in master.etcd.members %}{{ member.name }}={%- if master.etcd.host == '127.0.0.1' %}http://127.0.0.1:2380{% else %}http://{{ member.host }}:2380{% if not loop.last %},{% endif %}{% endif %}{% endfor %} --initial-cluster-token {{ master.etcd.token }} --data-dir /var/lib/etcd/default 1>>/var/log/etcd.log 2>&1"
],
"livenessProbe": {
"httpGet": {
@@ -42,7 +42,7 @@
],
"volumeMounts": [
{"name": "varetcd",
- "mountPath": "/var/etcd",
+ "mountPath": "/var/lib/etcd",
"readOnly": false
},
{"name": "varlogetcd",
@@ -55,7 +55,7 @@
"volumes":[
{ "name": "varetcd",
"hostPath": {
- "path": "/mnt/master-pd/var/etcd"}
+ "path": "/var/lib/etcd"}
},
{ "name": "varlogetcd",
"hostPath": {
diff --git a/kubernetes/map.jinja b/kubernetes/map.jinja
index 1f4afcb..ac5d008 100644
--- a/kubernetes/map.jinja
+++ b/kubernetes/map.jinja
@@ -12,7 +12,7 @@
{% set master = salt['grains.filter_by']({
'Debian': {
'pkgs': ['kubernetes-master'],
- 'services': ['kubelet'],
+ 'services': ['kube-apiserver','kube-scheduler','kube-controller-manager'],
},
'RedHat': {
'pkgs': [],
@@ -23,7 +23,7 @@
{% set pool = salt['grains.filter_by']({
'Debian': {
'pkgs': [],
- 'services': ['kubelet'],
+ 'services': ['kube-proxy'],
},
'RedHat': {
'pkgs': [],
diff --git a/kubernetes/master/api.sls b/kubernetes/master/api.sls
deleted file mode 100644
index f153e75..0000000
--- a/kubernetes/master/api.sls
+++ /dev/null
@@ -1,38 +0,0 @@
-{%- from "kubernetes/map.jinja" import master with context %}
-{%- if master.enabled %}
-
-/srv/kubernetes/known_tokens.csv:
- file.managed:
- - source: salt://kubernetes/files/known_tokens.csv
- - template: jinja
- - user: root
- - group: root
- - mode: 644
- - makedirs: true
-
-/srv/kubernetes/basic_auth.csv:
- file.managed:
- - source: salt://kubernetes/files/basic_auth.csv
- - template: jinja
- - user: root
- - group: root
- - mode: 644
- - makedirs: true
-
-/var/log/kube-apiserver.log:
- file.managed:
- - user: root
- - group: root
- - mode: 644
-
-/etc/kubernetes/manifests/kube-apiserver.manifest:
- file.managed:
- - source: salt://kubernetes/files/manifest/kube-apiserver.manifest
- - template: jinja
- - user: root
- - group: root
- - mode: 644
- - makedirs: true
- - dir_mode: 755
-
-{%- endif %}
\ No newline at end of file
diff --git a/kubernetes/master/controller-manager.sls b/kubernetes/master/controller-manager.sls
deleted file mode 100644
index a0c93f6..0000000
--- a/kubernetes/master/controller-manager.sls
+++ /dev/null
@@ -1,20 +0,0 @@
-{%- from "kubernetes/map.jinja" import master with context %}
-{%- if master.enabled %}
-
-/etc/kubernetes/manifests/kube-controller-manager.manifest:
- file.managed:
- - source: salt://kubernetes/files/manifest/kube-controller-manager.manifest
- - template: jinja
- - user: root
- - group: root
- - mode: 644
- - makedirs: true
- - dir_mode: 755
-
-/var/log/kube-controller-manager.log:
- file.managed:
- - user: root
- - group: root
- - mode: 644
-
-{%- endif %}
\ No newline at end of file
diff --git a/kubernetes/master/controller.sls b/kubernetes/master/controller.sls
new file mode 100644
index 0000000..f09fa29
--- /dev/null
+++ b/kubernetes/master/controller.sls
@@ -0,0 +1,166 @@
+{%- from "kubernetes/map.jinja" import master with context %}
+{%- if master.enabled %}
+
+/srv/kubernetes/known_tokens.csv:
+ file.managed:
+ - source: salt://kubernetes/files/known_tokens.csv
+ - template: jinja
+ - user: root
+ - group: root
+ - mode: 644
+ - makedirs: true
+
+/srv/kubernetes/basic_auth.csv:
+ file.managed:
+ - source: salt://kubernetes/files/basic_auth.csv
+ - template: jinja
+ - user: root
+ - group: root
+ - mode: 644
+ - makedirs: true
+
+{%- if master.get('container', True) %}
+
+/var/log/kube-apiserver.log:
+ file.managed:
+ - user: root
+ - group: root
+ - mode: 644
+
+/etc/kubernetes/manifests/kube-apiserver.manifest:
+ file.managed:
+ - source: salt://kubernetes/files/manifest/kube-apiserver.manifest
+ - template: jinja
+ - user: root
+ - group: root
+ - mode: 644
+ - makedirs: true
+ - dir_mode: 755
+
+/etc/kubernetes/manifests/kube-controller-manager.manifest:
+ file.managed:
+ - source: salt://kubernetes/files/manifest/kube-controller-manager.manifest
+ - template: jinja
+ - user: root
+ - group: root
+ - mode: 644
+ - makedirs: true
+ - dir_mode: 755
+
+/var/log/kube-controller-manager.log:
+ file.managed:
+ - user: root
+ - group: root
+ - mode: 644
+
+/etc/kubernetes/manifests/kube-scheduler.manifest:
+ file.managed:
+ - source: salt://kubernetes/files/manifest/kube-scheduler.manifest
+ - template: jinja
+ - user: root
+ - group: root
+ - mode: 644
+ - makedirs: true
+ - dir_mode: 755
+
+/var/log/kube-scheduler.log:
+ file.managed:
+ - user: root
+ - group: root
+ - mode: 644
+
+{%- else %}
+
+/etc/default/kube-apiserver:
+ file.managed:
+ - user: root
+ - group: root
+ - mode: 644
+ - contents: DAEMON_ARGS=" --insecure-bind-address={{ master.apiserver.insecure_address }} --etcd-servers={% for member in master.etcd.members %}http://{{ member.host }}:4001{% if not loop.last %},{% endif %}{% endfor %} --admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota --service-cluster-ip-range={{ master.service_addresses }} --client-ca-file=/etc/ssl/certs/ca-{{ master.ca }}.crt --basic-auth-file=/srv/kubernetes/basic_auth.csv --tls-cert-file=/etc/ssl/certs/kubernetes-server.crt --tls-private-key-file=/etc/ssl/private/kubernetes-server.key --secure-port=443 --bind-address={{ master.apiserver.address }} --token-auth-file=/srv/kubernetes/known_tokens.csv --v=2 --allow-privileged=True"
+
+/etc/default/kube-controller-manager:
+ file.managed:
+ - user: root
+ - group: root
+ - mode: 644
+ - contents: DAEMON_ARGS=" --master={{ master.apiserver.insecure_address }}:8080 --cluster-name=kubernetes --service-account-private-key-file=/etc/ssl/private/kubernetes-server.key --v=2 --root-ca-file=/etc/ssl/certs/ca-{{ master.ca }}.crt --leader-elect=true"
+
+/etc/default/kube-scheduler:
+ file.managed:
+ - user: root
+ - group: root
+ - mode: 644
+ - contents: DAEMON_ARGS=" --master={{ master.apiserver.insecure_address }}:8080 --v=2 --leader-elect=true"
+
+master_services:
+ service.running:
+ - names: {{ master.services }}
+ - enable: True
+ - watch:
+ - file: /etc/default/kube-apiserver
+ - file: /etc/default/kube-scheduler
+ - file: /etc/default/kube-controller-manager
+
+{%- endif %}
+
+{%- if not pillar.kubernetes.pool is defined %}
+
+/etc/default/kubelet:
+ file.managed:
+ - source: salt://kubernetes/files/kubelet/default.master
+ - template: jinja
+ - user: root
+ - group: root
+ - mode: 644
+
+/etc/kubernetes/config:
+ file.absent
+
+kubelet_service:
+ service.running:
+ - name: kubelet
+ - enable: True
+ - watch:
+ - file: /etc/default/kubelet
+
+{%- endif %}
+
+{%- for name,namespace in master.namespace.iteritems() %}
+
+{%- if namespace.enabled %}
+
+/registry/namespaces/{{ name }}:
+ etcd.set:
+ - value: '{"kind":"Namespace","apiVersion":"v1","metadata":{"name":"{{ name }}"},"spec":{"finalizers":["kubernetes"]},"status":{"phase":"Active"}}'
+
+{%- else %}
+
+/registry/namespaces/{{ name }}:
+ etcd.rm
+
+{%- endif %}
+
+{%- endfor %}
+
+{%- if master.registry.secret is defined %}
+
+{%- for name,registry in master.registry.secret.iteritems() %}
+
+{%- if registry.enabled %}
+
+/registry/secrets/{{ registry.namespace }}/{{ name }}:
+ etcd.set:
+ - value: '{"kind":"Secret","apiVersion":"v1","metadata":{"name":"{{ name }}","namespace":"{{ registry.namespace }}"},"data":{".dockerconfigjson":"{{ registry.key }}"},"type":"kubernetes.io/dockerconfigjson"}'
+
+{%- else %}
+
+/registry/secrets/{{ registry.namespace }}/{{ name }}:
+ etcd.rm
+
+{%- endif %}
+
+{%- endfor %}
+
+{%- endif %}
+
+{%- endif %}
\ No newline at end of file
diff --git a/kubernetes/master/etcd.sls b/kubernetes/master/etcd.sls
index 6a68ecc..3e297e2 100644
--- a/kubernetes/master/etcd.sls
+++ b/kubernetes/master/etcd.sls
@@ -1,11 +1,7 @@
{%- from "kubernetes/map.jinja" import master with context %}
{%- if master.enabled %}
-/var/log/etcd-events.log:
- file.managed:
- - user: root
- - group: root
- - mode: 644
+{%- if master.get('container', True) %}
/var/log/etcd.log:
file.managed:
@@ -13,16 +9,6 @@
- group: root
- mode: 644
-/var/etcd:
- file.directory:
- - user: root
- - group: root
- - dir_mode: 700
- - recurse:
- - user
- - group
- - mode
-
/etc/kubernetes/manifests/etcd.manifest:
file.managed:
- source: salt://kubernetes/files/manifest/etcd.manifest
@@ -33,4 +19,27 @@
- makedirs: true
- dir_mode: 755
+{%- else %}
+
+etcd_pkg:
+ pkg.installed:
+ - name: etcd
+
+/etc/default/etcd:
+ file.managed:
+ - source: salt://kubernetes/files/etcd/default
+ - template: jinja
+ - user: root
+ - group: root
+ - mode: 644
+
+etcd_service:
+ service.running:
+ - name: etcd
+ - enable: True
+ - watch:
+ - file: /etc/default/etcd
+
+{%- endif %}
+
{%- endif %}
\ No newline at end of file
diff --git a/kubernetes/master/init.sls b/kubernetes/master/init.sls
index 51a365d..cd7a77c 100644
--- a/kubernetes/master/init.sls
+++ b/kubernetes/master/init.sls
@@ -2,9 +2,6 @@
include:
- kubernetes.master.service
- kubernetes.master.etcd
-- kubernetes.master.api
-- kubernetes.master.controller-manager
-- kubernetes.master.scheduler
- kubernetes.master.kube-addons
{%- if master.network.engine == "opencontrail" %}
- kubernetes.master.opencontrail-network-manager
@@ -20,4 +17,4 @@
{%- if master.storage.get('engine', 'none') == 'glusterfs' %}
- kubernetes.master.glusterfs
{%- endif %}
-- kubernetes.master.kubelet
+- kubernetes.master.controller
diff --git a/kubernetes/master/kubelet.sls b/kubernetes/master/kubelet.sls
deleted file mode 100644
index 84b1bfe..0000000
--- a/kubernetes/master/kubelet.sls
+++ /dev/null
@@ -1,64 +0,0 @@
-{%- from "kubernetes/map.jinja" import master with context %}
-{%- if master.enabled %}
-
-{%- if not pillar.kubernetes.pool is defined %}
-
-/etc/default/kubelet:
- file.managed:
- - source: salt://kubernetes/files/kubelet/default.master
- - template: jinja
- - user: root
- - group: root
- - mode: 644
-
-/etc/kubernetes/config:
- file.absent
-
-master_services:
- service.running:
- - names: {{ master.services }}
- - enable: True
- - watch:
- - file: /etc/default/kubelet
-
-{%- endif %}
-
-{%- for name,namespace in master.namespace.iteritems() %}
-
-{%- if namespace.enabled %}
-
-/registry/namespaces/{{ name }}:
- etcd.set:
- - value: '{"kind":"Namespace","apiVersion":"v1","metadata":{"name":"{{ name }}"},"spec":{"finalizers":["kubernetes"]},"status":{"phase":"Active"}}'
-
-{%- else %}
-
-/registry/namespaces/{{ name }}:
- etcd.rm
-
-{%- endif %}
-
-{%- endfor %}
-
-{%- if master.registry.secret is defined %}
-
-{%- for name,registry in master.registry.secret.iteritems() %}
-
-{%- if registry.enabled %}
-
-/registry/secrets/{{ registry.namespace }}/{{ name }}:
- etcd.set:
- - value: '{"kind":"Secret","apiVersion":"v1","metadata":{"name":"{{ name }}","namespace":"{{ registry.namespace }}"},"data":{".dockerconfigjson":"{{ registry.key }}"},"type":"kubernetes.io/dockerconfigjson"}'
-
-{%- else %}
-
-/registry/secrets/{{ registry.namespace }}/{{ name }}:
- etcd.rm
-
-{%- endif %}
-
-{%- endfor %}
-
-{%- endif %}
-
-{%- endif %}
\ No newline at end of file
diff --git a/kubernetes/master/scheduler.sls b/kubernetes/master/scheduler.sls
deleted file mode 100644
index 235d9b4..0000000
--- a/kubernetes/master/scheduler.sls
+++ /dev/null
@@ -1,20 +0,0 @@
-{%- from "kubernetes/map.jinja" import master with context %}
-{%- if master.enabled %}
-
-/etc/kubernetes/manifests/kube-scheduler.manifest:
- file.managed:
- - source: salt://kubernetes/files/manifest/kube-scheduler.manifest
- - template: jinja
- - user: root
- - group: root
- - mode: 644
- - makedirs: true
- - dir_mode: 755
-
-/var/log/kube-scheduler.log:
- file.managed:
- - user: root
- - group: root
- - mode: 644
-
-{%- endif %}
\ No newline at end of file
diff --git a/kubernetes/pool/kube-proxy.sls b/kubernetes/pool/kube-proxy.sls
index e444604..c53cb24 100644
--- a/kubernetes/pool/kube-proxy.sls
+++ b/kubernetes/pool/kube-proxy.sls
@@ -1,6 +1,8 @@
{%- from "kubernetes/map.jinja" import pool with context %}
{%- if pool.enabled %}
+{%- if pool.get('container', True) %}
+
/etc/kubernetes/manifests/kube-proxy.manifest:
file.managed:
- source: salt://kubernetes/files/manifest/kube-proxy.manifest.pool
@@ -20,5 +22,22 @@
- mode: 644
- makedirs: true
+{%- else %}
+
+/etc/default/kube-proxy:
+ file.managed:
+ - user: root
+ - group: root
+ - mode: 644
+  - contents: DAEMON_ARGS=" --logtostderr=true --v=2 --kubeconfig=/etc/kubernetes/proxy.kubeconfig --master={%- if pool.apiserver.insecure.enabled %}http://{{ pool.apiserver.host }}:8080{%- else %}https://{{ pool.apiserver.host }}{%- endif %}{%- if pool.network.engine == 'calico' %} --proxy-mode=iptables{% endif %}"
+
+pool_services:
+ service.running:
+ - names: {{ pool.services }}
+ - enable: True
+ - watch:
+ - file: /etc/default/kube-proxy
+
+{%- endif %}
{%- endif %}
\ No newline at end of file
diff --git a/kubernetes/pool/kubelet.sls b/kubernetes/pool/kubelet.sls
index 7fb176b..bfacbb8 100644
--- a/kubernetes/pool/kubelet.sls
+++ b/kubernetes/pool/kubelet.sls
@@ -48,9 +48,9 @@
{%- endif %}
-pool_services:
+kubelet_service:
service.running:
- - names: {{ pool.services }}
+ - name: kubelet
- enable: True
- watch:
- file: /etc/default/kubelet