Merge tag '2016.12.1' into debian/unstable
2016.12.1
diff --git a/README.rst b/README.rst
index 7005d62..deff35b 100644
--- a/README.rst
+++ b/README.rst
@@ -57,12 +57,6 @@
enabled: true
replicas: 1
server: 10.254.0.10
- heapster_influxdb:
- enabled: true
- public_ip: 185.22.97.132
- ui:
- enabled: true
- public_ip: 185.22.97.131
admin:
password: password
username: admin
@@ -430,6 +424,164 @@
mount: /certs
path: /etc/certs
+PetSet manifest
+---------------------
+
+.. code-block:: yaml
+
+ service:
+ memcached:
+ apiVersion: apps/v1alpha1
+ kind: PetSet
+ service_name: 'memcached'
+ container:
+ memcached:
+ ...
+
+
+Configmap
+---------
+
+You can create ConfigMaps using the support layer between formulas. The
+mechanism is simple: for example, the nova formula ships a file
+``meta/config.yml`` that defines the config files used by that service and
+its roles.
+
+The Kubernetes formula can generate these files from a custom pillar and
+grains structure. This way you can run Docker images built in any way while
+still reusing your configuration management.
+
+Example pillar:
+
+.. code-block:: yaml
+
+    kubernetes:
+      control:
+        config_type: default|kubernetes # Output as single config files (default) or Kubernetes YAML
+        configmap:
+          nova-control:
+            grains:
+              # Alternate grains, as the OS running in the container may
+              # differ from the salt minion OS. Needed only if grains
+              # matter for config generation.
+              os_family: Debian
+            pillar:
+              # Generic pillar for nova controller
+              nova:
+                controller:
+                  enabled: true
+                  version: liberty
+            ...
+
+To determine whether a service supports config generation, check that its
+pillar declares support like this:
+
+.. code-block:: yaml
+
+ nova:
+ _support:
+ config:
+ enabled: true
+
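+For reference, a formula's ``meta/config.yml`` maps each config file to a
+template source, roughly like this (a hypothetical sketch; the ``source``
+path is illustrative, but the ``config``/``source``/``template`` keys are
+what ``kubernetes/control/cluster.sls`` reads):
+
+.. code-block:: yaml
+
+    config:
+      nova.conf:
+        # illustrative path; the real formula defines its own sources
+        source: salt://nova/files/nova-controller.conf
+        template: jinja
+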
+initContainers
+--------------
+
+Example pillar:
+
+.. code-block:: yaml
+
+ kubernetes:
+ control:
+ service:
+ memcached:
+ init_containers:
+ - name: test-mysql
+ image: busybox
+ command:
+ - sleep
+ - 3600
+ volumes:
+ - name: config
+ mount: /test
+ - name: test-memcached
+ image: busybox
+ command:
+ - sleep
+ - 3600
+ volumes:
+ - name: config
+ mount: /test
+
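+Init containers are rendered by ``kubernetes/files/rc.yml`` into the
+``pod.alpha.kubernetes.io/init-containers`` annotation. For the pillar
+above, the first entry comes out roughly as (a sketch of the generated
+manifest, trimmed to one container):
+
+.. code-block:: yaml
+
+    annotations:
+      pod.alpha.kubernetes.io/init-containers: '[
+        {
+          "name": "test-mysql",
+          "image": "busybox",
+          "command": ["sleep", "3600"],
+          "volumeMounts": [{"name": "config", "mountPath": "/test"}]
+        }
+      ]'
+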
+Affinity
+--------
+
+podAffinity
+===========
+
+Example pillar:
+
+.. code-block:: yaml
+
+ kubernetes:
+ control:
+ service:
+ memcached:
+ affinity:
+ pod_affinity:
+ name: podAffinity
+ expression:
+ label_selector:
+ name: labelSelector
+ selectors:
+ - key: app
+ value: memcached
+ topology_key: kubernetes.io/hostname
+
+podAntiAffinity
+===============
+
+Example pillar:
+
+.. code-block:: yaml
+
+ kubernetes:
+ control:
+ service:
+ memcached:
+ affinity:
+ anti_affinity:
+ name: podAntiAffinity
+ expression:
+ label_selector:
+ name: labelSelector
+ selectors:
+ - key: app
+ value: opencontrail-control
+ topology_key: kubernetes.io/hostname
+
+nodeAffinity
+===============
+
+Example pillar:
+
+.. code-block:: yaml
+
+ kubernetes:
+ control:
+ service:
+ memcached:
+ affinity:
+ node_affinity:
+ name: nodeAffinity
+ expression:
+ match_expressions:
+ name: matchExpressions
+ selectors:
+ - key: key
+ operator: In
+ values:
+ - value1
+ - value2
+
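+All three variants are rendered by ``kubernetes/files/rc.yml`` into the
+``scheduler.alpha.kubernetes.io/affinity`` annotation. The nodeAffinity
+pillar above produces roughly (a sketch of the generated annotation):
+
+.. code-block:: yaml
+
+    annotations:
+      scheduler.alpha.kubernetes.io/affinity: >
+        {
+          "nodeAffinity": {
+            "requiredDuringSchedulingIgnoredDuringExecution": {
+              "nodeSelectorTerms": [
+                {
+                  "matchExpressions": [
+                    {"key": "key", "operator": "In", "values": ["value1", "value2"]}
+                  ]
+                }
+              ]
+            }
+          }
+        }
+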
Volumes
-------
@@ -438,27 +590,117 @@
.. code-block:: yaml
- container:
+ service:
memcached:
+ container:
+ memcached:
+ volumes:
+ - name: volume1
+ mountPath: /volume
+ readOnly: True
...
- volumes:
- - name: /etc/certs
- mount: /certs
- type: hostPath
- path: /etc/certs
+ volume:
+ volume1:
+ name: /etc/certs
+ type: hostPath
+ path: /etc/certs
emptyDir
========
.. code-block:: yaml
- container:
+ service:
memcached:
+ container:
+ memcached:
+ volumes:
+ - name: volume1
+ mountPath: /volume
+ readOnly: True
...
- volumes:
- - name: /etc/certs
- mount: /certs
- type: emptyDir
+ volume:
+ volume1:
+ name: /etc/certs
+ type: emptyDir
+
+configMap
+=========
+
+.. code-block:: yaml
+
+ service:
+ memcached:
+ container:
+ memcached:
+ volumes:
+ - name: volume1
+ mountPath: /volume
+ readOnly: True
+ ...
+ volume:
+ volume1:
+ type: config_map
+ item:
+ configMap1:
+ key: config.conf
+ path: config.conf
+ configMap2:
+ key: policy.json
+ path: policy.json
+
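+The ``config_map`` volume type is rendered by ``kubernetes/files/rc.yml``
+into a versioned ConfigMap reference, roughly (a sketch of the generated
+volume definition; ``-1`` is the default ``version`` suffix):
+
+.. code-block:: yaml
+
+  volumes:
+  - name: volume1
+    configMap:
+      name: volume1-1
+      items:
+      - key: config.conf
+        path: config.conf
+      - key: policy.json
+        path: policy.json
+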
+To mount a single configuration file instead of the whole directory:
+
+.. code-block:: yaml
+
+ service:
+ memcached:
+ container:
+ memcached:
+ volumes:
+ - name: volume1
+ mountPath: /volume/config.conf
+ sub_path: config.conf
+
+Generating Jobs
+===============
+
+Example pillar:
+
+.. code-block:: yaml
+
+    kubernetes:
+      control:
+        job:
+          sleep:
+            job: sleep
+            restart_policy: Never
+            container:
+              sleep:
+                image: busybox
+                tag: latest
+                command:
+                  - sleep
+                  - "3600"
+
+Volumes and variables can be used in the same way as during Deployment generation.
+
+Custom params:
+
+.. code-block:: yaml
+
+    kubernetes:
+      control:
+        job:
+          sleep:
+            host_network: True
+            host_pid: True
+            container:
+              sleep:
+                privileged: True
+            node_selector:
+              - key: node
+                value: one
+            image_pull_secretes: password
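+
+For reference, the first ``sleep`` job example above renders through
+``salt://kubernetes/files/job.yml`` into roughly the following manifest (a
+sketch; the defaults shown come from the template):
+
+.. code-block:: yaml
+
+    apiVersion: batch/v1
+    kind: Job
+    metadata:
+      name: sleep-job
+      namespace: default
+    spec:
+      template:
+        spec:
+          containers:
+          - name: sleep
+            image: busybox:latest
+            imagePullPolicy: IfNotPresent
+            command:
+            - sleep
+            - "3600"
+          restartPolicy: Never
+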
Documentation and Bugs
======================
diff --git a/other-requirements.txt b/bindep.txt
similarity index 100%
rename from other-requirements.txt
rename to bindep.txt
diff --git a/kubernetes/control/cluster.sls b/kubernetes/control/cluster.sls
index 8e3dd2d..f597c40 100644
--- a/kubernetes/control/cluster.sls
+++ b/kubernetes/control/cluster.sls
@@ -5,6 +5,26 @@
file.directory:
- makedirs: true
+{%- if control.job is defined %}
+
+{%- for job_name, job in control.job.iteritems() %}
+
+/srv/kubernetes/jobs/{{ job_name }}-job.yml:
+ file.managed:
+ - source: salt://kubernetes/files/job.yml
+ - user: root
+ - group: root
+ - template: jinja
+ - makedirs: true
+ - require:
+ - file: /srv/kubernetes
+ - defaults:
+ job: {{ job|yaml }}
+
+{%- endfor %}
+
+{%- endif %}
+
{%- for service_name, service in control.service.iteritems() %}
{%- if service.enabled %}
@@ -23,7 +43,7 @@
{%- endif %}
-/srv/kubernetes/{{ service.cluster }}/{{ service_name }}-{{ service.kind }}.yml:
+/srv/kubernetes/{{ service.kind|lower }}/{{ service_name }}-{{ service.kind }}.yml:
file.managed:
- source: salt://kubernetes/files/rc.yml
- user: root
@@ -58,8 +78,7 @@
service: {{ service|yaml }}
{%- endif %}
-
-/srv/kubernetes/{{ service.cluster }}/{{ node_name }}-{{ service.kind }}.yml:
+/srv/kubernetes/{{ service.kind|lower }}/{{ node_name }}-{{ service.kind }}.yml:
file.managed:
- source: salt://kubernetes/files/rc.yml
- user: root
@@ -75,4 +94,60 @@
{%- endfor %}
-{%- endif %}
\ No newline at end of file
+{%- for configmap_name, configmap in control.get('configmap', {}).iteritems() %}
+{%- if configmap.enabled|default(True) %}
+
+{%- if configmap.pillar is defined %}
+{%- if control.config_type == "default" %}
+ {%- for service_name in configmap.pillar.keys() %}
+ {%- if pillar.get(service_name, {}).get('_support', {}).get('config', {}).get('enabled', False) %}
+
+ {%- set support_fragment_file = service_name+'/meta/config.yml' %}
+ {% macro load_support_file(pillar, grains) %}{% include support_fragment_file %}{% endmacro %}
+
+ {%- set service_config_files = load_support_file(configmap.pillar, configmap.get('grains', {}))|load_yaml %}
+ {%- for service_config_name, service_config in service_config_files.config.iteritems() %}
+
+/srv/kubernetes/configmap/{{ configmap_name }}/{{ service_config_name }}:
+ file.managed:
+ - source: {{ service_config.source }}
+ - user: root
+ - group: root
+ - template: {{ service_config.template }}
+ - makedirs: true
+ - require:
+ - file: /srv/kubernetes
+ - defaults:
+ pillar: {{ configmap.pillar|yaml }}
+ grains: {{ configmap.get('grains', {}) }}
+
+ {%- endfor %}
+ {%- endif %}
+ {%- endfor %}
+
+{%- else %}
+
+/srv/kubernetes/configmap/{{ configmap_name }}.yml:
+ file.managed:
+ - source: salt://kubernetes/files/configmap.yml
+ - user: root
+ - group: root
+ - template: jinja
+ - makedirs: true
+ - require:
+ - file: /srv/kubernetes
+ - defaults:
+ configmap_name: {{ configmap_name }}
+ configmap: {{ configmap|yaml }}
+ grains: {{ configmap.get('grains', {}) }}
+
+{%- endif %}
+
+{%- else %}
+{# TODO: configmap not using support between formulas #}
+{%- endif %}
+
+{%- endif %}
+{%- endfor %}
+
+{%- endif %}
diff --git a/kubernetes/files/calico/calico.conf b/kubernetes/files/calico/calico.conf
index dcd0c85..97810eb 100644
--- a/kubernetes/files/calico/calico.conf
+++ b/kubernetes/files/calico/calico.conf
@@ -6,5 +6,8 @@
"log_level": "info",
"ipam": {
"type": "calico-ipam"
+ },
+ "kubernetes": {
+ "kubeconfig": "/etc/kubernetes/kubelet.kubeconfig"
}
}
\ No newline at end of file
diff --git a/kubernetes/files/configmap.yml b/kubernetes/files/configmap.yml
new file mode 100644
index 0000000..d2469c0
--- /dev/null
+++ b/kubernetes/files/configmap.yml
@@ -0,0 +1,18 @@
+{%- from "kubernetes/map.jinja" import control with context %}
+{%- macro load_support_file(file, pillar, grains) %}{% include file %}{% endmacro %}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ configmap_name }}-{{ configmap.get('version', '1') }}
+ namespace: {{ configmap.get('namespace', 'default') }}
+data:
+ {%- for service_name in configmap.pillar.keys() %}
+ {%- if pillar.get(service_name, {}).get('_support', {}).get('config', {}).get('enabled', False) %}
+ {%- set support_fragment_file = service_name+'/meta/config.yml' %}
+ {%- set service_config_files = load_support_file(support_fragment_file, configmap.pillar, configmap.get('grains', {}))|load_yaml %}
+ {%- for service_config_name, service_config in service_config_files.config.iteritems() %}
+ {{ service_config_name }}: |
+ {{ load_support_file(service_config.source|replace('salt://', ''), configmap.pillar, configmap.get('grains', {}))|indent(4) }}
+ {%- endfor %}
+ {%- endif %}
+ {%- endfor %}
diff --git a/kubernetes/files/etcd/default b/kubernetes/files/etcd/default
deleted file mode 100644
index 78e57ea..0000000
--- a/kubernetes/files/etcd/default
+++ /dev/null
@@ -1,264 +0,0 @@
-{%- from "kubernetes/map.jinja" import master with context %}
-## etcd(1) daemon options
-## See "/usr/share/doc/etcd/Documentation/configuration.md.gz".
-
-### Member Flags
-
-##### -name
-## Human-readable name for this member.
-## default: host name returned by `hostname`.
-## This value is referenced as this node's own entries listed in the `-initial-cluster`
-## flag (Ex: `default=http://localhost:2380` or `default=http://localhost:2380,default=http://localhost:7001`).
-## This needs to match the key used in the flag if you're using [static boostrapping](clustering.md#static).
-# ETCD_NAME="hostname"
-ETCD_NAME="{{ master.etcd.name }}"
-
-##### -data-dir
-## Path to the data directory.
-# ETCD_DATA_DIR="/var/lib/etcd/default"
-ETCD_DATA_DIR="/var/lib/etcd/default"
-
-##### -wal-dir
-## Path to the dedicated wal directory. If this flag is set, etcd will write the
-## WAL files to the walDir rather than the dataDir. This allows a dedicated disk
-## to be used, and helps avoid io competition between logging and other IO operations.
-## default: ""
-# ETCD_WAL_DIR
-
-##### -snapshot-count
-## Number of committed transactions to trigger a snapshot to disk.
-## default: "10000"
-# ETCD_SNAPSHOT_COUNT="10000"
-
-##### -heartbeat-interval
-## Time (in milliseconds) of a heartbeat interval.
-## default: "100"
-# ETCD_HEARTBEAT_INTERVAL="100"
-
-##### -election-timeout
-## Time (in milliseconds) for an election to timeout.
-## See /usr/share/doc/etcd/Documentation/tuning.md
-## default: "1000"
-# ETCD_ELECTION_TIMEOUT="1000"
-
-##### -listen-peer-urls
-## List of URLs to listen on for peer traffic. This flag tells the etcd to accept
-## incoming requests from its peers on the specified scheme://IP:port combinations.
-## Scheme can be either http or https. If 0.0.0.0 is specified as the IP, etcd
-## listens to the given port on all interfaces. If an IP address is given as
-## well as a port, etcd will listen on the given port and interface.
-## Multiple URLs may be used to specify a number of addresses and ports to listen on.
-## The etcd will respond to requests from any of the listed addresses and ports.
-## example: "http://10.0.0.1:2380"
-## invalid example: "http://example.com:2380" (domain name is invalid for binding)
-## default: "http://localhost:2380,http://localhost:7001"
-# ETCD_LISTEN_PEER_URLS="http://localhost:2380,http://localhost:7001"
-ETCD_LISTEN_PEER_URLS="http://{{ master.etcd.host }}:2380"
-
-##### -listen-client-urls
-## List of URLs to listen on for client traffic. This flag tells the etcd to accept
-## incoming requests from the clients on the specified scheme://IP:port combinations.
-## Scheme can be either http or https. If 0.0.0.0 is specified as the IP, etcd
-## listens to the given port on all interfaces. If an IP address is given as
-## well as a port, etcd will listen on the given port and interface.
-## Multiple URLs may be used to specify a number of addresses and ports to listen on.
-## The etcd will respond to requests from any of the listed addresses and ports.
-## (ADVERTISE_CLIENT_URLS is required when LISTEN_CLIENT_URLS is set explicitly).
-## example: "http://10.0.0.1:2379"
-## invalid example: "http://example.com:2379" (domain name is invalid for binding)
-## default: "http://localhost:2379,http://localhost:4001"
-# ETCD_LISTEN_CLIENT_URLS="http://localhost:2379,http://localhost:4001"
-ETCD_LISTEN_CLIENT_URLS="{%- if master.etcd.host == '127.0.0.1' %}{% for member in master.etcd.members %} http://{{ member.host }}:4001{% endfor %}{% else %} http://{{ master.etcd.host }}:4001{% endif %},http://127.0.0.1:4001"
-##### -max-snapshots
-## Maximum number of snapshot files to retain (0 is unlimited)
-## default: 5
-# ETCD_MAX_SNAPSHOTS="5"
-
-##### -max-wals
-## Maximum number of wal files to retain (0 is unlimited)
-## default: 5
-# ETCD_MAX_WALS="5"
-
-##### -cors
-## Comma-separated whitelist of origins for CORS (cross-origin resource sharing).
-## default: none
-# ETCD_CORS
-
-### Clustering Flags
-## For an explanation of the various ways to do cluster setup, see:
-## /usr/share/doc/etcd/Documentation/clustering.md.gz
-##
-## The command line parameters starting with -initial-cluster will be
-## ignored on subsequent runs of etcd as they are used only during initial
-## bootstrap process.
-
-##### -initial-advertise-peer-urls
-## List of this member's peer URLs to advertise to the rest of the cluster.
-## These addresses are used for communicating etcd data around the cluster.
-## At least one must be routable to all cluster members.
-## These URLs can contain domain names.
-## example: "http://example.com:2380, http://10.0.0.1:2380"
-## default: "http://localhost:2380,http://localhost:7001"
-# ETCD_INITIAL_ADVERTISE_PEER_URLS="http://localhost:2380,http://localhost:7001"
-ETCD_INITIAL_ADVERTISE_PEER_URLS="http://{{ master.etcd.host }}:2380"
-
-##### -initial-cluster
-## initial cluster configuration for bootstrapping.
-## The key is the value of the `-name` flag for each node provided.
-## The default uses `default` for the key because this is the default for the `-name` flag.
-## default: "default=http://localhost:2380,default=http://localhost:7001"
-# ETCD_INITIAL_CLUSTER="default=http://localhost:2380,default=http://localhost:7001"
-ETCD_INITIAL_CLUSTER="{% for member in master.etcd.members %}{{ member.name }}={%- if master.etcd.host == '127.0.0.1' %}http://127.0.0.1:2380{% else %}http://{{ member.host }}:2380{% if not loop.last %},{% endif %}{% endif %}{% endfor %}"
-##### -initial-cluster-state
-## Initial cluster state ("new" or "existing"). Set to `new` for all members
-## present during initial static or DNS bootstrapping. If this option is set to
-## `existing`, etcd will attempt to join the existing cluster. If the wrong
-## value is set, etcd will attempt to start but fail safely.
-## default: "new"
-# ETCD_INITIAL_CLUSTER_STATE="existing"
-ETCD_INITIAL_CLUSTER_STATE="new"
-
-##### -initial-cluster-token
-## Initial cluster token for the etcd cluster during bootstrap.
-## If you are spinning up multiple clusters (or creating and destroying a
-## single cluster) with same configuration for testing purpose, it is highly
-## recommended that you specify a unique initial-cluster-token for the
-## different clusters.
-## default: "etcd-cluster"
-# ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
-ETCD_INITIAL_CLUSTER_TOKEN="{{ master.etcd.token }}"
-
-##### -advertise-client-urls
-## List of this member's client URLs to advertise to the rest of the cluster.
-## These URLs can contain domain names.
-## example: "http://example.com:2379, http://10.0.0.1:2379"
-## Be careful if you are advertising URLs such as http://localhost:2379 from a
-## cluster member and are using the proxy feature of etcd. This will cause loops,
-## because the proxy will be forwarding requests to itself until its resources
-## (memory, file descriptors) are eventually depleted.
-## default: "http://localhost:2379,http://localhost:4001"
-# ETCD_ADVERTISE_CLIENT_URLS="http://localhost:2379,http://localhost:4001"
-ETCD_ADVERTISE_CLIENT_URLS="http://{{ master.etcd.host }}:4001"
-
-##### -discovery
-## Discovery URL used to bootstrap the cluster.
-## default: none
-# ETCD_DISCOVERY
-
-##### -discovery-srv
-## DNS srv domain used to bootstrap the cluster.
-## default: none
-# ETCD_DISCOVERY_SRV
-
-##### -discovery-fallback
-## Expected behavior ("exit" or "proxy") when discovery services fails.
-## default: "proxy"
-# ETCD_DISCOVERY_FALLBACK="proxy"
-
-##### -discovery-proxy
-## HTTP proxy to use for traffic to discovery service.
-## default: none
-# ETCD_DISCOVERY_PROXY
-
-### Proxy Flags
-
-##### -proxy
-## Proxy mode setting ("off", "readonly" or "on").
-## default: "off"
-# ETCD_PROXY="on"
-
-##### -proxy-failure-wait
-## Time (in milliseconds) an endpoint will be held in a failed state before being
-## reconsidered for proxied requests.
-## default: 5000
-# ETCD_PROXY_FAILURE_WAIT="5000"
-
-##### -proxy-refresh-interval
-## Time (in milliseconds) of the endpoints refresh interval.
-## default: 30000
-# ETCD_PROXY_REFRESH_INTERVAL="30000"
-
-##### -proxy-dial-timeout
-## Time (in milliseconds) for a dial to timeout or 0 to disable the timeout
-## default: 1000
-# ETCD_PROXY_DIAL_TIMEOUT="1000"
-
-##### -proxy-write-timeout
-## Time (in milliseconds) for a write to timeout or 0 to disable the timeout.
-## default: 5000
-# ETCD_PROXY_WRITE_TIMEOUT="5000"
-
-##### -proxy-read-timeout
-## Time (in milliseconds) for a read to timeout or 0 to disable the timeout.
-## Don't change this value if you use watches because they are using long polling requests.
-## default: 0
-# ETCD_PROXY_READ_TIMEOUT="0"
-
-### Security Flags
-
-##### -ca-file [DEPRECATED]
-## Path to the client server TLS CA file.
-## default: none
-# ETCD_CA_FILE=""
-
-##### -cert-file
-## Path to the client server TLS cert file.
-## default: none
-# ETCD_CERT_FILE=""
-
-##### -key-file
-## Path to the client server TLS key file.
-## default: none
-# ETCD_KEY_FILE=""
-
-##### -client-cert-auth
-## Enable client cert authentication.
-## default: false
-# ETCD_CLIENT_CERT_AUTH
-
-##### -trusted-ca-file
-## Path to the client server TLS trusted CA key file.
-## default: none
-# ETCD_TRUSTED_CA_FILE
-
-##### -peer-ca-file [DEPRECATED]
-## Path to the peer server TLS CA file. `-peer-ca-file ca.crt` could be replaced
-## by `-peer-trusted-ca-file ca.crt -peer-client-cert-auth` and etcd will perform the same.
-## default: none
-# ETCD_PEER_CA_FILE
-
-##### -peer-cert-file
-## Path to the peer server TLS cert file.
-## default: none
-# ETCD_PEER_CERT_FILE
-
-##### -peer-key-file
-## Path to the peer server TLS key file.
-## default: none
-# ETCD_PEER_KEY_FILE
-
-##### -peer-client-cert-auth
-## Enable peer client cert authentication.
-## default: false
-# ETCD_PEER_CLIENT_CERT_AUTH
-
-##### -peer-trusted-ca-file
-## Path to the peer server TLS trusted CA file.
-## default: none
-# ETCD_PEER_TRUSTED_CA_FILE
-
-### Logging Flags
-##### -debug
-## Drop the default log level to DEBUG for all subpackages.
-## default: false (INFO for all packages)
-# ETCD_DEBUG
-
-##### -log-package-levels
-## Set individual etcd subpackages to specific log levels.
-## An example being `etcdserver=WARNING,security=DEBUG`
-## default: none (INFO for all packages)
-# ETCD_LOG_PACKAGE_LEVELS
-
-
-#### Daemon parameters:
-# DAEMON_ARGS=""
diff --git a/kubernetes/files/job.yml b/kubernetes/files/job.yml
new file mode 100644
index 0000000..95103b9
--- /dev/null
+++ b/kubernetes/files/job.yml
@@ -0,0 +1,89 @@
+{% from "kubernetes/map.jinja" import control with context %}
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: {{ job.job }}-job
+ namespace: {{ job.get('namespace', 'default') }}
+spec:
+ template:
+ metadata:
+ spec:
+ {%- if job.host_network is defined %}
+ hostNetwork: True
+ {%- endif %}
+ {%- if job.host_pid is defined %}
+ hostPID: True
+ {%- endif %}
+ containers:
+ {%- for container_name, container in job.container.iteritems() %}
+ - name: {{ container_name }}
+ image: {% if container.registry is defined %}{{ container.registry }}/{%- endif %}{{ container.image }}{%- if container.tag is defined %}:{{ container.tag }}{%- endif %}
+ imagePullPolicy: {{ container.get('image_pull_policy', 'IfNotPresent') }}
+ {%- if container.privileged is defined %}
+ securityContext:
+ privileged: True
+ {%- endif %}
+ {%- if container.variables is defined %}
+ env:
+ {%- for variable in container.variables %}
+ - name: {{ variable.name }}
+ {%- if variable.field_path is defined %}
+ valueFrom:
+ fieldRef:
+              fieldPath: {{ variable.field_path }}
+ {%- else %}
+ value: {{ variable.value }}
+ {%- endif %}
+ {%- endfor %}
+ {%- endif %}
+ {%- if container.command is defined %}
+ command:
+ {%- for command in container.command %}
+ - {{ command }}
+ {%- endfor %}
+ {%- endif %}
+ {%- if container.volumes is defined %}
+ volumeMounts:
+ {%- for volume in container.volumes %}
+ - name: {{ volume.name }}
+ mountPath: {{ volume.mount }}
+ readOnly: {{ volume.get('read_only', 'False') }}
+ {%- endfor %}
+ {%- endif %}
+ {%- endfor %}
+ {%- if job.volume is defined %}
+ volumes:
+ {%- for volume_name, volume in job.volume.iteritems() %}
+ - name: {{ volume_name }}
+ {%- if volume.type == 'empty_dir' %}
+ emptyDir: {}
+ {%- elif volume.type == 'host_path' %}
+ hostPath:
+ path: {{ volume.path }}
+ {%- elif volume.type == 'glusterfs' %}
+ glusterfs:
+ endpoints: {{ volume.endpoints }}
+ path: {{ volume.path }}
+ readOnly: {{ volume.get('read_only', 'False') }}
+ {%- elif volume.type == 'config_map' %}
+ configMap:
+ name: {{ volume_name }}
+ items:
+ {%- for name, item in volume.item.iteritems() %}
+ - key: {{ item.key }}
+ path: {{ item.path }}
+ {%- endfor %}
+ {%- endif %}
+ {%- endfor %}
+ {%- endif %}
+ restartPolicy: {{ job.restart_policy }}
+ {%- if job.node_selector is defined %}
+ nodeSelector:
+ {%- for selector in job.node_selector %}
+ {{ selector.key }}: {{ selector.value }}
+ {%- endfor %}
+ {%- endif %}
+ {%- if job.image_pull_secretes is defined %}
+ imagePullSecrets:
+ - name: {{ job.image_pull_secretes }}
+ {%- endif %}
\ No newline at end of file
diff --git a/kubernetes/files/kube-addons/dashboard/dashboard-controller.yaml b/kubernetes/files/kube-addons/dashboard/dashboard-controller.yaml
index c2eba63..a4236ec 100644
--- a/kubernetes/files/kube-addons/dashboard/dashboard-controller.yaml
+++ b/kubernetes/files/kube-addons/dashboard/dashboard-controller.yaml
@@ -3,10 +3,11 @@
metadata:
# Keep the name in sync with image version and
# gce/coreos/kube-manifests/addons/dashboard counterparts
- name: kubernetes-dashboard-v1.1.0-beta2
+ name: dashboard
namespace: kube-system
labels:
k8s-app: kubernetes-dashboard
+ version: v1.4.0
kubernetes.io/cluster-service: "true"
spec:
replicas: 1
@@ -17,10 +18,13 @@
labels:
k8s-app: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
+ annotations:
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
spec:
containers:
- name: kubernetes-dashboard
- image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.1.0-beta2
+ image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.4.0
resources:
# keep request = limit to keep this container in guaranteed class
limits:
diff --git a/kubernetes/files/kube-addons/dns/skydns-rc.yaml b/kubernetes/files/kube-addons/dns/skydns-rc.yaml
index c7b7969..984bb5c 100644
--- a/kubernetes/files/kube-addons/dns/skydns-rc.yaml
+++ b/kubernetes/files/kube-addons/dns/skydns-rc.yaml
@@ -2,65 +2,79 @@
apiVersion: v1
kind: ReplicationController
metadata:
- name: kube-dns-v9
+ name: dns
namespace: kube-system
labels:
k8s-app: kube-dns
- version: v9
+ version: v20
kubernetes.io/cluster-service: "true"
spec:
replicas: {{ master.addons.dns.replicas }}
selector:
k8s-app: kube-dns
- version: v9
+ version: v20
template:
metadata:
labels:
k8s-app: kube-dns
- version: v9
- kubernetes.io/cluster-service: "true"
+ version: v20
+ annotations:
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
spec:
containers:
- - name: etcd
- image: gcr.io/google_containers/etcd:2.0.9
+ - name: kubedns
+ image: gcr.io/google_containers/kubedns-amd64:1.8
resources:
limits:
+ memory: 170Mi
+ requests:
cpu: 100m
- memory: 50Mi
- command:
- - /usr/local/bin/etcd
- - -data-dir
- - /var/etcd/data
- - -listen-client-urls
- - http://127.0.0.1:2379,http://127.0.0.1:4001
- - -advertise-client-urls
- - http://127.0.0.1:2379,http://127.0.0.1:4001
- - -initial-cluster-token
- - skydns-etcd
- volumeMounts:
- - name: etcd-storage
- mountPath: /var/etcd/data
- - name: kube2sky
- image: gcr.io/google_containers/kube2sky:1.11
- resources:
- limits:
- cpu: 100m
- memory: 50Mi
+ memory: 70Mi
+ livenessProbe:
+ httpGet:
+ path: /healthz-kubedns
+ port: 8080
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ successThreshold: 1
+ failureThreshold: 5
+ readinessProbe:
+ httpGet:
+ path: /readiness
+ port: 8081
+ scheme: HTTP
+ initialDelaySeconds: 3
+ timeoutSeconds: 5
args:
- # command = "/kube2sky"
- - -domain={{ master.addons.dns.domain }}
- - name: skydns
- image: gcr.io/google_containers/skydns:2015-10-13-8c72f8c
- resources:
- limits:
- cpu: 100m
- memory: 50Mi
+ # command = "/kube-dns"
+ - --domain={{ master.addons.dns.domain }}
+ - --dns-port=10053
+ - --kube-master-url=http://{{ master.apiserver.insecure_address }}:8080
+ ports:
+ - containerPort: 10053
+ name: dns-local
+ protocol: UDP
+ - containerPort: 10053
+ name: dns-tcp-local
+ protocol: TCP
+ - name: dnsmasq
+ image: gcr.io/google_containers/kube-dnsmasq-amd64:1.4
+ livenessProbe:
+ httpGet:
+ path: /healthz-dnsmasq
+ port: 8080
+ scheme: HTTP
+ initialDelaySeconds: 60
+ timeoutSeconds: 5
+ successThreshold: 1
+ failureThreshold: 5
args:
- # command = "/skydns"
- - -machines=http://127.0.0.1:4001
- - -addr=0.0.0.0:53
- - -ns-rotate=false
- - -domain={{ master.addons.dns.domain }}.
+ - --cache-size=1000
+ - --no-resolv
+ - --server=127.0.0.1#10053
+ - --log-facility=-
ports:
- containerPort: 53
name: dns
@@ -68,33 +82,22 @@
- containerPort: 53
name: dns-tcp
protocol: TCP
- livenessProbe:
- httpGet:
- path: /healthz
- port: 8080
- scheme: HTTP
- initialDelaySeconds: 30
- timeoutSeconds: 5
- readinessProbe:
- httpGet:
- path: /healthz
- port: 8080
- scheme: HTTP
- initialDelaySeconds: 1
- timeoutSeconds: 5
- name: healthz
- image: gcr.io/google_containers/exechealthz:1.0
+ image: gcr.io/google_containers/exechealthz-amd64:1.2
resources:
limits:
+ memory: 50Mi
+ requests:
cpu: 10m
- memory: 20Mi
+ memory: 50Mi
args:
- - -cmd=nslookup kubernetes.default.svc.{{ master.addons.dns.domain }} localhost >/dev/null
- - -port=8080
+ - --cmd=nslookup kubernetes.default.svc.{{ master.addons.dns.domain }} 127.0.0.1 >/dev/null
+ - --url=/healthz-dnsmasq
+ - --cmd=nslookup kubernetes.default.svc.{{ master.addons.dns.domain }} 127.0.0.1:10053 >/dev/null
+ - --url=/healthz-kubedns
+ - --port=8080
+ - --quiet
ports:
- containerPort: 8080
protocol: TCP
- volumes:
- - name: etcd-storage
- emptyDir: {}
dnsPolicy: Default # Don't use cluster DNS.
\ No newline at end of file
diff --git a/kubernetes/files/kube-addons/kube-ui/kube-ui-address.yaml b/kubernetes/files/kube-addons/kube-ui/kube-ui-address.yaml
deleted file mode 100644
index f9f3749..0000000
--- a/kubernetes/files/kube-addons/kube-ui/kube-ui-address.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-{%- from "kubernetes/map.jinja" import master with context %}
-apiVersion: v1
-kind: Service
-metadata:
- name: kube-ui-address
- namespace: kube-system
- labels:
- k8s-app: kube-ui
- kubernetes.io/cluster-service: "true"
- kubernetes.io/name: "KubeUI"
-spec:
- selector:
- k8s-app: kube-ui
- deprecatedPublicIPs: ["{{ master.addons.ui.public_ip }}"]
- type: LoadBalancer
- ports:
- - port: 80
- targetPort: 8080
\ No newline at end of file
diff --git a/kubernetes/files/kube-addons/kube-ui/kube-ui-endpoint.yaml b/kubernetes/files/kube-addons/kube-ui/kube-ui-endpoint.yaml
deleted file mode 100644
index 9b22ebc..0000000
--- a/kubernetes/files/kube-addons/kube-ui/kube-ui-endpoint.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-{%- from "kubernetes/map.jinja" import master with context %}
-apiVersion: v1
-kind: Endpoints
-metadata:
- name: kube-ui
- namespace: kube-system
- labels:
- k8s-app: kube-ui
- kubernetes.io/cluster-service: "true"
- kubernetes.io/name: "KubeUI"
-subsets:
- - addresses:
- - ip: {{ master.addons.ui.public_ip }}
-
- ports:
- - port: 8080
- protocol: TCP
\ No newline at end of file
diff --git a/kubernetes/files/kube-addons/kube-ui/kube-ui-rc.yaml b/kubernetes/files/kube-addons/kube-ui/kube-ui-rc.yaml
deleted file mode 100644
index 9c43389..0000000
--- a/kubernetes/files/kube-addons/kube-ui/kube-ui-rc.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-apiVersion: v1
-kind: ReplicationController
-metadata:
- name: kube-ui-v4
- namespace: kube-system
- labels:
- k8s-app: kube-ui
- version: v4
- kubernetes.io/cluster-service: "true"
-spec:
- replicas: 1
- selector:
- k8s-app: kube-ui
- version: v4
- template:
- metadata:
- labels:
- k8s-app: kube-ui
- version: v4
- kubernetes.io/cluster-service: "true"
- spec:
- containers:
- - name: kube-ui
- image: gcr.io/google_containers/kube-ui:v4
- resources:
- limits:
- cpu: 100m
- memory: 50Mi
- ports:
- - containerPort: 8080
- livenessProbe:
- httpGet:
- path: /
- port: 8080
- initialDelaySeconds: 30
- timeoutSeconds: 5
\ No newline at end of file
diff --git a/kubernetes/files/kube-addons/kube-ui/kube-ui-svc.yaml b/kubernetes/files/kube-addons/kube-ui/kube-ui-svc.yaml
deleted file mode 100644
index 876be68..0000000
--- a/kubernetes/files/kube-addons/kube-ui/kube-ui-svc.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
- name: kube-ui
- namespace: kube-system
- labels:
- k8s-app: kube-ui
- kubernetes.io/cluster-service: "true"
- kubernetes.io/name: "KubeUI"
-spec:
- ports:
- - port: 80
- targetPort: 8080
\ No newline at end of file
diff --git a/kubernetes/files/kube-addons/registry/registry-rc.yaml b/kubernetes/files/kube-addons/registry/registry-rc.yaml
new file mode 100644
index 0000000..9e1a1de
--- /dev/null
+++ b/kubernetes/files/kube-addons/registry/registry-rc.yaml
@@ -0,0 +1,59 @@
+{%- from "kubernetes/map.jinja" import master with context %}
+apiVersion: v1
+kind: ReplicationController
+metadata:
+ name: registry
+ namespace: kube-system
+ labels:
+ k8s-app: kube-registry
+ version: v0
+ kubernetes.io/cluster-service: "true"
+spec:
+ replicas: 1
+ selector:
+ k8s-app: kube-registry
+ version: v0
+ template:
+ metadata:
+ labels:
+ k8s-app: kube-registry
+ version: v0
+ kubernetes.io/cluster-service: "true"
+ spec:
+ containers:
+ - name: registry
+ image: registry:2.5.1
+ resources:
+ limits:
+ cpu: 100m
+ memory: 100Mi
+ requests:
+ cpu: 100m
+ memory: 100Mi
+ env:
+ - name: REGISTRY_HTTP_ADDR
+          value: {{ master.addons.registry.bind.get('host', '0.0.0.0') }}:{{ master.addons.registry.bind.get('port', '5000') }}
+ - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
+ value: /var/lib/registry
+ ports:
+ - containerPort: {{ master.addons.registry.bind.get('port', '5000') }}
+ name: registry
+ protocol: TCP
+ {%- if master.addons.registry.volume is defined %}
+ volumeMounts:
+ - name: image-store
+ mountPath: /var/lib/registry
+ volumes:
+ - name: image-store
+      {%- if master.addons.registry.volume.get('type', 'emptyDir') == 'emptyDir' %}
+ emptyDir: {}
+ {%- elif master.addons.registry.volume.type == 'hostPath' %}
+ hostPath:
+ path: {{ master.addons.registry.volume.path }}
+ {%- elif master.addons.registry.volume.type == 'glusterfs' %}
+ glusterfs:
+ endpoints: {{ master.addons.registry.volume.endpoints }}
+ path: {{ master.addons.registry.volume.path }}
+ readOnly: {{ master.addons.registry.volume.read_only }}
+ {%- endif %}
+ {%- endif %}
diff --git a/kubernetes/files/kube-addons/registry/registry.svc b/kubernetes/files/kube-addons/registry/registry.svc
new file mode 100644
index 0000000..708a1ba
--- /dev/null
+++ b/kubernetes/files/kube-addons/registry/registry.svc
@@ -0,0 +1,17 @@
+{%- from "kubernetes/map.jinja" import master with context %}
+apiVersion: v1
+kind: Service
+metadata:
+ name: kube-registry
+ namespace: kube-system
+ labels:
+ k8s-app: kube-registry
+ kubernetes.io/cluster-service: "true"
+ kubernetes.io/name: "KubeRegistry"
+spec:
+ selector:
+ k8s-app: kube-registry
+ ports:
+ - name: registry
+ port: {{ master.addons.registry.bind.get('port', '5000') }}
+ protocol: TCP
\ No newline at end of file
diff --git a/kubernetes/files/kube-proxy/proxy.kubeconfig b/kubernetes/files/kube-proxy/proxy.kubeconfig
index 68e231c..4fb09d6 100644
--- a/kubernetes/files/kube-proxy/proxy.kubeconfig
+++ b/kubernetes/files/kube-proxy/proxy.kubeconfig
@@ -11,7 +11,7 @@
name: proxy-to-cluster.local
clusters:
- cluster:
- certificate-authority: /etc/kubernetes/ssl/ca-{{ pool.ca }}.crt
+ certificate-authority: /etc/kubernetes/ssl/kubelet-client.crt
# server: https://{{ pool.apiserver.host }}:443
name: cluster.local
users:
diff --git a/kubernetes/files/kubelet/kubelet.kubeconfig b/kubernetes/files/kubelet/kubelet.kubeconfig
index d2375a6..951288a 100644
--- a/kubernetes/files/kubelet/kubelet.kubeconfig
+++ b/kubernetes/files/kubelet/kubelet.kubeconfig
@@ -6,8 +6,8 @@
preferences: {}
clusters:
- cluster:
- certificate-authority: /etc/kubernetes/ssl/ca-{{ pool.ca }}.crt
-# server: https://{{ pool.apiserver.host }}:443
+ certificate-authority: /etc/kubernetes/ssl/kubelet-client.crt
+ server: https://{{ pool.apiserver.host }}:443
name: cluster.local
contexts:
- context:
diff --git a/kubernetes/files/manifest/etcd.manifest b/kubernetes/files/manifest/etcd.manifest
deleted file mode 100644
index b3bca45..0000000
--- a/kubernetes/files/manifest/etcd.manifest
+++ /dev/null
@@ -1,65 +0,0 @@
-{%- from "kubernetes/map.jinja" import master with context %}
-{
-"apiVersion": "v1",
-"kind": "Pod",
-"metadata": {
- "name":"etcd-server",
- "namespace": "kube-system"
-},
-"spec":{
-"hostNetwork": true,
-"containers":[
- {
- "name": "etcd-container",
- "image": "{{ master.etcd.registry }}/etcd:{{ master.etcd.version }}",
- "resources": {
- "requests": {
- "cpu": "200m" }
- },
- "command": [
- "/bin/sh",
- "-c",
- "/usr/local/bin/etcd --name {{ master.etcd.name }} --initial-cluster-state new --initial-advertise-peer-urls http://{{ master.etcd.host }}:2380 --listen-peer-urls http://{{ master.etcd.host }}:2380 --advertise-client-urls http://{{ master.etcd.host }}:4001 --listen-client-urls {%- if master.etcd.host == '127.0.0.1' %}{% for member in master.etcd.members %} http://{{ member.host }}:4001{% endfor %}{% else %} http://{{ master.etcd.host }}:4001{% endif %},http://127.0.0.1:4001 --initial-cluster {% for member in master.etcd.members %}{{ member.name }}={%- if master.etcd.host == '127.0.0.1' %}http://127.0.0.1:2380{% else %}http://{{ member.host }}:2380{% if not loop.last %},{% endif %}{% endif %}{% endfor %} --initial-cluster-token {{ master.etcd.token }} --data-dir /var/lib/etcd/default 1>>/var/log/etcd.log 2>&1"
- ],
- "livenessProbe": {
- "httpGet": {
- "host": "127.0.0.1",
- "port": 4001,
- "path": "/health"
- },
- "initialDelaySeconds": 15,
- "timeoutSeconds": 15
- },
- "ports":[
- { "name": "serverport",
- "containerPort": 2380,
- "hostPort": 2380
- },{
- "name": "clientport",
- "containerPort": 4001,
- "hostPort": 4001
- }
- ],
- "volumeMounts": [
- {"name": "varetcd",
- "mountPath": "/var/lib/etcd",
- "readOnly": false
- },
- {"name": "varlogetcd",
- "mountPath": "/var/log/etcd.log",
- "readOnly": false
- }
- ]
- }
-],
-"volumes":[
- { "name": "varetcd",
- "hostPath": {
- "path": "/var/lib/etcd"}
- },
- { "name": "varlogetcd",
- "hostPath": {
- "path": "/var/log/etcd.log"}
- }
-]
-}}
diff --git a/kubernetes/files/manifest/kube-apiserver.manifest b/kubernetes/files/manifest/kube-apiserver.manifest
index db870d8..09835a2 100644
--- a/kubernetes/files/manifest/kube-apiserver.manifest
+++ b/kubernetes/files/manifest/kube-apiserver.manifest
@@ -11,11 +11,10 @@
terminationGracePeriodSeconds: 30
containers:
- name: kube-apiserver
- image: {{ master.registry.host }}/kube-master:{{ master.version }}
+ image: {{ master.registry.host }}/hyperkube:{{ master.version }}
command:
- - /bin/sh
- - -c
- - kube-apiserver
+ - /hyperkube
+ - apiserver
--insecure-bind-address={{ master.apiserver.insecure_address }}
--etcd-servers={% for member in master.etcd.members %}http://{{ member.host }}:4001{% if not loop.last %},{% endif %}{% endfor %}
--admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota
diff --git a/kubernetes/files/manifest/kube-controller-manager.manifest b/kubernetes/files/manifest/kube-controller-manager.manifest
index 3cbbbd2..873d73e 100644
--- a/kubernetes/files/manifest/kube-controller-manager.manifest
+++ b/kubernetes/files/manifest/kube-controller-manager.manifest
@@ -11,11 +11,10 @@
terminationGracePeriodSeconds: 30
containers:
- name: kube-controller-manager
- image: {{ master.registry.host }}/kube-master:{{ master.version }}
+ image: {{ master.registry.host }}/hyperkube:{{ master.version }}
command:
- - /bin/sh
- - -c
- - kube-controller-manager
+ - /hyperkube
+ - controller-manager
--master={{ master.apiserver.insecure_address }}:8080
--cluster-name=kubernetes
--service-account-private-key-file=/etc/kubernetes/ssl/kubernetes-server.key
diff --git a/kubernetes/files/manifest/kube-proxy.manifest.pool b/kubernetes/files/manifest/kube-proxy.manifest.pool
index 359d9f7..3cf1c84 100644
--- a/kubernetes/files/manifest/kube-proxy.manifest.pool
+++ b/kubernetes/files/manifest/kube-proxy.manifest.pool
@@ -8,14 +8,13 @@
hostNetwork: true
containers:
- name: kube-proxy
- image: {{ pool.registry.host }}/kube-pool:{{ pool.version }}
+ image: {{ pool.registry.host }}/hyperkube:{{ pool.version }}
resources:
requests:
cpu: 200m
command:
- - /bin/sh
- - -c
- - kube-proxy
+ - /hyperkube
+ - proxy
--logtostderr=true
--v=2
--kubeconfig=/etc/kubernetes/proxy.kubeconfig
diff --git a/kubernetes/files/manifest/kube-scheduler.manifest b/kubernetes/files/manifest/kube-scheduler.manifest
index a0977a1..e2d5ba0 100644
--- a/kubernetes/files/manifest/kube-scheduler.manifest
+++ b/kubernetes/files/manifest/kube-scheduler.manifest
@@ -12,12 +12,11 @@
terminationGracePeriodSeconds: 30
containers:
- name: kube-scheduler
- image: {{ master.registry.host }}/kube-master:{{ master.version }}
+ image: {{ master.registry.host }}/hyperkube:{{ master.version }}
imagePullPolicy: IfNotPresent
command:
- - /bin/sh
- - -c
- - kube-scheduler
+      - /hyperkube
+ - scheduler
--master={{ master.apiserver.insecure_address }}:8080
--v=2
--leader-elect=true
diff --git a/kubernetes/files/rc.yml b/kubernetes/files/rc.yml
index 1abd02d..d2cad43 100644
--- a/kubernetes/files/rc.yml
+++ b/kubernetes/files/rc.yml
@@ -8,14 +8,86 @@
app: {{ service.service }}-{{ service.role }}
spec:
replicas: {{ service.replicas }}
+ {%- if service.kind == 'PetSet' %}
+ serviceName: {{ service.service_name }}
+ {%- endif %}
template:
metadata:
labels:
app: {{ service.service }}-{{ service.role }}
- {%- if service.hostname is defined %}
annotations:
+ {%- if service.hostname is defined %}
pod.beta.kubernetes.io/hostname: {{ service.hostname }}
{%- endif %}
+ {%- if service.init_containers is defined %}
+ pod.alpha.kubernetes.io/init-containers: '[
+ {%- for container in service.init_containers %}
+ {
+ "name": "{{ container.name }}",
+ "image": "{% if container.registry is defined %}{{ container.registry }}/{%- endif %}{{ container.image }}{%- if container.tag is defined %}:{{ container.tag }}{%- endif %}",
+ "command": [{%- for command in container.command %}"{{ command }}"{% if not loop.last %},{% endif %}{%- endfor %}]
+ {%- if container.volumes is defined -%}
+ ,
+ "volumeMounts": [
+ {%- for volume in container.volumes %}
+ {
+ "name": "{{ volume.name }}",
+ {%- if volume.sub_path is defined %}
+ "subPath": "{{ volume.sub_path }}",
+ {%- endif %}
+ "mountPath": "{{ volume.mount }}"
+ }
+ {%- if not loop.last %},{% endif %}{%- endfor %}
+ ]
+ {%- endif %}
+ }
+ {%- if not loop.last %},{% endif %}{% endfor %}
+ ]'
+ {%- endif %}
+ {%- if service.affinity is defined %}
+ scheduler.alpha.kubernetes.io/affinity: >
+ {
+ {%- for affinity_name, affinity in service.affinity.iteritems() %}
+ "{{ affinity.name }}": {
+ {%- for expression_name, expression in affinity.expression.iteritems() %}
+ {%- if expression.name == 'matchExpressions' %}
+ "{{ affinity.get('type','required') }}DuringSchedulingIgnoredDuringExecution": {
+ "nodeSelectorTerms": [
+ {
+ "matchExpressions": [
+ {%- for selector in expression.selectors %}
+ {
+ "key": "{{ selector.key }}",
+ "operator": "{{ selector.operator }}",
+ "values": [{%- for value in selector['values'] %}"{{ value }}"{%- if not loop.last %},{% endif %}{%- endfor %}]
+ }{%- if not loop.last %},{% endif %}
+ {% endfor %}
+ ]
+ }
+ ]
+ }
+ {%- elif expression.name == 'labelSelector' %}
+ "{{ affinity.get('type','required') }}DuringSchedulingIgnoredDuringExecution": [
+ {
+ "labelSelector": {
+ "matchLabels": {
+ {%- for selector in expression.selectors %}
+ "{{ selector.key }}": "{{ selector.value }}"
+ {%- if not loop.last %},{% endif %}{%- endfor %}
+ }
+ },
+ {%- if affinity.name == 'podAntiAffinity' or affinity.name == 'podAffinity' %}
+ "topologyKey": "{{ affinity.topology_key }}"
+ {%- endif %}
+ }
+ ]
+ {%- endif %}
+ {%- endfor %}
+ {%- if not loop.last %}},{% endif %}
+ {%- endfor %}
+ }
+ }
+ {%- endif %}
spec:
{%- if service.hostNetwork is defined %}
hostNetwork: True
@@ -27,7 +99,7 @@
{%- for container_name, container in service.container.iteritems() %}
- name: {{ container_name }}
image: {% if container.registry is defined %}{{ container.registry }}/{%- endif %}{{ container.image }}{%- if container.tag is defined %}:{{ container.tag }}{%- endif %}
- imagePullPolicy: IfNotPresent
+ imagePullPolicy: {{ container.get('image_pull_policy','IfNotPresent') }}
{%- if container.privileged is defined %}
securityContext:
privileged: True
@@ -63,7 +135,10 @@
{%- for volume in container.volumes %}
- name: {{ volume.name }}
mountPath: {{ volume.mount }}
- readOnly: {{ volume.read_only }}
+ readOnly: {{ volume.get('read_only', 'False') }}
+ {%- if volume.sub_path is defined %}
+ subPath: {{ volume.sub_path }}
+ {%- endif %}
{%- endfor %}
{%- endif %}
{%- if container.liveness_probe is defined %}
@@ -91,7 +166,7 @@
{%- elif container.readiness_probe.type == 'exec' %}
exec:
command:
- {%- for command in container.liveness_probe.command %}
+ {%- for command in container.readiness_probe.command %}
- {{ command }}
{%- endfor %}
{%- endif %}
@@ -113,6 +188,14 @@
endpoints: {{ volume.endpoints }}
path: {{ volume.path }}
readOnly: {{ volume.read_only }}
+ {%- elif volume.type == 'config_map' %}
+ configMap:
+ name: {{ volume_name }}-{{ volume.get('version', '1') }}
+ items:
+ {%- for name, item in volume.item.iteritems() %}
+ - key: {{ item.key }}
+ path: {{ item.path }}
+ {%- endfor %}
{%- endif %}
{%- endfor %}
{%- endif %}
@@ -125,4 +208,4 @@
{%- if service.image_pull_secretes is defined %}
imagePullSecrets:
- name: {{ service.image_pull_secretes }}
- {%- endif %}
\ No newline at end of file
+ {%- endif %}
diff --git a/kubernetes/files/systemd/kube-apiserver.service b/kubernetes/files/systemd/kube-apiserver.service
new file mode 100644
index 0000000..bfe7bd8
--- /dev/null
+++ b/kubernetes/files/systemd/kube-apiserver.service
@@ -0,0 +1,30 @@
+[Unit]
+Description=Kubernetes API Server
+Documentation=https://github.com/kubernetes/kubernetes
+Documentation=man:kube-apiserver
+After=network.target
+After=etcd.service
+Wants=etcd.service
+
+[Service]
+EnvironmentFile=-/etc/kubernetes/config
+EnvironmentFile=-/etc/default/%p
+User=root
+ExecStart=/usr/bin/hyperkube \
+ apiserver \
+ $KUBE_LOGTOSTDERR \
+ $KUBE_LOG_LEVEL \
+ $KUBE_ALLOW_PRIV \
+ $KUBE_API_ADDRESS \
+ $KUBE_API_PORT \
+ $KUBELET_PORT \
+ $KUBE_ETCD_SERVERS \
+ $KUBE_SERVICE_ADDRESSES \
+ $KUBE_ADMISSION_CONTROL \
+ $DAEMON_ARGS
+Restart=on-failure
+Type=notify
+LimitNOFILE=65536
+
+[Install]
+WantedBy=multi-user.target
\ No newline at end of file
diff --git a/kubernetes/files/systemd/kube-controller-manager.service b/kubernetes/files/systemd/kube-controller-manager.service
new file mode 100644
index 0000000..19c56fb
--- /dev/null
+++ b/kubernetes/files/systemd/kube-controller-manager.service
@@ -0,0 +1,21 @@
+[Unit]
+Description=Kubernetes Controller Manager
+Documentation=https://github.com/kubernetes/kubernetes
+Documentation=man:kube-controller-manager
+After=network.target
+
+[Service]
+Environment=KUBE_MASTER=--master=127.0.0.1:8080
+EnvironmentFile=-/etc/kubernetes/config
+EnvironmentFile=-/etc/default/%p
+User=root
+ExecStart=/usr/bin/hyperkube \
+ controller-manager \
+ $KUBE_LOGTOSTDERR \
+ $KUBE_LOG_LEVEL \
+ $DAEMON_ARGS
+Restart=on-failure
+LimitNOFILE=65536
+
+[Install]
+WantedBy=multi-user.target
\ No newline at end of file
diff --git a/kubernetes/files/systemd/kube-proxy.service b/kubernetes/files/systemd/kube-proxy.service
new file mode 100644
index 0000000..0620223
--- /dev/null
+++ b/kubernetes/files/systemd/kube-proxy.service
@@ -0,0 +1,22 @@
+[Unit]
+Description=Kubernetes Kube-Proxy Server
+Documentation=https://github.com/kubernetes/kubernetes
+Documentation=man:kube-proxy
+After=network.target
+
+[Service]
+Environment=KUBE_MASTER=--master=127.0.0.1:8080
+EnvironmentFile=-/etc/kubernetes/config
+EnvironmentFile=-/etc/default/%p
+User=root
+ExecStart=/usr/bin/hyperkube \
+ proxy \
+ $KUBE_LOGTOSTDERR \
+ $KUBE_LOG_LEVEL \
+ $KUBE_MASTER \
+ $DAEMON_ARGS
+Restart=on-failure
+LimitNOFILE=65536
+
+[Install]
+WantedBy=multi-user.target
\ No newline at end of file
diff --git a/kubernetes/files/systemd/kube-scheduler.service b/kubernetes/files/systemd/kube-scheduler.service
new file mode 100644
index 0000000..d29f9c9
--- /dev/null
+++ b/kubernetes/files/systemd/kube-scheduler.service
@@ -0,0 +1,22 @@
+[Unit]
+Description=Kubernetes Scheduler Plugin
+Documentation=https://github.com/kubernetes/kubernetes
+Documentation=man:kube-scheduler
+After=network.target
+
+[Service]
+Environment=KUBE_MASTER=--master=127.0.0.1:8080
+EnvironmentFile=-/etc/kubernetes/config
+EnvironmentFile=-/etc/default/%p
+User=root
+ExecStart=/usr/bin/hyperkube \
+ scheduler \
+ $KUBE_LOGTOSTDERR \
+ $KUBE_LOG_LEVEL \
+ $KUBE_MASTER \
+ $DAEMON_ARGS
+Restart=on-failure
+LimitNOFILE=65536
+
+[Install]
+WantedBy=multi-user.target
\ No newline at end of file
diff --git a/kubernetes/files/systemd/kubelet.service b/kubernetes/files/systemd/kubelet.service
new file mode 100644
index 0000000..5d5f7dc
--- /dev/null
+++ b/kubernetes/files/systemd/kubelet.service
@@ -0,0 +1,30 @@
+[Unit]
+Description=Kubernetes Kubelet Server
+Documentation=https://github.com/kubernetes/kubernetes
+Documentation=man:kubelet
+After=network.target
+After=docker.service
+Requires=docker.service
+Conflicts=cadvisor.service
+
+[Service]
+EnvironmentFile=-/etc/kubernetes/config
+EnvironmentFile=-/etc/default/%p
+User=root
+ExecStart=/usr/bin/hyperkube \
+ kubelet \
+ $KUBE_LOGTOSTDERR \
+ $KUBE_LOG_LEVEL \
+ $KUBE_ALLOW_PRIV \
+ $KUBELET_ADDRESS \
+ $KUBELET_PORT \
+ $KUBELET_HOSTNAME \
+ $KUBELET_API_SERVER \
+ $DOCKER_ENDPOINT \
+ $CADVISOR_PORT \
+ $DAEMON_ARGS
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
+Alias=cadvisor.service
\ No newline at end of file
diff --git a/kubernetes/map.jinja b/kubernetes/map.jinja
index ac5d008..f36f9e3 100644
--- a/kubernetes/map.jinja
+++ b/kubernetes/map.jinja
@@ -1,6 +1,6 @@
{% set common = salt['grains.filter_by']({
'Debian': {
- 'pkgs': ['curl', 'git', 'apt-transport-https', 'python-apt', 'nfs-common', 'socat', 'netcat-traditional', 'openssl', 'kubernetes-client', 'kubernetes-node', 'python-etcd'],
+ 'pkgs': ['curl', 'git', 'apt-transport-https', 'python-apt', 'nfs-common', 'socat', 'netcat-traditional', 'openssl'],
'services': [],
},
'RedHat': {
@@ -11,7 +11,7 @@
{% set master = salt['grains.filter_by']({
'Debian': {
- 'pkgs': ['kubernetes-master'],
+ 'pkgs': [],
'services': ['kube-apiserver','kube-scheduler','kube-controller-manager'],
},
'RedHat': {
@@ -34,9 +34,11 @@
{% set control = salt['grains.filter_by']({
'Debian': {
'service': {},
+ 'config_type': "default",
},
'RedHat': {
'service': {},
+ 'config_type': "default",
},
}, merge=salt['pillar.get']('kubernetes:control')) %}
diff --git a/kubernetes/master/calico.sls b/kubernetes/master/calico.sls
index e2833b4..ec20328 100644
--- a/kubernetes/master/calico.sls
+++ b/kubernetes/master/calico.sls
@@ -11,25 +11,25 @@
- dir_mode: 755
- template: jinja
-/etc/systemd/system/calico-node.service:
- file.managed:
- - source: salt://kubernetes/files/calico/calico-node.service
- - user: root
- - group: root
+# /etc/systemd/system/calico-node.service:
+# file.managed:
+# - source: salt://kubernetes/files/calico/calico-node.service
+# - user: root
+# - group: root
/usr/bin/calicoctl:
file.managed:
- - source: https://github.com/projectcalico/calico-containers/releases/download/{{ master.network.version }}/calicoctl
+ - source: {{ master.network.get('source', 'https://github.com/projectcalico/calico-containers/releases/download/') }}{{ master.network.version }}/calicoctl
- source_hash: md5={{ master.network.hash }}
- mode: 751
- user: root
- group: root
-calico_node:
- service.running:
- - name: calico-node
- - enable: True
- - watch:
- - file: /etc/systemd/system/calico-node.service
+# calico_node:
+# service.running:
+# - name: calico-node
+# - enable: True
+# - watch:
+# - file: /etc/systemd/system/calico-node.service
{%- endif %}
\ No newline at end of file
diff --git a/kubernetes/master/controller.sls b/kubernetes/master/controller.sls
index 4526ed7..2d54236 100644
--- a/kubernetes/master/controller.sls
+++ b/kubernetes/master/controller.sls
@@ -92,6 +92,30 @@
- mode: 644
- contents: DAEMON_ARGS=" --master={{ master.apiserver.insecure_address }}:8080 --v=2 --leader-elect=true"
+/etc/systemd/system/kube-apiserver.service:
+ file.managed:
+ - source: salt://kubernetes/files/systemd/kube-apiserver.service
+ - template: jinja
+ - user: root
+ - group: root
+ - mode: 644
+
+/etc/systemd/system/kube-scheduler.service:
+ file.managed:
+ - source: salt://kubernetes/files/systemd/kube-scheduler.service
+ - template: jinja
+ - user: root
+ - group: root
+ - mode: 644
+
+/etc/systemd/system/kube-controller-manager.service:
+ file.managed:
+ - source: salt://kubernetes/files/systemd/kube-controller-manager.service
+ - template: jinja
+ - user: root
+ - group: root
+ - mode: 644
+
master_services:
service.running:
- names: {{ master.services }}
@@ -105,9 +129,18 @@
{%- if not pillar.kubernetes.pool is defined %}
-/etc/default/kubelet:
+/usr/bin/hyperkube:
file.managed:
- - source: salt://kubernetes/files/kubelet/default.master
+ - source: {{ master.hyperkube.get('source', 'http://apt.tcpcloud.eu/kubernetes/bin/') }}{{ master.version }}/hyperkube
+ - source_hash: md5={{ master.hyperkube.hash }}
+ - mode: 751
+ - makedirs: true
+ - user: root
+ - group: root
+
+/etc/systemd/system/kubelet.service:
+ file.managed:
+ - source: salt://kubernetes/files/systemd/kubelet.service
- template: jinja
- user: root
- group: root
@@ -116,6 +149,14 @@
/etc/kubernetes/config:
file.absent
+/etc/default/kubelet:
+ file.managed:
+ - source: salt://kubernetes/files/kubelet/default.master
+ - template: jinja
+ - user: root
+ - group: root
+ - mode: 644
+
kubelet_service:
service.running:
- name: kubelet
diff --git a/kubernetes/master/etcd.sls b/kubernetes/master/etcd.sls
deleted file mode 100644
index 3e297e2..0000000
--- a/kubernetes/master/etcd.sls
+++ /dev/null
@@ -1,45 +0,0 @@
-{%- from "kubernetes/map.jinja" import master with context %}
-{%- if master.enabled %}
-
-{%- if master.get('container', 'true') %}
-
-/var/log/etcd.log:
- file.managed:
- - user: root
- - group: root
- - mode: 644
-
-/etc/kubernetes/manifests/etcd.manifest:
- file.managed:
- - source: salt://kubernetes/files/manifest/etcd.manifest
- - template: jinja
- - user: root
- - group: root
- - mode: 644
- - makedirs: true
- - dir_mode: 755
-
-{%- else %}
-
-etcd_pkg:
- pkg.installed:
- - name: etcd
-
-/etc/default/etcd:
- file.managed:
- - source: salt://kubernetes/files/etcd/default
- - template: jinja
- - user: root
- - group: root
- - mode: 644
-
-etcd_service:
- service.running:
- - name: etcd
- - enable: True
- - watch:
- - file: /etc/default/etcd
-
-{%- endif %}
-
-{%- endif %}
\ No newline at end of file
diff --git a/kubernetes/master/init.sls b/kubernetes/master/init.sls
index cd7a77c..cff687f 100644
--- a/kubernetes/master/init.sls
+++ b/kubernetes/master/init.sls
@@ -1,7 +1,6 @@
{%- from "kubernetes/map.jinja" import master with context %}
include:
- kubernetes.master.service
-- kubernetes.master.etcd
- kubernetes.master.kube-addons
{%- if master.network.engine == "opencontrail" %}
- kubernetes.master.opencontrail-network-manager
@@ -18,3 +17,4 @@
- kubernetes.master.glusterfs
{%- endif %}
- kubernetes.master.controller
+- kubernetes.master.setup
\ No newline at end of file
diff --git a/kubernetes/master/kube-addons.sls b/kubernetes/master/kube-addons.sls
index b5e04ff..6dc809c 100644
--- a/kubernetes/master/kube-addons.sls
+++ b/kubernetes/master/kube-addons.sls
@@ -28,43 +28,7 @@
{% endif %}
-{%- if master.addons.ui.enabled %}
-
-{%- if master.version == "v1.1.1" %}
-
-/etc/kubernetes/addons/kube-ui/kube-ui-svc.yaml:
- file.managed:
- - source: salt://kubernetes/files/kube-addons/kube-ui/kube-ui-svc.yaml
- - template: jinja
- - group: root
- - dir_mode: 755
- - makedirs: True
-
-/etc/kubernetes/addons/kube-ui/kube-ui-rc.yaml:
- file.managed:
- - source: salt://kubernetes/files/kube-addons/kube-ui/kube-ui-rc.yaml
- - template: jinja
- - group: root
- - dir_mode: 755
- - makedirs: True
-
-/etc/kubernetes/addons/kube-ui/kube-ui-address.yaml:
- file.managed:
- - source: salt://kubernetes/files/kube-addons/kube-ui/kube-ui-address.yaml
- - template: jinja
- - group: root
- - dir_mode: 755
- - makedirs: True
-
-/etc/kubernetes/addons/kube-ui/kube-ui-endpoint.yaml:
- file.managed:
- - source: salt://kubernetes/files/kube-addons/kube-ui/kube-ui-endpoint.yaml
- - template: jinja
- - group: root
- - dir_mode: 755
- - makedirs: True
-
-{% endif %}
+{%- if master.addons.dashboard.enabled %}
/etc/kubernetes/addons/dashboard/dashboard-service.yaml:
file.managed:
@@ -155,4 +119,4 @@
{% endif %}
-{% endif %}
\ No newline at end of file
+{% endif %}
diff --git a/kubernetes/master/service.sls b/kubernetes/master/service.sls
index b3ed467..46ada4d 100644
--- a/kubernetes/master/service.sls
+++ b/kubernetes/master/service.sls
@@ -5,8 +5,4 @@
include:
- kubernetes._common
-kubernetes_master_pkgs:
- pkg.installed:
- - names: {{ master.pkgs }}
-
{%- endif %}
diff --git a/kubernetes/master/setup.sls b/kubernetes/master/setup.sls
new file mode 100644
index 0000000..72b378b
--- /dev/null
+++ b/kubernetes/master/setup.sls
@@ -0,0 +1,15 @@
+{%- from "kubernetes/map.jinja" import master with context %}
+{%- if master.enabled %}
+
+{%- for addon_name, addon in master.addons.iteritems() %}
+{%- if addon.enabled %}
+
+kubernetes_addons_{{ addon_name }}:
+ cmd.run:
+ - name: |
+ hyperkube kubectl create -f /etc/kubernetes/addons/{{ addon_name }}
+ - unless: "hyperkube kubectl get rc {{ addon.get('name', addon_name) }} --namespace=kube-system"
+
+{%- endif %}
+{%- endfor %}
+{%- endif %}
\ No newline at end of file
diff --git a/kubernetes/pool/calico.sls b/kubernetes/pool/calico.sls
index adcfb4d..864b48e 100644
--- a/kubernetes/pool/calico.sls
+++ b/kubernetes/pool/calico.sls
@@ -3,7 +3,7 @@
/usr/bin/calicoctl:
file.managed:
- - source: https://github.com/projectcalico/calico-containers/releases/download/{{ pool.network.version }}/calicoctl
+ - source: {{ pool.network.get('source', 'https://github.com/projectcalico/calico-containers/releases/download/') }}{{ pool.network.version }}/calicoctl
- source_hash: md5={{ pool.network.hash }}
- mode: 751
- user: root
@@ -11,7 +11,7 @@
/opt/cni/bin/calico:
file.managed:
- - source: https://github.com/projectcalico/calico-cni/releases/download/{{ pool.network.cni.version }}/calico
+ - source: {{ pool.network.cni.get('source', 'https://github.com/projectcalico/calico-cni/releases/download/') }}{{ pool.network.cni.version }}/calico
- source_hash: md5={{ pool.network.cni.hash }}
- mode: 751
- makedirs: true
@@ -20,7 +20,7 @@
/opt/cni/bin/calico-ipam:
file.managed:
- - source: https://github.com/projectcalico/calico-cni/releases/download/{{ pool.network.ipam.version }}/calico-ipam
+ - source: {{ pool.network.ipam.get('source', 'https://github.com/projectcalico/calico-cni/releases/download/') }}{{ pool.network.ipam.version }}/calico-ipam
- source_hash: md5={{ pool.network.ipam.hash }}
- mode: 751
- makedirs: true
@@ -47,17 +47,17 @@
- dir_mode: 755
- template: jinja
-/etc/systemd/system/calico-node.service:
- file.managed:
- - source: salt://kubernetes/files/calico/calico-node.service
- - user: root
- - group: root
+# /etc/systemd/system/calico-node.service:
+# file.managed:
+# - source: salt://kubernetes/files/calico/calico-node.service
+# - user: root
+# - group: root
-calico_node:
- service.running:
- - name: calico-node
- - enable: True
- - watch:
- - file: /etc/systemd/system/calico-node.service
+# calico_node:
+# service.running:
+# - name: calico-node
+# - enable: True
+# - watch:
+# - file: /etc/systemd/system/calico-node.service
{%- endif %}
\ No newline at end of file
diff --git a/kubernetes/pool/cni.sls b/kubernetes/pool/cni.sls
new file mode 100644
index 0000000..8c696bc
--- /dev/null
+++ b/kubernetes/pool/cni.sls
@@ -0,0 +1,14 @@
+{%- from "kubernetes/map.jinja" import pool with context %}
+{%- if pool.enabled %}
+
+cni-tar:
+ archive:
+ - extracted
+ - name: /opt/cni/bin
+ - source: https://github.com/containernetworking/cni/releases/download/{{ pool.cni.version }}/cni-{{ pool.cni.version }}.tgz
+ - tar_options: v
+ - source_hash: md5={{ pool.cni.hash }}
+ - archive_format: tar
+ - if_missing: /opt/cni/bin/loopback
+
+{%- endif %}
diff --git a/kubernetes/pool/init.sls b/kubernetes/pool/init.sls
index 81a7366..6f56c27 100644
--- a/kubernetes/pool/init.sls
+++ b/kubernetes/pool/init.sls
@@ -4,6 +4,7 @@
{%- if pool.network.engine == "calico" %}
- kubernetes.pool.calico
{%- endif %}
+- kubernetes.pool.cni
- kubernetes.pool.kubelet
{%- if pool.network.engine == "flannel" %}
- kubernetes.pool.flannel
diff --git a/kubernetes/pool/kube-proxy.sls b/kubernetes/pool/kube-proxy.sls
index d461af1..3e39c37 100644
--- a/kubernetes/pool/kube-proxy.sls
+++ b/kubernetes/pool/kube-proxy.sls
@@ -13,6 +13,8 @@
- makedirs: true
- dir_mode: 755
+{%- else %}
+
/etc/kubernetes/proxy.kubeconfig:
file.managed:
- source: salt://kubernetes/files/kube-proxy/proxy.kubeconfig
@@ -22,7 +24,13 @@
- mode: 644
- makedirs: true
-{%- else %}
+/etc/systemd/system/kube-proxy.service:
+ file.managed:
+ - source: salt://kubernetes/files/systemd/kube-proxy.service
+ - template: jinja
+ - user: root
+ - group: root
+ - mode: 644
/etc/default/kube-proxy:
file.managed:
diff --git a/kubernetes/pool/kubelet.sls b/kubernetes/pool/kubelet.sls
index bfacbb8..3fc3c2c 100644
--- a/kubernetes/pool/kubelet.sls
+++ b/kubernetes/pool/kubelet.sls
@@ -21,6 +21,13 @@
/etc/kubernetes/config:
file.absent
+manifest-dir-create:
+ file.directory:
+ - name: /etc/kubernetes/manifests
+ - user: root
+ - group: root
+ - mode: 0751
+
{%- if pool.host.label is defined %}
{%- for name,label in pool.host.label.iteritems() %}
@@ -48,6 +55,23 @@
{%- endif %}
+/usr/bin/hyperkube:
+ file.managed:
+ - source: {{ pool.hyperkube.get('source', {}).get('url', 'http://apt.tcpcloud.eu/kubernetes/bin/') }}{{ pool.version }}/hyperkube
+ - source_hash: md5={{ pool.hyperkube.hash }}
+ - mode: 751
+ - makedirs: true
+ - user: root
+ - group: root
+
+/etc/systemd/system/kubelet.service:
+ file.managed:
+ - source: salt://kubernetes/files/systemd/kubelet.service
+ - template: jinja
+ - user: root
+ - group: root
+ - mode: 644
+
kubelet_service:
service.running:
- name: kubelet
@@ -55,4 +79,4 @@
- watch:
- file: /etc/default/kubelet
-{%- endif %}
\ No newline at end of file
+{%- endif %}
diff --git a/metadata/service/master/cluster.yml b/metadata/service/master/cluster.yml
index fb4efd5..4076dd8 100644
--- a/metadata/service/master/cluster.yml
+++ b/metadata/service/master/cluster.yml
@@ -20,11 +20,6 @@
address: ${_param:cluster_local_address}
insecure_address: ${_param:cluster_local_address}
etcd:
- host: ${_param:cluster_local_address}
- registry: quay.io/coreos
- version: ${_param:etcd_version}
- token: ${_param:etcd_initial_token}
- name: ${linux:system:name}
members:
- host: ${_param:cluster_node01_address}
name: ${_param:cluster_node01_hostname}
diff --git a/metadata/service/master/single.yml b/metadata/service/master/single.yml
index 2e76deb..21f2eb7 100644
--- a/metadata/service/master/single.yml
+++ b/metadata/service/master/single.yml
@@ -20,11 +20,6 @@
address: ${_param:single_address}
insecure_address: 0.0.0.0
etcd:
- host: ${_param:single_address}
- registry: quay.io/coreos
- version: ${_param:etcd_version}
- token: ${_param:etcd_initial_token}
- name: ${linux:system:name}
members:
- host: ${_param:single_address}
name: ${linux:system:name}
diff --git a/metadata/service/pool/cluster.yml b/metadata/service/pool/cluster.yml
index 437fb34..ea0bc8a 100644
--- a/metadata/service/pool/cluster.yml
+++ b/metadata/service/pool/cluster.yml
@@ -41,4 +41,7 @@
- host: ${_param:cluster_node02_address}
port: 4001
- host: ${_param:cluster_node03_address}
- port: 4001
\ No newline at end of file
+ port: 4001
+ cni:
+ version: v0.3.0
+ hash: 58237532e1b2b1be1fb3d12457da85f5
\ No newline at end of file
diff --git a/metadata/service/pool/single.yml b/metadata/service/pool/single.yml
index 8c3f04b..f3cb7c0 100644
--- a/metadata/service/pool/single.yml
+++ b/metadata/service/pool/single.yml
@@ -34,4 +34,7 @@
etcd:
members:
- host: ${_param:master_address}
- port: 4001
\ No newline at end of file
+ port: 4001
+ cni:
+ version: v0.3.0
+ hash: 58237532e1b2b1be1fb3d12457da85f5
\ No newline at end of file
diff --git a/tests/pillar/master_cluster.sls b/tests/pillar/master_cluster.sls
index 670e56a..f951eb3 100644
--- a/tests/pillar/master_cluster.sls
+++ b/tests/pillar/master_cluster.sls
@@ -12,7 +12,7 @@
heapster_influxdb:
enabled: true
public_ip: 185.22.97.132
- ui:
+ dashboard:
enabled: true
public_ip: 185.22.97.131
admin:
@@ -26,12 +26,9 @@
ca: kubernetes
enabled: true
etcd:
- host: 127.0.0.1
members:
- host: 10.0.175.100
name: node040
- name: node040
- token: ca939ec9c2a17b0786f6d411fe019e9b
kubelet:
allow_privileged: true
network:
@@ -63,3 +60,5 @@
namespace:
kube-system:
enabled: True
+ hyperkube:
+ hash: hnsj0XqABgrSww7Nqo7UVTSZLJUt2XRd
diff --git a/tests/pillar/pool_cluster.sls b/tests/pillar/pool_cluster.sls
index b792326..f639738 100644
--- a/tests/pillar/pool_cluster.sls
+++ b/tests/pillar/pool_cluster.sls
@@ -44,3 +44,8 @@
port: 4001
- host: 127.0.0.1
port: 4001
+ hyperkube:
+ hash: hnsj0XqABgrSww7Nqo7UVTSZLJUt2XRd
+ cni:
+ version: v0.3.0
+ hash: 58237532e1b2b1be1fb3d12457da85f5
\ No newline at end of file