Merge pull request #13 from tomkukral/apiserver-count
Use the lease endpoint reconciler for apiserver endpoints
diff --git a/.kitchen.yml b/.kitchen.yml
index f939ef3..5bca6a9 100644
--- a/.kitchen.yml
+++ b/.kitchen.yml
@@ -53,7 +53,7 @@
platforms:
- name: <%= ENV['PLATFORM'] || 'saltstack-ubuntu-xenial-salt-stable' %>
driver_config:
- image: <%= ENV['PLATFORM'] || 'epcim/salt-formulas:saltstack-ubuntu-xenial-salt-stable' %>
+ image: <%= ENV['PLATFORM'] || 'epcim/salt:saltstack-ubuntu-xenial-salt-stable' %>
platform: ubuntu
suites:
diff --git a/.travis.yml b/.travis.yml
index 1c5139d..45062b7 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -18,22 +18,33 @@
- bundle install
env:
- - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2016.3 SUITE=master-cluster
- - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2017.7 SUITE=master-cluster
- - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2016.3 SUITE=pool-cluster
- - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2017.7 SUITE=pool-cluster
- - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2016.3 SUITE=master-contrail
- - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2017.7 SUITE=master-contrail
- - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2016.3 SUITE=master-contrail4-0
- - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2017.7 SUITE=master-contrail4-0
- - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2016.3 SUITE=pool-contrail4-0
- - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2017.7 SUITE=pool-contrail4-0
- - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-xenial-salt-2016.3 SUITE=common-storageclass
-# - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-bionic-salt-2017.7 SUITE=master-cluster
-# - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-bionic-salt-2017.7 SUITE=pool-cluster
-# - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-bionic-salt-2017.7 SUITE=master-contrail
-# - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-bionic-salt-2017.7 SUITE=master-contrail4-0
-# - PLATFORM=epcim/salt-formulas:saltstack-ubuntu-bionic-salt-2017.7 SUITE=pool-contrail4-0
+ - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2016.3 SUITE=master-cluster
+ - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2016.3 SUITE=pool-cluster
+ - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2016.3 SUITE=master-contrail
+ - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2016.3 SUITE=master-contrail4-0
+ - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2016.3 SUITE=pool-contrail4-0
+ - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2016.3 SUITE=common-storageclass
+ - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2017.7 SUITE=master-cluster
+ - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2017.7 SUITE=pool-cluster
+ - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2017.7 SUITE=master-contrail
+ - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2017.7 SUITE=master-contrail4-0
+ - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2017.7 SUITE=pool-contrail4-0
+ - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2018.3 SUITE=master-cluster
+ - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2018.3 SUITE=pool-cluster
+ - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2018.3 SUITE=master-contrail
+ - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2018.3 SUITE=master-contrail4-0
+ - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2018.3 SUITE=pool-contrail4-0
+ - PLATFORM=epcim/salt:saltstack-ubuntu-xenial-salt-2018.3 SUITE=common-storageclass
+# - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2017.7 SUITE=master-cluster
+# - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2017.7 SUITE=pool-cluster
+# - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2017.7 SUITE=master-contrail
+# - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2017.7 SUITE=master-contrail4-0
+# - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2017.7 SUITE=pool-contrail4-0
+# - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2018.3 SUITE=master-cluster
+# - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2018.3 SUITE=pool-cluster
+# - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2018.3 SUITE=master-contrail
+# - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2018.3 SUITE=master-contrail4-0
+# - PLATFORM=epcim/salt:saltstack-ubuntu-bionic-salt-2018.3 SUITE=pool-contrail4-0
before_script:
- set -o pipefail
diff --git a/README.rst b/README.rst
index 6b509c3..3b50609 100644
--- a/README.rst
+++ b/README.rst
@@ -70,7 +70,7 @@
virtlet:
enabled: true
namespace: kube-system
- image: mirantis/virtlet:v0.8.0
+ image: mirantis/virtlet:v1.0.0
hosts:
- cmp01
- cmp02
diff --git a/kubernetes/_common.sls b/kubernetes/_common.sls
index 11f561e..be7c2a4 100644
--- a/kubernetes/_common.sls
+++ b/kubernetes/_common.sls
@@ -243,3 +243,13 @@
{% endif %}
{% endif %}
+
+{%- if common.cni is defined %}
+/opt/cni/bin:
+ archive.extracted:
+ - source: {{ common.cni.plugins.source }}
+ - source_hash: md5={{ common.cni.plugins.hash }}
+ - tar_options: xzf
+ - archive_format: tar
+ - if_missing: /opt/cni/bin/host-local
+{%- endif %}
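
Note: the new state in kubernetes/_common.sls fetches the upstream CNI plugin binaries from a release tarball whenever `common.cni` is set in pillar, replacing the kubernetes/pool/cni.sls state (deleted later in this diff) that copied plugins out of the hyperkube image. A minimal pillar sketch, assuming the formula's usual kubernetes:common nesting; the source and hash mirror the defaults added to metadata/service/common.yml below:

    kubernetes:
      common:
        cni:
          plugins:
            source: https://github.com/containernetworking/plugins/releases/download/v0.7.1/cni-plugins-amd64-v0.7.1.tgz
            hash: e7e5751d43456f69ea1ed043647e0377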
diff --git a/kubernetes/files/calico/calico.conf b/kubernetes/files/calico/calico.conf
index 81e383f..0a231aa 100644
--- a/kubernetes/files/calico/calico.conf
+++ b/kubernetes/files/calico/calico.conf
@@ -3,7 +3,11 @@
{
"nodeName": "{{ pool.host.name }}{% if pool.host.get('domain') %}.{{ pool.host.domain }}{%- endif %}",
"name": "calico-k8s-network",
+ {%- if pool.network.get('genie', {}).get('enabled', False) %}
"cniVersion": "0.3.0",
+ {%- else %}
+ "cniVersion": "0.2.0",
+ {%- endif %}
"type": "calico",
"etcd_endpoints": "{% for member in pool.network.calico.etcd.members %}http{% if pool.network.calico.etcd.get('ssl', {}).get('enabled') %}s{% endif %}://{{ member.host }}:{{ member.port }}{% if not loop.last %},{% endif %}{% endfor %}",
{%- if pool.network.calico.etcd.get('ssl', {}).get('enabled') %}
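
Note: this template (and opencontrail.conf below) now emits "cniVersion": "0.3.0" only when CNI-Genie is enabled, falling back to "0.2.0" otherwise, presumably because Genie expects the newer config format. A pillar sketch of the toggle, assuming the formula's usual kubernetes:pool nesting:

    kubernetes:
      pool:
        network:
          genie:
            enabled: true   # renders "cniVersion": "0.3.0" in the generated CNI configs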
diff --git a/kubernetes/files/calico/calicoctl.cfg.master b/kubernetes/files/calico/calicoctl.cfg.master
index 39e40d2..c0fd0ad 100644
--- a/kubernetes/files/calico/calicoctl.cfg.master
+++ b/kubernetes/files/calico/calicoctl.cfg.master
@@ -1,9 +1,9 @@
{%- from "kubernetes/map.jinja" import master with context %}
-apiVersion: projectcalico.org/v3
-kind: CalicoAPIConfig
+apiVersion: v1
+kind: calicoApiConfig
metadata:
spec:
- datastoreType: "etcdv3"
+ datastoreType: "etcdv2"
etcdEndpoints: {% for member in master.network.calico.etcd.members %}http{% if master.network.calico.etcd.get('ssl', {}).get('enabled') %}s{% endif %}://{{ member.host }}:{{ member.port }}{% if not loop.last %},{% endif %}{% endfor %}
{%- if master.network.calico.etcd.get('ssl', {}).get('enabled') %}
etcdKeyFile: /var/lib/etcd/etcd-client.pem
diff --git a/kubernetes/files/calico/calicoctl.cfg.pool b/kubernetes/files/calico/calicoctl.cfg.pool
index b31ba68..1d5f9f8 100644
--- a/kubernetes/files/calico/calicoctl.cfg.pool
+++ b/kubernetes/files/calico/calicoctl.cfg.pool
@@ -1,9 +1,9 @@
{%- from "kubernetes/map.jinja" import pool with context %}
-apiVersion: projectcalico.org/v3
-kind: CalicoAPIConfig
+apiVersion: v1
+kind: calicoApiConfig
metadata:
spec:
- datastoreType: "etcdv3"
+ datastoreType: "etcdv2"
etcdEndpoints: {% for member in pool.network.calico.etcd.members %}http{% if pool.network.calico.etcd.get('ssl', {}).get('enabled') %}s{% endif %}://{{ member.host }}:{{ member.port }}{% if not loop.last %},{% endif %}{% endfor %}
{%- if pool.network.calico.etcd.get('ssl', {}).get('enabled') %}
etcdKeyFile: /var/lib/etcd/etcd-client.pem
diff --git a/kubernetes/files/dockershim/default.master b/kubernetes/files/dockershim/default.master
index c5f3174..f224475 100644
--- a/kubernetes/files/dockershim/default.master
+++ b/kubernetes/files/dockershim/default.master
@@ -14,6 +14,9 @@
--hostname-override={{ master.host.name }} \
--v={{ master.get('verbosity', 2) }} \
--node-labels=node-role.kubernetes.io/master=true \
+{%- if common.hyperkube.pause_image is defined %}
+--pod-infra-container-image={{ common.hyperkube.pause_image }} \
+{%- endif %}
{%- if master.get('unschedulable', 'false') %}
--register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
{%- endif %}
diff --git a/kubernetes/files/dockershim/default.pool b/kubernetes/files/dockershim/default.pool
index 308b5d6..1cbbbd7 100644
--- a/kubernetes/files/dockershim/default.pool
+++ b/kubernetes/files/dockershim/default.pool
@@ -13,6 +13,9 @@
--cluster_domain={{ common.addons.dns.domain|replace('_', '-') }} \
--hostname-override={{ pool.host.name }} \
--v={{ pool.get('verbosity', 2) }} \
+{%- if common.hyperkube.pause_image is defined %}
+--pod-infra-container-image={{ common.hyperkube.pause_image }} \
+{%- endif %}
{%- if pillar.kubernetes.master is defined %}
--node-labels=node-role.kubernetes.io/master=true \
{%- if pillar.kubernetes.get('master', {}).get('unschedulable', 'false') %}
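
Note: both dockershim templates (and the kubelet templates further down) gain an optional --pod-infra-container-image flag driven by common.hyperkube.pause_image, so the pause container can be pinned, e.g. to an internal registry. A hedged pillar sketch; the image reference is purely illustrative and not a default introduced by this change:

    kubernetes:
      common:
        hyperkube:
          pause_image: registry.example.local/kubernetes/pause-amd64:3.1   # illustrative value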
diff --git a/kubernetes/files/flannel/flannel.conflist b/kubernetes/files/flannel/flannel.conflist
index 583e5e6..b7cdd13 100644
--- a/kubernetes/files/flannel/flannel.conflist
+++ b/kubernetes/files/flannel/flannel.conflist
@@ -12,6 +12,7 @@
},
{
"type": "portmap",
+ "cniVersion": "0.3.0",
"capabilities": {
"portMappings": true
}
diff --git a/kubernetes/files/genie/genie.conf b/kubernetes/files/genie/genie.conf
index 819ee56..4f4bfa1 100644
--- a/kubernetes/files/genie/genie.conf
+++ b/kubernetes/files/genie/genie.conf
@@ -5,6 +5,9 @@
"log_level": "info",
"datastore_type": "kubernetes",
"hostname": "{{ hostname }}",
+ {%- if default_plugin is defined %}
+ "default_plugin": "{{ default_plugin }}",
+ {%- endif %}
"policy": {
"type": "k8s"
},
diff --git a/kubernetes/files/kube-addons/calico-policy/calico-policy-controller.yml b/kubernetes/files/kube-addons/calico-policy/calico-policy-controller.yml
index 19611f2..af68e14 100644
--- a/kubernetes/files/kube-addons/calico-policy/calico-policy-controller.yml
+++ b/kubernetes/files/kube-addons/calico-policy/calico-policy-controller.yml
@@ -1,36 +1,58 @@
{%- from "kubernetes/map.jinja" import common with context -%}
{%- from "kubernetes/map.jinja" import master with context -%}
+# This manifest deploys the Calico Kubernetes controllers.
+# See https://github.com/projectcalico/kube-controllers
apiVersion: extensions/v1beta1
-kind: ReplicaSet
+kind: Deployment
metadata:
- name: calico-policy-controller
+ name: calico-kube-controllers
namespace: {{ common.addons.calico_policy.namespace }}
labels:
- k8s-app: calico-policy
- kubernetes.io/cluster-service: "true"
+ k8s-app: calico-kube-controllers
addonmanager.kubernetes.io/mode: Reconcile
spec:
+ # The controllers can only have a single active instance.
replicas: 1
selector:
matchLabels:
- k8s-app: calico-policy
+ k8s-app: calico-kube-controllers
+ strategy:
+ type: Recreate
template:
metadata:
- name: calico-policy-controller
+ name: calico-kube-controllers
namespace: {{ common.addons.calico_policy.namespace }}
labels:
- k8s-app: calico-policy
+ k8s-app: calico-kube-controllers
annotations:
+ # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
+ # reserves resources for critical add-on pods so that they can be rescheduled after
+ # a failure. This annotation works in tandem with the toleration below.
+ # Note. Rescheduler is deprecated in k8s v1.10 and is to be removed in k8s v1.11.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
{%- if common.addons.calico_policy.cni is defined %}
cni: {{ common.addons.calico_policy.cni }}
{%- endif %}
spec:
+ # The controllers must run in the host network namespace so that
+ # it isn't governed by policy that would prevent it from working.
hostNetwork: true
tolerations:
- - key: node-role.kubernetes.io/master
- effect: NoSchedule
+ # this taint is set by all kubelets running `--cloud-provider=external`
+ # so we should tolerate it to schedule the calico pods
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
+ # This, along with the annotation above marks this pod as a critical add-on.
+ # Note. Rescheduler is deprecated in k8s v1.10 and is to be removed in k8s v1.11.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ serviceAccountName: calico-kube-controllers
containers:
- - name: calico-policy-controller
+ - name: calico-kube-controllers
image: {{ common.addons.calico_policy.image }}
imagePullPolicy: IfNotPresent
resources:
@@ -41,25 +63,25 @@
cpu: 30m
memory: 64M
env:
+ # The list of etcd nodes in the cluster.
- name: ETCD_ENDPOINTS
value: "{% for member in master.network.calico.etcd.members %}http{% if master.network.calico.etcd.get('ssl', {}).get('enabled') %}s{% endif %}://{{ member.host }}:{{ member.port }}{% if not loop.last %},{% endif %}{% endfor %}"
+ # CA certificate, client certificate, client key files for accessing the etcd cluster.
- name: ETCD_CA_CERT_FILE
value: "/var/lib/etcd/ca.pem"
- name: ETCD_CERT_FILE
value: "/var/lib/etcd/etcd-client.pem"
- name: ETCD_KEY_FILE
value: "/var/lib/etcd/etcd-client.pem"
- # Location of the Kubernetes API - this shouldn't need to be
- # changed so long as it is used in conjunction with
- # CONFIGURE_ETC_HOSTS="true".
- - name: K8S_API
- value: "https://kubernetes.default"
- # Configure /etc/hosts within the container to resolve
- # the kubernetes.default Service to the correct clusterIP
- # using the environment provided by the kubelet.
- # This removes the need for KubeDNS to resolve the Service.
- - name: CONFIGURE_ETC_HOSTS
- value: "true"
+ # Which controllers to run.
+ - name: ENABLED_CONTROLLERS
+ value: "policy,profile,workloadendpoint,node"
+ # Minimum log level to be displayed.
+ - name: LOG_LEVEL
+ value: "info"
+ # Period to perform reconciliation with the Calico datastore. Default is 5m.
+ - name: RECONCILER_PERIOD
+ value: "1m"
volumeMounts:
- mountPath: /var/lib/etcd/
name: etcd-certs
@@ -69,3 +91,51 @@
path: /var/lib/etcd
name: etcd-certs
+---
+
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: calico-kube-controllers
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: calico-kube-controllers
+subjects:
+- kind: ServiceAccount
+ name: calico-kube-controllers
+ namespace: {{ common.addons.calico_policy.namespace }}
+
+---
+
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: calico-kube-controllers
+rules:
+ - apiGroups:
+ - ""
+ - extensions
+ resources:
+ - pods
+ - namespaces
+ - networkpolicies
+ - nodes
+ verbs:
+ - watch
+ - list
+ - apiGroups:
+ - networking.k8s.io
+ resources:
+ - networkpolicies
+ verbs:
+ - watch
+ - list
+
+---
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: calico-kube-controllers
+ namespace: {{ common.addons.calico_policy.namespace }}
diff --git a/kubernetes/files/kube-addons/contrail/contrail.yaml b/kubernetes/files/kube-addons/contrail/contrail.yaml
index 7ef4e6f..18cd5cf 100644
--- a/kubernetes/files/kube-addons/contrail/contrail.yaml
+++ b/kubernetes/files/kube-addons/contrail/contrail.yaml
@@ -30,9 +30,9 @@
- /bin/sh
- -c
- >
- systemctl stop rabbitmq-server.service;
+ service rabbitmq-server stop;
systemctl disable rabbitmq-server.service;
- systemctl stop redis-server.service;
+ service redis-server stop;
systemctl disable redis-server.service;
volumeMounts:
diff --git a/kubernetes/files/kube-addons/flannel/flannel.yml b/kubernetes/files/kube-addons/flannel/flannel.yml
index 58d0217..6df6114 100644
--- a/kubernetes/files/kube-addons/flannel/flannel.yml
+++ b/kubernetes/files/kube-addons/flannel/flannel.yml
@@ -101,6 +101,9 @@
args:
- --ip-masq
- --kube-subnet-mgr
+ {%- for option, value in master.network.flannel.get('flanneld_options', {}).items() %}
+ - --{{ option }}={{ value }}
+ {%- endfor %}
resources:
requests:
cpu: "100m"
diff --git a/kubernetes/files/kube-addons/virtlet/virtlet-ds.yml b/kubernetes/files/kube-addons/virtlet/virtlet-ds.yml
index 17a73ba..a222703 100644
--- a/kubernetes/files/kube-addons/virtlet/virtlet-ds.yml
+++ b/kubernetes/files/kube-addons/virtlet/virtlet-ds.yml
@@ -5,6 +5,7 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
+ creationTimestamp: null
name: virtlet
namespace: {{ common.addons.virtlet.namespace }}
labels:
@@ -12,21 +13,11 @@
spec:
template:
metadata:
- name: virtlet
+ creationTimestamp: null
labels:
runtime: virtlet
+ name: virtlet
spec:
- hostNetwork: true
- {%- if master.network.get('opencontrail', {}).get('enabled', False) %}
- dnsPolicy: ClusterFirstWithHostNet
- {%- endif %}
- # hostPID is true to (1) enable VMs to survive virtlet container restart
- # (to be checked) and (2) to enable the use of nsenter in init container
- hostPID: true
- # bootstrap procedure needs to create a configmap in kube-system namespace
- serviceAccountName: virtlet
-
- # only run Virtlet pods on the nodes with extraRuntime=virtlet label
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
@@ -36,50 +27,34 @@
operator: In
values:
- virtlet
-
- initContainers:
- # The init container copies virtlet's flexvolume driver
- # to the default kubelet plugin dir and ensures that
- # the directories needed by libvirt & virtlet exist on the host
- - name: prepare-node
- image: {{ common.addons.virtlet.image }}
- imagePullPolicy: IfNotPresent
- command:
- - /prepare-node.sh
- volumeMounts:
- - name: k8s-flexvolume-plugins-dir
- mountPath: /kubelet-volume-plugins
- - name: run
- {%- if version|float >= 1.8 %}
- mountPropagation: Bidirectional
- {%- endif %}
- mountPath: /run
- - name: dockersock
- mountPath: /var/run/docker.sock
- - name: log
- mountPath: /hostlog
- # for ensuring that /var/lib/libvirt/images exists on node
- - name: var-lib
- mountPath: /host-var-lib
- - name: dev
- mountPath: /dev
- securityContext:
- privileged: true
+ containers:
+ - command:
+ - /libvirt.sh
env:
+ - name: VIRTLET_SRIOV_SUPPORT
+ valueFrom:
+ configMapKeyRef:
+ key: sriov_support
+ name: virtlet-config
+ optional: true
- name: VIRTLET_DISABLE_KVM
valueFrom:
configMapKeyRef:
- name: virtlet-config
key: disable_kvm
+ name: virtlet-config
optional: true
-
- containers:
- - name: libvirt
image: {{ common.addons.virtlet.image }}
- # In case we inject local virtlet image we want to use it not officially available one
imagePullPolicy: IfNotPresent
- command:
- - /libvirt.sh
+ name: libvirt
+ readinessProbe:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - socat - UNIX:/var/run/libvirt/libvirt-sock-ro </dev/null
+ resources: {}
+ securityContext:
+ privileged: true
volumeMounts:
- mountPath: /sys/fs/cgroup
name: cgroup
@@ -90,43 +65,89 @@
name: boot
readOnly: true
- mountPath: /run
- name: run
- {%- if version|float >= 1.8 %}
mountPropagation: Bidirectional
- {%- endif %}
+ name: run
- mountPath: /var/lib/virtlet
name: virtlet
- mountPath: /var/lib/libvirt
name: libvirt
- mountPath: /var/run/libvirt
name: libvirt-sockets
- # the log dir is needed here because otherwise libvirt will produce errors
- # like this:
- # Unable to pre-create chardev file '/var/log/vms/afd75bbb-8e97-11e7-9561-02420ac00002/cirros-vm_0.log': No such file or directory
- - name: vms-log
- mountPath: /var/log/vms
- - name: dev
- mountPath: /dev
- securityContext:
- privileged: true
- env:
+ - mountPath: /var/log/vms
+ name: vms-log
+ - mountPath: /var/log/libvirt
+ name: libvirt-log
+ - mountPath: /dev
+ name: dev
+ - env:
- name: VIRTLET_DISABLE_KVM
valueFrom:
configMapKeyRef:
- name: virtlet-config
key: disable_kvm
+ name: virtlet-config
optional: true
- - name: virtlet
+ - name: VIRTLET_DOWNLOAD_PROTOCOL
+ valueFrom:
+ configMapKeyRef:
+ key: download_protocol
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_LOGLEVEL
+ valueFrom:
+ configMapKeyRef:
+ key: loglevel
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_CALICO_SUBNET
+ valueFrom:
+ configMapKeyRef:
+ key: calico-subnet
+ name: virtlet-config
+ optional: true
+ - name: IMAGE_REGEXP_TRANSLATION
+ valueFrom:
+ configMapKeyRef:
+ key: image_regexp_translation
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_DISABLE_LOGGING
+ valueFrom:
+ configMapKeyRef:
+ key: disable_logging
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_SRIOV_SUPPORT
+ valueFrom:
+ configMapKeyRef:
+ key: sriov_support
+ name: virtlet-config
+ optional: true
+ - name: VIRTLET_RAW_DEVICES
+ valueFrom:
+ configMapKeyRef:
+ key: raw_devices
+ name: virtlet-config
+ optional: true
+ - name: IMAGE_TRANSLATIONS_DIR
+ value: /etc/virtlet/images
+ - name: KUBERNETES_POD_LOGS
+ value: /kubernetes-log
image: {{ common.addons.virtlet.image }}
- # In case we inject local virtlet image we want to use it not officially available one
imagePullPolicy: IfNotPresent
+ name: virtlet
+ readinessProbe:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - socat - UNIX:/run/virtlet.sock </dev/null
+ resources: {}
+ securityContext:
+ privileged: true
volumeMounts:
- mountPath: /run
- name: run
- {%- if version|float >= 1.8 %}
mountPropagation: Bidirectional
- {%- endif %}
- # /boot and /lib/modules are required by supermin
+ name: run
- mountPath: /lib/modules
name: modules
readOnly: true
@@ -142,87 +163,66 @@
- mountPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
name: k8s-flexvolume-plugins-dir
- mountPath: /var/lib/kubelet/pods
- name: k8s-pods-dir
- {%- if version|float >= 1.8 %}
mountPropagation: Bidirectional
- {%- endif %}
- - name: vms-log
- mountPath: /var/log/vms
- {%- if master.network.get('opencontrail', {}).get('enabled', False) %}
- - name: contrail-log
- mountPath: /var/log/contrail
- - name: contrail-data
- mountPath: /var/lib/contrail
- {%- endif %}
+ name: k8s-pods-dir
+ - mountPath: /var/log/vms
+ name: vms-log
- mountPath: /etc/virtlet/images
name: image-name-translations
- - name: pods-log
- mountPath: /kubernetes-log
- securityContext:
- privileged: true
- env:
- - name: VIRTLET_DISABLE_KVM
- valueFrom:
- configMapKeyRef:
- name: virtlet-config
- key: disable_kvm
- optional: true
- - name: VIRTLET_DOWNLOAD_PROTOCOL
- valueFrom:
- configMapKeyRef:
- name: virtlet-config
- key: download_protocol
- optional: true
- - name: VIRTLET_LOGLEVEL
- valueFrom:
- configMapKeyRef:
- name: virtlet-config
- key: loglevel
- optional: true
- - name: VIRTLET_CALICO_SUBNET
- valueFrom:
- configMapKeyRef:
- name: virtlet-config
- key: calico-subnet
- optional: true
- - name: IMAGE_REGEXP_TRANSLATION
- valueFrom:
- configMapKeyRef:
- name: virtlet-config
- key: image_regexp_translation
- optional: true
- - name: VIRTLET_DISABLE_LOGGING
- valueFrom:
- configMapKeyRef:
- name: virtlet-config
- key: disable_logging
- optional: true
- - name: VIRTLET_SRIOV_SUPPORT
- valueFrom:
- configMapKeyRef:
- name: virtlet-config
- key: sriov_support
- optional: true
- - name: IMAGE_TRANSLATIONS_DIR
- value: /etc/virtlet/images
- - name: KUBERNETES_POD_LOGS
- value: "/kubernetes-log"
- - name: vms
+ - mountPath: /kubernetes-log
+ name: pods-log
+ - command:
+ - /vms.sh
image: {{ common.addons.virtlet.image }}
imagePullPolicy: IfNotPresent
- command:
- - /vms.sh
+ name: vms
+ resources: {}
volumeMounts:
- mountPath: /var/lib/virtlet
name: virtlet
- mountPath: /var/lib/libvirt
name: libvirt
- - name: vms-log
- mountPath: /var/log/vms
- - name: dev
- mountPath: /dev
+ - mountPath: /var/log/vms
+ name: vms-log
+ - mountPath: /dev
+ name: dev
+ - mountPath: /lib/modules
+ name: modules
+ dnsPolicy: ClusterFirstWithHostNet
+ hostNetwork: true
+ hostPID: true
+ initContainers:
+ - command:
+ - /prepare-node.sh
+ env:
+ - name: VIRTLET_DISABLE_KVM
+ valueFrom:
+ configMapKeyRef:
+ key: disable_kvm
+ name: virtlet-config
+ optional: true
+ image: {{ common.addons.virtlet.image }}
+ imagePullPolicy: IfNotPresent
+ name: prepare-node
+ resources: {}
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: /kubelet-volume-plugins
+ name: k8s-flexvolume-plugins-dir
+ - mountPath: /run
+ mountPropagation: Bidirectional
+ name: run
+ - mountPath: /var/run/docker.sock
+ name: dockersock
+ - mountPath: /hostlog
+ name: log
+ - mountPath: /host-var-lib
+ name: var-lib
+ - mountPath: /dev
+ name: dev
+ serviceAccountName: virtlet
volumes:
- # /dev is needed for host raw device access
- hostPath:
path: /dev
name: dev
@@ -238,9 +238,6 @@
- hostPath:
path: /run
name: run
- # TODO: don't hardcode docker socket location here
- # This will require CRI proxy installation to run
- # in host mount namespace.
- hostPath:
path: /var/run/docker.sock
name: dockersock
@@ -266,6 +263,9 @@
path: /var/log/virtlet/vms
name: vms-log
- hostPath:
+ path: /var/log/libvirt
+ name: libvirt-log
+ - hostPath:
path: /var/run/libvirt
name: libvirt-sockets
- hostPath:
@@ -277,22 +277,36 @@
- configMap:
name: virtlet-image-translations
name: image-name-translations
- {%- if master.network.get('opencontrail', {}).get('enabled', False) %}
- - hostPath:
- path: /var/log/contrail
- name: contrail-log
- - hostPath:
- path: /var/lib/contrail
- name: contrail-data
- - hostPath:
- path: /virtlet
- name: virtlet-bin
- {%- endif %}
+ updateStrategy: {}
+status:
+ currentNumberScheduled: 0
+ desiredNumberScheduled: 0
+ numberMisscheduled: 0
+ numberReady: 0
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ creationTimestamp: null
+ name: virtlet
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: virtlet
+subjects:
+- kind: ServiceAccount
+ name: virtlet
+ namespace: {{ common.addons.virtlet.namespace }}
+
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
- name: configmap-reader
+ creationTimestamp: null
+ name: virtlet
namespace: {{ common.addons.virtlet.namespace }}
labels:
addonmanager.kubernetes.io/mode: Reconcile
@@ -302,13 +316,48 @@
resources:
- configmaps
verbs:
+ - create
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ creationTimestamp: null
+ name: configmap-reader
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
- get
- list
- watch
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ creationTimestamp: null
+ name: virtlet-userdata-reader
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - secrets
+ verbs:
+ - get
+
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
+ creationTimestamp: null
name: kubelet-node-binding
labels:
addonmanager.kubernetes.io/mode: Reconcile
@@ -320,102 +369,12 @@
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:nodes
- namespace: {{ common.addons.virtlet.namespace }}
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
- name: virtlet-crd
- namespace: {{ common.addons.virtlet.namespace }}
- labels:
- addonmanager.kubernetes.io/mode: Reconcile
-rules:
- - apiGroups:
- - "apiextensions.k8s.io"
- resources:
- - customresourcedefinitions
- verbs:
- - create
- - apiGroups:
- - "virtlet.k8s"
- resources:
- - virtletimagemappings
- verbs:
- - list
- - get
+
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
- name: virtlet-crd
- labels:
- addonmanager.kubernetes.io/mode: Reconcile
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: virtlet-crd
-subjects:
-- kind: ServiceAccount
- name: virtlet
- namespace: {{ common.addons.virtlet.namespace }}
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: virtlet
- namespace: {{ common.addons.virtlet.namespace }}
- labels:
- addonmanager.kubernetes.io/mode: Reconcile
----
-apiVersion: rbac.authorization.k8s.io/v1beta1
-kind: ClusterRoleBinding
-metadata:
- name: virtlet
- labels:
- addonmanager.kubernetes.io/mode: Reconcile
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: virtlet
-subjects:
-- kind: ServiceAccount
- name: virtlet
- namespace: {{ common.addons.virtlet.namespace }}
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
- name: virtlet
- namespace: {{ common.addons.virtlet.namespace }}
- labels:
- addonmanager.kubernetes.io/mode: Reconcile
-rules:
- - apiGroups:
- - ""
- resources:
- - configmaps
- verbs:
- - create
----
-apiVersion: rbac.authorization.k8s.io/v1beta1
-kind: ClusterRole
-metadata:
- name: virtlet-userdata-reader
- namespace: {{ common.addons.virtlet.namespace }}
- labels:
- addonmanager.kubernetes.io/mode: Reconcile
-rules:
-- apiGroups:
- - ""
- resources:
- - configmaps
- - secrets
- verbs:
- - get
----
-apiVersion: rbac.authorization.k8s.io/v1beta1
-kind: ClusterRoleBinding
-metadata:
+ creationTimestamp: null
name: vm-userdata-binding
labels:
addonmanager.kubernetes.io/mode: Reconcile
@@ -427,6 +386,54 @@
- kind: ServiceAccount
name: virtlet
namespace: {{ common.addons.virtlet.namespace }}
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ creationTimestamp: null
+ name: virtlet-crd
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
+rules:
+- apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - create
+- apiGroups:
+ - virtlet.k8s
+ resources:
+ - virtletimagemappings
+ verbs:
+ - list
+ - get
+
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ creationTimestamp: null
+ name: virtlet-crd
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: virtlet-crd
+subjects:
+- kind: ServiceAccount
+ name: virtlet
+ namespace: {{ common.addons.virtlet.namespace }}
+
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ creationTimestamp: null
+ name: virtlet
+ namespace: {{ common.addons.virtlet.namespace }}
---
apiVersion: v1
data:
diff --git a/kubernetes/files/kubelet/default.master b/kubernetes/files/kubelet/default.master
index 2740e53..b3f0e41 100644
--- a/kubernetes/files/kubelet/default.master
+++ b/kubernetes/files/kubelet/default.master
@@ -13,7 +13,10 @@
--hostname-override={{ master.host.name }} \
--v={{ master.get('verbosity', 2) }} \
--node-labels=node-role.kubernetes.io/master=true \
-{%- if version|float >= 1.8 %}
+{%- if common.hyperkube.pause_image is defined %}
+--pod-infra-container-image={{ common.hyperkube.pause_image }} \
+{%- endif %}
+{%- if salt['pkg.version_cmp'](version,'1.8') >= 0 %}
--fail-swap-on={{ master.kubelet.fail_on_swap }} \
{%- if common.addons.get('virtlet', {}).get('enabled') %}
--feature-gates=MountPropagation=true \
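
Note: the guards switch from version|float to salt['pkg.version_cmp'] because Jinja's float filter parses "1.10" as 1.1, so the old >= 1.8 checks go false on Kubernetes 1.10 and newer; pkg.version_cmp compares dotted versions component-wise and returns -1/0/1. A minimal illustration of the two guards (not part of the formula itself):

    {#- Old guard: "1.10"|float == 1.1, so this is False once the cluster runs Kubernetes 1.10+ -#}
    {%- if version|float >= 1.8 %}--fail-swap-on={{ master.kubelet.fail_on_swap }} \{%- endif %}
    {#- New guard: pkg.version_cmp("1.10", "1.8") returns 1 (greater), so the flag is still emitted -#}
    {%- if salt['pkg.version_cmp'](version, '1.8') >= 0 %}--fail-swap-on={{ master.kubelet.fail_on_swap }} \{%- endif %}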
diff --git a/kubernetes/files/kubelet/default.pool b/kubernetes/files/kubelet/default.pool
index 7e2ce2f..06f2cf4 100644
--- a/kubernetes/files/kubelet/default.pool
+++ b/kubernetes/files/kubelet/default.pool
@@ -12,7 +12,10 @@
--cluster_domain={{ common.addons.dns.domain|replace('_', '-') }} \
--hostname-override={{ pool.host.name }} \
--v={{ pool.get('verbosity', 2) }} \
-{%- if version|float >= 1.8 %}
+{%- if common.hyperkube.pause_image is defined %}
+--pod-infra-container-image={{ common.hyperkube.pause_image }} \
+{%- endif %}
+{%- if salt['pkg.version_cmp'](version,'1.8') >= 0 %}
--fail-swap-on={{ pool.kubelet.fail_on_swap }} \
{%- if common.addons.get('virtlet', {}).get('enabled') %}
--feature-gates=MountPropagation=true \
@@ -26,8 +29,10 @@
{%- else %}
--node-labels=node-role.kubernetes.io/node=true \
{%- endif %}
+{%- if pool.network.opencontrail is not defined or ( pool.network.opencontrail is defined and pool.network.opencontrail.enabled == True ) %}
--network-plugin=cni \
--cni-bin-dir={{ pool.apiserver.get('cni_bin_dir', '/opt/cni/bin') }} \
+{%- endif %}
--file-check-frequency={{ pool.kubelet.frequency }} \
{%- if common.get('cloudprovider', {}).get('enabled') %}
--cloud-provider={{ common.cloudprovider.provider }} \
@@ -37,7 +42,7 @@
{%- endif %}
{%- if common.addons.get('virtlet', {}).get('enabled') %}
--container-runtime={{ pool.get('container-runtime', 'remote') }} \
-{%- if version|float < 1.7 %}
+{%- if salt['pkg.version_cmp'](version,'1.7') < 0 %}
--container-runtime-endpoint={{ pool.get('container-runtime-endpoint', '/var/run/criproxy.sock') }} \
--image-service-endpoint={{ pool.get('image-service-endpoint', '/var/run/criproxy.sock') }} \
{%- else %}
diff --git a/kubernetes/files/manifest/kube-apiserver.manifest b/kubernetes/files/manifest/kube-apiserver.manifest
index 6a5aa5e..e22ac90 100644
--- a/kubernetes/files/manifest/kube-apiserver.manifest
+++ b/kubernetes/files/manifest/kube-apiserver.manifest
@@ -46,7 +46,7 @@
--v={{ master.get('verbosity', 2) }}
--allow-privileged=True
{%- if common.addons.get('virtlet', {}).get('enabled') %}
- {%- if version|float >= 1.8 %}
+ {%- if salt['pkg.version_cmp'](version,'1.8') >= 0 %}
--feature-gates=MountPropagation=true
{%- endif %}
{%- if version|float >= 1.9 %}
diff --git a/kubernetes/files/opencontrail/opencontrail.conf b/kubernetes/files/opencontrail/opencontrail.conf
index 290cf2c..71f5881 100644
--- a/kubernetes/files/opencontrail/opencontrail.conf
+++ b/kubernetes/files/opencontrail/opencontrail.conf
@@ -1,6 +1,10 @@
{%- from "kubernetes/map.jinja" import pool with context %}
{
+ {%- if pool.network.get('genie', {}).get('enabled', False) %}
"cniVersion": "0.3.0",
+ {%- else %}
+ "cniVersion": "0.2.0",
+ {%- endif %}
"contrail" : {
"apiserver": {"address": "{{ pool.network.opencontrail.config.api.host }}", "port": {{ pool.network.opencontrail.config.api.get('port', 8082) }}, "default-domain": "default-domain"},
"vrouter" : {
diff --git a/kubernetes/files/systemd/criproxy.service b/kubernetes/files/systemd/criproxy.service
index 9d89b4e..894070e 100644
--- a/kubernetes/files/systemd/criproxy.service
+++ b/kubernetes/files/systemd/criproxy.service
@@ -15,7 +15,7 @@
-listen /var/run/criproxy.sock \
-v 3 \
-alsologtostderr \
-{%- if version|float < 1.8 %}
+{%- if salt['pkg.version_cmp'](version,'1.8') < 0 %}
-apiVersion {{ version }} \
{%- endif %}
{%- if pool.get('enabled', False) %}
diff --git a/kubernetes/map.jinja b/kubernetes/map.jinja
index e243a77..99e33cc 100644
--- a/kubernetes/map.jinja
+++ b/kubernetes/map.jinja
@@ -1,4 +1,4 @@
-{% set version = salt['cmd.shell']("(hyperkube --version 2> /dev/null || echo '0.0') | sed -e 's/-.*//g' -e 's/v//g' -e 's/Kubernetes //g' | awk -F'.' '{print $1 \".\" $2}'") %}
+{% set version = salt['cmd.shell']("(hyperkube --version kubelet 2> /dev/null || echo '0.0') | sed -e 's/-.*//g' -e 's/v//g' -e 's/Kubernetes //g' | awk -F'.' '{print $1 \".\" $2}'") %}
{% set common = salt['grains.filter_by']({
'Debian': {
diff --git a/kubernetes/master/controller.sls b/kubernetes/master/controller.sls
index 9fd08d4..a735a6b 100644
--- a/kubernetes/master/controller.sls
+++ b/kubernetes/master/controller.sls
@@ -144,7 +144,7 @@
{%- endif %}
{%- endif %}
{%- if common.addons.get('virtlet', {}).get('enabled') %}
-{%- if version|float >= 1.8 %}
+{%- if salt['pkg.version_cmp'](version,'1.8') >= 0 %}
--feature-gates=MountPropagation=true
{%- endif %}
{%- if version|float >= 1.9 %}
diff --git a/kubernetes/master/genie.sls b/kubernetes/master/genie.sls
index 4e31c7c..5851c22 100644
--- a/kubernetes/master/genie.sls
+++ b/kubernetes/master/genie.sls
@@ -14,6 +14,9 @@
- template: jinja
- defaults:
hostname: {{ master.host.name }}{% if master.host.get('domain') %}.{{ master.host.domain }}{%- endif %}
+ {%- if master.network.genie.default_plugin is defined %}
+ default_plugin: {{ master.network.genie.default_plugin }}
+ {%- endif %}
/tmp/genie/:
file.directory:
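
Note: the master and pool genie states now pass an optional default_plugin through to genie.conf, presumably the plugin Genie falls back to when a pod does not request a network explicitly. A pillar sketch, assuming the formula's usual kubernetes:master nesting and an illustrative plugin name:

    kubernetes:
      master:
        network:
          genie:
            default_plugin: calico   # illustrative; written into genie.conf as "default_plugin"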
diff --git a/kubernetes/pool/cni.sls b/kubernetes/pool/cni.sls
deleted file mode 100644
index 0cddd8a..0000000
--- a/kubernetes/pool/cni.sls
+++ /dev/null
@@ -1,41 +0,0 @@
-{%- from "kubernetes/map.jinja" import pool with context %}
-{%- from "kubernetes/map.jinja" import common with context %}
-{%- if pool.enabled %}
-
-{%- if common.hyperkube %}
-
-/tmp/cni/:
- file.directory:
- - user: root
- - group: root
-
-copy-network-cni:
- cmd.run:
- - name: docker run --rm -v /tmp/cni/:/tmp/cni/ --entrypoint cp {{ common.hyperkube.image }} -vr /opt/cni/bin/ /tmp/cni/
- - require:
- - file: /tmp/cni/
- {%- if grains.get('noservices') %}
- - onlyif: /bin/false
- {%- endif %}
-
-{%- for filename in ['flannel', 'tuning', 'bridge', 'ipvlan', 'loopback', 'macvlan', 'ptp', 'dhcp', 'host-local'] %}
-/opt/cni/bin/{{ filename }}:
- file.managed:
- - source: /tmp/cni/bin/{{ filename }}
- - user: root
- - group: root
- - mode: 755
- - makedirs: True
- - watch_in:
- - service: kubelet_service
- - require:
- - cmd: copy-network-cni
- {%- if grains.get('noservices') %}
- - onlyif: /bin/false
- {%- endif %}
-
-{%- endfor %}
-
-{%- endif %}
-
-{%- endif %}
diff --git a/kubernetes/pool/genie.sls b/kubernetes/pool/genie.sls
index 555a6e4..c00e080 100644
--- a/kubernetes/pool/genie.sls
+++ b/kubernetes/pool/genie.sls
@@ -12,6 +12,9 @@
- template: jinja
- defaults:
hostname: {{ pool.host.name }}{% if pool.host.get('domain') %}.{{ pool.host.domain }}{%- endif %}
+ {%- if pool.network.genie.default_plugin is defined %}
+ default_plugin: {{ pool.network.genie.default_plugin }}
+ {%- endif %}
/tmp/genie/:
file.directory:
diff --git a/kubernetes/pool/init.sls b/kubernetes/pool/init.sls
index 34e217a..c4b1967 100644
--- a/kubernetes/pool/init.sls
+++ b/kubernetes/pool/init.sls
@@ -1,6 +1,5 @@
{%- from "kubernetes/map.jinja" import pool with context %}
include:
-- kubernetes.pool.cni
{%- if pool.network.get('calico', {}).get('enabled', False) %}
- kubernetes.pool.calico
{%- endif %}
diff --git a/metadata/service/common.yml b/metadata/service/common.yml
index 4aa517c..169a5b2 100644
--- a/metadata/service/common.yml
+++ b/metadata/service/common.yml
@@ -35,7 +35,7 @@
server_image: mirantis/k8s-netchecker-server:stable
calico_policy:
enabled: False
- image: calico/kube-policy-controller:v0.5.4
+ image: docker-prod-virtual.docker.mirantis.net/mirantis/projectcalico/calico/kube-controllers:v1.0.4
namespace: kube-system
contrail_network_controller:
enabled: False
@@ -58,9 +58,13 @@
virtlet:
enabled: False
namespace: kube-system
- image: mirantis/virtlet:v0.9.4
+ image: mirantis/virtlet:v1.0.0
criproxy_version: v0.10.0
criproxy_source: md5=52717b1f70f15558ef4bdb0e4d4948da
+ cni:
+ plugins:
+ source: https://github.com/containernetworking/plugins/releases/download/v0.7.1/cni-plugins-amd64-v0.7.1.tgz
+ hash: e7e5751d43456f69ea1ed043647e0377
cloudprovider:
enabled: False
provider: openstack
diff --git a/metadata/service/master/single.yml b/metadata/service/master/single.yml
index f2d119b..8c34b63 100644
--- a/metadata/service/master/single.yml
+++ b/metadata/service/master/single.yml
@@ -53,7 +53,7 @@
tiller_image: gcr.io/kubernetes-helm/tiller:v2.4.2
calico_policy:
enabled: False
- image: calico/kube-policy-controller:v0.5.4
+ image: docker-prod-virtual.docker.mirantis.net/mirantis/projectcalico/calico/kube-controllers:v1.0.4
namespace: kube-system
contrail_network_controller:
enabled: False
@@ -62,7 +62,7 @@
virtlet:
enabled: False
namespace: kube-system
- image: mirantis/virtlet:v0.8.0
+ image: mirantis/virtlet:v1.0.0
token:
admin: ${_param:kubernetes_admin_token}
kubelet: ${_param:kubernetes_kubelet_token}
diff --git a/tests/pillar/master_cluster.sls b/tests/pillar/master_cluster.sls
index 68a0e51..9e8afa2 100644
--- a/tests/pillar/master_cluster.sls
+++ b/tests/pillar/master_cluster.sls
@@ -40,7 +40,7 @@
hosts:
- cmp01
- cmp02
- image: mirantis/virtlet:v0.8.0
+ image: mirantis/virtlet:v1.0.0
monitoring:
backend: prometheus
master:
diff --git a/tests/pillar/master_contrail.sls b/tests/pillar/master_contrail.sls
index b2941b1..e86a293 100644
--- a/tests/pillar/master_contrail.sls
+++ b/tests/pillar/master_contrail.sls
@@ -37,7 +37,7 @@
virtlet:
enabled: true
namespace: kube-system
- image: mirantis/virtlet:v0.8.0
+ image: mirantis/virtlet:v1.0.0
hosts:
- cmp01
- cmp02
diff --git a/tests/pillar/master_contrail4_0.sls b/tests/pillar/master_contrail4_0.sls
index 3f62f67..ec48f54 100644
--- a/tests/pillar/master_contrail4_0.sls
+++ b/tests/pillar/master_contrail4_0.sls
@@ -37,7 +37,7 @@
virtlet:
enabled: true
namespace: kube-system
- image: mirantis/virtlet:v0.8.0
+ image: mirantis/virtlet:v1.0.0
hosts:
- cmp01
- cmp02
diff --git a/tests/pillar/pool_cluster.sls b/tests/pillar/pool_cluster.sls
index 152cab3..4de3614 100644
--- a/tests/pillar/pool_cluster.sls
+++ b/tests/pillar/pool_cluster.sls
@@ -16,7 +16,7 @@
virtlet:
enabled: true
namespace: kube-system
- image: mirantis/virtlet:v0.8.0
+ image: mirantis/virtlet:v1.0.0
hosts:
- cmp01
- cmp02
diff --git a/tests/pillar/pool_cluster_with_domain.sls b/tests/pillar/pool_cluster_with_domain.sls
index 194e060..271d762 100644
--- a/tests/pillar/pool_cluster_with_domain.sls
+++ b/tests/pillar/pool_cluster_with_domain.sls
@@ -16,7 +16,7 @@
virtlet:
enabled: true
namespace: kube-system
- image: mirantis/virtlet:v0.8.0
+ image: mirantis/virtlet:v1.0.0
hosts:
- cmp01
- cmp02
diff --git a/tests/pillar/pool_contrail4_0.sls b/tests/pillar/pool_contrail4_0.sls
index 612187f..f396906 100644
--- a/tests/pillar/pool_contrail4_0.sls
+++ b/tests/pillar/pool_contrail4_0.sls
@@ -16,7 +16,7 @@
virtlet:
enabled: true
namespace: kube-system
- image: mirantis/virtlet:v0.8.0
+ image: mirantis/virtlet:v1.0.0
hosts:
- cmp01
- cmp02