Adjust k8s-ha-calico-syndic model

The patch adapts the model for use with a Salt
master-of-masters (syndic) setup for edge clouds in devcloud
and brings the model up to date with the parent
k8s-ha-calico model.
Change-Id: I3e04ddf70174b6bb9a2ed5b3e649ced7172c102f
Related-PROD: PROD-20579
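
For context: system.salt.syndic.single replaces the inline syndic pillar that
this patch removes from infra/config.yml. Judging from the removed lines, the
system class is expected to carry roughly the following data, presumably
parameterised so a cluster can point the syndic at its master-of-masters and
toggle it per deployment (hence the new salt_syndic_enabled: false override
below):

    # Sketch of what system.salt.syndic.single likely provides; the enabled
    # toggle is an assumption inferred from the salt_syndic_enabled override.
    salt:
      syndic:
        enabled: ${_param:salt_syndic_enabled}
        master:
          host: ${_param:salt_syndic_master_address}
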
diff --git a/classes/cluster/k8s-ha-calico-syndic/infra/config.yml b/classes/cluster/k8s-ha-calico-syndic/infra/config.yml
index 6f51f83..ce7924a 100644
--- a/classes/cluster/k8s-ha-calico-syndic/infra/config.yml
+++ b/classes/cluster/k8s-ha-calico-syndic/infra/config.yml
@@ -6,6 +6,7 @@
- system.salt.minion.ca.salt_master
- system.salt.master.api
- system.salt.minion.cert.k8s_server
+- system.salt.syndic.single
- system.reclass.storage.salt
- system.reclass.storage.system.kubernetes_control_cluster
- cluster.k8s-ha-calico-syndic
@@ -16,14 +17,12 @@
reclass_data_revision: master
reclass_config_master: ${_param:infra_config_deploy_address}
single_address: ${_param:infra_config_address}
+ deploy_address: ${_param:infra_config_deploy_address}
linux_system_codename: xenial
salt_api_password_hash: "$6$WV0P1shnoDh2gI/Z$22/Bcd7ffMv0jDlFpT63cAU4PiXHz9pjXwngToKwqAsgoeK4HNR3PiKaushjxp3JsQ8hNoJmAC6TxzVqfV8WH/"
salt_master_host: 127.0.0.1
+ salt_syndic_enabled: false
salt:
- syndic:
- enabled: true
- master:
- host: ${_param:salt_syndic_master_address}
master:
worker_threads: 40
reactor:
@@ -41,6 +40,8 @@
value_template: <<node_os>>
salt_master_host:
value_template: <<node_master_ip>>
+ deploy_address:
+ value_template: <<node_deploy_ip>>
infra_config:
expression:
- <<node_hostname>>__startswith__cfg
@@ -80,4 +81,6 @@
node_class:
value_template:
- cluster.<<node_cluster>>.kubernetes.compute
-
+ node_param:
+ deploy_address:
+ value_template: <<node_deploy_ip>>
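
The node_param additions feed reclass storage node generation on the config
node: when node definitions are rendered, the <<node_deploy_ip>> placeholder
is substituted per node, so each generated control and compute node carries
its own deploy_address parameter. A sketch of the resulting generated entry
(node name and address are hypothetical):

    # Hypothetical generated node entry after template substitution:
    reclass:
      storage:
        node:
          kubernetes_compute_node01:
            params:
              deploy_address: 192.168.10.105   # from <<node_deploy_ip>>
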
diff --git a/classes/cluster/k8s-ha-calico-syndic/kubernetes/compute.yml b/classes/cluster/k8s-ha-calico-syndic/kubernetes/compute.yml
index 052eeca..de81230 100644
--- a/classes/cluster/k8s-ha-calico-syndic/kubernetes/compute.yml
+++ b/classes/cluster/k8s-ha-calico-syndic/kubernetes/compute.yml
@@ -6,19 +6,19 @@
- system.salt.minion.cert.etcd_client
- cluster.k8s-ha-calico-syndic
parameters:
- _param:
- kubernetes_calico_calicoctl_image: docker-prod-virtual.docker.mirantis.net/mirantis/projectcalico/calico/ctl:latest
- kubernetes_calico_image: docker-prod-virtual.docker.mirantis.net/mirantis/projectcalico/calico/node:latest
- kubernetes_calico_cni_image: docker-prod-virtual.docker.mirantis.net/mirantis/projectcalico/calico/cni:latest
- kubernetes_hyperkube_image: docker-prod-virtual.docker.mirantis.net/mirantis/kubernetes/hyperkube-amd64:v1.8.13-11
- kubernetes_pause_image: docker-prod-virtual.docker.mirantis.net/mirantis/kubernetes/pause-amd64:v1.8.13-11
docker:
host:
pkgs:
- - docker-engine=1.13.1-0~ubuntu-xenial
+ - ${_param:kubernetes_docker_package}
- python-docker
+ options:
+ bip: 172.31.255.1/24
+ storage-driver: overlay2
kubernetes:
pool:
+ kubelet:
+ address: ${_param:single_address}
+ fail_on_swap: ${_param:kubelet_fail_on_swap}
network:
calico:
enabled: true
@@ -29,8 +29,18 @@
ssl:
enabled: true
common:
+ addons:
+ virtlet:
+ enabled: ${_param:kubernetes_virtlet_enabled}
+ namespace: ${_param:kubernetes_addon_namespace}
+ image: ${_param:kubernetes_virtlet_image}
+ criproxy_version: ${_param:kubernetes_criproxy_version}
+ criproxy_source: ${_param:kubernetes_criproxy_checksum}
+ hosts:
+ - ${_param:kubernetes_compute01_hostname}
hyperkube:
image: ${_param:kubernetes_hyperkube_image}
+ pause_image: ${_param:kubernetes_pause_image}
linux:
network:
resolv:
@@ -39,9 +49,21 @@
- 172.18.176.4
- 10.254.0.10
interface:
- primary_interface:
- route:
- kubernetes_services:
- address: 10.254.0.0
- netmask: 255.255.0.0
- gateway: ${_param:single_address}
+ ens3:
+ enabled: true
+ type: eth
+ proto: static
+ name: ${_param:primary_interface}
+ address: ${_param:deploy_address}
+ netmask: 255.255.255.0
+ gateway: 192.168.10.1
+ post_up_cmds:
+ - ip r rep 10.254.0.0/16 via ${_param:deploy_address}
+ ens4:
+ enabled: true
+ type: eth
+ proto: static
+ address: ${_param:single_address}
+ netmask: '255.255.255.0'
+ ipflush_onchange: true
+ restart_on_ipflush: true
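
The compute node thus moves from a single templated interface plus a static
route to two explicitly configured NICs: ens3 on the deploy network (carrying
the Kubernetes service route) and ens4 on the workload network. With
hypothetical example values substituted, the pillar renders roughly as:

    # Sketch with assumed values: deploy_address=192.168.10.105,
    # single_address=172.16.10.105 (neither is defined in this patch).
    linux:
      network:
        interface:
          ens3:
            enabled: true
            type: eth
            proto: static
            address: 192.168.10.105
            netmask: 255.255.255.0
            gateway: 192.168.10.1
            post_up_cmds:
            # 'ip r rep' = 'ip route replace': the service route is
            # (re)installed each time the interface comes up.
            - ip r rep 10.254.0.0/16 via 192.168.10.105
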
diff --git a/classes/cluster/k8s-ha-calico-syndic/kubernetes/control.yml b/classes/cluster/k8s-ha-calico-syndic/kubernetes/control.yml
index 5454ad0..03a945e 100644
--- a/classes/cluster/k8s-ha-calico-syndic/kubernetes/control.yml
+++ b/classes/cluster/k8s-ha-calico-syndic/kubernetes/control.yml
@@ -16,6 +16,8 @@
keepalived_k8s_apiserver_vip_password: password
etcd:
server:
+ bind:
+ host: ${_param:deploy_address}
source:
engine: docker_hybrid
ssl:
@@ -27,10 +29,26 @@
kubernetes:
common:
addons:
- netchecker:
- enabled: true
+ dashboard:
+ enabled: ${_param:kubernetes_dashboard}
+ image: ${_param:kubernetes_dashboard_image}
helm:
- enabled: true
+ enabled: ${_param:kubernetes_helm_enabled}
+ calico_policy:
+ enabled: ${_param:kubernetes_calico_policy_enabled}
+ image: ${_param:kubernetes_calico_policy_image}
+ netchecker:
+ enabled: ${_param:kubernetes_netchecker_enabled}
+ agent_image: ${_param:kubernetes_netchecker_agent_image}
+ server_image: ${_param:kubernetes_netchecker_server_image}
+ agent_probeurls: ${_param:kubernetes_netchecker_agent_probeurls}
+ dns:
+ enabled: ${_param:kubernetes_dns}
+ kubedns_image: ${_param:kubernetes_kubedns_image}
+ dnsmasq_image: ${_param:kubernetes_dnsmasq_image}
+ sidecar_image: ${_param:kubernetes_sidecar_image}
+ autoscaler:
+ image: ${_param:kubernetes_dns_autoscaler_image}
coredns:
enabled: ${_param:kubernetes_coredns_enabled}
externaldns:
@@ -38,6 +56,9 @@
image: ${_param:kubernetes_externaldns_image}
provider: ${_param:kubernetes_externaldns_provider}
master:
+ kubelet:
+ address: ${_param:single_address}
+ fail_on_swap: ${_param:kubelet_fail_on_swap}
etcd:
ssl:
enabled: true
@@ -50,3 +71,5 @@
namespace:
netchecker:
enabled: true
+ auth:
+ mode: Node,RBAC
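
Taken together with the defaults this patch adds to kubernetes/init.yml (next
file), the parameterised addons resolve as follows out of the box; the new
auth.mode value presumably ends up as the kube-apiserver
--authorization-mode=Node,RBAC flag:

    # Effective addon state with the init.yml defaults below:
    kubernetes:
      common:
        addons:
          dashboard:
            enabled: true      # kubernetes_dashboard: true
          helm:
            enabled: false     # kubernetes_helm_enabled: false
          netchecker:
            enabled: true      # kubernetes_netchecker_enabled: true
          calico_policy:
            enabled: false     # kubernetes_calico_policy_enabled: false
          virtlet:
            enabled: false     # kubernetes_virtlet_enabled: false (compute.yml)
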
diff --git a/classes/cluster/k8s-ha-calico-syndic/kubernetes/init.yml b/classes/cluster/k8s-ha-calico-syndic/kubernetes/init.yml
index d5cf684..12216e2 100644
--- a/classes/cluster/k8s-ha-calico-syndic/kubernetes/init.yml
+++ b/classes/cluster/k8s-ha-calico-syndic/kubernetes/init.yml
@@ -3,6 +3,9 @@
salt_minion_ca_host: ${_param:infra_config_hostname}.${_param:cluster_domain}
salt_minion_ca_authority: salt_master_ca
+ # kubelet
+ kubelet_fail_on_swap: True
+
# kubernetes settings
kubernetes_admin_user: admin
kubernetes_admin_password: sbPfel23ZigJF3Bm
@@ -14,6 +17,38 @@
kubernetes_dns_token: 0S1I4iJeFjq5fopPwwCwTp3xFpEZfeUl
etcd_initial_token: IN7KaRMSo3xkGxkjAAPtkRkAgqN4ZNRq
+ # docker package version
+ kubernetes_docker_package: docker-engine=1.13.1-0~ubuntu-xenial
+
+ # component docker images
+ kubernetes_calico_calicoctl_image: docker-prod-virtual.docker.mirantis.net/mirantis/projectcalico/calico/ctl:latest
+ kubernetes_calico_image: docker-prod-virtual.docker.mirantis.net/mirantis/projectcalico/calico/node:latest
+ kubernetes_calico_cni_image: docker-prod-virtual.docker.mirantis.net/mirantis/projectcalico/calico/cni:latest
+ kubernetes_calico_policy_image: calico/kube-policy-controller:v0.5.4
+
+ kubernetes_hyperkube_image: docker-prod-virtual.docker.mirantis.net/mirantis/kubernetes/hyperkube-amd64:v1.8.13-11
+ kubernetes_pause_image: docker-prod-virtual.docker.mirantis.net/mirantis/kubernetes/pause-amd64:v1.8.13-11
+ kubernetes_virtlet_image: mirantis/virtlet:v1.0.3
+ kubernetes_criproxy_version: v0.11.0
+ kubernetes_criproxy_checksum: md5=115bbb0c27518db6b0b3bc8cdc5fc897
+ kubernetes_dashboard_image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.5.1
+ kubernetes_kubedns_image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.5
+ kubernetes_dnsmasq_image: gcr.io/google_containers/k8s-dns-dnsmasq-amd64:1.14.5
+ kubernetes_sidecar_image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.5
+ kubernetes_dns_autoscaler_image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.0.0
+ kubernetes_netchecker_agent_image: mirantis/k8s-netchecker-agent:stable
+ kubernetes_netchecker_server_image: mirantis/k8s-netchecker-server:stable
+ kubernetes_netchecker_agent_probeurls: "http://ipinfo.io"
+
+ # addon toggles
+ kubernetes_addon_namespace: kube-system
+ kubernetes_dns: true
+ kubernetes_dashboard: true
+ kubernetes_helm_enabled: false
+ kubernetes_netchecker_enabled: true
+ kubernetes_calico_policy_enabled: false
+ kubernetes_virtlet_enabled: false
+
# addresses and hostnames
kubernetes_internal_api_address: 10.254.0.1
kubernetes_control_hostname: ctl
@@ -21,28 +56,26 @@
kubernetes_control_node01_hostname: ctl01
kubernetes_control_node02_hostname: ctl02
kubernetes_control_node03_hostname: ctl03
- kubernetes_compute_node01_hostname: cmp01
- kubernetes_compute_node02_hostname: cmp02
+ kubernetes_compute01_hostname: cmp0
kubernetes_control_node01_address: 172.16.10.101
kubernetes_control_node02_address: 172.16.10.102
kubernetes_control_node03_address: 172.16.10.103
- kubernetes_compute_node01_address: 172.16.10.105
- kubernetes_compute_node02_address: 172.16.10.106
- kubernetes_proxy_node01_hostname: prx01
- kubernetes_proxy_node01_address: 172.16.10.121
+ kubernetes_control_node01_deploy_address: 192.168.10.101
+ kubernetes_control_node02_deploy_address: 192.168.10.102
+ kubernetes_control_node03_deploy_address: 192.168.10.103
cluster_vip_address: ${_param:kubernetes_control_address}
- cluster_local_address: ${_param:single_address}
+ cluster_local_address: ${_param:deploy_address}
# etcd stuff
cluster_node01_hostname: ${_param:kubernetes_control_node01_hostname}
- cluster_node01_address: ${_param:kubernetes_control_node01_address}
+ cluster_node01_address: ${_param:kubernetes_control_node01_deploy_address}
cluster_node01_port: 4001
cluster_node02_hostname: ${_param:kubernetes_control_node02_hostname}
- cluster_node02_address: ${_param:kubernetes_control_node02_address}
+ cluster_node02_address: ${_param:kubernetes_control_node02_deploy_address}
cluster_node02_port: 4001
cluster_node03_hostname: ${_param:kubernetes_control_node03_hostname}
- cluster_node03_address: ${_param:kubernetes_control_node03_address}
+ cluster_node03_address: ${_param:kubernetes_control_node03_deploy_address}
cluster_node03_port: 4001
# calico
@@ -80,32 +113,3 @@
names:
- ${_param:kubernetes_control_node03_hostname}
- ${_param:kubernetes_control_node03_hostname}.${_param:cluster_domain}
- cmp01:
- address: ${_param:kubernetes_compute_node01_address}
- names:
- - ${_param:kubernetes_compute_node01_hostname}
- - ${_param:kubernetes_compute_node01_hostname}.${_param:cluster_domain}
- cmp02:
- address: ${_param:kubernetes_compute_node02_address}
- names:
- - ${_param:kubernetes_compute_node02_hostname}
- - ${_param:kubernetes_compute_node02_hostname}.${_param:cluster_domain}
- prx01:
- address: ${_param:kubernetes_proxy_node01_address}
- names:
- - ${_param:kubernetes_proxy_node01_hostname}
- - ${_param:kubernetes_proxy_node01_hostname}.${_param:cluster_domain}
- system:
- rc:
- local: |
- #!/bin/sh -e
- #
- # rc.local
- #
- ######### This file is managed by Salt! ##########
- # This script is executed at the end of each multiuser runlevel.
- # Make sure that the script will "exit 0" on success or any other
- # value on error.
- #
- ip r a 10.254.0.0/16 dev ens3
- exit 0
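
Note that the rc.local block deleted above was the previous mechanism for the
10.254.0.0/16 service route (ip r a 10.254.0.0/16 dev ens3); it is superseded
by the declarative post_up_cmds entry on ens3 in kubernetes/compute.yml, so
the route is reapplied whenever the interface comes up rather than only at
boot.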