---
parameters:
  _param:
    # Salt minion CA: sign certificates against the salt master's CA.
    salt_minion_ca_host: ${_param:infra_config_hostname}.${_param:cluster_domain}
    salt_minion_ca_authority: salt_master_ca
    # kubelet
    kubelet_fail_on_swap: true
    # kubernetes settings
    # NOTE(review): plaintext credentials/tokens committed to VCS — these
    # should be moved to a secret store or encrypted pillar; kept here only
    # because templates reference them via ${_param:...}.
    kubernetes_admin_user: admin
    kubernetes_admin_password: sbPfel23ZigJF3Bm
    kubernetes_admin_token: PpP6Mm3pAoPVqcKOKUu0x1dh7b1959Fi
    kubernetes_kubelet_token: JJ2PKHxjiU6EYvIt18BqwdSK1HvWh8pt
    kubernetes_kube-proxy_token: jT0hJk9L6cIw5UpYDNhsRwcj3Z2n62B6
    kubernetes_scheduler_token: VgkUHfrW07zNxrb0ucFyX7NBnSJN9Xp6
    kubernetes_controller-manager_token: uXrdZ1YKF6qlYm3sHje2iEXMGAGDWOIU
    kubernetes_dns_token: 0S1I4iJeFjq5fopPwwCwTp3xFpEZfeUl
    kubernetes_mtu: 1500
    etcd_initial_token: IN7KaRMSo3xkGxkjAAPtkRkAgqN4ZNRq
    # component docker images
    kubernetes_pause_image: docker-prod-virtual.docker.mirantis.net/mirantis/kubernetes/pause-amd64:v1.11.2-1
    kubernetes_netchecker_agent_probeurls: "http://ipinfo.io"
    kubernetes_virtlet_image: mirantis/virtlet:v1.4.0
    # kube-controllers image is temporarily added here as it's not defined in TCP-QA by now
    kubernetes_calico_kube_controllers_image: docker-prod-virtual.docker.mirantis.net/mirantis/projectcalico/calico/kube-controllers:v3.1.3
    kubernetes_criproxy_version: v0.12.0
    kubernetes_criproxy_checksum: md5=371cacd3d8568eb88425498b48a649dd
    kubernetes_externaldns_provider: coredns
    kubernetes_metallb_addresses_pool: 172.16.10.60-172.16.10.80
    # switches of addons
    kubernetes_addon_namespace: kube-system
    kubernetes_dns: false
    kubernetes_dashboard: true
    kubernetes_helm_enabled: false
    kubernetes_netchecker_enabled: true
    kubernetes_calico_policy_enabled: false
    kubernetes_virtlet_enabled: true
    kubernetes_coredns_enabled: true
    kubernetes_externaldns_enabled: false
    kubernetes_metallb_enabled: false
    kubernetes_ingressnginx_enabled: false
    # addresses and hostnames
    kubernetes_internal_api_address: 10.254.0.1
    kubernetes_control_hostname: ctl
    kubernetes_control_address: 192.168.10.253
    kubernetes_control_node01_hostname: ctl01
    kubernetes_control_node02_hostname: ctl02
    kubernetes_control_node03_hostname: ctl03
    kubernetes_compute_node01_hostname: cmp01
    kubernetes_compute_node02_hostname: cmp02
    kubernetes_control_node01_address: 172.16.10.101
    kubernetes_control_node02_address: 172.16.10.102
    kubernetes_control_node03_address: 172.16.10.103
    kubernetes_compute_node01_address: 172.16.10.105
    kubernetes_compute_node02_address: 172.16.10.106
    kubernetes_control_node01_deploy_address: 192.168.10.101
    kubernetes_control_node02_deploy_address: 192.168.10.102
    kubernetes_control_node03_deploy_address: 192.168.10.103
    kubernetes_compute_node01_deploy_address: 192.168.10.105
    kubernetes_compute_node02_deploy_address: 192.168.10.106
    kubernetes_proxy_node01_hostname: prx01
    kubernetes_proxy_node01_address: 172.16.10.121
    deploy_address: ${_param:infra_config_deploy_address}
    cluster_vip_address: ${_param:kubernetes_control_address}
    cluster_local_address: ${_param:deploy_address}
    # etcd stuff — the etcd cluster members map onto the three control nodes
    # over the deploy network.
    cluster_node01_hostname: ${_param:kubernetes_control_node01_hostname}
    cluster_node01_address: ${_param:kubernetes_control_node01_deploy_address}
    cluster_node01_port: 4001
    cluster_node02_hostname: ${_param:kubernetes_control_node02_hostname}
    cluster_node02_address: ${_param:kubernetes_control_node02_deploy_address}
    cluster_node02_port: 4001
    cluster_node03_hostname: ${_param:kubernetes_control_node03_hostname}
    cluster_node03_address: ${_param:kubernetes_control_node03_deploy_address}
    cluster_node03_port: 4001
    # calico
    calico_private_network: 192.168.0.0
    calico_private_netmask: 16
  linux:
    network:
      # Static /etc/hosts entries for all cluster nodes (control VIP,
      # control-plane, compute, and proxy nodes).
      host:
        ctl:
          address: ${_param:kubernetes_control_address}
          names:
            - ${_param:kubernetes_control_hostname}
            - ${_param:kubernetes_control_hostname}.${_param:cluster_domain}
        ctl01:
          address: ${_param:kubernetes_control_node01_address}
          names:
            - ${_param:kubernetes_control_node01_hostname}
            - ${_param:kubernetes_control_node01_hostname}.${_param:cluster_domain}
        ctl02:
          address: ${_param:kubernetes_control_node02_address}
          names:
            - ${_param:kubernetes_control_node02_hostname}
            - ${_param:kubernetes_control_node02_hostname}.${_param:cluster_domain}
        ctl03:
          address: ${_param:kubernetes_control_node03_address}
          names:
            - ${_param:kubernetes_control_node03_hostname}
            - ${_param:kubernetes_control_node03_hostname}.${_param:cluster_domain}
        cmp01:
          address: ${_param:kubernetes_compute_node01_address}
          names:
            - ${_param:kubernetes_compute_node01_hostname}
            - ${_param:kubernetes_compute_node01_hostname}.${_param:cluster_domain}
        cmp02:
          address: ${_param:kubernetes_compute_node02_address}
          names:
            - ${_param:kubernetes_compute_node02_hostname}
            - ${_param:kubernetes_compute_node02_hostname}.${_param:cluster_domain}
        prx01:
          address: ${_param:kubernetes_proxy_node01_address}
          names:
            - ${_param:kubernetes_proxy_node01_hostname}
            - ${_param:kubernetes_proxy_node01_hostname}.${_param:cluster_domain}
    system:
      user:
        root:
          name: root
          # false = no password login for root (key-based access only).
          password: false
      rc:
        # Rendered verbatim into /etc/rc.local by the salt linux formula.
        local: |
          #!/bin/sh -e
          #
          # rc.local
          #
          ######### This file is managed by Salt! ##########
          # This script is executed at the end of each multiuser runlevel.
          # Make sure that the script will "exit 0" on success or any other
          # value on error.
          #
          ip r a 10.254.0.0/16 dev ens4
          exit 0