Add salt model for the OpenStack cloud provider on k8s Calico
Change-Id: I9d3480ca4ce1c6cdbe2b6df011664063800024fb
diff --git a/classes/cluster/k8s-ha-calico-cloudprovider/kubernetes/init.yml b/classes/cluster/k8s-ha-calico-cloudprovider/kubernetes/init.yml
new file mode 100644
index 0000000..8baf8b1
--- /dev/null
+++ b/classes/cluster/k8s-ha-calico-cloudprovider/kubernetes/init.yml
@@ -0,0 +1,122 @@
+parameters:
+  _param:
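+    # Node certificates are issued by the salt master CA running on the
+    # infra config node.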
+    salt_minion_ca_host: ${_param:infra_config_hostname}.${_param:cluster_domain}
+    salt_minion_ca_authority: salt_master_ca
+
+    # kubernetes settings
+    kubernetes_admin_user: admin
+    kubernetes_admin_password: sbPfel23ZigJF3Bm
+    kubernetes_admin_token: PpP6Mm3pAoPVqcKOKUu0x1dh7b1959Fi
+    kubernetes_kubelet_token: JJ2PKHxjiU6EYvIt18BqwdSK1HvWh8pt
+    kubernetes_kube-proxy_token: jT0hJk9L6cIw5UpYDNhsRwcj3Z2n62B6
+    kubernetes_scheduler_token: VgkUHfrW07zNxrb0ucFyX7NBnSJN9Xp6
+    kubernetes_controller-manager_token: uXrdZ1YKF6qlYm3sHje2iEXMGAGDWOIU
+    kubernetes_dns_token: 0S1I4iJeFjq5fopPwwCwTp3xFpEZfeUl
+    etcd_initial_token: IN7KaRMSo3xkGxkjAAPtkRkAgqN4ZNRq
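+    # The *_token values above are static bearer tokens the respective
+    # components use to authenticate against kube-apiserver.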
+
+    # addresses and hostnames
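+    # 10.254.0.1 is the first address of the 10.254.0.0/16 service range
+    # that is routed via primary_interface further below.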
+    kubernetes_internal_api_address: 10.254.0.1
+    kubernetes_control_hostname: ctl
+    kubernetes_control_address: 192.168.10.253
+    kubernetes_control_node01_hostname: ctl01
+    kubernetes_control_node02_hostname: ctl02
+    kubernetes_control_node03_hostname: ctl03
+    kubernetes_control_node01_address: 172.16.10.101
+    kubernetes_control_node02_address: 172.16.10.102
+    kubernetes_control_node03_address: 172.16.10.103
+    kubernetes_proxy_node01_hostname: prx01
+    kubernetes_proxy_node01_address: 172.16.10.121
+
+    cluster_vip_address: ${_param:kubernetes_control_address}
+    cluster_local_address: ${_param:single_address}
+
+    # etcd cluster
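+    # Three etcd members are colocated with the k8s control nodes; 4001 is
+    # the legacy etcd client port.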
+    cluster_node01_hostname: ${_param:kubernetes_control_node01_hostname}
+    cluster_node01_address: ${_param:kubernetes_control_node01_address}
+    cluster_node01_port: 4001
+    cluster_node02_hostname: ${_param:kubernetes_control_node02_hostname}
+    cluster_node02_address: ${_param:kubernetes_control_node02_address}
+    cluster_node02_port: 4001
+    cluster_node03_hostname: ${_param:kubernetes_control_node03_hostname}
+    cluster_node03_address: ${_param:kubernetes_control_node03_address}
+    cluster_node03_port: 4001
+
+    # calico
+    calico_private_network: 192.168.0.0
+    calico_private_netmask: 16
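+    # 192.168.0.0/16 above is the pod network pool handed to Calico IPAM.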
+
+    # OpenStack cloud provider
+    openstack_auth_url: https://cloud-cz.bud.mirantis.net:5000/v3
+    openstack_region_name: RegionOne
+    openstack_tenant_name: k8s-openstack-provider
+    openstack_username: k8s-openstack-provider
+    openstack_password: hjk5ers794wep
+    # Set this with heat template or overrides.yml
+    openstack_subnet_id: null
+    openstack_floating_network_id: 3e868882-d59e-416a-90a1-48cc04cab723
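+    # Example overrides.yml entry (sketch; <subnet-uuid> stands in for the
+    # deployment's neutron subnet ID):
+    #
+    #   parameters:
+    #     _param:
+    #       openstack_subnet_id: <subnet-uuid>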
+
+  linux:
+    network:
+      host:
+        ctl:
+          address: ${_param:kubernetes_control_address}
+          names:
+          - ${_param:kubernetes_control_hostname}
+          - ${_param:kubernetes_control_hostname}.${_param:cluster_domain}
+        ctl01:
+          address: ${_param:kubernetes_control_node01_address}
+          names:
+          - ${_param:kubernetes_control_node01_hostname}
+          - ${_param:kubernetes_control_node01_hostname}.${_param:cluster_domain}
+        ctl02:
+          address: ${_param:kubernetes_control_node02_address}
+          names:
+          - ${_param:kubernetes_control_node02_hostname}
+          - ${_param:kubernetes_control_node02_hostname}.${_param:cluster_domain}
+        ctl03:
+          address: ${_param:kubernetes_control_node03_address}
+          names:
+          - ${_param:kubernetes_control_node03_hostname}
+          - ${_param:kubernetes_control_node03_hostname}.${_param:cluster_domain}
+        prx01:
+          address: ${_param:kubernetes_proxy_node01_address}
+          names:
+          - ${_param:kubernetes_proxy_node01_hostname}
+          - ${_param:kubernetes_proxy_node01_hostname}.${_param:cluster_domain}
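+      # The host entries above are rendered into /etc/hosts on every node.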
+      interface:
+        primary_interface:
+          route:
+            # TODO: the route below takes effect once CI ships the linux
+            # formula package that includes https://gerrit.mcp.mirantis.net/#/c/8952
+            kubernetes_internal:
+              address: 10.254.0.0
+              netmask: 255.255.0.0
+    system:
+      rc:
+        local: |
+          #!/bin/sh -e
+          #
+          # rc.local
+          #
+          ######### This file is managed by Salt! ##########
+          # This script is executed at the end of each multiuser runlevel.
+          # Make sure that the script will "exit 0" on success or any other
+          # value on error.
+          #
+          exit 0