rename cluster levels to non-underscore names

Change-Id: Ic9bdafbeb1333699ce424612f8b2e8d8dafb7be2
diff --git a/classes/cluster/k8s-ha-calico/kubernetes/compute.yml b/classes/cluster/k8s-ha-calico/kubernetes/compute.yml
new file mode 100644
index 0000000..6a1637f
--- /dev/null
+++ b/classes/cluster/k8s-ha-calico/kubernetes/compute.yml
@@ -0,0 +1,48 @@
+classes:
+- system.linux.system.repo.docker
+- system.kubernetes.pool.cluster
+- system.linux.network.hosts
+- system.salt.minion.cert.k8s_client
+- system.salt.minion.cert.etcd_client
+- cluster.k8s-ha-calico
+parameters:
+  _param:
+    kubernetes_calicoctl_image: docker-prod-virtual.docker.mirantis.net/mirantis/projectcalico/calico/ctl:latest
+    kubernetes_calico_image: docker-prod-virtual.docker.mirantis.net/mirantis/projectcalico/calico/node:latest
+    kubernetes_calico_cni_image: docker-prod-virtual.docker.mirantis.net/mirantis/projectcalico/calico/cni:latest
+    kubernetes_hyperkube_image: docker-prod-virtual.docker.mirantis.net/mirantis/kubernetes/hyperkube-amd64:v1.7.3-1
+  docker:
+    host:
+      pkgs:
+        - docker-engine=1.12.6-0~ubuntu-xenial
+        - python-docker
+      options:
+        bip: 172.31.255.1/24
+  kubernetes:
+    pool:
+      network:
+        etcd:
+          ssl:
+            enabled: true
+        image: ${_param:kubernetes_calico_image}
+        calicoctl:
+          image: ${_param:kubernetes_calicoctl_image}
+        cni:
+          image: ${_param:kubernetes_calico_cni_image}
+    common:
+      hyperkube:
+        image: ${_param:kubernetes_hyperkube_image}
+  linux:
+    network:
+      resolv:
+        dns:
+          - 10.254.0.10
+          - 172.18.176.4
+          - 172.18.176.7
+      interface:
+        primary_interface:
+          route:
+            kubernetes_services:
+              address: 10.254.0.0
+              netmask: 255.255.0.0
+              gateway: ${_param:single_address}
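
Everything version-sensitive in the compute class is routed through `_param`, so a deployment can retarget images or package pins without editing the class itself. A minimal sketch of what an environment-level override could look like; the file path and the image tag below are hypothetical, not part of this change:

    # classes/cluster/k8s-ha-calico/overrides.yml (hypothetical example)
    parameters:
      _param:
        # Retarget the hyperkube image; this tag is an example only.
        kubernetes_hyperkube_image: docker-prod-virtual.docker.mirantis.net/mirantis/kubernetes/hyperkube-amd64:v1.7.3-2
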
diff --git a/classes/cluster/k8s-ha-calico/kubernetes/control.yml b/classes/cluster/k8s-ha-calico/kubernetes/control.yml
new file mode 100644
index 0000000..e45aea5
--- /dev/null
+++ b/classes/cluster/k8s-ha-calico/kubernetes/control.yml
@@ -0,0 +1,54 @@
+classes:
+- service.etcd.server.cluster
+- system.haproxy.proxy.listen.kubernetes.apiserver
+- system.linux.system.repo.docker
+- system.salt.minion.cert.etcd_server
+- system.kubernetes.master.cluster
+- cluster.k8s-ha-calico.kubernetes.compute
+- cluster.k8s-ha-calico
+# FIXME: replace service.helm with system.helm (once it is properly packaged in reclass)
+- service.helm.client
+parameters:
+  _param:
+    keepalived_vip_interface: ${_param:primary_interface}
+    keepalived_vip_virtual_router_id: 60
+  helm:
+    client:
+      repos:
+        mirantisworkloads: https://mirantisworkloads.storage.googleapis.com/
+      # TODO: configure custom application stacks below
+      #releases:
+      #  grafana:
+      #    chart: mirantisworkloads/grafana
+      #    version: 0.4.1
+      #    values:
+      #      replicas: 1
+      #      logLevel: INFO
+  etcd:
+    server:
+      source:
+        engine: docker_hybrid
+      ssl:
+        enabled: true
+      setup:
+        calico:
+          key: /calico/v1/ipam/v4/pool/${_param:calico_private_network}-${_param:calico_private_netmask}
+          value: '{"masquerade":true,"cidr":"${_param:calico_private_network}/${_param:calico_private_netmask}"}'
+  kubernetes:
+    common:
+      addons:
+        netchecker:
+          enabled: true
+        helm:
+          enabled: false
+    master:
+      etcd:
+        ssl:
+          enabled: true
+      network:
+        etcd:
+          ssl:
+            enabled: true
+      namespace:
+        netchecker:
+          enabled: true
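
The `etcd.server.setup.calico` stanza pre-seeds the Calico IPAM pool in etcd before the first pool node joins. With the defaults defined in init.yml below (calico_private_network: 192.168.0.0, calico_private_netmask: 16), the interpolated key/value pair resolves to:

    setup:
      calico:
        key: /calico/v1/ipam/v4/pool/192.168.0.0-16
        value: '{"masquerade":true,"cidr":"192.168.0.0/16"}'
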
diff --git a/classes/cluster/k8s-ha-calico/kubernetes/init.yml b/classes/cluster/k8s-ha-calico/kubernetes/init.yml
new file mode 100644
index 0000000..1c4dfda
--- /dev/null
+++ b/classes/cluster/k8s-ha-calico/kubernetes/init.yml
@@ -0,0 +1,96 @@
+parameters:
+  _param:
+    salt_minion_ca_host: ${_param:infra_config_hostname}.${_param:cluster_domain}
+    salt_minion_ca_authority: salt_master_ca
+
+    # kubernetes settings
+    kubernetes_admin_user: admin
+    kubernetes_admin_password: sbPfel23ZigJF3Bm
+    kubernetes_admin_token: PpP6Mm3pAoPVqcKOKUu0x1dh7b1959Fi
+    kubernetes_kubelet_token: JJ2PKHxjiU6EYvIt18BqwdSK1HvWh8pt
+    kubernetes_kube-proxy_token: jT0hJk9L6cIw5UpYDNhsRwcj3Z2n62B6
+    kubernetes_scheduler_token: VgkUHfrW07zNxrb0ucFyX7NBnSJN9Xp6
+    kubernetes_controller-manager_token: uXrdZ1YKF6qlYm3sHje2iEXMGAGDWOIU
+    kubernetes_dns_token: 0S1I4iJeFjq5fopPwwCwTp3xFpEZfeUl
+    etcd_initial_token: IN7KaRMSo3xkGxkjAAPtkRkAgqN4ZNRq
+
+    # addresses and hostnames
+    kubernetes_internal_api_address: 10.254.0.1
+    kubernetes_control_hostname: ctl
+    kubernetes_control_address: 172.16.10.253
+    kubernetes_control_node01_hostname: ctl01
+    kubernetes_control_node02_hostname: ctl02
+    kubernetes_control_node03_hostname: ctl03
+    kubernetes_control_node01_address: 172.16.10.101
+    kubernetes_control_node02_address: 172.16.10.102
+    kubernetes_control_node03_address: 172.16.10.103
+    kubernetes_proxy_node01_hostname: prx01
+    kubernetes_proxy_node01_address: 172.16.10.121
+
+    cluster_vip_address: ${_param:kubernetes_control_address}
+    cluster_local_address: ${_param:single_address}
+
+    # etcd cluster members
+    cluster_node01_hostname: ${_param:kubernetes_control_node01_hostname}
+    cluster_node01_address: ${_param:kubernetes_control_node01_address}
+    cluster_node01_port: 4001
+    cluster_node02_hostname: ${_param:kubernetes_control_node02_hostname}
+    cluster_node02_address: ${_param:kubernetes_control_node02_address}
+    cluster_node02_port: 4001
+    cluster_node03_hostname: ${_param:kubernetes_control_node03_hostname}
+    cluster_node03_address: ${_param:kubernetes_control_node03_address}
+    cluster_node03_port: 4001
+
+    # calico
+    calico_private_network: 192.168.0.0
+    calico_private_netmask: 16
+
+  linux:
+    network:
+      host:
+        ctl:
+          address: ${_param:kubernetes_control_address}
+          names:
+          - ${_param:kubernetes_control_hostname}
+          - ${_param:kubernetes_control_hostname}.${_param:cluster_domain}
+        ctl01:
+          address: ${_param:kubernetes_control_node01_address}
+          names:
+          - ${_param:kubernetes_control_node01_hostname}
+          - ${_param:kubernetes_control_node01_hostname}.${_param:cluster_domain}
+        ctl02:
+          address: ${_param:kubernetes_control_node02_address}
+          names:
+          - ${_param:kubernetes_control_node02_hostname}
+          - ${_param:kubernetes_control_node02_hostname}.${_param:cluster_domain}
+        ctl03:
+          address: ${_param:kubernetes_control_node03_address}
+          names:
+          - ${_param:kubernetes_control_node03_hostname}
+          - ${_param:kubernetes_control_node03_hostname}.${_param:cluster_domain}
+        prx01:
+          address: ${_param:kubernetes_proxy_node01_address}
+          names:
+          - ${_param:kubernetes_proxy_node01_hostname}
+          - ${_param:kubernetes_proxy_node01_hostname}.${_param:cluster_domain}
+      interface:
+        primary_interface:
+          route:
+            # TODO: remove once the packaged linux formula ships route support:
+            # https://gerrit.mcp.mirantis.net/#/c/8952
+            kubernetes_internal:
+              address: 10.254.0.0
+              netmask: 255.255.0.0
+    system:
+      rc:
+        local: |
+          #!/bin/sh -e
+          #
+          # rc.local
+          #
+          ######### This file is managed by Salt! ##########
+          # This script is executed at the end of each multiuser runlevel.
+          # Make sure that the script will "exit 0" on success or any other
+          # value on error.
+          #
+          exit 0
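
These classes only take effect once a reclass node definition includes them and supplies the per-node parameters they reference (`single_address`, `primary_interface`, `cluster_domain`). A minimal sketch of a control-plane node registration, assuming a hypothetical node file, domain, and interface name (none of which are part of this change):

    # nodes/ctl01.k8s-ha-calico.local.yml (hypothetical; not part of this change)
    classes:
    - cluster.k8s-ha-calico.kubernetes.control
    parameters:
      _param:
        # Must match kubernetes_control_node01_address from init.yml.
        single_address: 172.16.10.101
        # Interface name is an assumption; set it to the node's real NIC.
        primary_interface: ens3
      linux:
        system:
          name: ctl01
          domain: k8s-ha-calico.local
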