New dev cloud defined stacks
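
Add the reclass model for the k8s_aio_calico stack: a Salt master
config with hostname-based node classification, shared infra
networking defaults, and Kubernetes control and compute roles
running Calico over SSL-enabled etcd.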

Change-Id: Iaf4efa717900c7be9d297ffe9b6e5d7a177aee24
diff --git a/classes/cluster/k8s_aio_calico/infra/config.yml b/classes/cluster/k8s_aio_calico/infra/config.yml
new file mode 100644
index 0000000..de701cf
--- /dev/null
+++ b/classes/cluster/k8s_aio_calico/infra/config.yml
@@ -0,0 +1,67 @@
+classes:
+- system.linux.system.repo.ubuntu
+- system.openssh.client.lab
+- system.salt.master.pkg
+- system.salt.minion.ca.salt_master
+- system.salt.master.api
+- system.salt.minion.cert.k8s_server
+- system.mysql.client
+- system.reclass.storage.salt
+- system.reclass.storage.system.kubernetes_control_cluster
+- cluster.k8s_aio_calico
+parameters:
+  _param:
+    salt_master_base_environment: prd
+    reclass_data_repository: "https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab.git"
+    reclass_data_revision: master
+    reclass_config_master: ${_param:infra_config_deploy_address}
+    single_address: ${_param:infra_config_address}
+    linux_system_codename: xenial
+    salt_api_password_hash: "$6$WV0P1shnoDh2gI/Z$22/Bcd7ffMv0jDlFpT63cAU4PiXHz9pjXwngToKwqAsgoeK4HNR3PiKaushjxp3JsQ8hNoJmAC6TxzVqfV8WH/"
+    salt_master_host: 127.0.0.1
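+  # React to minion registration events by classifying the new node.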
+  salt:
+    master:
+      reactor:
+        reclass/minion/classify:
+        - salt://reclass/reactor/node_register.sls
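+  # Classify nodes by hostname: cfg* -> infra config, ctl01-03 -> Kubernetes control, cmp* -> compute.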
+  reclass:
+    storage:
+      class_mapping:
+        common_node:
+          expression: all
+          node_param:
+            single_address:
+              value_template: <<node_ip>>
+            linux_system_codename:
+              value_template: <<node_os>>
+            salt_master_host:
+              value_template: <<node_master_ip>>
+        infra_config:
+          expression: <<node_hostname>>__startswith__cfg
+          cluster_param:
+            infra_config_address:
+              value_template: <<node_ip>>
+            infra_config_deploy_address:
+              value_template: <<node_ip>>
+        kubernetes_control01:
+          expression: <<node_hostname>>__equals__ctl01
+          cluster_param:
+            kubernetes_control_node01_address:
+              value_template: <<node_ip>>
+        kubernetes_control02:
+          expression: <<node_hostname>>__equals__ctl02
+          cluster_param:
+            kubernetes_control_node02_address:
+              value_template: <<node_ip>>
+        kubernetes_control03:
+          expression: <<node_hostname>>__equals__ctl03
+          cluster_param:
+            kubernetes_control_node03_address:
+              value_template: <<node_ip>>
+        kubernetes_compute:
+          expression: <<node_hostname>>__startswith__cmp
+          node_class:
+            value_template:
+              - cluster.<<node_cluster>>.kubernetes.compute
diff --git a/classes/cluster/k8s_aio_calico/infra/init.yml b/classes/cluster/k8s_aio_calico/infra/init.yml
new file mode 100644
index 0000000..d6bda6d
--- /dev/null
+++ b/classes/cluster/k8s_aio_calico/infra/init.yml
@@ -0,0 +1,42 @@
+classes:
+- system.linux.system.single
+- system.linux.system.repo.mcp.extra
+- system.linux.system.repo.mcp.salt
+- system.linux.system.repo.ubuntu
+- system.openssh.server.team.lab
+- system.openssh.server.team.tcpcloud
+- system.openssh.server.team.mcp_qa
+- system.openssh.server.team.k8s_team
+- system.rsyslog.client.single
+parameters:
+  _param:
+    # infra service addresses
+    infra_config_hostname: cfg01
+    infra_config_address: 172.16.10.100
+    infra_config_deploy_address: 192.168.10.100
+
+    cluster_domain: virtual-mcp11-k8s-calico.local
+    cluster_name: virtual-mcp11-k8s-calico
+    apt_mk_version: nightly
+    primary_interface: ens3
+  linux:
+    network:
+      host:
+        cfg01:
+          address: ${_param:infra_config_address}
+          names:
+          - ${_param:infra_config_hostname}
+          - ${_param:infra_config_hostname}.${_param:cluster_domain}
+      interface:
+        primary_interface:
+          enabled: true
+          type: eth
+          proto: dhcp
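+          # Static alternative to DHCP; uncomment to pin the address: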
+          #proto: static
+          #name: ${_param:primary_interface}
+          #address: ${_param:single_address}
+          #netmask: 255.255.255.0
+    system:
+      name: ${_param:infra_config_hostname}
+      domain: ${_param:cluster_domain}
diff --git a/classes/cluster/k8s_aio_calico/init.yml b/classes/cluster/k8s_aio_calico/init.yml
new file mode 100644
index 0000000..de54caf
--- /dev/null
+++ b/classes/cluster/k8s_aio_calico/init.yml
@@ -0,0 +1,4 @@
+classes:
+- cluster.k8s_aio_calico.kubernetes
+- cluster.k8s_aio_calico.infra
+- cluster.overrides
diff --git a/classes/cluster/k8s_aio_calico/kubernetes/compute.yml b/classes/cluster/k8s_aio_calico/kubernetes/compute.yml
new file mode 100644
index 0000000..81ea8d1
--- /dev/null
+++ b/classes/cluster/k8s_aio_calico/kubernetes/compute.yml
@@ -0,0 +1,48 @@
+classes:
+- system.linux.system.repo.docker
+- system.kubernetes.pool.cluster
+- system.salt.minion.cert.k8s_client
+- system.salt.minion.cert.etcd_client
+- cluster.k8s_aio_calico
+parameters:
+  _param:
+    kubernetes_calicoctl_image: docker-prod-virtual.sandbox.mirantis.net/mirantis/projectcalico/calico/ctl:latest
+    kubernetes_calico_image: docker-prod-virtual.sandbox.mirantis.net/mirantis/projectcalico/calico/node:latest
+    kubernetes_calico_cni_image: docker-prod-virtual.sandbox.mirantis.net/mirantis/projectcalico/calico/cni:latest
+    kubernetes_hyperkube_image: docker-prod-virtual.sandbox.mirantis.net/mirantis/kubernetes/hyperkube-amd64:v1.6.2-2
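+  # Pin the Docker engine build for Ubuntu xenial.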
+  docker:
+    host:
+      pkgs:
+        - docker-engine=1.12.6-0~ubuntu-xenial
+        - python-docker
+  kubernetes:
+    pool:
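+      # Calico components talk to etcd over TLS using the etcd_client certificate from above.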
+      network:
+        etcd:
+          ssl:
+            enabled: true
+        image: ${_param:kubernetes_calico_image}
+        calicoctl:
+          image: ${_param:kubernetes_calicoctl_image}
+        cni:
+          image: ${_param:kubernetes_calico_cni_image}
+    common:
+      hyperkube:
+        image: ${_param:kubernetes_hyperkube_image}
+  linux:
+    network:
+      resolv:
+        dns:
+          - 10.254.0.10
+          - 172.18.176.4
+          - 172.18.176.7
+      interface:
+        primary_interface:
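+          # Static route sending the Kubernetes service network to this node's own address.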
+          route:
+            kubernetes_services:
+              address: 10.254.0.0
+              netmask: 255.255.0.0
+              gateway: ${_param:single_address}
diff --git a/classes/cluster/k8s_aio_calico/kubernetes/control.yml b/classes/cluster/k8s_aio_calico/kubernetes/control.yml
new file mode 100644
index 0000000..8ac609d
--- /dev/null
+++ b/classes/cluster/k8s_aio_calico/kubernetes/control.yml
@@ -0,0 +1,40 @@
+classes:
+- service.etcd.server.cluster
+- system.haproxy.proxy.listen.kubernetes.apiserver
+- system.linux.system.repo.docker
+- system.salt.minion.cert.etcd_server
+- system.kubernetes.master.cluster
+- cluster.k8s_aio_calico.kubernetes.compute
+- cluster.k8s_aio_calico
+parameters:
+  _param:
+    keepalived_vip_interface: ${_param:primary_interface}
+    keepalived_vip_virtual_router_id: 60
+  etcd:
+    server:
+      source:
+        engine: docker_hybrid
+      ssl:
+        enabled: true
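+      # Pre-seed the Calico IP pool in etcd with outbound NAT (masquerade) enabled.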
+      setup:
+        calico:
+          key: /calico/v1/ipam/v4/pool/${_param:calico_private_network}-${_param:calico_private_netmask}
+          value: '{"masquerade":true,"cidr":"${_param:calico_private_network}/${_param:calico_private_netmask}"}'
+  kubernetes:
+    master:
+      etcd:
+        ssl:
+          enabled: true
+      network:
+        etcd:
+          ssl:
+            enabled: true
+      namespace:
+        netchecker:
+          enabled: true
+      addons:
+        netchecker:
+          enabled: true
+        helm:
+          enabled: true
diff --git a/classes/cluster/k8s_aio_calico/kubernetes/init.yml b/classes/cluster/k8s_aio_calico/kubernetes/init.yml
new file mode 100644
index 0000000..7533873
--- /dev/null
+++ b/classes/cluster/k8s_aio_calico/kubernetes/init.yml
@@ -0,0 +1,110 @@
+parameters:
+  _param:
+    salt_minion_ca_host: ${_param:infra_config_hostname}.${_param:cluster_domain}
+    salt_minion_ca_authority: salt_master_ca
+
+    # kubernetes settings
+    kubernetes_admin_user: admin
+    kubernetes_admin_password: sbPfel23ZigJF3Bm
+    kubernetes_admin_token: PpP6Mm3pAoPVqcKOKUu0x1dh7b1959Fi
+    kubernetes_kubelet_token: JJ2PKHxjiU6EYvIt18BqwdSK1HvWh8pt
+    kubernetes_kube-proxy_token: jT0hJk9L6cIw5UpYDNhsRwcj3Z2n62B6
+    kubernetes_scheduler_token: VgkUHfrW07zNxrb0ucFyX7NBnSJN9Xp6
+    kubernetes_controller-manager_token: uXrdZ1YKF6qlYm3sHje2iEXMGAGDWOIU
+    kubernetes_dns_token: 0S1I4iJeFjq5fopPwwCwTp3xFpEZfeUl
+    etcd_initial_token: IN7KaRMSo3xkGxkjAAPtkRkAgqN4ZNRq
+
+    # addresses and hostnames
+    kubernetes_internal_api_address: 10.254.0.1
+    kubernetes_control_hostname: ctl
+    kubernetes_control_address: 172.16.10.253
+    kubernetes_control_node01_hostname: ctl01
+    kubernetes_control_node02_hostname: ctl02
+    kubernetes_control_node03_hostname: ctl03
+    kubernetes_compute_node01_hostname: cmp01
+    kubernetes_compute_node02_hostname: cmp02
+    kubernetes_control_node01_address: 172.16.10.101
+    kubernetes_control_node02_address: 172.16.10.102
+    kubernetes_control_node03_address: 172.16.10.103
+    kubernetes_compute_node01_address: 172.16.10.105
+    kubernetes_compute_node02_address: 172.16.10.106
+    kubernetes_proxy_node01_hostname: prx01
+    kubernetes_proxy_node01_address: 172.16.10.121
+
+    cluster_vip_address: ${_param:kubernetes_control_address}
+    cluster_local_address: ${_param:single_address}
+
+    # etcd cluster members
+    cluster_node01_hostname: ${_param:kubernetes_control_node01_hostname}
+    cluster_node01_address: ${_param:kubernetes_control_node01_address}
+    cluster_node01_port: 4001
+    cluster_node02_hostname: ${_param:kubernetes_control_node02_hostname}
+    cluster_node02_address: ${_param:kubernetes_control_node02_address}
+    cluster_node02_port: 4001
+    cluster_node03_hostname: ${_param:kubernetes_control_node03_hostname}
+    cluster_node03_address: ${_param:kubernetes_control_node03_address}
+    cluster_node03_port: 4001
+
+    # calico
+    calico_private_network: 192.168.0.0
+    calico_private_netmask: 16
+
+  linux:
+    network:
+      resolv:
+        domain: virtual-mcp11-k8s-calico.local
+        options:
+          - ndots:5
+          - timeout:2
+          - attempts:2
+      host:
+        ctl:
+          address: ${_param:kubernetes_control_address}
+          names:
+          - ${_param:kubernetes_control_hostname}
+          - ${_param:kubernetes_control_hostname}.${_param:cluster_domain}
+        ctl01:
+          address: ${_param:kubernetes_control_node01_address}
+          names:
+          - ${_param:kubernetes_control_node01_hostname}
+          - ${_param:kubernetes_control_node01_hostname}.${_param:cluster_domain}
+        ctl02:
+          address: ${_param:kubernetes_control_node02_address}
+          names:
+          - ${_param:kubernetes_control_node02_hostname}
+          - ${_param:kubernetes_control_node02_hostname}.${_param:cluster_domain}
+        ctl03:
+          address: ${_param:kubernetes_control_node03_address}
+          names:
+          - ${_param:kubernetes_control_node03_hostname}
+          - ${_param:kubernetes_control_node03_hostname}.${_param:cluster_domain}
+        cmp01:
+          address: ${_param:kubernetes_compute_node01_address}
+          names:
+          - ${_param:kubernetes_compute_node01_hostname}
+          - ${_param:kubernetes_compute_node01_hostname}.${_param:cluster_domain}
+        cmp02:
+          address: ${_param:kubernetes_compute_node02_address}
+          names:
+          - ${_param:kubernetes_compute_node02_hostname}
+          - ${_param:kubernetes_compute_node02_hostname}.${_param:cluster_domain}
+        prx01:
+          address: ${_param:kubernetes_proxy_node01_address}
+          names:
+          - ${_param:kubernetes_proxy_node01_hostname}
+          - ${_param:kubernetes_proxy_node01_hostname}.${_param:cluster_domain}
+    system:
+      rc:
+        local: |
+          #!/bin/sh -e
+          #
+          # rc.local
+          #
+          ######### This file is managed by Salt! ##########
+          # This script is executed at the end of each multiuser runlevel.
+          # Make sure that the script will "exit 0" on success or any other
+          # value on error.
+          #
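+          # Route the Kubernetes service network out the primary interface.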
+          ip route add 10.254.0.0/16 dev ens3
+          exit 0