Sync mcp-virtual-aio model from mcp-virtual-lab repo

This patch syncs the mcp-virtual-aio model and scripts from the mcp-virtual-lab repository.
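
The model includes cluster.overrides (classes/cluster/overrides.yml), so
individual pillar parameters can be adjusted at runtime without editing the
model itself. As a minimal sketch of that mechanism (the foo/bar pair is just
the placeholder used in overrides.yml), run on the config node:

  # write a cluster-level parameter into overrides.yml via the reclass module
  salt-call reclass.cluster_meta_set foo bar
  # refresh pillar data so the new value is picked up
  salt-call saltutil.refresh_pillar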

Change-Id: Icf0a9eb44bc0d06e9fffe22bf3eb03b0f29d75c5
diff --git a/classes/cluster/overrides.yml b/classes/cluster/overrides.yml
new file mode 100644
index 0000000..0b36fca
--- /dev/null
+++ b/classes/cluster/overrides.yml
@@ -0,0 +1,2 @@
+# This file is used for dynamic metadata.
+# `salt-call reclass.cluster_meta_set foo bar` can be used to set values here.
diff --git a/classes/cluster/virtual-mcp11-aio/.env b/classes/cluster/virtual-mcp11-aio/.env
new file mode 100644
index 0000000..e138b42
--- /dev/null
+++ b/classes/cluster/virtual-mcp11-aio/.env
@@ -0,0 +1 @@
+FORMULAS_SALT_MASTER+=(java openssh ntp nginx collectd sensu heka sphinx mysql galera grafana libvirt rsyslog glusterfs postfix xtrabackup freeipa prometheus telegraf elasticsearch kibana rundeck devops-portal memcached rabbitmq apache keystone glance nova neutron cinder heat horizon ironic tftpd-hpa bind powerdns designate barbican iptables)
diff --git a/classes/cluster/virtual-mcp11-aio/infra/config.yml b/classes/cluster/virtual-mcp11-aio/infra/config.yml
new file mode 100644
index 0000000..c59029b
--- /dev/null
+++ b/classes/cluster/virtual-mcp11-aio/infra/config.yml
@@ -0,0 +1,10 @@
+classes:
+  - cluster.virtual-mcp11-aio
+parameters:
+  _param:
+    linux_system_codename: xenial
+  linux:
+    system:
+      name: cfg01
+      domain: ${_param:cluster_domain}
+      purge_repos: ${_param:linux_system_purge_repos}
diff --git a/classes/cluster/virtual-mcp11-aio/infra/init.yml b/classes/cluster/virtual-mcp11-aio/infra/init.yml
new file mode 100644
index 0000000..46d0de0
--- /dev/null
+++ b/classes/cluster/virtual-mcp11-aio/infra/init.yml
@@ -0,0 +1,4 @@
+parameters:
+  _param:
+    apt_mk_version: stable
+    linux_system_purge_repos: false
diff --git a/classes/cluster/virtual-mcp11-aio/init.yml b/classes/cluster/virtual-mcp11-aio/init.yml
new file mode 100755
index 0000000..614dfd3
--- /dev/null
+++ b/classes/cluster/virtual-mcp11-aio/init.yml
@@ -0,0 +1,96 @@
+classes:
+- system.linux.network.dynamic_hosts
+- service.git.client
+- system.linux.system.single
+- system.linux.system.repo.mcp.salt
+- system.openssh.client.lab
+- system.salt.master.api
+- system.salt.master.pkg
+- system.salt.minion.ca.salt_master
+- system.salt.minion.cert.proxy
+- system.reclass.storage.salt
+- system.mysql.client
+- system.memcached.server.single
+- system.rabbitmq.server.single
+- cluster.virtual-mcp11-aio.infra
+- cluster.virtual-mcp11-aio.openstack
+- cluster.overrides
+parameters:
+  _param:
+    reclass_data_repository: https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-aio
+    reclass_data_revision: master
+    salt_master_environment_repository: "https://github.com/tcpcloud"
+    salt_master_environment_revision: master
+    reclass_config_master: 192.168.10.90
+    single_address: 172.16.10.90
+    infra_config_address: ${_param:single_address}
+    cluster_domain: virtual-mcp11-aio.local
+    cluster_name: virtual-mcp11-aio
+    infra_config_hostname: cfg01
+
+    mysql_admin_user: root
+    mysql_admin_password: workshop
+    mysql_cluster_role: master
+    rabbitmq_secret_key: workshop
+    rabbitmq_admin_password: workshop
+    loopback_device_size: 20
+    salt_master_host: 192.168.10.90
+    salt_master_base_environment: prd
+    salt_minion_ca_host: ${linux:network:fqdn}
+    salt_api_password_hash: "$6$sGnRlxGf$al5jMCetLP.vfI/fTl3Z0N7Za1aeiexL487jAtyRABVfT3NlwZxQGVhO7S1N8OwS/34VHYwZQA8lkXwKMN/GS1"
+  openssh:
+    server:
+      password_auth: true
+      permit_root_login: true
+  linux:
+    system:
+      user:
+        root:
+          enabled: true
+          # r00tme
+          password: $6$9ojWyyN.$26Vj46JtCUL6C7XBQ8RmQTZLwo8/8SkqTRElXh0X2YBLrt7E/aVe2AYQ5gguYUwUknZNOSn5q7M9M3Jyf2gof/
+      repo:
+        linux_system_repo:
+          source: ${_param:linux_system_repo}
+          architectures: amd64
+          clean_file: true
+          pin:
+          - pin: ${_param:linux_system_repo_pin}
+            priority: ${_param:linux_system_repo_priority}
+            package: '*'
+    network:
+      bridge: openvswitch
+      interface:
+        br-floating:
+          enabled: true
+          type: ovs_bridge
+        phy-public:
+          enabled: true
+          type: ovs_port
+          bridge: br-floating
+          proto: static
+          address: ${_param:openstack_public_neutron_subnet_gateway}
+          netmask: 255.255.255.0
+# overrides applied at this level
+  nova:
+    compute:
+      cache:
+        members:
+        - host: ${_param:single_address}
+          port: 11211
+  cinder:
+    volume:
+      cache:
+        members:
+        - host: ${_param:single_address}
+          port: 11211
+  galera:
+    master:
+      members:
+      - host: ${_param:single_address}
+        port: 4567
+  neutron:
+    server:
+      message_queue:
+        members:
+          - host: ${_param:single_address}
diff --git a/classes/cluster/virtual-mcp11-aio/openstack/init.yml b/classes/cluster/virtual-mcp11-aio/openstack/init.yml
new file mode 100755
index 0000000..03d7636
--- /dev/null
+++ b/classes/cluster/virtual-mcp11-aio/openstack/init.yml
@@ -0,0 +1,181 @@
+classes:
+- system.linux.system.lowmem
+- system.linux.system.repo.mcp.openstack
+- system.linux.system.repo.mcp.extra
+- system.linux.storage.loopback
+- system.rabbitmq.server.vhost.openstack
+- system.keystone.server.wsgi
+- system.keystone.server.single
+- service.galera.master.cluster
+- system.galera.server.database.cinder
+- system.galera.server.database.designate
+- system.galera.server.database.glance
+- system.galera.server.database.heat
+- system.galera.server.database.keystone
+- system.galera.server.database.nova
+- system.galera.server.database.barbican
+- system.keystone.client.single
+- system.keystone.client.service.barbican
+- system.keystone.client.service.nova21
+- system.keystone.client.service.nova-placement
+- system.keystone.client.service.designate
+- system.glance.control.single
+- system.nova.control.single
+- system.neutron.control.openvswitch.single
+- system.neutron.client.service.public
+- system.heat.server.single
+- system.nova.compute.single
+- service.neutron.gateway.single
+- system.cinder.control.single
+- system.cinder.control.backend.lvm
+- service.cinder.volume.single
+- system.cinder.volume.backend.lvm
+- system.horizon.server.single
+- system.bind.server.single
+- system.barbican.server.single
+- service.barbican.server.plugin.simple_crypto
+- system.designate.server.single
+- system.designate.server.backend.bind
+parameters:
+  _param:
+    openstack_version: ocata
+    cluster_public_host: ${_param:single_address}
+    cluster_public_protocol: http
+    openstack_region: RegionOne
+    admin_email: root@localhost
+    rabbitmq_openstack_password: workshop
+    galera_server_cluster_name: openstack_cluster
+    galera_server_maintenance_password: workshop
+    galera_server_admin_password: workshop
+    keystone_version: ${_param:openstack_version}
+    barbican_version: ${_param:openstack_version}
+    glance_version: ${_param:openstack_version}
+    nova_version: ${_param:openstack_version}
+    neutron_version: ${_param:openstack_version}
+    cinder_version: ${_param:openstack_version}
+    heat_version: ${_param:openstack_version}
+    horizon_version: ${_param:openstack_version}
+    designate_version: ${_param:openstack_version}
+    keystone_service_token: workshop
+    keystone_admin_password: workshop
+    keystone_barbican_password: workshop
+    keystone_ceilometer_password: workshop
+    keystone_cinder_password: workshop
+    keystone_glance_password: workshop
+    keystone_heat_password: workshop
+    keystone_neutron_password: workshop
+    keystone_nova_password: workshop
+    keystone_designate_password: workshop
+    keystone_service_host: ${_param:single_address}
+    mysql_keystone_password: workshop
+    mysql_barbican_password: workshop
+    mysql_glance_password: workshop
+    mysql_nova_password: workshop
+    mysql_neutron_password: workshop
+    mysql_cinder_password: workshop
+    mysql_heat_password: workshop
+    mysql_designate_password: workshop
+    barbican_service_host: ${_param:single_address}
+    heat_service_host: ${_param:single_address}
+    neutron_service_host: ${_param:single_address}
+    glance_service_host: ${_param:single_address}
+    cinder_service_host: ${_param:single_address}
+    designate_service_host: ${_param:single_address}
+    nova_service_host: ${_param:single_address}
+    control_address: ${_param:single_address}
+    metadata_password: workshop
+    cluster_vip_address: ${_param:single_address}
+    cluster_local_address: ${_param:single_address}
+    openstack_database_address: ${_param:single_address}
+    tenant_address: ${_param:single_address}
+    heat_domain_admin_password: workshop
+    horizon_secret_key: workshop
+    horizon_identity_encryption: none
+    horizon_identity_version: 2
+    horizon_identity_host: ${_param:single_address}
+    designate_bind9_rndc_key: 4pc+X4PDqb2q+5o72dISm72LM1Ds9X2EYZjqg+nmsS7FhdTwzFFY8l/iEDmHxnyjkA33EQC8H+z0fLLBunoitw==
+    designate_pool_target_type: bind9
+    designate_domain_id: 5186883b-91fb-4891-bd49-e6769234a8fc
+    designate_pool_ns_records:
+      - hostname: 'ns1.example.org.'
+        priority: 10
+    designate_pool_nameservers:
+      - host: ${_param:single_address}
+        port: 53
+    designate_pool_target_masters:
+      - host: ${_param:single_address}
+        port: 5354
+    designate_pool_target_options:
+      host: ${_param:single_address}
+      port: 53
+      rndc_host: 127.0.0.1
+      rndc_port: 953
+      rndc_key_file: /etc/designate/rndc.key
+    designate_quota_zones: 40
+    designate_worker_enabled: true
+    linux_system_repo: deb [arch=amd64] http://mirror.fuel-infra.org/mcp-repos/${_param:openstack_version}/xenial ${_param:openstack_version} main
+    linux_system_repo_pin: release a=${_param:openstack_version}
+    linux_system_repo_priority: 1200
+    openstack_public_neutron_subnet_gateway: 192.168.130.1
+    openstack_public_neutron_subnet_cidr: 192.168.130.0/24
+    openstack_public_neutron_subnet_allocation_start: 192.168.130.10
+    openstack_public_neutron_subnet_allocation_end: 192.168.130.254
+    barbican_simple_crypto_kek: YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXoxMjM0NTY=
+    barbican_integration_enabled: False
+  galera:
+    master:
+      members: ~
+      innodb_buffer_pool_size: 1024M
+      max_connections: 1000
+    slave:
+      enabled: false
+  barbican:
+    server:
+      ks_notifications_enable: True
+      store:
+        software:
+          crypto_plugin: simple_crypto
+          store_plugin: store_crypto
+          global_default: True
+  neutron:
+    server:
+      message_queue:
+        members: ~
+    gateway:
+      agent_mode: dvr_snat
+      dvr: True
+  nova:
+    compute:
+      barbican:
+        enabled: ${_param:barbican_integration_enabled}
+      vncproxy_url: http://${_param:single_address}:6080
+      network:
+        user: neutron
+        password: ${_param:keystone_neutron_password}
+        tenant: service
+      cache:
+        members: ~
+    controller:
+      barbican:
+        enabled: ${_param:barbican_integration_enabled}
+      vncproxy_url: http://${_param:single_address}:6080
+  cinder:
+    controller:
+      barbican:
+        enabled: ${_param:barbican_integration_enabled}
+    volume:
+      cache:
+        members: ~
+  horizon:
+    server:
+      secure: False
+  designate:
+    server:
+      quota:
+        zones: ${_param:designate_quota_zones}
+    worker:
+      enabled: ${_param:designate_worker_enabled}
+  glance:
+    server:
+      barbican:
+        enabled: ${_param:barbican_integration_enabled}