Add heat template for oc41 deployment
Change-Id: Ida9e87c6452f9f7769fab1764332c29fce26bf7b
diff --git a/tcp_tests/templates/_heat_environments/ReadMe.md b/tcp_tests/templates/_heat_environments/ReadMe.md
new file mode 100644
index 0000000..09f568a
--- /dev/null
+++ b/tcp_tests/templates/_heat_environments/ReadMe.md
@@ -0,0 +1,116 @@
+1. Required template parameters
+===============================
+Parameters with fixed names are required by the Jenkins pipeline swarm-bootstrap-salt-cluster-heat.groovy.
+These parameters can be defined in the .env or .hot files and are used to generate the model.
+```
+management_subnet_cidr
+management_subnet_gateway_ip
+management_subnet_cfg01_ip
+control_subnet_cidr
+tenant_subnet_cidr
+external_subnet_cidr
+```
+Also, the following parameters might be useful to define:
+```
+management_subnet_pool_start
+management_subnet_pool_end
+```
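+For example, all of these parameters can be set in the 'parameter_defaults'
+section of a cloud .env file; the values below are taken from eu-cloud.env:
+```
+parameter_defaults:
+  control_subnet_cidr: "10.6.0.0/24"
+  tenant_subnet_cidr: "10.8.0.0/24"
+  external_subnet_cidr: "10.9.0.0/24"
+  management_subnet_cidr: "10.7.0.0/24"
+  management_subnet_cfg01_ip: 10.7.0.15
+  management_subnet_gateway_ip: 10.7.0.1
+  management_subnet_pool_start: 10.7.0.20
+  management_subnet_pool_end: 10.7.0.60
+```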
+
+2. Required template objects
+============================
+
+2.1 Node roles
+--------------
+
+Node roles are automatically gathered by envmanager_heat.py
+from OS::Nova::Server resources, where they are defined as a list under the "metadata: roles" key:
+
+```
+ cfg01_node:
+ type: OS::Nova::Server
+ properties:
+ metadata:
+ roles:
+ - salt_master
+```
+
+2.2 L3 network roles
+--------------------
+
+Network roles are automatically gathered by envmanager_heat.py
+from OS::Neutron::Subnet resources, where they are defined as a list of tags:
+
+```
+ control_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ ...
+ tags:
+ - private-pool01
+```
+There are four fixed network roles at the moment:
+```
+admin-pool01 # for management_subnet_cidr
+private-pool01 # for control_subnet_cidr
+tenant-pool01 # for tenant_subnet_cidr
+external-pool01 # for external_subnet_cidr
+```
+
+3. External parameters
+======================
+
+There are parameters that are automatically defined by envmanager_heat.py
+outside of the template defaults, and they can be used
+in the template to define or find specific resources:
+```
+env_name     # set from the environment variable ENV_NAME; matches the Heat stack name
+mcp_version  # set from the environment variable MCP_VERSION
+```
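+For example, the fragments in this change use env_name to address the
+per-environment networks by name, and mcp_version to pick release-specific images:
+```
+# Attach a port to the management network of the current environment:
+network_id: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
+
+# Boot a VCP node from the image built for the MCP release under test:
+image: { list_join: ['', [ 'ubuntu-vcp-', { get_param: mcp_version } ]] }
+```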
+
+4. Pre-defined resources in the OpenStack cloud
+===============================================
+
+4.1 Public network
+------------------
+A public network for floating IP addresses should be pre-defined in the cloud.
+Heat templates must use this network to define floating IPs.
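+In this change, the public network name is passed to the fragments via the
+net_public parameter and used as the floating network, for example:
+```
+  floating_ip:
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network: { get_param: net_public }
+      port_id: { get_resource: instance_port01 }
+```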
+
+4.2 Images
+----------
+The Jenkins pipeline swarm-bootstrap-salt-cluster-heat.groovy checks for and creates the
+required images. In the templates, the following image names should be used:
+
+```
+# Image used to bootstrap salt master node cfg01:
+image: { list_join: ['', [ 'cfg01-day01-', { get_param: mcp_version } ]] }
+
+# Config drive image to boot cfg01, with user-data and reclass model
+image: { list_join: ['', [ 'cfg01.', { get_param: env_name }, '-config-drive.iso' ]] }
+
+# Image used to bootstrap VCP nodes:
+image: { list_join: ['', [ 'ubuntu-vcp-', { get_param: mcp_version } ]] }
+
+# Image used to bootstrap the Foundation node:
+image: { list_join: ['', [ 'ubuntu-16.04-foundation-', { get_param: mcp_version } ]] }
+```
+
+5. The foundation node
+======================
+To get direct access to the environment resources without tunnels and jump hosts,
+the pipeline swarm-bootstrap-salt-cluster-heat.groovy expects a foundation node
+to be defined in each Heat template.
+
+This node is used to launch a Jenkins agent and run Jenkins jobs inside the
+Heat stack. Depending on the environment, the foundation node can be connected
+to several or all of the internal networks to run the necessary tests.
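+In the environment files added here, the foundation node fragment is registered
+in the resource_registry, so the top-level template can instantiate it as
+MCP::FoundationNode:
+```
+resource_registry:
+  "MCP::FoundationNode": fragments/FoundationNode.yaml
+```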
+
+The template 'outputs' should contain the 'foundation_floating' key, for example:
+```
+outputs:
+ foundation_floating:
+ description: foundation node IP address (floating) from external network
+ value:
+ get_attr:
+ - foundation_node
+ - instance_floating_address
+```
diff --git a/tcp_tests/templates/_heat_environments/eu-cloud.env b/tcp_tests/templates/_heat_environments/eu-cloud.env
new file mode 100644
index 0000000..de3bb06
--- /dev/null
+++ b/tcp_tests/templates/_heat_environments/eu-cloud.env
@@ -0,0 +1,40 @@
+
+resource_registry:
+ "MCP::MultipleInstance": fragments/MultipleInstance.yaml
+ #"MCP::Flavors": fragments/Flavors.yaml
+ "MCP::MasterNode": fragments/MasterNode.yaml
+ "MCP::Compute": fragments/Compute.yaml
+ "MCP::Networks": fragments/Networks.yaml
+ "MCP::SingleInstance": fragments/Instance.yaml
+ "MCP::FoundationNode": fragments/FoundationNode.yaml
+
+parameter_defaults:
+
+ cfg_flavor: system.virtual.salt_master
+ ctl_flavor: system.golden.openstack.control
+ cid_flavor: system.golden.cicd.control
+ ntw_flavor: system.compact.opencontrail.control
+ nal_flavor: system.compact.opencontrail.analytics
+ dbs_flavor: system.golden.openstack.database
+ msg_flavor: system.golden.openstack.message_queue
+ mon_flavor: system.golden.stacklight.server
+ log_flavor: system.golden.stacklight.log
+ mtr_flavor: system.golden.stacklight.telemetry
+ cmp_flavor: system.virtual.openstack.compute
+ kvm_fake_flavor: system.virtual.fake_kvm
+ foundation_flavor: system.virtual.foundation
+
+ key_pair: system_key_8133
+
+ net_public: public
+
+ nameservers: 172.18.208.44
+ control_subnet_cidr: "10.6.0.0/24"
+ tenant_subnet_cidr: "10.8.0.0/24"
+ external_subnet_cidr: "10.9.0.0/24"
+ management_subnet_cidr: "10.7.0.0/24"
+ management_subnet_cfg01_ip: 10.7.0.15
+ management_subnet_gateway_ip: 10.7.0.1
+ management_subnet_pool_start: 10.7.0.20
+ management_subnet_pool_end: 10.7.0.60
+ salt_master_control_ip: 10.6.0.15
diff --git a/tcp_tests/templates/_heat_environments/fragments/Compute.yaml b/tcp_tests/templates/_heat_environments/fragments/Compute.yaml
new file mode 100644
index 0000000..6b4c0c7
--- /dev/null
+++ b/tcp_tests/templates/_heat_environments/fragments/Compute.yaml
@@ -0,0 +1,110 @@
+heat_template_version: queens
+
+description: Single server instance fragment
+
+parameters:
+ network:
+ type: string
+ instance_flavor:
+ type: string
+ instance_name:
+ type: string
+ instance_config_host:
+ type: string
+ key_pair:
+ type: string
+ instance_domain:
+ type: string
+ net_public:
+ type: string
+ control_net_static_ip:
+ type: string
+ underlay_userdata:
+ type: string
+ mcp_version:
+ type: string
+ env_name:
+ type: string
+
+resources:
+ instance_port01:
+ type: OS::Neutron::Port
+ properties:
+ port_security_enabled: false
+ network_id: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
+ instance_port02:
+ type: OS::Neutron::Port
+ properties:
+ port_security_enabled: false
+ network_id: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
+ fixed_ips:
+ - ip_address: { get_param: control_net_static_ip }
+ instance_port03:
+ type: OS::Neutron::Port
+ properties:
+ port_security_enabled: false
+ network_id: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
+
+ instance_instance:
+ type: OS::Nova::Server
+ properties:
+ image_update_policy: REBUILD
+ flavor: { get_param: instance_flavor }
+ image: { list_join: ['', [ 'ubuntu-vcp-', { get_param: mcp_version } ]] }
+ key_name: { get_param: key_pair }
+ name:
+ list_join:
+ - '.'
+ - [ { get_param: instance_name }, { get_param: instance_domain } ]
+ networks:
+ - port: { get_resource: instance_port01 }
+ - port: { get_resource: instance_port02 }
+ - port: { get_resource: instance_port03 }
+ block_device_mapping_v2:
+ - device_name: /dev/vdb
+ device_type: disk
+ boot_index: -1
+ delete_on_termination: true
+ ephemeral_size: 10
+
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ #template: { get_file: underlay--user-data-cfg01--heat.yaml }
+ #template: { get_file: ../underlay-userdata.yaml }
+ template: { get_param: underlay_userdata }
+ params:
+ hostname: { list_join: ['.', [ { get_param: instance_name }, { get_param: instance_domain } ]] }
+ $node_hostname: { get_param: instance_name }
+ $node_domain: { get_param: instance_domain }
+ $config_host: { get_param: instance_config_host }
+ metadata:
+ roles:
+ - salt_minion
+
+ floating_ip:
+ depends_on: [instance_instance]
+ type: OS::Neutron::FloatingIP
+ properties:
+ floating_network: { get_param: net_public }
+ port_id: { get_resource: instance_port01 }
+ floating_ip_association:
+ depends_on: [floating_ip]
+ type: OS::Neutron::FloatingIPAssociation
+ properties:
+ floatingip_id: { get_resource: floating_ip }
+ port_id: { get_resource: instance_port01 }
+
+outputs:
+ instance_address:
+ value:
+ get_attr:
+ - instance_instance
+ - addresses
+ - 'management_net'
+ - 0
+ - addr
+ description: "Instance's private IP address"
+ instance:
+ value: { get_resource: instance_instance }
+ description: "Instance"
diff --git a/tcp_tests/templates/_heat_environments/fragments/Flavors.yaml b/tcp_tests/templates/_heat_environments/fragments/Flavors.yaml
new file mode 100644
index 0000000..6db41e6
--- /dev/null
+++ b/tcp_tests/templates/_heat_environments/fragments/Flavors.yaml
@@ -0,0 +1,123 @@
+---
+
+heat_template_version: queens
+
+resources:
+ cfg01_virtual:
+ type: OS::Nova::Flavor
+ properties:
+ disk: 100
+ extra_specs: {"capabilities:hypervisor_type": "QEMU", "hw:numa_nodes": "2", "hw:numa_cpus.0": "0,4,1,5", "hw:numa_cpus.1": "2,6,3,7", "hw:numa_mem.0": "8192", "hw:numa_mem.1": "8192"}
+ name: cfg01_virtual
+ ram: 16384
+ vcpus: 8
+
+ kvm_fake_virtual:
+ type: OS::Nova::Flavor
+ properties:
+ disk: 120
+ extra_specs: {"capabilities:hypervisor_type": "QEMU"}
+ name: kvm_fake_virtual
+ ram: 2048
+ vcpus: 1
+
+ ctl_virtual:
+ type: OS::Nova::Flavor
+ properties:
+ disk: 120
+ extra_specs: {"capabilities:hypervisor_type": "QEMU", "hw:numa_nodes": "2", "hw:numa_cpus.0": "0,4,1,5", "hw:numa_cpus.1": "2,6,3,7", "hw:numa_mem.0": "8192", "hw:numa_mem.1": "8192"}
+ name: ctl_virtual
+ ram: 16384
+ vcpus: 8
+
+ cid_virtual:
+ type: OS::Nova::Flavor
+ properties:
+ disk: 120
+ extra_specs: {"capabilities:hypervisor_type": "QEMU"}
+ name: cid_virtual
+ ram: 6144
+ vcpus: 2
+
+ ntw_virtual:
+ type: OS::Nova::Flavor
+ properties:
+ disk: 120
+ extra_specs: {"capabilities:hypervisor_type": "QEMU", "hw:numa_nodes": "2", "hw:numa_cpus.0": "0,1", "hw:numa_cpus.1": "2,3", "hw:numa_mem.0": "8192", "hw:numa_mem.1": "8192"}
+ name: ntw_virtual
+ ram: 16384
+ vcpus: 4
+
+ nal_virtual:
+ type: OS::Nova::Flavor
+ properties:
+ disk: 120
+ extra_specs: {"capabilities:hypervisor_type": "QEMU"}
+ name: nal_virtual
+ ram: 4096
+ vcpus: 4
+
+ dbs_virtual:
+ type: OS::Nova::Flavor
+ properties:
+ disk: 150
+ extra_specs: {"capabilities:hypervisor_type": "QEMU", "hw:numa_nodes": "2", "hw:numa_cpus.0": "0,1", "hw:numa_cpus.1": "2,3", "hw:numa_mem.0": "4096", "hw:numa_mem.1": "4096"}
+ name: dbs_virtual
+ ram: 8192
+ vcpus: 4
+
+ msg_virtual:
+ type: OS::Nova::Flavor
+ properties:
+ disk: 120
+ extra_specs: {"capabilities:hypervisor_type": "QEMU"}
+ name: msg_virtual
+ ram: 16384
+ vcpus: 4
+
+ mon_virtual:
+ type: OS::Nova::Flavor
+ properties:
+ disk: 120
+ extra_specs: {"capabilities:hypervisor_type": "QEMU"}
+ name: mon_virtual
+ ram: 4096
+ vcpus: 2
+
+ log_virtual:
+ type: OS::Nova::Flavor
+ properties:
+ disk: 100
+ extra_specs: {"capabilities:hypervisor_type": "QEMU"}
+ name: log_virtual
+ ram: 4096
+ vcpus: 2
+
+ mtr_virtual:
+ type: OS::Nova::Flavor
+ properties:
+ disk: 100
+ extra_specs: {"capabilities:hypervisor_type": "QEMU"}
+ name: mtr_virtual
+ ram: 4096
+ vcpus: 2
+
+ cmp_virtual:
+ type: OS::Nova::Flavor
+ properties:
+ disk: 150
+ extra_specs: {"capabilities:hypervisor_type": "QEMU", "hw:numa_nodes": "2", "hw:numa_cpus.0": "0,1", "hw:numa_cpus.1": "2,3", "hw:numa_mem.0": "4096", "hw:numa_mem.1": "4096"}
+ name: cmp_virtual
+ ephemeral: 10
+ ram: 8192
+ vcpus: 4
+
+ foundation_virtual:
+ type: OS::Nova::Flavor
+ properties:
+ disk: 100
+ extra_specs: {"capabilities:hypervisor_type": "QEMU"}
+ name: foundation_virtual
+ ram: 4096
+ vcpus: 2
+...
diff --git a/tcp_tests/templates/_heat_environments/fragments/FoundationNode.yaml b/tcp_tests/templates/_heat_environments/fragments/FoundationNode.yaml
new file mode 100644
index 0000000..91f058a
--- /dev/null
+++ b/tcp_tests/templates/_heat_environments/fragments/FoundationNode.yaml
@@ -0,0 +1,117 @@
+heat_template_version: queens
+
+description: Single server instance fragment
+
+parameters:
+ network:
+ type: string
+ instance_flavor:
+ type: string
+ instance_name:
+ type: string
+ instance_config_host:
+ type: string
+ key_pair:
+ type: string
+ instance_domain:
+ type: string
+ net_public:
+ type: string
+ control_net_static_ip:
+ type: string
+ underlay_userdata:
+ type: string
+ env_name:
+ type: string
+ mcp_version:
+ type: string
+
+resources:
+ instance_port01:
+ type: OS::Neutron::Port
+ properties:
+ port_security_enabled: false
+ network_id: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
+ instance_port02:
+ type: OS::Neutron::Port
+ properties:
+ port_security_enabled: false
+ network_id: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
+ fixed_ips:
+ - ip_address: { get_param: control_net_static_ip }
+ instance_port03:
+ type: OS::Neutron::Port
+ properties:
+ port_security_enabled: false
+ network_id: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
+ instance_port04:
+ type: OS::Neutron::Port
+ properties:
+ port_security_enabled: false
+ network_id: { list_join: ['-', [ 'external_net', { get_param: env_name } ]] }
+
+ instance_instance:
+ type: OS::Nova::Server
+ properties:
+ image_update_policy: REBUILD
+ flavor: { get_param: instance_flavor }
+ image: { list_join: ['', [ 'ubuntu-16.04-foundation-', { get_param: mcp_version } ]] }
+ key_name: { get_param: key_pair }
+ name:
+ list_join:
+ - '.'
+ - [ { get_param: instance_name }, { get_param: env_name } ]
+ networks:
+ - port: { get_resource: instance_port01 }
+ - port: { get_resource: instance_port02 }
+ - port: { get_resource: instance_port03 }
+ - port: { get_resource: instance_port04 }
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ #template: { get_file: underlay--user-data-cfg01--heat.yaml }
+ #template: { get_file: ../underlay-userdata.yaml }
+ template: { get_param: underlay_userdata }
+ params:
+ hostname: { list_join: ['.', [ { get_param: instance_name }, { get_param: instance_domain } ]] }
+ $node_hostname: { get_param: instance_name }
+ $node_domain: { get_param: instance_domain }
+ $config_host: { get_param: instance_config_host }
+ metadata:
+ roles:
+ - foundation_jenkins_slave
+
+ floating_ip:
+ depends_on: [instance_instance]
+ type: OS::Neutron::FloatingIP
+ properties:
+ floating_network: { get_param: net_public }
+ port_id: { get_resource: instance_port01 }
+ floating_ip_association:
+ depends_on: [floating_ip]
+ type: OS::Neutron::FloatingIPAssociation
+ properties:
+ floatingip_id: { get_resource: floating_ip }
+ port_id: { get_resource: instance_port01 }
+
+outputs:
+
+ instance_floating_address:
+ description: foundation node IP address (floating) from external network
+ value:
+ get_attr:
+ - floating_ip
+ - floating_ip_address
+
+ instance_address:
+ value:
+ get_attr:
+ - instance_instance
+ - addresses
+ - 'management_net'
+ - 0
+ - addr
+ description: "Instance's private IP address"
+ instance:
+ value: { get_resource: instance_instance }
+ description: "Instance"
diff --git a/tcp_tests/templates/_heat_environments/fragments/Instance.yaml b/tcp_tests/templates/_heat_environments/fragments/Instance.yaml
new file mode 100644
index 0000000..1c9be45
--- /dev/null
+++ b/tcp_tests/templates/_heat_environments/fragments/Instance.yaml
@@ -0,0 +1,103 @@
+heat_template_version: queens
+
+description: Single server instance fragment
+
+parameters:
+ network:
+ type: string
+ instance_flavor:
+ type: string
+ instance_name:
+ type: string
+ instance_config_host:
+ type: string
+ key_pair:
+ type: string
+ instance_domain:
+ type: string
+ net_public:
+ type: string
+ control_net_static_ip:
+ type: string
+ underlay_userdata:
+ type: string
+ mcp_version:
+ type: string
+ env_name:
+ type: string
+
+resources:
+ instance_port01:
+ type: OS::Neutron::Port
+ properties:
+ port_security_enabled: false
+ network_id: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
+ instance_port02:
+ type: OS::Neutron::Port
+ properties:
+ port_security_enabled: false
+ network_id: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
+ fixed_ips:
+ - ip_address: { get_param: control_net_static_ip }
+ instance_port03:
+ type: OS::Neutron::Port
+ properties:
+ port_security_enabled: false
+ network_id: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
+
+ instance_instance:
+ type: OS::Nova::Server
+ properties:
+ image_update_policy: REBUILD
+ flavor: { get_param: instance_flavor }
+ image: { list_join: ['', [ 'ubuntu-vcp-', { get_param: mcp_version } ]] }
+ key_name: { get_param: key_pair }
+ name:
+ list_join:
+ - '.'
+ - [ { get_param: instance_name }, { get_param: instance_domain } ]
+ networks:
+ - port: { get_resource: instance_port01 }
+ - port: { get_resource: instance_port02 }
+ - port: { get_resource: instance_port03 }
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ #template: { get_file: underlay--user-data-cfg01--heat.yaml }
+ template: { get_param: underlay_userdata }
+ #template: { get_file: ../../templates/{ get_param: lab_config_name }/underlay-userdata.yaml }
+ params:
+ hostname: { list_join: ['.', [ { get_param: instance_name }, { get_param: instance_domain } ]] }
+ $node_hostname: { get_param: instance_name }
+ $node_domain: { get_param: instance_domain }
+ $config_host: { get_param: instance_config_host }
+ metadata:
+ roles:
+ - salt_minion
+
+ floating_ip:
+ depends_on: [instance_instance]
+ type: OS::Neutron::FloatingIP
+ properties:
+ floating_network: { get_param: net_public }
+ port_id: { get_resource: instance_port01 }
+ floating_ip_association:
+ depends_on: [floating_ip]
+ type: OS::Neutron::FloatingIPAssociation
+ properties:
+ floatingip_id: { get_resource: floating_ip }
+ port_id: { get_resource: instance_port01 }
+
+outputs:
+ instance_address:
+ value:
+ get_attr:
+ - instance_instance
+ - addresses
+ - 'management_net'
+ - 0
+ - addr
+ description: "Instance's private IP address"
+ instance:
+ value: { get_resource: instance_instance }
+ description: "Instance"
diff --git a/tcp_tests/templates/_heat_environments/fragments/MasterNode.yaml b/tcp_tests/templates/_heat_environments/fragments/MasterNode.yaml
new file mode 100644
index 0000000..410deb6
--- /dev/null
+++ b/tcp_tests/templates/_heat_environments/fragments/MasterNode.yaml
@@ -0,0 +1,94 @@
+heat_template_version: queens
+
+description: Single server instance fragment
+
+parameters:
+ management_subnet_cfg01_ip:
+ type: string
+ salt_master_control_ip:
+ type: string
+ network:
+ type: string
+ cfg01_flavor:
+ type: string
+ instance_name:
+ type: string
+ key_pair:
+ type: string
+ instance_domain:
+ type: string
+ net_public:
+ type: string
+ mcp_version:
+ type: string
+ env_name:
+ type: string
+
+resources:
+ instance_port01:
+ type: OS::Neutron::Port
+ properties:
+ port_security_enabled: false
+ network_id: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
+ fixed_ips:
+ - ip_address: { get_param: management_subnet_cfg01_ip }
+
+ instance_port02:
+ type: OS::Neutron::Port
+ properties:
+ port_security_enabled: false
+ network_id: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
+ fixed_ips:
+ - ip_address: { get_param: salt_master_control_ip }
+
+ instance_instance:
+ type: OS::Nova::Server
+ properties:
+ image_update_policy: REBUILD
+ flavor: { get_param: cfg01_flavor }
+ image: { list_join: ['', [ 'cfg01-day01-', { get_param: mcp_version } ]] }
+ key_name: { get_param: key_pair }
+ name:
+ list_join:
+ - '.'
+ - [ { get_param: instance_name }, { get_param: instance_domain } ]
+ networks:
+ - port: { get_resource: instance_port01 }
+ - port: { get_resource: instance_port02 }
+ block_device_mapping_v2:
+ - device_name: /dev/cdrom
+ device_type: cdrom
+ boot_index: -1
+ delete_on_termination: true
+ image: { list_join: ['', [ 'cfg01.', { get_param: env_name }, '-config-drive.iso' ]] }
+ volume_size: 1
+ metadata:
+ roles:
+ - salt_master
+
+ floating_ip:
+ depends_on: [instance_instance]
+ type: OS::Neutron::FloatingIP
+ properties:
+ floating_network: { get_param: net_public }
+ port_id: { get_resource: instance_port01 }
+ floating_ip_association:
+ depends_on: [floating_ip]
+ type: OS::Neutron::FloatingIPAssociation
+ properties:
+ floatingip_id: { get_resource: floating_ip }
+ port_id: { get_resource: instance_port01 }
+
+outputs:
+ instance_address:
+ value:
+ get_attr:
+ - instance_instance
+ - addresses
+ - 'management_net'
+ - 0
+ - addr
+ description: "Instance's private IP address"
+ instance:
+ value: { get_resource: instance_instance }
+ description: "Instance"
diff --git a/tcp_tests/templates/_heat_environments/fragments/MultipleInstance.yaml b/tcp_tests/templates/_heat_environments/fragments/MultipleInstance.yaml
new file mode 100644
index 0000000..986b855
--- /dev/null
+++ b/tcp_tests/templates/_heat_environments/fragments/MultipleInstance.yaml
@@ -0,0 +1,76 @@
+heat_template_version: queens
+
+description: 3 single nodes fragment
+
+parameters:
+ key_pair:
+ type: string
+ network:
+ type: string
+ instance01_name:
+ type: string
+ instance02_name:
+ type: string
+ instance03_name:
+ type: string
+ instance_domain:
+ type: string
+ instance_flavor:
+ type: string
+ instance_config_host:
+ type: string
+ instance01_control_net_static_ip:
+ type: string
+ instance02_control_net_static_ip:
+ type: string
+ instance03_control_net_static_ip:
+ type: string
+ underlay_userdata:
+ type: string
+ mcp_version:
+ type: string
+ env_name:
+ type: string
+
+resources:
+ instance01:
+ type: MCP::SingleInstance
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ key_pair: { get_param: key_pair }
+ network: { get_param: network }
+ control_net_static_ip: {get_param: instance01_control_net_static_ip }
+ instance_name: { get_param: instance01_name }
+ instance_domain: { get_param: instance_domain }
+ instance_flavor: { get_param: instance_flavor }
+ instance_config_host: { get_param: instance_config_host }
+ underlay_userdata: { get_param: underlay_userdata }
+
+ instance02:
+ type: MCP::SingleInstance
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ key_pair: { get_param: key_pair }
+ network: { get_param: network }
+ control_net_static_ip: {get_param: instance02_control_net_static_ip }
+ instance_name: { get_param: instance02_name }
+ instance_domain: { get_param: instance_domain }
+ instance_flavor: { get_param: instance_flavor }
+ instance_config_host: { get_param: instance_config_host }
+ underlay_userdata: { get_param: underlay_userdata }
+
+ instance03:
+ type: MCP::SingleInstance
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ key_pair: { get_param: key_pair }
+ network: { get_param: network }
+ control_net_static_ip: {get_param: instance03_control_net_static_ip }
+ instance_name: { get_param: instance03_name }
+ instance_domain: { get_param: instance_domain }
+ instance_flavor: { get_param: instance_flavor }
+ instance_config_host: { get_param: instance_config_host }
+ underlay_userdata: { get_param: underlay_userdata }
diff --git a/tcp_tests/templates/_heat_environments/fragments/Networks.yaml b/tcp_tests/templates/_heat_environments/fragments/Networks.yaml
new file mode 100644
index 0000000..076684b
--- /dev/null
+++ b/tcp_tests/templates/_heat_environments/fragments/Networks.yaml
@@ -0,0 +1,173 @@
+---
+heat_template_version: queens
+
+description: Network fragment
+
+parameters:
+ env_name:
+ type: string
+ net_public:
+ type: string
+ stack_name:
+ type: string
+ control_subnet_cidr:
+ type: string
+ tenant_subnet_cidr:
+ type: string
+ management_subnet_cidr:
+ type: string
+ external_subnet_cidr:
+ type: string
+ management_subnet_gateway_ip:
+ type: string
+# control_net_dhcp:
+# type: boolean
+# default: false
+# tenant_net_dhcp:
+# type: boolean
+# default: false
+ management_net_dhcp:
+ type: boolean
+ default: true
+ management_subnet_pool_start:
+ type: string
+ management_subnet_pool_end:
+ type: string
+# external_net_dhcp:
+# type: boolean
+# default: false
+
+ nameservers:
+ type: comma_delimited_list
+
+resources:
+ control_net:
+ type: OS::Neutron::Net
+ properties:
+ port_security_enabled: false
+ name: { list_join: ['-', [ 'control_net', { get_param: env_name } ]] }
+ tenant_net:
+ type: OS::Neutron::Net
+ properties:
+ port_security_enabled: false
+ name: { list_join: ['-', [ 'tenant_net', { get_param: env_name } ]] }
+ management_net:
+ type: OS::Neutron::Net
+ properties:
+ port_security_enabled: false
+ name: { list_join: ['-', [ 'management_net', { get_param: env_name } ]] }
+ external_net:
+ type: OS::Neutron::Net
+ properties:
+ port_security_enabled: false
+ name: { list_join: ['-', [ 'external_net', { get_param: env_name } ]] }
+
+ control_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ name: { list_join: ['-', [ 'control_subnet', { get_param: env_name } ]] }
+ #name: control_subnet
+ network: { get_resource: control_net }
+ cidr: { get_param: control_subnet_cidr }
+ #enable_dhcp: { get_param: control_net_dhcp }
+ #dns_nameservers: { get_param: nameservers }
+ dns_nameservers: [0.0.0.0]
+ gateway_ip: null
+ tags:
+ - private-pool01
+
+ tenant_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ name: { list_join: ['-', [ 'tenant_subnet', { get_param: env_name } ]] }
+ #name: tenant_subnet
+ network: { get_resource: tenant_net }
+ cidr: { get_param: tenant_subnet_cidr }
+ #enable_dhcp: { get_param: tenant_net_dhcp }
+ #dns_nameservers: { get_param: nameservers }
+ dns_nameservers: [0.0.0.0]
+ gateway_ip: null
+ tags:
+ - tenant-pool01
+
+ management_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ gateway_ip: { get_param: management_subnet_gateway_ip }
+ name: { list_join: ['-', [ 'management_subnet', { get_param: env_name } ]] }
+ #name: management_subnet
+ network: { get_resource: management_net }
+ cidr: { get_param: management_subnet_cidr }
+ enable_dhcp: { get_param: management_net_dhcp }
+ allocation_pools:
+ - start: { get_param: management_subnet_pool_start }
+ end: { get_param: management_subnet_pool_end }
+ dns_nameservers: { get_param: nameservers }
+ tags:
+ - admin-pool01
+
+ external_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ name: { list_join: ['-', [ 'external_subnet', { get_param: env_name } ]] }
+ #name: external_subnet
+ network: { get_resource: external_net }
+ cidr: { get_param: external_subnet_cidr }
+ #enable_dhcp: { get_param: external_net_dhcp }
+ #dns_nameservers: { get_param: nameservers }
+ dns_nameservers: [0.0.0.0]
+ gateway_ip: null
+ tags:
+ - external-pool01
+
+ router:
+ type: OS::Neutron::Router
+ properties:
+ #name: publicbarerouter
+ external_gateway_info:
+ network: { get_param: net_public }
+ #enable_snat: True
+
+ router_subnet:
+ type: OS::Neutron::RouterInterface
+ depends_on: management_subnet
+ properties:
+ router: { get_resource: router }
+ subnet: { get_resource: management_subnet }
+
+outputs:
+ network:
+ value: { get_param: stack_name }
+ management_net_prefix:
+ value:
+ list_join:
+ - '.'
+ - - str_split: ['.', { get_param: management_subnet_cidr }, 0]
+ - str_split: ['.', { get_param: management_subnet_cidr }, 1]
+ - str_split: ['.', { get_param: management_subnet_cidr }, 2]
+
+ control_net_prefix:
+ value:
+ list_join:
+ - '.'
+ - - str_split: ['.', { get_param: control_subnet_cidr }, 0]
+ - str_split: ['.', { get_param: control_subnet_cidr }, 1]
+ - str_split: ['.', { get_param: control_subnet_cidr }, 2]
+
+ tenant_net_prefix:
+ value:
+ list_join:
+ - '.'
+ - - str_split: ['.', { get_param: tenant_subnet_cidr }, 0]
+ - str_split: ['.', { get_param: tenant_subnet_cidr }, 1]
+ - str_split: ['.', { get_param: tenant_subnet_cidr }, 2]
+
+ external_net_prefix:
+ value:
+ list_join:
+ - '.'
+ - - str_split: ['.', { get_param: external_subnet_cidr }, 0]
+ - str_split: ['.', { get_param: external_subnet_cidr }, 1]
+ - str_split: ['.', { get_param: external_subnet_cidr }, 2]
+
+...
diff --git a/tcp_tests/templates/_heat_environments/microcloud-8116-cookied-cicd-queens-dvr-sl.sh b/tcp_tests/templates/_heat_environments/microcloud-8116-cookied-cicd-queens-dvr-sl.sh
old mode 100755
new mode 100644
diff --git a/tcp_tests/templates/_heat_environments/microcloud-8116.env b/tcp_tests/templates/_heat_environments/microcloud-8116.env
deleted file mode 100644
index 9570a55..0000000
--- a/tcp_tests/templates/_heat_environments/microcloud-8116.env
+++ /dev/null
@@ -1,24 +0,0 @@
----
-
-parameter_defaults:
- #flavor_medium: baremetal
- flavor_medium: cfg01-virtual
- flavor_ctl: ctl-virtual
- image_vcp: ironic_provision_image
- image_ubuntu_cloud_xenial: cfg01-day01
- #keypair: system-ci-keypair
- keypair: baremetal
-
- net_public: public
-
- # ironic-specific parameters
- management_physical_network: ironicnet1
- management_subnet_cidr: 10.13.0.0/24
- management_subnet_pool_start: 10.13.0.20
- management_subnet_pool_end: 10.13.0.60
- management_subnet_gateway_ip: 10.13.0.1
- management_subnet_cfg01_ip: 10.13.0.15
-
- cfg01_configdrive_image: cfg01.cookied-cicd-queens-dvr-sl-config-drive.iso
- dns_nameservers: 172.18.208.44
-...
diff --git a/tcp_tests/templates/_heat_environments/microcloud-8133.env b/tcp_tests/templates/_heat_environments/microcloud-8133.env
new file mode 100644
index 0000000..6e1cb3b
--- /dev/null
+++ b/tcp_tests/templates/_heat_environments/microcloud-8133.env
@@ -0,0 +1,40 @@
+
+resource_registry:
+ "MCP::MultipleInstance": fragments/MultipleInstance.yaml
+ "MCP::Flavors": fragments/Flavors.yaml
+ "MCP::MasterNode": fragments/MasterNode.yaml
+ "MCP::Compute": fragments/Compute.yaml
+ "MCP::Networks": fragments/Networks.yaml
+ "MCP::SingleInstance": fragments/Instance.yaml
+ "MCP::FoundationNode": fragments/FoundationNode.yaml
+
+parameter_defaults:
+
+ cfg_flavor: system.virtual.salt_master
+ ctl_flavor: system.golden.openstack.control
+ cid_flavor: system.golden.cicd.control
+ ntw_flavor: system.compact.opencontrail.control
+ nal_flavor: system.compact.opencontrail.analytics
+ dbs_flavor: system.golden.openstack.database
+ msg_flavor: system.golden.openstack.message_queue
+ mon_flavor: system.golden.stacklight.server
+ log_flavor: system.golden.stacklight.log
+ mtr_flavor: system.golden.stacklight.telemetry
+ cmp_flavor: system.virtual.openstack.compute
+ kvm_fake_flavor: system.virtual.fake_kvm
+ foundation_flavor: system.virtual.foundation
+
+ key_pair: system_key_8133
+
+ net_public: public
+
+ nameservers: 172.18.208.44
+ control_subnet_cidr: "10.6.0.0/24"
+ tenant_subnet_cidr: "10.8.0.0/24"
+ external_subnet_cidr: "10.9.0.0/24"
+ management_subnet_cidr: "10.7.0.0/24"
+ management_subnet_cfg01_ip: 10.7.0.15
+ management_subnet_gateway_ip: 10.7.0.1
+ management_subnet_pool_start: 10.7.0.20
+ management_subnet_pool_end: 10.7.0.60
+ salt_master_control_ip: 10.6.0.15
diff --git a/tcp_tests/templates/cookied-model-generator/salt_heat-cicd-pike-contrail41-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_heat-cicd-pike-contrail41-sl.yaml
new file mode 100644
index 0000000..992dc35
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_heat-cicd-pike-contrail41-sl.yaml
@@ -0,0 +1,25 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'heat-cicd-pike-contrail41-sl' %}
+# Name of the context file (without the extension, which is fixed to .yaml) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','heat-cicd-pike-contrail41-sl') %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-contrail.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml', 'salt-context-cookiecutter-contrail.yaml'] %}
+{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '10') %}
+{%- set TENANT_VLAN = os_env('TENANT_VLAN', '20') %}
+
+{%- set IPMI_USER = os_env('IPMI_USER', 'mcp-qa') %}
+{%- set IPMI_PASS = os_env('IPMI_PASS', 'password') %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/heat-cicd-pike-contrail41-sl/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/salt-context-cookiecutter-contrail.yaml
new file mode 100644
index 0000000..7d58774
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/salt-context-cookiecutter-contrail.yaml
@@ -0,0 +1,304 @@
+default_context:
+ backup_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEApq5WxkagvkNWO85FtS1ByHDKkNWhmFdpY9D49dZrSwuE9XGQ
+ +WW79F2AGwKki2N2j1iyfpMEKRIEIb/5cbl6fZzTGTndhd7Jxkx6xGdhZkX9VM6N
+ qotaO4ckj7NsfiZKTwp58/YSRkz3Ii1XPpt0NQqZLuNAwus4Bl9e1Wk5dNw+gHN3
+ m4JmAczJbQ81lrQURC7f3d2xjoFkXWXC2FKkMS6AOl1j87ATeeSG9xeHLbOvIyBw
+ 7IwP9MFA5vUtHl8DzsdmzWmVRabe2VMtGa1Ya5JTTgK8nXmtYW3dvEQ/DtgzcKPJ
+ 2fO31cze9LRpDSS0E6d/cISBgzsPfBJuUCGHTQIDAQABAoIBAQCmFVVVoA6PRt1o
+ HjMLQpsntGvDQXsRJxhWY2WO4CZs0n+baZvBRgOwjHIXd9ypH2SFlSXWRXuByPfh
+ AT72eJB7FYaqviPjPojjVFWH2lMM63RvypkSdGRmqFRf87KJSHIGrDO0SV8QOaSO
+ o4spURDLwVG9jKd9EY/zmZgPIhgkPazzVrFoGr8YnKE6qSJh5HivscNl8D3+36SN
+ 5uhuElzBTNGd2iU4elLJIGjahetIalEZqL0Fvi1ZzAWoK0YXDmbI8uG8/epJ5Sy4
+ XyyHc7+0Jvm1JWwXczdDFuy+RlL9r66Ja8V9MauuJyigOKnNOJhE2b5/klEcczhC
+ AHA/Hw4pAoGBANcJ/gdouXgcuq3JNXq5Cb4w9lvZbDwQdEtY3+qdHAVndomoGsDT
+ USKq6ZRZzkAAnjiN2YywAQzqFGevoYig+WNLTPd2TdNdlNHfw9Wc4G2iSFb1pIr2
+ uoJ+TQGv4Ck/7LS2NVnWfqNoeo8Iq+Wvnh+F3twv0UIazGI8Bj/xLxvrAoGBAMZu
+ QErf3vzbY4g50HFVbPNi2Nl63A7/P421pEe4JAT1clwIVMyntRpNdVyHKkkKdDWr
+ 98tBOhf71+shgsVPEMkfPyZ2nuiBit7LzZ+EAztG9i3hhm8yIUPXoipo0YCOe+yF
+ r+r03pX97aciXuRMPmMTHH6N1vFaUXHSgVs6Y7OnAoGAP4v1ZO0eug8LX6XxRuX9
+ qhXAB96VrJ5UL5wA980b5cDwd7eUyFzqQittwWhUmfdUynOo0XmFpfJau1VckAq6
+ CAzNnud4Ejk6bFcLAUpNzDhD1mbbDDHjZgK68P+vZ6E7ax/ZXkYTwGh0p2Yxnjuq
+ p7gg5sK+vSE8Ot9wHV9Bw6cCgYEAguPq6PjvgF+/Mfbg9kFhUtKbNCoEyqe4ZmOw
+ 79YZfGPjga3FMhJWNfluNxC55eBNc7HyDFMEXRm0/dbnCfvzmJdR8q9AdyIsVnad
+ NmHAN/PBI9al9OdeZf/xaoQl3eUe/Y/Z0OShhtMvVpYnffSFGplarGgnpqDrJGe1
+ CFZlufUCgYBemuy+C6gLwTOzhcTcCo4Ir5ZiKcXAE6ufk8OIdGnMWJcmTxxmIMY6
+ XyKu0oobWpOBXPiipQ6TmDpI+flxWYRHwPFFzPa+jhCtupRuTdORKrklV2UfdIWZ
+ N4e+J2yCu7lyz0upwa3MkFIVQ1ez0o8X9NRvAz243qi64y1+KOMPmQ==
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmrlbGRqC+Q1Y7zkW1LUHIcMqQ1aGYV2lj0Pj11mtLC4T1cZD5Zbv0XYAbAqSLY3aPWLJ+kwQpEgQhv/lxuXp9nNMZOd2F3snGTHrEZ2FmRf1Uzo2qi1o7hySPs2x+JkpPCnnz9hJGTPciLVc+m3Q1Cpku40DC6zgGX17VaTl03D6Ac3ebgmYBzMltDzWWtBRELt/d3bGOgWRdZcLYUqQxLoA6XWPzsBN55Ib3F4cts68jIHDsjA/0wUDm9S0eXwPOx2bNaZVFpt7ZUy0ZrVhrklNOArydea1hbd28RD8O2DNwo8nZ87fVzN70tGkNJLQTp39whIGDOw98Em5QIYdN
+ bmk_enabled: 'False'
+ cicd_control_node01_address: ==IPV4_NET_CONTROL_PREFIX==.91
+ cicd_control_node01_hostname: cid01
+ cicd_control_node02_address: ==IPV4_NET_CONTROL_PREFIX==.92
+ cicd_control_node02_hostname: cid02
+ cicd_control_node03_address: ==IPV4_NET_CONTROL_PREFIX==.93
+ cicd_control_node03_hostname: cid03
+ cicd_control_vip_address: ==IPV4_NET_CONTROL_PREFIX==.90
+ cicd_control_vip_hostname: cid
+ cicd_enabled: 'True'
+ cicd_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAuBC224XQZFyzqC56EyS7yr/rlpRRYsr2vji77faoWQFmgYbZ
+ oeyqqqm8eSN0Cc0wAnxWsQ7H3ZN9uTnyWVrsogs1vx8597iorZAT4Mu6JDbkWlZh
+ IUHo9P9itWJdUWpxjDavqIvjZo+DmOO1mfv9K1asP8COanQEsgHSyuf+XKMBg0ko
+ kEammAUtS9HRxCAJ47QgLPSCFij5ih/MRWY3HWFUFEF3gRdUodWmeJNmW+7JH7T2
+ wId1kn8oRya7eadKxd6wEaCGm5ILXwwVFmFkOGlEeC8wHnbkatd/A53DxzUfOHBi
+ 27Gaf83DPxKqDWW0aAh7b49EnFhdkuF3ZyXbYwIDAQABAoIBAFtioQbYpyBNDj2f
+ 5af/guUk6Di4pregAWVsEZIR9n9KPLRuWTsVn55f611Rhtke8IkrZnc92WlfQvpl
+ lLdcd0P0wNiFDmi5W7XgZJ4lR+OXBUT8wfibGqgY688WaTJ04K82r3vFCD/xXOrZ
+ k15CR+3ueFKmrY6Yz4P5d8iZ6iXfR47ZYm+wdmx3vmJ+IVfZCRRPAGP25GxqsOs5
+ 3qMl9hV7a1MGVVaVPmVzrq0Xzk6IAW2+0p5udGmezn4y6HFPIvOriUVUkni3mNjX
+ dokrETqVbOjkdFkSw28cMBfP/tO3vyfGh5VX24xvRztWtcpAm6Qr5lKEDSvFv13r
+ 0z/DxRECgYEA8oZ4+w2cqLJz91fKpWutGZKj4m/HEY6FZfjVflsTT2bKTt+nTtRY
+ qAeKGYIbrjZMAyy4dG+RgW7WORFcRHFyeSrS5Aw51zO+JQ0KzuBv83UqcbqNLcsz
+ BAPHPk/7f30W4wuInqgXrWMTiGePz0hQsvNU6aR7MH4Sd2C0ot4W+00CgYEAwkq+
+ UtugC8ywK+F0xZvjXHi3VJRJZf4WLtRxZGy8CimaritSKpZZRG23Sk0ifDE6+4fD
+ VtxeTfTmeZBictg/fEAPVHzhsNPNyDMA8t7t4ZKmMX9DNYAqVX21s5YQ9encH6KT
+ 1q0NRpjvw7QzhfbFfsxeAxHKZFbFlVmROplF+W8CgYAWHVz6x4r5dwxMCZ1Y6DCo
+ nE6FX1vvpedUHRSaqQNhwiXAe3RuI77R054sJUkQ4bKct386XtIN02WFXqfjNdUS
+ Z21DjjnX/cfg6QeLRbvvn0d3h2NIQbctLosEi5aLUYS8v1h93yYJkXc+gPMEG7wA
+ FWAwzebNzTEx4YeXMlk2IQKBgCt8JxTMawm5CkUH9Oa1eTGdIwsfFT5qm/RnP+nG
+ HF/559DLiVxWwiv6kmdi1DEPo6/gNuwd7k1sXpkeo6oolCzu+X9jY+/7t7bzE2dI
+ Vd2CwQebACPdR5xSwnQrRiiD6ux5qrUFjk8as68NieqVzKYQf4oYVUAX26kNnt+K
+ poqpAoGBAINHTGBFVK3XC+fCbu7rhFS8wZAjBmvEDHGnUBp19JREEr3q7a2D84T3
+ 17zo0bwxL09QFnOCDDJcXsh8eGbCONV0hJvJU2o7wGol+lRFSd+v6WYZ37bPEyEx
+ l8kv0xXAElriC1RE1CNtvoOn/uxyRs+2OnNgBVxtAGqUWVdpm6CD
+ -----END RSA PRIVATE KEY-----
+ cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4ELbbhdBkXLOoLnoTJLvKv+uWlFFiyva+OLvt9qhZAWaBhtmh7Kqqqbx5I3QJzTACfFaxDsfdk325OfJZWuyiCzW/Hzn3uKitkBPgy7okNuRaVmEhQej0/2K1Yl1RanGMNq+oi+Nmj4OY47WZ+/0rVqw/wI5qdASyAdLK5/5cowGDSSiQRqaYBS1L0dHEIAnjtCAs9IIWKPmKH8xFZjcdYVQUQXeBF1Sh1aZ4k2Zb7skftPbAh3WSfyhHJrt5p0rF3rARoIabkgtfDBUWYWQ4aUR4LzAeduRq138DncPHNR84cGLbsZp/zcM/EqoNZbRoCHtvj0ScWF2S4XdnJdtj
+ cluster_domain: heat-cicd-pike-contrail41-sl.local
+ cluster_name: heat-cicd-pike-contrail41-sl
+ opencontrail_version: 4.1
+ linux_repo_contrail_component: oc41
+ compute_bond_mode: active-backup
+ compute_padding_with_zeros: 'True'
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ context_seed: TFWH0xgUevQkslwhbWVedwwYhBtImHLiGUIExjT9ahxPAUBHh9Kg3QSAIrqTqtvk
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: ==IPV4_NET_CONTROL_PREFIX==.0/24
+ control_vlan: '10'
+ tenant_vlan: '20'
+ cookiecutter_template_branch: ''
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.com/mk/cookiecutter-templates.git
+ deploy_network_gateway: ==IPV4_NET_ADMIN_PREFIX==.1
+ deploy_network_netmask: 255.255.255.0
+ deploy_network_subnet: ==IPV4_NET_ADMIN_PREFIX==.0/24
+ deployment_type: physical
+ dns_server01: 172.18.208.44
+ dns_server02: 172.18.176.6
+ email_address: sgudz@mirantis.com
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: ==IPV4_NET_CONTROL_PREFIX==.241
+ infra_kvm01_deploy_address: ==IPV4_NET_ADMIN_PREFIX==.67
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: ==IPV4_NET_CONTROL_PREFIX==.242
+ infra_kvm02_deploy_address: ==IPV4_NET_ADMIN_PREFIX==.68
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: ==IPV4_NET_CONTROL_PREFIX==.243
+ infra_kvm03_deploy_address: ==IPV4_NET_ADMIN_PREFIX==.69
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: ==IPV4_NET_CONTROL_PREFIX==.240
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ internal_proxy_enabled: 'False'
+ kqueen_custom_mail_enabled: 'False'
+ kqueen_enabled: 'False'
+ kubernetes_enabled: 'False'
+ local_repositories: 'False'
+ maas_enabled: 'False'
+ mcp_common_scripts_branch: ''
+ mcp_version: proposed
+ offline_deployment: 'False'
+ opencontrail_analytics_address: ==IPV4_NET_CONTROL_PREFIX==.30
+ opencontrail_analytics_hostname: nal
+ opencontrail_analytics_node01_address: ==IPV4_NET_CONTROL_PREFIX==.31
+ opencontrail_analytics_node01_hostname: nal01
+ opencontrail_analytics_node02_address: ==IPV4_NET_CONTROL_PREFIX==.32
+ opencontrail_analytics_node02_hostname: nal02
+ opencontrail_analytics_node03_address: ==IPV4_NET_CONTROL_PREFIX==.33
+ opencontrail_analytics_node03_hostname: nal03
+ opencontrail_compute_iface_mask: '24'
+ opencontrail_control_address: ==IPV4_NET_CONTROL_PREFIX==.20
+ opencontrail_control_hostname: ntw
+ opencontrail_control_node01_address: ==IPV4_NET_CONTROL_PREFIX==.21
+ opencontrail_control_node01_hostname: ntw01
+ opencontrail_control_node02_address: ==IPV4_NET_CONTROL_PREFIX==.22
+ opencontrail_control_node02_hostname: ntw02
+ opencontrail_control_node03_address: ==IPV4_NET_CONTROL_PREFIX==.23
+ opencontrail_control_node03_hostname: ntw03
+ opencontrail_enabled: 'True'
+ opencontrail_router01_address: ==IPV4_NET_CONTROL_PREFIX==.220
+ opencontrail_router01_hostname: rtr01
+ opencontrail_router02_address: ==IPV4_NET_CONTROL_PREFIX==.101
+ opencontrail_router02_hostname: rtr02
+ openldap_enabled: 'True'
+ openldap_domain: ${_param:cluster_name}.local
+ openldap_organisation: ${_param:cluster_name}
+ openssh_groups: ''
+ openstack_benchmark_node01_address: ==IPV4_NET_CONTROL_PREFIX==.95
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_cluster_size: compact
+ openstack_compute_count: '2'
+ openstack_compute_rack01_hostname: cmp
+ openstack_compute_single_address_ranges: ==IPV4_NET_CONTROL_PREFIX==.101-==IPV4_NET_CONTROL_PREFIX==.102
+ openstack_compute_deploy_address_ranges: ==IPV4_NET_ADMIN_PREFIX==.73-==IPV4_NET_ADMIN_PREFIX==.74
+ openstack_compute_tenant_address_ranges: ==IPV4_NET_TENANT_PREFIX==.101-==IPV4_NET_TENANT_PREFIX==.102
+ openstack_compute_backend_address_ranges: ==IPV4_NET_TENANT_PREFIX==.101-==IPV4_NET_TENANT_PREFIX==.102
+ openstack_control_address: ==IPV4_NET_CONTROL_PREFIX==.10
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: ==IPV4_NET_CONTROL_PREFIX==.11
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: ==IPV4_NET_CONTROL_PREFIX==.12
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: ==IPV4_NET_CONTROL_PREFIX==.13
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: ==IPV4_NET_CONTROL_PREFIX==.50
+ openstack_database_hostname: dbs
+ openstack_database_node01_address: ==IPV4_NET_CONTROL_PREFIX==.51
+ openstack_database_node01_hostname: dbs01
+ openstack_database_node02_address: ==IPV4_NET_CONTROL_PREFIX==.52
+ openstack_database_node02_hostname: dbs02
+ openstack_database_node03_address: ==IPV4_NET_CONTROL_PREFIX==.53
+ openstack_database_node03_hostname: dbs03
+ openstack_enabled: 'True'
+ openstack_message_queue_address: ==IPV4_NET_CONTROL_PREFIX==.40
+ openstack_message_queue_hostname: msg
+ openstack_message_queue_node01_address: ==IPV4_NET_CONTROL_PREFIX==.41
+ openstack_message_queue_node01_hostname: msg01
+ openstack_message_queue_node02_address: ==IPV4_NET_CONTROL_PREFIX==.42
+ openstack_message_queue_node02_hostname: msg02
+ openstack_message_queue_node03_address: ==IPV4_NET_CONTROL_PREFIX==.43
+ openstack_message_queue_node03_hostname: msg03
+ openstack_network_engine: opencontrail
+ openstack_neutron_bgp_vpn: 'False'
+ openstack_neutron_bgp_vpn_driver: bagpipe
+ openstack_nfv_dpdk_enabled: 'False'
+ openstack_nfv_sriov_enabled: 'False'
+ openstack_nova_compute_nfv_req_enabled: 'False'
+ openstack_nova_compute_reserved_host_memory_mb: '900'
+ openstack_proxy_address: ==IPV4_NET_CONTROL_PREFIX==.80
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: ==IPV4_NET_CONTROL_PREFIX==.81
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: ==IPV4_NET_CONTROL_PREFIX==.82
+ openstack_proxy_node02_hostname: prx02
+ openstack_upgrade_node01_address: ==IPV4_NET_CONTROL_PREFIX==.19
+ openstack_version: queens
+ oss_enabled: 'False'
+ oss_node03_address: ${_param:stacklight_monitor_node03_address}
+ oss_webhook_app_id: '24'
+ oss_webhook_login_id: '13'
+ platform: openstack_enabled
+ public_host: ${_param:openstack_proxy_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ salt_api_password: BNRhXeGFdgVNx0Ikm2CAMw7eyeHf4grH
+ salt_api_password_hash: $6$jriFnsbZ$eon54Ts/Kn4ywKpexe/W8srpBF64cxr2D8jd0RzTH8zdZVjS3viYt64m1d1VlXenurwpcGLkGzaGmOI0dlOox0
+ salt_master_address: ==IPV4_NET_CONTROL_PREFIX==.15
+ salt_master_hostname: cfg01
+ salt_master_management_address: ==IPV4_NET_ADMIN_PREFIX==.15
+ shared_reclass_branch: ''
+ shared_reclass_url: https://gerrit.mcp.mirantis.com/salt-models/reclass-system.git
+ stacklight_enabled: 'True'
+ stacklight_log_address: ==IPV4_NET_CONTROL_PREFIX==.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: ==IPV4_NET_CONTROL_PREFIX==.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: ==IPV4_NET_CONTROL_PREFIX==.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: ==IPV4_NET_CONTROL_PREFIX==.63
+ stacklight_log_node03_hostname: log03
+ stacklight_long_term_storage_type: prometheus
+ stacklight_monitor_address: ==IPV4_NET_CONTROL_PREFIX==.70
+ stacklight_monitor_hostname: mon
+ stacklight_monitor_node01_address: ==IPV4_NET_CONTROL_PREFIX==.71
+ stacklight_monitor_node01_hostname: mon01
+ stacklight_monitor_node02_address: ==IPV4_NET_CONTROL_PREFIX==.72
+ stacklight_monitor_node02_hostname: mon02
+ stacklight_monitor_node03_address: ==IPV4_NET_CONTROL_PREFIX==.73
+ stacklight_monitor_node03_hostname: mon03
+ stacklight_telemetry_address: ==IPV4_NET_CONTROL_PREFIX==.85
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: ==IPV4_NET_CONTROL_PREFIX==.86
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: ==IPV4_NET_CONTROL_PREFIX==.87
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: ==IPV4_NET_CONTROL_PREFIX==.88
+ stacklight_telemetry_node03_hostname: mtr03
+ stacklight_version: '2'
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: ==IPV4_NET_TENANT_PREFIX==.1
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: ==IPV4_NET_TENANT_PREFIX==.0/24
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'True'
+ ceph_enabled: 'False'
+ ceph_version: "luminous"
+ ceph_hyper_converged: "False"
+ ceph_osd_backend: "bluestore"
+ ceph_osd_count: "3"
+ ceph_osd_node_count: 3
+ ceph_osd_block_db_size: 20
+ ceph_osd_journal_size: 20
+ ceph_osd_bond_mode: "active-backup"
+ ceph_osd_data_partition_prefix: ""
+
+ ceph_public_network_allocation: storage
+ ceph_public_network: "==IPV4_NET_CONTROL_PREFIX==.0/24"
+ ceph_cluster_network: "==IPV4_NET_CONTROL_PREFIX==.0/24"
+
+# for 2018.11.0+
+ ceph_osd_single_address_ranges: "==IPV4_NET_CONTROL_PREFIX==.200-==IPV4_NET_CONTROL_PREFIX==.202"
+ ceph_osd_deploy_address_ranges: "==IPV4_NET_ADMIN_PREFIX==.70-==IPV4_NET_ADMIN_PREFIX==.72"
+ ceph_osd_storage_address_ranges: "==IPV4_NET_CONTROL_PREFIX==.200-==IPV4_NET_CONTROL_PREFIX==.202"
+ ceph_osd_backend_address_ranges: "==IPV4_NET_TENANT_PREFIX==.200-==IPV4_NET_TENANT_PREFIX==.202"
+
+ ceph_osd_data_disks: "/dev/sdb"
+ ceph_osd_journal_or_block_db_disks: "/dev/sdb"
+ ceph_osd_mode: "separated"
+ ceph_osd_deploy_nic: "eth0"
+ ceph_osd_primary_first_nic: "eth1"
+ ceph_osd_primary_second_nic: "eth2"
+ #ceph_mon_node01_address: "172.16.47.66"
+ #ceph_mon_node01_deploy_address: "172.16.48.66"
+ ceph_mon_node01_address: "==IPV4_NET_CONTROL_PREFIX==.66"
+ ceph_mon_node01_hostname: "cmn01"
+ #ceph_mon_node02_address: "172.16.47.67"
+ #ceph_mon_node02_deploy_address: "172.16.48.67"
+ ceph_mon_node02_address: "==IPV4_NET_CONTROL_PREFIX==.67"
+ ceph_mon_node02_hostname: "cmn02"
+ #ceph_mon_node03_address: "172.16.47.68"
+ #ceph_mon_node03_deploy_address: "172.16.48.68"
+ ceph_mon_node03_address: "==IPV4_NET_CONTROL_PREFIX==.68"
+ ceph_mon_node03_hostname: "cmn03"
+ #ceph_rgw_address: "172.16.47.75"
+ ceph_rgw_address: "==IPV4_NET_CONTROL_PREFIX==.75"
+ #ceph_rgw_node01_address: "172.16.47.76"
+ #ceph_rgw_node01_deploy_address: "172.16.48.76"
+ ceph_rgw_node01_address: "==IPV4_NET_CONTROL_PREFIX==.76"
+ ceph_rgw_node01_hostname: "rgw01"
+ #ceph_rgw_node02_address: "172.16.47.77"
+ #ceph_rgw_node02_deploy_address: "172.16.48.77"
+ ceph_rgw_node02_address: "==IPV4_NET_CONTROL_PREFIX==.77"
+ ceph_rgw_node02_hostname: "rgw02"
+ #ceph_rgw_node03_address: "172.16.47.78"
+ #ceph_rgw_node03_deploy_address: "172.16.48.78"
+ ceph_rgw_node03_address: "==IPV4_NET_CONTROL_PREFIX==.78"
+ ceph_rgw_node03_hostname: "rgw03"
+ manila_enabled: 'False'
+ barbican_enabled: 'False'
+ barbican_integration_enabled: 'False'
+ # SSL settings
+ nova_vnc_tls_enabled: 'True'
+ galera_ssl_enabled: 'True'
+ openstack_mysql_x509_enabled: 'True'
+ rabbitmq_ssl_enabled: 'True'
+ openstack_rabbitmq_x509_enabled: 'True'
+ openstack_internal_protocol: 'http'
diff --git a/tcp_tests/templates/heat-cicd-pike-contrail41-sl/salt-context-environment.yaml b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/salt-context-environment.yaml
new file mode 100644
index 0000000..fe4194e
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/salt-context-environment.yaml
@@ -0,0 +1,380 @@
+nodes:
+ cfg01:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ - features_runtest_cfg
+ interfaces:
+ ens3:
+ role: single_static_mgm
+ ens4:
+ role: single_static_ctl
+
+ cid01:
+ reclass_storage_name: cicd_control_node01
+ roles:
+ - cicd_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cid02:
+ reclass_storage_name: cicd_control_node02
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cid03:
+ reclass_storage_name: cicd_control_node03
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl01:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - openstack_control_leader
+ - linux_system_codename_xenial
+ classes:
+ - system.linux.system.repo.mcp.apt_mirantis.docker
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl02:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl03:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ dbs01:
+ reclass_storage_name: openstack_database_node01
+ roles:
+ - openstack_database_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ dbs02:
+ reclass_storage_name: openstack_database_node02
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ dbs03:
+ reclass_storage_name: openstack_database_node03
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ msg01:
+ reclass_storage_name: openstack_message_queue_node01
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ msg02:
+ reclass_storage_name: openstack_message_queue_node02
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ msg03:
+ reclass_storage_name: openstack_message_queue_node03
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ prx01:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ prx02:
+ reclass_storage_name: openstack_proxy_node02
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon01:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon02:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon03:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ nal01:
+ reclass_storage_name: opencontrail_analytics_node01
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ nal02:
+ reclass_storage_name: opencontrail_analytics_node02
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ nal03:
+ reclass_storage_name: opencontrail_analytics_node03
+ roles:
+ - opencontrail_analytics
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ntw01:
+ reclass_storage_name: opencontrail_control_node01
+ roles:
+ - opencontrail_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ntw02:
+ reclass_storage_name: opencontrail_control_node02
+ roles:
+ - opencontrail_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ntw03:
+ reclass_storage_name: opencontrail_control_node03
+ roles:
+ - opencontrail_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr01:
+ reclass_storage_name: stacklight_telemetry_node01
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr02:
+ reclass_storage_name: stacklight_telemetry_node02
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr03:
+ reclass_storage_name: stacklight_telemetry_node03
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log01:
+ reclass_storage_name: stacklight_log_node01
+ roles:
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log02:
+ reclass_storage_name: stacklight_log_node02
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log03:
+ reclass_storage_name: stacklight_log_node03
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ kvm01:
+ reclass_storage_name: infra_kvm_node01
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ kvm02:
+ reclass_storage_name: infra_kvm_node02
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ kvm03:
+ reclass_storage_name: infra_kvm_node03
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cmp<<count>>:
+ reclass_storage_name: openstack_compute_rack01
+ roles:
+ - openstack_compute
+ - features_lvm_backend_volume_vdb
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens5:
+ role: bond0_ab_contrail_single
+ ens4:
+ role: single_ctl
diff --git a/tcp_tests/templates/heat-cicd-pike-contrail41-sl/salt.yaml b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/salt.yaml
new file mode 100644
index 0000000..a51a8a9
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/salt.yaml
@@ -0,0 +1,14 @@
+{% set HOSTNAME_CFG01='cfg01.heat-cicd-pike-contrail41-sl.local' %}
+{% set LAB_CONFIG_NAME='heat-cicd-pike-contrail41-sl' %}
+{% set DOMAIN_NAME='heat-cicd-pike-contrail41-sl.local' %}
+
+# For other salt model repository parameters, see shared-salt.yaml
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
+
diff --git a/tcp_tests/templates/heat-cicd-pike-contrail41-sl/underlay--user-data-foundation.yaml b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/underlay--user-data-foundation.yaml
new file mode 100644
index 0000000..cb551ef
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/underlay--user-data-foundation.yaml
@@ -0,0 +1,63 @@
+#cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ssh_pwauth: True
+users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ - name: jenkins
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDFSxeuXh2sO4VYL8N2dlNFVyNcr2RvoH4MeDD/cV2HThfU4/BcH6IOOWXSDibIU279bWVKCL7QUp3mf0Vf7HPuyFuC12QM+l7MwT0jCYh5um3hmAvM6Ga0nkhJygHexe9/rLEYzZJkIjP9/IS/YXSv8rhHg484wQ6qkEuq15nyMqil8tbDQCq0XQ+AWEpNpIa4pUoKmFMsOP8lq10KZXIXsJyZxizadr6Bh4Lm9LWrk8YCw7qP3rmgWxK/s8qXQh1ISZe6ONfcmk6p03qbh4H3CwKyWzxmnIHQvE6PgN/O+PuAZj3PbR2mkkJjYX4jNPlxvj8uTStaVPhAwfR9Spdx jenkins@cz8133
+
+disable_root: false
+chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+packages:
+ - openjdk-8-jre-headless
+ - libyaml-dev
+ - libffi-dev
+ - libvirt-dev
+ - python-dev
+ - python-pip
+ - python-virtualenv
+ #- python-psycopg2
+ - pkg-config
+ - vlan
+ - bridge-utils
+ - ebtables
+
+bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+runcmd:
+ # Create swap
+ - fallocate -l 16G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
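+ # Auto-activate the fuel-devops30 virtualenv in interactive shells
+ # (files under /etc/bash_completion.d/ are sourced by bash completion)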
+ - path: /etc/bash_completion.d/fuel_devops30_activate
+ content: |
+ source /home/jenkins/fuel-devops30/bin/activate
diff --git a/tcp_tests/templates/heat-cicd-pike-contrail41-sl/underlay-userdata.yaml b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/underlay-userdata.yaml
new file mode 100644
index 0000000..567a445
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/underlay-userdata.yaml
@@ -0,0 +1,78 @@
+#cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ssh_pwauth: True
+users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+
+disable_root: false
+chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+runcmd:
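+ # If the image uses LVM (vg0), grow the physical volume and re-apply the
+ # logical volume layout from /usr/share/growlvm/image-layout.yml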
+ - if lvs vg0; then pvresize /dev/vda3; fi
+ - if lvs vg0; then /usr/bin/growlvm.py --image-layout-file /usr/share/growlvm/image-layout.yml; fi
+
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo resolvconf -u
+ #- sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 16G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
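+ # Logical volume layout consumed by growlvm.py above (sizes are percentages of the vg0 volume group)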
+ - path: /usr/share/growlvm/image-layout.yml
+ content: |
+ root:
+ size: '65%VG'
+ home:
+ size: '1%VG'
+ var_log:
+ size: '10%VG'
+ var_log_audit:
+ size: '5%VG'
+ var_tmp:
+ size: '10%VG'
+ tmp:
+ size: '5%VG'
+ owner: root:root
+
+growpart:
+ mode: auto
+ devices:
+ - '/'
+ - '/dev/vda3'
+ ignore_growroot_disabled: false
diff --git a/tcp_tests/templates/heat-cicd-pike-contrail41-sl/underlay.hot b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/underlay.hot
new file mode 100644
index 0000000..6285549
--- /dev/null
+++ b/tcp_tests/templates/heat-cicd-pike-contrail41-sl/underlay.hot
@@ -0,0 +1,440 @@
+---
+
+heat_template_version: queens
+
+description: MCP environment for heat-cicd-pike-contrail41-sl
+
+parameters:
+ instance_domain:
+ type: string
+ default: heat-cicd-pike-contrail41-sl.local
+ mcp_version:
+ type: string
+ env_name:
+ type: string
+ control_subnet_cidr:
+ type: string
+ management_subnet_cidr:
+ type: string
+ management_subnet_pool_start:
+ type: string
+ management_subnet_pool_end:
+ type: string
+ management_subnet_cfg01_ip:
+ type: string
+ management_subnet_gateway_ip:
+ type: string
+
+ key_pair:
+ type: string
+
+ ctl_flavor:
+ type: string
+ cfg_flavor:
+ type: string
+ cid_flavor:
+ type: string
+ ntw_flavor:
+ type: string
+ nal_flavor:
+ type: string
+ kvm_fake_flavor:
+ type: string
+ dbs_flavor:
+ type: string
+ msg_flavor:
+ type: string
+ mon_flavor:
+ type: string
+ log_flavor:
+ type: string
+ mtr_flavor:
+ type: string
+ cmp_flavor:
+ type: string
+ foundation_flavor:
+ type: string
+
+ net_public:
+ type: string
+
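+# The composite MCP::* resource types used below are expected to be registered
+# by the shared heat environment files under tcp_tests/templates/_heat_environments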
+resources:
+ networks:
+ type: MCP::Networks
+ properties:
+ stack_name: { get_param: "OS::stack_name" }
+ env_name: { get_param: env_name }
+
+ #flavors:
+ # type: MCP::Flavors
+
+ cfg01_node:
+ type: MCP::MasterNode
+ depends_on: [networks]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ cfg01_flavor: { get_param: cfg_flavor }
+ instance_name: cfg01
+ instance_domain: {get_param: instance_domain}
+ network: { get_attr: [networks, network] }
+
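+ # Static control-network IPs are composed by joining the control_net_prefix
+ # exposed by MCP::Networks with a per-node host octet (e.g. <prefix>.11 for ctl01)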
+ control_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [cfg01_node]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: ctl01
+ instance02_name: ctl02
+ instance03_name: ctl03
+ instance_flavor: {get_param: ctl_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '11' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '12' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '13' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ openstack_database_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [control_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: dbs01
+ instance02_name: dbs02
+ instance03_name: dbs03
+ instance_flavor: {get_param: dbs_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '51' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '52' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '53' ]
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ fake_kvm_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [cfg01_node]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: kvm01
+ instance02_name: kvm02
+ instance03_name: kvm03
+ instance_flavor: {get_param: kvm_fake_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '241' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '242' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '243' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ openstack_message_queue_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [openstack_database_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: msg01
+ instance02_name: msg02
+ instance03_name: msg03
+ instance_flavor: {get_param: msg_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '41' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '42' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '43' ]
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ cicd_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [cfg01_node]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: cid01
+ instance02_name: cid02
+ instance03_name: cid03
+ instance_flavor: {get_param: cid_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '91' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '92' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '93' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ contrail_ntw_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [openstack_message_queue_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: ntw01
+ instance02_name: ntw02
+ instance03_name: ntw03
+ instance_flavor: {get_param: ntw_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '21' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '22' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '23' ]
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ contrail_nal_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [contrail_ntw_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: nal01
+ instance02_name: nal02
+ instance03_name: nal03
+ instance_flavor: {get_param: nal_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '31' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '32' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '33' ]
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ stacklight_monitor_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [openstack_message_queue_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: mon01
+ instance02_name: mon02
+ instance03_name: mon03
+ instance_flavor: {get_param: mon_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '71' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '72' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '73' ]
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ stacklight_log_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [stacklight_monitor_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: log01
+ instance02_name: log02
+ instance03_name: log03
+ instance_flavor: {get_param: log_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '61' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '62' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '63' ]
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ stacklight_mtr_cluster:
+ type: MCP::MultipleInstance
+ depends_on: [stacklight_log_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance01_name: mtr01
+ instance02_name: mtr02
+ instance03_name: mtr03
+ instance_flavor: {get_param: mtr_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ instance01_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '86' ]
+ instance02_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '87' ]
+ instance03_control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '88' ]
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ prx01_virtual:
+ type: MCP::SingleInstance
+ depends_on: [control_cluster]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: prx01
+ instance_flavor: {get_param: cid_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '81' ]
+
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
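+ # Compute nodes; these correspond to the cmp<<count>> (openstack_compute_rack01)
+ # definition in the environment config above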
+ cmp001_virtual:
+ type: MCP::Compute
+ depends_on: [cfg01_node]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: cmp001
+ instance_flavor: {get_param: cmp_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '101' ]
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ cmp002_virtual:
+ type: MCP::Compute
+ depends_on: [cfg01_node]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: cmp002
+ instance_flavor: {get_param: cmp_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay-userdata.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '102' ]
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+ foundation_node:
+ type: MCP::FoundationNode
+ depends_on: [networks]
+ properties:
+ env_name: { get_param: env_name }
+ mcp_version: { get_param: mcp_version }
+ instance_domain: {get_param: instance_domain}
+ instance_name: foundation
+ instance_flavor: {get_param: foundation_flavor}
+ network: { get_attr: [networks, network] }
+ underlay_userdata: { get_file: ./underlay--user-data-foundation.yaml }
+ control_net_static_ip:
+ list_join:
+ - '.'
+ - [ { get_attr: [networks, control_net_prefix] }, '5' ]
+ instance_config_host: { get_attr: [cfg01_node, instance_address] }
+
+outputs:
+
+ control_subnet_cidr:
+ description: Control network CIDR
+ value: { get_param: control_subnet_cidr }
+
+ management_subnet_cidr:
+ description: Admin network CIDR
+ value: { get_param: management_subnet_cidr }
+
+ foundation_floating:
+ description: Floating IP address of the foundation node in the external network
+ value:
+ get_attr:
+ - foundation_node
+ - instance_floating_address
+...