Add cookied-cicd-k8s-calico and cookied-cicd-k8s-calico-sl

- Add new templates for k8s deployments using CICD
- Each new template provides a deploy-and-test.groovy script for
  integration CI, which performs the necessary steps to:
  - prepare the environment
  - get the inventory details (networks) from the environment
  - generate the cookied model for the environment (using the
    inventory details)
  - create the config-drive ISO (using the job created by D.Tyzhnenko)
  - bootstrap the Salt cluster on all nodes (using salt.yaml)
  - use Jenkins on the salt-master node to deploy the 'core' and
    'cicd' stacks
  - use Jenkins on the CICD nodes to deploy the 'k8s', 'calico' and
    'stacklight' stacks
  - run system tests from the tcp-qa test directories; for this,
    two new pytest marks are added: 'k8s_calico' and 'k8s_calico_sl'
    (see the example invocation below)
  At certain steps, deploy-and-test.groovy can use the shared library
  tcp_tests/templates/SharedPipeline.groovy for common methods.
- Fix the replacement of networks in shared-salt.yaml for cookied models
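
As an illustration only (the variables and options below are taken
from the deploy-and-test.groovy scripts), the marked system tests can
be selected with pytest's "-m" option:

    export TESTS_CONFIGS=$(pwd)/${ENV_NAME}_salt_deployed.ini
    py.test -vvv -s -p no:django -p no:ipdb \
        --junit-xml=nosetests.xml -m k8s_calico_sl
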
Change-Id: I958d183d8951b869877f0c36f4bd1000a5b7e6a9
diff --git a/tcp_tests/templates/SharedPipeline.groovy b/tcp_tests/templates/SharedPipeline.groovy
new file mode 100644
index 0000000..a34be31
--- /dev/null
+++ b/tcp_tests/templates/SharedPipeline.groovy
@@ -0,0 +1,114 @@
+common = new com.mirantis.mk.Common()
+
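+// Run a shell command on the Jenkins node with the fuel-devops python
+// virtualenv activated; when returnStdout is true, the command output
+// is returned instead of just being printed to the build log.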
+def run_cmd(cmd, returnStdout=false) {
+ common.printMsg("Run shell command:\n" + cmd, "blue")
+ def VENV_PATH='/home/jenkins/fuel-devops30'
+ script = """\
+ set +x;
+ echo 'activate python virtualenv ${VENV_PATH}';
+ . ${VENV_PATH}/bin/activate;
+        bash -c 'set -ex; ${cmd.stripIndent()}'
+ """
+ return sh(script: script, returnStdout: returnStdout)
+}
+
+def run_cmd_stdout(cmd) {
+ return run_cmd(cmd, true)
+}
+
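+// Collect the network inventory from the fuel-devops environment and
+// pass it as parameters to the 'swarm-cookied-model-generator' job.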
+def generate_cookied_model() {
+    // read the network addresses assigned to the fuel-devops environment
+ def IPV4_NET_ADMIN=run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep admin-pool01").trim().split().last()
+ def IPV4_NET_CONTROL=run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep private-pool01").trim().split().last()
+ def IPV4_NET_TENANT=run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep tenant-pool01").trim().split().last()
+ def IPV4_NET_EXTERNAL=run_cmd_stdout("dos.py net-list ${ENV_NAME} | grep external-pool01").trim().split().last()
+ println("IPV4_NET_ADMIN=" + IPV4_NET_ADMIN)
+ println("IPV4_NET_CONTROL=" + IPV4_NET_CONTROL)
+ println("IPV4_NET_TENANT=" + IPV4_NET_TENANT)
+ println("IPV4_NET_EXTERNAL=" + IPV4_NET_EXTERNAL)
+
+ def parameters = [
+ string(name: 'LAB_CONTEXT_NAME', value: "${LAB_CONFIG_NAME}"),
+ string(name: 'CLUSTER_NAME', value: "${LAB_CONFIG_NAME}"),
+ string(name: 'DOMAIN_NAME', value: "${LAB_CONFIG_NAME}.local"),
+ string(name: 'REPOSITORY_SUITE', value: "${MCP_VERSION}"),
+ string(name: 'SALT_MODELS_SYSTEM_COMMIT', value: "${MCP_VERSION}"),
+ string(name: 'COOKIECUTTER_TEMPLATE_COMMIT', value: "${MCP_VERSION}"),
+ string(name: 'TCP_QA_REVIEW', value: "${TCP_QA_REFS}"),
+ string(name: 'IPV4_NET_ADMIN', value: IPV4_NET_ADMIN),
+ string(name: 'IPV4_NET_CONTROL', value: IPV4_NET_CONTROL),
+ string(name: 'IPV4_NET_TENANT', value: IPV4_NET_TENANT),
+ string(name: 'IPV4_NET_EXTERNAL', value: IPV4_NET_EXTERNAL),
+ ]
+ common.printMsg("Start building job 'swarm-cookied-model-generator' with parameters:", "purple")
+ common.prettyPrint(parameters)
+ build job: 'swarm-cookied-model-generator',
+ parameters: parameters
+}
+
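+// Trigger the 'create-cfg-config-drive' job to build the config-drive
+// ISO for the cfg01 (salt-master) node.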
+def generate_configdrive_iso() {
+ def SALT_MASTER_IP=run_cmd_stdout("""\
+ export ENV_NAME=${ENV_NAME}
+ . ./tcp_tests/utils/env_salt
+ echo \$SALT_MASTER_IP
+ """).trim().split().last()
+ println("SALT_MASTER_IP=" + SALT_MASTER_IP)
+ def parameters = [
+ string(name: 'CLUSTER_NAME', value: "${LAB_CONFIG_NAME}"),
+ string(name: 'MODEL_URL', value: "http://cz8133.bud.mirantis.net:8098/${LAB_CONFIG_NAME}.git"),
+ string(name: 'MODEL_URL_OBJECT_TYPE', value: "git"),
+ booleanParam(name: 'DOWNLOAD_CONFIG_DRIVE', value: true),
+ string(name: 'MCP_VERSION', value: "${MCP_VERSION}"),
+ string(name: 'COMMON_SCRIPTS_COMMIT', value: "${MCP_VERSION}"),
+ string(name: 'NODE_NAME', value: "${NODE_NAME}"),
+ string(name: 'CONFIG_DRIVE_ISO_NAME', value: "${CFG01_CONFIG_IMAGE_NAME}"),
+ string(name: 'SALT_MASTER_DEPLOY_IP', value: SALT_MASTER_IP),
+ string(name: 'PIPELINE_REPO_URL', value: "https://github.com/Mirantis"),
+ booleanParam(name: 'PIPELINES_FROM_ISO', value: true),
+ string(name: 'MCP_SALT_REPO_URL', value: "http://apt.mirantis.com/xenial"),
+ string(name: 'MCP_SALT_REPO_KEY', value: "http://apt.mirantis.com/public.gpg"),
+ string(name: 'PIPELINE_LIBRARY_REF', value: "${PIPELINE_LIBRARY_REF}"),
+ string(name: 'MK_PIPELINES_REF', value: "${MK_PIPELINES_REF}"),
+ ]
+ common.printMsg("Start building job 'create-cfg-config-drive' with parameters:", "purple")
+ common.prettyPrint(parameters)
+ build job: 'create-cfg-config-drive',
+ parameters: parameters
+}
+
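+// Run the 'deploy_openstack' job on the Jenkins instance that runs on
+// the day01 (salt-master) node.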
+def run_job_on_day01_node(stack_to_install) {
+ // stack_to_install="core,cicd"
+ def stack = "${stack_to_install}"
+ run_cmd("""\
+ export ENV_NAME=${ENV_NAME}
+ . ./tcp_tests/utils/env_salt
+ . ./tcp_tests/utils/env_jenkins_day01
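+        # Build the JSON job parameters; the backslash escaping below
+        # must survive both Groovy and shell interpolation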
+ JOB_PARAMETERS=\"{
+ \\\"SALT_MASTER_URL\\\": \\\"\${SALTAPI_URL}\\\",
+ \\\"STACK_INSTALL\\\": \\\"${stack}\\\"
+ }\"
+ JOB_PREFIX="[ {job_name}/{build_number}:${stack} {time} ] "
+ python ./tcp_tests/utils/run_jenkins_job.py --verbose --job-name=deploy_openstack --job-parameters="\$JOB_PARAMETERS" --job-output-prefix="\$JOB_PREFIX"
+ """)
+}
+
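+// Same as run_job_on_day01_node(), but targets the Jenkins instance
+// deployed on the CICD cluster nodes.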
+def run_job_on_cicd_nodes(stack_to_install) {
+ // stack_to_install="k8s,calico,stacklight"
+ def stack = "${stack_to_install}"
+ run_cmd("""\
+ export ENV_NAME=${ENV_NAME}
+ . ./tcp_tests/utils/env_salt
+ . ./tcp_tests/utils/env_jenkins_cicd
+ JOB_PARAMETERS=\"{
+ \\\"SALT_MASTER_URL\\\": \\\"\${SALTAPI_URL}\\\",
+ \\\"STACK_INSTALL\\\": \\\"${stack}\\\"
+ }\"
+ JOB_PREFIX="[ {job_name}/{build_number}:${stack} {time} ] "
+ python ./tcp_tests/utils/run_jenkins_job.py --verbose --job-name=deploy_openstack --job-parameters="\$JOB_PARAMETERS" --job-output-prefix="\$JOB_PREFIX"
+        sleep 60  # Wait for IO to calm down on cluster nodes
+ """)
+}
+
+
+// pretend to be a groovy class, DO NOT REMOVE
+return this
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
new file mode 100644
index 0000000..e055d78
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
@@ -0,0 +1,197 @@
+default_context:
+ backup_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAtyCfiXxwB6Dk6n7Y1t9u2XqMkLPvMArKwRUWGEwTzS7w0NzY
+ bCYdUfxo9m3tmhCO6hb0Yqzk6LEcOrARR7nHK7dS0JpRmZBeD3thdgXD8wNaG1PQ
+ ZzdNwGHP5cjfBXPHYXdP1k6HtLB1PymPDyEqhU0ZJrVGBK4+WSLNesSGOMiREQSx
+ kg/85aGdagLWHgAbgi0x1xx+Bu8LtuVkIcz8IMa3lanY7B8s5aIMxsOGTokJvdRL
+ QZJN0AfGRSANTIQZXfgkTO5wP85UsNisB8j7bliLl1wbxgnq/LTJZ+nQ1PA4dx8c
+ t0FKHYIR6zSd0LkDZaxJnZBgSrVnZ2JBbt7hTwIDAQABAoIBAHu/Ic+INTQSd142
+ hVT9+ywe8em+jX0LbeN32kxk7GSUucqJ0f2S6/FA/bS4p/yZ/9kT1eTwLGdJd2f5
+ HlQ3p+1UnjO0dDuvIMCZgUx9rOIEe9lHk+aLqpC8B/6g9IP8rtigBWUt/+oL687Y
+ yIFSyib16G8Nw9jZ3evh5rR1JLYtPHvAJiodsT3iY/+wZkuo4dAa4/QlKPT7QXaU
+ G5/AA/8zdsVOJl5JOHjP2pFBMzxttkWbkuYpEQe7LRw5MOlfFpMEYYZ+NJGVwDNe
+ 0WTpiOIDo78xaq5TsOS23fJCEKodtFrITXvSv0c1tNoL/WcslwmwcV3mKyySFffG
+ Sj7G5RECgYEA7NszuBZBY0Zn7qLkczIdTq15lZ0KFJb2sHIRQbzeeCYn6Q8LJsCb
+ ELhgevun4BxrE2O+R8H4HL+g002vqzL9Vn9oOqFTn3GZMaHojiMSmjCBNl05Mftw
+ EM69l6WV6H8E+D90GMGGoxRJlqHdOuNcQ9bdQpkF4vTNBfzx7VU+5csCgYEAxe23
+ h191srNg7wjafMuK22RtM739knqX+sqeFaGqM6f73+vJaqNilvfgSRQYZc1MOetp
+ Ty4A4g/Jx/NkDWkaLbewFaHw7dNK62Vr6Ovl67Z9sEo8A2ySS7VWVuAqzVbRjyGp
+ yddGiW2Q+ITdfPfHbCFobVUgFeSinfZxkMFw4g0CgYBG8rZASzJU+W8CdXq24ukS
+ ezYzUbIGTt4gJlry9Q8ysEM+NYpilkkcrg4AaMd1gy2zxinmNr0KZ4BWKywWvRRT
+ x6BCB7cTyKRZ0KTnhqv40dSyoyQRy75a2oLCHRCVbw7fCarOC5I34UjVvTCWhipK
+ C9+FJm8z954+T/Fr5SANFwKBgEJBSvhD2jBRn5ckjY7My9SZD30Mkj9gTlOjU7vF
+ /CWCi+vvD+NkgfIrU6bi1S/uwx94UC4zJhSGWHNYZBuhHSREour65J2X5zJZJwA3
+ RyXaVsSWdPRoeahiMV6vd2R5NXkGOcHZEEGcrbSjNUlJ4DWwETbYEf+CI3VhM67T
+ MihZAoGBALxcTSivHJZDle81lsu1dcgmzZkUfQAcUSYDWhg+Bqg3A8FVKMpEzrbd
+ weGRM8S8oAz1PN0T/LRcpJq3TFZpy+iXx59jl5XenmoKwPr+u5XFrEHTWqNS2NcL
+ MwS8VTJhWYNVdrNIRWClRVUv87hZMha40JHiPK1KA4em1G+H29x3
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC3IJ+JfHAHoOTqftjW327ZeoyQs+8wCsrBFRYYTBPNLvDQ3NhsJh1R/Gj2be2aEI7qFvRirOTosRw6sBFHuccrt1LQmlGZkF4Pe2F2BcPzA1obU9BnN03AYc/lyN8Fc8dhd0/WToe0sHU/KY8PISqFTRkmtUYErj5ZIs16xIY4yJERBLGSD/zloZ1qAtYeABuCLTHXHH4G7wu25WQhzPwgxreVqdjsHyzlogzGw4ZOiQm91EtBkk3QB8ZFIA1MhBld+CRM7nA/zlSw2KwHyPtuWIuXXBvGCer8tMln6dDU8Dh3Hxy3QUodghHrNJ3QuQNlrEmdkGBKtWdnYkFu3uFP
+ bmk_enabled: 'False'
+ calico_cni_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/cni:latest
+ calico_enable_nat: 'True'
+ calico_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/node:latest
+ calico_netmask: '16'
+ calico_network: 192.168.0.0
+ calicoctl_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/ctl:latest
+ ceph_enabled: 'False'
+ cicd_control_node01_address: 10.167.4.91
+ cicd_control_node01_hostname: cid01
+ cicd_control_node02_address: 10.167.4.92
+ cicd_control_node02_hostname: cid02
+ cicd_control_node03_address: 10.167.4.93
+ cicd_control_node03_hostname: cid03
+ cicd_control_vip_address: 10.167.4.90
+ cicd_control_vip_hostname: cid
+ cicd_enabled: 'True'
+ cicd_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEAv64AnFbEuuOQHLlmMkmaZ+Hh/8hJ+VfFpJ/MzW1wWzYyhis7
+ 3A8rxNFWJ/I1/LJSsFI8qU0DpxjFjS9LMTTFXhDPPpuzgRLwfVusEmuQdXjOiT34
+ AIs07Q4w1nlvJ2+/l788ie1AEfnewd9erUHOs8Wt/PT3OOM/0ikY7EibvYF4L1Lb
+ xGRKYnUkY7G3eal9XcQpsTzAFRXoK3WafbCFBFsfzEWOhx1T+tn1SwaxPYJDt1OB
+ B1s77enFtBwbmbd0m1F1ufSXmdWea2xF3+5caS6tps/hwhCoOSJUQb7+dK4ri8og
+ q2YIhfEptrMP1R+nVqEY76P31aa/YSw4zOvcQwIDAQABAoIBAQCLKOzQlD4n4ObT
+ s9Z6U+2B1gCaDvOFzy9yoYGy8u1Li0GLHwBKd8kzDzgZsEN5vo1B7bKUx5ELU3S5
+ V8ijZMiVzmZn8eqUnwdyO4flp6otXxOzmAXhfy9hm5fhXjBQ1VSn+vMcv95wLpSG
+ 9IBsEQbchXwX1lFWP8Yp8iRiByTqoz6A7qSxRzIOtq1/coYS9Vcy7VZDMiUjqvuc
+ pYvwYHvrgeYqxLXyDRzbZX1BbkSoNI/5VwxLb9IMG901IXph0r4V3uVgnnq+Xzkk
+ MoOfmB3cyOrvtWblZAjkyA+jzTs/QNALRUeI7wUeh4FvlwEGHE6v5G4G28zOS0vL
+ 7IEhCqThAoGBAOeyDO07b060l+NOO+Jkv+NV31VD0w3S4TMyLPVSxXsrRPoHM9RM
+ udi6lewmALE4wk2Lc1Il6n0UrUGVbXxf55NJp2BQoSic+ZK2nTki0cZ/CkUDVNwY
+ R0WtWE0i3J+eF3e8j9VYm1mIlv0aDoYeH4qCp5is/JanvLy4MUl6tM7/AoGBANPJ
+ XheDO5lmqq1ejDTo3GAzYuAs44dQLDs0znEuuaUKZ4MKgQ4ax0L9n0MxvsuUGVcN
+ Nm7fZS4uMY3zLCOLcAXyD1jXY210gmOgFdXeYrH+2kSmqfflV8KHOLCatxLzRtbe
+ KBflcrEnrpUVNGKlpZaYr+4AyapXeMuXIxwveva9AoGAYtoDS9/UwHaqau+A+zlS
+ 6TJFA8LZNAepz0b0CYLUAJXYavhRs508mWwZ9NPN7c6yj5UUkZLdtZnxxY50VOEy
+ ExQUljIwX/yBOogxEiR57b9b6U/fj7vIBMFNcDOUf4Far9pCX5rbRNrS2I+abLxD
+ ZrwRt0Duz3QnQTkwxhHVPI8CgYAaIjQJJLl7AW84O32DneRrvouJ7CAbd2ot2CNN
+ Vh20XudNBUPNkMJb4t3/Nak8h8bktg2sesaKf0rAIGym6jLlmOwJ43IydHkOgBeR
+ r4JwQml+pS4+F7/Pkk4NhNnobbqlEv7RjA+uCp6BaP9w2M3pGmhDLzezXF3ciYbc
+ mINM5QKBgQCyM9ZWwSiA0D3oitnhs7C4eC0IHBfnSoa7f40osKm4VvmqKBFgRu8L
+ qYK9qX++pUm4sk0q7poGUscc1udMlejAkfc/HLIlUi6MM+S7ZQ2NHtnZ7COZa5O4
+ 9fG8FTiigLvMHka9ihYXtPbyGvusCaqyHp3D9VyOT+WsyM5eJe40lA==
+ -----END RSA PRIVATE KEY-----
+ cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/rgCcVsS645AcuWYySZpn4eH/yEn5V8Wkn8zNbXBbNjKGKzvcDyvE0VYn8jX8slKwUjypTQOnGMWNL0sxNMVeEM8+m7OBEvB9W6wSa5B1eM6JPfgAizTtDjDWeW8nb7+XvzyJ7UAR+d7B316tQc6zxa389Pc44z/SKRjsSJu9gXgvUtvEZEpidSRjsbd5qX1dxCmxPMAVFegrdZp9sIUEWx/MRY6HHVP62fVLBrE9gkO3U4EHWzvt6cW0HBuZt3SbUXW59JeZ1Z5rbEXf7lxpLq2mz+HCEKg5IlRBvv50riuLyiCrZgiF8Sm2sw/VH6dWoRjvo/fVpr9hLDjM69xD
+ cluster_domain: cookied-mcp-k8s-calico-sl.local
+ cluster_name: cookied-mcp-k8s-calico-sl
+ context_seed: T3sbEdCaBfxrg9ysyA6LIaift250Ktb389rpcISKbdqPi5j0WHKiKAhBftYueBKl
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 10.167.4.0/24
+ control_vlan: '10'
+ cookiecutter_template_branch: ''
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+ deploy_network_gateway: 10.167.5.1
+ deploy_network_netmask: 255.255.255.0
+ deploy_network_subnet: 10.167.5.0/24
+ deployment_type: physical
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
+ email_address: ddmitriev@mirantis.com
+ etcd_ssl: 'True'
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: 10.167.4.241
+ infra_kvm01_deploy_address: 10.167.5.91
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 10.167.4.242
+ infra_kvm02_deploy_address: 10.167.5.92
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 10.167.4.243
+ infra_kvm03_deploy_address: 10.167.5.93
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: 10.167.4.240
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ internal_proxy_enabled: 'False'
+ kqueen_custom_mail_enabled: 'False'
+ kqueen_enabled: 'False'
+ kubernetes_compute_node01_address: 10.167.4.101
+ kubernetes_compute_node01_deploy_address: 10.167.5.101
+ kubernetes_compute_node01_hostname: cmp01
+ kubernetes_compute_node02_address: 10.167.4.102
+ kubernetes_compute_node02_deploy_address: 10.167.5.102
+ kubernetes_compute_node02_hostname: cmp02
+ kubernetes_control_address: 10.167.4.10
+ kubernetes_control_node01_address: 10.167.4.11
+ kubernetes_control_node01_deploy_address: 10.167.5.11
+ kubernetes_control_node01_hostname: ctl01
+ kubernetes_control_node02_address: 10.167.4.12
+ kubernetes_control_node02_deploy_address: 10.167.5.12
+ kubernetes_control_node02_hostname: ctl02
+ kubernetes_control_node03_address: 10.167.4.13
+ kubernetes_control_node03_deploy_address: 10.167.5.13
+ kubernetes_control_node03_hostname: ctl03
+ kubernetes_enabled: 'True'
+ kubernetes_externaldns_enabled: 'False'
+ kubernetes_keepalived_vip_interface: ens4
+ kubernetes_network_calico_enabled: 'True'
+ kubernetes_virtlet_enabled: 'False'
+ local_repositories: 'False'
+ maas_deploy_address: 10.167.5.15
+ maas_deploy_range_end: 10.167.5.254
+ maas_deploy_range_start: 10.167.5.1
+ maas_deploy_vlan: '0'
+ maas_fabric_name: fabric-0
+ maas_hostname: cfg01
+ mcp_common_scripts_branch: ''
+ mcp_version: proposed
+ offline_deployment: 'False'
+ opencontrail_enabled: 'False'
+ openldap_domain: ${_param:cluster_name}.local
+ openldap_enabled: 'True'
+ openldap_organisation: ${_param:cluster_name}
+ openssh_groups: cicd
+ openstack_enabled: 'False'
+ oss_enabled: 'False'
+ oss_node03_address: ${_param:stacklight_monitor_node03_address}
+ oss_webhook_app_id: '24'
+ oss_webhook_login_id: '13'
+ platform: kubernetes_enabled
+ public_host: ${_param:infra_config_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ salt_api_password: LTlVnap35hqpRVbB5QjA27EuKh9Ttl3k
+ salt_api_password_hash: $6$RKagUPuQ$Javpjz7b.hqKOOr1rai7uGQd/FnqlOH59tXn12/0G.LkVyunYmgBkSC5zTjoqZvIS1fOOOqsmCb9Q4HcGUbXS.
+ salt_master_address: 10.167.4.15
+ salt_master_hostname: cfg01
+ salt_master_management_address: 10.167.5.15
+ shared_reclass_branch: ''
+ shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+ stacklight_enabled: 'True'
+ stacklight_log_address: 10.167.4.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 10.167.4.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 10.167.4.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 10.167.4.63
+ stacklight_log_node03_hostname: log03
+ stacklight_long_term_storage_type: prometheus
+ stacklight_monitor_address: 10.167.4.70
+ stacklight_monitor_hostname: mon
+ stacklight_monitor_node01_address: 10.167.4.71
+ stacklight_monitor_node01_hostname: mon01
+ stacklight_monitor_node02_address: 10.167.4.72
+ stacklight_monitor_node02_hostname: mon02
+ stacklight_monitor_node03_address: 10.167.4.73
+ stacklight_monitor_node03_hostname: mon03
+ stacklight_telemetry_address: 10.167.4.85
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 10.167.4.86
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 10.167.4.87
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 10.167.4.88
+ stacklight_telemetry_node03_hostname: mtr03
+ stacklight_version: '2'
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: 10.167.6.1
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 10.167.6.0/24
+ tenant_vlan: '20'
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'False'
+ vnf_onboarding_enabled: 'False'
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/deploy-and-test.groovy b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/deploy-and-test.groovy
new file mode 100644
index 0000000..71b3e8d
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/deploy-and-test.groovy
@@ -0,0 +1,140 @@
+common = new com.mirantis.mk.Common()
+
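+// Local copy of run_cmd() from SharedPipeline.groovy: the shared
+// library can only be loaded after the tcp-qa repository is cloned
+// in the stage below.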
+def run_cmd(cmd, returnStdout=false) {
+ common.printMsg("Run shell command:\n" + cmd, "blue")
+ def VENV_PATH='/home/jenkins/fuel-devops30'
+    script = "set +x; echo 'activate python virtualenv ${VENV_PATH}'; . ${VENV_PATH}/bin/activate; bash -c 'set -ex; ${cmd.stripIndent()}'"
+ return sh(script: script, returnStdout: returnStdout)
+}
+
+def run_cmd_stdout(cmd) {
+ return run_cmd(cmd, true)
+}
+
+node ("${NODE_NAME}") {
+ try {
+
+ stage("Clean the environment") {
+ println "Clean the working directory ${env.WORKSPACE}"
+ deleteDir()
+            // do not fail if environment doesn't exist
+ println "Remove environment ${ENV_NAME}"
+ run_cmd("""\
+ dos.py erase ${ENV_NAME} || true
+ """)
+ println "Remove config drive ISO"
+ run_cmd("""\
+ rm /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} || true
+ """)
+ }
+
+ stage("Clone tcp-qa project and install requirements") {
+ run_cmd("""\
+ git clone https://github.com/Mirantis/tcp-qa.git ${env.WORKSPACE}
+ #cd tcp-qa
+ if [ -n "$TCP_QA_REFS" ]; then
+ set -e
+ git fetch https://review.gerrithub.io/Mirantis/tcp-qa $TCP_QA_REFS && git checkout FETCH_HEAD || exit \$?
+ fi
+ pip install --upgrade --upgrade-strategy=only-if-needed -r tcp_tests/requirements.txt
+ """)
+ }
+
+    // load shared methods from the cloned tcp-qa repository.
+    // DO NOT MOVE this code before cloning the repo
+ def rootDir = pwd()
+ def shared = load "${rootDir}/tcp_tests/templates/SharedPipeline.groovy"
+
+ stage("Create an environment ${ENV_NAME} in disabled state") {
+        // do not fail if environment doesn't exist
+ run_cmd("""\
+ python ./tcp_tests/utils/create_devops_env.py
+ """)
+ }
+
+ stage("Generate the model") {
+ shared.generate_cookied_model()
+ }
+
+ stage("Generate config drive ISO") {
+ shared.generate_configdrive_iso()
+ }
+
+ stage("Upload generated config drive ISO into volume on cfg01 node") {
+ run_cmd("""\
+ virsh vol-upload ${ENV_NAME}_cfg01.${LAB_CONFIG_NAME}.local_config /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --pool default
+ virsh pool-refresh --pool default
+ """)
+ }
+
+ stage("Run the 'underlay' and 'salt-deployed' fixtures to bootstrap salt cluster") {
+ run_cmd("""\
+ export MANAGER=devops
+ export SHUTDOWN_ENV_ON_TEARDOWN=false
+ export BOOTSTRAP_TIMEOUT=900
+ export PYTHONIOENCODING=UTF-8
+ export REPOSITORY_SUITE=${MCP_VERSION}
+ #export SALT_STEPS_PATH=templates/${LAB_CONFIG_NAME}/salt.yaml
+ export TEST_GROUP=test_install_local_salt
+ py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -k \${TEST_GROUP}
+            sleep 60  # wait for jenkins to start and for IO to calm down
+
+ """)
+ }
+
+ // Install core and cicd
+ stage("Run Jenkins job on salt-master [deploy_openstack:core]") {
+ shared.run_job_on_day01_node("core")
+ }
+
+ stage("Run Jenkins job on salt-master [deploy_openstack:cicd]") {
+ shared.run_job_on_day01_node("cicd")
+ }
+
+ // Install the cluster
+ for (stack in "${STACK_INSTALL}".split(",")) {
+ stage("Run Jenkins job on CICD [deploy_openstack:${stack}]") {
+ shared.run_job_on_cicd_nodes(stack)
+ }
+ }
+
+ stage("Run tests") {
+ run_cmd("""\
+ export ENV_NAME=${ENV_NAME}
+ . ./tcp_tests/utils/env_salt
+ . ./tcp_tests/utils/env_k8s
+
+ # Initialize variables used in tcp-qa tests
+ export CURRENT_SNAPSHOT=sl_deployed # provide the snapshot name required by the test
+            export TESTS_CONFIGS=\$(pwd)/${ENV_NAME}_salt_deployed.ini  # some SSH data may be filled in separately
+
+ export MANAGER=empty # skip 'hardware' fixture, disable snapshot/revert features
+ # export SSH='{...}' # non-empty SSH required to skip 'underlay' fixture. It is filled from TESTS_CONFIGS now
+ export salt_master_host=\$SALT_MASTER_IP # skip salt_deployed fixture
+ export salt_master_port=6969
+ export SALT_USER=\$SALTAPI_USER
+ export SALT_PASSWORD=\$SALTAPI_PASS
+ export COMMON_SERVICES_INSTALLED=true # skip common_services_deployed fixture
+ export K8S_INSTALLED=true # skip k8s_deployed fixture
+ export sl_installed=true # skip sl_deployed fixture
+
+ py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -m k8s_calico_sl
+
+ #dos.py suspend ${ENV_NAME}
+ #dos.py snapshot ${ENV_NAME} test_completed
+ #dos.py resume ${ENV_NAME}
+ #dos.py time-sync ${ENV_NAME}
+ """)
+ }
+
+ } catch (e) {
+ common.printMsg("Job failed", "red")
+ throw e
+ } finally {
+ // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+ // and report appropriate data to TestRail
+ run_cmd("""\
+ dos.py destroy ${ENV_NAME}
+ """)
+ }
+}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml
new file mode 100644
index 0000000..248b42d
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/environment-context-k8s-sl.yaml
@@ -0,0 +1,235 @@
+nodes:
+ cfg01:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_static_ctl
+
+ kvm01:
+ reclass_storage_name: infra_kvm_node01
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ kvm02:
+ reclass_storage_name: infra_kvm_node02
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ kvm03:
+ reclass_storage_name: infra_kvm_node03
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cid01:
+ reclass_storage_name: cicd_control_node01
+ roles:
+ - cicd_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cid02:
+ reclass_storage_name: cicd_control_node02
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cid03:
+ reclass_storage_name: cicd_control_node03
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl01:
+ reclass_storage_name: kubernetes_control_node01
+ roles:
+ - kubernetes_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl02:
+ reclass_storage_name: kubernetes_control_node02
+ roles:
+ - kubernetes_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl03:
+ reclass_storage_name: kubernetes_control_node03
+ roles:
+ - kubernetes_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cmp01:
+ reclass_storage_name: kubernetes_compute_node01
+ roles:
+ - kubernetes_compute
+ - linux_system_codename_xenial
+ - salt_master_host
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ single_address: ${_param:kubernetes_compute_node01_address}
+
+ cmp02:
+ reclass_storage_name: kubernetes_compute_node02
+ roles:
+ - kubernetes_compute
+ - linux_system_codename_xenial
+ - salt_master_host
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ single_address: ${_param:kubernetes_compute_node02_address}
+
+ mon01:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon02:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon03:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr01:
+ reclass_storage_name: stacklight_telemetry_node01
+ roles:
+ - stacklight_telemetry_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr02:
+ reclass_storage_name: stacklight_telemetry_node02
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr03:
+ reclass_storage_name: stacklight_telemetry_node03
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log01:
+ reclass_storage_name: stacklight_log_node01
+ roles:
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log02:
+ reclass_storage_name: stacklight_log_node02
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log03:
+ reclass_storage_name: stacklight_log_node03
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/salt.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/salt.yaml
new file mode 100644
index 0000000..70f3cd3
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/salt.yaml
@@ -0,0 +1,16 @@
+{% from 'cookied-cicd-k8s-calico-sl/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-cicd-k8s-calico-sl/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-cicd-k8s-calico-sl/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# For other salt model repository parameters, see shared-salt.yaml
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+- description: Workaround, configure ntp and rsyslog on the salt master node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' state.sls ntp,rsyslog
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--meta-data.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+ instance-id: iid-local1
+ hostname: {hostname}
+ local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..77c18d1
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data-cfg01.yaml
@@ -0,0 +1,102 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+   # Block access to SSH while the node is being prepared
+ - cloud-init-per once sudo touch /is_cloud_init_started
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - echo "******** MOUNT CONFIG DRIVE"
+ # Mount config drive
+ - mkdir /root/config-drive
+ - mount /dev/sr0 /root/config-drive
+
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ #- sudo ifdown ens3
+ #- sudo ip r d default || true # remove existing default route to get it from dhcp
+ #- sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 16G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ # Run user data script from config drive
+ - ifdown --force ens3; ifconfig ens3 down; ip a flush dev ens3; rm -f /var/run/network/ifstate.ens3; ip l set down ens3
+ - ifdown --force ens4; ifconfig ens4 down; ip a flush dev ens4; rm -f /var/run/network/ifstate.ens4; ip l set down ens4
+ - rm -f /etc/network/interfaces
+ #- ifdown --force ens5; ifconfig ens5 down; ip a flush dev ens5; rm -f /var/run/network/ifstate.ens5
+ #- cp /root/config-drive/user-data /root/user-data
+ #- sed -i '/^reboot$/d' /root/user-data
+ #- set -x; cd /root && /bin/bash -xe ./user-data
+ - set -x; cd /root/config-drive && /bin/bash -xe ./user-data
+
+ #- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+
+ # Enable root access (after reboot)
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ #- service sshd stop
+
+ ########################################################
+ # Node is ready, allow SSH access
+ - touch /is_cloud_init_finished
+ #- reboot
+ ########################################################
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ #- path: /etc/network/interfaces
+ - path: /root/interfaces
+ content: |
+ auto lo
+ iface lo inet loopback
+
+ auto ens3
+ iface ens3 inet dhcp
+
+ - path: /root/.ssh/config
+ owner: root:root
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 60
+ ServerAliveCountMax 0
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data1604-swp.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data1604-swp.yaml
new file mode 100644
index 0000000..319c007
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data1604-swp.yaml
@@ -0,0 +1,59 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 16G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data1604.yaml
new file mode 100644
index 0000000..b1b6430
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay--user-data1604.yaml
@@ -0,0 +1,59 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ #- fallocate -l 16G /swapfile
+ #- chmod 600 /swapfile
+ #- mkswap /swapfile
+ #- swapon /swapfile
+ #- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml
new file mode 100644
index 0000000..10c716e
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/underlay.yaml
@@ -0,0 +1,772 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-k8s-calico-sl') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID = os_env('HOSTNAME_CID', 'cid.' + DOMAIN_NAME) %}
+
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL = os_env('HOSTNAME_CTL', 'ctl.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG = os_env('HOSTNAME_LOG', 'log.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR = os_env('HOSTNAME_MTR', 'mtr.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON = os_env('HOSTNAME_MON', 'mon.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM = os_env('HOSTNAME_KVM', 'kvm.' + DOMAIN_NAME) %}
+
+{% import 'cookied-cicd-k8s-calico-sl/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-cicd-k8s-calico-sl/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-cicd-k8s-calico-sl/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+{% import 'cookied-cicd-k8s-calico-sl/underlay--user-data1604-swp.yaml' as CLOUDINIT_USER_DATA_1604_SWP with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
+ - &cloudinit_user_data_1604_swp {{ CLOUDINIT_USER_DATA_1604_SWP }}
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', LAB_CONFIG_NAME + '_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+ address_pools:
+ private-pool01:
+ net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +15
+
+ default_{{ HOSTNAME_CID }}: +90
+ default_{{ HOSTNAME_CID01 }}: +91
+ default_{{ HOSTNAME_CID02 }}: +92
+ default_{{ HOSTNAME_CID03 }}: +93
+
+ ip_ranges:
+ dhcp: [+90, -10]
+
+ admin-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +15
+
+ default_{{ HOSTNAME_CID }}: +90
+ default_{{ HOSTNAME_CID01 }}: +91
+ default_{{ HOSTNAME_CID02 }}: +92
+ default_{{ HOSTNAME_CID03 }}: +93
+
+ default_{{ HOSTNAME_CTL }}: +10
+ default_{{ HOSTNAME_CTL01 }}: +11
+ default_{{ HOSTNAME_CTL02 }}: +12
+ default_{{ HOSTNAME_CTL03 }}: +13
+ default_{{ HOSTNAME_CMP01 }}: +101
+ default_{{ HOSTNAME_CMP02 }}: +102
+ default_{{ HOSTNAME_LOG }}: +60
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MON }}: +70
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_MTR }}: +85
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_PRX01 }}: +222
+ default_{{ HOSTNAME_KVM }}: +240
+ default_{{ HOSTNAME_KVM01 }}: +241
+ default_{{ HOSTNAME_KVM02 }}: +242
+ default_{{ HOSTNAME_KVM03 }}: +243
+
+ ip_ranges:
+ dhcp: [+90, -10]
+
+ tenant-pool01:
+ net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +15
+
+ default_{{ HOSTNAME_CID }}: +90
+ default_{{ HOSTNAME_CID01 }}: +91
+ default_{{ HOSTNAME_CID02 }}: +92
+ default_{{ HOSTNAME_CID03 }}: +93
+
+ ip_ranges:
+ dhcp: [+10, -10]
+
+ external-pool01:
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +15
+
+ default_{{ HOSTNAME_CID }}: +90
+ default_{{ HOSTNAME_CID01 }}: +91
+ default_{{ HOSTNAME_CID02 }}: +92
+ default_{{ HOSTNAME_CID03 }}: +93
+
+ ip_ranges:
+ dhcp: [+10, -10]
+
+
+ groups:
+ - name: default
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: {{ os_env('CONNECTION_STRING', 'qemu:///system') }}
+ storage_pool_name: {{ os_env('STORAGE_POOL_NAME', 'default') }}
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: {{ os_env('DRIVER_USE_HOST_CPU', true) }}
+ use_hugepages: {{ os_env('DRIVER_USE_HUGEPAGES', false) }}
+
+ network_pools:
+ admin: admin-pool01
+ private: private-pool01
+ tenant: tenant-pool01
+ external: external-pool01
+
+ l2_network_devices:
+ admin:
+ address_pool: admin-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
+ private:
+ address_pool: private-pool01
+ dhcp: false
+ forward:
+ mode: route
+
+ external:
+ address_pool: external-pool01
+ dhcp: false
+ forward:
+ mode: nat
+
+
+ group_volumes:
+ - name: cfg01_day01_image # Pre-configured day01 image
+      source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }}  # http://images.mirantis.com/cfg01-day01.qcow2, or fall back to IMAGE_PATH1604
+ format: qcow2
+ - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes initially based on kvm nodes.
+                                             # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
+ # or
+ # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ source_image: {{ os_env('MCP_IMAGE_PATH1604') }}
+ format: qcow2
+
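+    # cfg01 boots from the pre-built day01 image; the other nodes reuse
+    # the 'interfaces' and 'network_config' YAML anchors defined on kvm01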
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: {{ os_env('CFG_NODE_CPU', 3) }}
+ memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
+ backing_store: cfg01_day01_image
+ format: qcow2
+ - name: config
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ # source_image: !os_env CFG01_CONFIG_PATH # no source image required.
+ # it will be uploaded after config drive generation
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_cfg01
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+
+ - name: {{ HOSTNAME_KVM01 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
+ memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: &interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config: &network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+
+ - name: {{ HOSTNAME_KVM02 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
+ memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_KVM03 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
+ memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CID01 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+ memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CID02 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+ memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CID03 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+ memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CTL01 }}
+ role: k8s_controller
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CTL02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CTL03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CMP01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CMP02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml
new file mode 100644
index 0000000..693a589
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml
@@ -0,0 +1,166 @@
+default_context:
+ backup_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEA3ufjR+Eh/CJp84JZPKosMNL7ydXidfe9qdAnQIGGOsS/TBnc
+ RyY+hy4Mg5Or//VBpY53frcrEEnm1CzEeIfGALiQtsMWOwEEiEHIDzbxN7xyYK1u
+ 9fpcRHZy16VJx2gQOxbeAIyct9jrQeQuSbN0k7Tr+bfWLVPoGL4SZiBC+dxRjrmT
+ 2pMIdqa8kAcd5cakuyENXT31ZI/ffVscl7TJBat7tUbgD+48GK4LqMBL/eC6v2bQ
+ ohmo6ZqWLh9uT+/l/rRdIiBhI+kmPpUDRLnjkd/gH4GQh/r/PPlxu11JNwPgY4Kx
+ IAk5hq9uPRn3pqBKPg+WHgWmQvpHqVVDDnf7XQIDAQABAoIBAQDCEtuL5bQVNlFR
+ NphDfVZkXA3lOVempkB37Ud/nkYkPNDhjVKAkAe44pr6pEQI4px5bIUVypyv3egf
+ q6qT1oLKdedpeImObeBoUf3BYXC7ulNLYTVO7OAQq6BpqPuHpk8bY1l+2O5KE48h
+ G25BtQE26TrbfPf5FyjpAfQ6/rPRniURu1ZMFK1Do18wf7lGxa6RN4jPbfGfEvlf
+ q6GWGtsOB2kLXnUDjuDeUrgS8HgxBSMH+lwxrkdX0Qb4VN+cBOp8TC30rHXdLAmn
+ mWUDQhaao+zZpZsAAxGbM2BAFUQAicd/OS6FJn6xkH0KN6+Rp1Iiy3Sa97wMsMti
+ aHAyVwkBAoGBAPf1gcRmKTHaUVb7XgS3acytBm7LM2GCQfgPDvQIp8rf8bmnAko6
+ MzxPdq2WXzWY75JiNxQSsmemcJyBRJm1sscp0txAnZS5SSycWlHy0zP5LJDtU7jW
+ Z7dXtRYzdDL0sH6KVQCOmfDmGowLs3eO0F7MyCbDIwdkIQ4LCs+TWcYxAoGBAOYi
+ ZUR7vXFbmXQQUEHxeft3sF6v8epFhnMuvwHgmHIzSCDDKoIMoLlqDOV8KynggyqQ
+ /YpvzqfCuP4aDpriU1glTZB0R9WdkKwk+GW13U9LfDw86u/XfGkMtT2QP6PmIQaI
+ 1MJlX2b0rihnUy6zqRFH+mU4+9I66Gg1s8O9s4DtAoGAbiUol82pzvNj3neatA2l
+ eb4CdYTeNhpeo4pM4ipWHtCL2CRP6BkiWVATL9j0QiLFiQkH3mrPxSsyKtNhXcZQ
+ vBfgCubJGR+VWbO6i1yKZTPykA5cemcDe3YCgvIoU9pN7GgWikDOMSyF7l/kQN+3
+ v+THpDBahxX7ePl+u+aAooECgYBOoigJ+2HirtLDJqPVtGXit6XK5MF7M+BZ0Pow
+ 8QYF12Ho1+bZYuk0EXlwnDm/aFhJHhuTxtpM1isRn+Onpnel4bEcD69P3TPGric1
+ 0atZ4cgEaSg5ZV68Ijx3Wad1IDfenLhd5/duHWK4qX1xsq+tGPQEzDC3R6uLl/Xh
+ hxsjjQKBgQC53W+e4N6pOK8oCA2tlDw8Nu733FRrxNP4emdTzYyKJbNxBP2LI/ts
+ K/fgcD9aWeo0zt3Y/0UzzijqrWMCG2NdAlHwEShcXUt1525O4H64mH50MeylTGcj
+ t6ZFlhArriIXlejxuU9Jxe/HEKMh/1iBdlnD0rCOfhJaY/HO9dWtRw==
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDe5+NH4SH8Imnzglk8qiww0vvJ1eJ1972p0CdAgYY6xL9MGdxHJj6HLgyDk6v/9UGljnd+tysQSebULMR4h8YAuJC2wxY7AQSIQcgPNvE3vHJgrW71+lxEdnLXpUnHaBA7Ft4AjJy32OtB5C5Js3STtOv5t9YtU+gYvhJmIEL53FGOuZPakwh2pryQBx3lxqS7IQ1dPfVkj999WxyXtMkFq3u1RuAP7jwYrguowEv94Lq/ZtCiGajpmpYuH25P7+X+tF0iIGEj6SY+lQNEueOR3+AfgZCH+v88+XG7XUk3A+BjgrEgCTmGr249GfemoEo+D5YeBaZC+kepVUMOd/td
+ bmk_enabled: 'False'
+ calico_cni_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/cni:latest
+ calico_enable_nat: 'True'
+ calico_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/node:latest
+ calico_netmask: '16'
+ calico_network: 192.168.0.0
+ calicoctl_image: docker-prod-local.artifactory.mirantis.com/mirantis/projectcalico/calico/ctl:latest
+ ceph_enabled: 'False'
+ cicd_control_node01_address: 10.167.4.91
+ cicd_control_node01_hostname: cid01
+ cicd_control_node02_address: 10.167.4.92
+ cicd_control_node02_hostname: cid02
+ cicd_control_node03_address: 10.167.4.93
+ cicd_control_node03_hostname: cid03
+ cicd_control_vip_address: 10.167.4.90
+ cicd_control_vip_hostname: cid
+ cicd_enabled: 'True'
+ cicd_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEA0vs3yV4GVrnMDT1i1rc3DKVCU4kQ6D7OCDhJ10kiA4VZ/9Td
+ CIKGxL3+kzNHBqEB3HcjsGJ7zTud3S97Zd21HzTeF0iia7kQXZs3zw3GnU4IWCzd
+ zeyJenD1twrDILc1Q80SPIw8klbgK3T8Wcs0sACyfAORTKnM4Alrq3PSyn2G/L7s
+ wL+4OYWw8+V1Bo6f/1zsVdJWFelZ1HNd1fuGP9Egx+PSHznmBQniewM2AktZVHkd
+ e2j+sIMnimRr55ic6tEiPKb1sp+mdqfxUbkxU1q3r80Bnq5lbYNexJZLZFbY8pBF
+ w5qtVoXsrUra0xT66yDs9z9kgTXZt8MppSPEywIDAQABAoIBAD60d3IbxdqEwga1
+ Vejm7y+M9leJh0LfV7DNufSIQdm2CnektkTPNmrG5SCuvs9TjxANQMgtnQn0TAjv
+ EcfGywwJVHCrH8rQZ/LKLR4WxA8AoC5Et296e6muZPkP88qHaQdLrb3dGCuOMnX4
+ AQBXCsO8kU+WZ9rXJJL5ecytUdroRZnmChUN9sZFoCrrFmCJyKzSdmbtyHwvmc6t
+ K9YHx5HcQppdnbaUT/PcRNjNgOdQsGrYG/4tGpZE9GYqBRrtEZAFKnRDDlaq9mvc
+ FJ451YhomyUNtM7hor+VpSi9WtgiXh9fWUfJUIH4Ven9y29I7SNAxftfs0Xxdz2O
+ aEYnhPECgYEA9YKsXtslfSsjyG3Z6NTy9peAjBlUSA7jkbsfOIcN1qLAxSlOMIVG
+ P9knWa5rgp1nOssHxpbCJaiZv1r2C0sHZ+B/ZCNV1lCdz+pN+Y0AQHz2/VrQEtkR
+ cvClpWXjACT0igJ5mrmehzV166CI3t7+nCrkRd9aYTuVpNP4o9aEuGcCgYEA2/7f
+ 2UVcq8SKJYAJD0mNc4mfnNwmyvIfwJqQfechPPwXG3kmd2jgFdUV3JyCwS771TRz
+ 9QAHBLoBgKSVKLBORsTjYy7TSIYvnTUgrymMwc75nevVIy258hPSYpF6cVEIRnNr
+ sek5heYQxJZr9RraxihvoaZytmPRm918sR6B4f0CgYEAlizQc1VpoR76TGelm55m
+ 4B/cKdZ0j39MBKCJgHJcLKZxdCjIAzYCupuCToE6kjLmKjh3ESq2p4JySXLCfjXu
+ 2cOhKQfUQbweTEfuWm+9b7UBAAjErkLJQZ2iNYIVUMlKLAFHkTVpmxtAflk8X9fX
+ tn8mEveEuWVRK/ndZZqapJECgYA3IQSpZsdVR/gyc4ZRrWXkCR3VahnSi6BHXLRO
+ yKe8p5OGz/JCxCY7cl17HkFp9cMn53ATekFH/vC3cwbp3lyPQXGV/jr2FqJB6/lX
+ y7q5KovE9j9ABIpvTmZPSxN66AqB1RSszbwbgM685NEC6Arg02s9//8JE7SIMZW4
+ sONtZQKBgQDZoa9yAM9YRRFUpgiyKdmp4Yzq3xc17xhlrv3HgAKXzCwlRXm+TxKP
+ kmwFI2nn3sPN9jSegCGyOtfzoh4Q7DtBowrjLPUisWk1gLZH4HpCP7mkndR9ODI3
+ So+yjY6Y787NTuAtS97T1AfOPQR8VOm9vuGT1IRWNno3ckThokPcCg==
+ -----END RSA PRIVATE KEY-----
+ cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDS+zfJXgZWucwNPWLWtzcMpUJTiRDoPs4IOEnXSSIDhVn/1N0IgobEvf6TM0cGoQHcdyOwYnvNO53dL3tl3bUfNN4XSKJruRBdmzfPDcadTghYLN3N7Il6cPW3CsMgtzVDzRI8jDySVuArdPxZyzSwALJ8A5FMqczgCWurc9LKfYb8vuzAv7g5hbDz5XUGjp//XOxV0lYV6VnUc13V+4Y/0SDH49IfOeYFCeJ7AzYCS1lUeR17aP6wgyeKZGvnmJzq0SI8pvWyn6Z2p/FRuTFTWrevzQGermVtg17ElktkVtjykEXDmq1WheytStrTFPrrIOz3P2SBNdm3wymlI8TL
+ cluster_domain: cookied-mcp-k8s-calico.local
+ cluster_name: cookied-mcp-k8s-calico
+ context_seed: 6RD8HFGk0xksGje6RcIiGRkHIIkdvHUDgBuUsCTYDv5Tw4DmVMbRlRVnatzGHYJd
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 10.167.4.0/24
+ control_vlan: '10'
+ cookiecutter_template_branch: ''
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+ deploy_network_gateway: 10.167.5.1
+ deploy_network_netmask: 255.255.255.0
+ deploy_network_subnet: 10.167.5.0/24
+ deployment_type: physical
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
+ email_address: ddmitriev@mirantis.com
+ etcd_ssl: 'True'
+ #hyperkube_image: docker-prod-local.artifactory.mirantis.com/mirantis/kubernetes/hyperkube-amd64:v1.8.5-4
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: 10.167.4.241
+ infra_kvm01_deploy_address: 10.167.5.91
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 10.167.4.242
+ infra_kvm02_deploy_address: 10.167.5.92
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 10.167.4.243
+ infra_kvm03_deploy_address: 10.167.5.93
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: 10.167.4.240
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ internal_proxy_enabled: 'False'
+ kqueen_custom_mail_enabled: 'False'
+ kqueen_enabled: 'False'
+ kubernetes_compute_node01_address: 10.167.4.101
+ kubernetes_compute_node01_deploy_address: 10.167.5.101
+ kubernetes_compute_node01_hostname: cmp01
+ kubernetes_compute_node02_address: 10.167.4.102
+ kubernetes_compute_node02_deploy_address: 10.167.5.102
+ kubernetes_compute_node02_hostname: cmp02
+ kubernetes_control_address: 10.167.4.10
+ kubernetes_control_node01_address: 10.167.4.11
+ kubernetes_control_node01_deploy_address: 10.167.5.11
+ kubernetes_control_node01_hostname: ctl01
+ kubernetes_control_node02_address: 10.167.4.12
+ kubernetes_control_node02_deploy_address: 10.167.5.12
+ kubernetes_control_node02_hostname: ctl02
+ kubernetes_control_node03_address: 10.167.4.13
+ kubernetes_control_node03_deploy_address: 10.167.5.13
+ kubernetes_control_node03_hostname: ctl03
+ kubernetes_enabled: 'True'
+ kubernetes_externaldns_enabled: 'False'
+ kubernetes_keepalived_vip_interface: ens4
+ kubernetes_network_calico_enabled: 'True'
+ kubernetes_virtlet_enabled: 'False'
+ local_repositories: 'False'
+ maas_deploy_address: 10.167.5.15
+ maas_hostname: cfg01
+ mcp_common_scripts_branch: ''
+ mcp_version: proposed
+ offline_deployment: 'False'
+ opencontrail_enabled: 'False'
+ openldap_domain: ${_param:cluster_name}.local
+ openldap_enabled: 'True'
+ openldap_organisation: ${_param:cluster_name}
+ openssh_groups: cicd
+ openstack_enabled: 'False'
+ oss_enabled: 'False'
+ oss_node03_address: ${_param:stacklight_monitor_node03_address}
+ platform: kubernetes_enabled
+ public_host: ${_param:infra_config_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ salt_api_password: GItjqF2mBE9JpA6WjnqD4pKNJMWJK72g
+ salt_api_password_hash: $6$XxwWWczf$tFbAgdW1PeVJWTn0Jw/xfwJlss/RgOf9fGWqx2XE7vZ5O/ZGR1AuIgl/HH7Qm3.ZxvutaWmfWszxWcPFZepzv.
+ salt_master_address: 10.167.4.15
+ salt_master_hostname: cfg01
+ salt_master_management_address: 10.167.5.15
+ shared_reclass_branch: ''
+ shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+ stacklight_enabled: 'False'
+ stacklight_version: '2'
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: 10.167.6.1
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 10.167.6.0/24
+ tenant_vlan: '20'
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'False'
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/deploy-and-test.groovy b/tcp_tests/templates/cookied-cicd-k8s-calico/deploy-and-test.groovy
new file mode 100644
index 0000000..18a0c2c
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/deploy-and-test.groovy
@@ -0,0 +1,135 @@
+common = new com.mirantis.mk.Common()
+
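+// Runs a shell command inside the fuel-devops Python virtualenv; pass returnStdout=true to capture the output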
+def run_cmd(cmd, returnStdout=false) {
+ common.printMsg("Run shell command:\n" + cmd, "blue")
+ def VENV_PATH='/home/jenkins/fuel-devops30'
+    script = "set +x; echo 'activate python virtualenv ${VENV_PATH}'; . ${VENV_PATH}/bin/activate; bash -c 'set -ex; ${cmd.stripIndent()}'"
+ return sh(script: script, returnStdout: returnStdout)
+}
+
+def run_cmd_stdout(cmd) {
+ return run_cmd(cmd, true)
+}
+
+node ("${NODE_NAME}") {
+ try {
+
+ stage("Clean the environment") {
+ println "Clean the working directory ${env.WORKSPACE}"
+ deleteDir()
+        // do not fail if the environment doesn't exist
+ println "Remove environment ${ENV_NAME}"
+ run_cmd("""\
+ dos.py erase ${ENV_NAME} || true
+ """)
+ println "Remove config drive ISO"
+ run_cmd("""\
+ rm /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} || true
+ """)
+ }
+
+ stage("Clone tcp-qa project and install requirements") {
+ run_cmd("""\
+ git clone https://github.com/Mirantis/tcp-qa.git ${env.WORKSPACE}
+ #cd tcp-qa
+ if [ -n "$TCP_QA_REFS" ]; then
+ set -e
+ git fetch https://review.gerrithub.io/Mirantis/tcp-qa $TCP_QA_REFS && git checkout FETCH_HEAD || exit \$?
+ fi
+ pip install --upgrade --upgrade-strategy=only-if-needed -r tcp_tests/requirements.txt
+ """)
+ }
+
+    // load shared methods from the cloned tcp-qa repository.
+    // DO NOT MOVE this code above the stage that clones the repo
+ def rootDir = pwd()
+ def shared = load "${rootDir}/tcp_tests/templates/SharedPipeline.groovy"
+
+ stage("Create an environment ${ENV_NAME} in disabled state") {
+        // create the environment in a disabled (not started) state
+ run_cmd("""\
+ python ./tcp_tests/utils/create_devops_env.py
+ """)
+ }
+
+ stage("Generate the model") {
+ shared.generate_cookied_model()
+ }
+
+ stage("Generate config drive ISO") {
+ shared.generate_configdrive_iso()
+ }
+
+ stage("Upload generated config drive ISO into volume on cfg01 node") {
+ run_cmd("""\
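+            # the 'config' cdrom volume on cfg01 was created empty; fill it with the generated ISO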
+ virsh vol-upload ${ENV_NAME}_cfg01.${LAB_CONFIG_NAME}.local_config /home/jenkins/images/${CFG01_CONFIG_IMAGE_NAME} --pool default
+ virsh pool-refresh --pool default
+ """)
+ }
+
+ stage("Run the 'underlay' and 'salt-deployed' fixtures to bootstrap salt cluster") {
+ run_cmd("""\
+ export MANAGER=devops
+ export SHUTDOWN_ENV_ON_TEARDOWN=false
+ export BOOTSTRAP_TIMEOUT=900
+ export PYTHONIOENCODING=UTF-8
+ export REPOSITORY_SUITE=${MCP_VERSION}
+ #export SALT_STEPS_PATH=templates/${LAB_CONFIG_NAME}/salt.yaml
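+                # test_install_local_salt (tcp_tests/tests/environment/test_local_dns.py) requests only the 'underlay' and 'salt_deployed' fixtures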
+ export TEST_GROUP=test_install_local_salt
+ py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -k \${TEST_GROUP}
+                sleep 60 # wait for Jenkins to start and for IO to calm down
+
+ """)
+ }
+
+ // Install core and cicd
+ stage("Run Jenkins job on salt-master [deploy_openstack:core]") {
+ shared.run_job_on_day01_node("core")
+ }
+
+ stage("Run Jenkins job on salt-master [deploy_openstack:cicd]") {
+ shared.run_job_on_day01_node("cicd")
+ }
+
+ // Install the cluster
+ for (stack in "${STACK_INSTALL}".split(",")) {
+ stage("Run Jenkins job on CICD [deploy_openstack:${stack}]") {
+ shared.run_job_on_cicd_nodes(stack)
+ }
+ }
+
+ stage("Run tests") {
+ run_cmd("""\
+ export ENV_NAME=${ENV_NAME}
+ . ./tcp_tests/utils/env_salt
+ . ./tcp_tests/utils/env_k8s
+
+ # Initialize variables used in tcp-qa tests
+ export CURRENT_SNAPSHOT=k8s_deployed # provide the snapshot name required by the test
+            export TESTS_CONFIGS=\$(pwd)/${ENV_NAME}_salt_deployed.ini # some SSH data may be filled in separately
+
+ export MANAGER=empty # skip 'hardware' fixture, disable snapshot/revert features
+ # export SSH='{...}' # non-empty SSH required to skip 'underlay' fixture. It is filled from TESTS_CONFIGS now
+ export salt_master_host=\$SALT_MASTER_IP # skip salt_deployed fixture
+ export salt_master_port=6969
+ export SALT_USER=\$SALTAPI_USER
+ export SALT_PASSWORD=\$SALTAPI_PASS
+ export COMMON_SERVICES_INSTALLED=true # skip common_services_deployed fixture
+ export K8S_INSTALLED=true # skip k8s_deployed fixture
+
+ py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -m k8s_calico
+ """)
+ }
+
+ } catch (e) {
+ common.printMsg("Job failed", "red")
+ throw e
+ } finally {
+ // TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
+ // and report appropriate data to TestRail
+ run_cmd("""\
+ dos.py destroy ${ENV_NAME}
+ """)
+ }
+
+}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml
new file mode 100644
index 0000000..44c773f
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/environment-context-k8s.yaml
@@ -0,0 +1,136 @@
+nodes:
+ cfg01:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_static_ctl
+
+ kvm01:
+ reclass_storage_name: infra_kvm_node01
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ kvm02:
+ reclass_storage_name: infra_kvm_node02
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ kvm03:
+ reclass_storage_name: infra_kvm_node03
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cid01:
+ reclass_storage_name: cicd_control_node01
+ roles:
+ - cicd_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cid02:
+ reclass_storage_name: cicd_control_node02
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cid03:
+ reclass_storage_name: cicd_control_node03
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl01:
+ reclass_storage_name: kubernetes_control_node01
+ roles:
+ - kubernetes_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl02:
+ reclass_storage_name: kubernetes_control_node02
+ roles:
+ - kubernetes_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl03:
+ reclass_storage_name: kubernetes_control_node03
+ roles:
+ - kubernetes_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ cmp01:
+ reclass_storage_name: kubernetes_compute_node01
+ roles:
+ - kubernetes_compute
+ - linux_system_codename_xenial
+ - salt_master_host
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ single_address: ${_param:kubernetes_compute_node01_address}
+
+ cmp02:
+ reclass_storage_name: kubernetes_compute_node02
+ roles:
+ - kubernetes_compute
+ - linux_system_codename_xenial
+ - salt_master_host
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ single_address: ${_param:kubernetes_compute_node02_address}
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/salt.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/salt.yaml
new file mode 100644
index 0000000..7451056
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/salt.yaml
@@ -0,0 +1,16 @@
+{% from 'cookied-cicd-k8s-calico/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-cicd-k8s-calico/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-cicd-k8s-calico/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for the other salt model repository parameters
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
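+# cfg01 boots from the pre-configured day01 image, so only a workaround and the minion installation are needed here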
+- description: Workaround, configure ntp and rsyslog on the salt master node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' state.sls ntp,rsyslog
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--meta-data.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+ instance-id: iid-local1
+ hostname: {hostname}
+ local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..77c18d1
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data-cfg01.yaml
@@ -0,0 +1,102 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+   # Block SSH access while the node is being prepared
+ - cloud-init-per once sudo touch /is_cloud_init_started
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - echo "******** MOUNT CONFIG DRIVE"
+ # Mount config drive
+ - mkdir /root/config-drive
+ - mount /dev/sr0 /root/config-drive
+
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ #- sudo ifdown ens3
+ #- sudo ip r d default || true # remove existing default route to get it from dhcp
+ #- sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 16G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ # Run user data script from config drive
+ - ifdown --force ens3; ifconfig ens3 down; ip a flush dev ens3; rm -f /var/run/network/ifstate.ens3; ip l set down ens3
+ - ifdown --force ens4; ifconfig ens4 down; ip a flush dev ens4; rm -f /var/run/network/ifstate.ens4; ip l set down ens4
+ - rm -f /etc/network/interfaces
+ #- ifdown --force ens5; ifconfig ens5 down; ip a flush dev ens5; rm -f /var/run/network/ifstate.ens5
+ #- cp /root/config-drive/user-data /root/user-data
+ #- sed -i '/^reboot$/d' /root/user-data
+ #- set -x; cd /root && /bin/bash -xe ./user-data
+ - set -x; cd /root/config-drive && /bin/bash -xe ./user-data
+
+ #- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+
+ # Enable root access (after reboot)
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ #- service sshd stop
+
+ ########################################################
+ # Node is ready, allow SSH access
+ - touch /is_cloud_init_finished
+ #- reboot
+ ########################################################
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ #- path: /etc/network/interfaces
+ - path: /root/interfaces
+ content: |
+ auto lo
+ iface lo inet loopback
+
+ auto ens3
+ iface ens3 inet dhcp
+
+ - path: /root/.ssh/config
+ owner: root:root
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 60
+ ServerAliveCountMax 0
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data1604-swp.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data1604-swp.yaml
new file mode 100644
index 0000000..319c007
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data1604-swp.yaml
@@ -0,0 +1,59 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 16G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data1604.yaml
new file mode 100644
index 0000000..b1b6430
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay--user-data1604.yaml
@@ -0,0 +1,59 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ #- fallocate -l 16G /swapfile
+ #- chmod 600 /swapfile
+ #- mkswap /swapfile
+ #- swapon /swapfile
+ #- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml
new file mode 100644
index 0000000..921be9b
--- /dev/null
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/underlay.yaml
@@ -0,0 +1,538 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-cicd-k8s-calico') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
+
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
+
+# VIP hostnames referenced in the address pools below
+{% set HOSTNAME_CID = os_env('HOSTNAME_CID', 'cid.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL = os_env('HOSTNAME_CTL', 'ctl.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM = os_env('HOSTNAME_KVM', 'kvm.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG = os_env('HOSTNAME_LOG', 'log.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON = os_env('HOSTNAME_MON', 'mon.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR = os_env('HOSTNAME_MTR', 'mtr.' + DOMAIN_NAME) %}
+
+{% import 'cookied-cicd-k8s-calico/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-cicd-k8s-calico/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-cicd-k8s-calico/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+{% import 'cookied-cicd-k8s-calico/underlay--user-data1604-swp.yaml' as CLOUDINIT_USER_DATA_1604_SWP with context %}
+
+---
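+# The aliases below define YAML anchors (&name) that the node definitions reference via *name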
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
+ - &cloudinit_user_data_1604_swp {{ CLOUDINIT_USER_DATA_1604_SWP }}
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', LAB_CONFIG_NAME + '_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+ address_pools:
+ private-pool01:
+ net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+ params:
+ ip_reserved:
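+          # '+N' / '-N' entries are offsets from the start / end of the generated subnet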
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +15
+
+ default_{{ HOSTNAME_CID }}: +90
+ default_{{ HOSTNAME_CID01 }}: +91
+ default_{{ HOSTNAME_CID02 }}: +92
+ default_{{ HOSTNAME_CID03 }}: +93
+
+ ip_ranges:
+ dhcp: [+90, -10]
+
+ admin-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +15
+
+ default_{{ HOSTNAME_CID }}: +90
+ default_{{ HOSTNAME_CID01 }}: +91
+ default_{{ HOSTNAME_CID02 }}: +92
+ default_{{ HOSTNAME_CID03 }}: +93
+
+ default_{{ HOSTNAME_CTL }}: +10
+ default_{{ HOSTNAME_CTL01 }}: +11
+ default_{{ HOSTNAME_CTL02 }}: +12
+ default_{{ HOSTNAME_CTL03 }}: +13
+ default_{{ HOSTNAME_CMP01 }}: +101
+ default_{{ HOSTNAME_CMP02 }}: +102
+ default_{{ HOSTNAME_LOG }}: +60
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MON }}: +70
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_MTR }}: +85
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_PRX01 }}: +222
+ default_{{ HOSTNAME_KVM }}: +240
+ default_{{ HOSTNAME_KVM01 }}: +241
+ default_{{ HOSTNAME_KVM02 }}: +242
+ default_{{ HOSTNAME_KVM03 }}: +243
+
+ ip_ranges:
+ dhcp: [+90, -10]
+
+ tenant-pool01:
+ net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +15
+
+ default_{{ HOSTNAME_CID }}: +90
+ default_{{ HOSTNAME_CID01 }}: +91
+ default_{{ HOSTNAME_CID02 }}: +92
+ default_{{ HOSTNAME_CID03 }}: +93
+
+ ip_ranges:
+ dhcp: [+10, -10]
+
+ external-pool01:
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +15
+
+ default_{{ HOSTNAME_CID }}: +90
+ default_{{ HOSTNAME_CID01 }}: +91
+ default_{{ HOSTNAME_CID02 }}: +92
+ default_{{ HOSTNAME_CID03 }}: +93
+
+ ip_ranges:
+ dhcp: [+10, -10]
+
+
+ groups:
+ - name: default
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: {{ os_env('CONNECTION_STRING', 'qemu:///system') }}
+ storage_pool_name: {{ os_env('STORAGE_POOL_NAME', 'default') }}
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: {{ os_env('DRIVER_USE_HOST_CPU', true) }}
+ use_hugepages: {{ os_env('DRIVER_USE_HUGEPAGES', false) }}
+
+ network_pools:
+ admin: admin-pool01
+ private: private-pool01
+ tenant: tenant-pool01
+ external: external-pool01
+
+ l2_network_devices:
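+      # admin: NAT with DHCP (deploy network); private: routed, no DHCP (control network); external: NAT, no DHCP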
+ admin:
+ address_pool: admin-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
+ private:
+ address_pool: private-pool01
+ dhcp: false
+ forward:
+ mode: route
+
+ external:
+ address_pool: external-pool01
+ dhcp: false
+ forward:
+ mode: nat
+
+
+ group_volumes:
+ - name: cfg01_day01_image # Pre-configured day01 image
+ source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01') }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+ format: qcow2
+ - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes initially based on kvm nodes.
+                             # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
+ # or
+ # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ source_image: {{ os_env('MCP_IMAGE_PATH1604') }}
+ format: qcow2
+
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: {{ os_env('CFG_NODE_CPU', 3) }}
+ memory: {{ os_env('CFG_NODE_MEMORY', 8192) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
+ backing_store: cfg01_day01_image
+ format: qcow2
+ - name: config
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ # source_image: !os_env CFG01_CONFIG_PATH # no source image required.
+ # it will be uploaded after config drive generation
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_cfg01
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+
+ - name: {{ HOSTNAME_KVM01 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
+ memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
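+        # interfaces and network_config are defined once here and reused by the other nodes via YAML anchors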
+ interfaces: &interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config: &network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+
+ - name: {{ HOSTNAME_KVM02 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
+ memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_KVM03 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('KVM_NODE_CPU', 1) }}
+ memory: {{ os_env('KVM_NODE_MEMORY', 1024) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('KVM_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CID01 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+ memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CID02 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+ memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CID03 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+ memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_swp
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CTL01 }}
+ role: k8s_controller
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CTL02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CTL03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CMP01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CMP02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml
new file mode 100644
index 0000000..4642e7c
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico-sl.yaml
@@ -0,0 +1,38 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-cicd-k8s-calico-sl' %}
+# Name of the context file (without the fixed '.yaml' extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-k8s-sl.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment-context-k8s-sl.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+- description: Re-install all the formulas
+ cmd: |
+ set -e;
+ apt-get install -y salt-formula-*
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround: set static control addresses for the compute nodes"
+ cmd: |
+ set -e;
+ . /root/venv-reclass-tools/bin/activate;
+    # Workaround for compute nodes: auto-registration cannot be used without an external address inventory
+ reclass-tools add-key parameters._param.kubernetes_compute_node01_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.101 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
+ reclass-tools add-key parameters._param.kubernetes_compute_node02_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.102 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml
new file mode 100644
index 0000000..ceace31
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-cicd-k8s-calico.yaml
@@ -0,0 +1,38 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-cicd-k8s-calico' %}
+# Name of the context file (without the fixed '.yaml' extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'cookiecutter-context-k8s.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['environment-context-k8s.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+- description: Re-install all the formulas
+ cmd: |
+ set -e;
+ apt-get install -y salt-formula-*
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround: set static control addresses for the compute nodes"
+ cmd: |
+ set -e;
+ . /root/venv-reclass-tools/bin/activate;
+    # Workaround for compute nodes: auto-registration cannot be used without an external address inventory
+ reclass-tools add-key parameters._param.kubernetes_compute_node01_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.101 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
+ reclass-tools add-key parameters._param.kubernetes_compute_node02_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.102 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/kubernetes/init.yml;
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index adfe43f..061308f 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -379,15 +379,16 @@
# 192.168.10 -> 10.16.0 (generated network for admin)
# 10.16.0 -> <external network>
# So let's replace constant networks to the keywords, and then keywords to the desired networks.
- sed -i 's/10\.167\.5/==IPV4_NET_ADMIN_PREFIX==/g' {{ CLUSTER_CONTEXT_PATH }}
- sed -i 's/10\.167\.4/==IPV4_NET_CONTROL_PREFIX==/g' {{ CLUSTER_CONTEXT_PATH }}
- sed -i 's/10\.167\.6/==IPV4_NET_TENANT_PREFIX==/g' {{ CLUSTER_CONTEXT_PATH }}
- sed -i 's/172\.17\.16/==IPV4_NET_EXTERNAL_PREFIX==/g' {{ CLUSTER_CONTEXT_PATH }}
+ export REPLACE_DIRS="{{ CLUSTER_CONTEXT_PATH }} /tmp/cookiecutter-templates"
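+      # run the replacements over the cookiecutter templates too, so the generated product models also get the lab networks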
+ find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.167\.5/==IPV4_NET_ADMIN_PREFIX==/g' {} +
+ find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.167\.4/==IPV4_NET_CONTROL_PREFIX==/g' {} +
+ find ${REPLACE_DIRS} -type f -exec sed -i 's/10\.167\.6/==IPV4_NET_TENANT_PREFIX==/g' {} +
+ find ${REPLACE_DIRS} -type f -exec sed -i 's/172\.17\.16/==IPV4_NET_EXTERNAL_PREFIX==/g' {} +
- sed -i 's/==IPV4_NET_ADMIN_PREFIX==/{{ IPV4_NET_ADMIN_PREFIX }}/g' {{ CLUSTER_CONTEXT_PATH }}
- sed -i 's/==IPV4_NET_CONTROL_PREFIX==/{{ IPV4_NET_CONTROL_PREFIX }}/g' {{ CLUSTER_CONTEXT_PATH }}
- sed -i 's/==IPV4_NET_TENANT_PREFIX==/{{ IPV4_NET_TENANT_PREFIX }}/g' {{ CLUSTER_CONTEXT_PATH }}
- sed -i 's/==IPV4_NET_EXTERNAL_PREFIX==/{{ IPV4_NET_EXTERNAL_PREFIX }}/g' {{ CLUSTER_CONTEXT_PATH }}
+ find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_ADMIN_PREFIX==/{{ IPV4_NET_ADMIN_PREFIX }}/g' {} +
+ find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_CONTROL_PREFIX==/{{ IPV4_NET_CONTROL_PREFIX }}/g' {} +
+ find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_TENANT_PREFIX==/{{ IPV4_NET_TENANT_PREFIX }}/g' {} +
+ find ${REPLACE_DIRS} -type f -exec sed -i 's/==IPV4_NET_EXTERNAL_PREFIX==/{{ IPV4_NET_EXTERNAL_PREFIX }}/g' {} +
{% set items = CLUSTER_PRODUCT_MODELS or '$(ls /tmp/cookiecutter-templates/cluster_product/)' %}
for item in {{ items }}; do
diff --git a/tcp_tests/tests/environment/test_local_dns.py b/tcp_tests/tests/environment/test_local_dns.py
index fcc0978..dfd29a1 100644
--- a/tcp_tests/tests/environment/test_local_dns.py
+++ b/tcp_tests/tests/environment/test_local_dns.py
@@ -25,3 +25,7 @@
@pytest.mark.fail_snapshot
def test_install_local_dns(self, config, underlay):
LOG.info("*************** DONE **************")
+
+ @pytest.mark.fail_snapshot
+ def test_install_local_salt(self, config, underlay, salt_deployed):
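+        # requesting the 'salt_deployed' fixture bootstraps the Salt cluster; used by deploy-and-test.groovy via TEST_GROUP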
+ LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_calico.py b/tcp_tests/tests/system/test_calico.py
index 0dc7265..484aec5 100644
--- a/tcp_tests/tests/system/test_calico.py
+++ b/tcp_tests/tests/system/test_calico.py
@@ -52,6 +52,7 @@
@pytest.mark.fail_snapshot
@pytest.mark.calico_ci
@pytest.mark.cz8116
+ @pytest.mark.k8s_calico
def test_calico_route_recovery(self, show_step, config, underlay,
k8s_deployed):
"""Test for deploying k8s environment with Calico plugin and check
diff --git a/tcp_tests/tests/system/test_install_k8s.py b/tcp_tests/tests/system/test_install_k8s.py
index e0e88cf..ac4d1d1 100644
--- a/tcp_tests/tests/system/test_install_k8s.py
+++ b/tcp_tests/tests/system/test_install_k8s.py
@@ -27,6 +27,7 @@
@pytest.mark.grab_versions
@pytest.mark.fail_snapshot
@pytest.mark.cz8116
+ @pytest.mark.k8s_calico_sl
def test_k8s_install_calico_lma(self, config, show_step,
k8s_deployed, k8s_actions,
sl_deployed, sl_actions):
@@ -193,6 +194,7 @@
@pytest.mark.grab_versions
@pytest.mark.fail_snapshot
@pytest.mark.cz8116
+ @pytest.mark.k8s_calico
def test_only_k8s_install(self, config, show_step,
k8s_deployed, k8s_actions, k8s_logs):
"""Test for deploying MCP environment with k8s and check it
diff --git a/tcp_tests/tests/system/test_k8s_actions.py b/tcp_tests/tests/system/test_k8s_actions.py
index ab57efc..2ebc010 100644
--- a/tcp_tests/tests/system/test_k8s_actions.py
+++ b/tcp_tests/tests/system/test_k8s_actions.py
@@ -27,6 +27,7 @@
@pytest.mark.grab_versions
@pytest.mark.fail_snapshot
@pytest.mark.cz8116
+ @pytest.mark.k8s_calico
def test_k8s_externaldns_coredns(self, show_step, config, k8s_deployed):
"""Test externaldns integration with coredns