Merge "Add bm cookied template for k8s + contrail"
diff --git a/tcp_tests/templates/cookied-bm-contrail40/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-contrail40/lab04-physical-inventory.yaml
index c4f342f..5cb47fb 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/lab04-physical-inventory.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/lab04-physical-inventory.yaml
@@ -55,8 +55,6 @@
enp5s0f0:
role: bond0_ab_contrail
tenant_address: 192.168.0.101
- dpdk_pci: "'0000:05:00.0'"
- dpdk_mac: '90:e2:ba:19:c2:18'
enp5s0f1:
role: single_vlan_ctl
single_address: 10.167.8.101
@@ -74,37 +72,6 @@
enp5s0f0:
role: bond0_ab_contrail
tenant_address: 192.168.0.102
- dpdk_pci: "'0000:05:00.0'"
- dpdk_mac: '00:1b:21:87:21:98'
enp5s0f1:
role: single_vlan_ctl
single_address: 10.167.8.102
-
- # Physical nodes for manual testing replacing resources
- # kvm04.cookied-bm-contrail40.local:
- # reclass_storage_name: infra_kvm_node04
- # roles:
- # - infra_kvm
- # - linux_system_codename_xenial
- # interfaces:
- # enp2s0f0:
- # role: single_mgm
- # enp2s0f1:
- # role: bond0_ab_ovs_vlan_ctl
-
- # cmp003.cookied-bm-contrail40.local:
- # reclass_storage_name: openstack_compute_node03
- # roles:
- # - openstack_compute
- # - features_lvm_backend
- # - linux_system_codename_xenial
- # interfaces:
- # enp2s0f0:
- # role: single_mgm
- # deploy_address: 172.16.49.122
- # enp2s0f1:
- # role: bond0_ab_ctl_contrail
- # tenant_address: 192.168.0.103
- # single_address: 10.167.8.103
- # dpdk_pci: "'0000:05:00.0'"
- # dpdk_mac: '00:1b:21:87:21:98'
diff --git a/tcp_tests/templates/cookied-bm-contrail40/salt.yaml b/tcp_tests/templates/cookied-bm-contrail40/salt.yaml
index cc5b43d..1b97dc7 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/salt.yaml
@@ -1,9 +1,6 @@
{% from 'cookied-bm-contrail40/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% from 'cookied-bm-contrail40/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'cookied-bm-contrail40/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'cookied-bm-contrail40/underlay.yaml' import HOSTNAME_KVM01 with context %}
-{% from 'cookied-bm-contrail40/underlay.yaml' import HOSTNAME_KVM02 with context %}
-{% from 'cookied-bm-contrail40/underlay.yaml' import HOSTNAME_KVM03 with context %}
{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
@@ -43,15 +40,15 @@
retry: {count: 1, delay: 10}
skip_fail: false
- #- description: "WR for changing image to proposed"
- # cmd: |
- # set -e;
+- description: "WR for changing image to proposed"
+ cmd: |
+ set -e;
# Add message_queu host for opencontrail
- # . /root/venv-reclass-tools/bin/activate;
- # reclass-tools add-key parameters._param.salt_control_xenial_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
- # node_name: {{ HOSTNAME_CFG01 }}
- # retry: {count: 1, delay: 10}
- # skip_fail: false
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools add-key parameters._param.salt_control_xenial_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcp{{ SHARED.REPOSITORY_SUITE }}.qcow2' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
- description: Temporary workaround for removing cinder-volume from CTL nodes
cmd: |
@@ -127,7 +124,7 @@
- description: Hack resolv.conf on VCP nodes for internal services access
cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
+ salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
@@ -152,6 +149,10 @@
{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
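+# Assumption: the two macros below assert that the installed salt version on cfg01
+# services and on all minions matches the expected version for the repository suite.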
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
+
- description: "Lab04 workaround: Give each node root acces with key from cfg01"
cmd: |
set -e;
diff --git a/tcp_tests/templates/cookied-bm-contrail40/sl.yaml b/tcp_tests/templates/cookied-bm-contrail40/sl.yaml
index 3e07967..87651f8 100644
--- a/tcp_tests/templates/cookied-bm-contrail40/sl.yaml
+++ b/tcp_tests/templates/cookied-bm-contrail40/sl.yaml
@@ -107,11 +107,19 @@
cmd: |
if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus.collector
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
- skip_fail: true
+ skip_fail: false
+
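+# The prometheus.collector state is now applied only to nodes that actually have the
+# 'prometheus:collector' pillar, instead of running together with the exporters state above.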
+- description: Configure Prometheus collector, if pillar 'prometheus:collector' exists on any server
+ cmd: |
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:collector' match.pillar 'prometheus:collector' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:collector' state.sls prometheus.collector
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
- description: Install elasticsearch server
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
@@ -255,4 +263,4 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 3, delay: 15}
- skip_fail: true
+ skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/common-services.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/common-services.yaml
new file mode 100644
index 0000000..530a4e7
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/common-services.yaml
@@ -0,0 +1,117 @@
+{% from 'cookied-bm-dpdk-pipeline/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install support services
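+# Pattern below: stateful services (keepalived, RabbitMQ, Galera) are applied to the
+# first/master node before the rest, so the cluster forms around a known leader.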
+- description: Install keepalived on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster and *01*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Install keepalived
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Install glusterfs
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.service
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Setup glusterfs on primary controller
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check the gluster status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check the rabbitmq status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on first server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:master' state.sls galera
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on other servers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:slave' state.sls galera
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check mysql status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+- description: Install haproxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' state.sls haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check haproxy status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.status haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Restart rsyslog
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.restart rsyslog
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install memcached on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@memcached:server' state.sls memcached
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
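+# Sanity check (assumed semantics): the keepalived-managed VIP, taken from the
+# _param:openstack_control_address pillar, must appear in 'ip a' on one of the cluster nodes.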
+- description: Check the VIP
+ cmd: |
+ OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
+ echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 10}
+ skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-cookiecutter-openstack_ovs_dpdk.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
new file mode 100644
index 0000000..72c8bf4
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-cookiecutter-openstack_ovs_dpdk.yaml
@@ -0,0 +1,200 @@
+default_context:
+ backup_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEogIBAAKCAQEAvDqzt/PHWvSSJrBtvD3UWp21CDrAajgOPeXrVm7VU+sDDpw0
+ YqDvVhYfT/N6ha+SWOK00KyTuMMbB8/I4tvsP9vvCXy7v2AJID/ZO2z/t8KfTDEJ
+ C75/6a0UBg6sl3i7+cUOHbeK+lkcfdnSI1H8Jzdhk4Xj7t7q+MIKTs5n+AlScgyz
+ NSiD2nG/U5LmaQ+rjZ1VsF9J0YTds0yLDF3hztVoDTs7j5obl7Xab3ZlwalgH5Gc
+ Id6BI09jkUbppdPzHpzV2oad7cFpVYTt9m3/MMT0amzPuwl/u/dI64rRuWPe60eT
+ qeVMQD0zP6o9F79upbzQStt82lPJcfF4CXvxYwIDAQABAoIBAAHUXDzUfMKQj/8a
+ RebHfxHmaIUM9SPTKahGXNQ5PY+UQDJbKFMxF0Jx8pn3VuCHxVdh1LoWg1UPaGra
+ BSzXUGOKgrdH5BdHGq+aj0T5mT6zAJNgAqN/lYSy7vfkGp9aSBF0bd+yEgK+7Pz4
+ Kge320iSTDt/2KhQuF30nN8JOI97m2uk2YHH8TixfVtmgLPEy+0Mw4VZLsHD4OY1
+ zu8xN6co2aQR0DB0MPKD6IxH62wSOJKBzF4o5xzzy/fl0ysDZbZ8Z/5Rejvp3yNT
+ 68B0X5CM27hVdYE+/tcKGl9WKmewIf3fTZUfBcwFIObMIl9fkK/519nwFed4AfOX
+ /a2LCBECgYEA9Lyl/eyzXuU2dgs6Gw/WMobqOVnHF9wbukS1XSKdwMogtpt7Pb23
+ +32r9xHgeRDvvWwSp8lNPZ8mu77dQ6akbOuOk5C6V3Mqt4zam3DBDMtL63nKq8tq
+ LQ0PVjj8cAgu3GSDCz7htqUb44rn5tX9zlM0vrwxzyYqbet7ZbsGoYsCgYEAxORQ
+ JFn1vwag8VBw3bngx3SJ46CpCC8Gz830W7pEaTS6zTTiDC4p5sATGya91JS8l47G
+ ikP2bcWzvT6aP/u+TZSqZiqp5Kn37fx8Et+ltIl47SH+PJHIR9F9r2f9zqla6mlP
+ zcX/mTSuAJCTP4whQA3/f1wNAeBnewhK9fXCOokCgYAz6TPYSXW+giXsIfOAO/q2
+ GvHhmk5lnDVxbBOAHtCRTQ5lTVN1xCEbQgvQy0TuyQ3hAuRuHH+6u2BO4Gw0Zkto
+ IwrJ+B/eXrpH1qOj5uW73f9Lgjjf+bSau7NuGYZKCSJPcy5smzjrMdhZimQoDWnZ
+ csK0VlzGUJUdXZ599I6ygwKBgGTf+LN3J7H0Snb4WKsw9Zoa+h6WjKO1vE6xXVW1
+ rCEes+o5Autsp2ki1WcexTlp7unTa6MhSNta5Ei8Dzli2FBVL6xihWKzNmRG7Kaa
+ 0QIbQMp1lRUhN7Sb/0HkDKRaHktlI07w95Bd7hw59kcjm1F/Gnz9A2kHuNzPFeDI
+ RffJAoGAdeCID5sb0oHEHTIxxB+cgfaiyaAe9qrW2INNWLVn5OTDh6cidatnWAor
+ M/SxwNoiYcCpi869q7wzjw5gNOVoNJbmwzDA7s+lgjTPQpq2jmO6RtweKbYoN5Zw
+ ++LiD3r07TD3p2QAyeooT29D/d6/2Hd6oyTJcZWIQTN+MTcXQO4=
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC8OrO388da9JImsG28PdRanbUIOsBqOA495etWbtVT6wMOnDRioO9WFh9P83qFr5JY4rTQrJO4wxsHz8ji2+w/2+8JfLu/YAkgP9k7bP+3wp9MMQkLvn/prRQGDqyXeLv5xQ4dt4r6WRx92dIjUfwnN2GThePu3ur4wgpOzmf4CVJyDLM1KIPacb9TkuZpD6uNnVWwX0nRhN2zTIsMXeHO1WgNOzuPmhuXtdpvdmXBqWAfkZwh3oEjT2ORRuml0/MenNXahp3twWlVhO32bf8wxPRqbM+7CX+790jritG5Y97rR5Op5UxAPTM/qj0Xv26lvNBK23zaU8lx8XgJe/Fj
+ bmk_enabled: 'False'
+ ceph_enabled: 'False'
+ cicd_control_node01_address: 10.167.11.91
+ cicd_control_node01_hostname: cid01
+ cicd_control_node02_address: 10.167.11.92
+ cicd_control_node02_hostname: cid02
+ cicd_control_node03_address: 10.167.11.93
+ cicd_control_node03_hostname: cid03
+ cicd_control_vip_address: 10.167.11.90
+ cicd_control_vip_hostname: cid
+ cicd_enabled: 'True'
+ cicd_private_key: |-
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAshiE2vK11KH1/PHO9v5IcT1ol3kuAorv6IgW+1paT9w4pFnd
+ H2DHQxTJsZ629cig+ELVAKHQnkND2U++/DM20ai5ZfpOwlvd+dL95/FbGb62Ozxx
+ kxBjyc/Bbbs8LcZtS1SN+agdkjQG1StpckUbFppoJ9nzWgnEcdYdonQ6aThgd+YL
+ rAOX04s3cMlCflClQl3lGFo24Qdhk/Y4M5rodfqfD5NOSKEhYP/dTMunri8zB5bU
+ ifvOvCWUKUOxLjkx95raY82xMHUobMYk87RcLPcq8pyz96/FPoiLqxM1oznTKNiI
+ 0bW0xjf7FFjfLCjTapKZPRz8+Wkvzmzj35LLrwIDAQABAoIBADJoGCo0Kdy93nay
+ JgboX66VV+YPaUNU+aQR6JdJsmgKB4oU2S4JYTyORKveJSCZoV3C5LCiG/6/QRPf
+ q0mMYUaj/51qZCJEiCYuXqjoOgWmYcOQTwD10ZiMEc4yAU1fbQ22J9zyhTQdP5XU
+ DKtH/eu+1h35ZRQl0ZD6rjaNuP6QekJM6IVCC7XBaCG5+wSER9R25HbbDhdb7CwH
+ W1GP9IgISqy9Z3f4PQOyCUmn/O99lN8kry6ui7bCywRfITV6C+pn02DpMgzKZ8jn
+ 3yts1f2mIbYVxnahtCaI3QTag6wBsnFq+U0uIXrUGMeeRzg9N1Ur01qdJpIR9g0v
+ Nt7QUZkCgYEA4lEavsFitSll/33JY4q82nYpjXAxTON3xraUqNYn5Cde06gNi8n1
+ t9TCLUqDhSpvgEOyJE/uwo5LAj79Ce2EwLkCttNggqRXBlY5ZpljwfWmxZtuGm/z
+ BJaOtkaK/1diR/+Qn/fTMyPH5JIXuQ6/XF60W4MSbzPgY4GO1BDx+G0CgYEAyXRT
+ 00GDdiXbxQmzeHTO9Bg5y36Y1FEWDLnc89bpHPTR4sT/XCczerevy/l8jsdzZlnu
+ 5ZddfWMF7EGNo51Zbmi0oLQ7nzigoVFcnhFHRFoCP36T9mvJk7O8Ao3ttpl/J2r0
+ mFiaKi0lhmZVbNpmliKjWAMZJyt6I7AfYekcOwsCgYEA0W8MuQptNgkhgtX80ElL
+ iz9eJK12chjuds3vtG66a8CjWGtkXcB/y6bwKsmR/GHQ3XnIGSJ/aTwU3fc8YzuS
+ ZmbPxDDIVx2OCycv52p7jrqtoqC7u2tuEQji+Hs/lhxfrxEp3V+R6vlpunQX0AF9
+ xRU/ApDBNndjZ7I1YrprseECgYA+zx8HgaiMIJeZokGrb7fKkPcMBCeAItveEeDa
+ wYmito3txv/a6nn5a+XKkbmNBpBrO+To/j1ux33kQDf56Cgm7UxLwoXISa6DPUvE
+ GJ0AqZOD2mIldUu+2k3m+ftAcDEdyBIEobNHLRZDBgriSmGrs5b77NNdzAdjsxjF
+ vRlJKwKBgD8DcP/C9pABC2mRQyH//RTk6XZfiDY0L18lwH7acEdHlJiF1PTwvIHD
+ cj1nMyG2MxEiSt1E5O/YQ4Lo3sognFIb8keu7IYxEgLXhvWFR3RwaYCjrF4ZGfD2
+ +83eUFPZQvEwTY/8OCogzJQfs1CT8+pLdO9tZQbrAaxfmF6c48KN
+ -----END RSA PRIVATE KEY-----
+ cicd_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCyGITa8rXUofX88c72/khxPWiXeS4Ciu/oiBb7WlpP3DikWd0fYMdDFMmxnrb1yKD4QtUAodCeQ0PZT778MzbRqLll+k7CW9350v3n8VsZvrY7PHGTEGPJz8Ftuzwtxm1LVI35qB2SNAbVK2lyRRsWmmgn2fNaCcRx1h2idDppOGB35gusA5fTizdwyUJ+UKVCXeUYWjbhB2GT9jgzmuh1+p8Pk05IoSFg/91My6euLzMHltSJ+868JZQpQ7EuOTH3mtpjzbEwdShsxiTztFws9yrynLP3r8U+iIurEzWjOdMo2IjRtbTGN/sUWN8sKNNqkpk9HPz5aS/ObOPfksuv
+ cluster_domain: cookied-bm-dpdk-pipeline.local
+ cluster_name: cookied-bm-dpdk-pipeline
+ compute_bond_mode: balance-slb
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ context_seed: zEFbUBMME6LFdiL0rJWFgHMdQGgywnDSE9vFYvHgEBeYHb4QJsDl3HqpdaTgqYlF
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 10.167.11.0/24
+ control_vlan: '2416'
+ cookiecutter_template_branch: master
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates.git
+ deploy_network_gateway: 172.16.49.62
+ deploy_network_netmask: 255.255.255.192
+ deploy_network_subnet: 172.16.49.0/26
+ deployment_type: physical
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
+ email_address: obutenko@mirantis.com
+ gateway_primary_first_nic: eth1
+ gateway_primary_second_nic: eth2
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: 10.167.11.241
+ infra_kvm01_deploy_address: 172.16.49.11
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 10.167.11.242
+ infra_kvm02_deploy_address: 172.16.49.12
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 10.167.11.243
+ infra_kvm03_deploy_address: 172.16.49.13
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: 10.167.11.240
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ kubernetes_enabled: 'False'
+ local_repositories: 'False'
+ maas_deploy_address: 172.16.49.15
+ maas_hostname: cfg01
+ mcp_version: testing
+ offline_deployment: 'False'
+ opencontrail_enabled: 'False'
+ openldap_domain: deploy-name.local
+ openldap_enabled: 'True'
+ openldap_organisation: ${_param:cluster_name}
+ openstack_benchmark_node01_address: 10.167.11.95
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_cluster_size: compact
+ openstack_compute_count: '3'
+ openstack_compute_rack01_hostname: cmp
+ openstack_compute_rack01_single_subnet: 10.167.11
+ openstack_compute_rack01_tenant_subnet: 10.167.12
+ openstack_control_address: 10.167.11.10
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: 10.167.11.11
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: 10.167.11.12
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: 10.167.11.13
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: 10.167.11.50
+ openstack_database_hostname: dbs
+ openstack_database_node01_address: 10.167.11.51
+ openstack_database_node01_hostname: dbs01
+ openstack_database_node02_address: 10.167.11.52
+ openstack_database_node02_hostname: dbs02
+ openstack_database_node03_address: 10.167.11.53
+ openstack_database_node03_hostname: dbs03
+ openstack_enabled: 'True'
+ openstack_gateway_node01_address: 10.167.11.224
+ openstack_gateway_node01_hostname: gtw01
+ openstack_gateway_node01_tenant_address: 10.167.12.6
+ openstack_gateway_node02_address: 10.167.11.225
+ openstack_gateway_node02_hostname: gtw02
+ openstack_gateway_node02_tenant_address: 10.167.12.7
+ openstack_gateway_node03_address: 10.167.11.226
+ openstack_gateway_node03_hostname: gtw03
+ openstack_gateway_node03_tenant_address: 10.167.12.8
+ openstack_message_queue_address: 10.167.11.40
+ openstack_message_queue_hostname: msg
+ openstack_message_queue_node01_address: 10.167.11.41
+ openstack_message_queue_node01_hostname: msg01
+ openstack_message_queue_node02_address: 10.167.11.42
+ openstack_message_queue_node02_hostname: msg02
+ openstack_message_queue_node03_address: 10.167.11.43
+ openstack_message_queue_node03_hostname: msg03
+ openstack_network_engine: ovs
+ openstack_neutron_qos: 'False'
+ openstack_neutron_vlan_aware_vms: 'False'
+ openstack_nfv_dpdk_enabled: 'True'
+ openstack_nfv_sriov_enabled: 'True'
+ openstack_nova_compute_hugepages_count: '16'
+ openstack_nova_compute_nfv_req_enabled: 'True'
+ openstack_nfv_sriov_network: physnet1
+ openstack_nfv_sriov_numvfs: '7'
+ openstack_nfv_sriov_pf_nic: enp5s0f1
+ openstack_nova_cpu_pinning: 6,7,8,9,10,11
+ openstack_ovs_dvr_enabled: 'False'
+ openstack_ovs_encapsulation_type: vxlan
+ openstack_proxy_address: 10.167.11.80
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: 10.167.11.81
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: 10.167.11.82
+ openstack_proxy_node02_hostname: prx02
+ openstack_upgrade_node01_address: 10.167.11.19
+ openstack_version: pike
+ cinder_version: ${_param:openstack_version}
+ oss_enabled: 'False'
+ oss_node03_address: ${_param:stacklight_monitor_node03_address}
+ platform: openstack_enabled
+ public_host: ${_param:openstack_proxy_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ salt_api_password: HlcaUHzUnsWsg62uhF8ua5KEbqRbzijz
+ salt_api_password_hash: $6$qdIFillN$XnzP7oIXRcbroVch7nlthyrSekjKlWND8q2MtoMF3Wz2ymepjAOjyqpyR55nmbH9OQzS8EcQJ6sfr5hWKDesV1
+ salt_master_address: 10.167.11.2
+ salt_master_hostname: cfg01
+ salt_master_management_address: 172.16.49.2
+ shared_reclass_branch: master
+ shared_reclass_url: https://gerrit.mcp.mirantis.net/salt-models/reclass-system.git
+ stacklight_enabled: 'False'
+ stacklight_version: '2'
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: 10.167.12.1
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 10.167.12.0/24
+ tenant_vlan: '2417'
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'True'
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-environment.yaml
new file mode 100644
index 0000000..ff8340b
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-environment.yaml
@@ -0,0 +1,111 @@
+nodes:
+ cfg01.cookied-bm-dpdk-pipeline.local:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_static_ctl
+
+ # Physical nodes
+ kvm01.cookied-bm-dpdk-pipeline.local:
+ reclass_storage_name: infra_kvm_node01
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp3s0f0:
+ role: single_mgm
+ enp3s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+
+ kvm02.cookied-bm-dpdk-pipeline.local:
+ reclass_storage_name: infra_kvm_node02
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp3s0f0:
+ role: single_mgm
+ enp3s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+
+ kvm03.cookied-bm-dpdk-pipeline.local:
+ reclass_storage_name: infra_kvm_node03
+ roles:
+ - infra_kvm
+ - linux_system_codename_xenial
+ interfaces:
+ enp3s0f0:
+ role: single_mgm
+ enp3s0f1:
+ role: bond0_ab_ovs_vlan_ctl
+
+ cmp01.cookied-bm-dpdk-pipeline.local:
+ reclass_storage_name: openstack_compute_node01
+ roles:
+ - openstack_compute_dpdk
+ - features_lvm_backend
+ - linux_system_codename_xenial
+ - openstack_compute_sriov
+ interfaces:
+ enp5s0f0:
+ role: combined_vlan_ctl_mgm
+ single_address: 10.167.11.105
+ enp3s0f0:
+ role: bond_dpdk_prv_lacp
+ dpdk_pci: "0000:03:00.0"
+ tenant_address: 10.167.12.105
+ enp3s0f1:
+ role: bond_dpdk_prv_lacp
+ dpdk_pci: "0000:03:00.1"
+ enp5s0f1:
+ role: sriov
+
+ cmp02.cookied-bm-dpdk-pipeline.local:
+ reclass_storage_name: openstack_compute_node02
+ roles:
+ - openstack_compute_dpdk
+ - features_lvm_backend
+ - linux_system_codename_xenial
+ - openstack_compute_sriov
+ interfaces:
+ enp5s0f0:
+ role: combined_vlan_ctl_mgm
+ single_address: 10.167.11.106
+ enp3s0f0:
+ role: bond_dpdk_prv_lacp
+ dpdk_pci: "0000:03:00.0"
+ tenant_address: 10.167.12.106
+ enp3s0f1:
+ role: bond_dpdk_prv_lacp
+ dpdk_pci: "0000:03:00.1"
+ enp5s0f1:
+ role: sriov
+
+ gtw01.cookied-bm-dpdk-pipeline.local:
+ reclass_storage_name: openstack_gateway_node01
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ enp3s0f0:
+ role: single_mgm
+ deploy_address: 172.16.49.5
+ enp3s0f1:
+ role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+
+ gtw02.cookied-bm-dpdk-pipeline.local:
+ reclass_storage_name: openstack_gateway_node02
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ enp3s0f0:
+ role: single_mgm
+ deploy_address: 172.16.49.4
+ enp3s0f1:
+ role: bond0_ab_dvr_vxlan_ctl_mesh_floating
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-vcp-environment.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-vcp-environment.yaml
new file mode 100644
index 0000000..cec7902
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt-context-vcp-environment.yaml
@@ -0,0 +1,175 @@
+nodes:
+ ctl01.cookied-bm-dpdk-pipeline.local:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - openstack_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ctl02.cookied-bm-dpdk-pipeline.local:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ ctl03.cookied-bm-dpdk-pipeline.local:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - openstack_control
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ dbs01.cookied-bm-dpdk-pipeline.local:
+ reclass_storage_name: openstack_database_node01
+ roles:
+ - openstack_database_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ dbs02.cookied-bm-dpdk-pipeline.local:
+ reclass_storage_name: openstack_database_node02
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ dbs03.cookied-bm-dpdk-pipeline.local:
+ reclass_storage_name: openstack_database_node03
+ roles:
+ - openstack_database
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ msg01.cookied-bm-dpdk-pipeline.local:
+ reclass_storage_name: openstack_message_queue_node01
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ msg02.cookied-bm-dpdk-pipeline.local:
+ reclass_storage_name: openstack_message_queue_node02
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ msg03.cookied-bm-dpdk-pipeline.local:
+ reclass_storage_name: openstack_message_queue_node03
+ roles:
+ - openstack_message_queue
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ prx01.cookied-bm-dpdk-pipeline.local:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ prx02.cookied-bm-dpdk-pipeline.local:
+ reclass_storage_name: openstack_proxy_node02
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+# mtr01.cookied-bm-dpdk-pipeline.local:
+# reclass_storage_name: stacklight_telemetry_node01
+# roles:
+# - stacklight_telemetry
+# - linux_system_codename_xenial
+# interfaces:
+# ens3:
+# role: single_ctl
+
+# mtr02.cookied-bm-dpdk-pipeline.local:
+# reclass_storage_name: stacklight_telemetry_node02
+# roles:
+# - stacklight_telemetry
+# - linux_system_codename_xenial
+# interfaces:
+# ens3:
+# role: single_ctl
+
+# mtr03.cookied-bm-dpdk-pipeline.local:
+# reclass_storage_name: stacklight_telemetry_node03
+# roles:
+# - stacklight_telemetry
+# - linux_system_codename_xenial
+# interfaces:
+# ens3:
+# role: single_ctl
+
+ cid01.cookied-bm-dpdk-pipeline.local:
+ reclass_storage_name: cicd_control_node01
+ roles:
+ - cicd_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ cid02.cookied-bm-dpdk-pipeline.local:
+ reclass_storage_name: cicd_control_node02
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ cid03.cookied-bm-dpdk-pipeline.local:
+ reclass_storage_name: cicd_control_node03
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt.yaml
new file mode 100644
index 0000000..6a702b8
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/salt.yaml
@@ -0,0 +1,183 @@
+{% from 'cookied-bm-dpdk-pipeline/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-bm-dpdk-pipeline/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'cookied-bm-dpdk-pipeline/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'cookied-bm-dpdk-pipeline/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-bm-dpdk-pipeline/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-bm-dpdk-pipeline/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-baremetal-lab') %}
+# See shared-salt.yaml for other salt model repository parameters
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "runtest" "maas" "jenkins" "glusterfs" "backupninja" "auditd"') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
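+# Note (assumption): MACRO_NETWORKING_WORKAROUNDS() renders a step's 'description' and
+# 'cmd'; the node_name/retry/skip_fail fields below complete that generated step.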
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+ cmd: |
+ sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
+ sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
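+# Workaround context: DPDK-backed OVS needs hugepage memory before it starts; the step
+# below pre-mounts 1G hugepages and reserves 16 pages, matching
+# openstack_nova_compute_hugepages_count: '16' in the cookiecutter context.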
+- description: WR for mounting 1G hugepages before linux.state
+ cmd: |
+ salt 'cmp*' state.sls linux.system.hugepages;
+ salt 'cmp*' cmd.run "mount -o mode=775,pagesize=1G -t hugetlbfs Hugetlbfs-kvm /mnt/hugepages_1G";
+ salt 'cmp*' cmd.run "echo 16 | sudo tee /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: WR for correct access to git repo from jenkins on cfg01 node
+ cmd: |
+ git clone --mirror https://github.com/Mirantis/mk-pipelines.git /home/repo/mk/mk-pipelines/;
+ git clone --mirror https://github.com/Mirantis/pipeline-library.git /home/repo/mcp-ci/pipeline-library/;
+ chown -R git:www-data /home/repo/mk/mk-pipelines/*;
+ chown -R git:www-data /home/repo/mcp-ci/pipeline-library/*;
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+#########################################
+# Configure all running salt minion nodes
+#########################################
+
+- description: Refresh pillars on all minions
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Sync all salt resources
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all && sleep 5
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Show reclass-salt --top for generated nodes
+ cmd: reclass-salt --top -u /srv/salt/reclass/nodes/_generated/
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
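+# Workaround (assumed purpose): static /etc/hosts records let the cmp hostnames resolve
+# on every minion before DNS for the lab domain exists.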
+- description: Add cmp nodes to /etc/hosts
+ cmd: |
+ salt --hard-crash --state-output=mixed --state-verbose=False -C '*' cmd.run "echo '10.167.11.105 cmp01.cookied-bm-dpdk-pipeline.local cmp01' >> /etc/hosts";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C '*' cmd.run "echo '10.167.11.106 cmp02.cookied-bm-dpdk-pipeline.local cmp02' >> /etc/hosts";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
+
+- description: Temporary WR
+ cmd: |
+ ssh-keyscan cfg01 > /var/lib/jenkins/.ssh/known_hosts || true;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Enable Jenkins
+ cmd: |
+ systemctl enable jenkins || true;
+ systemctl restart jenkins || true;
+ sleep 5;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: run jenkins.client
+ cmd: |
+ salt-call state.sls jenkins.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: "Give each node root access with key from cfg01"
+ cmd: |
+ set -e;
+ set -x;
+ key=$(ssh-keygen -y -f /root/.ssh/id_rsa);
+ salt '*' cmd.run "echo $key >> /root/.ssh/authorized_keys";
+ salt '*' cmd.run "service sshd restart"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
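+# Assumed purpose of the 'temp WR' below: bounce br-prv so the DPDK-enabled OVS bridge
+# re-reads its interface configuration after deployment.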
+- description: temp WR
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+ 'ifdown br-prv; ifup br-prv'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
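+# Bootstrap a reference tenant topology below: a flat external provider network on
+# physnet1 (range from external-pool01, 172.17.42.128/26), a vxlan tenant network
+# with a subnet, and a router connecting them.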
+- description: Create net04_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04_ext 172.17.42.128/26 --name net04_ext__subnet --disable-dhcp --allocation-pool start=172.17.42.130,end=172.17.42.180 --gateway 172.17.42.129'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04 --provider:network_type vxlan'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04 192.168.1.0/24 --name net04__subnet --allocation-pool start=192.168.1.150,end=192.168.1.240'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create router
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-create net04_router01 --ha False'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set gateway
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Add interface
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
+
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--meta-data.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+ instance-id: iid-local1
+ hostname: {hostname}
+ local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..3f4f128
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data-cfg01.yaml
@@ -0,0 +1,79 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Block access to SSH while node is preparing
+ #- cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ - sudo ifdown ens3
+ - sudo ip r d default || true # remove existing default route to get it from dhcp
+ - sudo ifup ens3
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ ############## TCP Cloud cfg01 node ##################
+ #- sleep 120
+ - echo "Preparing base OS"
+
+ - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+ - which wget >/dev/null || (apt-get update; apt-get install -y wget);
+
+ # Install common packages
+ - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree sshuttle
+
+ ########################################################
+ # Node is ready, allow SSH access
+ #- echo "Allow SSH access ..."
+ #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ ########################################################
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
+ - path: /root/.ssh/config
+ owner: root:root
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 300
+ ServerAliveCountMax 10
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data1604-hwe.yaml
new file mode 100644
index 0000000..07a6936
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data1604-hwe.yaml
@@ -0,0 +1,100 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # # Block access to SSH while node is preparing
+ # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ - sudo ifup {interface_name}
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+
+ ############## TCP Cloud cfg01 node ##################
+ #- sleep 120
+ # - echo "Preparing base OS"
+ - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+ # - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
+ # - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+ # Configure Ubuntu mirrors
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+ # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+ # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+ # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+ # - apt-get clean
+ # - eatmydata apt-get update && apt-get -y upgrade
+
+ # Install common packages
+ # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+ # Install salt-minion and stop it until it is configured
+ # - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+ # Install latest kernel
+ # - eatmydata apt-get install -y {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }}
+
+ ########################################################
+ # Node is ready, allow SSH access
+ #- echo "Allow SSH access ..."
+ #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ # - reboot
+ ########################################################
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ # The loopback network interface
+ auto lo
+ iface lo inet loopback
+ auto {interface_name}
+ iface {interface_name} inet dhcp
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data1604.yaml
new file mode 100644
index 0000000..9168b7f
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay--user-data1604.yaml
@@ -0,0 +1,61 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ - sudo ifup {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+ - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ # The loopback network interface
+ auto lo
+ iface lo inet loopback
+ auto {interface_name}
+ iface {interface_name} inet dhcp
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay.yaml
new file mode 100644
index 0000000..612299f
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay.yaml
@@ -0,0 +1,494 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-dpdk-pipeline') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
+{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.2') %}
+{% set ETH2_IP_ADDRESS_CFG01 = os_env('ETH2_IP_ADDRESS_CFG01', '10.167.11.253') %}
+{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.11') %}
+{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.49.12') %}
+{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.49.13') %}
+{% set ETH0_IP_ADDRESS_CMP01 = os_env('ETH0_IP_ADDRESS_CMP01', '172.16.49.3') %}
+{% set ETH0_IP_ADDRESS_CMP02 = os_env('ETH0_IP_ADDRESS_CMP02', '172.16.49.31') %}
+{% set ETH0_IP_ADDRESS_GTW01 = os_env('ETH0_IP_ADDRESS_GTW01', '172.16.49.5') %}
+{% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.49.4') %}
+
+{% import 'cookied-bm-dpdk-pipeline/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-bm-dpdk-pipeline/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-bm-dpdk-pipeline/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+{% import 'cookied-bm-dpdk-pipeline/underlay--user-data1604-hwe.yaml' as CLOUDINIT_USER_DATA_1604_HWE with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
+ - &cloudinit_user_data_1604_hwe {{ CLOUDINIT_USER_DATA_1604_HWE }}
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', LAB_CONFIG_NAME + '_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
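+    # Note (assumption about fuel-devops semantics): '+N'/'-N' in ip_reserved are offsets
+    # from the network/broadcast address, e.g. 'gateway: +62' in admin-pool01 resolves to
+    # 172.16.49.62, matching deploy_network_gateway in the cookiecutter context.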
+ address_pools:
+ admin-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.0/26:26') }}
+ params:
+ ip_reserved:
+ gateway: +62
+ l2_network_device: +61
+ virtual_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
+ default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
+ default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
+ default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
+ default_{{ HOSTNAME_CMP01 }}: {{ ETH0_IP_ADDRESS_CMP01 }}
+ default_{{ HOSTNAME_CMP02 }}: {{ ETH0_IP_ADDRESS_CMP02 }}
+ default_{{ HOSTNAME_GTW01 }}: {{ ETH0_IP_ADDRESS_GTW01 }}
+ default_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
+ ip_ranges:
+ dhcp: [+2, -4]
+ private-pool01:
+ net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.11.0/24:24') }}
+ params:
+ ip_reserved:
+ virtual_{{ HOSTNAME_CFG01 }}: {{ ETH2_IP_ADDRESS_CFG01 }}
+ gateway: +1
+ l2_network_device: +1
+
+ tenant-pool01:
+ net: {{ os_env('TENANT_ADDRESS_POOL01', '10.167.12.0/24:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+
+ external-pool01:
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.128/26:26') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: -2
+
+
+ groups:
+ - name: virtual
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+ use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+ network_pools:
+ admin: admin-pool01
+
+ l2_network_devices:
+ # Ironic management interface
+ admin:
+ address_pool: admin-pool01
+ dhcp: false
+ parent_iface:
+ phys_dev: !os_env IRONIC_LAB_PXE_IFACE_0
+ private:
+ parent_iface:
+ phys_dev: !os_env CONTROL_IFACE
+
+ group_volumes:
+ - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
+ source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ format: qcow2
+ - name: cfg01_day01_image # Pre-configured day01 image
+ source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+ format: qcow2
+
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cfg01_day01_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                  # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_cfg01
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+
+
+ - name: default
+ driver:
+ name: devops_driver_ironic
+ params:
+ os_auth_token: fake-token
+ ironic_url: !os_env IRONIC_URL # URL that will be used by fuel-devops
+ # to access Ironic API
+      # Agent URL that is accessible from the deploying node when nodes
+      # are bootstrapped with PXE. Usually the PXE/provision network address is used.
+ agent_kernel_url: !os_env IRONIC_AGENT_KERNEL_URL
+ agent_ramdisk_url: !os_env IRONIC_AGENT_RAMDISK_URL
+
+ network_pools:
+ admin: admin-pool01
+
+ nodes:
+ - name: {{ HOSTNAME_KVM01 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_KVM01 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ cloud_init_iface_up: enp3s0f0 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+      # As with the agent URL, this is a URL to the image that should be
+      # used to deploy the node. It should also be accessible from the deploying
+      # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # Volume with name 'iso' will be used
+                      # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces:
+ - label: enp3s0f0
+ l2_network_device: admin
+ mac_address: !os_env ETH0_MAC_ADDRESS_KVM01
+ - label: enp3s0f1
+ mac_address: !os_env ETH1_MAC_ADDRESS_KVM01
+
+ network_config:
+ enp3s0f0:
+ networks:
+ - admin
+ bond0:
+ networks:
+ - control
+ aggregation: active-backup
+ parents:
+ - enp3s0f1
+
+ - name: {{ HOSTNAME_KVM02 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_KVM02 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ cloud_init_iface_up: enp3s0f0 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+      # As with the agent URL, this is a URL to the image that should be
+      # used to deploy the node. It should also be accessible from the deploying
+      # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # Volume with name 'iso' will be used
+                      # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces:
+ - label: enp3s0f0
+ l2_network_device: admin
+ mac_address: !os_env ETH0_MAC_ADDRESS_KVM02
+ - label: enp3s0f1
+ mac_address: !os_env ETH1_MAC_ADDRESS_KVM02
+
+ network_config:
+ enp3s0f0:
+ networks:
+ - admin
+ bond0:
+ networks:
+ - control
+ aggregation: active-backup
+ parents:
+ - enp3s0f1
+
+ - name: {{ HOSTNAME_KVM03 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_KVM03 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ cloud_init_iface_up: enp3s0f0 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+      # As with the agent URL, this is a URL to the image that should be
+      # used to deploy the node. It should also be accessible from the deploying
+      # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # Volume with name 'iso' will be used
+                      # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces:
+ - label: enp3s0f0
+ l2_network_device: admin
+ mac_address: !os_env ETH0_MAC_ADDRESS_KVM03
+ - label: enp3s0f1
+ mac_address: !os_env ETH1_MAC_ADDRESS_KVM03
+
+ network_config:
+ enp3s0f0:
+ networks:
+ - admin
+ bond0:
+ networks:
+ - control
+ aggregation: active-backup
+ parents:
+ - enp3s0f1
+
+
+ - name: {{ HOSTNAME_CMP01 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_CMP01 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ cloud_init_iface_up: enp5s0f0 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+      # As with the agent URL, this is a URL to the image that should be
+      # used to deploy the node. It should also be accessible from the deploying
+      # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # Volume with name 'iso' will be used
+                      # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_hwe
+
+ interfaces:
+ - label: enp3s0f0
+ mac_address: !os_env ETH0_MAC_ADDRESS_CMP01
+ - label: enp3s0f1
+ mac_address: !os_env ETH1_MAC_ADDRESS_CMP01
+ - label: enp5s0f0
+ l2_network_device: admin
+ mac_address: !os_env ETH2_MAC_ADDRESS_CMP01
+ - label: enp5s0f1
+ mac_address: !os_env ETH3_MAC_ADDRESS_CMP01
+ network_config:
+ enp5s0f0:
+ networks:
+ - admin
+
+ - name: {{ HOSTNAME_CMP02 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_CMP02 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ cloud_init_iface_up: enp5s0f0 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+ # As with the agent URL, this is the URL of the image that is used to
+ # deploy the node. It must also be accessible from the deploying
+ # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # The volume named 'iso' will be used
+ # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_hwe
+
+ interfaces:
+ - label: enp3s0f0
+ mac_address: !os_env ETH0_MAC_ADDRESS_CMP02
+ - label: enp3s0f1
+ mac_address: !os_env ETH1_MAC_ADDRESS_CMP02
+ - label: enp5s0f0
+ l2_network_device: admin
+ mac_address: !os_env ETH2_MAC_ADDRESS_CMP02
+ - label: enp5s0f1
+ mac_address: !os_env ETH3_MAC_ADDRESS_CMP02
+ network_config:
+ enp5s0f0:
+ networks:
+ - admin
+
+ - name: {{ HOSTNAME_GTW01 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_GTW01 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ cloud_init_iface_up: enp3s0f0 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+ # As with the agent URL, this is the URL of the image that is used to
+ # deploy the node. It must also be accessible from the deploying
+ # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # The volume named 'iso' will be used
+ # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_hwe
+
+ interfaces:
+ - label: enp3s0f0
+ l2_network_device: admin
+ mac_address: !os_env ETH0_MAC_ADDRESS_GTW01
+ - label: enp3s0f1
+ mac_address: !os_env ETH1_MAC_ADDRESS_GTW01
+
+ network_config:
+ enp3s0f0:
+ networks:
+ - admin
+ bond0:
+ networks:
+ - control
+ aggregation: active-backup
+ parents:
+ - enp3s0f1
+
+ - name: {{ HOSTNAME_GTW02 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_GTW02 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ cloud_init_iface_up: enp3s0f0 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+ # As with the agent URL, this is the URL of the image that is used to
+ # deploy the node. It must also be accessible from the deploying
+ # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # The volume named 'iso' will be used
+ # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604_hwe
+
+ interfaces:
+ - label: enp3s0f0
+ l2_network_device: admin
+ mac_address: !os_env ETH0_MAC_ADDRESS_GTW02
+ - label: enp3s0f1
+ mac_address: !os_env ETH1_MAC_ADDRESS_GTW02
+
+ network_config:
+ enp3s0f0:
+ networks:
+ - admin
+ bond0:
+ networks:
+ - control
+ aggregation: active-backup
+ parents:
+ - enp3s0f1
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/common-services.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/common-services.yaml
index 06df7ad..6dc4829 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/common-services.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/common-services.yaml
@@ -1,5 +1,20 @@
{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
+# Install support services
+- description: Install keepalived on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster and *01*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Install keepalived
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
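+# keepalived is applied to *01* first so that the VIP is claimed by a single
+# node before the rest of the cluster joins; skip_fail: true keeps the run
+# going even if an individual apply fails.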
+
- description: Install glusterfs
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@glusterfs:server' state.sls glusterfs.server.service
@@ -21,21 +36,6 @@
retry: {count: 1, delay: 5}
skip_fail: false
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
- description: Install RabbitMQ on ctl01
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@rabbitmq:server and *01*' state.sls rabbitmq
@@ -114,10 +114,10 @@
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Check the OpenStack control VIP
+- description: Check the VIP
cmd: |
- OPENSTACK_CONTROL_ADDRESS=$(salt --out=newline_values_only "ctl01*" pillar.get _param:cluster_vip_address);
- echo "_param:cluster_vip_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+ OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
+ echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
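+ # salt-call reads the pillar locally on cfg01; the check below then verifies
+ # that the VIP is actually held by one of the keepalived cluster members.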
salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 3, delay: 10}
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
index 9c24f6d..44badb5 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
@@ -1,11 +1,10 @@
{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_CTL01 with context %}
{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_GTW01 with context %}
-{# from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_GTW02 with context #}
+{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_GTW02 with context %}
{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
-{% set PATTERN = os_env('PATTERN', 'false') %}
-{% set RUN_TEMPEST = os_env('RUN_TEMPEST', 'false') %}
+{% import 'shared-salt.yaml' as SHARED with context %}
# Install OpenStack control services
@@ -97,7 +96,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@cinder:volume' state.sls cinder
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 2, delay: 5}
skip_fail: false
- description: Check cinder list
@@ -180,23 +179,6 @@
retry: {count: 10, delay: 30}
skip_fail: false
-
- # Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
- description: Create net04_external
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
'. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
@@ -206,7 +188,7 @@
- description: Create subnet_external
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+ '. /root/keystonercv3; neutron subnet-create net04_ext 172.17.42.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start=172.17.42.10,end=172.17.42.60 --gateway 172.17.42.1'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
skip_fail: false
@@ -220,7 +202,7 @@
- description: Create subnet_net04
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+ '. /root/keystonercv3; neutron subnet-create net04 192.168.0.0/24 --name net04__subnet --allocation-pool start=192.168.0.120,end=192.168.0.240'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
skip_fail: false
@@ -246,54 +228,4 @@
retry: {count: 1, delay: 30}
skip_fail: false
-#- description: Allow all tcp
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-
-#- description: Allow all icmp
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-
-#- description: sync time
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
-# 'service ntp stop; ntpd -gq; service ntp start'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-
-- description: Temp workaround of PROD-13167
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run
- 'apt-get install python-pymysql -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install docker.io on gtw
- cmd: salt 'gtw01*' cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Enable forward policy
- cmd: salt "gtw01*" cmd.run 'iptables --policy FORWARD ACCEPT'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: create rc file on cfg
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
index 392798b..f1416d5 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
@@ -1,7 +1,17 @@
default_context:
- mcp_version: testing
+ mcp_version: proposed
ceph_enabled: 'False'
- cicd_enabled: 'False'
+ cicd_enabled: 'True'
+ cicd_control_node01_address: 10.167.4.91
+ cicd_control_node01_hostname: cid01
+ cicd_control_node02_address: 10.167.4.92
+ cicd_control_node02_hostname: cid02
+ cicd_control_node03_address: 10.167.4.93
+ cicd_control_node03_hostname: cid03
+ cicd_control_vip_address: 10.167.4.90
+ cicd_control_vip_hostname: cid
+ kqueen_custom_mail_enabled: 'False'
+ kqueen_enabled: 'False'
cluster_domain: cookied-bm-mcp-dvr-vxlan.local
cluster_name: cookied-bm-mcp-dvr-vxlan
compute_bond_mode: active-backup
@@ -10,13 +20,13 @@
context_seed: Psupdi5ne1kCk31iDWV7fhbHnBALIr3SWhce7Z01jCaMwlAhGKxeLPFPQ9CgYzJD
control_network_netmask: 255.255.255.0
control_network_subnet: 10.167.4.0/24
- control_vlan: '2416'
+ control_vlan: '2403'
cookiecutter_template_branch: master
cookiecutter_template_credentials: gerrit
cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
- deploy_network_gateway: 172.16.49.1
+ deploy_network_gateway: 172.16.164.1
deploy_network_netmask: 255.255.255.192
- deploy_network_subnet: 172.16.49.0/26
+ deploy_network_subnet: 172.16.164.0/26
deployment_type: physical
dns_server01: 172.18.176.6
dns_server02: 172.18.208.44
@@ -26,37 +36,40 @@
infra_bond_mode: active-backup
infra_deploy_nic: eth0
infra_kvm01_control_address: 10.167.4.241
- infra_kvm01_deploy_address: 172.16.49.11
+ infra_kvm01_deploy_address: 172.16.164.11
infra_kvm01_hostname: kvm01
infra_kvm02_control_address: 10.167.4.242
- infra_kvm02_deploy_address: 172.16.49.12
+ infra_kvm02_deploy_address: 172.16.164.12
infra_kvm02_hostname: kvm02
infra_kvm03_control_address: 10.167.4.243
- infra_kvm03_deploy_address: 172.16.49.13
+ infra_kvm03_deploy_address: 172.16.164.13
infra_kvm03_hostname: kvm03
infra_kvm_vip_address: 10.167.4.240
infra_primary_first_nic: eth1
infra_primary_second_nic: eth2
kubernetes_enabled: 'False'
local_repositories: 'False'
- maas_deploy_address: 172.16.49.15
+ maas_deploy_address: 172.16.164.14
maas_hostname: cfg01
offline_deployment: 'False'
opencontrail_enabled: 'False'
- #openstack_benchmark_node01_address: 10.167.4.95
- #openstack_benchmark_node01_hostname: bmk01
+ openldap_enabled: 'False'
+ bmk_enabled: 'False'
+ static_ips_on_deploy_network_enabled: 'False'
+ openstack_benchmark_node01_address: 10.167.4.95
+ openstack_benchmark_node01_hostname: bmk01
openstack_compute_count: '2'
openstack_compute_rack01_hostname: cmp
openstack_compute_rack01_single_subnet: 10.167.4
openstack_compute_rack01_tenant_subnet: 10.167.6
- openstack_compute_node01_hostname: cmp001
- openstack_compute_node02_hostname: cmp002
+ openstack_compute_node01_hostname: cmp01
+ openstack_compute_node02_hostname: cmp02
openstack_compute_node01_address: 10.167.4.3
openstack_compute_node02_address: 10.167.4.31
openstack_compute_node01_single_address: 10.167.4.3
openstack_compute_node02_single_address: 10.167.4.31
- openstack_compute_node01_deploy_address: 172.16.49.3
- openstack_compute_node02_deploy_address: 172.16.49.31
+ openstack_compute_node01_deploy_address: 172.16.164.3
+ openstack_compute_node02_deploy_address: 172.16.164.31
openstack_control_address: 10.167.4.10
openstack_control_hostname: ctl
openstack_control_node01_address: 10.167.4.11
@@ -76,10 +89,10 @@
openstack_enabled: 'True'
openstack_gateway_node01_address: 10.167.4.224
openstack_gateway_node01_hostname: gtw01
- openstack_gateway_node01_tenant_address: 10.167.6.6
+ openstack_gateway_node01_tenant_address: 10.167.6.4
openstack_gateway_node02_address: 10.167.4.225
openstack_gateway_node02_hostname: gtw02
- openstack_gateway_node02_tenant_address: 10.167.6.7
+ openstack_gateway_node02_tenant_address: 10.167.6.5
openstack_message_queue_address: 10.167.4.40
openstack_message_queue_hostname: msg
openstack_message_queue_node01_address: 10.167.4.41
@@ -140,39 +153,43 @@
backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
salt_master_address: 10.167.4.2
salt_master_hostname: cfg01
- salt_master_management_address: 172.16.49.2
+ salt_master_management_address: 172.16.164.2
shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
- stacklight_enabled: 'True'
- stacklight_log_address: 10.167.8.60
+ stacklight_enabled: 'False'
+ fluentd_enabled: 'False'
+ stacklight_log_address: 10.167.4.60
stacklight_log_hostname: log
- stacklight_log_node01_address: 10.167.8.61
+ stacklight_log_node01_address: 10.167.4.61
stacklight_log_node01_hostname: log01
- stacklight_log_node02_address: 10.167.8.62
+ stacklight_log_node02_address: 10.167.4.62
stacklight_log_node02_hostname: log02
- stacklight_log_node03_address: 10.167.8.63
+ stacklight_log_node03_address: 10.167.4.63
stacklight_log_node03_hostname: log03
- stacklight_monitor_address: 10.167.8.70
+ stacklight_monitor_address: 10.167.4.70
stacklight_monitor_hostname: mon
- stacklight_monitor_node01_address: 10.167.8.71
+ stacklight_monitor_node01_address: 10.167.4.71
stacklight_monitor_node01_hostname: mon01
- stacklight_monitor_node02_address: 10.167.8.72
+ stacklight_monitor_node02_address: 10.167.4.72
stacklight_monitor_node02_hostname: mon02
- stacklight_monitor_node03_address: 10.167.8.73
+ stacklight_monitor_node03_address: 10.167.4.73
stacklight_monitor_node03_hostname: mon03
stacklight_notification_address: alerts@localhost
stacklight_notification_smtp_host: 127.0.0.1
- stacklight_telemetry_address: 10.167.8.85
+ stacklight_telemetry_address: 10.167.4.85
stacklight_telemetry_hostname: mtr
- stacklight_telemetry_node01_address: 10.167.8.86
+ stacklight_telemetry_node01_address: 10.167.4.86
stacklight_telemetry_node01_hostname: mtr01
- stacklight_telemetry_node02_address: 10.167.8.87
+ stacklight_telemetry_node02_address: 10.167.4.87
stacklight_telemetry_node02_hostname: mtr02
- stacklight_telemetry_node03_address: 10.167.8.88
+ stacklight_telemetry_node03_address: 10.167.4.88
stacklight_telemetry_node03_hostname: mtr03
stacklight_version: '2'
- fluentd_enabled: 'True'
tenant_network_gateway: 10.167.6.1
tenant_network_netmask: 255.255.255.0
tenant_network_subnet: 10.167.6.0/24
- tenant_vlan: '2417'
+ stacklight_long_term_storage_type: prometheus
+ prometheus_relay_bind_port: 9094
+ tenant_vlan: '2406'
upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'True'
+ vnf_onboarding_enabled: 'False'
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml
index a1bd850..a70b9f3 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml
@@ -7,6 +7,8 @@
interfaces:
ens3:
role: single_dhcp
+ ens4:
+ role: single_static_ctl
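+ # ens4 gives cfg01 a static leg on the control network; the address is
+ # reserved as ETH2_IP_ADDRESS_CFG01 (10.167.4.253) in underlay.yaml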
# Physical nodes
kvm01.cookied-bm-mcp-dvr-vxlan.local:
@@ -42,7 +44,7 @@
enp9s0f1:
role: bond0_ab_ovs_vlan_ctl
- cmp001.cookied-bm-mcp-dvr-vxlan.local:
+ cmp01.cookied-bm-mcp-dvr-vxlan.local:
reclass_storage_name: openstack_compute_node01
roles:
- openstack_compute
@@ -50,14 +52,14 @@
- linux_system_codename_xenial
interfaces:
enp9s0f0:
- role: single_dhcp
+ role: single_mgm_dhcp
enp9s0f1:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- single_address: ${_param:openstack_compute_node01_control_address}
- tenant_address: ${_param:openstack_compute_node01_tenant_address}
+ role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+ single_address: 10.167.4.105
+ tenant_address: 10.167.6.105
- cmp002.cookied-bm-mcp-dvr-vxlan.local:
+ cmp02.cookied-bm-mcp-dvr-vxlan.local:
reclass_storage_name: openstack_compute_node02
roles:
- openstack_compute
@@ -65,12 +67,11 @@
- linux_system_codename_xenial
interfaces:
enp9s0f0:
- role: single_dhcp
+ role: single_mgm_dhcp
enp9s0f1:
- role: bond0_ab_ovs_vxlan_ctl_mesh
- single_address: ${_param:openstack_compute_node02_control_address}
- tenant_address: ${_param:openstack_compute_node02_tenant_address}
-
+ role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+ single_address: 10.167.4.106
+ tenant_address: 10.167.6.106
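+ # NOTE: .106 assumed here to avoid colliding with cmp01 (.105); it also
+ # matches the cmp02 /etc/hosts entry added in salt.yaml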
gtw01.cookied-bm-mcp-dvr-vxlan.local:
reclass_storage_name: openstack_gateway_node01
@@ -79,7 +80,7 @@
- linux_system_codename_xenial
interfaces:
enp2s0f0:
- role: single_dhcp
+ role: single_mgm_dhcp
enp2s0f1:
role: bond0_ab_dvr_vxlan_ctl_mesh_floating
@@ -90,6 +91,6 @@
- linux_system_codename_xenial
interfaces:
enp2s0f0:
- role: single_dhcp
+ role: single_mgm_dhcp
enp2s0f1:
role: bond0_ab_dvr_vxlan_ctl_mesh_floating
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml
index 7435fc8..37d0b14 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-vcp-environment.yaml
@@ -14,7 +14,6 @@
reclass_storage_name: openstack_control_node02
roles:
- openstack_control
- - features_designate
- linux_system_codename_xenial
interfaces:
ens2:
@@ -37,9 +36,10 @@
reclass_storage_name: openstack_database_node01
roles:
- openstack_database_leader
- - features_designate_database
- linux_system_codename_xenial
interfaces:
+ ens2:
+ role: single_dhcp
ens3:
role: single_ctl
@@ -49,6 +49,8 @@
- openstack_database
- linux_system_codename_xenial
interfaces:
+ ens2:
+ role: single_dhcp
ens3:
role: single_ctl
@@ -56,9 +58,10 @@
reclass_storage_name: openstack_database_node03
roles:
- openstack_database
- - features_designate_database
- linux_system_codename_xenial
interfaces:
+ ens2:
+ role: single_dhcp
ens3:
role: single_ctl
@@ -68,6 +71,8 @@
- openstack_message_queue
- linux_system_codename_xenial
interfaces:
+ ens2:
+ role: single_dhcp
ens3:
role: single_ctl
@@ -77,6 +82,8 @@
- openstack_message_queue
- linux_system_codename_xenial
interfaces:
+ ens2:
+ role: single_dhcp
ens3:
role: single_ctl
@@ -86,6 +93,8 @@
- openstack_message_queue
- linux_system_codename_xenial
interfaces:
+ ens2:
+ role: single_dhcp
ens3:
role: single_ctl
@@ -93,9 +102,10 @@
reclass_storage_name: openstack_proxy_node01
roles:
- openstack_proxy
- - features_designate_proxy
- linux_system_codename_xenial
interfaces:
+ ens2:
+ role: single_dhcp
ens3:
role: single_ctl
@@ -103,35 +113,63 @@
reclass_storage_name: openstack_proxy_node02
roles:
- openstack_proxy
- - features_designate_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens2:
+ role: single_dhcp
+ ens3:
+ role: single_ctl
+
+ cid01.cookied-bm-mcp-dvr-vxlan.local:
+ reclass_storage_name: cicd_control_node01
+ roles:
+ - cicd_control_leader
- linux_system_codename_xenial
interfaces:
ens3:
role: single_ctl
- mtr01.cookied-bm-mcp-dvr-vxlan.local:
- reclass_storage_name: stacklight_telemetry_node01
+ cid02.cookied-bm-mcp-dvr-vxlan.local:
+ reclass_storage_name: cicd_control_node02
roles:
- - stacklight_telemetry
+ - cicd_control_manager
- linux_system_codename_xenial
interfaces:
ens3:
role: single_ctl
- mtr02.cookied-bm-mcp-dvr-vxlan.local:
- reclass_storage_name: stacklight_telemetry_node02
+ cid03.cookied-bm-mcp-dvr-vxlan.local:
+ reclass_storage_name: cicd_control_node03
roles:
- - stacklight_telemetry
+ - cicd_control_manager
- linux_system_codename_xenial
interfaces:
ens3:
role: single_ctl
- mtr03.cookied-bm-mcp-dvr-vxlan.local:
- reclass_storage_name: stacklight_telemetry_node03
- roles:
- - stacklight_telemetry
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
+# mon01.cookied-bm-mcp-dvr-vxlan.local:
+# reclass_storage_name: stacklight_server_node01
+# roles:
+# - stacklightv2_server_leader
+# - linux_system_codename_xenial
+# interfaces:
+# ens3:
+# role: single_ctl
+#
+# mon02.cookied-bm-mcp-dvr-vxlan.local:
+# reclass_storage_name: stacklight_server_node02
+# roles:
+# - stacklightv2_server
+# - linux_system_codename_xenial
+# interfaces:
+# ens3:
+# role: single_ctl
+#
+# mon03.cookied-bm-mcp-dvr-vxlan.local:
+# reclass_storage_name: stacklight_server_node03
+# roles:
+# - stacklightv2_server
+# - linux_system_codename_xenial
+# interfaces:
+# ens3:
+# role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
index 9e176ab..89895ea 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
@@ -6,51 +6,14 @@
{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_KVM03 with context %}
{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
-# Other salt model repository parameters see in shared-salt.yaml
-# Name of the context file (without extension, that is fixed .yaml) used to render the Environment model
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','cookied-bm-mcp-dvr-vxlan') %}
-# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-vcp-environment.yaml', 'salt-context-lab03-environment.yaml'] %}
-
-{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2416') %}
-{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2417') %}
{% import 'shared-salt.yaml' as SHARED with context %}
{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
-{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
-
-- description: Temporary WR for cinder backend defined by default in reclass.system
- cmd: |
- sed -i 's/backend\:\ {}//g' /srv/salt/reclass/classes/system/cinder/control/cluster.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
-{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-
-- description: "Workaround for rack01 compute generator"
- cmd: |
- set -e;
- # Remove rack01 key
- . /root/venv-reclass-tools/bin/activate;
- reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
-
- # Add openstack_compute_node definition from system
- reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml --merge;
-
- # Set ipaddresses for our nodes
- reclass-tools add-key parameters._param.openstack_compute_node01_control_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.3 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- reclass-tools add-key parameters._param.openstack_compute_node02_control_address {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.31 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- reclass-tools add-key parameters._param.openstack_compute_node01_tenant_address {{ SHARED.IPV4_NET_TENANT_PREFIX }}.3 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- reclass-tools add-key parameters._param.openstack_compute_node02_tenant_address {{ SHARED.IPV4_NET_TENANT_PREFIX }}.31 /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/config.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins" "fluentd" "runtest" "auditd" ') }}
@@ -58,23 +21,21 @@
{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
-
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
-
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
-- description: "Lab03 workaround: Control network access from cfg01 node using sshuttle via kvm01"
+- description: "WR for changing image to proposed"
cmd: |
set -e;
- set -x;
- KVM01_DEPLOY_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:infra_kvm_node01_deploy_address);
- apt-get install -y sshuttle;
- sshuttle -r ${KVM01_DEPLOY_ADDRESS} {{ SHARED.IPV4_NET_CONTROL }} -D >/dev/null;
+ apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
+ [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
+ . /root/venv-reclass-tools/bin/activate;
+ pip install git+https://github.com/dis-xcom/reclass-tools;
+ reclass-tools add-key parameters._param.salt_control_xenial_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcpproposed.qcow2' /srv/salt/reclass/classes/cluster/cookied-bm-mcp-dvr-vxlan/infra/init.yml;
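+ # assumption: the step above pins the VCP (salt.control) base image to the
+ # 'proposed' qcow2 build served from ci.mcp.mirantis.net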
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 1, delay: 10}
skip_fail: false
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
- description: Temporary workaround for removing cinder-volume from CTL nodes
cmd: |
sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml;
@@ -83,16 +44,35 @@
retry: {count: 1, delay: 5}
skip_fail: true
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
########################################
# Spin up Control Plane VMs on KVM nodes
########################################
+- description: Hack resolv.conf on nodes for internal services access
+ cmd: |
+ salt --hard-crash --state-output=mixed --state-verbose=False -C '*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
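+ # 172.18.208.44 is the same internal DNS server configured in the cloud-init
+ # user-data templates for these nodes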
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
- description: Execute 'libvirt' states to create necessary libvirt networks
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls libvirt
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
+- description: Temporary WR for correct bridge names according to environment templates
+ cmd: |
+ sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/gateway.yml;
+ salt '*' saltutil.refresh_pillar;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
- description: Create VMs for control plane
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'kvm*' state.sls salt.control
node_name: {{ HOSTNAME_CFG01 }}
@@ -131,6 +111,15 @@
{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+- description: Add cmp nodes to /etc/hosts
+ cmd: |
+ salt --hard-crash --state-output=mixed --state-verbose=False -C '*' cmd.run "echo '10.167.4.105 cmp01.cookied-bm-mcp-dvr-vxlan.local cmp01' >> /etc/hosts";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C '*' cmd.run "echo '10.167.4.106 cmp02.cookied-bm-mcp-dvr-vxlan.local cmp02' >> /etc/hosts";
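+ # assumption: cmp01/cmp02 are not resolvable via DNS at this stage, so their
+ # control addresses are pinned statically on every node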
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
+
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/sl.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/sl.yaml
deleted file mode 100644
index f4b05d0..0000000
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/sl.yaml
+++ /dev/null
@@ -1,193 +0,0 @@
-{% from 'cookied-bm-mcp-dvr-vxlan/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
-# Install docker swarm.
-- description: Configure docker service
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install docker swarm on master node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Send grains to the swarm slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Refresh modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Rerun swarm on slaves to proper token population
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure slave nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: List registered Docker swarm nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install keepalived on mon nodes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'mon*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check the VIP on StackLight mon nodes
- cmd: |
- SL_VIP=$(salt --out=newline_values_only "mon01*" pillar.get _param:cluster_vip_address);
- echo "_param:cluster_vip_address (vip): ${SL_VIP}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-# Install slv2 infra
-- description: Install Mongo if target matches
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Alerta if it is exists
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:alerta' match.pillar 'prometheus:alerta' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:alerta' state.sls prometheus.alerta
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install telegraf
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure fluentd
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Install kibana client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Check influix db
- cmd: |
- INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
- if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Install prometheus alertmanager
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:server' state.sls prometheus -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: run docker state
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 120}
- skip_fail: false
-
-- description: docker ps
- cmd: sleep 120; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 60; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml
index 12016f5..3f4f128 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml
@@ -18,8 +18,8 @@
expire: False
bootcmd:
- # # Block access to SSH while node is preparing
- # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Block access to SSH while node is preparing
+ #- cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -38,7 +38,6 @@
- sudo ifdown ens3
- sudo ip r d default || true # remove existing default route to get it from dhcp
- sudo ifup ens3
- #- sudo route add default gw {gateway} {interface_name}
# Create swap
- fallocate -l 4G /swapfile
@@ -49,43 +48,21 @@
############## TCP Cloud cfg01 node ##################
#- sleep 120
- # - echo "Preparing base OS"
+ - echo "Preparing base OS"
- echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- # - which wget >/dev/null || (apt-get update; apt-get install -y wget);
+ - which wget >/dev/null || (apt-get update; apt-get install -y wget);
- # # Configure Ubuntu mirrors
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+ # Install common packages
+ - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree sshuttle
- # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- # - apt-get clean
- # - apt-get update
-
- # # Install common packages
- # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- # # Install salt-minion and stop it until it is configured
- # - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
- # ########################################################
- # # Node is ready, allow SSH access
- # - echo "Allow SSH access ..."
- # - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- # ########################################################
+ ########################################################
+ # Node is ready, allow SSH access
+ #- echo "Allow SSH access ..."
+ #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ ########################################################
write_files:
- - path: /etc/default/grub.d/97-enable-grub-menu.cfg
- content: |
- GRUB_RECORDFAIL_TIMEOUT=30
- GRUB_TIMEOUT=3
- GRUB_TIMEOUT_STYLE=menu
-
- path: /etc/network/interfaces
content: |
auto ens3
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604-hwe.yaml
index ca4b062..983a026 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604-hwe.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604-hwe.yaml
@@ -52,9 +52,10 @@
#- sleep 120
# - echo "Preparing base OS"
- echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+ # - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
# - which wget >/dev/null || (apt-get update; apt-get install -y wget)
- # # Configure Ubuntu mirrors
+ # Configure Ubuntu mirrors
# - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
# - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
# - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
@@ -67,13 +68,13 @@
# - apt-get clean
# - eatmydata apt-get update && apt-get -y upgrade
- # # Install common packages
+ # Install common packages
# - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
- # # Install salt-minion and stop it until it is configured
+ # Install salt-minion and stop it until it is configured
# - eatmydata apt-get install -y salt-minion && service salt-minion stop
- # # Install latest kernel
+ # Install latest kernel
# - eatmydata apt-get install -y {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }}
########################################################
@@ -96,4 +97,4 @@
auto lo
iface lo inet loopback
auto {interface_name}
- iface {interface_name} inet dhcp
\ No newline at end of file
+ iface {interface_name} inet dhcp
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604.yaml
index 7985929..9168b7f 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # # Block access to SSH while node is preparing
- # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -38,7 +36,6 @@
# Prepare network connection
- sudo ifup {interface_name}
- #- sudo route add default gw {gateway} {interface_name}
# Create swap
- fallocate -l 4G /swapfile
@@ -46,38 +43,7 @@
- mkswap /swapfile
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- # - echo "Preparing base OS"
- echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- # - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # # Configure Ubuntu mirrors
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- # - apt-get clean
- # - eatmydata apt-get update && apt-get -y upgrade
-
- # # Install common packages
- # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- # # Install salt-minion and stop it until it is configured
- # - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
- ########################################################
- # Node is ready, allow SSH access
- # - echo "Allow SSH access ..."
- # - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
write_files:
- path: /etc/default/grub.d/97-enable-grub-menu.cfg
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
index ab28e9b..25c98bc 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
@@ -6,19 +6,19 @@
{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
-{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.2') %}
-{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.11') %}
-{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.49.12') %}
-{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.49.13') %}
-{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.16.49.3') %}
-{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.16.49.31') %}
-{% set ETH0_IP_ADDRESS_GTW01 = os_env('ETH0_IP_ADDRESS_GTW01', '172.16.49.5') %}
-{% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.49.4') %}
+{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.164.2') %}
+{% set ETH2_IP_ADDRESS_CFG01 = os_env('ETH2_IP_ADDRESS_CFG01', '10.167.4.253') %}
+{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.164.11') %}
+{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.164.12') %}
+{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.164.13') %}
+{% set ETH0_IP_ADDRESS_CMP01 = os_env('ETH0_IP_ADDRESS_CMP01', '172.16.164.3') %}
+{% set ETH0_IP_ADDRESS_CMP02 = os_env('ETH0_IP_ADDRESS_CMP02', '172.16.164.31') %}
+{% set ETH0_IP_ADDRESS_GTW01 = os_env('ETH0_IP_ADDRESS_GTW01', '172.16.164.4') %}
+{% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.164.5') %}
{% import 'cookied-bm-mcp-dvr-vxlan/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
{% import 'cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
@@ -39,7 +39,7 @@
address_pools:
admin-pool01:
- net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.49.0/26:26') }}
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '172.16.164.0/26:26') }}
params:
ip_reserved:
gateway: +62
@@ -48,8 +48,8 @@
default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
- default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
- default_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
+ default_{{ HOSTNAME_CMP01 }}: {{ ETH0_IP_ADDRESS_CMP01 }}
+ default_{{ HOSTNAME_CMP02 }}: {{ ETH0_IP_ADDRESS_CMP02 }}
default_{{ HOSTNAME_GTW01 }}: {{ ETH0_IP_ADDRESS_GTW01 }}
default_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
ip_ranges:
@@ -58,6 +58,7 @@
net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.4.0/24:24') }}
params:
ip_reserved:
+ virtual_{{ HOSTNAME_CFG01 }}: {{ ETH2_IP_ADDRESS_CFG01 }}
gateway: +1
l2_network_device: +1
@@ -69,7 +70,7 @@
l2_network_device: +1
external-pool01:
- net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.128/26:26') }}
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.0/26:26') }}
params:
ip_reserved:
gateway: +1
@@ -99,6 +100,9 @@
dhcp: false
parent_iface:
phys_dev: !os_env IRONIC_LAB_PXE_IFACE_0
+ private:
+ parent_iface:
+ phys_dev: !os_env CONTROL_IFACE
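+ # assumption: CONTROL_IFACE names the host NIC attached to the control VLAN,
+ # mirroring IRONIC_LAB_PXE_IFACE_0 for the admin/PXE network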
group_volumes:
- name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
@@ -137,16 +141,16 @@
l2_network_device: admin
interface_model: *interface_model
mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
- #- label: ens4
- # l2_network_device: private
- # interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
network_config:
ens3:
networks:
- admin
- #ens4:
- # networks:
- # - private
+ ens4:
+ networks:
+ - private
- name: default
@@ -307,13 +311,13 @@
- enp9s0f1
- - name: {{ HOSTNAME_CMP001 }}
+ - name: {{ HOSTNAME_CMP01 }}
role: salt_minion
params:
ipmi_user: !os_env IPMI_USER
ipmi_password: !os_env IPMI_PASSWORD
ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CMP001 # hostname or IP address
+ ipmi_host: !os_env IPMI_HOST_CMP01 # hostname or IP address
ipmi_lan_interface: lanplus
ipmi_port: 623
@@ -339,16 +343,9 @@
interfaces:
- label: enp9s0f0
l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_CMP001
+ mac_address: !os_env ETH0_MAC_ADDRESS_CMP01
- label: enp9s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_CMP001
-# - label: enp9s0f0
-# mac_address: !os_env ETH2_MAC_ADDRESS_CMP001
-# - label: enp5s0f1
-# mac_address: !os_env ETH3_MAC_ADDRESS_CMP001
-# - label: enp5s0f2
-# mac_address: !os_env ETH4_MAC_ADDRESS_CMP001
-
+ mac_address: !os_env ETH1_MAC_ADDRESS_CMP01
network_config:
enp9s0f0:
networks:
@@ -363,13 +360,13 @@
- - name: {{ HOSTNAME_CMP002 }}
+ - name: {{ HOSTNAME_CMP02 }}
role: salt_minion
params:
ipmi_user: !os_env IPMI_USER
ipmi_password: !os_env IPMI_PASSWORD
ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CMP002 # hostname or IP address
+ ipmi_host: !os_env IPMI_HOST_CMP02 # hostname or IP address
ipmi_lan_interface: lanplus
ipmi_port: 623
@@ -395,15 +392,9 @@
interfaces:
- label: enp9s0f0
l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
+ mac_address: !os_env ETH0_MAC_ADDRESS_CMP02
- label: enp9s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
-# - label: eth3
-# mac_address: !os_env ETH2_MAC_ADDRESS_CMP002
-# - label: eth2
-# mac_address: !os_env ETH3_MAC_ADDRESS_CMP002
-# - label: eth4
-# mac_address: !os_env ETH4_MAC_ADDRESS_CMP002
+ mac_address: !os_env ETH1_MAC_ADDRESS_CMP02
network_config:
enp9s0f0:
networks:
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/lab04-physical-inventory.yaml
index bcb3ec3..7bf4d2e 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/lab04-physical-inventory.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/lab04-physical-inventory.yaml
@@ -45,6 +45,7 @@
cmp001.cookied-bm-mcp-ocata-contrail.local:
reclass_storage_name: openstack_compute_node01
roles:
+ - openstack_compute
- features_lvm_backend
- linux_system_codename_xenial
interfaces:
@@ -61,6 +62,7 @@
cmp002.cookied-bm-mcp-ocata-contrail.local:
reclass_storage_name: openstack_compute_node02
roles:
+ - openstack_compute
- features_lvm_backend
- linux_system_codename_xenial
interfaces:
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
index 742a607..a66ea71 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
@@ -94,7 +94,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@cinder:volume' state.sls cinder
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 2, delay: 5}
skip_fail: false
- description: Check cinder list
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
index 731548c..54d1298 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
@@ -1,9 +1,8 @@
{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import LAB_CONFIG_NAME with context %}
{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import DOMAIN_NAME with context %}
-{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_KVM01 with context %}
-{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_KVM02 with context %}
-{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_KVM03 with context %}
+{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import CUSTOM_VCP_TRUSTY_IMAGE_URL with context %}
+{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import CUSTOM_VCP_XENIAL_IMAGE_URL with context %}
{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
@@ -31,8 +30,6 @@
{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/environment/' + ENVIRONMENT_MODEL_INVENTORY_NAME + '/overrides.yml') }}
-
- description: "Workaround for rack01 compute generator"
cmd: |
set -e;
@@ -45,38 +42,31 @@
retry: {count: 1, delay: 10}
skip_fail: false
-- description: "WR for changing image to proposed"
+{%- if CUSTOM_VCP_TRUSTY_IMAGE_URL != '' %}
+
+- description: "Change trusty image to custom"
cmd: |
- set -e;
- # Add message_queu host for opencontrail
+ echo "CUSTOM_TRUSTY_IMAGE is {{ CUSTOM_VCP_TRUSTY_IMAGE_URL }}";
. /root/venv-reclass-tools/bin/activate;
- reclass-tools add-key parameters._param.salt_control_xenial_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcpproposed.qcow2' /srv/salt/reclass/classes/cluster/cookied-bm-mcp-ocata-contrail/infra/init.yml;
- reclass-tools add-key parameters._param.salt_control_trusty_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-14-04-x64-mcpproposed.qcow2' /srv/salt/reclass/classes/cluster/cookied-bm-mcp-ocata-contrail/infra/init.yml;
+ reclass-tools add-key parameters._param.salt_control_trusty_image "{{ CUSTOM_VCP_TRUSTY_IMAGE_URL }}" /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+{%- endif %}
-{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+{%- if CUSTOM_VCP_XENIAL_IMAGE_URL != '' %}
-
-- description: "Workaround for PROD-14060"
+- description: "Change xenial image to custom"
cmd: |
- set -e;
- # Add tenant and single addresses for computes
- salt-call reclass.cluster_meta_set deploy_address 172.16.49.73 /srv/salt/reclass/nodes/_generated/cmp001.cookied-bm-mcp-ocata-contrail.local.yml
- salt-call reclass.cluster_meta_set tenant_address 192.168.0.101 /srv/salt/reclass/nodes/_generated/cmp001.cookied-bm-mcp-ocata-contrail.local.yml
- salt-call reclass.cluster_meta_set single_address 10.167.8.101 /srv/salt/reclass/nodes/_generated/cmp001.cookied-bm-mcp-ocata-contrail.local.yml
-
- salt-call reclass.cluster_meta_set deploy_address 172.16.49.74 /srv/salt/reclass/nodes/_generated/cmp002.cookied-bm-mcp-ocata-contrail.local.yml
- salt-call reclass.cluster_meta_set tenant_address 192.168.0.102 /srv/salt/reclass/nodes/_generated/cmp002.cookied-bm-mcp-ocata-contrail.local.yml
- salt-call reclass.cluster_meta_set single_address 10.167.8.102 /srv/salt/reclass/nodes/_generated/cmp002.cookied-bm-mcp-ocata-contrail.local.yml
-
+ echo "CUSTOM_XENIAL_IMAGE is {{ CUSTOM_VCP_XENIAL_IMAGE_URL }}";
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools add-key parameters._param.salt_control_xenial_image "{{ CUSTOM_VCP_XENIAL_IMAGE_URL }}" /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/init.yml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
+{%- endif %}
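
Both image-override steps are emitted only when the corresponding CUSTOM_VCP_*_IMAGE_URL variable is non-empty, so the gate lives in the template rather than in the runner. A minimal sketch of the pattern with jinja2, under the assumption that the harness simply renders the template with values taken from the environment:

    # Sketch: env-gated template step, mirroring the "{%- if %}" guards
    # above; the rendering harness itself is an assumption.
    import os
    from jinja2 import Template

    template = Template('''
    {%- if custom_xenial_url != '' %}
    - description: "Change xenial image to custom"
      cmd: reclass-tools add-key parameters._param.salt_control_xenial_image "{{ custom_xenial_url }}" infra/init.yml
    {%- endif %}
    ''')

    # With the variable unset, the step disappears from the output.
    rendered = template.render(
        custom_xenial_url=os.environ.get('CUSTOM_VCP_XENIAL_IMAGE_URL', '')).strip()
    print(rendered or '# no custom image requested')
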
- description: Temporary workaround for removing cinder-volume from CTL nodes
cmd: |
@@ -93,7 +83,33 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
+- description: Update minion information
+ cmd: |
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_grains &&
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' mine.update &&
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar && sleep 10
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Rerun openssh after env model is generated
+ cmd: |
+ salt-call state.sls openssh
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Execute linux.network.host one more time after salt.minion to apply dynamically registered hosts on the cluster nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux.network.host
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
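
The three added steps re-sync grains, mine data, and pillars before re-applying openssh and linux.network.host, so hosts registered after inventory generation become visible everywhere. A rough sketch of the same chain driven through the salt CLI (assumed to be on PATH of cfg01); the framework's own runner executes the equivalent via the step definitions above:

    # Rough sketch: replay the minion-refresh chain via the salt CLI.
    import subprocess
    import time

    SALT = ['salt', '--hard-crash', '--state-output=mixed',
            '--state-verbose=False']

    for fun in ('saltutil.sync_grains', 'mine.update',
                'saltutil.refresh_pillar'):
        subprocess.run(SALT + ['*', fun], check=True)  # mirrors "&&"
    time.sleep(10)  # same settle delay as the step above

    # Re-apply host entries once mine data is fresh.
    subprocess.run(SALT + ['-C', 'I@linux:system', 'state.sls',
                           'linux.network.host'], check=True)
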
+
{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
########################################
@@ -126,7 +142,7 @@
- description: Hack resolv.conf on VCP nodes for internal services access
cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not kvm* and not cmp* and not gtw* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
+ salt --hard-crash --state-output=mixed --state-verbose=False -C '* and not cfg*' cmd.run "echo 'nameserver 172.18.208.44' > /etc/resolv.conf;"
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
@@ -154,3 +170,25 @@
{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
+
+- description: "Lab04 workaround: Give each node root acces with key from cfg01"
+ cmd: |
+ set -e;
+ set -x;
+ key=$(ssh-keygen -y -f /root/.ssh/id_rsa);
+ salt '*' cmd.run "echo $key >> /root/.ssh/authorized_keys";
+ salt '*' cmd.run "service sshd restart"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
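
The root-access workaround derives cfg01's public key from its private key with `ssh-keygen -y` and pushes it to every minion. The same derivation sketched in Python; the subprocess wrapper is illustrative, the commands are the ones in the step:

    # Sketch: derive the public key from the master's private key and
    # append it to authorized_keys on all minions (illustrative only).
    import subprocess

    pub_key = subprocess.run(
        ['ssh-keygen', '-y', '-f', '/root/.ssh/id_rsa'],
        check=True, capture_output=True, text=True).stdout.strip()

    for cmd in ('echo %s >> /root/.ssh/authorized_keys' % pub_key,
                'service sshd restart'):
        subprocess.run(['salt', '*', 'cmd.run', cmd], check=True)
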
+
+- description: "Lab04 workaround: Control network access from cfg01 node using sshuttle via kvm01"
+ cmd: |
+ set -e;
+ set -x;
+ KVM01_DEPLOY_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:infra_kvm_node01_deploy_address);
+ apt-get install -y sshuttle;
+ sshuttle -r ${KVM01_DEPLOY_ADDRESS} 10.167.8.0/24 -D >/dev/null;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
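
sshuttle gives cfg01 transparent access to the 10.167.8.0/24 control network by tunnelling through kvm01's deploy address, with no static routes on intermediate hops. A sketch of the same launch from Python; the pillar lookup and flags are the ones in the step, the wrapper is illustrative:

    # Sketch: open an sshuttle tunnel to the control network via kvm01,
    # resolving the KVM deploy address from pillar (illustrative only).
    import subprocess

    kvm01 = subprocess.run(
        ['salt-call', '--out=newline_values_only', 'pillar.get',
         '_param:infra_kvm_node01_deploy_address'],
        check=True, capture_output=True, text=True).stdout.strip()

    # -r: remote ssh hop; -D: daemonize once the tunnel is up.
    subprocess.run(['sshuttle', '-r', kvm01, '10.167.8.0/24', '-D'],
                   check=True)
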
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
index 4045fe8..925c795 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
@@ -73,7 +73,7 @@
salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb
fi
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
+ retry: {count: 2, delay: 30}
skip_fail: false
- description: Configure Alerta if it exists
@@ -112,6 +112,15 @@
retry: {count: 1, delay: 10}
skip_fail: false
+- description: Configure Prometheus exporters, if pillar 'prometheus:collector' exists on any server
+ cmd: |
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:collector' match.pillar 'prometheus:collector' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:collector' state.sls prometheus.collector
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
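
The exporter step follows the guard pattern used throughout these templates: apply the state only if some minion actually matches the pillar. A rough Python approximation; inspecting the textual output of match.pillar is an assumption, since the shell version relies on the exit status of the compound command:

    # Rough sketch of the pillar guard (illustrative; output parsing
    # approximates the shell "if salt ... match.pillar ..." test).
    import subprocess

    probe = subprocess.run(
        ['salt', '-C', 'I@prometheus:collector', 'match.pillar',
         'prometheus:collector'],
        capture_output=True, text=True)

    if 'True' in probe.stdout:
        subprocess.run(['salt', '-C', 'I@prometheus:collector',
                        'state.sls', 'prometheus.collector'], check=True)
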
- description: Install elasticsearch server
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
node_name: {{ HOSTNAME_CFG01 }}
@@ -253,5 +262,5 @@
- description: Run salt minion to create cert files
cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
+ retry: {count: 3, delay: 15}
skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay.yaml
index f50f0b6..d520d62 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay.yaml
@@ -1,5 +1,7 @@
# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+{% set CUSTOM_VCP_TRUSTY_IMAGE_URL = os_env('CUSTOM_VCP_TRUSTY_IMAGE_URL', '') %}
+{% set CUSTOM_VCP_XENIAL_IMAGE_URL = os_env('CUSTOM_VCP_XENIAL_IMAGE_URL', '') %}
#{# set DOMAIN_NAME = os_env('LAB_CONFIG_NAME', 'physical_mcp11_ovs_dpdk') + '.local' #}
{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-bm-mcp-ocata-contrail') %}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/openstack.yaml
index b8c6bd8..b0f75c4 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/openstack.yaml
@@ -4,6 +4,8 @@
{% from 'cookied-bm-mcp-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL03 with context %}
{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+{% import 'shared-salt.yaml' as SHARED with context %}
+
# Install OpenStack control services
@@ -226,4 +228,13 @@
'. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
- skip_fail: false
\ No newline at end of file
+ skip_fail: false
+
+- description: Temporary workaround to bounce br-prv on compute nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+ 'ifdown br-prv; ifup br-prv'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-environment.yaml
index 64713fe..802c2ee 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-environment.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-environment.yaml
@@ -7,6 +7,8 @@
interfaces:
ens3:
role: single_dhcp
+ ens4:
+ role: single_static_ctl
# Physical nodes
kvm01.cookied-bm-mcp-ovs-dpdk.local:
@@ -60,6 +62,8 @@
enp3s0f1:
role: bond_dpdk_prv_lacp
dpdk_pci: "0000:03:00.1"
+ enp5s0f1:
+ role: sriov
cmp02.cookied-bm-mcp-ovs-dpdk.local:
reclass_storage_name: openstack_compute_node02
@@ -79,6 +83,8 @@
enp3s0f1:
role: bond_dpdk_prv_lacp
dpdk_pci: "0000:03:00.1"
+ enp5s0f1:
+ role: sriov
gtw01.cookied-bm-mcp-ovs-dpdk.local:
reclass_storage_name: openstack_gateway_node01
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml
index 6293886..cfe1145 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml
@@ -14,25 +14,12 @@
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "runtest" "auditd"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "runtest" "maas" "jenkins" "glusterfs" "backupninja" "auditd"') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
-- description: "WR for changing image to proposed"
- cmd: |
- set -e;
- apt-get -y install python-virtualenv python-pip build-essential python-dev libssl-dev;
- [[ -d /root/venv-reclass-tools ]] || virtualenv /root/venv-reclass-tools;
- . /root/venv-reclass-tools/bin/activate;
- pip install git+https://github.com/dis-xcom/reclass-tools;
- reclass-tools add-key parameters._param.salt_control_xenial_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-16-04-x64-mcpproposed.qcow2' /srv/salt/reclass/classes/cluster/cookied-bm-mcp-ovs-dpdk/infra/init.yml;
- reclass-tools add-key parameters._param.salt_control_trusty_image 'http://ci.mcp.mirantis.net:8085/images/ubuntu-14-04-x64-mcpproposed.qcow2' /srv/salt/reclass/classes/cluster/cookied-bm-mcp-ovs-dpdk/infra/init.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
{{ SHARED.MACRO_GENERATE_INVENTORY() }}
{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
node_name: {{ HOSTNAME_CFG01 }}
@@ -56,21 +43,13 @@
retry: {count: 1, delay: 10}
skip_fail: false
-- description: "Workaround for PROD-18834: Pre-install linux-headers package"
- cmd: salt 'cmp*' cmd.run "apt-get install -y linux-headers-$(uname -r)";
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: "Workaround for PROD-17975: Pre-install ovs packages to update alternatives to DPDK"
+- description: Workaround for correct access to git repos from Jenkins on the cfg01 node
cmd: |
- set -ex;
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux.system.repo
- salt 'cmp*' cmd.run "apt-get install -y openvswitch-switch openvswitch-switch-dpdk";
- salt 'cmp*' cmd.run "service openvswitch-switch stop";
- salt 'cmp*' cmd.run "rm -f /var/lib/openvswitch/*";
- salt 'cmp*' cmd.run "update-alternatives --remove ovs-vswitchd /usr/lib/openvswitch-switch/ovs-vswitchd";
- salt 'cmp*' cmd.run "service openvswitch-switch start";
+ git clone --mirror https://github.com/Mirantis/mk-pipelines.git /home/repo/mk/mk-pipelines/;
+ git clone --mirror https://github.com/Mirantis/pipeline-library.git /home/repo/mcp-ci/pipeline-library/;
+ chown -R git:www-data /home/repo/mk/mk-pipelines/*;
+ chown -R git:www-data /home/repo/mcp-ci/pipeline-library/*;
+
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
@@ -133,8 +112,6 @@
retry: {count: 1, delay: 5}
skip_fail: false
-{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
-
- description: Add cpm nodes to /etc/hosts
cmd: |
salt --hard-crash --state-output=mixed --state-verbose=False -C '*' cmd.run "echo '10.167.11.105 cmp01.cookied-bm-mcp-ovs-dpdk.local cmp01' >> /etc/hosts";
@@ -143,23 +120,16 @@
retry: {count: 1, delay: 10}
skip_fail: true
-- description: Enable sriov interfaces
- cmd: |
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'cmp*' cmd.run "echo 7 > /sys/class/net/enp5s0f1/device/sriov_numvfs"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
-- description: Temporary WR
- cmd: |
- ssh-keygen -y -f /root/.ssh/id_rsa > /root/.ssh/id_rsa.pub;
- pub_key=`cat /root/.ssh/id_rsa.pub`;
- salt '*' cmd.run "echo $pub_key >> /root/.ssh/authorized_keys";
- salt '*' cmd.run "service sshd restart";
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
-
{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
+
+- description: "Give each node root access with key from cfg01"
+ cmd: |
+ set -e;
+ set -x;
+ key=$(ssh-keygen -y -f /root/.ssh/id_rsa);
+ salt '*' cmd.run "echo $key >> /root/.ssh/authorized_keys";
+ salt '*' cmd.run "service sshd restart"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay.yaml
index ae10126..23eb24c 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay.yaml
@@ -11,6 +11,7 @@
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.2') %}
+{% set ETH2_IP_ADDRESS_CFG01 = os_env('ETH2_IP_ADDRESS_CFG01', '10.167.11.253') %}
{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.11') %}
{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.49.12') %}
{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.49.13') %}
@@ -57,6 +58,7 @@
net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.167.11.0/24:24') }}
params:
ip_reserved:
+ virtual_{{ HOSTNAME_CFG01 }}: {{ ETH2_IP_ADDRESS_CFG01 }}
gateway: +1
l2_network_device: +1
@@ -98,6 +100,9 @@
dhcp: false
parent_iface:
phys_dev: !os_env IRONIC_LAB_PXE_IFACE_0
+ private:
+ parent_iface:
+ phys_dev: !os_env CONTROL_IFACE
group_volumes:
- name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
@@ -136,16 +141,16 @@
l2_network_device: admin
interface_model: *interface_model
mac_address: !os_env ETH1_MAC_ADDRESS_CFG01
- #- label: ens4
- # l2_network_device: private
- # interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
network_config:
ens3:
networks:
- admin
- #ens4:
- # networks:
- # - private
+ ens4:
+ networks:
+ - private
- name: default
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-dpdk-pipeline.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-dpdk-pipeline.yaml
new file mode 100644
index 0000000..1cedd52
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-dpdk-pipeline.yaml
@@ -0,0 +1,60 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-baremetal-lab') %}
+# See shared-salt.yaml for other salt model repository parameters
+{% set LAB_CONFIG_NAME = 'cookied-bm-dpdk-pipeline' %}
+{% set SALT_VERSION = os_env('SALT_VERSION', '2017.7') %}
+# Name of the context file (without the fixed .yaml extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','cookied-bm-dpdk-pipeline') %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-openstack_ovs_dpdk.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-vcp-environment.yaml', 'salt-context-environment.yaml'] %}
+{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2416') %}
+{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2417') %}
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+{% set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME}/salt/" + SALT_VERSION + REPOSITORY_SUITE + " main") %}
+{# set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE+ "/saltstack-2016.3/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main") #}
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for rack01 compute generator"
+ cmd: |
+ set -e;
+ . /root/venv-reclass-tools/bin/activate;
+ # Remove rack01 key
+ reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ # Add openstack_compute_node definition from system
+ reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml --merge;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+ cmd: |
+ sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Temporary WR for correct bridge name according to environment templates
+ cmd: |
+ sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/gateway.yml;
+ salt '*' saltutil.refresh_pillar;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
+
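
The rack01 workaround in the new template uses reclass-tools to delete the compute generator key and merge in the static compute-multi class. A minimal re-implementation of the two operations with PyYAML, for orientation only; the real reclass-tools preserves formatting and supports --merge semantics this sketch does not:

    # Minimal del-key/add-key sketch over a reclass YAML file (PyYAML).
    import yaml

    def del_key(path, dotted):
        with open(path) as f:
            data = yaml.safe_load(f)
        node = data
        keys = dotted.split('.')
        for key in keys[:-1]:
            node = node[key]
        node.pop(keys[-1], None)
        with open(path, 'w') as f:
            yaml.safe_dump(data, f, default_flow_style=False)

    def add_class(path, cls):
        # Approximates "add-key 'classes' ... --merge" for a list key.
        with open(path) as f:
            data = yaml.safe_load(f)
        data.setdefault('classes', []).append(cls)
        with open(path, 'w') as f:
            yaml.safe_dump(data, f, default_flow_style=False)

    del_key('infra/config.yml',
            'parameters.reclass.storage.node.openstack_compute_rack01')
    add_class('infra/config.yml',
              'system.reclass.storage.system.openstack_compute_multi')
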
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
index d27b6ae..dc4d2bc 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
@@ -2,73 +2,38 @@
{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
{% set LAB_CONFIG_NAME = 'cookied-bm-mcp-dvr-vxlan' %}
+{% set SALT_VERSION = os_env('SALT_VERSION', '2017.7') %}
# Name of the context file (without the fixed .yaml extension) used to render the Environment model
{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','cookied-bm-mcp-dvr-vxlan') %}
# Path to the context files used to render Cluster and Environment models
-{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml' %}
-{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-vcp-environment.yaml', 'salt-context-lab03-environment.yaml'] %}
-{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2416') %}
-{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2417') %}
-
+{% set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml' %}
+{% set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-vcp-environment.yaml', 'salt-context-lab03-environment.yaml'] %}
+{% set CONTROL_VLAN = os_env('CONTROL_VLAN', '2403') %}
+{% set TENANT_VLAN = os_env('TENANT_VLAN', '2406') %}
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'proposed') %}
+{% set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE+ "/saltstack-2016.3/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main") %}
{% import 'shared-salt.yaml' as SHARED with context %}
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-- description: Re-install all the fromulas
- cmd: |
- set -e;
- apt-get install -y salt-formula-*
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-
-- description: Sync formulas to service
- cmd: |
- set -e;
- RECLASS_ROOT=${RECLASS_ROOT:-/srv/salt/reclass/};
- FORMULAS_PATH=${FORMULAS_PATH:-/usr/share/salt-formulas};
- [ ! -d ${RECLASS_ROOT}/classes/service ] && mkdir -p ${RECLASS_ROOT}/classes/service;
- for formula_service in $(ls /usr/share/salt-formulas/reclass/service/); do
- #Since some salt formula names contain "-" and in symlinks they should contain "_" adding replacement;
- formula_service=${formula_service//-/$'_'};
- if [ ! -L "${RECLASS_ROOT}/classes/service/${formula_service}" ]; then
- ln -sf ${FORMULAS_PATH}/reclass/service/${formula_service} ${RECLASS_ROOT}/classes/service/${formula_service};
- fi;
- done
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-- description: Temporary WR for cinder backend defined by default in reclass.system
- cmd: |
- sed -i 's/backend\:\ {}//g' /srv/salt/reclass/classes/system/cinder/control/cluster.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
- description: "Workaround for rack01 compute generator"
cmd: |
set -e;
. /root/venv-reclass-tools/bin/activate;
# Remove rack01 key
reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-
# Add openstack_compute_node definition from system
reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml --merge;
-
- # Set ipaddresses for our nodes
- reclass-tools add-key parameters._param.openstack_compute_node01_control_address 10.167.4.3 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools add-key parameters._param.openstack_compute_node02_control_address 10.167.4.31 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools add-key parameters._param.openstack_compute_node01_tenant_address 10.167.6.3 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
- reclass-tools add-key parameters._param.openstack_compute_node02_tenant_address 10.167.6.31 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
-
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
+
- description: Temporary workaround for removing cinder-volume from CTL nodes
cmd: |
sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
@@ -77,13 +42,15 @@
retry: {count: 1, delay: 5}
skip_fail: true
-#- description: Refresh pillars and install dependencies for salt-master
-# cmd: |
-# salt '*' saltutil.refresh_pillar; sleep 5;
-# salt '*' state.sls salt.master;
-# salt '*' saltutil.refresh_pillar; sleep 5;
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
+- description: Temporary WR for correct bridge name according to environment templates
+ cmd: |
+ sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/gateway.yml;
+ salt '*' saltutil.refresh_pillar;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
+
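
The bridge-name workaround recurs in each generator template: the cookiecutter output uses br-ctl/br-mgm while the environment templates expect br_ctl/br_mgm, so the generated model is patched before inventory generation. The same rewrite in Python, as a sketch; the paths are the ones from the step:

    # Sketch: rename br-ctl/br-mgm to br_ctl/br_mgm in the generated
    # cluster model, matching the sed workaround above.
    import re

    def fix_bridges(path):
        with open(path) as f:
            text = f.read()
        with open(path, 'w') as f:
            f.write(re.sub(r'br-(ctl|mgm)', r'br_\1', text))

    base = '/srv/salt/reclass/classes/cluster/cookied-bm-mcp-dvr-vxlan/'
    for rel in ('infra/kvm.yml', 'openstack/gateway.yml'):
        fix_bridges(base + rel)
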
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml
index 125b6e1..95b6442 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml
@@ -17,37 +17,14 @@
{# set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE+ "/saltstack-2016.3/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main") #}
{% import 'shared-salt.yaml' as SHARED with context %}
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
-
-- description: Re-install all the fromulas
- cmd: |
- set -e;
- apt-get install -y salt-formula-*
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-
-- description: Sync formulas to service
- cmd: |
- set -e;
- RECLASS_ROOT=${RECLASS_ROOT:-/srv/salt/reclass/};
- FORMULAS_PATH=${FORMULAS_PATH:-/usr/share/salt-formulas};
- [ ! -d ${RECLASS_ROOT}/classes/service ] && mkdir -p ${RECLASS_ROOT}/classes/service;
- for formula_service in $(ls /usr/share/salt-formulas/reclass/service/); do
- #Since some salt formula names contain "-" and in symlinks they should contain "_" adding replacement;
- formula_service=${formula_service//-/$'_'};
- if [ ! -L "${RECLASS_ROOT}/classes/service/${formula_service}" ]; then
- ln -sf ${FORMULAS_PATH}/reclass/service/${formula_service} ${RECLASS_ROOT}/classes/service/${formula_service};
- fi;
- done
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-
- description: "Workaround for rack01 compute generator"
cmd: |
set -e;
@@ -69,5 +46,15 @@
retry: {count: 1, delay: 5}
skip_fail: true
+- description: Temporary WR for correct bridge name according to environment templates
+ cmd: |
+ sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+ sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/gateway.yml;
+ salt '*' saltutil.refresh_pillar;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/openstack.yaml
index 98e2784..653b461 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/openstack.yaml
@@ -331,13 +331,6 @@
retry: {count: 3, delay: 5}
skip_fail: false
-- description: Create manila type
- cmd: |
- salt 'ctl01*' cmd.run '. /root/keystonercv3; manila type-create default false --create_share_from_snapshot_support true --revert_to_snapshot_support true --mount_snapshot_support true --snapshot_support true --is_public true'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
- description: Create CIFS and NFS share and check it status
cmd: |
salt 'ctl01*' cmd.run '. /root/keystonercv3; manila create CIFS 1 --share-type=default';
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml
index 7417c09..bf1ab55 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml
@@ -316,13 +316,6 @@
retry: {count: 3, delay: 15}
skip_fail: false
-- description: Create manila type
- cmd: |
- salt 'ctl01*' cmd.run '. /root/keystonercv3; manila type-create default false --create_share_from_snapshot_support true --revert_to_snapshot_support true --mount_snapshot_support true --snapshot_support true --is_public true'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
- description: Create CIFS and NFS share and check it status
cmd: |
salt 'ctl01*' cmd.run '. /root/keystonercv3; manila create CIFS 1 --share-type=default';
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
index abed769..4d64fe4 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
@@ -310,13 +310,6 @@
retry: {count: 3, delay: 15}
skip_fail: false
-- description: Create manila type
- cmd: |
- salt 'ctl01*' cmd.run '. /root/keystonercv3; manila type-create default false --create_share_from_snapshot_support true --revert_to_snapshot_support true --mount_snapshot_support true --snapshot_support true --is_public true'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
- description: Create CIFS and NFS share and check it status
cmd: |
salt 'ctl01*' cmd.run '. /root/keystonercv3; manila create CIFS 1 --share-type=default';
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
index e858bcd..0b9e418 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
@@ -329,13 +329,6 @@
retry: {count: 3, delay: 15}
skip_fail: false
-- description: Create manila type
- cmd: |
- salt 'ctl01*' cmd.run '. /root/keystonercv3; manila type-create default false --create_share_from_snapshot_support true --revert_to_snapshot_support true --mount_snapshot_support true --snapshot_support true --is_public true'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
- description: Create CIFS and NFS share and check it status
cmd: |
salt 'ctl01*' cmd.run '. /root/keystonercv3; manila create CIFS 1 --share-type=default';
diff --git a/tcp_tests/templates/virtual-offline-ssl/openstack.yaml b/tcp_tests/templates/virtual-offline-ssl/openstack.yaml
index 0436506..ba1899c 100644
--- a/tcp_tests/templates/virtual-offline-ssl/openstack.yaml
+++ b/tcp_tests/templates/virtual-offline-ssl/openstack.yaml
@@ -274,13 +274,6 @@
retry: {count: 3, delay: 5}
skip_fail: false
-- description: Create manila type
- cmd: |
- salt 'ctl01*' cmd.run '. /root/keystonercv3; manila type-create default false --create_share_from_snapshot_support true --revert_to_snapshot_support true --mount_snapshot_support true --snapshot_support true --is_public true'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
- description: Create CIFS and NFS share and check it status
cmd: |
salt 'ctl01*' cmd.run '. /root/keystonercv3; manila create CIFS 1 --share-type=default';
diff --git a/tcp_tests/tests/system/test_pipeline_deploy.py b/tcp_tests/tests/system/test_pipeline_deploy.py
index 81e5e8b..1d2e79f 100644
--- a/tcp_tests/tests/system/test_pipeline_deploy.py
+++ b/tcp_tests/tests/system/test_pipeline_deploy.py
@@ -16,6 +16,7 @@
from tcp_tests.managers.jenkins.client import JenkinsClient
from tcp_tests import logger
+from tcp_tests import settings
LOG = logger.logger
@@ -73,4 +74,40 @@
build_id=build[1])['result']
assert result == 'SUCCESS', "Deploy CICD was failed"
+ @pytest.mark.fail_snapshot
+ def test_pipeline_dpdk(self, show_step, underlay,
+ salt_deployed, tempest_actions):
+ """Deploy bm via pipeline
+
+ Scenario:
+ 1. Prepare salt on hosts.
+ .........................
+ """
+ nodes = underlay.node_names()
+ LOG.info("Nodes - {}".format(nodes))
+ cfg_node = 'cfg01.cookied-bm-mcp-ovs-dpdk.local'
+ salt_api = salt_deployed.get_pillar(
+ cfg_node, '_param:jenkins_salt_api_url')
+ salt_api = salt_api[0].get(cfg_node)
+ jenkins = JenkinsClient(
+ host='http://172.16.49.2:8081',
+ username='admin',
+ password='r00tme')
+
+ # Creating param list for openstack deploy
+ params = jenkins.make_defults_params('deploy_openstack')
+ params['SALT_MASTER_URL'] = salt_api
+ params['STACK_INSTALL'] = 'core,kvm,cicd,ovs,openstack'
+ show_step(4)
+ build = jenkins.run_build('deploy_openstack', params)
+ jenkins.wait_end_of_build(
+ name=build[0],
+ build_id=build[1],
+ timeout=60 * 60 * 4)
+ result = jenkins.build_info(name=build[0],
+ build_id=build[1])['result']
+ assert result == 'SUCCESS', "Deploy openstack was failed"
+
+ if settings.RUN_TEMPEST:
+ tempest_actions.prepare_and_run_tempest()
LOG.info("*************** DONE **************")
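
For orientation, the calling pattern of the new DPDK pipeline test, distilled into a standalone snippet; every name is taken from the test itself, and the host and credentials are the lab defaults it hardcodes:

    # Distilled JenkinsClient flow from test_pipeline_dpdk (sketch).
    from tcp_tests.managers.jenkins.client import JenkinsClient

    jenkins = JenkinsClient(host='http://172.16.49.2:8081',
                            username='admin', password='r00tme')

    params = jenkins.make_defults_params('deploy_openstack')
    # SALT_MASTER_URL is resolved from pillar in the real test.
    params['STACK_INSTALL'] = 'core,kvm,cicd,ovs,openstack'

    build = jenkins.run_build('deploy_openstack', params)
    jenkins.wait_end_of_build(name=build[0], build_id=build[1],
                              timeout=60 * 60 * 4)  # four-hour ceiling
    assert jenkins.build_info(name=build[0],
                              build_id=build[1])['result'] == 'SUCCESS'
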