Merge "[mitaka,newton,ocata] Remove cinder-volume from ctl, and add it to cmp nodes"
diff --git a/jobs/pipelines/deploy-cicd-and-run-tests.groovy b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
index 3e96c84..55dda48 100644
--- a/jobs/pipelines/deploy-cicd-and-run-tests.groovy
+++ b/jobs/pipelines/deploy-cicd-and-run-tests.groovy
@@ -32,21 +32,19 @@
currentBuild.result = 'SUCCESS'
} catch (e) {
- common.printMsg("Deploy is failed: " + e.message , "red")
+ common.printMsg("Deploy failed: " + e.message, "purple")
+ report_text = e.message
+ def snapshot_name = "deploy_failed"
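+ // suspend the env and snapshot it in the failed state, so the deploy failure can be inspected or reverted later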
shared.run_cmd("""\
dos.py suspend ${ENV_NAME} || true
- dos.py snapshot ${ENV_NAME} deploy_failed || true
+ dos.py snapshot ${ENV_NAME} ${snapshot_name} || true
""")
if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
shared.run_cmd("""\
dos.py resume ${ENV_NAME} || true
""")
- } else {
- shared.run_cmd("""\
- dos.py destroy ${ENV_NAME} || true
- """)
}
- report_text = e.message
+ shared.devops_snapshot_info(snapshot_name)
throw e
} finally {
shared.create_deploy_result_report(steps, currentBuild.result, report_text)
@@ -60,22 +58,19 @@
}
} catch (e) {
- common.printMsg("Tests are failed: " + e.message, "red")
+ common.printMsg("Tests failed: " + e.message, "purple")
+ def snapshot_name = "tests_failed"
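+ // suspend and snapshot the env in the failed state; it is resumed below unless the env will be shut down on teardown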
shared.run_cmd("""\
dos.py suspend ${ENV_NAME} || true
- dos.py snapshot ${ENV_NAME} tests_failed || true
+ dos.py snapshot ${ENV_NAME} ${snapshot_name} || true
""")
- throw e
- } finally {
if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
shared.run_cmd("""\
dos.py resume ${ENV_NAME} || true
""")
- } else {
- shared.run_cmd("""\
- dos.py destroy ${ENV_NAME} || true
- """)
}
+ shared.devops_snapshot_info(snapshot_name)
+ throw e
}
}
@@ -83,12 +78,21 @@
throttle(['fuel_devops_environment']) {
node ("${NODE_NAME}") {
try {
+ // run deploy stages
deploy(shared, common, steps)
+ // run test stages
test(shared, common, steps)
} catch (e) {
- common.printMsg("Job is failed: " + e.message, "red")
+ common.printMsg("Job failed: " + e.message, "purple")
throw e
} finally {
+ // shutdown the environment if required
+ if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "true") {
+ shared.run_cmd("""\
+ dos.py destroy ${ENV_NAME} || true
+ """)
+ }
+ // report results to testrail
shared.swarm_testrail_report(steps)
}
}
diff --git a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
index ce32c24..64c8783 100644
--- a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
+++ b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
@@ -103,7 +103,7 @@
}
} catch (e) {
- common.printMsg("Job is failed", "red")
+ common.printMsg("Job failed", "purple")
throw e
} finally {
// TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-deploy-cicd.groovy b/jobs/pipelines/swarm-deploy-cicd.groovy
index 538f5ea..5ace2ca 100644
--- a/jobs/pipelines/swarm-deploy-cicd.groovy
+++ b/jobs/pipelines/swarm-deploy-cicd.groovy
@@ -66,7 +66,7 @@
}
} catch (e) {
- common.printMsg("Job is failed", "red")
+ common.printMsg("Job failed", "purple")
throw e
} finally {
// TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-deploy-platform.groovy b/jobs/pipelines/swarm-deploy-platform.groovy
index 78e363f..9a6b1d1 100644
--- a/jobs/pipelines/swarm-deploy-platform.groovy
+++ b/jobs/pipelines/swarm-deploy-platform.groovy
@@ -66,7 +66,7 @@
}
} catch (e) {
- common.printMsg("Job is failed", "red")
+ common.printMsg("Job failed", "purple")
throw e
} finally {
// TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-run-pytest.groovy b/jobs/pipelines/swarm-run-pytest.groovy
index 5d7bd8d..0dd2d7a 100644
--- a/jobs/pipelines/swarm-run-pytest.groovy
+++ b/jobs/pipelines/swarm-run-pytest.groovy
@@ -69,13 +69,23 @@
py.test --junit-xml=nosetests.xml ${RUN_TEST_OPTS}
- dos.py suspend ${ENV_NAME}
- dos.py snapshot ${ENV_NAME} test_completed
""")
+
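+ // suspend the env and snapshot the completed test run, then resume it unless the env will be shut down on teardown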
+ def snapshot_name = "test_completed"
+ shared.run_cmd("""\
+ dos.py suspend ${ENV_NAME}
+ dos.py snapshot ${ENV_NAME} ${snapshot_name}
+ """)
+ if ("${env.SHUTDOWN_ENV_ON_TEARDOWN}" == "false") {
+ shared.run_cmd("""\
+ dos.py resume ${ENV_NAME}
+ """)
+ }
+ shared.devops_snapshot_info(snapshot_name)
}
} catch (e) {
- common.printMsg("Job is failed", "red")
+ common.printMsg("Job failed", "purple")
throw e
} finally {
// TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-testrail-report.groovy b/jobs/pipelines/swarm-testrail-report.groovy
index 3da7c04..c43b3bb 100644
--- a/jobs/pipelines/swarm-testrail-report.groovy
+++ b/jobs/pipelines/swarm-testrail-report.groovy
@@ -133,7 +133,7 @@
}
} catch (e) {
- common.printMsg("Job is failed", "red")
+ common.printMsg("Job failed", "purple")
throw e
} finally {
// reporting is failed for some reason
diff --git a/src/com/mirantis/system_qa/SharedPipeline.groovy b/src/com/mirantis/system_qa/SharedPipeline.groovy
index 7f002ed..dcf05da 100644
--- a/src/com/mirantis/system_qa/SharedPipeline.groovy
+++ b/src/com/mirantis/system_qa/SharedPipeline.groovy
@@ -55,7 +55,7 @@
if (job_info.getResult() != "SUCCESS") {
currentBuild.result = job_info.getResult()
def build_number = job_info.getNumber()
- common.printMsg("Job '${job_name}' failed, getting details", "red")
+ common.printMsg("Job '${job_name}' failed, getting details", "purple")
def workflow_details=run_cmd_stdout("""\
export JOB_NAME=${job_name}
export BUILD_NUMBER=${build_number}
@@ -83,7 +83,7 @@
def job_url = "${build_url}"
currentBuild.result = build_status
if (junit_report_filename) {
- common.printMsg("Job '${job_url}' failed with status ${build_status}, getting details", "red")
+ common.printMsg("Job '${job_url}' failed with status ${build_status}, getting details", "purple")
step($class: 'hudson.plugins.copyartifact.CopyArtifact',
projectName: job_name,
selector: specific("${build_number}"),
@@ -94,10 +94,8 @@
def String junit_report_xml = readFile("${junit_report_filename}")
def String junit_report_xml_pretty = new XmlUtil().serialize(junit_report_xml)
- // Replace '<' and '>' to '&lt;' and '&gt;' to avoid conflicts between xml tags in the message and JUnit report
- def String junit_report_xml_filtered = junit_report_xml_pretty.replaceAll("<","&lt;").replaceAll(">", "&gt;")
def String msg = "Job '${job_url}' failed with status ${build_status}, JUnit report:\n"
- throw new Exception(msg + junit_report_xml_filtered)
+ throw new Exception(msg + junit_report_xml_pretty)
} else {
throw new Exception("Job '${job_url}' failed with status ${build_status}, please check the console output.")
}
@@ -340,7 +338,7 @@
""")
} catch (e) {
def common = new com.mirantis.mk.Common()
- common.printMsg("Product job 'deploy_openstack' failed, getting details", "red")
+ common.printMsg("Product job 'deploy_openstack' failed, getting details", "purple")
def workflow_details=run_cmd_stdout("""\
. ./tcp_tests/utils/env_salt
. ./tcp_tests/utils/env_jenkins_day01
@@ -371,7 +369,7 @@
""")
} catch (e) {
def common = new com.mirantis.mk.Common()
- common.printMsg("Product job 'deploy_openstack' failed, getting details", "red")
+ common.printMsg("Product job 'deploy_openstack' failed, getting details", "purple")
def workflow_details=run_cmd_stdout("""\
. ./tcp_tests/utils/env_salt
. ./tcp_tests/utils/env_jenkins_cicd
@@ -398,6 +396,31 @@
}
}
+def devops_snapshot_info(snapshot_name) {
+ // Print helper message after snapshot
+ def common = new com.mirantis.mk.Common()
+
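+ // get SALT_MASTER_IP exported by env_salt; only the last token of the output is kept, presumably to skip any extra text printed while sourcing the script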
+ def SALT_MASTER_IP=run_cmd_stdout("""\
+ . ./tcp_tests/utils/env_salt
+ echo \$SALT_MASTER_IP
+ """).trim().split().last()
+ def login = "root" // set fixed 'root' login for now
+ def password = "r00tme" // set fixed 'r00tme' password for now
+ def key_file = "${env.WORKSPACE}/id_rsa" // set fixed path in the WORKSPACE
+ def VENV_PATH='/home/jenkins/fuel-devops30'
+
+ common.printMsg("""\
+#########################
+# To revert the snapshot:
+#########################
+. ${VENV_PATH}/bin/activate;
+dos.py revert ${ENV_NAME} ${snapshot_name};
+dos.py resume ${ENV_NAME};
+# dos.py time-sync ${ENV_NAME}; # Optional\n
+ssh -i ${key_file} ${login}@${SALT_MASTER_IP} # Optional password: ${password}
+""", "cyan")
+}
+
def devops_snapshot(stack) {
// Make the snapshot with name "${stack}_deployed"
// for all VMs in the environment.
@@ -418,6 +441,7 @@
cp \$(pwd)/${ENV_NAME}_salt_deployed.ini \$(pwd)/${ENV_NAME}_${stack}_deployed.ini
fi
""")
+ devops_snapshot_info("${stack}_deployed")
}
def get_steps_list(steps) {
@@ -429,11 +453,15 @@
// <filename> is name of the XML report file that will be created
// <status> is one of the 'success', 'skipped', 'failure' or 'error'
// 'error' status is assumed as 'Blocker' in TestRail reporter
+
+ // Replace '<' and '>' with '&lt;' and '&gt;' to avoid conflicts between xml tags in the message and the JUnit report
+ def String text_filtered = text.replaceAll("<","&lt;").replaceAll(">", "&gt;")
+
def script = """\
<?xml version=\"1.0\" encoding=\"utf-8\"?>
<testsuite>
<testcase classname=\"${classname}\" name=\"${name}\" time=\"0\">
- <${status} message=\"${status_message}\">${text}</${status}>
+ <${status} message=\"${status_message}\">${text_filtered}</${status}>
<system-out>${stdout}</system-out>
<system-err>${stderr}</system-err>
</testcase>
diff --git a/tcp_tests/requirements.txt b/tcp_tests/requirements.txt
index f7f9c9f..54c2cdd 100644
--- a/tcp_tests/requirements.txt
+++ b/tcp_tests/requirements.txt
@@ -9,7 +9,7 @@
pytest>=2.9,<=3.2.5
docker-py
docker-compose==1.7.1
-urllib3
+urllib3==1.23
junit-xml
jinja2>=2.9
jira
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index 8b4bffa..0361835 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -327,10 +327,10 @@
default='sbPfel23ZigJF3Bm'),
ct.Cfg('kubernetes_docker_package', ct.String(), default=''),
ct.Cfg('kubernetes_hyperkube_image', ct.String(),
- default='{}/mirantis/kubernetes/hyperkube-amd64:v1.10.4-4'.format(
+ default='{}/mirantis/kubernetes/hyperkube-amd64:v1.11.3-2'.format(
settings.DOCKER_REGISTRY)),
ct.Cfg('kubernetes_pause_image', ct.String(),
- default='{}/mirantis/kubernetes/pause-amd64:v1.10.4-4'.format(
+ default='{}/mirantis/kubernetes/pause-amd64:v1.11.3-2'.format(
settings.DOCKER_REGISTRY)),
ct.Cfg('kubernetes_calico_image', ct.String(),
default='{}/mirantis/projectcalico/calico/node:v3.1.3'.format(
@@ -357,17 +357,18 @@
ct.Cfg('kubernetes_virtlet_enabled', ct.Boolean(),
help="", default=False),
ct.Cfg('kubernetes_virtlet_image', ct.String(),
- help="", default='mirantis/virtlet:v1.1.0'),
+ help="", default='mirantis/virtlet:v1.4.1'),
ct.Cfg('kubernetes_dns', ct.Boolean(),
help="", default=True),
ct.Cfg('kubernetes_externaldns_enabled', ct.Boolean(),
help="", default=False),
ct.Cfg('kubernetes_externaldns_image', ct.String(),
- help="", default='mirantis/external-dns:latest'),
+ help="", default='{}/mirantis/external-dns/external-dns:'
+ 'v0.5.6-2'.format(settings.DOCKER_REGISTRY)),
ct.Cfg('kubernetes_externaldns_provider', ct.String(),
help="", default='coredns'),
ct.Cfg('kubernetes_coredns_enabled', ct.Boolean(),
- help="", default=False),
+ help="", default=True),
ct.Cfg('kubernetes_metallb_enabled', ct.Boolean(),
help="", default=False),
ct.Cfg('kubernetes_ingressnginx_enabled', ct.Boolean(),
@@ -389,9 +390,9 @@
default=False),
ct.Cfg('k8s_conformance_image', ct.String(),
default='docker-prod-virtual.docker.mirantis.net/mirantis/'
- 'kubernetes/k8s-conformance:v1.10.4-4'),
+ 'kubernetes/k8s-conformance:v1.11.3-2'),
ct.Cfg('k8s_update_chain', ct.String(),
- default='v1.9.8-4 v1.10.4-4')
+ default='v1.9.8-4 v1.10.4-4 v1.11.3-2')
]
day1_cfg_config_opts = [
diff --git a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
index 6f390af..0699684 100644
--- a/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-pike-k8s-contrail/salt-context-cookiecutter-k8s-contrail.yaml
@@ -32,8 +32,6 @@
bmk_enabled: 'False'
ceph_enabled: 'False'
auditd_enabled: 'False'
- kubernetes_coredns_enabled: False
- kubernetes_kubedns_enabled: True
cicd_control_node01_address: 10.167.8.91
cicd_control_node01_hostname: cid01
cicd_control_node02_address: 10.167.8.92
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
index bf26588..0414b30 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico-sl/cookiecutter-context-k8s-sl.yaml
@@ -191,4 +191,4 @@
tenant_vlan: '20'
upstream_proxy_enabled: 'False'
use_default_network_scheme: 'False'
- vnf_onboarding_enabled: 'False'
+ vnf_onboarding_enabled: 'False'
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml b/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml
index 6b63c60..f042844 100644
--- a/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml
+++ b/tcp_tests/templates/cookied-cicd-k8s-calico/cookiecutter-context-k8s.yaml
@@ -161,4 +161,4 @@
tenant_network_subnet: 10.167.6.0/24
tenant_vlan: '20'
upstream_proxy_enabled: 'False'
- use_default_network_scheme: 'False'
+ use_default_network_scheme: 'False'
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
index f82701a..111520b 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
@@ -185,3 +185,20 @@
tenant_vlan: '20'
upstream_proxy_enabled: 'False'
use_default_network_scheme: 'False'
+ manila_enabled: 'True'
+ manila_share_backend: 'lvm'
+ manila_lvm_volume_name: 'manila-volume'
+ manila_lvm_devices: '/dev/loop1'
+ openstack_share_address: 172.16.10.68
+ openstack_share_node01_address: 172.16.10.69
+ openstack_share_node02_address: 172.16.10.70
+ openstack_share_node03_address: 172.16.10.71
+ openstack_share_node01_deploy_address: 192.168.10.69
+ openstack_share_node02_deploy_address: 192.168.10.70
+ openstack_share_node03_deploy_address: 192.168.10.71
+ openstack_share_hostname: share
+ openstack_share_node01_hostname: share01
+ openstack_share_node02_hostname: share02
+ openstack_share_node03_hostname: share03
+ designate_backend: bind
+ designate_enabled: 'True'
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-environment.yaml
index 0f806cf..04c1e4c 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/_context-environment.yaml
@@ -1,5 +1,5 @@
nodes:
- cfg01.mcp11-ovs-dpdk.local:
+ cfg01.mcp-pike-dvr.local:
reclass_storage_name: infra_config_node01
roles:
- infra_config
@@ -10,16 +10,13 @@
ens4:
role: single_ctl
- ctl01.mcp11-ovs-dpdk.local:
+ ctl01.mcp-pike-dvr.local:
reclass_storage_name: openstack_control_node01
roles:
- infra_kvm
- openstack_control_leader
- openstack_database_leader
- openstack_message_queue
-# - features_designate_pool_manager_database
-# - features_designate_pool_manager
-# - features_designate_pool_manager_keystone
- linux_system_codename_xenial
interfaces:
ens3:
@@ -27,15 +24,13 @@
ens4:
role: single_ctl
- ctl02.mcp11-ovs-dpdk.local:
+ ctl02.mcp-pike-dvr.local:
reclass_storage_name: openstack_control_node02
roles:
- infra_kvm
- openstack_control
- openstack_database
- openstack_message_queue
-# - features_designate_pool_manager_database
-# - features_designate_pool_manager
- linux_system_codename_xenial
interfaces:
ens3:
@@ -43,15 +38,13 @@
ens4:
role: single_ctl
- ctl03.mcp11-ovs-dpdk.local:
+ ctl03.mcp-pike-dvr.local:
reclass_storage_name: openstack_control_node03
roles:
- infra_kvm
- openstack_control
- openstack_database
- openstack_message_queue
-# - features_designate_pool_manager_database
-# - features_designate_pool_manager
- linux_system_codename_xenial
interfaces:
ens3:
@@ -59,11 +52,10 @@
ens4:
role: single_ctl
- prx01.mcp11-ovs-dpdk.local:
+ prx01.mcp-pike-dvr.local:
reclass_storage_name: openstack_proxy_node01
roles:
- openstack_proxy
-# - features_designate_pool_manager_proxy
- linux_system_codename_xenial
interfaces:
ens3:
@@ -71,7 +63,7 @@
ens4:
role: single_ctl
- mon01.mcp11-ovs-dpdk.local:
+ mon01.mcp-pike-dvr.local:
reclass_storage_name: stacklight_server_node01
roles:
- stacklightv2_server_leader
@@ -82,7 +74,7 @@
ens4:
role: single_ctl
- mon02.mcp11-ovs-dpdk.local:
+ mon02.mcp-pike-dvr.local:
reclass_storage_name: stacklight_server_node02
roles:
- stacklightv2_server
@@ -93,7 +85,7 @@
ens4:
role: single_ctl
- mon03.mcp11-ovs-dpdk.local:
+ mon03.mcp-pike-dvr.local:
reclass_storage_name: stacklight_server_node03
roles:
- stacklightv2_server
@@ -104,7 +96,7 @@
ens4:
role: single_ctl
- log01.mcp11-ovs-dpdk.local:
+ log01.mcp-pike-dvr.local:
reclass_storage_name: stacklight_log_node01
roles:
- stacklight_log_leader_v2
@@ -115,7 +107,7 @@
ens4:
role: single_ctl
- log02.mcp11-ovs-dpdk.local:
+ log02.mcp-pike-dvr.local:
reclass_storage_name: stacklight_log_node02
roles:
- stacklight_log
@@ -126,7 +118,7 @@
ens4:
role: single_ctl
- log03.mcp11-ovs-dpdk.local:
+ log03.mcp-pike-dvr.local:
reclass_storage_name: stacklight_log_node03
roles:
- stacklight_log
@@ -137,7 +129,7 @@
ens4:
role: single_ctl
- mtr01.mcp11-ovs-dpdk.local:
+ mtr01.mcp-pike-dvr.local:
reclass_storage_name: stacklight_telemetry_node01
roles:
- stacklight_telemetry_leader
@@ -148,7 +140,7 @@
ens4:
role: single_ctl
- mtr02.mcp11-ovs-dpdk.local:
+ mtr02.mcp-pike-dvr.local:
reclass_storage_name: stacklight_telemetry_node02
roles:
- stacklight_telemetry
@@ -159,7 +151,7 @@
ens4:
role: single_ctl
- mtr03.mcp11-ovs-dpdk.local:
+ mtr03.mcp-pike-dvr.local:
reclass_storage_name: stacklight_telemetry_node03
roles:
- stacklight_telemetry
@@ -171,7 +163,7 @@
role: single_ctl
# Generator-based computes. For compatibility only
- cmp<<count>>.mcp11-ovs-dpdk.local:
+ cmp<<count>>.mcp-pike-dvr.local:
reclass_storage_name: openstack_compute_rack01
roles:
- openstack_compute
@@ -186,7 +178,7 @@
ens6:
role: bond1_ab_ovs_floating
- gtw01.mcp11-ovs-dpdk.local:
+ gtw01.mcp-pike-dvr.local:
reclass_storage_name: openstack_gateway_node01
roles:
- openstack_gateway
@@ -201,36 +193,57 @@
ens6:
role: bond1_ab_ovs_floating
-# dns01.mcp11-ovs-dpdk.local:
-# reclass_storage_name: openstack_dns_node01
-# roles:
-# - features_designate_pool_manager_dns
-# - linux_system_codename_xenial
-# classes:
-# - system.linux.system.repo.mcp.extra
-# - system.linux.system.repo.mcp.apt_mirantis.openstack
-# - system.linux.system.repo.mcp.apt_mirantis.ubuntu
-# - system.linux.system.repo.mcp.apt_mirantis.saltstack
-# interfaces:
-# ens3:
-# role: single_dhcp
-# ens4:
-# role: single_ctl
-# single_address: ${_param:openstack_dns_node01_address}
-#
-# dns02.mcp11-ovs-dpdk.local:
-# reclass_storage_name: openstack_dns_node02
-# roles:
-# - features_designate_pool_manager_dns
-# - linux_system_codename_xenial
-# classes:
-# - system.linux.system.repo.mcp.extra
-# - system.linux.system.repo.mcp.apt_mirantis.openstack
-# - system.linux.system.repo.mcp.apt_mirantis.ubuntu
-# - system.linux.system.repo.mcp.apt_mirantis.saltstack
-# interfaces:
-# ens3:
-# role: single_dhcp
-# ens4:
-# role: single_ctl
-# single_address: ${_param:openstack_dns_node02_address}
+ share01.mcp-pike-dvr.local:
+ reclass_storage_name: openstack_share_node01
+ roles:
+ - openstack_share
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ share02.mcp-pike-dvr.local:
+ reclass_storage_name: openstack_share_node02
+ roles:
+ - openstack_share
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ share03.mcp-pike-dvr.local:
+ reclass_storage_name: openstack_share_node03
+ roles:
+ - openstack_share
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ dns01.mcp-pike-dvr.local:
+ reclass_storage_name: openstack_dns_node01
+ roles:
+ - openstack_dns
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ dns02.mcp-pike-dvr.local:
+ reclass_storage_name: openstack_dns_node02
+ roles:
+ - openstack_dns
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/core.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/core.yaml
index 6e69e30..a6b2cc6 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/core.yaml
@@ -1,119 +1,19 @@
{% from 'cookied-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
{% import 'shared-backup-restore.yaml' as BACKUP with context %}
-# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml
index 8c5bee9..000f4f9 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/openstack.yaml
@@ -35,65 +35,24 @@
node_name: {{ HOSTNAME_CFG01 }}
{%- endif %}
+# Install OpenStack control services
+
{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-# isntall designate
-#- description: Install powerdns
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@powerdns:server' state.sls powerdns.server
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-
-#- description: Install designate
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@designate:server' state.sls designate -b 1
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 5, delay: 10}
-# skip_fail: false
-
{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
+{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE(INSTALL_BIND=true) }}
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply(as in doc) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_MANILA() }}
diff --git a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml
index f135f1e..698c854 100644
--- a/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-dvr/underlay.yaml
@@ -30,8 +30,11 @@
{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
-#{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
-#{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_SHARE02 = os_env('HOSTNAME_SHARE02', 'share02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_SHARE03 = os_env('HOSTNAME_SHARE03', 'share03.' + DOMAIN_NAME) %}
{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
template:
@@ -61,9 +64,12 @@
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
-# default_{{ HOSTNAME_DNS01 }}: +111
-# default_{{ HOSTNAME_DNS02 }}: +112
+ default_{{ HOSTNAME_DNS01 }}: +111
+ default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +69
+ default_{{ HOSTNAME_SHARE02 }}: +70
+ default_{{ HOSTNAME_SHARE03 }}: +71
ip_ranges:
dhcp: [+90, -10]
@@ -89,9 +95,12 @@
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
-# default_{{ HOSTNAME_DNS01 }}: +111
-# default_{{ HOSTNAME_DNS02 }}: +112
+ default_{{ HOSTNAME_DNS01 }}: +111
+ default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +69
+ default_{{ HOSTNAME_SHARE02 }}: +70
+ default_{{ HOSTNAME_SHARE03 }}: +71
ip_ranges:
dhcp: [+90, -10]
@@ -117,9 +126,12 @@
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
-# default_{{ HOSTNAME_DNS01 }}: +111
-# default_{{ HOSTNAME_DNS02 }}: +112
+ default_{{ HOSTNAME_DNS01 }}: +111
+ default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +69
+ default_{{ HOSTNAME_SHARE02 }}: +70
+ default_{{ HOSTNAME_SHARE03 }}: +71
ip_ranges:
dhcp: [+10, -10]
@@ -145,9 +157,12 @@
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
-# default_{{ HOSTNAME_DNS01 }}: +111
-# default_{{ HOSTNAME_DNS02 }}: +112
+ default_{{ HOSTNAME_DNS01 }}: +111
+ default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +69
+ default_{{ HOSTNAME_SHARE02 }}: +70
+ default_{{ HOSTNAME_SHARE03 }}: +71
ip_ranges:
dhcp: [+10, -10]
@@ -711,54 +726,132 @@
interfaces: *all_interfaces
network_config: *all_network_config
-# - name: {{ HOSTNAME_DNS01 }}
-# role: salt_minion
-# params:
-# vcpu: !os_env SLAVE_NODE_CPU, 1
-# memory: !os_env SLAVE_NODE_MEMORY, 2048
-# boot:
-# - hd
-# cloud_init_volume_name: iso
-# cloud_init_iface_up: ens3
-# volumes:
-# - name: system
-# capacity: !os_env NODE_VOLUME_SIZE, 150
-# backing_store: mcp_ubuntu_1604_image
-# format: qcow2
-# - name: iso # Volume with name 'iso' will be used
-# # for store image with cloud-init metadata.
-# capacity: 1
-# format: raw
-# device: cdrom
-# bus: ide
-# cloudinit_meta_data: *cloudinit_meta_data
-# cloudinit_user_data: *cloudinit_user_data_1604
-#
-# interfaces: *all_interfaces
-# network_config: *all_network_config
-#
-# - name: {{ HOSTNAME_DNS02 }}
-# role: salt_minion
-# params:
-# vcpu: !os_env SLAVE_NODE_CPU, 1
-# memory: !os_env SLAVE_NODE_MEMORY, 2048
-# boot:
-# - hd
-# cloud_init_volume_name: iso
-# cloud_init_iface_up: ens3
-# volumes:
-# - name: system
-# capacity: !os_env NODE_VOLUME_SIZE, 150
-# backing_store: mcp_ubuntu_1604_image
-# format: qcow2
-# - name: iso # Volume with name 'iso' will be used
-# # for store image with cloud-init metadata.
-# capacity: 1
-# format: raw
-# device: cdrom
-# bus: ide
-# cloudinit_meta_data: *cloudinit_meta_data
-# cloudinit_user_data: *cloudinit_user_data_1604
-#
-# interfaces: *all_interfaces
-# network_config: *all_network_config
+ - name: {{ HOSTNAME_SHARE01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_SHARE02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_SHARE03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_DNS01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_DNS02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml
index bd458cd..131d153 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml
@@ -185,3 +185,20 @@
tenant_vlan: '20'
upstream_proxy_enabled: 'False'
use_default_network_scheme: 'False'
+ manila_enabled: 'True'
+ manila_share_backend: 'lvm'
+ manila_lvm_volume_name: 'manila-volume'
+ manila_lvm_devices: '/dev/loop1'
+ openstack_share_address: 172.16.10.68
+ openstack_share_node01_address: 172.16.10.69
+ openstack_share_node02_address: 172.16.10.70
+ openstack_share_node03_address: 172.16.10.71
+ openstack_share_node01_deploy_address: 192.168.10.69
+ openstack_share_node02_deploy_address: 192.168.10.70
+ openstack_share_node03_deploy_address: 192.168.10.71
+ openstack_share_hostname: share
+ openstack_share_node01_hostname: share01
+ openstack_share_node02_hostname: share02
+ openstack_share_node03_hostname: share03
+ designate_backend: powerdns
+ designate_enabled: 'True'
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/_context-environment.yaml
index 4c7091b..2fd6295 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/_context-environment.yaml
@@ -1,5 +1,5 @@
nodes:
- cfg01.mcp11-ovs-dpdk.local:
+ cfg01.mcp-pike-ovs.local:
reclass_storage_name: infra_config_node01
roles:
- infra_config
@@ -10,17 +10,13 @@
ens4:
role: single_ctl
- ctl01.mcp11-ovs-dpdk.local:
+ ctl01.mcp-pike-ovs.local:
reclass_storage_name: openstack_control_node01
roles:
- infra_kvm
- openstack_control_leader
- openstack_database_leader
- openstack_message_queue
- # - features_designate_bind9_database
- # - features_designate_bind9_dns
- # - features_designate_bind9
- # - features_designate_bind9_keystone
- linux_system_codename_xenial
interfaces:
ens3:
@@ -28,16 +24,13 @@
ens4:
role: single_ctl
- ctl02.mcp11-ovs-dpdk.local:
+ ctl02.mcp-pike-ovs.local:
reclass_storage_name: openstack_control_node02
roles:
- infra_kvm
- openstack_control
- openstack_database
- openstack_message_queue
- # - features_designate_bind9_database
- # - features_designate_bind9_dns
- # - features_designate_bind9
- linux_system_codename_xenial
interfaces:
ens3:
@@ -45,15 +38,13 @@
ens4:
role: single_ctl
- ctl03.mcp11-ovs-dpdk.local:
+ ctl03.mcp-pike-ovs.local:
reclass_storage_name: openstack_control_node03
roles:
- infra_kvm
- openstack_control
- openstack_database
- openstack_message_queue
- # - features_designate_bind9_database
- # - features_designate_bind9
- linux_system_codename_xenial
interfaces:
ens3:
@@ -61,11 +52,10 @@
ens4:
role: single_ctl
- prx01.mcp11-ovs-dpdk.local:
+ prx01.mcp-pike-ovs.local:
reclass_storage_name: openstack_proxy_node01
roles:
- openstack_proxy
- # - features_designate_bind9_proxy
- linux_system_codename_xenial
interfaces:
ens3:
@@ -73,7 +63,7 @@
ens4:
role: single_ctl
- mon01.mcp11-ovs-dpdk.local:
+ mon01.mcp-pike-ovs.local:
reclass_storage_name: stacklight_server_node01
roles:
- stacklightv2_server_leader
@@ -84,7 +74,7 @@
ens4:
role: single_ctl
- mon02.mcp11-ovs-dpdk.local:
+ mon02.mcp-pike-ovs.local:
reclass_storage_name: stacklight_server_node02
roles:
- stacklightv2_server
@@ -95,7 +85,7 @@
ens4:
role: single_ctl
- mon03.mcp11-ovs-dpdk.local:
+ mon03.mcp-pike-ovs.local:
reclass_storage_name: stacklight_server_node03
roles:
- stacklightv2_server
@@ -106,7 +96,7 @@
ens4:
role: single_ctl
- log01.mcp11-ovs-dpdk.local:
+ log01.mcp-pike-ovs.local:
reclass_storage_name: stacklight_log_node01
roles:
- stacklight_log_leader_v2
@@ -117,7 +107,7 @@
ens4:
role: single_ctl
- log02.mcp11-ovs-dpdk.local:
+ log02.mcp-pike-ovs.local:
reclass_storage_name: stacklight_log_node02
roles:
- stacklight_log
@@ -128,7 +118,7 @@
ens4:
role: single_ctl
- log03.mcp11-ovs-dpdk.local:
+ log03.mcp-pike-ovs.local:
reclass_storage_name: stacklight_log_node03
roles:
- stacklight_log
@@ -139,7 +129,7 @@
ens4:
role: single_ctl
- mtr01.mcp11-ovs-dpdk.local:
+ mtr01.mcp-pike-ovs.local:
reclass_storage_name: stacklight_telemetry_node01
roles:
- stacklight_telemetry_leader
@@ -150,7 +140,7 @@
ens4:
role: single_ctl
- mtr02.mcp11-ovs-dpdk.local:
+ mtr02.mcp-pike-ovs.local:
reclass_storage_name: stacklight_telemetry_node02
roles:
- stacklight_telemetry
@@ -161,7 +151,7 @@
ens4:
role: single_ctl
- mtr03.mcp11-ovs-dpdk.local:
+ mtr03.mcp-pike-ovs.local:
reclass_storage_name: stacklight_telemetry_node03
roles:
- stacklight_telemetry
@@ -173,7 +163,7 @@
role: single_ctl
# Generator-based computes. For compatibility only
- cmp<<count>>.mcp11-ovs-dpdk.local:
+ cmp<<count>>.mcp-pike-ovs.local:
reclass_storage_name: openstack_compute_rack01
roles:
- openstack_compute
@@ -188,7 +178,7 @@
ens6:
role: bond1_ab_ovs_floating
- gtw01.mcp11-ovs-dpdk.local:
+ gtw01.mcp-pike-ovs.local:
reclass_storage_name: openstack_gateway_node01
roles:
- openstack_gateway
@@ -202,3 +192,58 @@
role: bond0_ab_ovs_vxlan_mesh
ens6:
role: bond1_ab_ovs_floating
+
+ dns01.mcp-pike-ovs.local:
+ reclass_storage_name: openstack_dns_node01
+ roles:
+ - openstack_dns
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ dns02.mcp-pike-ovs.local:
+ reclass_storage_name: openstack_dns_node02
+ roles:
+ - openstack_dns
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ share01.mcp-pike-ovs.local:
+ reclass_storage_name: openstack_share_node01
+ roles:
+ - openstack_share
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ share02.mcp-pike-ovs.local:
+ reclass_storage_name: openstack_share_node02
+ roles:
+ - openstack_share
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ share03.mcp-pike-ovs.local:
+ reclass_storage_name: openstack_share_node03
+ roles:
+ - openstack_share
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/core.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/core.yaml
index 78acdf1..4a4dfac 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/core.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/core.yaml
@@ -1,119 +1,18 @@
{% from 'cookied-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
-
+{% import 'shared-core.yaml' as SHARED_CORE with context %}
{% import 'shared-backup-restore.yaml' as BACKUP with context %}
# Install support services
-- description: Install keepalived on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster and *01*' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-- description: Install keepalived
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keepalived:cluster' state.sls keepalived
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
+{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-- description: Install glusterfs
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.service
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-- description: Setup glusterfs on primary controller
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-- description: Check the gluster status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-- description: Install RabbitMQ on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
+{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-- description: Install RabbitMQ
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' state.sls rabbitmq
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the rabbitmq status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on first server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:master' state.sls galera
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install Galera on other servers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:slave' state.sls galera -b 1
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check mysql status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-
-- description: Install haproxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' state.sls haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check haproxy status
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.status haproxy
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Restart rsyslog
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@haproxy:proxy' service.restart rsyslog
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Install memcached on all controllers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@memcached:server' state.sls memcached
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check the VIP
- cmd: |
- OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
- echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+{{ SHARED_CORE.MACRO_CHECK_VIP() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml
index 0fbe5b2..de8e65e 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/openstack.yaml
@@ -20,59 +20,16 @@
{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
-# isntall designate
-#- description: Install bind
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@bind:server' state.sls bind
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-
-#- description: Install designate
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-# -C 'I@designate:server' state.sls designate -b 1
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 5, delay: 10}
-# skip_fail: false
-
{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
-- description: Deploy horizon dashboard
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@horizon:server' state.sls horizon
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
+{{ SHARED_OPENSTACK.MACRO_INSTALL_DESIGNATE(INSTALL_POWERDNS=true) }}
-- description: Deploy nginx proxy
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@nginx:server' state.sls nginx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HORIZON() }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_COMPUTE(CELL_MAPPING=true) }}
-# Install compute node
-
-- description: Apply formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-
-- description: Re-apply(as in doc) formulas for compute node
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Check IP on computes
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
- 'ip a'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 10, delay: 30}
- skip_fail: false
+{{ SHARED_OPENSTACK.MACRO_INSTALL_MANILA() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/salt.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/salt.yaml
index c657839..9c13b64 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/salt.yaml
@@ -28,21 +28,3 @@
{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
-
-- description: Hack gtw node
- cmd: salt '{{ HOSTNAME_GTW01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Hack cmp01 node
- cmd: salt '{{ HOSTNAME_CMP01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Hack cmp02 node
- cmd: salt '{{ HOSTNAME_CMP02 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay.yaml
index 89cf22b..95ffa33 100644
--- a/tcp_tests/templates/cookied-mcp-pike-ovs/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-pike-ovs/underlay.yaml
@@ -30,7 +30,12 @@
{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_SHARE02 = os_env('HOSTNAME_SHARE02', 'share02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_SHARE03 = os_env('HOSTNAME_SHARE03', 'share03.' + DOMAIN_NAME) %}
template:
devops_settings:
@@ -59,7 +64,12 @@
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_DNS01 }}: +111
+ default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +69
+ default_{{ HOSTNAME_SHARE02 }}: +70
+ default_{{ HOSTNAME_SHARE03 }}: +71
ip_ranges:
dhcp: [+90, -10]
@@ -85,7 +95,12 @@
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_DNS01 }}: +111
+ default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +69
+ default_{{ HOSTNAME_SHARE02 }}: +70
+ default_{{ HOSTNAME_SHARE03 }}: +71
ip_ranges:
dhcp: [+90, -10]
@@ -111,7 +126,12 @@
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_DNS01 }}: +111
+ default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +69
+ default_{{ HOSTNAME_SHARE02 }}: +70
+ default_{{ HOSTNAME_SHARE03 }}: +71
ip_ranges:
dhcp: [+10, -10]
@@ -137,7 +157,12 @@
default_{{ HOSTNAME_MTR02 }}: +87
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_DNS01 }}: +111
+ default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
+ default_{{ HOSTNAME_SHARE01 }}: +69
+ default_{{ HOSTNAME_SHARE02 }}: +70
+ default_{{ HOSTNAME_SHARE03 }}: +71
ip_ranges:
dhcp: [+10, -10]
@@ -699,3 +724,133 @@
interfaces: *all_interfaces
network_config: *all_network_config
+
+ - name: {{ HOSTNAME_DNS01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_DNS02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_SHARE01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_SHARE02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_SHARE03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-environment.yaml
index 93faa15..4ff3212 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr-ssl/_context-environment.yaml
@@ -18,7 +18,6 @@
- openstack_database_leader
- openstack_message_queue
- linux_system_codename_xenial
- - openstack_dns
interfaces:
ens3:
role: single_dhcp
@@ -33,7 +32,6 @@
- openstack_database
- openstack_message_queue
- linux_system_codename_xenial
- - openstack_dns
interfaces:
ens3:
role: single_dhcp
@@ -48,7 +46,6 @@
- openstack_database
- openstack_message_queue
- linux_system_codename_xenial
- - openstack_dns
interfaces:
ens3:
role: single_dhcp
@@ -227,4 +224,26 @@
ens3:
role: single_dhcp
ens4:
+ role: single_ctl
+
+ dns01.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: openstack_dns_node01
+ roles:
+ - openstack_dns
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ dns02.mcp-queens-dvr-ssl.local:
+ reclass_storage_name: openstack_dns_node02
+ roles:
+ - openstack_dns
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
role: single_ctl
\ No newline at end of file