Merge "Update mcp-local-aptly cfg-drive"
diff --git a/tcp_tests/fixtures/k8s_fixtures.py b/tcp_tests/fixtures/k8s_fixtures.py
index fe744ec..6cc2721 100644
--- a/tcp_tests/fixtures/k8s_fixtures.py
+++ b/tcp_tests/fixtures/k8s_fixtures.py
@@ -93,7 +93,8 @@
if hasattr(request.node, 'rep_call') and \
(request.node.rep_call.passed or request.node.rep_call.failed)\
and grab_virtlet_result:
- artifact_name = utils.extract_name_from_mark(grab_virtlet_result) \
- or "{}".format(func_name)
- k8s_deployed.download_virtlet_conformance_log(artifact_name)
+ files = utils.extract_name_from_mark(grab_virtlet_result) \
+ or ["{}".format(func_name)]
+ k8s_deployed.extract_file_to_node()
+ k8s_deployed.download_virtlet_conformance_log(files)
request.addfinalizer(test_fin)
diff --git a/tcp_tests/fixtures/salt_fixtures.py b/tcp_tests/fixtures/salt_fixtures.py
index aff28dc..58c8509 100644
--- a/tcp_tests/fixtures/salt_fixtures.py
+++ b/tcp_tests/fixtures/salt_fixtures.py
@@ -35,7 +35,7 @@
@pytest.mark.revert_snapshot(ext.SNAPSHOT.salt_deployed)
@pytest.fixture(scope='function')
def salt_deployed(revert_snapshot, request, config,
- hardware, underlay, salt_actions):
+ hardware, underlay, salt_actions, grab_versions, snapshot):
"""Fixture to get or install salt service on environment
:param revert_snapshot: fixture that reverts snapshot that is specified
diff --git a/tcp_tests/fixtures/underlay_fixtures.py b/tcp_tests/fixtures/underlay_fixtures.py
index eacbec9..7502df2 100644
--- a/tcp_tests/fixtures/underlay_fixtures.py
+++ b/tcp_tests/fixtures/underlay_fixtures.py
@@ -110,7 +110,7 @@
return None
-@pytest.fixture(scope='function', autouse=True)
+@pytest.fixture(scope='function')
def snapshot(request, hardware):
"""Fixture for creating snapshot at the end of test if it's needed
@@ -198,7 +198,7 @@
return underlay
-@pytest.fixture(scope='function', autouse=True)
+@pytest.fixture(scope='function')
def grab_versions(request, func_name, underlay):
"""Fixture for grab package versions at the end of test
diff --git a/tcp_tests/managers/jenkins/__init__.py b/tcp_tests/managers/jenkins/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tcp_tests/managers/jenkins/__init__.py
diff --git a/tcp_tests/managers/jenkins/client.py b/tcp_tests/managers/jenkins/client.py
new file mode 100644
index 0000000..f781305
--- /dev/null
+++ b/tcp_tests/managers/jenkins/client.py
@@ -0,0 +1,65 @@
+import time
+
+import jenkins
+
+from devops.helpers import helpers
+
+
+class JenkinsClient(object):
+
+ def __init__(self, host=None, username=None, password=None):
+ host = host or 'http://172.16.44.33:8081'
+ username = username or 'admin'
+ password = password or 'r00tme'
+ self.__client = jenkins.Jenkins(
+ host,
+ username=username,
+ password=password)
+
+ def jobs(self):
+ return self.__client.get_jobs()
+
+ def find_jobs(self, name):
+ return filter(lambda x: name in x['fullname'], self.jobs())
+
+ def job_info(self, name):
+ return self.__client.get_job_info(name)
+
+ def list_builds(self, name):
+ return self.job_info(name).get('builds')
+
+ def build_info(self, name, build_id):
+ return self.__client.get_build_info(name, build_id)
+
+ def job_params(self, name):
+ job = self.job_info(name)
+ job_params = next(
+ p for p in job['property'] if
+ 'hudson.model.ParametersDefinitionProperty' == p['_class'])
+ job_params = job_params['parameterDefinitions']
+ return job_params
+
+ def make_default_params(self, name):
+ job_params = self.job_params(name)
+ def_params = dict(
+ [(j['name'], j['defaultParameterValue']['value'])
+ for j in job_params])
+ return def_params
+
+ def run_build(self, name, params=None):
+ params = params or self.make_default_params(name)
+ self.__client.build_job(name, params)
+ time.sleep(10)  # wait for the build to start
+ build_id = self.job_info(name)['lastBuild']['number']
+ return name, build_id
+
+ def wait_end_of_build(self, name, build_id, timeout=600):
+
+ def is_finished():
+ return not self.build_info(name, build_id)['building']
+
+ helpers.wait(
+ is_finished,
+ timeout=timeout,
+ timeout_msg='Timeout waiting: build "{1}" of job {0}'
+ ' is still not finished'.format(name, build_id))
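The new client is a thin wrapper over python-jenkins plus a polling helper. A minimal usage sketch (the job name is a hypothetical placeholder; host and credentials fall back to the hard-coded defaults above):

    from tcp_tests.managers.jenkins.client import JenkinsClient

    jenkins = JenkinsClient()  # uses the baked-in host/credentials defaults
    # 'deploy_openstack' is an illustrative job name, not one defined here
    name, build_id = jenkins.run_build('deploy_openstack')
    jenkins.wait_end_of_build(name, build_id, timeout=1800)
    print(jenkins.build_info(name, build_id)['result'])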
diff --git a/tcp_tests/managers/k8smanager.py b/tcp_tests/managers/k8smanager.py
index 1126011..5e8ea21 100644
--- a/tcp_tests/managers/k8smanager.py
+++ b/tcp_tests/managers/k8smanager.py
@@ -505,8 +505,8 @@
"xenial-server-cloudimg-amd64-disk1.img"
cmd = ("set -o pipefail; "
"docker run --net=host {0} /virtlet-e2e-tests "
- "-include-cloud-init-tests -image {2} "
- "-sshuser ubuntu -memoryLimit 1024 "
+ "-include-cloud-init-tests -junitOutput report.xml "
+ "-image {2} -sshuser ubuntu -memoryLimit 1024 "
"-alsologtostderr -cluster-url http://127.0.0.1:8080 "
"-ginkgo.focus '\[Conformance\]' "
"| tee {1}".format(
@@ -515,6 +515,7 @@
else:
cmd = ("set -o pipefail; "
"docker run --net=host {0} /virtlet-e2e-tests "
+ "-junitOutput report.xml "
"-alsologtostderr -cluster-url http://127.0.0.1:8080 "
"-ginkgo.focus '\[Conformance\]' "
"| tee {1}".format(
@@ -530,11 +531,37 @@
LOG.info("Test results stderr: {}".format(stderr))
return result
- def download_virtlet_conformance_log(self,
- log_file='virtlet_conformance.log'):
+ def extract_file_to_node(self, container='virtlet',
+ file_path='report.xml'):
+ """
+ Copy a file from a docker container onto the node
+
+ :param container: Full name or substring of the container name
+ :param file_path: Path to the file inside the container
+ :return:
+ """
+ with self.__underlay.remote(
+ node_name=self.ctl_host) as remote:
+ cmd = ("docker ps --all | grep {0} |"
+ " awk '{{print $1}}'".format(container))
+ result = remote.check_call(cmd)
+ container_id = result['stdout'][0].strip()
+ cmd = "docker start {}".format(container_id)
+ remote.check_call(cmd)
+ cmd = "docker cp {0}:/{1} .".format(container_id, file_path)
+ remote.check_call(cmd)
+
+ def download_virtlet_conformance_log(self, files):
+ """
+ Download JUnit report and conformance logs from cluster
+ :param files: list of file names to download
+ :return:
+ """
master_host = self.__config.salt.salt_master_host
with self.__underlay.remote(host=master_host) as r:
- cmd = "rsync {0}:/root/{1} /root/".format(self.ctl_host, log_file)
- r.check_call(cmd, raise_on_err=False)
- LOG.info("Downloading the artifact {0}".format(log_file))
- r.download(destination=log_file, target=os.getcwd())
+ for log_file in files:
+ cmd = "rsync {0}:/root/{1} /root/".format(self.ctl_host,
+ log_file)
+ r.check_call(cmd, raise_on_err=False)
+ LOG.info("Downloading the artifact {0}".format(log_file))
+ r.download(destination=log_file, target=os.getcwd())
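For context, a sketch of how the finalizer above is expected to be driven from a test; the mark name, its 'name' argument, and the manager method are assumptions inferred from the fixture code, not confirmed by this diff:

    import pytest

    # Assumed mark: its 'name' argument becomes the 'files' list downloaded
    # by the finalizer after the test passes or fails.
    @pytest.mark.grab_virtlet_results(name=['report.xml',
                                            'virtlet_conformance.log'])
    def test_virtlet_conformance(k8s_deployed):
        k8s_deployed.run_virtlet_conformance()  # method name assumed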
diff --git a/tcp_tests/managers/openstack_manager.py b/tcp_tests/managers/openstack_manager.py
index 2780d9b..bac459a 100644
--- a/tcp_tests/managers/openstack_manager.py
+++ b/tcp_tests/managers/openstack_manager.py
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import os
+import requests
from tcp_tests.managers.execute_commands import ExecuteCommandsMixin
from tcp_tests import logger
@@ -39,6 +40,55 @@
self.execute_commands(commands,
label='Install OpenStack services')
self.__config.openstack.openstack_installed = True
+ h_data = self.get_horizon_data()
+ self.__config.openstack.horizon_host = h_data['horizon_host']
+ self.__config.openstack.horizon_port = h_data['horizon_port']
+ self.__config.openstack.horizon_user = h_data['horizon_user']
+ self.__config.openstack.horizon_password = h_data['horizon_password']
+ self.auth_in_horizon(
+ h_data['horizon_host'],
+ h_data['horizon_port'],
+ h_data['horizon_user'],
+ h_data['horizon_password'])
+
+ def get_horizon_data(self):
+ horizon_data = {}
+ tgt = 'I@nginx:server and not cfg*'
+ pillar_host = ('nginx:server:site:nginx_ssl_redirect'
+ '_openstack_web:host:name')
+ pillar_port = ('nginx:server:site:nginx_ssl_redirect'
+ '_openstack_web:host:port')
+ hosts = self._salt.get_pillar(tgt=tgt, pillar=pillar_host)
+ host = set([ip for item in hosts for node, ip
+ in item.items() if ip])
+ if host:
+ host = host.pop()
+ ports = self._salt.get_pillar(tgt=tgt, pillar=pillar_port)
+
+ port = set([port for item in ports for node, port
+ in item.items() if port])
+ if port:
+ port = port.pop()
+ tgt = 'I@keystone:server and ctl01*'
+ pillar_user = 'keystone:server:admin_name'
+ pillar_password = 'keystone:server:admin_password'
+ users = self._salt.get_pillar(tgt=tgt, pillar=pillar_user)
+ user = set([user for item in users for node, user
+ in item.items() if user])
+ if user:
+ user = user.pop()
+ passwords = self._salt.get_pillar(tgt=tgt, pillar=pillar_password)
+ pwd = set([pwd for item in passwords for node, pwd
+ in item.items() if pwd])
+ if pwd:
+ pwd = pwd.pop()
+ horizon_data.update({'horizon_host': host})
+ horizon_data.update({'horizon_port': port})
+ horizon_data.update({'horizon_user': user})
+ horizon_data.update({'horizon_password': pwd})
+ LOG.info("Data from pillars {}".format(horizon_data))
+
+ return horizon_data
def run_tempest(
self,
@@ -121,3 +171,20 @@
LOG.info('Reboot (warm restart) nodes {0}'.format(node_names))
self.warm_shutdown_openstack_nodes(node_names, timeout=timeout)
self.warm_start_nodes(node_names)
+
+ def auth_in_horizon(self, host, port, user, password):
+ client = requests.session()
+ url = "http://{0}:{1}".format(host, port)
+ # Retrieve the CSRF token first
+ client.get(url, verify=False)  # sets cookie
+ login_data = dict(username=user, password=password, next='/')
+ # If the CSRF cookie was set, it must be echoed back in the form
+ csrftoken = client.cookies.get('csrftoken', None)
+ if csrftoken:
+ login_data['csrfmiddlewaretoken'] = csrftoken
+ resp = client.post(url, data=login_data,
+ headers=dict(Referer=url), verify=False)
+ LOG.debug("Horizon resp {}".format(resp))
+ assert 200 == resp.status_code, ("Failed to auth in "
+ "horizon. Response "
+ "{0}".format(resp.status_code))
diff --git a/tcp_tests/managers/rallymanager.py b/tcp_tests/managers/rallymanager.py
index fc56afa..ae72941 100644
--- a/tcp_tests/managers/rallymanager.py
+++ b/tcp_tests/managers/rallymanager.py
@@ -173,7 +173,8 @@
# Updated to replace the OpenStackManager method run_tempest
def run_tempest(self, conf_name='/var/lib/lvm_mcp.conf',
pattern='set=smoke', concurrency=0, timeout=None,
- report_prefix='', report_types=None):
+ report_prefix='', report_types=None,
+ designate_plugin=True):
"""Run tempest tests
:param conf_name: tempest config placed in the rally container
@@ -182,6 +183,7 @@
to take the amount of the cores on the node
<self._node_name>.
:param timeout: stop tempest tests after specified timeout.
+ :param designate_plugin: install the designate tempest plugin (default True)
:param report_prefix: str, prefix for report filenames. Usually the
output of the fixture 'func_name'
:param report_types: list of the report types that need to download
@@ -189,23 +191,26 @@
None by default.
"""
report_types = report_types or []
-
- cmd = (
- "cat > /root/rally/install_tempest.sh << EOF\n"
- "rally verify create-verifier"
- " --type tempest "
- " --name tempest-verifier"
- " --source /var/lib/tempest"
- " --version {tempest_tag}"
- " --system-wide\n"
- "rally verify add-verifier-ext"
- " --source /var/lib/designate-tempest-plugin"
- " --version {designate_tag}\n"
- "rally verify configure-verifier --extend {tempest_conf}\n"
- "rally verify configure-verifier --show\n"
- "EOF".format(tempest_tag=self.tempest_tag,
- designate_tag=self.designate_tag,
- tempest_conf=conf_name))
+ designate_ext = (
+ "rally verify add-verifier-ext"
+ " --source /var/lib/designate-tempest-plugin"
+ " --version {designate_tag}\n".format(
+ designate_tag=self.designate_tag)
+ if designate_plugin else "")
+ cmd = (
+ "cat > /root/rally/install_tempest.sh << EOF\n"
+ "rally verify create-verifier"
+ " --type tempest "
+ " --name tempest-verifier"
+ " --source /var/lib/tempest"
+ " --version {tempest_tag}"
+ " --system-wide\n"
+ "{designate_ext}"
+ "rally verify configure-verifier --extend {tempest_conf}\n"
+ "rally verify configure-verifier --show\n"
+ "EOF".format(tempest_tag=self.tempest_tag,
+ designate_ext=designate_ext,
+ tempest_conf=conf_name))
with self._underlay.remote(node_name=self._node_name) as remote:
LOG.info("Create install_tempest.sh")
remote.check_call(cmd)
diff --git a/tcp_tests/managers/saltmanager.py b/tcp_tests/managers/saltmanager.py
index 1ff5324..06e7d0b 100644
--- a/tcp_tests/managers/saltmanager.py
+++ b/tcp_tests/managers/saltmanager.py
@@ -38,14 +38,15 @@
'runStates': 'run_states',
}
- def __init__(self, config, underlay, host=None, port='6969'):
+ def __init__(self, config, underlay, host=None, port='6969',
+ username=None, password=None):
self.__config = config
self.__underlay = underlay
self.__port = port
self.__host = host
self.__api = None
- self.__user = settings.SALT_USER
- self.__password = settings.SALT_PASSWORD
+ self.__user = username or settings.SALT_USER
+ self.__password = password or settings.SALT_PASSWORD
self._salt = self
super(SaltManager, self).__init__(config=config, underlay=underlay)
@@ -60,6 +61,10 @@
self.execute_commands(commands=commands,
label="Install and configure salt")
+ def change_creds(self, username, password):
+ self.__user = username
+ self.__password = password
+
@property
def port(self):
return self.__port
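change_creds lets a test re-authenticate against salt-api when a deployment step rotates the credentials; a usage sketch (the new values are illustrative):

    # Hypothetical rotation scenario: switch to the new credentials before
    # the next REST call to salt-api.
    salt_actions.change_creds(username='salt', password='rotated-secret')
    result = salt_actions.get_pillar(tgt='*', pillar='linux:system:name')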
diff --git a/tcp_tests/managers/sl_manager.py b/tcp_tests/managers/sl_manager.py
index a1ee3a7..68ff0de 100644
--- a/tcp_tests/managers/sl_manager.py
+++ b/tcp_tests/managers/sl_manager.py
@@ -93,8 +93,8 @@
target_node_name = [node_name for node_name
in self.__underlay.node_names()
if node_to_run in node_name]
- cmd = ("cd {0}; "
- ". venv-stacklight-pytest/bin/activate;"
+ cmd = (". venv-stacklight-pytest/bin/activate;"
+ "cd {0}; "
"export VOLUME_STATUS='available';"
"pytest -k {1} {2}".format(
tests_path,
@@ -105,7 +105,7 @@
as node_remote:
LOG.debug("Run {0} on the node {1}".format(
cmd, target_node_name[0]))
- result = node_remote.execute(cmd)
+ result = node_remote.check_call(cmd, verbose=True)
LOG.debug("Test execution result is {}".format(result))
return result
@@ -114,8 +114,8 @@
target_node_name = [node_name for node_name
in self.__underlay.node_names()
if node_to_run in node_name]
- cmd = ("cd {0}; "
- ". venv-stacklight-pytest/bin/activate;"
+ cmd = (". venv-stacklight-pytest/bin/activate;"
+ "cd {0}; "
"export VOLUME_STATUS='available';"
"pip install pytest-json;"
"pytest --json=report.json -k {1} {2}".format(
@@ -127,9 +127,9 @@
as node_remote:
LOG.debug("Run {0} on the node {1}".format(
cmd, target_node_name[0]))
- node_remote.execute(cmd)
- res = node_remote.execute('cd {0}; cat report.json'.format(
- tests_path))
+ node_remote.check_call(cmd, verbose=True)
+ res = node_remote.check_call('cd {0}; cat report.json'.format(
+ tests_path), verbose=True)
LOG.debug("Test execution result is {}".format(res['stdout']))
result = json.loads(res['stdout'][0])
return result['report']['tests']
diff --git a/tcp_tests/managers/underlay_ssh_manager.py b/tcp_tests/managers/underlay_ssh_manager.py
index 7d3da96..5194239 100644
--- a/tcp_tests/managers/underlay_ssh_manager.py
+++ b/tcp_tests/managers/underlay_ssh_manager.py
@@ -247,13 +247,16 @@
"""
ssh_data = self.__ssh_data(node_name=node_name, host=host,
address_pool=address_pool)
+ ssh_auth = ssh_client.SSHAuth(
+ username=ssh_data['login'],
+ password=ssh_data['password'],
+ keys=[rsakey.RSAKey(file_obj=StringIO.StringIO(key))
+ for key in ssh_data['keys']])
+
return ssh_client.SSHClient(
host=ssh_data['host'],
port=ssh_data['port'] or 22,
- username=ssh_data['login'],
- password=ssh_data['password'],
- private_keys=[rsakey.RSAKey(file_obj=StringIO.StringIO(key))
- for key in ssh_data['keys']])
+ auth=ssh_auth)
def local(self):
"""Get Subprocess instance for local operations like:
diff --git a/tcp_tests/requirements.txt b/tcp_tests/requirements.txt
index 381be12..b06b805 100644
--- a/tcp_tests/requirements.txt
+++ b/tcp_tests/requirements.txt
@@ -19,3 +19,4 @@
setuptools<=36.2.0
netaddr
mock>=1.2
+python-jenkins
diff --git a/tcp_tests/settings.py b/tcp_tests/settings.py
index a1b296a..f90000c 100644
--- a/tcp_tests/settings.py
+++ b/tcp_tests/settings.py
@@ -52,9 +52,10 @@
SALT_PASSWORD = os.environ.get('SALT_PASSWORD', 'hovno12345!')
DOCKER_REGISTRY = os.environ.get('DOCKER_REGISTRY',
- 'docker-prod-virtual.docker.mirantis.net')
+ 'docker-prod-local.artifactory.mirantis.com')
DOCKER_NAME = os.environ.get('DOCKER_NAME',
'mirantis/oscore/rally-tempest:latest')
+DOCKER_IMAGES_SL_TAG = os.environ.get('DOCKER_IMAGES_SL_TAG', 'latest')
PATTERN = os.environ.get('PATTERN', None)
RUN_TEMPEST = get_var_as_bool('RUN_TEMPEST', False)
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index dacdb22..ad4cce3 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -174,6 +174,14 @@
ct.Cfg('openstack_steps_path', ct.String(),
help="Path to YAML with steps to deploy openstack",
default=_default_openstack_steps),
+ ct.Cfg('horizon_host', ct.IPAddress(),
+ help="IP address of the Horizon web UI", default='0.0.0.0'),
+ ct.Cfg('horizon_port', ct.String(),
+ help="Port of the Horizon web UI", default='5000'),
+ ct.Cfg('horizon_user', ct.String(),
+ help="Username for Horizon authentication", default='admin'),
+ ct.Cfg('horizon_password', ct.String(),
+ help="Password for Horizon authentication", default='workshop'),
]
openstack_opts = [
ct.Cfg('openstack_installed', ct.Boolean(),
@@ -197,20 +205,26 @@
help="Path to YAML with steps to deploy sl",
default=_default_sl_prepare_tests_steps_path),
ct.Cfg('docker_image_alertmanager', ct.String(),
- default='{}/openstack-docker/alertmanager:latest'.format(
- settings.DOCKER_REGISTRY)),
+ default='{0}/openstack-docker/alertmanager:{1}'.format(
+ settings.DOCKER_REGISTRY, settings.DOCKER_IMAGES_SL_TAG)),
ct.Cfg('docker_image_pushgateway', ct.String(),
- default='{}/openstack-docker/pushgateway:latest'.format(
- settings.DOCKER_REGISTRY)),
+ default='{0}/openstack-docker/pushgateway:{1}'.format(
+ settings.DOCKER_REGISTRY, settings.DOCKER_IMAGES_SL_TAG)),
ct.Cfg('docker_image_prometheus', ct.String(),
- default='{}/openstack-docker/prometheus:latest'.format(
- settings.DOCKER_REGISTRY)),
+ default='{0}/openstack-docker/prometheus:{1}'.format(
+ settings.DOCKER_REGISTRY, settings.DOCKER_IMAGES_SL_TAG)),
ct.Cfg('docker_image_remote_agent', ct.String(),
- default='{}/openstack-docker/telegraf:latest'.format(
- settings.DOCKER_REGISTRY)),
+ default='{0}/openstack-docker/telegraf:{1}'.format(
+ settings.DOCKER_REGISTRY, settings.DOCKER_IMAGES_SL_TAG)),
ct.Cfg('docker_image_remote_storage_adapter', ct.String(),
- default='{}/openstack-docker/remote_storage_adapter:latest'.format(
- settings.DOCKER_REGISTRY)),
+ default='{0}/openstack-docker/remote_storage_adapter:{1}'.format(
+ settings.DOCKER_REGISTRY, settings.DOCKER_IMAGES_SL_TAG)),
+ ct.Cfg('docker_image_prometheus_relay', ct.String(),
+ default='{0}/openstack-docker/prometheus_relay:{1}'.format(
+ settings.DOCKER_REGISTRY, settings.DOCKER_IMAGES_SL_TAG)),
+ ct.Cfg('docker_image_grafana', ct.String(),
+ default='{0}/mirantis/external/grafana:{1}'.format(
+ settings.DOCKER_REGISTRY, settings.DOCKER_IMAGES_SL_TAG)),
# SalesForce connection options for pushkin
ct.Cfg('sfdc_sandbox_enabled', ct.String(), default='False'),
ct.Cfg('sfdc_auth_url', ct.String(), default=''),
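With these defaults, pinning every StackLight image to one release becomes a single environment variable; a worked example (the tag value is illustrative):

    import os

    # Must be set before tcp_tests.settings is imported, since the tag is
    # read from the environment at import time.
    os.environ['DOCKER_IMAGES_SL_TAG'] = '2018.1'
    # docker_image_prometheus then defaults to:
    # docker-prod-local.artifactory.mirantis.com/openstack-docker/prometheus:2018.1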
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
index 3799ba1..342e4c8 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
@@ -53,7 +53,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@keystone:client' state.sls keystone.client
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 2, delay: 5}
skip_fail: false
- description: Check keystone service-list
@@ -171,7 +171,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
+ skip_fail: false
- description: Check IP on computes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
@@ -291,30 +291,3 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
skip_fail: false
-
-- description: Run tests
- cmd: |
- if [[ {{ PATTERN }} == "false" ]]; then
- docker run --rm --net=host -e SOURCE_FILE=/home/rally/keystonercv3 -e SET=full -e CUSTOM="--pattern tempest --detail" -e CONCURRENCY=2 -e TEMPEST_CONF=lvm_mcp.conf -e SKIP_LIST=mcp_skip.list -e DO_CLEANUP_RESOURCES=false -v /root/keystonercv3:/home/rally/keystonercv3 -v /root/rally_reports:/home/rally/rally_reports -v /etc/ssl/certs/:/etc/ssl/certs/ docker-prod-local.artifactory.mirantis.com/mirantis/oscore/rally-tempest
- else
- docker run --rm --net=host -e SOURCE_FILE=/home/rally/keystonercv3 -e SET=full -e CUSTOM="--pattern {{ PATTERN }} --detail" -e CONCURRENCY=2 -e TEMPEST_CONF=lvm_mcp.conf -e SKIP_LIST=mcp_skip.list -e DO_CLEANUP_RESOURCES=false -v /root/keystonercv3:/home/rally/keystonercv3 -v /root/rally_reports:/home/rally/rally_reports -v /etc/ssl/certs/:/etc/ssl/certs/ docker-prod-local.artifactory.mirantis.com/mirantis/oscore/rally-tempest
- fi
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Download xml results
- download:
- remote_path: /root
- remote_filename: "report_*.xml"
- local_path: {{ os_env('PWD') }}
- node_name: {{ HOSTNAME_GTW01 }}
- skip_fail: true
-
-- description: Download html results
- download:
- remote_path: /root
- remote_filename: "report_*.html"
- local_path: {{ os_env('PWD') }}
- node_name: {{ HOSTNAME_GTW01 }}
- skip_fail: true
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
index 2c5b112..74a4666 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
@@ -141,6 +141,7 @@
stacklight_telemetry_node03_address: 10.167.8.88
stacklight_telemetry_node03_hostname: mtr03
stacklight_version: '2'
+ fluentd_enabled: 'True'
tenant_network_gateway: 10.167.6.1
tenant_network_netmask: 255.255.255.0
tenant_network_subnet: 10.167.6.0/24
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
index 722816c..bcf5dbb 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
@@ -52,12 +52,14 @@
retry: {count: 1, delay: 10}
skip_fail: false
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins" "fluentd"') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/environment/' + ENVIRONMENT_MODEL_INVENTORY_NAME + '/overrides.yml') }}
+
{{ SHARED.MACRO_GENERATE_INVENTORY() }}
{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/sl.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/sl.yaml
index c27fc4a..ed3636c 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/sl.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/sl.yaml
@@ -18,105 +18,105 @@
skip_fail: false
- description: Configure docker service
- cmd: salt -C 'I@docker:swarm' state.sls docker.host
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install docker swarm on master node
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Send grains to the swarm slave nodes
- cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Update mine
- cmd: salt -C 'I@docker:swarm' mine.update
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Refresh modules
- cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Rerun swarm on slaves to proper token population
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Configure slave nodes
- cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: List registered Docker swarm nodes
- cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
# Install slv2 infra
- description: Install telegraf
- cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
cmd: |
- if salt -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt -C 'I@prometheus:exporters' state.sls prometheus
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-- description: Configure collector
- cmd: salt -C 'I@heka:log_collector' state.sls heka.log_collector
+- description: Configure fluentd
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install elasticsearch server
- cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install kibana server
- cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install elasticsearch client
- cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install kibana client
- cmd: salt -C 'I@kibana:client' state.sls kibana.client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Check influix db
cmd: |
- INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+ INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt -C 'I@influxdb:server' state.sls influxdb
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
@@ -125,44 +125,44 @@
# Collect grains needed to configure the services
- description: Get grains
- cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Sync modules
- cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Update mine
- cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
# Configure the services running in Docker Swarm
- description: Install prometheus alertmanager
- cmd: salt -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:server' state.sls prometheus -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
- description: run docker state
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 120}
skip_fail: false
- description: docker ps
- cmd: sleep 120; salt -C 'I@docker:swarm' dockerng.ps
+ cmd: sleep 120; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Configure Grafana dashboards and datasources
- cmd: sleep 60; salt -C 'I@grafana:client' state.sls grafana.client
+ cmd: sleep 60; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml
index a910fa8..6026c30 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml
@@ -18,8 +18,8 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # # Block access to SSH while node is preparing
+ # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -49,36 +49,36 @@
############## TCP Cloud cfg01 node ##################
#- sleep 120
- - echo "Preparing base OS"
+ # - echo "Preparing base OS"
- echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
+ # - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
+ # - which wget >/dev/null || (apt-get update; apt-get install -y wget);
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+ # # Configure Ubuntu mirrors
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+ # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+ # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+ # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
- - apt-get clean
- - apt-get update
+ # - apt-get clean
+ # - apt-get update
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+ # # Install common packages
+ # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
- # Install salt-minion and stop it until it is configured
- - eatmydata apt-get install -y salt-minion && service salt-minion stop
+ # # Install salt-minion and stop it until it is configured
+ # - eatmydata apt-get install -y salt-minion && service salt-minion stop
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
+ # ########################################################
+ # # Node is ready, allow SSH access
+ # - echo "Allow SSH access ..."
+ # - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ # ########################################################
write_files:
- path: /etc/default/grub.d/97-enable-grub-menu.cfg
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604-hwe.yaml
new file mode 100644
index 0000000..a0b1c88
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604-hwe.yaml
@@ -0,0 +1,100 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # # Block access to SSH while node is preparing
+ # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ - sudo ifup {interface_name}
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+
+ ############## TCP Cloud cfg01 node ##################
+ #- sleep 120
+ # - echo "Preparing base OS"
+ - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+ # - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
+ # - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+ # # Configure Ubuntu mirrors
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+ # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+ # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+ # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+ # - apt-get clean
+ # - eatmydata apt-get update && apt-get -y upgrade
+
+ # # Install common packages
+ # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+ # # Install salt-minion and stop it until it is configured
+ # - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+ # # Install latest kernel
+ # - eatmydata apt-get install -y {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }}
+
+ ########################################################
+ # Node is ready, allow SSH access
+ #- echo "Allow SSH access ..."
+ #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ # - reboot
+ ########################################################
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ # The loopback network interface
+ auto lo
+ iface lo inet loopback
+ auto {interface_name}
+ iface {interface_name} inet dhcp
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604.yaml
index b7508f4..49f28dd 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay--user-data1604.yaml
@@ -18,8 +18,8 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # # Block access to SSH while node is preparing
+ # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -50,38 +50,34 @@
############## TCP Cloud cfg01 node ##################
#- sleep 120
- - echo "Preparing base OS"
+ # - echo "Preparing base OS"
- echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+ # - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
+ # - which wget >/dev/null || (apt-get update; apt-get install -y wget)
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+ # # Configure Ubuntu mirrors
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+ # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+ # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+ # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
- - apt-get clean
- - eatmydata apt-get update && apt-get -y upgrade
+ # - apt-get clean
+ # - eatmydata apt-get update && apt-get -y upgrade
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+ # # Install common packages
+ # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
- # Install salt-minion and stop it until it is configured
- - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
- # Install latest kernel
- - eatmydata apt-get install -y linux-generic-hwe-16.04
+ # # Install salt-minion and stop it until it is configured
+ # - eatmydata apt-get install -y salt-minion && service salt-minion stop
########################################################
# Node is ready, allow SSH access
- #- echo "Allow SSH access ..."
- #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- - reboot
+ # - echo "Allow SSH access ..."
+ # - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
########################################################
write_files:
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
index a68021f..a1d6090 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
@@ -23,6 +23,7 @@
{% import 'cookied-bm-mcp-dvr-vxlan/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
{% import 'cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
{% import 'cookied-bm-mcp-dvr-vxlan/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+{% import 'cookied-bm-mcp-dvr-vxlan/underlay--user-data1604-hwe.yaml' as CLOUDINIT_USER_DATA_1604_HWE with context %}
---
aliases:
@@ -30,6 +31,7 @@
- &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
+ - &cloudinit_user_data_1604_hwe {{ CLOUDINIT_USER_DATA_1604_HWE }}
template:
devops_settings:
@@ -332,7 +334,7 @@
# for store image with cloud-init metadata.
cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
+ cloudinit_user_data: *cloudinit_user_data_1604_hwe
interfaces:
- label: enp9s0f0
@@ -388,7 +390,7 @@
# for store image with cloud-init metadata.
cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
+ cloudinit_user_data: *cloudinit_user_data_1604_hwe
interfaces:
- label: enp9s0f0
@@ -442,7 +444,7 @@
# for store image with cloud-init metadata.
cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
+ cloudinit_user_data: *cloudinit_user_data_1604_hwe
interfaces:
- label: enp2s0f0
@@ -489,7 +491,7 @@
# for store image with cloud-init metadata.
cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
+ cloudinit_user_data: *cloudinit_user_data_1604_hwe
interfaces:
- label: enp2s0f0
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/lab04-physical-inventory.yaml
index 21b0703..31e6778 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/lab04-physical-inventory.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/lab04-physical-inventory.yaml
@@ -51,7 +51,7 @@
interfaces:
enp2s0f1:
role: single_mgm
- deploy_address: 172.16.49.72
+ deploy_address: 172.16.49.73
enp5s0f0:
role: bond2_contrail_dpdk_prv
dpdk_pci: "'0000:05:00.0'"
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
index cdfb908..0de3fd7 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
@@ -50,7 +50,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@keystone:client' state.sls keystone.client
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 2, delay: 5}
skip_fail: false
- description: Check keystone service-list
@@ -211,7 +211,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
+ skip_fail: false
- description: Check IP on computes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
@@ -382,33 +382,3 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
skip_fail: true
-
-{%- if RUN_TEMPEST %}
-- description: Run tests
- cmd: |
- if [[ {{ PATTERN }} == "false" ]]; then
- salt "ctl01*" cmd.run "docker run --rm --net=host -e TEMPEST_CONF=lvm_mcp.conf -e SKIP_LIST=mcp_skip.list -e SOURCE_FILE=keystonercv3 -v /etc/ssl/certs/:/etc/ssl/certs/ -v /root/:/home/rally sandriichenko/rally_tempest_docker:mcp1.1_ocata >> image.output"
- else
- salt "ctl01*" cmd.run "docker run --rm --net=host -e TEMPEST_CONF=lvm_mcp.conf -e SKIP_LIST=mcp_skip.list -e SOURCE_FILE=keystonercv3 -v /etc/ssl/certs/:/etc/ssl/certs/ -e CUSTOM='--pattern {{ PATTERN }}' -v /root/:/home/rally sandriichenko/rally_tempest_docker:mcp1.1_ocata >> image.output"
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Download xml and html results
- cmd: |
- d=$(date +%Y-%m-%d)
- ctl01_ip=`salt --out=newline_values_only 'ctl01*' grains.get ip4_interfaces:ens2`;
- scp root@$ctl01_ip:/root/report*.xml /root/report_$d.xml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
-{%- endif %}
-
-#- description: Download html results
-# download:
-# remote_path: /root
-# remote_filename: "report_*.html"
-# local_path: {{ os_env('PWD') }}
-# node_name: {{ HOSTNAME_CFG01 }}
-# skip_fail: true
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail-dpdk.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail-dpdk.yaml
index 80f389e..265d897 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail-dpdk.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail-dpdk.yaml
@@ -73,7 +73,7 @@
openstack_compute_node02_address: 10.167.8.102
openstack_compute_node01_single_address: 10.167.8.101
openstack_compute_node02_single_address: 10.167.8.102
- openstack_compute_node01_deploy_address: 172.16.49.72
+ openstack_compute_node01_deploy_address: 172.16.49.73
openstack_compute_node02_deploy_address: 172.16.49.74
openstack_control_address: 10.167.8.10
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml
index 304bdab..c91448f 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml
@@ -74,7 +74,7 @@
openstack_compute_node02_address: 10.167.8.102
openstack_compute_node01_single_address: 10.167.8.101
openstack_compute_node02_single_address: 10.167.8.102
- openstack_compute_node01_deploy_address: 172.16.49.72
+ openstack_compute_node01_deploy_address: 172.16.49.73
openstack_compute_node02_deploy_address: 172.16.49.74
openstack_control_address: 10.167.8.10
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-environment.yaml
index 98eaa56..6f29def 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-environment.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-environment.yaml
@@ -235,11 +235,11 @@
ens3:
role: single_ctl
- bmk01.cookied-bm-mcp-ocata-contrail.local:
- reclass_storage_name: openstack_benchmark_node01
- roles:
- - openstack_benchmark
- - linux_system_codename_xenial
- interfaces:
- ens3:
- role: single_ctl
+# bmk01.cookied-bm-mcp-ocata-contrail.local:
+# reclass_storage_name: openstack_benchmark_node01
+# roles:
+# - openstack_benchmark
+# - linux_system_codename_xenial
+# interfaces:
+# ens3:
+# role: single_ctl
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
index efa6773..dd847c4 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
@@ -63,17 +63,6 @@
{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins"') }}
-- description: "Workaround for haproxy without listen"
- cmd: |
- set -e;
- git clone https://gerrit.mcp.mirantis.net/salt-formulas/haproxy;
- cd haproxy/;
- git fetch https://gerrit.mcp.mirantis.net/salt-formulas/haproxy refs/changes/39/11339/1 && git checkout FETCH_HEAD;
- cp haproxy/files/haproxy.cfg /srv/salt/env/prd/haproxy/files/;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: true
-
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
- description: (REMOVE asap) Hack CFG of Hash sum mismatch
@@ -87,6 +76,8 @@
{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/environment/' + ENVIRONMENT_MODEL_INVENTORY_NAME + '/overrides.yml') }}
+
- description: "Workaround for rack01 compute generator"
cmd: |
set -e;
@@ -114,7 +105,7 @@
cmd: |
set -e;
# Add tenant and single addresses for computes
- salt-call reclass.cluster_meta_set deploy_address 172.16.49.72 /srv/salt/reclass/nodes/_generated/cmp001.cookied-bm-mcp-ocata-contrail.local.yml
+ salt-call reclass.cluster_meta_set deploy_address 172.16.49.73 /srv/salt/reclass/nodes/_generated/cmp001.cookied-bm-mcp-ocata-contrail.local.yml
salt-call reclass.cluster_meta_set tenant_address 192.168.0.101 /srv/salt/reclass/nodes/_generated/cmp001.cookied-bm-mcp-ocata-contrail.local.yml
salt-call reclass.cluster_meta_set single_address 10.167.8.101 /srv/salt/reclass/nodes/_generated/cmp001.cookied-bm-mcp-ocata-contrail.local.yml
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
index 9e14bf2..8d131b7 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
@@ -18,105 +18,105 @@
skip_fail: false
- description: Configure docker service
- cmd: salt -C 'I@docker:swarm' state.sls docker.host
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install docker swarm on master node
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Send grains to the swarm slave nodes
- cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Update mine
- cmd: salt -C 'I@docker:swarm' mine.update
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Refresh modules
- cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Rerun swarm on slaves to proper token population
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Configure slave nodes
- cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: List registered Docker swarm nodes
- cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
# Install slv2 infra
- description: Install telegraf
- cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
cmd: |
- if salt -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt -C 'I@prometheus:exporters' state.sls prometheus
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Configure collector
- cmd: salt -C 'I@heka:log_collector' state.sls heka.log_collector
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install elasticsearch server
- cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install kibana server
- cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install elasticsearch client
- cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install kibana client
- cmd: salt -C 'I@kibana:client' state.sls kibana.client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Check influxdb
cmd: |
- INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+ INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt -C 'I@influxdb:server' state.sls influxdb
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
@@ -125,44 +125,44 @@
# Collect grains needed to configure the services
- description: Get grains
- cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Sync modules
- cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Update mine
- cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
# Configure the services running in Docker Swarm
- description: Install prometheus alertmanager
- cmd: salt -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
- description: run docker state
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 120}
skip_fail: false
- description: docker ps
- cmd: sleep 120; salt -C 'I@docker:swarm' dockerng.ps
+ cmd: sleep 120; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Configure Grafana dashboards and datasources
- cmd: sleep 60; salt -C 'I@grafana:client' state.sls grafana.client
+ cmd: sleep 60; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
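Every step in this template now repeats the same salt CLI options (--hard-crash --state-output=mixed --state-verbose=False). A minimal sketch of how the repetition could be factored into a Jinja variable if these templates ever want it; SALT_CMD is a hypothetical name, not part of this change:

    {% set SALT_CMD = 'salt --hard-crash --state-output=mixed --state-verbose=False' %}

    - description: Configure docker service
      cmd: {{ SALT_CMD }} -C 'I@docker:swarm' state.sls docker.host
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 10}
      skip_fail: false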
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml
index c6314ad..2bb48f0 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml
@@ -18,8 +18,8 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # # Block access to SSH while node is preparing
+ # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -49,35 +49,35 @@
############## TCP Cloud cfg01 node ##################
#- sleep 120
- - echo "Preparing base OS"
+ # - echo "Preparing base OS"
- echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
+ # - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
+ # - which wget >/dev/null || (apt-get update; apt-get install -y wget);
# Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+ # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+ # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+ # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
- - apt-get clean
- - apt-get update
+ # - apt-get clean
+ # - apt-get update
# Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+ # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
# Install salt-minion and stop it until it is configured
- - eatmydata apt-get install -y salt-minion && service salt-minion stop
+ # - eatmydata apt-get install -y salt-minion && service salt-minion stop
########################################################
# Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ # - echo "Allow SSH access ..."
+ # - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
########################################################
write_files:
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604-hwe.yaml
new file mode 100644
index 0000000..07a6936
--- /dev/null
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604-hwe.yaml
@@ -0,0 +1,100 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # # Block access to SSH while node is preparing
+ # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ - sudo ifup {interface_name}
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+
+ ############## TCP Cloud cfg01 node ##################
+ #- sleep 120
+ # - echo "Preparing base OS"
+ - echo "nameserver 172.18.208.44" > /etc/resolv.conf;
+ # - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
+ # - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+ # Configure Ubuntu mirrors
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+ # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+ # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+ # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+ # - apt-get clean
+ # - eatmydata apt-get update && apt-get -y upgrade
+
+ # Install common packages
+ # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+ # Install salt-minion and stop it until it is configured
+ # - eatmydata apt-get install -y salt-minion && service salt-minion stop
+
+ # Install latest kernel
+ # - eatmydata apt-get install -y {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }}
+
+ ########################################################
+ # Node is ready, allow SSH access
+ #- echo "Allow SSH access ..."
+ #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ # - reboot
+ ########################################################
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ # The loopback network interface
+ auto lo
+ iface lo inet loopback
+ auto {interface_name}
+ iface {interface_name} inet dhcp
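The {interface_name} and {gateway} tokens in this new user-data are plain string-format placeholders, presumably substituted per node by the test harness before the config drive is built. A hypothetical rendered /etc/network/interfaces for a node whose admin NIC is enp9s0f0:

    # The loopback network interface
    auto lo
    iface lo inet loopback
    auto enp9s0f0
    iface enp9s0f0 inet dhcp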
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604.yaml
index c3ffe17..e8e6345 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay--user-data1604.yaml
@@ -19,7 +19,7 @@
bootcmd:
# Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -50,38 +50,34 @@
############## TCP Cloud cfg01 node ##################
#- sleep 120
- - echo "Preparing base OS"
+ # - echo "Preparing base OS"
- echo "nameserver 172.18.208.44" > /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+ # - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
+ # - which wget >/dev/null || (apt-get update; apt-get install -y wget)
# Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+ # - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+ # - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ # - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+ # - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+ # - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
- - apt-get clean
- - eatmydata apt-get update && apt-get -y upgrade
+ # - apt-get clean
+ # - eatmydata apt-get update && apt-get -y upgrade
# Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+ # - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
# Install salt-minion and stop it until it is configured
- - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
- # Install latest kernel
- - eatmydata apt-get install -y linux-generic-hwe-16.04
+ # - eatmydata apt-get install -y salt-minion && service salt-minion stop
########################################################
# Node is ready, allow SSH access
- #- echo "Allow SSH access ..."
- #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- - reboot
+ # - echo "Allow SSH access ..."
+ # - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
########################################################
write_files:
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay.yaml
index 3e83922..2d0a783 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/underlay.yaml
@@ -17,7 +17,7 @@
{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.67') %}
{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.49.68') %}
{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.49.69') %}
-{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.16.49.72') %}
+{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.16.49.73') %}
{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.16.49.74') %}
# {% set ETH0_IP_ADDRESS_CMP003 = os_env('ETH0_IP_ADDRESS_CMP003', '172.16.167.140') %}
# {% set ETH0_IP_ADDRESS_GTW01 = os_env('ETH0_IP_ADDRESS_GTW01', '172.16.49.5') %}
@@ -26,6 +26,7 @@
{% import 'cookied-bm-mcp-ocata-contrail/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
{% import 'cookied-bm-mcp-ocata-contrail/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
{% import 'cookied-bm-mcp-ocata-contrail/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA with context %}
+{% import 'cookied-bm-mcp-ocata-contrail/underlay--user-data1604-hwe.yaml' as CLOUDINIT_USER_DATA_HWE with context %}
---
aliases:
@@ -33,6 +34,7 @@
- &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- &cloudinit_user_data {{ CLOUDINIT_USER_DATA }}
+ - &cloudinit_user_data_hwe {{ CLOUDINIT_USER_DATA_HWE }}
template:
@@ -233,7 +235,7 @@
root_volume_name: system # see 'volumes' below
cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
+ cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 200
@@ -251,14 +253,14 @@
cloudinit_user_data: *cloudinit_user_data
interfaces:
- - label: enp2s0f0
+ - label: enp9s0f0
l2_network_device: admin
mac_address: !os_env ETH0_MAC_ADDRESS_KVM01
- - label: enp2s0f1
+ - label: enp9s0f1
mac_address: !os_env ETH1_MAC_ADDRESS_KVM01
network_config:
- enp2s0f0:
+ enp9s0f0:
networks:
- admin
bond0:
@@ -266,7 +268,7 @@
- control
aggregation: active-backup
parents:
- - enp2s0f1
+ - enp9s0f1
- name: {{ HOSTNAME_KVM02 }}
role: salt_minion
@@ -280,7 +282,7 @@
root_volume_name: system # see 'volumes' below
cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
+ cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 200
@@ -298,14 +300,14 @@
cloudinit_user_data: *cloudinit_user_data
interfaces:
- - label: enp2s0f0
+ - label: enp9s0f0
l2_network_device: admin
mac_address: !os_env ETH0_MAC_ADDRESS_KVM02
- - label: enp2s0f1
+ - label: enp9s0f1
mac_address: !os_env ETH1_MAC_ADDRESS_KVM02
network_config:
- enp2s0f0:
+ enp9s0f0:
networks:
- admin
bond0:
@@ -313,7 +315,7 @@
- control
aggregation: active-backup
parents:
- - enp2s0f1
+ - enp9s0f1
- name: {{ HOSTNAME_KVM03 }}
role: salt_minion
@@ -328,7 +330,7 @@
root_volume_name: system # see 'volumes' below
cloud_init_volume_name: iso # see 'volumes' below
# cloud_init_iface_up: eno1 # see 'interfaces' below.
- cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
+ cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 200
@@ -347,16 +349,16 @@
interfaces:
# - label: eno1
- - label: enp2s0f0
+ - label: enp9s0f0
l2_network_device: admin
mac_address: !os_env ETH0_MAC_ADDRESS_KVM03
# - label: eno2
- - label: enp2s0f1
+ - label: enp9s0f1
mac_address: !os_env ETH1_MAC_ADDRESS_KVM03
network_config:
# eno1:
- enp2s0f0:
+ enp9s0f0:
networks:
- admin
bond0:
@@ -364,7 +366,7 @@
- control
aggregation: active-backup
parents:
- - enp2s0f1
+ - enp9s0f1
- name: {{ HOSTNAME_CMP001 }}
@@ -395,7 +397,7 @@
# for storing the image with cloud-init metadata.
cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
+ cloudinit_user_data: *cloudinit_user_data_hwe
interfaces:
- label: enp2s0f0
@@ -455,7 +457,7 @@
# for storing the image with cloud-init metadata.
cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
+ cloudinit_user_data: *cloudinit_user_data_hwe
interfaces:
# - label: eno1
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml
index 49f3c1f..8c214e3 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/openstack.yaml
@@ -53,7 +53,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@keystone:client' state.sls keystone.client
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 2, delay: 5}
skip_fail: false
- description: Check keystone service-list
@@ -179,7 +179,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
+ skip_fail: false
- description: Check IP on computes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
@@ -254,19 +254,19 @@
retry: {count: 1, delay: 30}
skip_fail: false
-- description: Allow all tcp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
+#- description: Allow all tcp
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+# '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
-- description: Allow all icmp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
+#- description: Allow all icmp
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+# '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
- description: sync time
cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
@@ -330,30 +330,3 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
skip_fail: false
-
-- description: Run tests
- cmd: |
- if [[ {{ PATTERN }} == "false" ]]; then
- docker run --rm --net=host -e TEMPEST_CONF=lvm_mcp.conf -e SKIP_LIST=mcp_skip.list -e SOURCE_FILE=keystonercv3 -v /etc/ssl/certs/:/etc/ssl/certs/ -v /root/:/home/rally docker-sandbox.sandbox.mirantis.net/rally-tempest/rally-tempest:with_designate >> image.output
- else
- docker run --rm --net=host -e TEMPEST_CONF=lvm_mcp.conf -e SKIP_LIST=mcp_skip.list -e SOURCE_FILE=keystonercv3 -v /etc/ssl/certs/:/etc/ssl/certs/ -e CUSTOM='--pattern {{ PATTERN }}' -v /root/:/home/rally docker-sandbox.sandbox.mirantis.net/rally-tempest/rally-tempest:with_designate >> image.output
- fi
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Download xml results
- download:
- remote_path: /root
- remote_filename: "report_*.xml"
- local_path: {{ os_env('PWD') }}
- node_name: {{ HOSTNAME_GTW01 }}
- skip_fail: true
-
-- description: Download html results
- download:
- remote_path: /root
- remote_filename: "report_*.html"
- local_path: {{ os_env('PWD') }}
- node_name: {{ HOSTNAME_GTW01 }}
- skip_fail: true
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml
index e2ee183..f26d7ee 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/oss.yaml
@@ -158,19 +158,21 @@
# Aptly
#------
-- description: "Wait for Aptly to come up in container..."
- cmd: timeout 300 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@aptly:server' cmd.run
- 'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
- while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:8084/api/version && break; sleep 2; done'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 15}
- skip_fail: false
+#### Steps are commented out due to PROD-17598
-- description: "Setup Aptly"
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@aptly:server' state.sls aptly
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 10}
- skip_fail: false
+#- description: "Wait for Aptly to come up in container..."
+# cmd: timeout 300 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@aptly:server' cmd.run
+# 'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
+# while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:8084/api/version && break; sleep 2; done'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 3, delay: 15}
+# skip_fail: false
+
+#- description: "Setup Aptly"
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@aptly:server' state.sls aptly
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 3, delay: 10}
+# skip_fail: false
# OpenLDAP
#---------
@@ -211,7 +213,7 @@
#--------
- description: "Waiting for Jenkins to come up in container..."
- cmd: timeout 60 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@jenkins:client' cmd.run
+ cmd: timeout 60 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:client:stack:jenkins' cmd.run
'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
export JENKINS_CLIENT_USER=$(salt-call --out=newline_values_only pillar.get _param:jenkins_client_user);
export JENKINS_CLIENT_PASSWORD=$(salt-call --out=newline_values_only pillar.get _param:jenkins_client_password);
@@ -235,7 +237,7 @@
- description: "Waiting for postgresql database to come up in container..."
# cmd: timeout 300 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@postgresql:client' cmd.run
# 'while true; do if docker service logs postgresql_db | grep -q "ready to accept"; then break; else sleep 5; fi; done'
- cmd: timeout 300 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@postgresql:client' cmd.run
+ cmd: timeout 300 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:client:stack:postgresql' cmd.run
'while true; do if docker service logs postgresql_postgresql-db | grep -q "ready to accept"; then break; else sleep 5; fi; done'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 3, delay: 10}
@@ -245,7 +247,7 @@
"1. State postgresql.client cannot insert values into 'pushkin' database because it is created empty,\n"
"2. Container with Pushkin cannot start and fill the database scheme until state postgresql.client created users.")
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@postgresql:client' state.sls postgresql.client -b 1 &&
- timeout 300 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@postgresql:client' cmd.run
+ timeout 300 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:client:stack:postgresql' cmd.run
'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:8887/apps && break; sleep 2; done'
node_name: {{ HOSTNAME_CFG01 }}
@@ -256,7 +258,7 @@
#--------
- description: Waiting for Rundeck to come up in container...
- cmd: timeout 30 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@rundeck:client' cmd.run
+ cmd: timeout 30 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:client:stack:rundeck' cmd.run
'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:4440 && break; sleep 2; done'
node_name: {{ HOSTNAME_CFG01 }}
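The Jenkins, postgresql and Rundeck waits above share one readiness pattern: target the minion that runs the docker client for the stack, resolve the CICD control VIP from pillar, then poll the endpoint with curl until it answers. A generic sketch of that pattern; the stack name "myservice" and port 9999 are placeholders, not part of this change:

    - description: "Wait for a service endpoint to come up in container..."
      cmd: timeout 300 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:client:stack:myservice' cmd.run
        'export CICD_CONTROL_ADDRESS=$(salt-call --out=newline_values_only pillar.get _param:cluster_vip_address);
        while true; do curl -sf http://${CICD_CONTROL_ADDRESS}:9999 && break; sleep 2; done'
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 3, delay: 10}
      skip_fail: false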
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cookiecutter-cicd_oss.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cookiecutter-cicd_oss.yaml
index a5006d4..8022d47 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cookiecutter-cicd_oss.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cookiecutter-cicd_oss.yaml
@@ -132,6 +132,7 @@
salt_master_management_address: 10.167.5.15
stacklight_enabled: 'True'
stacklight_version: '2'
+ fluentd_enabled: 'True'
stacklight_monitor_address: 10.167.4.70
stacklight_monitor_hostname: mon
stacklight_monitor_node01_address: 10.167.4.71
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml
index 6037981..2a86269 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml
@@ -19,12 +19,14 @@
{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins" "fluentd"') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/environment/' + ENVIRONMENT_MODEL_INVENTORY_NAME + '/overrides.yml') }}
+
- description: "Workaround for combined roles: remove unnecessary classes"
cmd: |
set -e;
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/sl.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/sl.yaml
index 1a6a7e3..962035c 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/sl.yaml
@@ -19,105 +19,105 @@
skip_fail: false
- description: Configure docker service
- cmd: salt -C 'I@docker:swarm' state.sls docker.host
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install docker swarm on master node
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Send grains to the swarm slave nodes
- cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Update mine
- cmd: salt -C 'I@docker:swarm' mine.update
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Refresh modules
- cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Rerun swarm on slaves for proper token population
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Configure slave nodes
- cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: List registered Docker swarm nodes
- cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
# Install slv2 infra
- description: Install telegraf
- cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
cmd: |
- if salt -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt -C 'I@prometheus:exporters' state.sls prometheus
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-- description: Configure collector
- cmd: salt -C 'I@heka:log_collector' state.sls heka.log_collector
+- description: Configure fluentd
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install elasticsearch server
- cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install kibana server
- cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install elasticsearch client
- cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install kibana client
- cmd: salt -C 'I@kibana:client' state.sls kibana.client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Check influxdb
cmd: |
- INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+ INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt -C 'I@influxdb:server' state.sls influxdb
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
@@ -126,60 +126,44 @@
# Collect grains needed to configure the services
- description: Get grains
- cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Sync modules
- cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Update mine
- cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-# Change environment configuration before deploy
-- description: Set SL docker images deploy parameters
- cmd: |
- {#- For cookiecutter-generated model, use overrides.yml from environment model instead of cluster model #}
- {%- set OVERRIDES_FILENAME='/srv/salt/reclass/classes/environment/' + ENVIRONMENT_MODEL_INVENTORY_NAME + '/overrides.yml' %}
- {%- for sl_opt, value in config.sl_deploy.items() %}
- {%- if value|string() %}
- salt-call reclass.cluster_meta_set name={{ sl_opt }} value={{ value }} file_name={{ OVERRIDES_FILENAME }};
- {%- endif %}
- {%- endfor %}
- salt '*' saltutil.refresh_pillar;
- sleep 10
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-
# Configure the services running in Docker Swarm
- description: Install prometheus alertmanager
- cmd: salt -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:server' state.sls prometheus -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: run docker state
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: docker ps
- cmd: salt -C 'I@docker:swarm' dockerng.ps
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt -C 'I@grafana:client' state.sls grafana.client
+ cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
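The removed "Set SL docker images deploy parameters" step duplicated logic that now sits behind SHARED.ADJUST_SL_OPTS, invoked from salt.yaml with the same overrides.yml path. Judging from the removed inline version, the macro presumably expands to the same loop over config.sl_deploy, roughly:

    {#- a sketch reconstructed from the removed step; the macro's actual body lives in the shared template #}
    {%- for sl_opt, value in config.sl_deploy.items() %}
    {%- if value|string() %}
    salt-call reclass.cluster_meta_set name={{ sl_opt }} value={{ value }} file_name={{ OVERRIDES_FILENAME }};
    {%- endif %}
    {%- endfor %}
    salt '*' saltutil.refresh_pillar; sleep 10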
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data-cfg01.yaml
index 2ac0aa1..3e70fd8 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -47,38 +45,7 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
-
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - apt-get update
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- # Install salt-minion and stop it until it is configured
- - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
write_files:
- path: /etc/default/grub.d/97-enable-grub-menu.cfg
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604-hwe.yaml
new file mode 100644
index 0000000..319c007
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604-hwe.yaml
@@ -0,0 +1,59 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 16G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604.yaml
index 69b710f..319c007 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay--user-data1604.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -47,41 +45,6 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- # Install salt-minion and stop it until it is configured
- - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
- # Install latest kernel
- - eatmydata apt-get install -y linux-generic-hwe-16.04
-
- ########################################################
- # Node is ready, allow SSH access
- #- echo "Allow SSH access ..."
- #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- - reboot
- ########################################################
-
write_files:
- path: /etc/default/grub.d/97-enable-grub-menu.cfg
content: |
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml
index 66cd81c..995f855 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/underlay.yaml
@@ -21,6 +21,7 @@
{% import 'cookied-mcp-ocata-dop-sl2/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
{% import 'cookied-mcp-ocata-dop-sl2/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
{% import 'cookied-mcp-ocata-dop-sl2/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+{% import 'cookied-mcp-ocata-dop-sl2/underlay--user-data1604-hwe.yaml' as CLOUDINIT_USER_DATA_1604_HWE with context %}
---
aliases:
@@ -28,6 +29,7 @@
- &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
+ - &cloudinit_user_data_1604_hwe {{ CLOUDINIT_USER_DATA_1604_HWE }}
template:
devops_settings:
@@ -215,6 +217,12 @@
- name: cfg01_day01_image # Pre-configured day01 image
source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
format: qcow2
+ - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes, initially based on the kvm nodes.
+ # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
+ # or
+ # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
+ format: qcow2
nodes:
- name: {{ HOSTNAME_CFG01 }}
@@ -267,7 +275,7 @@
volumes:
- name: system
capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for storing the image with cloud-init metadata.
@@ -305,7 +313,7 @@
volumes:
- name: system
capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for storing the image with cloud-init metadata.
@@ -331,7 +339,7 @@
volumes:
- name: system
capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for storing the image with cloud-init metadata.
@@ -357,7 +365,7 @@
volumes:
- name: system
capacity: {{ os_env('CTL_NODE_VOLUME_SIZE', 150) }}
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: cinder
capacity: 50
@@ -386,7 +394,7 @@
volumes:
- name: system
capacity: {{ os_env('CTL_NODE_VOLUME_SIZE', 150) }}
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: cinder
capacity: 50
@@ -415,7 +423,7 @@
volumes:
- name: system
capacity: {{ os_env('CTL_NODE_VOLUME_SIZE', 150) }}
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: cinder
capacity: 50
@@ -444,7 +452,7 @@
volumes:
- name: system
capacity: {{ os_env('MON_NODE_VOLUME_SIZE', 150) }}
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: cinder
capacity: 50
@@ -473,7 +481,7 @@
volumes:
- name: system
capacity: {{ os_env('MON_NODE_VOLUME_SIZE', 150) }}
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: cinder
capacity: 50
@@ -502,7 +510,7 @@
volumes:
- name: system
capacity: {{ os_env('MON_NODE_VOLUME_SIZE', 150) }}
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: cinder
capacity: 50
@@ -531,7 +539,7 @@
volumes:
- name: system
capacity: {{ os_env('MON_NODE_VOLUME_SIZE', 150) }}
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: cinder
capacity: 50
@@ -569,7 +577,7 @@
device: cdrom
bus: ide
cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
+ cloudinit_user_data: *cloudinit_user_data_1604_hwe
interfaces: &all_interfaces
@@ -620,7 +628,7 @@
device: cdrom
bus: ide
cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
+ cloudinit_user_data: *cloudinit_user_data_1604_hwe
interfaces: *all_interfaces
network_config: *all_network_config
@@ -646,7 +654,7 @@
device: cdrom
bus: ide
cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
+ cloudinit_user_data: *cloudinit_user_data_1604_hwe
interfaces: *all_interfaces
network_config: *all_network_config
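The new mcp_ubuntu_1604_image entry chains os_env fallbacks: MCP_IMAGE_PATH1604 wins when set, otherwise the generic IMAGE_PATH1604 cloud image is reused for the VCP nodes as before. A minimal usage sketch; the paths are illustrative, not defaults from this change:

    export IMAGE_PATH1604=/images/xenial-server-cloudimg-amd64-disk1.img
    export MCP_IMAGE_PATH1604=/images/ubuntu-16-04-x64-latest.qcow2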
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/openstack.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/openstack.yaml
index 9609fa4..3882976 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/openstack.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/openstack.yaml
@@ -53,7 +53,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@keystone:client' state.sls keystone.client
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 2, delay: 5}
skip_fail: false
- description: Check keystone service-list
@@ -179,7 +179,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
+ skip_fail: false
- description: Check IP on computes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
@@ -254,19 +254,19 @@
retry: {count: 1, delay: 30}
skip_fail: false
-- description: Allow all tcp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
+#- description: Allow all tcp
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+# '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
-- description: Allow all icmp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
+#- description: Allow all icmp
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+# '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
- description: sync time
cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
@@ -372,30 +372,3 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
skip_fail: false
-
-- description: Run tests
- cmd: |
- if [[ {{ PATTERN }} == "false" ]]; then
- docker run --rm --net=host -e TEMPEST_CONF=lvm_mcp.conf -e SKIP_LIST=mcp_skip.list -e SOURCE_FILE=keystonercv3 -v /etc/ssl/certs/:/etc/ssl/certs/ -v /root/:/home/rally docker-sandbox.sandbox.mirantis.net/rally-tempest/rally-tempest:with_designate >> image.output
- else
- docker run --rm --net=host -e TEMPEST_CONF=lvm_mcp.conf -e SKIP_LIST=mcp_skip.list -e SOURCE_FILE=keystonercv3 -v /etc/ssl/certs/:/etc/ssl/certs/ -e CUSTOM='--pattern {{ PATTERN }}' -v /root/:/home/rally docker-sandbox.sandbox.mirantis.net/rally-tempest/rally-tempest:with_designate >> image.output
- fi
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-- description: Download xml results
- download:
- remote_path: /root
- remote_filename: "report_*.xml"
- local_path: {{ os_env('PWD') }}
- node_name: {{ HOSTNAME_GTW01 }}
- skip_fail: true
-
-- description: Download html results
- download:
- remote_path: /root
- remote_filename: "report_*.html"
- local_path: {{ os_env('PWD') }}
- node_name: {{ HOSTNAME_GTW01 }}
- skip_fail: true
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt.yaml
index 45514bb..0ad8e5e 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt.yaml
@@ -25,6 +25,8 @@
{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/environment/' + ENVIRONMENT_MODEL_INVENTORY_NAME + '/overrides.yml') }}
+
{{ SHARED.MACRO_GENERATE_INVENTORY() }}
{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/sl.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/sl.yaml
index 0668eb7..dd64b90 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/sl.yaml
@@ -18,105 +18,105 @@
skip_fail: false
- description: Configure docker service
- cmd: salt -C 'I@docker:swarm' state.sls docker.host
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install docker swarm on master node
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Send grains to the swarm slave nodes
- cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Update mine
- cmd: salt -C 'I@docker:swarm' mine.update
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Refresh modules
- cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Rerun swarm on slaves to ensure proper token population
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Configure slave nodes
- cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: List registered Docker swarm nodes
- cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
# Install slv2 infra
- description: Install telegraf
- cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
cmd: |
- if salt -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt -C 'I@prometheus:exporters' state.sls prometheus
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Configure collector
- cmd: salt -C 'I@heka:log_collector' state.sls heka.log_collector
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install elasticsearch server
- cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install kibana server
- cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install elasticsearch client
- cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install kibana client
- cmd: salt -C 'I@kibana:client' state.sls kibana.client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Check influxdb
cmd: |
- INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+ INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt -C 'I@influxdb:server' state.sls influxdb
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
@@ -125,56 +125,44 @@
# Collect grains needed to configure the services
- description: Get grains
- cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Sync modules
- cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Update mine
- cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-# Change environment configuration before deploy
-- description: Set SL docker images deploy parameters
- cmd: |
- {% for sl_opt, value in config.sl_deploy.items() %}
- {% if value|string() %}
- salt-call reclass.cluster_meta_set {{ sl_opt }} {{ value }};
- {% endif %}
- {% endfor %}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-
# Configure the services running in Docker Swarm
- description: Install prometheus alertmanager
- cmd: salt -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: run docker state
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: docker ps
- cmd: salt -C 'I@docker:swarm' dockerng.ps
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt -C 'I@grafana:client' state.sls grafana.client
+ cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
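Every salt call in this template now carries the same three flags: --hard-crash surfaces internal exceptions instead of exiting gracefully (so retry/skip_fail logic reacts to real failures), --state-output=mixed prints full output only for failed states, and --state-verbose=False hides states that made no changes. The pattern is easy to reuse for ad-hoc runs; a sketch, assuming a configured master:

    #!/bin/bash
    # the shared flag set used by the sl.yaml steps above
    SALT="salt --hard-crash --state-output=mixed --state-verbose=False"
    # example: re-run the swarm-master state from the template by hand
    ${SALT} -C 'I@docker:swarm:role:master' state.sls docker.swarm
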
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data-cfg01.yaml
index a00d531..19ae10b 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -47,38 +45,7 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
-
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - apt-get update
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- # Install salt-minion and stop it until it is configured
- - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
write_files:
- path: /etc/default/grub.d/97-enable-grub-menu.cfg
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604-hwe.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604-hwe.yaml
new file mode 100644
index 0000000..319c007
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604-hwe.yaml
@@ -0,0 +1,59 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Prepare network connection
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 16G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
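Judging by the blob hashes (319c007 both here and in the next hunk), this new -hwe user-data is byte-identical to the updated underlay--user-data1604.yaml; the separate file exists so the HWE variant can diverge later without touching node definitions. A quick check against a checkout of this change:

    cd tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan
    diff underlay--user-data1604.yaml underlay--user-data1604-hwe.yaml && echo "identical"
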
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604.yaml
index 69b710f..319c007 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -47,41 +45,6 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- # Install salt-minion and stop it until it is configured
- - eatmydata apt-get install -y salt-minion && service salt-minion stop
-
- # Install latest kernel
- - eatmydata apt-get install -y linux-generic-hwe-16.04
-
- ########################################################
- # Node is ready, allow SSH access
- #- echo "Allow SSH access ..."
- #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- - reboot
- ########################################################
-
write_files:
- path: /etc/default/grub.d/97-enable-grub-menu.cfg
content: |
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml
index 2511ca7..ef002fe 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/underlay.yaml
@@ -44,6 +44,7 @@
{% import 'cookied-mcp-ocata-dvr-vxlan/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
{% import 'cookied-mcp-ocata-dvr-vxlan/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
{% import 'cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+{% import 'cookied-mcp-ocata-dvr-vxlan/underlay--user-data1604-hwe.yaml' as CLOUDINIT_USER_DATA_1604_HWE with context %}
---
aliases:
@@ -51,6 +52,7 @@
- &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
+ - &cloudinit_user_data_1604_hwe {{ CLOUDINIT_USER_DATA_1604_HWE }}
template:
devops_settings:
@@ -331,12 +333,18 @@
- name: cfg01_day01_image # Pre-configured day01 image
source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
format: qcow2
+ - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes, initially based on the kvm nodes' image.
+ # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
+ # or
+ # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
+ format: qcow2
nodes:
- name: {{ HOSTNAME_CFG01 }}
role: salt_master
params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
+ vcpu: !os_env SLAVE_NODE_CPU, 3
memory: !os_env SLAVE_NODE_MEMORY, 4096
boot:
- hd
@@ -383,7 +391,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -421,7 +429,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -447,7 +455,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -552,7 +560,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -578,7 +586,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -604,7 +612,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -630,7 +638,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -656,7 +664,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -682,7 +690,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -708,7 +716,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: cinder
capacity: 50
@@ -737,7 +745,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: cinder
capacity: 50
@@ -766,7 +774,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: cinder
capacity: 50
@@ -795,7 +803,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -821,7 +829,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -847,7 +855,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -873,7 +881,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -899,7 +907,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -925,7 +933,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -951,7 +959,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -977,7 +985,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -1003,7 +1011,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -1029,7 +1037,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: cinder
capacity: 50
@@ -1067,7 +1075,7 @@
device: cdrom
bus: ide
cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
+ cloudinit_user_data: *cloudinit_user_data_1604_hwe
interfaces: &all_interfaces
@@ -1118,7 +1126,7 @@
device: cdrom
bus: ide
cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
+ cloudinit_user_data: *cloudinit_user_data_1604_hwe
interfaces: *all_interfaces
network_config: *all_network_config
@@ -1144,7 +1152,7 @@
device: cdrom
bus: ide
cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
+ cloudinit_user_data: *cloudinit_user_data_1604_hwe
interfaces: *all_interfaces
network_config: *all_network_config
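The new mcp_ubuntu_1604_image volume resolves its source through os_env, so the pre-built VCP image can be swapped per run without editing the template; a sketch of the intended environment (local paths are illustrative assumptions):

    # preferred: pre-built MCP image; IMAGE_PATH1604 remains the fallback
    export MCP_IMAGE_PATH1604=/var/lib/images/ubuntu-16-04-x64-latest.qcow2
    export IMAGE_PATH1604=/var/lib/images/xenial-server-cloudimg-amd64-disk1.img
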
diff --git a/tcp_tests/templates/cookied-model-generator/run_test.sh b/tcp_tests/templates/cookied-model-generator/run_test.sh
new file mode 100755
index 0000000..213afea
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/run_test.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+. /home/jenkins/fuel-devops30/bin/activate
+
+export ENV_NAME=model-generator
+export VENV_PATH=/home/jenkins/fuel-devops30
+# Image must be uploaded to the storage pool
+export CFG01_DAY01_VOLUME_NAME=cfg01-day01.qcow2
+export SHUTDOWN_ENV_ON_TEARDOWN=true
+export PYTHONIOENCODING=UTF-8
+export REPOSITORY_SUITE=testing
+
+export LAB_CONFIG_NAME=cookied-model-generator
+export LAB_CONTEXT_NAME=cookied-mcp-ocata-dop-sl2
+#export LAB_CONTEXT_NAME=cookied-mcp-ocata-dvr-vxlan
+#export LAB_CONTEXT_NAME=cookied-bm-mcp-dvr-vxlan
+#export LAB_CONTEXT_NAME=cookied-bm-mcp-ocata-contrail
+
+export DOMAIN_NAME=${LAB_CONTEXT_NAME}.local
+export SALT_STEPS_PATH=templates/${LAB_CONFIG_NAME}/salt_${LAB_CONTEXT_NAME}.yaml
+
+export TEST_GROUP=test_generate_model
+
+export STORAGE_POOL_NAME=second_pool
+
+export MAKE_SNAPSHOT_STAGES=false
+
+dos.py erase ${ENV_NAME}
+
+cd tcp_tests
+py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -k ${TEST_GROUP}
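Note that LAB_CONTEXT_NAME is pinned inside the script, so selecting another lab means switching the commented export lines rather than overriding the variable from outside. A hedged usage sketch (the venv path, storage pool, and image name must match the slave's layout):

    # from the repository root, as the user owning the fuel-devops venv
    ./tcp_tests/templates/cookied-model-generator/run_test.sh
    # afterwards, the model-generator env can be inspected or torn down:
    dos.py list
    dos.py erase model-generator
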
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
new file mode 100644
index 0000000..24a6c8b
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-dvr-vxlan.yaml
@@ -0,0 +1,63 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-bm-mcp-dvr-vxlan' %}
+# Name of the context file (without the fixed .yaml extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','cookied-bm-mcp-dvr-vxlan') %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-vcp-environment.yaml', 'salt-context-lab03-environment.yaml'] %}
+{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2416') %}
+{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2417') %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: Temporary workaround for the cinder backend defined by default in reclass.system
+ cmd: |
+ sed -i 's/backend\:\ {}//g' /srv/salt/reclass/classes/system/cinder/control/cluster.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: "Workaround for rack01 compute generator"
+ cmd: |
+ set -e;
+ . /root/venv-reclass-tools/bin/activate;
+ # Remove rack01 key
+ reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+ # Add openstack_compute_node definition from system
+ reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml --merge;
+
+ # Set ipaddresses for our nodes
+ reclass-tools add-key parameters._param.openstack_compute_node01_control_address 10.167.4.3 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools add-key parameters._param.openstack_compute_node02_control_address 10.167.4.31 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools add-key parameters._param.openstack_compute_node01_tenant_address 10.167.6.3 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools add-key parameters._param.openstack_compute_node02_tenant_address 10.167.6.31 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+ cmd: |
+ sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+#- description: Refresh pillars and install dependencies for salt-master
+# cmd: |
+# salt '*' saltutil.refresh_pillar; sleep 5;
+# salt '*' state.sls salt.master;
+# salt '*' saltutil.refresh_pillar; sleep 5;
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 5}
+# skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
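After this del-key/add-key surgery it is worth confirming that the model still renders before any states run; a minimal check on cfg01, assuming the reclass CLI is available next to salt:

    # a parse error here means one of the workarounds broke the model
    reclass --inventory -b /srv/salt/reclass > /dev/null && echo "model renders cleanly"
    # confirm the rack01 generator key is really gone
    grep -rn 'openstack_compute_rack01' /srv/salt/reclass/classes/cluster/ || echo "rack01 generator removed"
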
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml
new file mode 100644
index 0000000..4da1add
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ocata-contrail.yaml
@@ -0,0 +1,64 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-bm-mcp-ocata-contrail' %}
+# Name of the context file (without the fixed .yaml extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME','physical-cookied-bm-mcp-ocata-contrail') %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-contrail-dpdk.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml','lab04-physical-inventory.yaml'] %}
+{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2422') %}
+{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2423') %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: Temporary workaround for the cinder backend defined by default in reclass.system
+ cmd: |
+ sed -i 's/backend\:\ {}//g' /srv/salt/reclass/classes/system/cinder/control/cluster.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: "Workaround for rack01 compute generator"
+ cmd: |
+ set -e;
+ . /root/venv-reclass-tools/bin/activate;
+ # Remove rack01 key
+ reclass-tools del-key parameters.reclass.storage.node.openstack_compute_rack01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ # Add openstack_compute_node definition from system
+ reclass-tools add-key 'classes' 'system.reclass.storage.system.openstack_compute_multi' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml --merge;
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+ cmd: |
+ sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
+
+- description: "Workaround for PROD-14060"
+ cmd: |
+ set -e;
+ # Add tenant and single addresses for computes
+ salt-call reclass.cluster_meta_set deploy_address 172.16.49.72 /srv/salt/reclass/nodes/_generated/cmp001.cookied-bm-mcp-ocata-contrail.local.yml
+ salt-call reclass.cluster_meta_set tenant_address 192.168.0.101 /srv/salt/reclass/nodes/_generated/cmp001.cookied-bm-mcp-ocata-contrail.local.yml
+ salt-call reclass.cluster_meta_set single_address 10.167.8.101 /srv/salt/reclass/nodes/_generated/cmp001.cookied-bm-mcp-ocata-contrail.local.yml
+
+ salt-call reclass.cluster_meta_set deploy_address 172.16.49.74 /srv/salt/reclass/nodes/_generated/cmp002.cookied-bm-mcp-ocata-contrail.local.yml
+ salt-call reclass.cluster_meta_set tenant_address 192.168.0.102 /srv/salt/reclass/nodes/_generated/cmp002.cookied-bm-mcp-ocata-contrail.local.yml
+ salt-call reclass.cluster_meta_set single_address 10.167.8.102 /srv/salt/reclass/nodes/_generated/cmp002.cookied-bm-mcp-ocata-contrail.local.yml
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dop-sl2.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dop-sl2.yaml
new file mode 100644
index 0000000..d1693c1
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dop-sl2.yaml
@@ -0,0 +1,44 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for other salt model repository parameters
+
+{% set LAB_CONFIG_NAME = 'cookied-mcp-ocata-dop-sl2' %}
+# Name of the context file (without the fixed .yaml extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-cicd_oss.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+ cmd: |
+ set -e;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
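The ten del-key calls above differ only in the node name; if the list keeps growing, the step could be collapsed into a loop with the same semantics (a sketch using the same names and file as the step above):

    CFG=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml
    for node in infra_kvm_node0{1,2,3} \
                openstack_database_node0{1,2,3} \
                openstack_message_queue_node0{1,2,3} \
                stacklight_log_node01; do
      reclass-tools del-key "parameters.reclass.storage.node.${node}" "${CFG}"
    done
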
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr-vxlan.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr-vxlan.yaml
new file mode 100644
index 0000000..d404b3a
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr-vxlan.yaml
@@ -0,0 +1,17 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-mcp-ocata-dvr-vxlan' %}
+# Name of the context file (without the fixed .yaml extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = 'salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-environment.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
new file mode 100644
index 0000000..bb17c15
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-dvr.yaml
@@ -0,0 +1,50 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for other salt model repository parameters
+
+{% set LAB_CONFIG_NAME = 'virtual-mcp-ocata-dvr' %}
+# Name of the context file (without the fixed .yaml extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-ocata-dvr.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+ cmd: |
+ set -e;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+ salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+ salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+ # Workaround for the missing reclass.system class for the dns role
+ salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
new file mode 100644
index 0000000..b711673
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-ocata-ovs.yaml
@@ -0,0 +1,44 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for other salt model repository parameters
+
+{% set LAB_CONFIG_NAME = 'virtual-mcp-ocata-ovs' %}
+# Name of the context file (without the fixed .yaml extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-ocata-ovs.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+ cmd: |
+ set -e;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
new file mode 100644
index 0000000..c823df7
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-dvr.yaml
@@ -0,0 +1,50 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for other salt model repository parameters
+
+{% set LAB_CONFIG_NAME = 'virtual-mcp-pike-dvr' %}
+# Name of the context file (without the fixed .yaml extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-pike-dvr.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+ cmd: |
+ set -e;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+ salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+ salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+ # Workaround for the missing reclass.system class for the dns role
+ salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
new file mode 100644
index 0000000..47427e3
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-pike-ovs.yaml
@@ -0,0 +1,44 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# See shared-salt.yaml for other salt model repository parameters
+
+{% set LAB_CONFIG_NAME = 'virtual-mcp-pike-ovs' %}
+# Name of the context file (without the fixed .yaml extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-pike-ovs.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+ cmd: |
+ set -e;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/underlay--meta-data.yaml b/tcp_tests/templates/cookied-model-generator/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+ instance-id: iid-local1
+ hostname: {hostname}
+ local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-model-generator/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-model-generator/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..c871146
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/underlay--user-data-cfg01.yaml
@@ -0,0 +1,98 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Block access to SSH while the node is being prepared
+ - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ # Prepare network connection
+ - sudo ifdown ens3
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Enable grub menu using updated config below
+ - update-grub
+
+ # Create swap
+ #- fallocate -l 16G /swapfile
+ #- chmod 600 /swapfile
+ #- mkswap /swapfile
+ #- swapon /swapfile
+ #- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ ############## TCP Cloud cfg01 node ##################
+ - echo "Preparing base OS"
+
+ - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+ - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
+
+ - apt-get clean
+ - apt-get update
+
+ # Ensure that the salt-master service is ready to receive requests
+ - salt-key -y -D
+ - service salt-master restart
+ - service salt-minion restart
+ - apt-get install -y salt-formula-*
+ - for f in $(ls -1 /usr/share/salt-formulas/reclass/service); do ln -s /usr/share/salt-formulas/reclass/service/$f /srv/salt/reclass/classes/service/ || true; done
+ - salt-call --timeout=180 test.ping
+
+ ########################################################
+ # Node is ready, allow SSH access
+ - echo "Allow SSH access ..."
+ - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ ########################################################
+
+ write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
+ - path: /root/.ssh/config
+ owner: root:root
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 300
+ ServerAliveCountMax 10
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
+
+ # Fix hardcoded IP address in the minion.conf
+ - path: /etc/salt/minion.d/minion.conf
+ content: |
+ master: 127.0.0.1
+ id: cfg01.{{ DOMAIN_NAME }}
diff --git a/tcp_tests/templates/cookied-model-generator/underlay.yaml b/tcp_tests/templates/cookied-model-generator/underlay.yaml
new file mode 100644
index 0000000..e90a8c8
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/underlay.yaml
@@ -0,0 +1,86 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-model-generator') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01') %}
+
+{% import 'cookied-model-generator/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-model-generator/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', LAB_CONFIG_NAME + '_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+ address_pools:
+ admin-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +15
+ ip_ranges:
+ dhcp: [+90, -10]
+
+ groups:
+ - name: default
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: {{ os_env('CONNECTION_STRING', 'qemu:///system') }}
+ storage_pool_name: {{ os_env('STORAGE_POOL_NAME', 'default') }}
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: {{ os_env('DRIVER_USE_HOST_CPU', true) }}
+ use_hugepages: {{ os_env('DRIVER_USE_HUGEPAGES', false) }}
+
+ network_pools:
+ admin: admin-pool01
+
+ l2_network_devices:
+ admin:
+ address_pool: admin-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: {{ os_env('CFG_NODE_CPU', 4) }}
+ memory: {{ os_env('CFG_NODE_MEMORY', 4096) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('CFG_NODE_VOLUME_SIZE', 150) }}
+ shared_backing_store_name: {{ os_env('CFG01_DAY01_VOLUME_NAME') }}
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_cfg01
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ network_config:
+ ens3:
+ networks:
+ - admin
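In the address pool above, offsets like +1, +15, +90 and -10 are resolved relative to the allocated subnet: positive values count from the network address, negative values from the end of the range. A sketch of that resolution for a sample 10.70.0.0/24 allocation carved out of ADMIN_ADDRESS_POOL01 (the exact semantics live in fuel-devops; this illustration assumes the usual offset behaviour):

    import ipaddress

    # Illustration only: assumes fuel-devops-style offsets, where +N is the
    # N-th address from the network start and -N counts back from the end.
    net = ipaddress.ip_network("10.70.0.0/24")

    def offset(net, n):
        base = int(net.network_address) if n >= 0 else int(net.broadcast_address)
        return ipaddress.ip_address(base + n)

    print(offset(net, 1))                     # gateway / l2_network_device: 10.70.0.1
    print(offset(net, 15))                    # default_cfg01: 10.70.0.15
    print(offset(net, 90), offset(net, -10))  # dhcp range: 10.70.0.90 .. 10.70.0.245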
diff --git a/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml b/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml
index dcc2933..3afdf7f 100644
--- a/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml
+++ b/tcp_tests/templates/ironic_standalone/underlay--user-data-ironic.yaml
@@ -89,7 +89,7 @@
- chmod a+r /httpboot/coreos_production_pxe*
- echo "Download ubuntu cloudinit image"
- - wget https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img -O /httpboot/xenial-server-cloudimg-amd64.qcow2
+ - wget {{ os_env('IMAGE_URL1604', 'https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img') }} -O /httpboot/xenial-server-cloudimg-amd64.qcow2
########################################################
# Node is ready, allow SSH access
diff --git a/tcp_tests/templates/mcp-ocata-local-repo-dvr/openstack.yaml b/tcp_tests/templates/mcp-ocata-local-repo-dvr/openstack.yaml
index 89eef69..8802687 100644
--- a/tcp_tests/templates/mcp-ocata-local-repo-dvr/openstack.yaml
+++ b/tcp_tests/templates/mcp-ocata-local-repo-dvr/openstack.yaml
@@ -72,7 +72,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@keystone:client' state.sls keystone.client
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 2, delay: 5}
skip_fail: false
- description: Check keystone service-list
@@ -198,7 +198,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
+ skip_fail: false
- description: Check IP on computes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/mcp-ocata-local-repo-dvr/sl.yaml b/tcp_tests/templates/mcp-ocata-local-repo-dvr/sl.yaml
index 37e3ad4..4927c4f 100644
--- a/tcp_tests/templates/mcp-ocata-local-repo-dvr/sl.yaml
+++ b/tcp_tests/templates/mcp-ocata-local-repo-dvr/sl.yaml
@@ -18,105 +18,105 @@
skip_fail: false
- description: Configure docker service
- cmd: salt -C 'I@docker:swarm' state.sls docker.host
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install docker swarm on master node
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Send grains to the swarm slave nodes
- cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Update mine
- cmd: salt -C 'I@docker:swarm' mine.update
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Refresh modules
- cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Rerun swarm on slaves to proper token population
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Configure slave nodes
- cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: List registered Docker swarm nodes
- cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
# Install slv2 infra
- description: Install telegraf
- cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
cmd: |
- if salt -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt -C 'I@prometheus:exporters' state.sls prometheus
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Configure collector
- cmd: salt -C 'I@heka:log_collector' state.sls heka.log_collector
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install elasticsearch server
- cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install kibana server
- cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install elasticsearch client
- cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install kibana client
- cmd: salt -C 'I@kibana:client' state.sls kibana.client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Check influix db
cmd: |
- INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+ INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt -C 'I@influxdb:server' state.sls influxdb
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
@@ -125,44 +125,44 @@
# Collect grains needed to configure the services
- description: Get grains
- cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Sync modules
- cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Update mine
- cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 5, delay: 15}
skip_fail: false
# Configure the services running in Docker Swarm
- description: Install prometheus alertmanager
- cmd: salt -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: run docker state
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: docker ps
- cmd: salt -C 'I@docker:swarm' dockerng.ps
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt -C 'I@grafana:client' state.sls grafana.client
+ cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
diff --git a/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay.yaml b/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay.yaml
index d7d2b24..ba4afdd 100644
--- a/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay.yaml
+++ b/tcp_tests/templates/mcp-ocata-local-repo-dvr/underlay.yaml
@@ -149,6 +149,12 @@
- name: cfg01_day01_image # Pre-configured day01 image
source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
format: qcow2
+ - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes, initially based on the kvm nodes.
+ # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
+ # or
+ # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
+ format: qcow2
nodes:
- name: {{ HOSTNAME_APT01 }}
@@ -251,7 +257,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: cinder
capacity: 50
@@ -292,7 +298,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: cinder
capacity: 50
@@ -321,7 +327,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: cinder
capacity: 50
@@ -350,7 +356,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -376,7 +382,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -402,7 +408,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
@@ -428,7 +434,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: cinder
capacity: 50
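All of the backing_store switches in this file point the system volumes at the new mcp_ubuntu_1604_image, whose source is resolved through a nested os_env lookup: MCP_IMAGE_PATH1604 wins if set, otherwise IMAGE_PATH1604 is used. A sketch of that resolution order, assuming os_env behaves like os.environ.get with a default:

    import os

    def os_env(name, default=None):
        # assumed equivalent of the template helper of the same name
        return os.environ.get(name, default)

    # The inner call is evaluated first, so IMAGE_PATH1604 is the fallback.
    source_image = os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604'))
    print(source_image)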
diff --git a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/openstack.yaml b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/openstack.yaml
index 4d0ce7b..441b300 100644
--- a/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/openstack.yaml
+++ b/tcp_tests/templates/mk24_lab_ovs_dvr_vlan_bm/openstack.yaml
@@ -159,7 +159,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
+ skip_fail: false
- description: Check IP on computes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/Readme.txt b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/Readme.txt
new file mode 100644
index 0000000..a3297a8
--- /dev/null
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/Readme.txt
@@ -0,0 +1 @@
+PoC templates. Do not use!
\ No newline at end of file
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/common-services.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/common-services.yaml
new file mode 100644
index 0000000..c782dfa
--- /dev/null
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/common-services.yaml
@@ -0,0 +1,16 @@
+{% from 'physical-mcp-ocata-offline-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+
+- description: Approve cfg01 ssh key for the jenkins user
+ cmd: mkdir -p /var/lib/jenkins/.ssh && ssh-keyscan cfg01 > /var/lib/jenkins/.ssh/known_hosts && chown jenkins /var/lib/jenkins/.ssh/known_hosts
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+
+- description: Install jq to parse JSON output
+ cmd: apt install -y jq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
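jq is installed so that later steps can filter JSON output (presumably from salt or the Jenkins API) on cfg01. A hedged Python equivalent of such a check; the `salt --out=json --static` invocation is an illustrative assumption, not a command taken from these templates:

    import json
    import subprocess

    # Illustration: parse `salt --out=json` the way a jq-based step would.
    out = subprocess.check_output(
        ["salt", "--out=json", "--static", "*", "test.ping"])
    result = json.loads(out)
    failed = [minion for minion, ok in result.items() if ok is not True]
    print("unreachable minions:", failed)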
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/openstack.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/openstack.yaml
new file mode 100644
index 0000000..745df96
--- /dev/null
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/openstack.yaml
@@ -0,0 +1,161 @@
+{% from 'physical-mcp-ocata-offline-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install OpenStack control services
+
+- description: Install glance on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glance -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install keystone service (note that different fernet keys are created on different nodes)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 15}
+ skip_fail: false
+
+- description: Restart apache due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Check apache status due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to exist)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glusterfs.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Update fernet keys for keystone server on the mounted glusterfs volume
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Populate keystone services/tenants/admins
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:client' state.sls keystone.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check keystone service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check glance image-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install nova on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nova:controller' state.sls nova -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check nova service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova service-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install cinder
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@cinder:controller' state.sls cinder -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check cinder list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install neutron service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@neutron:server' state.sls neutron -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install neutron on gtw node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@neutron:gateway' state.sls neutron
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Check neutron agent-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@heat:server' state.sls heat -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Deploy horizon dashboard
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@horizon:server' state.sls horizon
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+# Install compute node
+
+- description: Apply formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Re-apply (as in the docs) formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Check IP on computes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+ 'ip a'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 10, delay: 30}
+ skip_fail: false
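Every step in this file follows the same schema: a shell cmd executed on node_name, retried retry.count times with retry.delay seconds between attempts, with skip_fail deciding whether a final failure aborts the whole run. A minimal sketch of that contract (an illustration of the schema, not the framework's actual executor):

    import subprocess
    import time

    def run_step(step):
        # Retry/skip_fail contract assumed by the YAML steps above.
        count = step.get("retry", {}).get("count", 1)
        delay = step.get("retry", {}).get("delay", 0)
        for attempt in range(count):
            if subprocess.call(step["cmd"], shell=True) == 0:
                return
            if attempt + 1 < count:
                time.sleep(delay)
        if not step.get("skip_fail", False):
            raise RuntimeError("step failed: " + step["description"])

    run_step({
        "description": "Check IP on computes",
        "cmd": "echo ip a",  # placeholder for the real salt call
        "retry": {"count": 10, "delay": 30},
        "skip_fail": False,
    })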
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/salt.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/salt.yaml
new file mode 100644
index 0000000..04185ea
--- /dev/null
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/salt.yaml
@@ -0,0 +1,26 @@
+{% from 'physical-mcp-ocata-offline-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+# {% from 'physical-mcp-ocata-offline-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
+# {% from 'physical-mcp-ocata-offline-ovs/underlay.yaml' import DOMAIN_NAME with context %}
+
+# Environment model name stored in https://github.com/Mirantis/tcp-qa/tree/master/tcp_tests/environments
+# {% set ENVIRONMENT_MODEL_NAME = os_env('ENVIRONMENT_MODEL_NAME','physical-mcp-ocata-offline-ovs') %}
+
+# {% import 'shared-salt.yaml' as SHARED with context %}
+
+- description: Wait until the salt-minion service is started
+ cmd: timeout 90s bash -c 'while ! systemctl is-active salt-minion; do sleep 10; echo salt-minion is not running yet; done'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Refresh pillars on master node
+ cmd: sleep 90; salt-call --hard-crash --state-output=mixed --state-verbose=False saltutil.refresh_pillar
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Sync all salt resources on master node
+ cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False saltutil.sync_all && sleep 5
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--meta-data.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+ instance-id: iid-local1
+ hostname: {hostname}
+ local-hostname: {hostname}
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-apt01.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-apt01.yaml
new file mode 100644
index 0000000..6978bd3
--- /dev/null
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-apt01.yaml
@@ -0,0 +1,62 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Block access to SSH while the node is being prepared
+ - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ # Mount config drive
+ - mkdir /root/config-drive
+ - mount /dev/sr0 /root/config-drive
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ # Run user data script from config drive
+ - cd /root/config-drive && /bin/bash -xe ./user-data
+
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+
+ ########################################################
+ # Node is ready, allow SSH access
+ - echo "Allow SSH access ..."
+ - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ ########################################################
+
+ write_files:
+ - path: /root/.ssh/config
+ owner: root:root
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 300
+ ServerAliveCountMax 10
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..6978bd3
--- /dev/null
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-cfg01.yaml
@@ -0,0 +1,62 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Block access to SSH while the node is being prepared
+ - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ # Mount config drive
+ - mkdir /root/config-drive
+ - mount /dev/sr0 /root/config-drive
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ # Run user data script from config drive
+ - cd /root/config-drive && /bin/bash -xe ./user-data
+
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+
+ ########################################################
+ # Node is ready, allow SSH access
+ - echo "Allow SSH access ..."
+ - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ ########################################################
+
+ write_files:
+ - path: /root/.ssh/config
+ owner: root:root
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 300
+ ServerAliveCountMax 10
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data.yaml
new file mode 100644
index 0000000..aab7cde
--- /dev/null
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data.yaml
@@ -0,0 +1,79 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Block access to SSH while the node is being prepared
+ - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ #- sudo ifup eth0
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+
+ ############## TCP Cloud cfg01 node ##################
+ #- sleep 120
+ - echo "Preparing base OS"
+ - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+ - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+ - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list
+ - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -
+
+ - apt-get clean
+ - eatmydata apt-get update && apt-get -y upgrade
+
+ # Install common packages
+ - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+ ########################################################
+ # Node is ready, allow SSH access
+ #- echo "Allow SSH access ..."
+ #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ - apt-get install linux-generic-hwe-16.04 -y
+ - reboot
+ ########################################################
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ # The loopback network interface
+ auto lo
+ iface lo inet loopback
+
+ auto {interface_name}
+ iface {interface_name} inet dhcp
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml
new file mode 100644
index 0000000..ca5c171
--- /dev/null
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml
@@ -0,0 +1,378 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% import 'physical-mcp-ocata-offline-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'physical-mcp-ocata-offline-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'physical-mcp-ocata-offline-ovs') %}
+#{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', 'offline-ocata-vxlan.local') %}
+{% set HOSTNAME_APT = os_env('HOSTNAME_APT01', 'apt.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+
+{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM04 = os_env('HOSTNAME_KVM04', 'kvm04.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM05 = os_env('HOSTNAME_KVM05', 'kvm05.' + DOMAIN_NAME) %}
+{% set HOSTNAME_KVM06 = os_env('HOSTNAME_KVM06', 'kvm06.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW03 = os_env('HOSTNAME_GTW03', 'gtw03.' + DOMAIN_NAME) %}
+
+{% set HOSTNAME_CTL = os_env('HOSTNAME_CTL', 'ctl.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS = os_env('HOSTNAME_DBS', 'dbs.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS01 = os_env('HOSTNAME_DBS01', 'dbs01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS02 = os_env('HOSTNAME_DBS02', 'dbs02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DBS03 = os_env('HOSTNAME_DBS03', 'dbs03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG = os_env('HOSTNAME_MSG', 'msg.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG01 = os_env('HOSTNAME_MSG01', 'msg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG02 = os_env('HOSTNAME_MSG02', 'msg02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MSG03 = os_env('HOSTNAME_MSG03', 'msg03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON = os_env('HOSTNAME_MON', 'mon.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG = os_env('HOSTNAME_LOG', 'log.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR = os_env('HOSTNAME_MTR', 'mtr.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX02 = os_env('HOSTNAME_PRX02', 'prx02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX03 = os_env('HOSTNAME_PRX03', 'prx03.' + DOMAIN_NAME) %}
+
+
+
+{% set ETH0_IP_ADDRESS_APT = os_env('ETH0_IP_ADDRESS_APT', '10.10.0.14') %}
+{% set ETH0_IP_ADDRESS_CFG01 = os_env('ETH0_IP_ADDRESS_CFG01', '10.10.0.15') %}
+{% set ETH0_IP_ADDRESS_PRX01 = os_env('ETH0_IP_ADDRESS_PRX01', '10.10.0.11') %}
+{% set ETH0_IP_ADDRESS_PRX02 = os_env('ETH0_IP_ADDRESS_PRX02', '10.10.0.12') %}
+{% set ETH0_IP_ADDRESS_PRX03 = os_env('ETH0_IP_ADDRESS_PRX03', '10.10.0.13') %}
+{% set ETH0_IP_ADDRESS_CTL = os_env('ETH0_IP_ADDRESS_CTL', '10.10.0.10') %}
+{% set ETH0_IP_ADDRESS_CTL01 = os_env('ETH0_IP_ADDRESS_CTL01', '10.10.0.11') %}
+{% set ETH0_IP_ADDRESS_CTL02 = os_env('ETH0_IP_ADDRESS_CTL02', '10.10.0.12') %}
+{% set ETH0_IP_ADDRESS_CTL03 = os_env('ETH0_IP_ADDRESS_CTL03', '10.10.0.13') %}
+{% set ETH0_IP_ADDRESS_MSG = os_env('ETH0_IP_ADDRESS_MSG', '10.10.0.40') %}
+{% set ETH0_IP_ADDRESS_MSG01 = os_env('ETH0_IP_ADDRESS_MSG01', '10.10.0.41') %}
+{% set ETH0_IP_ADDRESS_MSG02 = os_env('ETH0_IP_ADDRESS_MSG02', '10.10.0.42') %}
+{% set ETH0_IP_ADDRESS_MSG03 = os_env('ETH0_IP_ADDRESS_MSG03', '10.10.0.43') %}
+{% set ETH0_IP_ADDRESS_DBS = os_env('ETH0_IP_ADDRESS_DBS', '10.10.0.50') %}
+{% set ETH0_IP_ADDRESS_DBS01 = os_env('ETH0_IP_ADDRESS_DBS01', '10.10.0.51') %}
+{% set ETH0_IP_ADDRESS_DBS02 = os_env('ETH0_IP_ADDRESS_DBS02', '10.10.0.52') %}
+{% set ETH0_IP_ADDRESS_DBS03 = os_env('ETH0_IP_ADDRESS_DBS03', '10.10.0.53') %}
+{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '10.10.0.241') %}
+{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '10.10.0.242') %}
+{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '10.10.0.243') %}
+{% set ETH0_IP_ADDRESS_KVM04 = os_env('ETH0_IP_ADDRESS_KVM04', '10.10.0.244') %}
+{% set ETH0_IP_ADDRESS_KVM05 = os_env('ETH0_IP_ADDRESS_KVM05', '10.10.0.245') %}
+{% set ETH0_IP_ADDRESS_KVM06 = os_env('ETH0_IP_ADDRESS_KVM06', '10.10.0.246') %}
+{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '10.10.0.101') %}
+{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '10.10.0.102') %}
+{% set ETH0_IP_ADDRESS_GTW01 = os_env('ETH0_IP_ADDRESS_GTW01', '10.10.0.224') %}
+{% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '10.10.0.225') %}
+{% set ETH0_IP_ADDRESS_GTW03 = os_env('ETH0_IP_ADDRESS_GTW03', '10.10.0.226') %}
+
+
+{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '10.11.0.15') %}
+{% set ETH1_IP_ADDRESS_PRX01 = os_env('ETH1_IP_ADDRESS_PRX01', '10.11.0.11') %}
+{% set ETH1_IP_ADDRESS_PRX02 = os_env('ETH1_IP_ADDRESS_PRX02', '10.11.0.12') %}
+{% set ETH1_IP_ADDRESS_PRX03 = os_env('ETH1_IP_ADDRESS_PRX03', '10.11.0.13') %}
+{% set ETH1_IP_ADDRESS_CTL = os_env('ETH1_IP_ADDRESS_CTL', '10.11.0.10') %}
+{% set ETH1_IP_ADDRESS_CTL01 = os_env('ETH1_IP_ADDRESS_CTL01', '10.11.0.11') %}
+{% set ETH1_IP_ADDRESS_CTL02 = os_env('ETH1_IP_ADDRESS_CTL02', '10.11.0.12') %}
+{% set ETH1_IP_ADDRESS_CTL03 = os_env('ETH1_IP_ADDRESS_CTL03', '10.11.0.13') %}
+{% set ETH1_IP_ADDRESS_MSG = os_env('ETH1_IP_ADDRESS_MSG', '10.11.0.40') %}
+{% set ETH1_IP_ADDRESS_MSG01 = os_env('ETH1_IP_ADDRESS_MSG01', '10.11.0.41') %}
+{% set ETH1_IP_ADDRESS_MSG02 = os_env('ETH1_IP_ADDRESS_MSG02', '10.11.0.42') %}
+{% set ETH1_IP_ADDRESS_MSG03 = os_env('ETH1_IP_ADDRESS_MSG03', '10.11.0.43') %}
+{% set ETH1_IP_ADDRESS_DBS = os_env('ETH1_IP_ADDRESS_DBS', '10.11.0.50') %}
+{% set ETH1_IP_ADDRESS_DBS01 = os_env('ETH1_IP_ADDRESS_DBS01', '10.11.0.51') %}
+{% set ETH1_IP_ADDRESS_DBS02 = os_env('ETH1_IP_ADDRESS_DBS02', '10.11.0.52') %}
+{% set ETH1_IP_ADDRESS_DBS03 = os_env('ETH1_IP_ADDRESS_DBS03', '10.11.0.53') %}
+{% set ETH1_IP_ADDRESS_KVM01 = os_env('ETH1_IP_ADDRESS_KVM01', '10.11.0.241') %}
+{% set ETH1_IP_ADDRESS_KVM02 = os_env('ETH1_IP_ADDRESS_KVM02', '10.11.0.242') %}
+{% set ETH1_IP_ADDRESS_KVM03 = os_env('ETH1_IP_ADDRESS_KVM03', '10.11.0.243') %}
+{% set ETH1_IP_ADDRESS_KVM04 = os_env('ETH1_IP_ADDRESS_KVM04', '10.11.0.244') %}
+{% set ETH1_IP_ADDRESS_KVM05 = os_env('ETH1_IP_ADDRESS_KVM05', '10.11.0.245') %}
+{% set ETH1_IP_ADDRESS_KVM06 = os_env('ETH1_IP_ADDRESS_KVM06', '10.11.0.246') %}
+{% set ETH1_IP_ADDRESS_CMP001 = os_env('ETH1_IP_ADDRESS_CMP001', '10.11.0.101') %}
+{% set ETH1_IP_ADDRESS_CMP002 = os_env('ETH1_IP_ADDRESS_CMP002', '10.11.0.102') %}
+{% set ETH1_IP_ADDRESS_GTW01 = os_env('ETH1_IP_ADDRESS_GTW01', '10.11.0.224') %}
+{% set ETH1_IP_ADDRESS_GTW02 = os_env('ETH1_IP_ADDRESS_GTW02', '10.11.0.225') %}
+{% set ETH1_IP_ADDRESS_GTW03 = os_env('ETH1_IP_ADDRESS_GTW03', '10.11.0.226') %}
+
+{% set ETH2_IP_ADDRESS_CFG01 = os_env('ETH2_IP_ADDRESS_CFG01', '172.16.44.33') %}
+
+{% set ETH0_IP_ADDRESS_CFG01_PREFIX = '.'.join(ETH0_IP_ADDRESS_CFG01.split('.')[0:3]) %}
+{% set ETH1_IP_ADDRESS_CFG01_PREFIX = '.'.join(ETH1_IP_ADDRESS_CFG01.split('.')[0:3]) %}
+{% set ETH2_IP_ADDRESS_CFG01_PREFIX = '.'.join(ETH2_IP_ADDRESS_CFG01.split('.')[0:3]) %}
+
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', 'phy-mcp-ocata-offline-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+ address_pools:
+ managment-pool01:
+ net: {{ os_env('MGMT_ADDRESS_POOL01', '10.11.0.0/24:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: {{ ETH1_IP_ADDRESS_CFG01 }}
+ default_{{ HOSTNAME_KVM01 }}: {{ ETH1_IP_ADDRESS_KVM01 }}
+ default_{{ HOSTNAME_KVM02 }}: {{ ETH1_IP_ADDRESS_KVM02 }}
+ default_{{ HOSTNAME_KVM03 }}: {{ ETH1_IP_ADDRESS_KVM03 }}
+ default_{{ HOSTNAME_KVM04 }}: {{ ETH1_IP_ADDRESS_KVM04 }}
+ default_{{ HOSTNAME_KVM05 }}: {{ ETH1_IP_ADDRESS_KVM05 }}
+ default_{{ HOSTNAME_KVM06 }}: {{ ETH1_IP_ADDRESS_KVM06 }}
+ default_{{ HOSTNAME_CMP001 }}: {{ ETH1_IP_ADDRESS_CMP001 }}
+ default_{{ HOSTNAME_CMP002 }}: {{ ETH1_IP_ADDRESS_CMP002 }}
+ default_{{ HOSTNAME_GTW01 }}: {{ ETH1_IP_ADDRESS_GTW01 }}
+ default_{{ HOSTNAME_GTW02 }}: {{ ETH1_IP_ADDRESS_GTW02 }}
+ default_{{ HOSTNAME_CTL }}: {{ ETH1_IP_ADDRESS_CTL }}
+ default_{{ HOSTNAME_CTL01 }}: {{ ETH1_IP_ADDRESS_CTL01 }}
+ default_{{ HOSTNAME_CTL02 }}: {{ ETH1_IP_ADDRESS_CTL02 }}
+ default_{{ HOSTNAME_CTL03 }}: {{ ETH1_IP_ADDRESS_CTL03 }}
+ default_{{ HOSTNAME_MSG }}: {{ ETH1_IP_ADDRESS_MSG }}
+ default_{{ HOSTNAME_MSG01 }}: {{ ETH1_IP_ADDRESS_MSG01 }}
+ default_{{ HOSTNAME_MSG02 }}: {{ ETH1_IP_ADDRESS_MSG02 }}
+ default_{{ HOSTNAME_MSG03 }}: {{ ETH1_IP_ADDRESS_MSG03 }}
+ default_{{ HOSTNAME_MON }}: {{ ETH1_IP_ADDRESS_MON }}
+ default_{{ HOSTNAME_MON01 }}: {{ ETH1_IP_ADDRESS_MON01 }}
+ default_{{ HOSTNAME_MON02 }}: {{ ETH1_IP_ADDRESS_MON02 }}
+ default_{{ HOSTNAME_MON03 }}: {{ ETH1_IP_ADDRESS_MON03 }}
+ default_{{ HOSTNAME_DBS }}: {{ ETH1_IP_ADDRESS_DBS }}
+ default_{{ HOSTNAME_DBS01 }}: {{ ETH1_IP_ADDRESS_DBS01 }}
+ default_{{ HOSTNAME_DBS02 }}: {{ ETH1_IP_ADDRESS_DBS02 }}
+ default_{{ HOSTNAME_DBS03 }}: {{ ETH1_IP_ADDRESS_DBS03 }}
+ default_{{ HOSTNAME_LOG }}: {{ ETH1_IP_ADDRESS_LOG }}
+ default_{{ HOSTNAME_LOG01 }}: {{ ETH1_IP_ADDRESS_LOG01 }}
+ default_{{ HOSTNAME_LOG02 }}: {{ ETH1_IP_ADDRESS_LOG02 }}
+ default_{{ HOSTNAME_LOG03 }}: {{ ETH1_IP_ADDRESS_LOG03 }}
+ default_{{ HOSTNAME_MTR }}: {{ ETH1_IP_ADDRESS_MTR }}
+ default_{{ HOSTNAME_MTR01 }}: {{ ETH1_IP_ADDRESS_MTR01 }}
+ default_{{ HOSTNAME_MTR02 }}: {{ ETH1_IP_ADDRESS_MTR02 }}
+ default_{{ HOSTNAME_MTR03 }}: {{ ETH1_IP_ADDRESS_MTR03 }}
+
+ admin-pool01:
+ net: {{ os_env('DEPLOY_ADDRESS_POOL01', '10.10.0.0/24:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_APT }}: {{ ETH0_IP_ADDRESS_APT }}
+ default_{{ HOSTNAME_CFG01 }}: {{ ETH0_IP_ADDRESS_CFG01 }}
+ default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
+ default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
+ default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
+ default_{{ HOSTNAME_KVM04 }}: {{ ETH0_IP_ADDRESS_KVM04 }}
+ default_{{ HOSTNAME_KVM05 }}: {{ ETH0_IP_ADDRESS_KVM05 }}
+ default_{{ HOSTNAME_KVM06 }}: {{ ETH0_IP_ADDRESS_KVM06 }}
+ default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
+ default_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
+ default_{{ HOSTNAME_GTW01 }}: {{ ETH0_IP_ADDRESS_GTW01 }}
+ default_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
+ default_{{ HOSTNAME_CTL }}: {{ ETH0_IP_ADDRESS_CTL }}
+ default_{{ HOSTNAME_CTL01 }}: {{ ETH0_IP_ADDRESS_CTL01 }}
+ default_{{ HOSTNAME_CTL02 }}: {{ ETH0_IP_ADDRESS_CTL02 }}
+ default_{{ HOSTNAME_CTL03 }}: {{ ETH0_IP_ADDRESS_CTL03 }}
+ default_{{ HOSTNAME_MSG }}: {{ ETH0_IP_ADDRESS_MSG }}
+ default_{{ HOSTNAME_MSG01 }}: {{ ETH0_IP_ADDRESS_MSG01 }}
+ default_{{ HOSTNAME_MSG02 }}: {{ ETH0_IP_ADDRESS_MSG02 }}
+ default_{{ HOSTNAME_MSG03 }}: {{ ETH0_IP_ADDRESS_MSG03 }}
+ default_{{ HOSTNAME_MON }}: {{ ETH0_IP_ADDRESS_MON }}
+ default_{{ HOSTNAME_MON01 }}: {{ ETH0_IP_ADDRESS_MON01 }}
+ default_{{ HOSTNAME_MON02 }}: {{ ETH0_IP_ADDRESS_MON02 }}
+ default_{{ HOSTNAME_MON03 }}: {{ ETH0_IP_ADDRESS_MON03 }}
+ default_{{ HOSTNAME_DBS }}: {{ ETH0_IP_ADDRESS_DBS }}
+ default_{{ HOSTNAME_DBS01 }}: {{ ETH0_IP_ADDRESS_DBS01 }}
+ default_{{ HOSTNAME_DBS02 }}: {{ ETH0_IP_ADDRESS_DBS02 }}
+ default_{{ HOSTNAME_DBS03 }}: {{ ETH0_IP_ADDRESS_DBS03 }}
+ default_{{ HOSTNAME_LOG }}: {{ ETH0_IP_ADDRESS_LOG }}
+ default_{{ HOSTNAME_LOG01 }}: {{ ETH0_IP_ADDRESS_LOG01 }}
+ default_{{ HOSTNAME_LOG02 }}: {{ ETH0_IP_ADDRESS_LOG02 }}
+ default_{{ HOSTNAME_LOG03 }}: {{ ETH0_IP_ADDRESS_LOG03 }}
+ default_{{ HOSTNAME_MTR }}: {{ ETH0_IP_ADDRESS_MTR }}
+ default_{{ HOSTNAME_MTR01 }}: {{ ETH0_IP_ADDRESS_MTR01 }}
+ default_{{ HOSTNAME_MTR02 }}: {{ ETH0_IP_ADDRESS_MTR02 }}
+ default_{{ HOSTNAME_MTR03 }}: {{ ETH0_IP_ADDRESS_MTR03 }}
+
+
+ public-pool01:
+ net: {{ os_env('PUBLIC_ADDRESS_POOL01', '172.16.44.0/22:22') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: {{ ETH2_IP_ADDRESS_CFG01 }}
+
+
+ groups:
+
+ - name: default
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+
+ network_pools:
+ admin: admin-pool01
+ managment: managment-pool01
+ public: public-pool01
+
+ l2_network_devices:
+ # Ironic management interface
+ managment:
+ address_pool: managment-pool01
+ dhcp: false
+ forward:
+ mode: bridge
+ parent_iface:
+ phys_dev: !os_env ADMIN_BRIDGE
+
+ admin: # deploy
+ address_pool: admin-pool01
+ dhcp: false
+ forward:
+ mode: bridge
+ parent_iface:
+ phys_dev: !os_env DEPLOY_BRIDGE
+
+ public:
+ address_pool: public-pool01
+ dhcp: false
+ forward:
+ mode: bridge
+ parent_iface:
+ phys_dev: !os_env PUBLIC_BRIDGE
+
+ #admin:
+ # address_pool: admin-pool01
+ # dhcp: true
+
+ #group_volumes:
+ #
+ #
+ #
+
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ shared_backing_store_name: !os_env CFG01_VOLUME_NAME
+ format: qcow2
+ - name: config
+ format: raw
+ device: cdrom
+ bus: ide
+ source_image: !os_env CFG01_CONFIG_PATH
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_cfg01
+
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin # deploy
+ interface_model: *interface_model
+
+ - label: ens4
+ l2_network_device: managment
+ interface_model: *interface_model
+
+ - label: ens5
+ l2_network_device: public
+ interface_model: *interface_model
+
+ #- label: ens6
+ # l2_network_device: admin
+ # interface_model: *interface_model
+
+
+ network_config:
+ ens3:
+ networks:
+ - admin # deploy
+ ens4:
+ networks:
+ - managment
+ ens5:
+ networks:
+ - public
+ #ens6:
+ # networks:
+ # - admin
+
+ - name: {{ HOSTNAME_APT }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 550
+ shared_backing_store_name: !os_env APT_VOLUME_NAME
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ # capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ source_image: !os_env APT_CONFIG_PATH
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin # deploy
+ interface_model: *interface_model
+ #- label: ens4
+ # l2_network_device: admin
+ # interface_model: *interface_model
+
+ network_config:
+ ens3:
+ networks:
+ - admin
+ #ens4:
+ # networks:
+ # - admin
+
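The ETH*_IP_ADDRESS_CFG01_PREFIX variables defined near the top of this file keep only the first three octets of each cfg01 address, so other template code can append its own host part. The Jinja expression is the same split/join one would write in Python:

    # Same derivation as '.'.join(ETH0_IP_ADDRESS_CFG01.split('.')[0:3])
    addr = "10.10.0.15"
    prefix = ".".join(addr.split(".")[0:3])
    assert prefix == "10.10.0"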
diff --git a/tcp_tests/templates/physical_mcp11_dvr/openstack.yaml b/tcp_tests/templates/physical_mcp11_dvr/openstack.yaml
index ef6273e..e01ee0f 100644
--- a/tcp_tests/templates/physical_mcp11_dvr/openstack.yaml
+++ b/tcp_tests/templates/physical_mcp11_dvr/openstack.yaml
@@ -46,7 +46,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@keystone:client' state.sls keystone.client
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 2, delay: 5}
skip_fail: false
- description: Check keystone service-list
@@ -159,7 +159,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
+ skip_fail: false
- description: Check IP on computes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/physical_mcp11_ovs_dpdk/openstack.yaml b/tcp_tests/templates/physical_mcp11_ovs_dpdk/openstack.yaml
index 745df96..83674b2 100644
--- a/tcp_tests/templates/physical_mcp11_ovs_dpdk/openstack.yaml
+++ b/tcp_tests/templates/physical_mcp11_ovs_dpdk/openstack.yaml
@@ -46,7 +46,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@keystone:client' state.sls keystone.client
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 2, delay: 5}
skip_fail: false
- description: Check keystone service-list
@@ -151,7 +151,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
+ skip_fail: false
- description: Check IP on computes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--user-data-hwe.yaml b/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--user-data-hwe.yaml
new file mode 100644
index 0000000..2c664a8
--- /dev/null
+++ b/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--user-data-hwe.yaml
@@ -0,0 +1,116 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Block access to SSH while the node is being prepared
+ - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ #- sudo ifup eth0
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+
+ ############## TCP Cloud cfg01 node ##################
+ #- sleep 120
+ - echo "Preparing base OS"
+ - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+ # Configure Ubuntu mirrors
+ - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+ - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+ - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+ - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+ - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list
+ - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -
+
+ - apt-get clean
+ - eatmydata apt-get update && apt-get -y upgrade
+
+ # Install common packages
+ - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+ ########################################################
+ # Node is ready, allow SSH access
+ #- echo "Allow SSH access ..."
+ #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ - apt-get install {{ os_env('LINUX_KERNEL_HWE_PACKAGE_NAME', 'linux-image-extra-4.10.0-42-generic') }} -y
+ - reboot
+ ########################################################
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ # The loopback network interface
+ auto lo
+ iface lo inet loopback
+
+ auto {interface_name}
+ iface {interface_name} inet dhcp
+
+ - path: /etc/udev/rules.d/70-persistent-net.rules
+ owner: root:root
+ permissions: '0644'
+ content: |
+ # kvm01
+ SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_KVM01') }}", NAME="enp2s0f0"
+ SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_KVM01') }}", NAME="enp2s0f1"
+ # kvm02
+ SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_KVM02') }}", NAME="enp2s0f0"
+ SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_KVM02') }}", NAME="enp2s0f1"
+ # kvm03
+ SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_KVM03') }}", NAME="eno1"
+ SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_KVM03') }}", NAME="eno2"
+ # cmp001
+ SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_CMP001') }}", NAME="enp3s0f0"
+ SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_CMP001') }}", NAME="enp3s0f1"
+ SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH2_MAC_ADDRESS_CMP001') }}", NAME="enp5s0f0"
+ SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH3_MAC_ADDRESS_CMP001') }}", NAME="enp5s0f1"
+ SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH4_MAC_ADDRESS_CMP001') }}", NAME="enp5s0f2"
+ # cmp002
+ SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_CMP002') }}", NAME="eno1"
+ SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_CMP002') }}", NAME="eth0"
+ SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH2_MAC_ADDRESS_CMP002') }}", NAME="eth3"
+ SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH3_MAC_ADDRESS_CMP002') }}", NAME="eth2"
+ SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH4_MAC_ADDRESS_CMP002') }}", NAME="eth4"
+ # gtw01
+ SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_GTW01') }}", NAME="enp3s0f0"
+ SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_GTW01') }}", NAME="enp3s0f1"
+ # gtw02
+ SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH0_MAC_ADDRESS_GTW02') }}", NAME="eno1"
+ SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{{ '{{address}}' }}=="{{ os_env('ETH1_MAC_ADDRESS_GTW02') }}", NAME="eno2"
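The ATTR{{ '{{address}}' }} construction in the udev rules goes through two escaping layers: Jinja first emits the literal text {{address}}, and a later str.format-style pass (assumed here; presumably the same one that fills {interface_name} and {gateway} in these files) collapses the doubled braces into the single-brace ATTR{address} syntax udev expects. A sketch of both passes, with {{ mac }} standing in for the real os_env(...) lookup:

    import jinja2

    line = ('SUBSYSTEM=="net", ACTION=="add", '
            'ATTR{{ \'{{address}}\' }}=="{{ mac }}", NAME="enp2s0f0"')

    # Pass 1: Jinja renders {{ '{{address}}' }} to the literal {{address}}.
    rendered = jinja2.Template(line).render(mac="aa:bb:cc:dd:ee:ff")

    # Pass 2 (assumed): str.format() collapses {{ }} into { }.
    print(rendered.format())
    # SUBSYSTEM=="net", ACTION=="add", ATTR{address}=="aa:bb:cc:dd:ee:ff", NAME="enp2s0f0"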
diff --git a/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--user-data.yaml b/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--user-data.yaml
index 9c7d6a3..fa175b2 100644
--- a/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--user-data.yaml
+++ b/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay--user-data.yaml
@@ -67,10 +67,8 @@
########################################################
# Node is ready, allow SSH access
- #- echo "Allow SSH access ..."
- #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- - apt-get install linux-generic-hwe-16.04 -y
- - reboot
+ - echo "Allow SSH access ..."
+ - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
########################################################
write_files:
diff --git a/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay.yaml b/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay.yaml
index 38f33bd..0dfe7a4 100644
--- a/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay.yaml
+++ b/tcp_tests/templates/physical_mcp11_ovs_dpdk/underlay.yaml
@@ -25,6 +25,7 @@
{% import 'physical_mcp11_ovs_dpdk/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
{% import 'physical_mcp11_ovs_dpdk/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
{% import 'physical_mcp11_ovs_dpdk/underlay--user-data.yaml' as CLOUDINIT_USER_DATA with context %}
+{% import 'physical_mcp11_ovs_dpdk/underlay--user-data-hwe.yaml' as CLOUDINIT_USER_DATA_HWE with context %}
---
aliases:
@@ -32,6 +33,7 @@
- &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
- &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- &cloudinit_user_data {{ CLOUDINIT_USER_DATA }}
+ - &cloudinit_user_data_hwe {{ CLOUDINIT_USER_DATA_HWE }}
template:
@@ -387,7 +389,7 @@
# for storing the image with cloud-init metadata.
cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
+ cloudinit_user_data: *cloudinit_user_data_hwe
interfaces:
- label: enp3s0f0
@@ -445,7 +447,7 @@
# for storing the image with cloud-init metadata.
cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
+ cloudinit_user_data: *cloudinit_user_data_hwe
interfaces:
- label: eno1
@@ -502,7 +504,7 @@
# for storing the image with cloud-init metadata.
cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
+ cloudinit_user_data: *cloudinit_user_data_hwe
interfaces:
- label: enp3s0f0
@@ -549,7 +551,7 @@
# for storing the image with cloud-init metadata.
cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
+ cloudinit_user_data: *cloudinit_user_data_hwe
interfaces:
- label: eno1
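The switch to the HWE user-data above leans on plain YAML anchors: each user-data blob is anchored once under aliases: and every node picks one via a *alias reference, so moving a node onto the HWE-kernel user-data is a one-line change. A minimal, self-contained illustration of how the alias resolves (names shortened, values invented):

import yaml

doc = """
aliases:
  - &cloudinit_user_data     {cloud_init: generic}
  - &cloudinit_user_data_hwe {cloud_init: hwe-kernel}
nodes:
  - {name: ctl01, cloudinit_user_data: *cloudinit_user_data}
  - {name: kvm01, cloudinit_user_data: *cloudinit_user_data_hwe}
"""
for node in yaml.safe_load(doc)['nodes']:
    print(node['name'], node['cloudinit_user_data'])
# ctl01 {'cloud_init': 'generic'}
# kvm01 {'cloud_init': 'hwe-kernel'}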
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index 2476555..e34e519 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -13,13 +13,13 @@
{% set ENVIRONMENT_TEMPLATE_REF_CHANGE = os_env('ENVIRONMENT_TEMPLATE_REF_CHANGE','') %}
{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-{% set FORMULA_REPOSITORY = os_env('FORMULA_REPOSITORY', 'deb [arch=amd64] http://apt.mirantis.com/xenial ' + REPOSITORY_SUITE + ' salt') %}
+{% set FORMULA_REPOSITORY = os_env('FORMULA_REPOSITORY', 'deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME} ' + REPOSITORY_SUITE + ' salt extra') %}
{% set FORMULA_GPG = os_env('FORMULA_GPG', 'http://apt.mirantis.com/public.gpg') %}
-{% set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb http://repo.saltstack.local.test/apt/ubuntu/16.04/amd64/2016.3 xenial main") %}
-{% set SALT_GPG = os_env('SALT_GPG', 'http://repo.saltstack.local.test/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub') %}
-{% set UBUNTU_REPOSITORY = os_env('UBUNTU_REPOSITORY', "deb http://archive.ubuntu.com/ubuntu/ xenial main universe restricted") %}
-{% set UBUNTU_UPDATES_REPOSITORY = os_env('UBUNTU_UPDATES_REPOSITORY', "deb http://archive.ubuntu.com/ubuntu/ xenial-updates main universe restricted") %}
-{% set UBUNTU_SECURITY_REPOSITORY = os_env('UBUNTU_SECURITY_REPOSITORY', "deb http://archive.ubuntu.com/ubuntu/ xenial-security main universe restricted") %}
+{% set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME}/salt/2016.3 " + REPOSITORY_SUITE + " main") %}
+{% set SALT_GPG = os_env('SALT_GPG', 'http://apt.mirantis.com/public.gpg') %}
+{% set UBUNTU_REPOSITORY = os_env('UBUNTU_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME} main restricted universe") %}
+{% set UBUNTU_UPDATES_REPOSITORY = os_env('UBUNTU_UPDATES_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME}-updates main restricted universe") %}
+{% set UBUNTU_SECURITY_REPOSITORY = os_env('UBUNTU_SECURITY_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/ubuntu/ ${DISTRIB_CODENAME}-security main restricted universe") %}
{# Address pools for reclass cluster model are taken in the following order:
# 1. environment variables,
@@ -54,6 +54,7 @@
cmd: |
rm -rf trusted* ;
rm -rf /etc/apt/sources.list ;
+ . /etc/lsb-release; # Get DISTRIB_CODENAME variable
echo "{{ FORMULA_REPOSITORY }}" > /etc/apt/sources.list.d/mcp_salt.list;
wget -O - "{{ FORMULA_GPG }}" | apt-key add -;
echo "{{ SALT_REPOSITORY }}" > /etc/apt/sources.list.d/mcp_saltstack.list;
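Note that ${DISTRIB_CODENAME} in the repository strings above is left for the shell, not for Jinja: the cmd block sources /etc/lsb-release first, so the variable expands when the string is echoed into the apt sources files. A rough Python analogue of that expansion, with an assumed codename in place of a real /etc/lsb-release parse:

from string import Template

lsb_release = {'DISTRIB_CODENAME': 'xenial'}  # normally sourced from /etc/lsb-release
repo = 'deb [arch=amd64] http://apt.mirantis.com/${DISTRIB_CODENAME} testing salt extra'
print(Template(repo).substitute(lsb_release))
# deb [arch=amd64] http://apt.mirantis.com/xenial testing salt extra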
@@ -72,7 +73,26 @@
{%- macro MACRO_INSTALL_SALT_MASTER() %}
{######################################}
- description: Installing salt master on cfg01
- cmd: eatmydata apt-get install -y --allow-unauthenticated reclass git salt-master
+ cmd: |
+ which wget >/dev/null || (apt-get update; apt-get install -y wget);
+ # Configure ubuntu and salt repositories
+ . /etc/lsb-release; # Get DISTRIB_CODENAME variable
+ echo "{{ UBUNTU_REPOSITORY }}" > /etc/apt/sources.list
+ echo "{{ UBUNTU_UPDATES_REPOSITORY }}" >> /etc/apt/sources.list
+ echo "{{ UBUNTU_SECURITY_REPOSITORY }}" >> /etc/apt/sources.list
+
+ echo "{{ FORMULA_REPOSITORY }}" > /etc/apt/sources.list.d/mcp_salt.list;
+ wget -O - {{ FORMULA_GPG }} | apt-key add -;
+ echo "{{ SALT_REPOSITORY }}" > /etc/apt/sources.list.d/mcp_saltstack.list;
+ wget -O - {{ SALT_GPG }} | apt-key add -;
+
+ apt-get clean
+ apt-get update
+
+ # Install salt-master and reclass
+ eatmydata apt-get install -y --allow-unauthenticated reclass salt-master
+ # Install common packages
+ eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: false
@@ -191,7 +211,7 @@
find {{ CLUSTER_PATH }} -type f -exec sed -i 's/cluster_domain: .*/cluster_domain: {{ DOMAIN_NAME }}/g' {} +
# Disable checking out the model from the remote repository
- cat << 'EOF' > /srv/salt/reclass/nodes/_generated/{{ HOSTNAME_CFG01 }}.yml
+ cat << 'EOF' > /srv/salt/reclass/nodes/_generated/cfg01.{{ DOMAIN_NAME }}.yml
classes:
- cluster.{{ CLUSTER_NAME }}.infra.config
parameters:
@@ -219,7 +239,7 @@
{%- macro MACRO_GENERATE_COOKIECUTTER_MODEL(IS_CONTRAIL_LAB=false, CONTROL_VLAN=None, TENANT_VLAN=None, CLUSTER_PRODUCT_MODELS='') %}
{###################################################################}
{%- set CLUSTER_CONTEXT_PATH = '/tmp/' + CLUSTER_CONTEXT_NAME %}
-- description: "[EXPERIMENTAL] Upload cookiecutter-templates context to {{ HOSTNAME_CFG01 }}"
+- description: "[EXPERIMENTAL] Upload cookiecutter-templates context to cfg01.{{ DOMAIN_NAME }}"
upload:
local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
local_filename: {{ CLUSTER_CONTEXT_NAME }}
@@ -288,7 +308,7 @@
export GIT_SSL_NO_VERIFY=true; git clone https://gerrit.mcp.mirantis.net/salt-models/reclass-system /srv/salt/reclass/classes/system/
# Create the cfg01 node and disable checking out the model from the remote repository
- cat << 'EOF' > /srv/salt/reclass/nodes/_generated/{{ HOSTNAME_CFG01 }}.yml
+ cat << 'EOF' > /srv/salt/reclass/nodes/_generated/cfg01.{{ DOMAIN_NAME }}.yml
classes:
- system.openssh.server.team.all
- cluster.{{ CLUSTER_NAME }}.infra.config
@@ -329,7 +349,7 @@
{%- macro MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() %}
{########################################################}
-- description: "[EXPERIMENTAL] Clone 'environment-template' repository to {{ HOSTNAME_CFG01 }}."
+- description: "[EXPERIMENTAL] Clone 'environment-template' repository to cfg01.{{ DOMAIN_NAME }}"
cmd: |
set -e;
mkdir -p /tmp/environment/;
@@ -350,7 +370,7 @@
{%- endif %}
{%- for ENVIRONMENT_CONTEXT_NAME in ENVIRONMENT_CONTEXT_NAMES %}
-- description: "[EXPERIMENTAL] Upload environment inventory to {{ HOSTNAME_CFG01 }}"
+- description: "[EXPERIMENTAL] Upload environment inventory to cfg01.{{ DOMAIN_NAME }}"
upload:
local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
local_filename: {{ ENVIRONMENT_CONTEXT_NAME }}
@@ -369,9 +389,9 @@
reclass-tools del-key parameters.linux.network.interface /srv/salt/reclass/classes/system/;
reclass-tools del-key parameters.linux.network.interface /usr/share/salt-formulas/reclass/;
- if ! reclass-tools get-key 'classes' /srv/salt/reclass/nodes/{{ HOSTNAME_CFG01 }}.yml | grep -q "environment.{{ ENVIRONMENT_MODEL_INVENTORY_NAME }}$"; then
- reclass-tools add-key 'classes' 'environment.{{ ENVIRONMENT_MODEL_INVENTORY_NAME }}.reclass_datasource_local' /srv/salt/reclass/nodes/_generated/{{ HOSTNAME_CFG01 }}.yml --merge ;
- reclass-tools add-key 'classes' 'environment.{{ ENVIRONMENT_MODEL_INVENTORY_NAME }}' /srv/salt/reclass/nodes/_generated/{{ HOSTNAME_CFG01 }}.yml --merge ;
+ if ! reclass-tools get-key 'classes' /srv/salt/reclass/nodes/_generated/cfg01.{{ DOMAIN_NAME }}.yml | grep -q "environment.{{ ENVIRONMENT_MODEL_INVENTORY_NAME }}$"; then
+ reclass-tools add-key 'classes' 'environment.{{ ENVIRONMENT_MODEL_INVENTORY_NAME }}.reclass_datasource_local' /srv/salt/reclass/nodes/_generated/cfg01.{{ DOMAIN_NAME }}.yml --merge ;
+ reclass-tools add-key 'classes' 'environment.{{ ENVIRONMENT_MODEL_INVENTORY_NAME }}' /srv/salt/reclass/nodes/_generated/cfg01.{{ DOMAIN_NAME }}.yml --merge ;
fi;
node_name: {{ HOSTNAME_CFG01 }}
@@ -417,6 +437,7 @@
set -e;
FORMULA_PATH=${FORMULA_PATH:-/usr/share/salt-formulas};
which wget > /dev/null || (apt-get update; apt-get install -y wget);
+ . /etc/lsb-release; # Get DISTRIB_CODENAME variable
echo "{{ FORMULA_REPOSITORY }}" > /etc/apt/sources.list.d/mcp_salt.list;
wget -O - "{{ FORMULA_GPG }}" | apt-key add -;
apt-get clean; apt-get update;
@@ -442,10 +463,10 @@
retry: {count: 1, delay: 1}
skip_fail: false
-- description: "*Workaround* remove all cfg01 nodes except {{ HOSTNAME_CFG01 }} to not depend on other clusters in 'reclass --top'"
+- description: "*Workaround* remove all cfg01 nodes except cfg01.{{ DOMAIN_NAME }} to not depend on other clusters in 'reclass --top'"
cmd: |
- # Remove all other nodes except {{ HOSTNAME_CFG01 }} to not rely on them for 'reclass --top'
- find /srv/salt/reclass/nodes/ -type f -not -name {{ HOSTNAME_CFG01 }}.yml -delete
+ # Remove all other nodes except cfg01.{{ DOMAIN_NAME }} to not rely on them for 'reclass --top'
+ find /srv/salt/reclass/nodes/ -type f -not -name cfg01.{{ DOMAIN_NAME }}.yml -delete
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
@@ -482,8 +503,28 @@
id: {{ ssh['node_name'] }}
master: {{ config.salt.salt_master_host }}
EOF
+
+ # Configure ubuntu and salt repositories
+ which wget >/dev/null || (apt-get update; apt-get install -y wget);
+
+ . /etc/lsb-release; # Get DISTRIB_CODENAME variable
+ echo "{{ UBUNTU_REPOSITORY }}" > /etc/apt/sources.list
+ echo "{{ UBUNTU_UPDATES_REPOSITORY }}" >> /etc/apt/sources.list
+ echo "{{ UBUNTU_SECURITY_REPOSITORY }}" >> /etc/apt/sources.list
+
+ echo "{{ SALT_REPOSITORY }}" > /etc/apt/sources.list.d/mcp_saltstack.list;
+ wget -O - {{ SALT_GPG }} | apt-key add -;
+
+ apt-get clean
+ apt-get update
+
+ # Install salt-minion
eatmydata apt-get install -y salt-minion;
- service salt-minion restart; # For case if salt-minion was already installed
+ # Install common packages
+ eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+ # Restart salt-minion if it was already installed
+ service salt-minion restart
node_name: {{ ssh['node_name'] }}
retry: {count: 1, delay: 1}
skip_fail: false
@@ -599,8 +640,15 @@
skip_fail: false
{%- endmacro %}
-{%- macro MACRO_GENERATE_INVENTORY() %}
-{#####################################}
+{%- macro MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=false) %}
+{#################################################################}
+
+- description: Refresh pillars before generating nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
- description: Generate inventory for all the nodes to the /srv/salt/reclass/nodes/_generated
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@salt:master' state.sls reclass
@@ -608,8 +656,19 @@
retry: {count: 1, delay: 5}
skip_fail: false
+{%- if RERUN_SALTMASTER_STATE %}
+- description: Regenerate salt.master states for the newly uploaded/created model
+ cmd: |
+ # Need to refresh installed formulas that are required in the model
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar; sleep 5;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:master' state.sls salt.master;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+{%- endif %}
+
- description: Refresh pillars on all minions
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar; sleep 5
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
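MACRO_GENERATE_INVENTORY now takes RERUN_SALTMASTER_STATE, which splices an extra salt.master state run between inventory generation and the final pillar refresh, so formulas installed for the freshly uploaded model are picked up. Purely as an illustration of the expanded step order (not framework code), the Jinja condition behaves like:

def generate_inventory_steps(rerun_saltmaster_state=False):
    steps = [
        "salt '*' saltutil.refresh_pillar",            # refresh before generating nodes
        "salt -C 'I@salt:master' state.sls reclass",   # generate inventory
    ]
    if rerun_saltmaster_state:
        # regenerate salt.master states for the newly uploaded/created model
        steps.append("salt -C 'I@salt:master' state.sls salt.master")
    steps.append("salt '*' saltutil.refresh_pillar; sleep 5")
    return steps

print('\n'.join(generate_inventory_steps(rerun_saltmaster_state=True)))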
@@ -635,6 +694,15 @@
{%- macro MACRO_BOOTSTRAP_ALL_MINIONS() %}
{########################################}
# Bootstrap all nodes
+
+- description: Get linux kernel version from all minions
+ cmd: |
+ salt --hard-crash --state-output=mixed --state-verbose=False --out=txt '*' cmd.run 'uname -r' | sort
+ salt --hard-crash --state-output=mixed --state-verbose=False --out=txt '*' cmd.run 'dpkg -l|grep "linux-image-.*-generic"' | sort
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 10}
+ skip_fail: false
+
- description: Configure linux on other nodes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system' state.sls linux
node_name: {{ HOSTNAME_CFG01 }}
@@ -680,16 +748,6 @@
{%- macro MACRO_NETWORKING_WORKAROUNDS() %}
{#########################################}
-#- description: '*Workaround 1/2* of the bug PROD-9576 to get bond0-connectivity *without* rebooting nodes'
-# cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False cmd.run
-# "mkdir -p /tmp/PROD-9576; cd /tmp/PROD-9576; git clone https://gerrit.mcp.mirantis.net/salt-formulas/linux; cd linux;
-# git fetch https://gerrit.mcp.mirantis.net/salt-formulas/linux refs/changes/54/2354/16 && git checkout FETCH_HEAD;
-# cp -f linux/network/interface.sls /srv/salt/env/prd/linux/network/;
-# cp -f linux/map.jinja /srv/salt/env/prd/linux/;"
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-
- description: '*Workaround: Load bonding module before calling state.linux'
cmd: salt -C "I@linux:network:interface:*:type:bond" cmd.run 'modprobe bonding'
node_name: {{ HOSTNAME_CFG01 }}
@@ -706,8 +764,9 @@
{%- endmacro %}
+
{%- macro ADJUST_K8S_OPTS() %}
-{#########################################}
+{############################}
- description: Set k8s deploy parameters
cmd: |
@@ -722,8 +781,31 @@
{%- endmacro %}
+
+{%- macro ADJUST_SL_OPTS(OVERRIDES_FILENAME='') %}
+{#############################################}
+- description: Set SL docker images deploy parameters
+ cmd: |
+ {#- For cookiecutter-generated model, use overrides.yml from environment model instead of cluster model #}
+ {%- for sl_opt, value in config.sl_deploy.items() %}
+ {%- if value|string() %}
+ {%- if OVERRIDES_FILENAME %}
+ salt-call reclass.cluster_meta_set name={{ sl_opt }} value={{ value }} file_name={{ OVERRIDES_FILENAME }};
+ {%- else %}
+ salt-call reclass.cluster_meta_set name={{ sl_opt }} value={{ value }};
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ salt '*' saltutil.refresh_pillar;
+ sleep 10
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+{%- endmacro %}
+
+
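The new ADJUST_SL_OPTS macro walks config.sl_deploy and emits one salt-call reclass.cluster_meta_set per non-empty option, writing into OVERRIDES_FILENAME when one is passed (as virtual-mcp-ocata-dvr/salt.yaml does below). A minimal sketch, assuming config.sl_deploy behaves like a plain dict and using an invented option name:

def adjust_sl_opts(sl_deploy, overrides_filename=''):
    cmds = []
    for opt, value in sl_deploy.items():
        if not str(value):
            continue  # mirrors the {%- if value|string() %} guard
        cmd = 'salt-call reclass.cluster_meta_set name={} value={}'.format(opt, value)
        if overrides_filename:
            cmd += ' file_name={}'.format(overrides_filename)
        cmds.append(cmd)
    return cmds

overrides = '/srv/salt/reclass/classes/cluster/virtual-mcp-ocata-dvr/stacklight/server.yml'
for cmd in adjust_sl_opts({'docker_image_alertmanager': 'alertmanager:latest'}, overrides):
    print(cmd)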
{%- macro REGISTER_COMPUTE_NODES() %}
-{#########################################}
+{###################################}
{% for ssh in config.underlay.ssh %}
{% if ssh["node_name"].startswith("cmp") %}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/ceph.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/ceph.yaml
index ad3b628..8705250 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/ceph.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/ceph.yaml
@@ -39,8 +39,8 @@
- description: Install ceph mgr if defined (needed only for Luminous)
cmd: |
- if salt -C 'I@ceph:mgr' match.pillar 'ceph:mgr' ; then
- salt -C 'I@ceph:mgr' state.sls ceph.mgr
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:mgr' match.pillar 'ceph:mgr' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:mgr' state.sls ceph.mgr
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
@@ -97,10 +97,10 @@
- description: Install radosgw if it exists
cmd: |
- if salt -C 'I@ceph:radosgw' match.pillar 'ceph:radosgw' ; then
- salt -C 'I@ceph:radosgw' saltutil.sync_grains;
- salt -C 'I@ceph:radosgw' state.sls ceph.radosgw;
- salt -C 'I@keystone:client' state.sls keystone.client;
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' match.pillar 'ceph:radosgw' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' saltutil.sync_grains;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' state.sls ceph.radosgw;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keystone:client' state.sls keystone.client;
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
@@ -115,34 +115,34 @@
- description: Connect ceph to glance
cmd: |
- salt -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
- salt -C 'I@ceph:common and I@glance:server' service.restart glance-api;
- salt -C 'I@ceph:common and I@glance:server' service.restart glance-glare;
- salt -C 'I@ceph:common and I@glance:server' service.restart glance-registry;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-api;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-glare;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-registry;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
skip_fail: false
- description: Connect ceph to cinder and nova
cmd: |
- salt -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring,cinder;
- salt -C 'I@ceph:common and I@nova:compute' state.sls ceph.common,ceph.setup.keyring;
- salt -C 'I@ceph:common and I@nova:compute' saltutil.sync_grains;
- salt -C 'I@ceph:common and I@nova:compute' state.sls nova;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring,cinder;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls ceph.common,ceph.setup.keyring;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' saltutil.sync_grains;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls nova;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
skip_fail: false
- description: Restart cinder volume
cmd: |
- salt -C 'I@cinder:controller' service.restart cinder-volume;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@cinder:controller' service.restart cinder-volume;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
skip_fail: false
- description: Restart nova-compute
cmd: |
- salt -C 'I@nova:compute' service.restart nova-compute;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@nova:compute' service.restart nova-compute;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
skip_fail: false
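The rewrite in this file (and in sl.yaml below) is mechanical but deliberate: every bare salt call gains --hard-crash --state-output=mixed --state-verbose=False, so a failed state returns a non-zero exit code that the step's retry/skip_fail settings can react to, with output trimmed for the logs. A tiny helper shows the intended shape; it is an illustration, not code from this repository:

SALT_OPTS = '--hard-crash --state-output=mixed --state-verbose=False'

def salt_cmd(target, command):
    # every invocation carries the common options so failures propagate
    return "salt {} -C '{}' {}".format(SALT_OPTS, target, command)

print(salt_cmd('I@ceph:mgr', 'state.sls ceph.mgr'))
# salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:mgr' state.sls ceph.mgr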
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/openstack.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/openstack.yaml
index 233f384..1783d6d 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/openstack.yaml
@@ -52,7 +52,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@keystone:client' state.sls keystone.client
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 2, delay: 5}
skip_fail: false
- description: Check keystone service-list
@@ -164,7 +164,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
+ skip_fail: false
- description: Check IP on computes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay.yaml b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay.yaml
index 34760e5..239d8ba 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ceph-offline/underlay.yaml
@@ -172,6 +172,12 @@
- name: cfg01_day01_image # Pre-configured day01 image
source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
format: qcow2
+ - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes, initially based on the kvm nodes.
+ # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
+ # or
+ # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
+ format: qcow2
nodes:
- name: {{ HOSTNAME_APT01 }}
@@ -274,7 +280,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: cinder
capacity: 50
@@ -315,7 +321,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: cinder
capacity: 50
@@ -344,7 +350,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: cinder
capacity: 50
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
new file mode 100644
index 0000000..25422ec
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-cookiecutter-mcp-ocata-dvr.yaml
@@ -0,0 +1,154 @@
+default_context:
+ bmk_enabled: 'False'
+ ceph_enabled: 'False'
+ cicd_enabled: 'False'
+ cluster_domain: virtual-mcp-ocata-dvr.local
+ cluster_name: virtual-mcp-ocata-dvr
+ compute_bond_mode: active-backup
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 172.16.10.0/24
+ control_vlan: '10'
+ cookiecutter_template_branch: master
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+ deploy_network_gateway: 192.168.10.1
+ deploy_network_netmask: 255.255.255.0
+ deploy_network_subnet: 192.168.10.0/24
+ deployment_type: physical
+ dns_server01: 8.8.8.8
+ dns_server02: 8.8.4.4
+ email_address: ddmitriev@mirantis.com
+ gateway_primary_first_nic: eth1
+ gateway_primary_second_nic: eth2
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: 172.16.10.101
+ infra_kvm01_deploy_address: 192.168.10.101
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 172.16.10.102
+ infra_kvm02_deploy_address: 192.168.10.102
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 172.16.10.103
+ infra_kvm03_deploy_address: 192.168.10.103
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: 172.16.10.100
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ kubernetes_enabled: 'False'
+ local_repositories: 'False'
+ maas_deploy_address: 192.168.10.90
+ maas_hostname: cfg01
+ mcp_version: stable
+ offline_deployment: 'False'
+ opencontrail_enabled: 'False'
+ openstack_benchmark_node01_address: 172.16.10.95
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_cluster_size: compact
+ openstack_compute_count: '100'
+ openstack_compute_rack01_hostname: cmp
+ openstack_compute_rack01_single_subnet: 172.16.10
+ openstack_compute_rack01_tenant_subnet: 10.1.0
+ openstack_control_address: 172.16.10.100
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: 172.16.10.101
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: 172.16.10.102
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: 172.16.10.103
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: 172.16.10.100
+ openstack_database_hostname: dbs
+ openstack_database_node01_address: 172.16.10.101
+ openstack_database_node01_hostname: dbs01
+ openstack_database_node02_address: 172.16.10.102
+ openstack_database_node02_hostname: dbs02
+ openstack_database_node03_address: 172.16.10.103
+ openstack_database_node03_hostname: dbs03
+ openstack_enabled: 'True'
+ openstack_gateway_node01_address: 172.16.10.110
+ openstack_gateway_node01_hostname: gtw01
+ openstack_gateway_node01_tenant_address: 10.1.0.6
+ openstack_gateway_node02_address: 172.16.10.111
+ openstack_gateway_node02_hostname: gtw02
+ openstack_gateway_node02_tenant_address: 10.1.0.7
+ openstack_gateway_node03_address: 172.16.10.112
+ openstack_gateway_node03_hostname: gtw03
+ openstack_gateway_node03_tenant_address: 10.1.0.8
+ openstack_message_queue_address: 172.16.10.100
+ openstack_message_queue_hostname: msg
+ openstack_message_queue_node01_address: 172.16.10.101
+ openstack_message_queue_node01_hostname: msg01
+ openstack_message_queue_node02_address: 172.16.10.102
+ openstack_message_queue_node02_hostname: msg02
+ openstack_message_queue_node03_address: 172.16.10.103
+ openstack_message_queue_node03_hostname: msg03
+ openstack_network_engine: ovs
+ openstack_neutron_qos: 'False'
+ openstack_neutron_vlan_aware_vms: 'False'
+ openstack_nfv_dpdk_enabled: 'False'
+ openstack_nfv_sriov_enabled: 'False'
+ openstack_nova_compute_nfv_req_enabled: 'False'
+ openstack_ovs_dvr_enabled: 'True'
+ openstack_ovs_encapsulation_type: vxlan
+ openstack_proxy_address: 172.16.10.80
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: 172.16.10.121
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: 172.16.10.122
+ openstack_proxy_node02_hostname: prx02
+ openstack_upgrade_node01_address: 172.16.10.19
+ openstack_version: ocata
+ oss_enabled: 'False'
+ oss_node03_address: ${_param:stacklight_monitor_node03_address}
+ oss_notification_app_id: '24'
+ oss_notification_sender_password: password
+ oss_notification_smtp_port: '587'
+ oss_notification_webhook_login_id: '13'
+ platform: openstack_enabled
+ public_host: ${_param:openstack_proxy_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
+ salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
+ salt_master_address: 172.16.10.90
+ salt_master_hostname: cfg01
+ salt_master_management_address: 192.168.10.90
+ shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+ stacklight_enabled: 'True'
+ stacklight_log_address: 172.16.10.70
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 172.16.10.107
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 172.16.10.108
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 172.16.10.109
+ stacklight_log_node03_hostname: log03
+ stacklight_monitor_address: 172.16.10.70
+ stacklight_monitor_hostname: mon
+ stacklight_monitor_node01_address: 172.16.10.107
+ stacklight_monitor_node01_hostname: mon01
+ stacklight_monitor_node02_address: 172.16.10.108
+ stacklight_monitor_node02_hostname: mon02
+ stacklight_monitor_node03_address: 172.16.10.109
+ stacklight_monitor_node03_hostname: mon03
+ stacklight_notification_address: alerts@localhost
+ stacklight_notification_smtp_host: 127.0.0.1
+ stacklight_telemetry_address: 172.16.10.70
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 172.16.10.107
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 172.16.10.108
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 172.16.10.109
+ stacklight_telemetry_node03_hostname: mtr03
+ stacklight_version: '2'
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: 10.1.0.1
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 10.1.0.0/24
+ tenant_vlan: '20'
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'False'
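A context like this is easy to break with a single mistyped octet. A quick, self-contained sanity check (not part of this change) is to assert that every literal *_address lands inside the declared subnet, skipping reclass ${_param:...} references:

import ipaddress

ctx = {  # a few keys copied from the context above
    'control_network_subnet': '172.16.10.0/24',
    'openstack_control_node01_address': '172.16.10.101',
    'stacklight_monitor_address': '172.16.10.70',
}
subnet = ipaddress.ip_network(ctx['control_network_subnet'])
for key, value in ctx.items():
    if key.endswith('_address') and not value.startswith('${'):
        assert ipaddress.ip_address(value) in subnet, key
print('all listed addresses are inside', subnet)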
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-environment.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-environment.yaml
new file mode 100644
index 0000000..9a04b68
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/_context-environment.yaml
@@ -0,0 +1,165 @@
+nodes:
+ cfg01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ ctl01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - infra_kvm
+ - openstack_control_leader
+ - openstack_database_leader
+ - openstack_message_queue
+ - features_designate_pool_manager_database
+ - features_designate_pool_manager
+ - features_designate_pool_manager_keystone
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ ctl02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - infra_kvm
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+ - features_designate_pool_manager_database
+ - features_designate_pool_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ ctl03.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - infra_kvm
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+ - features_designate_pool_manager_database
+ - features_designate_pool_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ prx01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ mon01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - stacklight_telemetry_leader
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ mon02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - stacklight_telemetry
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ mon03.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - stacklight_telemetry
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ # Generator-based computes. For compatibility only
+ cmp<<count>>.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_compute_rack01
+ roles:
+ - openstack_compute
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: bond0_ab_ovs_vxlan_ctl_mesh
+ ens5:
+ role: bond0_ab_ovs_vxlan_ctl_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+ gtw01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_gateway_node01
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: bond0_ab_ovs_vxlan_ctl_mesh
+ ens5:
+ role: bond0_ab_ovs_vxlan_ctl_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+ dns01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_dns_node01
+ roles:
+ - features_designate_pool_manager_dns
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+ single_address: ${_param:openstack_dns_node01_address}
+
+ dns02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_dns_node02
+ roles:
+ - features_designate_pool_manager_dns
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+ single_address: ${_param:openstack_dns_node02_address}
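The cmp<<count>> entry is a generator node: one record that the tooling expands into numbered compute hosts, while the fixed nodes keep explicit names. A hedged sketch of that expansion, assuming zero-padded 1-based indexes as the cmp001/cmp002 names earlier in this change suggest:

def expand_count_nodes(name_template, count):
    # replaces the <<count>> placeholder with 1-based, zero-padded indexes
    return [name_template.replace('<<count>>', '{:03d}'.format(i + 1))
            for i in range(count)]

print(expand_count_nodes('cmp<<count>>.mcp11-ovs-dpdk.local', 2))
# ['cmp001.mcp11-ovs-dpdk.local', 'cmp002.mcp11-ovs-dpdk.local']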
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml
index a06088a..ed0ee59 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/openstack.yaml
@@ -74,7 +74,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@keystone:client' state.sls keystone.client
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 2, delay: 5}
skip_fail: false
- description: Check keystone service-list
@@ -200,7 +200,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
+ skip_fail: false
- description: Check IP on computes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml
index 689dcf4..c8204a8 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/salt.yaml
@@ -17,6 +17,8 @@
{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
+
- description: "Workaround for PROD-14831, add 'dns' role to cmp01 and cmp02 nodes"
cmd: |
set -e;
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml
index c5c710a..5795a25 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/sl.yaml
@@ -18,105 +18,105 @@
skip_fail: false
- description: Configure docker service
- cmd: salt -C 'I@docker:swarm' state.sls docker.host
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install docker swarm on master node
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Send grains to the swarm slave nodes
- cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Update mine
- cmd: salt -C 'I@docker:swarm' mine.update
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Refresh modules
- cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Rerun swarm on slaves for proper token population
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Configure slave nodes
- cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: List registered Docker swarm nodes
- cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
# Install slv2 infra
- description: Install telegraf
- cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
cmd: |
- if salt -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt -C 'I@prometheus:exporters' state.sls prometheus
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Configure collector
- cmd: salt -C 'I@heka:log_collector' state.sls heka.log_collector
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install elasticsearch server
- cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install kibana server
- cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install elasticsearch client
- cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install kibana client
- cmd: salt -C 'I@kibana:client' state.sls kibana.client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Check InfluxDB
cmd: |
- INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+ INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt -C 'I@influxdb:server' state.sls influxdb
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
@@ -125,44 +125,44 @@
# Collect grains needed to configure the services
- description: Get grains
- cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Sync modules
- cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Update mine
- cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 5, delay: 15}
skip_fail: false
# Configure the services running in Docker Swarm
- description: Install prometheus alertmanager
- cmd: salt -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Run docker state
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: docker ps
- cmd: salt -C 'I@docker:swarm' dockerng.ps
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt -C 'I@grafana:client' state.sls grafana.client
+ cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
index 6448211..a73ca23 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -44,37 +42,18 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
-
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - apt-get update
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
+ # Enable grub menu using updated config below
+ - update-grub
write_files:
+ - path: /etc/default/grub.d/97-enable-grub-menu.cfg
+ content: |
+ GRUB_RECORDFAIL_TIMEOUT=30
+ GRUB_TIMEOUT=3
+ GRUB_TIMEOUT_STYLE=menu
+
- path: /etc/network/interfaces
content: |
auto ens3
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml
index c056295..3fbb777 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay--user-data1604.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -44,34 +42,6 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
write_files:
- path: /etc/network/interfaces
content: |
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml b/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml
index d52da6a..c6037b8 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-dvr/underlay.yaml
@@ -170,6 +170,12 @@
- name: cfg01_day01_image # Pre-configured day01 image
source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
format: qcow2
+ - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes, initially based on the kvm nodes.
+ # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
+ # or
+ # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
+ format: qcow2
nodes:
- name: {{ HOSTNAME_CFG01 }}
@@ -222,7 +228,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: cinder
capacity: 50
@@ -263,7 +269,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: cinder
capacity: 50
@@ -292,7 +298,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: cinder
capacity: 50
@@ -321,7 +327,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # The volume named 'iso' will be used
# for storing the image with cloud-init metadata.
@@ -347,7 +353,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # The volume named 'iso' will be used
# for storing the image with cloud-init metadata.
@@ -373,7 +379,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # The volume named 'iso' will be used
# for storing the image with cloud-init metadata.
@@ -399,7 +405,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: cinder
capacity: 50
@@ -532,7 +538,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # The volume named 'iso' will be used
# for storing the image with cloud-init metadata.
@@ -558,7 +564,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # The volume named 'iso' will be used
# for storing the image with cloud-init metadata.
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/ceph.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/ceph.yaml
index 17450c6..25ec2f0 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/ceph.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/ceph.yaml
@@ -39,8 +39,8 @@
- description: Install ceph mgr if defined (needed only for Luminous)
cmd: |
- if salt -C 'I@ceph:mgr' match.pillar 'ceph:mgr' ; then
- salt -C 'I@ceph:mgr' state.sls ceph.mgr
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:mgr' match.pillar 'ceph:mgr' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:mgr' state.sls ceph.mgr
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
@@ -97,10 +97,10 @@
- description: Install radosgw if it exists
cmd: |
- if salt -C 'I@ceph:radosgw' match.pillar 'ceph:radosgw' ; then
- salt -C 'I@ceph:radosgw' saltutil.sync_grains;
- salt -C 'I@ceph:radosgw' state.sls ceph.radosgw;
- salt -C 'I@keystone:client' state.sls keystone.client;
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' match.pillar 'ceph:radosgw' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' saltutil.sync_grains;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' state.sls ceph.radosgw;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keystone:client' state.sls keystone.client;
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
@@ -115,34 +115,34 @@
- description: Connect ceph to glance
cmd: |
- salt -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
- salt -C 'I@ceph:common and I@glance:server' service.restart glance-api;
- salt -C 'I@ceph:common and I@glance:server' service.restart glance-glare;
- salt -C 'I@ceph:common and I@glance:server' service.restart glance-registry;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-api;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-glare;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-registry;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
skip_fail: false
- description: Connect ceph to cinder and nova
cmd: |
- salt -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring,cinder;
- salt -C 'I@ceph:common and I@nova:compute' state.sls ceph.common,ceph.setup.keyring;
- salt -C 'I@ceph:common and I@nova:compute' saltutil.sync_grains;
- salt -C 'I@ceph:common and I@nova:compute' state.sls nova;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring,cinder;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls ceph.common,ceph.setup.keyring;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' saltutil.sync_grains;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls nova;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
skip_fail: false
- description: Restart cinder volume
cmd: |
- salt -C 'I@cinder:controller' service.restart cinder-volume;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@cinder:controller' service.restart cinder-volume;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
skip_fail: false
- description: Restart nova-compute
cmd: |
- salt -C 'I@nova:compute' service.restart nova-compute;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@nova:compute' service.restart nova-compute;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/openstack.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/openstack.yaml
index 617601a..f35c749 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/openstack.yaml
@@ -52,7 +52,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@keystone:client' state.sls keystone.client
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 2, delay: 5}
skip_fail: false
- description: Check keystone service-list
@@ -164,7 +164,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
+ skip_fail: false
- description: Check IP on computes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data-cfg01.yaml
index 6448211..da7908d 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -44,35 +42,7 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
-
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - apt-get update
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
write_files:
- path: /etc/network/interfaces
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data1604.yaml
index c056295..3fbb777 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay--user-data1604.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -44,34 +42,6 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
write_files:
- path: /etc/network/interfaces
content: |
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay.yaml
index 7486632..8b4a50e 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs-ceph/underlay.yaml
@@ -170,6 +170,12 @@
- name: cfg01_day01_image # Pre-configured day01 image
source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
format: qcow2
+ - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes, initially based on the kvm nodes.
+ # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
+ # or
+ # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
+ format: qcow2
nodes:
- name: {{ HOSTNAME_CFG01 }}
@@ -222,7 +228,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: cinder
capacity: 50
@@ -263,7 +269,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: cinder
capacity: 50
@@ -292,7 +298,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: cinder
capacity: 50
@@ -457,7 +463,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # Volume with name 'iso' will be used
# to store the image with cloud-init metadata.
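
The new image entry resolves its path with a chained environment lookup: `os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604'))` takes MCP_IMAGE_PATH1604 when it is exported and otherwise falls back to the generic IMAGE_PATH1604. A sketch of that behaviour, assuming `os_env` acts like `os.environ.get` with a default:

import os

def os_env(name, default=None):
    """Assumed behaviour of the template helper: environment lookup
    with an optional fallback value."""
    return os.environ.get(name, default)

# Mirrors the source_image line above: prefer the pre-built MCP image,
# fall back to the generic Xenial cloud image path.
source_image = os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604'))
print(source_image)  # None if neither variable is exported
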
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
new file mode 100644
index 0000000..d2aded3
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-cookiecutter-mcp-ocata-ovs.yaml
@@ -0,0 +1,154 @@
+default_context:
+ bmk_enabled: 'False'
+ ceph_enabled: 'False'
+ cicd_enabled: 'False'
+ cluster_domain: virtual-mcp-ocata-ovs.local
+ cluster_name: virtual-mcp-ocata-ovs
+ compute_bond_mode: active-backup
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ context_seed: U1hx5V31VJfFFBu8fCsk9ebDN2TwuBABTIcptYQ8tmFSlhSxHIkKnJnDsnckgKnH
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 172.16.10.0/24
+ control_vlan: '10'
+ cookiecutter_template_branch: master
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+ deploy_network_gateway: 192.168.10.1
+ deploy_network_netmask: 255.255.255.0
+ deploy_network_subnet: 192.168.10.0/24
+ deployment_type: physical
+ dns_server01: 8.8.8.8
+ dns_server02: 8.8.4.4
+ email_address: ddmitriev@mirantis.com
+ gateway_primary_first_nic: eth1
+ gateway_primary_second_nic: eth2
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: 172.16.10.101
+ infra_kvm01_deploy_address: 192.168.10.101
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 172.16.10.102
+ infra_kvm02_deploy_address: 192.168.10.102
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 172.16.10.103
+ infra_kvm03_deploy_address: 192.168.10.103
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: 172.16.10.100
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ kubernetes_enabled: 'False'
+ local_repositories: 'False'
+ maas_deploy_address: 192.168.10.90
+ maas_hostname: cfg01
+ mcp_version: stable
+ offline_deployment: 'False'
+ opencontrail_enabled: 'False'
+ openstack_benchmark_node01_address: 172.16.10.95
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_cluster_size: compact
+ openstack_compute_count: '100'
+ openstack_compute_rack01_hostname: cmp
+ openstack_compute_rack01_single_subnet: 172.16.10
+ openstack_compute_rack01_tenant_subnet: 10.1.0
+ openstack_control_address: 172.16.10.100
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: 172.16.10.101
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: 172.16.10.102
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: 172.16.10.103
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: 172.16.10.100
+ openstack_database_hostname: dbs
+ openstack_database_node01_address: 172.16.10.101
+ openstack_database_node01_hostname: dbs01
+ openstack_database_node02_address: 172.16.10.102
+ openstack_database_node02_hostname: dbs02
+ openstack_database_node03_address: 172.16.10.103
+ openstack_database_node03_hostname: dbs03
+ openstack_enabled: 'True'
+ openstack_gateway_node01_address: 172.16.10.110
+ openstack_gateway_node01_hostname: gtw01
+ openstack_gateway_node01_tenant_address: 10.1.0.6
+ openstack_gateway_node02_address: 172.16.10.111
+ openstack_gateway_node02_hostname: gtw02
+ openstack_gateway_node02_tenant_address: 10.1.0.7
+ openstack_gateway_node03_address: 172.16.10.112
+ openstack_gateway_node03_hostname: gtw03
+ openstack_gateway_node03_tenant_address: 10.1.0.8
+ openstack_message_queue_address: 172.16.10.100
+ openstack_message_queue_hostname: msg
+ openstack_message_queue_node01_address: 172.16.10.101
+ openstack_message_queue_node01_hostname: msg01
+ openstack_message_queue_node02_address: 172.16.10.102
+ openstack_message_queue_node02_hostname: msg02
+ openstack_message_queue_node03_address: 172.16.10.103
+ openstack_message_queue_node03_hostname: msg03
+ openstack_network_engine: ovs
+ openstack_neutron_qos: 'False'
+ openstack_neutron_vlan_aware_vms: 'False'
+ openstack_nfv_dpdk_enabled: 'False'
+ openstack_nfv_sriov_enabled: 'False'
+ openstack_nova_compute_nfv_req_enabled: 'False'
+ openstack_ovs_dvr_enabled: 'False'
+ openstack_ovs_encapsulation_type: vxlan
+ openstack_proxy_address: 172.16.10.80
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: 172.16.10.121
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: 172.16.10.122
+ openstack_proxy_node02_hostname: prx02
+ openstack_upgrade_node01_address: 172.16.10.19
+ openstack_version: ocata
+ oss_enabled: 'False'
+ oss_node03_address: ${_param:stacklight_monitor_node03_address}
+ oss_notification_app_id: '24'
+ oss_notification_sender_password: password
+ oss_notification_smtp_port: '587'
+ oss_notification_webhook_login_id: '13'
+ platform: openstack_enabled
+ public_host: ${_param:openstack_proxy_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ salt_api_password: PGah7Ph3IdWuMdAX3ZBLSf5BtlBG1Qhl
+ salt_api_password_hash: $6$kgvztcjH$9B2950AyxRjE2Tj5QNVCnvdrgaFo/u6c59pMoQPqfxs2MTLLU7ywxPTQnDH3cNV.BBEK6FilF9SulWfIfENou0
+ salt_master_address: 172.16.10.90
+ salt_master_hostname: cfg01
+ salt_master_management_address: 192.168.10.90
+ shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+ stacklight_enabled: 'True'
+ stacklight_log_address: 172.16.10.70
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 172.16.10.107
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 172.16.10.108
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 172.16.10.109
+ stacklight_log_node03_hostname: log03
+ stacklight_monitor_address: 172.16.10.70
+ stacklight_monitor_hostname: mon
+ stacklight_monitor_node01_address: 172.16.10.107
+ stacklight_monitor_node01_hostname: mon01
+ stacklight_monitor_node02_address: 172.16.10.108
+ stacklight_monitor_node02_hostname: mon02
+ stacklight_monitor_node03_address: 172.16.10.109
+ stacklight_monitor_node03_hostname: mon03
+ stacklight_notification_address: alerts@localhost
+ stacklight_notification_smtp_host: 127.0.0.1
+ stacklight_telemetry_address: 172.16.10.70
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 172.16.10.107
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 172.16.10.108
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 172.16.10.109
+ stacklight_telemetry_node03_hostname: mtr03
+ stacklight_version: '2'
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: 10.1.0.1
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 10.1.0.0/24
+ tenant_vlan: '20'
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'False'
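
The `default_context` above is the non-interactive input for rendering the cluster model from the templates named in `cookiecutter_template_url`. A hedged sketch of feeding such a context to the cookiecutter API; the local template path is a placeholder, and the deployment jobs' real invocation is not shown in this change:

import yaml
from cookiecutter.main import cookiecutter  # pip install cookiecutter pyyaml

with open('_context-cookiecutter-mcp-ocata-ovs.yaml') as f:
    ctx = yaml.safe_load(f)['default_context']

# no_input=True renders the template purely from the supplied context.
cookiecutter(
    'path/to/cookiecutter-templates',  # placeholder, not a real checkout path
    no_input=True,
    extra_context=ctx,
)
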
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml
new file mode 100644
index 0000000..18a87be
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/_context-environment.yaml
@@ -0,0 +1,141 @@
+nodes:
+ cfg01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ ctl01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - infra_kvm
+ - openstack_control_leader
+ - openstack_database_leader
+ - openstack_message_queue
+ - features_designate_database
+ - features_designate
+ - features_designate_keystone
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ ctl02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - infra_kvm
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+ - features_designate_database
+ - features_designate
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ ctl03.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - infra_kvm
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+ - features_designate_database
+ - features_designate
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ prx01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ mon01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - stacklight_telemetry_leader
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ mon02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - stacklight_telemetry
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ mon03.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - stacklight_telemetry
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ # Generator-based computes. For compatibility only
+ cmp<<count>>.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_compute_rack01
+ roles:
+ - openstack_compute
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: bond0_ab_ovs_vxlan_ctl_mesh
+ ens5:
+ role: bond0_ab_ovs_vxlan_ctl_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+ gtw01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_gateway_node01
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: bond0_ab_ovs_vxlan_ctl_mesh
+ ens5:
+ role: bond0_ab_ovs_vxlan_ctl_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
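
As the inline comment notes, `cmp<<count>>` is a generator key: the framework expands it into concrete compute entries according to the configured compute count, each inheriting the same roles and interfaces. A toy expansion, assuming zero-padded names like cmp001, cmp002 (the real generator's naming scheme may differ):

def expand_compute_nodes(template_name, node_body, count):
    """Expand a 'cmp<<count>>' style key into concrete node entries.
    The naming scheme (cmp001, cmp002, ...) is an assumption."""
    nodes = {}
    for i in range(1, count + 1):
        name = template_name.replace('<<count>>', '{:03d}'.format(i))
        nodes[name] = dict(node_body)  # each node gets its own copy
    return nodes

expanded = expand_compute_nodes(
    'cmp<<count>>.mcp11-ovs-dpdk.local',
    {'reclass_storage_name': 'openstack_compute_rack01'},
    count=2)
print(sorted(expanded))  # cmp001... and cmp002...
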
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml
index 0950a68..a82a5f6 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/openstack.yaml
@@ -52,7 +52,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@keystone:client' state.sls keystone.client
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 2, delay: 5}
skip_fail: false
- description: Check keystone service-list
@@ -179,7 +179,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
+ skip_fail: false
- description: Check IP on computes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml
index 5f5064f..11fb74b 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/salt.yaml
@@ -17,6 +17,8 @@
{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
+
{{ SHARED.MACRO_GENERATE_INVENTORY() }}
{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/sl.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/sl.yaml
index 688f70d..c11350b 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/sl.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/sl.yaml
@@ -20,102 +20,102 @@
skip_fail: false
- description: Configure docker service
- cmd: salt -C 'I@docker:swarm' state.sls docker.host
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install docker swarm on master node
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Send grains to the swarm slave nodes
- cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Update mine
- cmd: salt -C 'I@docker:swarm' mine.update
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Refresh modules
- cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Rerun swarm on slaves for proper token population
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Configure slave nodes
- cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: List registered Docker swarm nodes
- cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
# Install slv2 infra
- description: Install telegraf
- cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
- description: Configure Prometheus exporters
- cmd: salt -C 'I@prometheus:exporters' state.sls prometheus
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Configure collector
- cmd: salt -C 'I@heka:log_collector' state.sls heka.log_collector
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install elasticsearch server
- cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install kibana server
- cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install elasticsearch client
- cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install kibana client
- cmd: salt -C 'I@kibana:client' state.sls kibana.client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Check influxdb
cmd: |
- INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+ INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt -C 'I@influxdb:server' state.sls influxdb
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
@@ -124,44 +124,44 @@
# Collect grains needed to configure the services
- description: Get grains
- cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Sync modules
- cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Update mine
- cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
# Configure the services running in Docker Swarm
- description: Install prometheus alertmanager
- cmd: salt -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Run docker state
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: docker ps
- cmd: salt -C 'I@docker:swarm' dockerng.ps
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt -C 'I@grafana:client' state.sls grafana.client
+ cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
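
The influxdb step above illustrates the guard pattern used throughout these templates: probe the compound target first (`test.ping` here, `match.pillar` elsewhere) and apply the state only when some minion answers, so optional services do not fail the run when absent. A Python sketch of the same guard driven through the salt CLI; it assumes the `salt` binary is on PATH on the master, and note that salt's exit status when no minion matches varies across versions (the template's `&& echo true` capture sidesteps that):

import subprocess

def apply_if_targeted(target, state):
    """Apply a state only when some minion matches the compound target.

    A sketch of the template's `test.ping ... && echo true` guard,
    not the framework's own code.
    """
    probe = subprocess.call(
        "salt --hard-crash -C '{}' test.ping".format(target),
        shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    if probe == 0:
        subprocess.check_call(
            "salt --hard-crash -C '{}' state.sls {}".format(target, state),
            shell=True)
    else:
        print("no minions matched {}; skipping {}".format(target, state))

apply_if_targeted('I@influxdb:server', 'influxdb')
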
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml
index 6448211..da7908d 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -44,35 +42,7 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
-
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - apt-get update
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
write_files:
- path: /etc/network/interfaces
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data1604.yaml
index c056295..3fbb777 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay--user-data1604.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -44,34 +42,6 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
write_files:
- path: /etc/network/interfaces
content: |
diff --git a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml b/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
index fccb69d..081bc31 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-ocata-ovs/underlay.yaml
@@ -160,6 +160,12 @@
- name: cfg01_day01_image # Pre-configured day01 image
source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
format: qcow2
+ - name: mcp_ubuntu_1604_image # Pre-configured image for VCP nodes, initially based on the kvm nodes.
+ # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
+ # or
+ # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
+ format: qcow2
nodes:
- name: {{ HOSTNAME_CFG01 }}
@@ -212,7 +218,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: cinder
capacity: 50
@@ -253,7 +259,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: cinder
capacity: 50
@@ -282,7 +288,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: cinder
capacity: 50
@@ -311,7 +317,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # Volume with name 'iso' will be used
# to store the image with cloud-init metadata.
@@ -337,7 +343,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # Volume with name 'iso' will be used
# to store the image with cloud-init metadata.
@@ -363,7 +369,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: iso # Volume with name 'iso' will be used
# to store the image with cloud-init metadata.
@@ -389,7 +395,7 @@
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
+ backing_store: mcp_ubuntu_1604_image
format: qcow2
- name: cinder
capacity: 50
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
index 3c772ec..8b05b63 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
@@ -38,8 +38,8 @@
- description: Install ceph mgr if defined (needed only for Luminous)
cmd: |
- if salt -C 'I@ceph:mgr' match.pillar 'ceph:mgr' ; then
- salt -C 'I@ceph:mgr' state.sls ceph.mgr
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:mgr' match.pillar 'ceph:mgr' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:mgr' state.sls ceph.mgr
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
@@ -96,9 +96,9 @@
- description: Install radosgw if exists
cmd: |
- salt -C 'I@ceph:radosgw' saltutil.sync_grains;
- salt -C 'I@ceph:radosgw' state.sls ceph.radosgw;
- salt -C 'I@keystone:client' state.sls keystone.client;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' saltutil.sync_grains;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' state.sls ceph.radosgw;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keystone:client' state.sls keystone.client;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
skip_fail: false
@@ -121,34 +121,34 @@
- description: Connect ceph to glance
cmd: |
- salt -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
- salt -C 'I@ceph:common and I@glance:server' service.restart glance-api;
- salt -C 'I@ceph:common and I@glance:server' service.restart glance-glare;
- salt -C 'I@ceph:common and I@glance:server' service.restart glance-registry;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-api;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-glare;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-registry;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
skip_fail: false
- description: Connect ceph to cinder and nova
cmd: |
- salt -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring,cinder;
- salt -C 'I@ceph:common and I@nova:compute' state.sls ceph.common,ceph.setup.keyring;
- salt -C 'I@ceph:common and I@nova:compute' saltutil.sync_grains;
- salt -C 'I@ceph:common and I@nova:compute' state.sls nova;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring,cinder;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls ceph.common,ceph.setup.keyring;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' saltutil.sync_grains;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls nova;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
skip_fail: false
- description: Restart cinder volume
cmd: |
- salt -C 'I@cinder:controller' service.restart cinder-volume;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@cinder:controller' service.restart cinder-volume;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
skip_fail: false
- description: Restart nova-compute
cmd: |
- salt -C 'I@nova:compute' service.restart nova-compute;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@nova:compute' service.restart nova-compute;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/openstack.yaml
index b9bfb8d..f4903a9 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/openstack.yaml
@@ -52,7 +52,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@keystone:client' state.sls keystone.client
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 2, delay: 5}
skip_fail: false
- description: Check keystone service-list
@@ -164,7 +164,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
+ skip_fail: false
- description: Check IP on computes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data-cfg01.yaml
index 6448211..da7908d 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -44,35 +42,7 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
-
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - apt-get update
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
write_files:
- path: /etc/network/interfaces
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data1604.yaml
index c056295..3fbb777 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data1604.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -44,34 +42,6 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
write_files:
- path: /etc/network/interfaces
content: |
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
new file mode 100644
index 0000000..3802235
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/_context-cookiecutter-mcp-pike-dvr.yaml
@@ -0,0 +1,154 @@
+default_context:
+ bmk_enabled: 'False'
+ ceph_enabled: 'False'
+ cicd_enabled: 'False'
+ cluster_domain: virtual-mcp-pike-dvr.local
+ cluster_name: virtual-mcp-pike-dvr
+ compute_bond_mode: active-backup
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 172.16.10.0/24
+ control_vlan: '10'
+ cookiecutter_template_branch: master
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+ deploy_network_gateway: 192.168.10.1
+ deploy_network_netmask: 255.255.255.0
+ deploy_network_subnet: 192.168.10.0/24
+ deployment_type: physical
+ dns_server01: 8.8.8.8
+ dns_server02: 8.8.4.4
+ email_address: ddmitriev@mirantis.com
+ gateway_primary_first_nic: eth1
+ gateway_primary_second_nic: eth2
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: 172.16.10.101
+ infra_kvm01_deploy_address: 192.168.10.101
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 172.16.10.102
+ infra_kvm02_deploy_address: 192.168.10.102
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 172.16.10.103
+ infra_kvm03_deploy_address: 192.168.10.103
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: 172.16.10.100
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ kubernetes_enabled: 'False'
+ local_repositories: 'False'
+ maas_deploy_address: 192.168.10.90
+ maas_hostname: cfg01
+ mcp_version: stable
+ offline_deployment: 'False'
+ opencontrail_enabled: 'False'
+ openstack_benchmark_node01_address: 172.16.10.95
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_cluster_size: compact
+ openstack_compute_count: '100'
+ openstack_compute_rack01_hostname: cmp
+ openstack_compute_rack01_single_subnet: 172.16.10
+ openstack_compute_rack01_tenant_subnet: 10.1.0
+ openstack_control_address: 172.16.10.100
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: 172.16.10.101
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: 172.16.10.102
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: 172.16.10.103
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: 172.16.10.100
+ openstack_database_hostname: dbs
+ openstack_database_node01_address: 172.16.10.101
+ openstack_database_node01_hostname: dbs01
+ openstack_database_node02_address: 172.16.10.102
+ openstack_database_node02_hostname: dbs02
+ openstack_database_node03_address: 172.16.10.103
+ openstack_database_node03_hostname: dbs03
+ openstack_enabled: 'True'
+ openstack_gateway_node01_address: 172.16.10.110
+ openstack_gateway_node01_hostname: gtw01
+ openstack_gateway_node01_tenant_address: 10.1.0.6
+ openstack_gateway_node02_address: 172.16.10.111
+ openstack_gateway_node02_hostname: gtw02
+ openstack_gateway_node02_tenant_address: 10.1.0.7
+ openstack_gateway_node03_address: 172.16.10.112
+ openstack_gateway_node03_hostname: gtw03
+ openstack_gateway_node03_tenant_address: 10.1.0.8
+ openstack_message_queue_address: 172.16.10.100
+ openstack_message_queue_hostname: msg
+ openstack_message_queue_node01_address: 172.16.10.101
+ openstack_message_queue_node01_hostname: msg01
+ openstack_message_queue_node02_address: 172.16.10.102
+ openstack_message_queue_node02_hostname: msg02
+ openstack_message_queue_node03_address: 172.16.10.103
+ openstack_message_queue_node03_hostname: msg03
+ openstack_network_engine: ovs
+ openstack_neutron_qos: 'False'
+ openstack_neutron_vlan_aware_vms: 'False'
+ openstack_nfv_dpdk_enabled: 'False'
+ openstack_nfv_sriov_enabled: 'False'
+ openstack_nova_compute_nfv_req_enabled: 'False'
+ openstack_ovs_dvr_enabled: 'True'
+ openstack_ovs_encapsulation_type: vxlan
+ openstack_proxy_address: 172.16.10.80
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: 172.16.10.121
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: 172.16.10.122
+ openstack_proxy_node02_hostname: prx02
+ openstack_upgrade_node01_address: 172.16.10.19
+ openstack_version: pike
+ oss_enabled: 'False'
+ oss_node03_address: ${_param:stacklight_monitor_node03_address}
+ oss_notification_app_id: '24'
+ oss_notification_sender_password: password
+ oss_notification_smtp_port: '587'
+ oss_notification_webhook_login_id: '13'
+ platform: openstack_enabled
+ public_host: ${_param:openstack_proxy_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
+ salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
+ salt_master_address: 172.16.10.90
+ salt_master_hostname: cfg01
+ salt_master_management_address: 192.168.10.90
+ shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+ stacklight_enabled: 'True'
+ stacklight_log_address: 172.16.10.70
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 172.16.10.107
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 172.16.10.108
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 172.16.10.109
+ stacklight_log_node03_hostname: log03
+ stacklight_monitor_address: 172.16.10.70
+ stacklight_monitor_hostname: mon
+ stacklight_monitor_node01_address: 172.16.10.107
+ stacklight_monitor_node01_hostname: mon01
+ stacklight_monitor_node02_address: 172.16.10.108
+ stacklight_monitor_node02_hostname: mon02
+ stacklight_monitor_node03_address: 172.16.10.109
+ stacklight_monitor_node03_hostname: mon03
+ stacklight_notification_address: alerts@localhost
+ stacklight_notification_smtp_host: 127.0.0.1
+ stacklight_telemetry_address: 172.16.10.70
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 172.16.10.107
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 172.16.10.108
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 172.16.10.109
+ stacklight_telemetry_node03_hostname: mtr03
+ stacklight_version: '2'
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: 10.1.0.1
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 10.1.0.0/24
+ tenant_vlan: '20'
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'False'
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/_context-environment.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/_context-environment.yaml
new file mode 100644
index 0000000..9a04b68
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/_context-environment.yaml
@@ -0,0 +1,165 @@
+nodes:
+ cfg01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ ctl01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - infra_kvm
+ - openstack_control_leader
+ - openstack_database_leader
+ - openstack_message_queue
+ - features_designate_pool_manager_database
+ - features_designate_pool_manager
+ - features_designate_pool_manager_keystone
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ ctl02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - infra_kvm
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+ - features_designate_pool_manager_database
+ - features_designate_pool_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ ctl03.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - infra_kvm
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+ - features_designate_pool_manager_database
+ - features_designate_pool_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ prx01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ mon01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - stacklight_telemetry_leader
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ mon02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - stacklight_telemetry
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ mon03.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - stacklight_telemetry
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ # Generator-based computes. For compatibility only
+ cmp<<count>>.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_compute_rack01
+ roles:
+ - openstack_compute
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: bond0_ab_ovs_vxlan_ctl_mesh
+ ens5:
+ role: bond0_ab_ovs_vxlan_ctl_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+ gtw01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_gateway_node01
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: bond0_ab_ovs_vxlan_ctl_mesh
+ ens5:
+ role: bond0_ab_ovs_vxlan_ctl_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+ dns01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_dns_node01
+ roles:
+ - features_designate_pool_manager_dns
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+ single_address: ${_param:openstack_dns_node01_address}
+
+ dns02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_dns_node02
+ roles:
+ - features_designate_pool_manager_dns
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+ single_address: ${_param:openstack_dns_node02_address}
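
The `single_address: ${_param:...}` lines on the dns nodes rely on reclass parameter interpolation: the placeholder is substituted from the merged `_param` dictionary when the model is compiled. A toy resolver for the simple `${_param:key}` form only; real reclass also handles nesting and escaping, and the sample address below is assumed, not taken from this change:

import re

PARAM_RE = re.compile(r'\$\{_param:([^}]+)\}')

def resolve(value, params):
    """Resolve ${_param:key} references against a parameter dict.
    A toy model of reclass interpolation; nesting/escaping not handled."""
    return PARAM_RE.sub(lambda m: str(params[m.group(1)]), value)

params = {'openstack_dns_node01_address': '172.16.10.113'}  # value assumed
print(resolve('${_param:openstack_dns_node01_address}', params))
# -> 172.16.10.113
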
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
index f9a4127..45ededb 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
@@ -74,7 +74,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@keystone:client' state.sls keystone.client
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 2, delay: 5}
skip_fail: false
- description: Check keystone service-list
@@ -200,7 +200,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
+ skip_fail: false
- description: Check IP on computes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml
index a7c06dd..51a2203 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml
@@ -17,6 +17,8 @@
{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
+
{{ SHARED.MACRO_GENERATE_INVENTORY() }}
{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/sl.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/sl.yaml
index 04b5ca7..988fe96 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/sl.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/sl.yaml
@@ -18,105 +18,105 @@
skip_fail: false
- description: Configure docker service
- cmd: salt -C 'I@docker:swarm' state.sls docker.host
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install docker swarm on master node
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Send grains to the swarm slave nodes
- cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Update mine
- cmd: salt -C 'I@docker:swarm' mine.update
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Refresh modules
- cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Rerun swarm on slaves for proper token population
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Configure slave nodes
- cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: List registered Docker swarm nodes
- cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
# Install slv2 infra
- description: Install telegraf
- cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
cmd: |
- if salt -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt -C 'I@prometheus:exporters' state.sls prometheus
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Configure collector
- cmd: salt -C 'I@heka:log_collector' state.sls heka.log_collector
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install elasticsearch server
- cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install kibana server
- cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install elasticsearch client
- cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install kibana client
- cmd: salt -C 'I@kibana:client' state.sls kibana.client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Check influxdb
cmd: |
- INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+ INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt -C 'I@influxdb:server' state.sls influxdb
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
@@ -125,44 +125,44 @@
# Collect grains needed to configure the services
- description: Get grains
- cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Sync modules
- cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Update mine
- cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 5, delay: 15}
skip_fail: false
# Configure the services running in Docker Swarm
- description: Install prometheus alertmanager
- cmd: salt -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: run docker state
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: docker ps
- cmd: salt -C 'I@docker:swarm' dockerng.ps
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt -C 'I@grafana:client' state.sls grafana.client
+ cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
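
Every bare `salt` call in this template gains the same three flags: `--hard-crash` re-raises original exceptions so errors exit non-zero instead of being swallowed, `--state-output=mixed` prints full output only for failed states, and `--state-verbose=False` suppresses states that succeeded without changes, which keeps CI logs focused on failures. The resulting invocation shape, taken from the hunk above:

```yaml
- description: Install telegraf
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 2, delay: 10}
  skip_fail: false
```
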
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data-cfg01.yaml
index 6448211..da7908d 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -44,35 +42,7 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
-
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - apt-get update
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
write_files:
- path: /etc/network/interfaces
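
With the base-OS preparation moved out of user-data, bootcmd now keeps only the root-access tweaks; the deleted lines had gated SSH behind an iptables DROP rule for the duration of provisioning. For reference, the same gating idiom written with cloud-init-per's documented `<frequency> <name> <command>` arguments would be roughly this sketch:

```yaml
bootcmd:
  # Drop inbound SSH once per instance while the node is being prepared
  - cloud-init-per instance block-ssh iptables -A INPUT -p tcp --dport 22 -j DROP
runcmd:
  # ... provisioning steps ...
  # Node is ready, allow SSH access again
  - iptables -D INPUT -p tcp --dport 22 -j DROP
```
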
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data1604.yaml
index c056295..3fbb777 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data1604.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -44,34 +42,6 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
write_files:
- path: /etc/network/interfaces
content: |
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml
index 94bce47..b602748 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml
@@ -39,8 +39,8 @@
- description: Install ceph mgr if defined (needed only for Luminous)
cmd: |
- if salt -C 'I@ceph:mgr' match.pillar 'ceph:mgr' ; then
- salt -C 'I@ceph:mgr' state.sls ceph.mgr
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:mgr' match.pillar 'ceph:mgr' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:mgr' state.sls ceph.mgr
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
@@ -97,10 +97,10 @@
- description: Install radosgw if exists
cmd: |
- if salt -C 'I@ceph:radosgw' match.pillar 'ceph:radosgw' ; then
- salt -C 'I@ceph:radosgw' saltutil.sync_grains;
- salt -C 'I@ceph:radosgw' state.sls ceph.radosgw;
- salt -C 'I@keystone:client' state.sls keystone.client;
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' match.pillar 'ceph:radosgw' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' saltutil.sync_grains;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:radosgw' state.sls ceph.radosgw;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keystone:client' state.sls keystone.client;
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
@@ -124,34 +124,34 @@
- description: Connect ceph to glance
cmd: |
- salt -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
- salt -C 'I@ceph:common and I@glance:server' service.restart glance-api;
- salt -C 'I@ceph:common and I@glance:server' service.restart glance-glare;
- salt -C 'I@ceph:common and I@glance:server' service.restart glance-registry;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-api;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-glare;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@glance:server' service.restart glance-registry;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
skip_fail: false
- description: Connect ceph to cinder and nova
cmd: |
- salt -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring,cinder;
- salt -C 'I@ceph:common and I@nova:compute' state.sls ceph.common,ceph.setup.keyring;
- salt -C 'I@ceph:common and I@nova:compute' saltutil.sync_grains;
- salt -C 'I@ceph:common and I@nova:compute' state.sls nova;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@cinder:controller' state.sls ceph.common,ceph.setup.keyring,cinder;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls ceph.common,ceph.setup.keyring;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' saltutil.sync_grains;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:common and I@nova:compute' state.sls nova;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
skip_fail: false
- description: Restart cinder volume
cmd: |
- salt -C 'I@cinder:controller' service.restart cinder-volume;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@cinder:controller' service.restart cinder-volume;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
skip_fail: false
- description: Restart nova-compute
cmd: |
- salt -C 'I@nova:compute' service.restart nova-compute;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@nova:compute' service.restart nova-compute;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
skip_fail: false
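
The optional Ceph components use `match.pillar` as a guard: the state is applied only if at least one minion actually has the pillar key, so the same template works on topologies without that component. The generic shape of the guard, with `ceph:mgr` standing in for any pillar path:

```yaml
- description: Apply a state only where its pillar is defined
  cmd: |
    if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:mgr' match.pillar 'ceph:mgr' ; then
      salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@ceph:mgr' state.sls ceph.mgr
    fi
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 2, delay: 5}
  skip_fail: false
```
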
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/openstack.yaml
index d064d37..9d7dbf4 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/openstack.yaml
@@ -52,7 +52,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@keystone:client' state.sls keystone.client
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 2, delay: 5}
skip_fail: false
- description: Check keystone service-list
@@ -164,7 +164,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
+ skip_fail: false
- description: Check IP on computes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data-cfg01.yaml
index 6448211..da7908d 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -44,35 +42,7 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
-
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - apt-get update
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
write_files:
- path: /etc/network/interfaces
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data1604.yaml
index c056295..3fbb777 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data1604.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -44,34 +42,6 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
write_files:
- path: /etc/network/interfaces
content: |
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml
new file mode 100644
index 0000000..6cbb6a8
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/_context-cookiecutter-mcp-pike-ovs.yaml
@@ -0,0 +1,154 @@
+default_context:
+ bmk_enabled: 'False'
+ ceph_enabled: 'False'
+ cicd_enabled: 'False'
+ cluster_domain: virtual-mcp-pike-ovs.local
+ cluster_name: virtual-mcp-pike-ovs
+ compute_bond_mode: active-backup
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 172.16.10.0/24
+ control_vlan: '10'
+ cookiecutter_template_branch: master
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+ deploy_network_gateway: 192.168.10.1
+ deploy_network_netmask: 255.255.255.0
+ deploy_network_subnet: 192.168.10.0/24
+ deployment_type: physical
+ dns_server01: 8.8.8.8
+ dns_server02: 8.8.4.4
+ email_address: ddmitriev@mirantis.com
+ gateway_primary_first_nic: eth1
+ gateway_primary_second_nic: eth2
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: 172.16.10.101
+ infra_kvm01_deploy_address: 192.168.10.101
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 172.16.10.102
+ infra_kvm02_deploy_address: 192.168.10.102
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 172.16.10.103
+ infra_kvm03_deploy_address: 192.168.10.103
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: 172.16.10.100
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ kubernetes_enabled: 'False'
+ local_repositories: 'False'
+ maas_deploy_address: 192.168.10.90
+ maas_hostname: cfg01
+ mcp_version: stable
+ offline_deployment: 'False'
+ opencontrail_enabled: 'False'
+ openstack_benchmark_node01_address: 172.16.10.95
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_cluster_size: compact
+ openstack_compute_count: '100'
+ openstack_compute_rack01_hostname: cmp
+ openstack_compute_rack01_single_subnet: 172.16.10
+ openstack_compute_rack01_tenant_subnet: 10.1.0
+ openstack_control_address: 172.16.10.100
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: 172.16.10.101
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: 172.16.10.102
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: 172.16.10.103
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: 172.16.10.100
+ openstack_database_hostname: dbs
+ openstack_database_node01_address: 172.16.10.101
+ openstack_database_node01_hostname: dbs01
+ openstack_database_node02_address: 172.16.10.102
+ openstack_database_node02_hostname: dbs02
+ openstack_database_node03_address: 172.16.10.103
+ openstack_database_node03_hostname: dbs03
+ openstack_enabled: 'True'
+ openstack_gateway_node01_address: 172.16.10.110
+ openstack_gateway_node01_hostname: gtw01
+ openstack_gateway_node01_tenant_address: 10.1.0.6
+ openstack_gateway_node02_address: 172.16.10.111
+ openstack_gateway_node02_hostname: gtw02
+ openstack_gateway_node02_tenant_address: 10.1.0.7
+ openstack_gateway_node03_address: 172.16.10.112
+ openstack_gateway_node03_hostname: gtw03
+ openstack_gateway_node03_tenant_address: 10.1.0.8
+ openstack_message_queue_address: 172.16.10.100
+ openstack_message_queue_hostname: msg
+ openstack_message_queue_node01_address: 172.16.10.101
+ openstack_message_queue_node01_hostname: msg01
+ openstack_message_queue_node02_address: 172.16.10.102
+ openstack_message_queue_node02_hostname: msg02
+ openstack_message_queue_node03_address: 172.16.10.103
+ openstack_message_queue_node03_hostname: msg03
+ openstack_network_engine: ovs
+ openstack_neutron_qos: 'False'
+ openstack_neutron_vlan_aware_vms: 'False'
+ openstack_nfv_dpdk_enabled: 'False'
+ openstack_nfv_sriov_enabled: 'False'
+ openstack_nova_compute_nfv_req_enabled: 'False'
+ openstack_ovs_dvr_enabled: 'False'
+ openstack_ovs_encapsulation_type: vxlan
+ openstack_proxy_address: 172.16.10.80
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: 172.16.10.121
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: 172.16.10.122
+ openstack_proxy_node02_hostname: prx02
+ openstack_upgrade_node01_address: 172.16.10.19
+ openstack_version: pike
+ oss_enabled: 'False'
+ oss_node03_address: ${_param:stacklight_monitor_node03_address}
+ oss_notification_app_id: '24'
+ oss_notification_sender_password: password
+ oss_notification_smtp_port: '587'
+ oss_notification_webhook_login_id: '13'
+ platform: openstack_enabled
+ public_host: ${_param:openstack_proxy_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
+ salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
+ salt_master_address: 172.16.10.90
+ salt_master_hostname: cfg01
+ salt_master_management_address: 192.168.10.90
+ shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+ stacklight_enabled: 'True'
+ stacklight_log_address: 172.16.10.70
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 172.16.10.107
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 172.16.10.108
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 172.16.10.109
+ stacklight_log_node03_hostname: log03
+ stacklight_monitor_address: 172.16.10.70
+ stacklight_monitor_hostname: mon
+ stacklight_monitor_node01_address: 172.16.10.107
+ stacklight_monitor_node01_hostname: mon01
+ stacklight_monitor_node02_address: 172.16.10.108
+ stacklight_monitor_node02_hostname: mon02
+ stacklight_monitor_node03_address: 172.16.10.109
+ stacklight_monitor_node03_hostname: mon03
+ stacklight_notification_address: alerts@localhost
+ stacklight_notification_smtp_host: 127.0.0.1
+ stacklight_telemetry_address: 172.16.10.70
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 172.16.10.107
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 172.16.10.108
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 172.16.10.109
+ stacklight_telemetry_node03_hostname: mtr03
+ stacklight_version: '2'
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: 10.1.0.1
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 10.1.0.0/24
+ tenant_vlan: '20'
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'False'
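
This default_context is fed to cookiecutter when the cluster model is rendered, and most of it is boilerplate addressing; a derived lab usually only needs to vary the identity and subnet keys. An illustrative subset (values copied from above, everything else inheriting the defaults):

```yaml
# Illustrative subset only; all other keys keep the defaults shown above.
default_context:
  cluster_name: virtual-mcp-pike-ovs
  cluster_domain: virtual-mcp-pike-ovs.local
  openstack_version: pike
  control_network_subnet: 172.16.10.0/24
  deploy_network_subnet: 192.168.10.0/24
  tenant_network_subnet: 10.1.0.0/24
  salt_master_address: 172.16.10.90
```
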
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/_context-environment.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/_context-environment.yaml
new file mode 100644
index 0000000..18a87be
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/_context-environment.yaml
@@ -0,0 +1,141 @@
+nodes:
+ cfg01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ ctl01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - infra_kvm
+ - openstack_control_leader
+ - openstack_database_leader
+ - openstack_message_queue
+ - features_designate_database
+ - features_designate
+ - features_designate_keystone
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ ctl02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - infra_kvm
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+ - features_designate_database
+ - features_designate
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ ctl03.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - infra_kvm
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+ - features_designate_database
+ - features_designate
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ prx01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - openstack_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ mon01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - stacklight_telemetry_leader
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ mon02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - stacklight_telemetry
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ mon03.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - stacklight_telemetry
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_vlan_ctl
+
+ # Generator-based computes. For compatibility only
+ cmp<<count>>.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_compute_rack01
+ roles:
+ - openstack_compute
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: bond0_ab_ovs_vxlan_ctl_mesh
+ ens5:
+ role: bond0_ab_ovs_vxlan_ctl_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+ gtw01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_gateway_node01
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: bond0_ab_ovs_vxlan_ctl_mesh
+ ens5:
+ role: bond0_ab_ovs_vxlan_ctl_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
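
Each entry in this environment context maps a hostname to the reclass storage name it is written under, the system roles merged into its node definition, and a network role per interface. The per-node schema, shown for the proxy node from above (the interface comments are inferred from the role names):

```yaml
nodes:
  prx01.mcp11-ovs-dpdk.local:
    reclass_storage_name: openstack_proxy_node01  # key the node is stored under in the model
    roles:                                        # system roles merged into the node definition
      - openstack_proxy
      - linux_system_codename_xenial
    interfaces:
      ens3:
        role: single_dhcp      # deploy/management network via DHCP
      ens4:
        role: single_vlan_ctl  # control-plane VLAN
```
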
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
index ff9bd81..65f3dab 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
@@ -52,7 +52,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@keystone:client' state.sls keystone.client
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 2, delay: 5}
skip_fail: false
- description: Check keystone service-list
@@ -179,7 +179,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
+ skip_fail: false
- description: Check IP on computes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml
index 3eb5082..88b25cd 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml
@@ -17,6 +17,8 @@
{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
+
{{ SHARED.MACRO_GENERATE_INVENTORY() }}
{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/sl.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/sl.yaml
index 0c37346..a8ca944 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/sl.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/sl.yaml
@@ -20,102 +20,102 @@
skip_fail: false
- description: Configure docker service
- cmd: salt -C 'I@docker:swarm' state.sls docker.host
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install docker swarm on master node
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Send grains to the swarm slave nodes
- cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Update mine
- cmd: salt -C 'I@docker:swarm' mine.update
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Refresh modules
- cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Rerun swarm on slaves for proper token population
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Configure slave nodes
- cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: List registered Docker swarm nodes
- cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
# Install slv2 infra
- description: Install telegraf
- cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
- description: Configure Prometheus exporters
- cmd: salt -C 'I@prometheus:exporters' state.sls prometheus
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Configure collector
- cmd: salt -C 'I@heka:log_collector' state.sls heka.log_collector
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install elasticsearch server
- cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install kibana server
- cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install elasticsearch client
- cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install kibana client
- cmd: salt -C 'I@kibana:client' state.sls kibana.client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Check influxdb
cmd: |
- INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+ INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt -C 'I@influxdb:server' state.sls influxdb
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
@@ -124,44 +124,44 @@
# Collect grains needed to configure the services
- description: Get grains
- cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Sync modules
- cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Update mine
- cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
# Configure the services running in Docker Swarm
- description: Install prometheus alertmanager
- cmd: salt -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: run docker state
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: docker ps
- cmd: salt -C 'I@docker:swarm' dockerng.ps
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt -C 'I@grafana:client' state.sls grafana.client
+ cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data-cfg01.yaml
index 6448211..da7908d 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -44,35 +42,7 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
-
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - apt-get update
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
write_files:
- path: /etc/network/interfaces
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data1604.yaml
index c056295..3fbb777 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data1604.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -44,34 +42,6 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
write_files:
- path: /etc/network/interfaces
content: |
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/Readme.txt b/tcp_tests/templates/virtual-mcp-sl-os/Readme.txt
new file mode 100644
index 0000000..b4fa30f
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-sl-os/Readme.txt
@@ -0,0 +1,7 @@
+Template for deploying mitaka/newton models:
+- virtual-mcp-mitaka-dvr
+- virtual-mcp-mitaka-ovs
+- virtual-mcp-newton-dvr
+- virtual-mcp-newton-ovs
+
+Used by maintenance team.
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/common-services.yaml b/tcp_tests/templates/virtual-mcp-sl-os/common-services.yaml
new file mode 100644
index 0000000..3ad67c2
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-sl-os/common-services.yaml
@@ -0,0 +1,118 @@
+{% from 'virtual-mcp-sl-os/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install support services
+- description: Install keepalived on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster and *01*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Install keepalived
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Check the VIP
+ cmd: |
+ OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
+ echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install glusterfs
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.service
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Setup glusterfs on primary controller
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check the gluster status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check the rabbitmq status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on first server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:master' state.sls galera
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on other servers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:slave' state.sls galera
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check mysql status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+- description: Install haproxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' state.sls haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check haproxy status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.status haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Restart rsyslog
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.restart rsyslog
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install memcached on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@memcached:server' state.sls memcached
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
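
Clustered services that must bootstrap on a single member first (keepalived, RabbitMQ, Galera, glusterfs setup) are rolled out in two passes: a narrowed `and *01*` compound target, or a `-b 1` batch, seeds the cluster on the first node, then the full target joins the remaining members. The recurring two-step shape, using RabbitMQ as in the file above:

```yaml
- description: Install RabbitMQ on ctl01 so it can seed the cluster
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false

- description: Install RabbitMQ on the remaining servers, which join ctl01
  cmd: salt --hard-crash --state-output=mixed --state-verbose=False
    -C 'I@rabbitmq:server' state.sls rabbitmq
  node_name: {{ HOSTNAME_CFG01 }}
  retry: {count: 1, delay: 5}
  skip_fail: false
```
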
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/openstack.yaml b/tcp_tests/templates/virtual-mcp-sl-os/openstack.yaml
new file mode 100644
index 0000000..0ddf871
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-sl-os/openstack.yaml
@@ -0,0 +1,294 @@
+{% from 'virtual-mcp-sl-os/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-sl-os/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'virtual-mcp-sl-os/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'virtual-mcp-sl-os/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'virtual-mcp-sl-os/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+
+# Install OpenStack control services
+
+- description: Install glance on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glance -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install keystone service (note that different fernet keys are created on different nodes)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 15}
+ skip_fail: false
+
+- description: Restart apache due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Check apache status due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to exist)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glusterfs.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Update fernet keys for keystone server on the mounted glusterfs volume
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Populate keystone services/tenants/admins
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:client' state.sls keystone.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check keystone service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check glance image-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install nova on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nova:controller' state.sls nova -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check nova service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+
+- description: Install cinder
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@cinder:controller' state.sls cinder -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check cinder list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install neutron service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@neutron:server' state.sls neutron -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install neutron on gtw node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@neutron:gateway' state.sls neutron
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+# install designate
+#- description: Install bind
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+# -C 'I@bind:server' state.sls bind
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 5}
+# skip_fail: false
+#
+#- description: Install designate
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+# -C 'I@designate:server' state.sls designate -b 1
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 5, delay: 10}
+# skip_fail: false
+
+- description: Check neutron agent-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@heat:server' state.sls heat -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 10}
+ skip_fail: false
+
+
+- description: Deploy horizon dashboard
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@horizon:server' state.sls horizon
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Deploy nginx proxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nginx:server' state.sls nginx
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+# Install compute node
+
+- description: Apply formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Re-apply (as in the docs) formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Check IP on computes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+ 'ip a'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 10, delay: 30}
+ skip_fail: false
+
+
+ # Upload cirros image
+
+- description: Upload cirros image on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 30}
+ skip_fail: false
+
+- description: Register image in glance
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create net04_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create router
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-create net04_router01'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set gateway
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Add interface
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+#- description: Allow all tcp
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+# '. /root/keystonercv3; openstack security group rule create --proto tcp --dst-port 22 default'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
+#
+#- description: Allow all icmp
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+# '. /root/keystonercv3; openstack security group rule create --proto icmp default'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
+
+- description: Sync time
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+ 'service ntp stop; ntpd -gq; service ntp start'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: true
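+
+# Note: 'ntpd -gq' performs a one-shot sync ('-g' permits an arbitrarily large
+# initial offset, '-q' sets the clock once and exits), which is why the regular
+# ntp service is stopped before it and restarted afterwards.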
+
+- description: Install docker.io on gtw
+ cmd: salt-call cmd.run 'apt-get install docker.io -y'
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create rc file on cfg
+ cmd: scp ctl01:/root/keystonercv3 /root
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Copy rc file
+ cmd: scp /root/keystonercv3 gtw01:/root
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml b/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml
new file mode 100644
index 0000000..595695f
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml
@@ -0,0 +1,28 @@
+{% from 'virtual-mcp-sl-os/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-sl-os/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'virtual-mcp-sl-os/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# Other salt model repository parameters see in shared-salt.yaml
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd"') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+- description: Workaround - run linux state to fix hosts
+ cmd: salt "cfg*" state.sls linux
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
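+
+# Note: re-running the linux state here is presumably meant to regenerate
+# /etc/hosts entries after bootstrap (hence "fix hosts"); skip_fail keeps the
+# step best-effort.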
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/sl.yaml b/tcp_tests/templates/virtual-mcp-sl-os/sl.yaml
new file mode 100644
index 0000000..76b76b6
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-sl-os/sl.yaml
@@ -0,0 +1,176 @@
+{% from 'virtual-mcp-sl-os/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
+
+# Install docker swarm
+- description: Install keepalived on mon nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'mon*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check the VIP on mon nodes
+ cmd: |
+ SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+ echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
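+
+# The step above reads the expected VIP from pillar via salt-call, then greps
+# the 'ip a' output on the mon nodes; grep exits non-zero when no node has the
+# VIP bound, which is what marks the step as failed.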
+
+- description: Configure docker service
+ cmd: salt -C 'I@docker:swarm' state.sls docker.host
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install docker swarm on master node
+ cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Send grains to the swarm slave nodes
+ cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Update mine
+ cmd: salt -C 'I@docker:swarm' mine.update
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Refresh modules
+ cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Rerun swarm on slaves to ensure proper token population
+ cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure slave nodes
+ cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
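+
+# Note: '-b 1' is salt's batch size (--batch-size); the state is applied to one
+# minion at a time, a common pattern for rolling changes to clustered services.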
+
+- description: List registered Docker swarm nodes
+ cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+# Install StackLight v2 (slv2) infra
+- description: Install telegraf
+ cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Configure Prometheus exporters
+ cmd: salt -C 'I@prometheus:exporters' state.sls prometheus
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure collector
+ cmd: salt -C 'I@heka:log_collector' state.sls heka.log_collector
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install elasticsearch server
+ cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install kibana server
+ cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install elasticsearch client
+ cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install kibana client
+ cmd: salt -C 'I@kibana:client' state.sls kibana.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check influxdb
+ cmd: |
+ INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
+ if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
+ salt -C 'I@influxdb:server' state.sls influxdb
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
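+
+# The gate above relies on the salt command exiting non-zero when no minion
+# matches 'I@influxdb:server' (hence the '&& echo true' guard), so the influxdb
+# state only runs on topologies that actually include an InfluxDB server.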
+
+# Collect grains needed to configure the services
+
+- description: Get grains
+ cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Sync modules
+ cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Update mine
+ cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+# Configure the services running in Docker Swarm
+- description: Install prometheus alertmanager
+ cmd: salt -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Run docker state
+ cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: docker ps
+ cmd: salt -C 'I@docker:swarm' dockerng.ps
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure Grafana dashboards and datasources
+ cmd: sleep 30; salt -C 'I@grafana:client' state.sls grafana.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Run salt minion to create cert files
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp-sl-os/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-sl-os/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+ instance-id: iid-local1
+ hostname: {hostname}
+ local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..6448211
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data-cfg01.yaml
@@ -0,0 +1,91 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Block access to SSH while node is preparing
+ - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ - sudo ifdown ens3
+ - sudo ip r d default || true # remove existing default route to get it from dhcp
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ ############## TCP Cloud cfg01 node ##################
+ #- sleep 120
+ - echo "Preparing base OS"
+
+ - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+ - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
+ - which wget >/dev/null || (apt-get update; apt-get install -y wget);
+
+ # Configure Ubuntu mirrors
+ - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+ - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+ - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+ - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+ - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+ - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+ - apt-get clean
+ - apt-get update
+
+ # Install common packages
+ - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
+
+ ########################################################
+ # Node is ready, allow SSH access
+ - echo "Allow SSH access ..."
+ - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ ########################################################
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
+ - path: /root/.ssh/config
+ owner: root:root
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 300
+ ServerAliveCountMax 10
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data1604.yaml
new file mode 100644
index 0000000..c056295
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-sl-os/underlay--user-data1604.yaml
@@ -0,0 +1,80 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Block access to SSH while node is preparing
+ - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+
+ ############## TCP Cloud cfg01 node ##################
+ #- sleep 120
+ - echo "Preparing base OS"
+ - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+ # Configure Ubuntu mirrors
+ - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+ - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+ - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+ - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+ - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+ - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+ - apt-get clean
+ - eatmydata apt-get update && apt-get -y upgrade
+
+ # Install common packages
+ - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+ ########################################################
+ # Node is ready, allow SSH access
+ - echo "Allow SSH access ..."
+ - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ ########################################################
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/underlay.yaml b/tcp_tests/templates/virtual-mcp-sl-os/underlay.yaml
new file mode 100644
index 0000000..955750b
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-sl-os/underlay.yaml
@@ -0,0 +1,510 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% import 'virtual-mcp-sl-os/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'virtual-mcp-sl-os/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'virtual-mcp-sl-os/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-sl-os') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', 'virtual-mcp-sl-os_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+ address_pools:
+ private-pool01:
+ net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +107
+ default_{{ HOSTNAME_MON02 }}: +108
+ default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
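+
+        # Note: the '+N' / '-N' values above are understood as offsets from the
+        # start of the allocated network (e.g. '+100' -> x.x.x.100 in a /24),
+        # following the fuel-devops address-pool convention; 'dhcp: [+90, -10]'
+        # reserves that offset range for DHCP leases.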
+
+ admin-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +90
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +107
+ default_{{ HOSTNAME_MON02 }}: +108
+ default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
+
+ tenant-pool01:
+ net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +107
+ default_{{ HOSTNAME_MON02 }}: +108
+ default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+10, -10]
+
+ external-pool01:
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +107
+ default_{{ HOSTNAME_MON02 }}: +108
+ default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+10, -10]
+
+
+ groups:
+ - name: default
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+ use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+ network_pools:
+ admin: admin-pool01
+ private: private-pool01
+ tenant: tenant-pool01
+ external: external-pool01
+
+ l2_network_devices:
+ private:
+ address_pool: private-pool01
+ dhcp: true
+
+ admin:
+ address_pool: admin-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
+ tenant:
+ address_pool: tenant-pool01
+ dhcp: true
+
+ external:
+ address_pool: external-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
+
+ group_volumes:
+ - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
+ source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ format: qcow2
+ - name: cfg01_day01_image # Pre-configured day01 image
+ source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+ format: qcow2
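+
+  # Note: node volumes that reference these names via 'backing_store' are
+  # presumably created as thin qcow2 copy-on-write overlays, so every node
+  # boots from a clone of the same base image.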
+
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cfg01_day01_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                     # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_cfg01
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+
+ - name: {{ HOSTNAME_CTL01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 12288
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                     # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: &interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config: &network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
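+
+  # The '&interfaces' / '&network_config' anchors above are reused through the
+  # '*interfaces' / '*network_config' aliases on the other two-NIC nodes below,
+  # so the admin/private wiring is declared only once.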
+
+ - name: {{ HOSTNAME_CTL02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 12288
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                     # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CTL03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 12288
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                     # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                     # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                     # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                     # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_PRX01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                     # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CMP01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                     # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: &all_interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ - label: ens5
+ l2_network_device: tenant
+ interface_model: *interface_model
+ - label: ens6
+ l2_network_device: external
+ interface_model: *interface_model
+ network_config: &all_network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+ ens5:
+ networks:
+ - tenant
+ ens6:
+ networks:
+ - external
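+
+  # '&all_interfaces' / '&all_network_config' extend the wiring with the tenant
+  # (ens5) and external (ens6) networks; they are aliased on cmp02 and gtw01
+  # below, i.e. only compute and gateway nodes attach to all four networks.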
+
+ - name: {{ HOSTNAME_CMP02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                     # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_GTW01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                     # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
diff --git a/tcp_tests/templates/virtual-mcp10-contrail/openstack.yaml b/tcp_tests/templates/virtual-mcp10-contrail/openstack.yaml
index 59c350d..8732168 100644
--- a/tcp_tests/templates/virtual-mcp10-contrail/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp10-contrail/openstack.yaml
@@ -208,7 +208,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
+ skip_fail: false
- description: Check IP on computes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/virtual-mcp10-dvr/openstack.yaml b/tcp_tests/templates/virtual-mcp10-dvr/openstack.yaml
index 5b292f8..05d07be 100644
--- a/tcp_tests/templates/virtual-mcp10-dvr/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp10-dvr/openstack.yaml
@@ -148,7 +148,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
+ skip_fail: false
- description: Check IP on computes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/virtual-mcp10-ovs/openstack.yaml b/tcp_tests/templates/virtual-mcp10-ovs/openstack.yaml
index 87657f9..b8982b9 100644
--- a/tcp_tests/templates/virtual-mcp10-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp10-ovs/openstack.yaml
@@ -154,7 +154,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
+ skip_fail: false
- description: Check IP on computes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/virtual-mcp11-dvr/openstack.yaml b/tcp_tests/templates/virtual-mcp11-dvr/openstack.yaml
index 5712ad7..16ada26 100644
--- a/tcp_tests/templates/virtual-mcp11-dvr/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp11-dvr/openstack.yaml
@@ -165,7 +165,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
+ skip_fail: false
- description: Check IP on computes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/virtual-mcp11-dvr/salt.yaml b/tcp_tests/templates/virtual-mcp11-dvr/salt.yaml
index ece01ce..d3fb6bc 100644
--- a/tcp_tests/templates/virtual-mcp11-dvr/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp11-dvr/salt.yaml
@@ -11,7 +11,7 @@
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd"') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay.yaml
index 724c131..2e92e1b 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-calico-minimal/underlay.yaml
@@ -84,7 +84,10 @@
group_volumes:
- name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+    # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
+ # or
+ # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
format: qcow2
- name: cfg01_day01_image # Pre-configured day01 image
source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/salt.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/salt.yaml
index 43e609f..a650b87 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-calico/salt.yaml
@@ -14,7 +14,7 @@
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd"') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
@@ -22,6 +22,8 @@
{{ SHARED.ADJUST_K8S_OPTS() }}
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
+
{%- if ENABLE_COMPUTES_SELF_REGISTER != '' %}
{{ SHARED.REGISTER_COMPUTE_NODES() }}
{%- endif %}
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/sl.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/sl.yaml
index 4338149..831cded 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/sl.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-calico/sl.yaml
@@ -3,105 +3,105 @@
# Install docker swarm
- description: Configure docker service
- cmd: salt -C 'I@docker:swarm' state.sls docker.host
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install docker swarm on master node
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Send grains to the swarm slave nodes
- cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Update mine
- cmd: salt -C 'I@docker:swarm' mine.update
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Refresh modules
- cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Rerun swarm on slaves to proper token population
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Configure slave nodes
- cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: List registered Docker swarm nodes
- cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
# Install slv2 infra
- description: Install telegraf
- cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
cmd: |
- if salt -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt -C 'I@prometheus:exporters' state.sls prometheus
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-- description: Configure collector
- cmd: salt -C 'I@heka:log_collector' state.sls heka.log_collector
+- description: Configure fluentd
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install elasticsearch server
- cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install kibana server
- cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install elasticsearch client
- cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install kibana client
- cmd: salt -C 'I@kibana:client' state.sls kibana.client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Check influix db
cmd: |
- INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+ INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt -C 'I@influxdb:server' state.sls influxdb
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
@@ -110,56 +110,44 @@
# Collect grains needed to configure the services
- description: Get grains
- cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Sync modules
- cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Update mine
- cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-# Change environment configuration before deploy
-- description: Set SL docker images deploy parameters
- cmd: |
- {% for sl_opt, value in config.sl_deploy.items() %}
- {% if value|string() %}
- salt-call reclass.cluster_meta_set {{ sl_opt }} {{ value }};
- {% endif %}
- {% endfor %}
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 1}
- skip_fail: false
-
# Configure the services running in Docker Swarm
- description: Install prometheus alertmanager
- cmd: salt -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:server' state.sls prometheus -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: run docker state
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: docker ps
- cmd: salt -C 'I@docker:swarm' dockerng.ps
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt -C 'I@grafana:client' state.sls grafana.client
+ cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--user-data-cfg01.yaml
index 25c662b..504fd80 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -46,34 +44,7 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
-
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - eatmydata apt-get clean && apt-get update
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
write_files:
- path: /etc/network/interfaces
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--user-data1604.yaml
index c0c8e1f..6fd3272 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay--user-data1604.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -45,34 +43,6 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
write_files:
- path: /etc/network/interfaces
content: |
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay.yaml b/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay.yaml
index c5df027..2355c0c 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-calico/underlay.yaml
@@ -98,7 +98,10 @@
group_volumes:
- name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+    # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
+ # or
+ # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
format: qcow2
- name: cfg01_day01_image # Pre-configured day01 image
source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/salt.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/salt.yaml
index 0062db5..3c37187 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-contrail/salt.yaml
@@ -23,6 +23,8 @@
{{ SHARED.ADJUST_K8S_OPTS() }}
+{{ SHARED.ADJUST_SL_OPTS(OVERRIDES_FILENAME='/srv/salt/reclass/classes/cluster/' + SHARED.CLUSTER_NAME + '/stacklight/server.yml') }}
+
{%- if ENABLE_COMPUTES_SELF_REGISTER != '' %}
{{ SHARED.REGISTER_COMPUTE_NODES() }}
{%- endif %}
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/sl.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/sl.yaml
index 14264f0..43d6ad8 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/sl.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-contrail/sl.yaml
@@ -10,7 +10,7 @@
skip_fail: false
- description: Restart keepalived service
- cmd: salt -C 'mon*' cmd.run "systemctl restart keepalived"
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "systemctl restart keepalived"
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
@@ -25,105 +25,105 @@
skip_fail: false
- description: Configure docker service
- cmd: salt -C 'I@docker:swarm' state.sls docker.host
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install docker swarm on master node
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Send grains to the swarm slave nodes
- cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Update mine
- cmd: salt -C 'I@docker:swarm' mine.update
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Refresh modules
- cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Rerun swarm on slaves to proper token population
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Configure slave nodes
- cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: List registered Docker swarm nodes
- cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
# Install slv2 infra
- description: Install telegraf
- cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
cmd: |
- if salt -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
- salt -C 'I@prometheus:exporters' state.sls prometheus
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Configure collector
- cmd: salt -C 'I@heka:log_collector' state.sls heka.log_collector
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install elasticsearch server
- cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install kibana server
- cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install elasticsearch client
- cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install kibana client
- cmd: salt -C 'I@kibana:client' state.sls kibana.client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Check influxdb
cmd: |
- INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+ INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt -C 'I@influxdb:server' state.sls influxdb
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
@@ -132,44 +132,44 @@
# Collect grains needed to configure the services
- description: Get grains
- cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Sync modules
- cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Update mine
- cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 3, delay: 10}
skip_fail: false
# Configure the services running in Docker Swarm
- description: Install prometheus alertmanager
- cmd: salt -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Run docker state
- cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: docker ps
- cmd: salt -C 'I@docker:swarm' dockerng.ps
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Configure Grafana dashboards and datasources
- cmd: sleep 60; salt -C 'I@grafana:client' state.sls grafana.client
+ cmd: sleep 60; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
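Note: every step in these hunks gains the same three salt CLI flags. Roughly: --hard-crash makes the salt client raise and exit non-zero on internal exceptions instead of exiting gracefully, --state-output=mixed prints full output only for failed states, and --state-verbose=False hides states that succeeded without changes, so step logs stay short and a real failure is what trips the retry/skip_fail handling. A minimal sketch of the hardened step shape used throughout these templates (the ntp target is illustrative):

    - description: Hardened pipeline step (sketch)
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False
        -C 'I@ntp:client' state.sls ntp
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 10}
      skip_fail: false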
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data-cfg01.yaml
index 84799cf..4f140a0 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -48,34 +46,7 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
-
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - eatmydata apt-get clean && eatmydata apt-get update
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
write_files:
- path: /etc/network/interfaces
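Note: the lines removed from this user-data file implemented a "gate SSH until provisioned" pattern, reconstructed here for reference from the deleted hunks:

    bootcmd:
      # Block access to SSH while the node is preparing
      - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
    runcmd:
      # ... provisioning steps ...
      # Node is ready, allow SSH access
      - sudo iptables -D INPUT -p tcp --dport 22 -j DROP

With the in-boot mirror/package bootstrap dropped from this template (presumably because the pre-configured day01 image in the underlay change below no longer needs it), there is nothing left to gate, so the DROP rule and its removal go away together.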
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1404.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1404.yaml
index ccc11b2..2a41ee3 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1404.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1404.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -44,37 +42,6 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- ############## tcp cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/trusty {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -
- - echo "deb http://repo.saltstack.com/apt/ubuntu/14.04/amd64/2016.3 trusty main" > /etc/apt/sources.list.d/saltstack.list
- - wget -O - https://repo.saltstack.com/apt/ubuntu/14.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -
-
- - eatmydata apt-get clean
- - eatmydata apt-get update && eatmydata apt-get -y upgrade
-
- # install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- # Workaround for bug https://mirantis.jira.com/browse/PROD-8214
- - eatmydata apt-get -y install --install-recommends linux-generic-lts-xenial
- - reboot
-
- ########################################################
- # Node is ready, allow SSH access
- #- echo "Allow SSH access ..."
- #- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
write_files:
- path: /etc/network/interfaces
content: |
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1604.yaml
index 974682c..5fc02ce 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay--user-data1604.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -46,33 +44,6 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - eatmydata apt-get update && apt-get -y upgrade
-
- # install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
write_files:
- path: /etc/network/interfaces
content: |
diff --git a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay.yaml b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay.yaml
index af1b8e6..60094c7 100644
--- a/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp11-k8s-contrail/underlay.yaml
@@ -141,7 +141,10 @@
group_volumes:
- name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
- source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ # http://images.mirantis.com/ubuntu-16-04-x64-latest.qcow2 (preferred)
+ # or
+ # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ source_image: {{ os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) }}
format: qcow2
- name: cfg01_day01_image # Pre-configured day01 image
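Note: os_env(name, default) in these templates reads an environment variable and falls back to the second argument, so the nested call above resolves to MCP_IMAGE_PATH1604 when it is exported and to IMAGE_PATH1604 otherwise. The same fallback chain written out (SOURCE_IMAGE is an illustrative intermediate; the template inlines the expression):

    # Prefer the pre-built Mirantis image, fall back to the stock cloud image
    {% set SOURCE_IMAGE = os_env('MCP_IMAGE_PATH1604', os_env('IMAGE_PATH1604')) %}
    source_image: {{ SOURCE_IMAGE }}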
diff --git a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/openstack.yaml b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/openstack.yaml
index 0c93c11..afe3b21 100644
--- a/tcp_tests/templates/virtual-mcp11-ovs-dpdk/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp11-ovs-dpdk/openstack.yaml
@@ -164,7 +164,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
+ skip_fail: false
- description: Check IP on computes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/virtual-mcp11-ovs/openstack.yaml b/tcp_tests/templates/virtual-mcp11-ovs/openstack.yaml
index 6ab7940..f9dca1d 100644
--- a/tcp_tests/templates/virtual-mcp11-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp11-ovs/openstack.yaml
@@ -165,7 +165,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
+ skip_fail: false
- description: Check IP on computes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
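Note: flipping skip_fail from true to false in both openstack.yaml templates turns the compute state.apply into a hard gate: with true a failure is logged and the run continues, with false the deployment aborts once the retry budget (count x delay) is spent. An illustrative contrast of the two modes:

    - description: Soft probe (failure tolerated)
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' test.ping
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 5}
      skip_fail: true    # log the failure, keep deploying

    - description: Hard gate (failure aborts the run)
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 5}
      skip_fail: false   # abort the deployment on failure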
diff --git a/tcp_tests/templates/virtual-mcp11-ovs/salt.yaml b/tcp_tests/templates/virtual-mcp11-ovs/salt.yaml
index f4fab6a..5db2f58 100644
--- a/tcp_tests/templates/virtual-mcp11-ovs/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp11-ovs/salt.yaml
@@ -11,7 +11,7 @@
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd"') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
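Note: FORMULA_SERVICES is a quoted, space-separated list of formula names handed to the configure-reclass macro; adding "fluentd" presumably pulls in the matching salt-formula package on the master (the salt-formula-<name> package naming is assumed here from the repository convention), roughly:

    # Presumed effect of the added list entry (sketch, package name assumed)
    - description: Install the fluentd formula on the salt master
      cmd: apt-get install -y salt-formula-fluentd
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 5}
      skip_fail: false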
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/common-services.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/common-services.yaml
new file mode 100644
index 0000000..34283a1
--- /dev/null
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/common-services.yaml
@@ -0,0 +1,125 @@
+{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+- description: Remove apparmor
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ '*' cmd.run 'service apparmor stop; service apparmor teardown; update-rc.d -f apparmor remove; apt-get -y remove apparmor'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+# Install support services
+- description: Install keepalived on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster and *01*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Install keepalived
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Check the VIP
+ cmd: |
+ OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
+ echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install glusterfs
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.service
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Setup glusterfs on primary controller
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check the gluster status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check the rabbitmq status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on first server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:master' state.sls galera
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on other servers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:slave' state.sls galera
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check mysql status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+- description: Install haproxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' state.sls haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check haproxy status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.status haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Restart rsyslog
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.restart rsyslog
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install memcached on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@memcached:server' state.sls memcached
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
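Note: this new file follows a consistent deploy-then-verify pattern: quorum-sensitive services (keepalived, GlusterFS, RabbitMQ, Galera) are brought up on one node first or with -b 1, then cluster-wide, and each install is followed by a read-only status check. The -b 1 flag is salt's batch size, applying the command to one matched minion at a time; an illustrative rolling restart using the same flag:

    - description: Rolling restart, one proxy at a time (sketch)
      cmd: salt --hard-crash --state-output=mixed --state-verbose=False
        -C 'I@haproxy:proxy' service.restart haproxy -b 1
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 5}
      skip_fail: false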
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml
new file mode 100644
index 0000000..44024a9
--- /dev/null
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/openstack.yaml
@@ -0,0 +1,337 @@
+{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+
+# Install OpenStack control services
+
+- description: Install glance on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glance -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install keystone service (note that different fernet keys are created on different nodes)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 15}
+ skip_fail: false
+
+- description: Restart apache due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Check apache status due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glusterfs.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Update fernet keys for keystone server on the mounted glusterfs volume
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Populate keystone services/tenants/admins
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:client' state.sls keystone.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check keystone service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check glance image-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; glance image-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install nova on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nova:controller' state.sls nova -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check nova service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; nova --debug service-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+
+- description: Install cinder
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@cinder:controller' state.sls cinder -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check cinder list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; cinder list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install neutron service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@neutron:server' state.sls neutron -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install neutron on gtw node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@neutron:gateway' state.sls neutron
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check neutron agent-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; neutron agent-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@heat:server' state.sls heat -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 10}
+ skip_fail: false
+
+
+- description: Deploy horizon dashboard
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@horizon:server' state.sls horizon
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Deploy nginx proxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nginx:server' state.sls nginx
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+# Install compute node
+
+- description: Apply formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Re-apply (as in the docs) formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check IP on computes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+ 'ip a'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 10, delay: 30}
+ skip_fail: false
+
+
+# Configure cinder-volume via salt-call (the cirros image is uploaded below, once the LVM backend is ready)
+- description: Set disks 01
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set disks 02
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set disks 03
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create partitions 01
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create partitions 02
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create partitions 03
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create volume group
+ cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Install cinder-volume
+ cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Install crudini
+ cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temporary workaround - set enabled_backends value 01
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temporary workaround - set enabled_backends value 02
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temporary workaround - set enabled_backends value 03
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Restart cinder volume
+ cmd: |
+ salt -C 'I@cinder:controller' service.restart cinder-volume;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
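Note: the "Set disks" through "Restart cinder volume" steps hand-build an LVM backend for cinder on each controller: script fdisk to create one partition on /dev/vdb, register it as a physical volume, aggregate it into the cinder-volumes volume group the LVM driver expects, install cinder-volume plus crudini, point enabled_backends at lvm, and restart the service. The same sequence collapsed into one per-node step for readability (a sketch equivalent, not a replacement):

    - description: Prepare the LVM backend for cinder-volume (consolidated sketch)
      cmd: |
        echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb    # scripted partitioning, as above
        pvcreate /dev/vdb1                           # register the LVM physical volume
        vgcreate cinder-volumes /dev/vdb1            # VG name the LVM driver expects
        apt-get install -y cinder-volume crudini
        crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm
        service cinder-volume restart
      node_name: {{ HOSTNAME_CTL01 }}
      retry: {count: 1, delay: 30}
      skip_fail: false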
+
+- description: Upload cirros image on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 30}
+ skip_fail: false
+
+- description: Create net04_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create router
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-create net04_router01'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set gateway
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Add interface
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Sync time
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+ 'service ntp stop; ntpd -gq; service ntp start'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Install docker.io on gtw
+ cmd: salt-call cmd.run 'apt-get install docker.io -y'
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create rc file on cfg
+ cmd: scp ctl01:/root/keystonercv3 /root
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Copy rc file
+ cmd: scp /root/keystonercv3 gtw01:/root
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
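Note: the two scp steps relay the keystonercv3 credentials file ctl01 -> cfg01 -> gtw01 so OpenStack clients can be driven from the gateway node. A quick smoke check of the relayed file (illustrative, not part of the change):

    - description: Verify the copied credentials work from gtw01 (sketch)
      cmd: . /root/keystonercv3; openstack endpoint list
      node_name: {{ HOSTNAME_GTW01 }}
      retry: {count: 1, delay: 5}
      skip_fail: false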
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/run_test.sh b/tcp_tests/templates/virtual-offline-pike-ovs/run_test.sh
new file mode 100755
index 0000000..976ce45
--- /dev/null
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/run_test.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+. /home/jenkins/fuel-devops30/bin/activate
+pip install -r ./tcp_tests/requirements.txt -U
+pip install psycopg2
+
+export ENV_NAME=virtual-offline-pike-ovs
+export VENV_PATH=/home/jenkins/fuel-devops30
+export IMAGE_PATH1604=/home/jenkins/images/xenial-server-cloudimg-amd64.qcow2
+export SHUTDOWN_ENV_ON_TEARDOWN=false
+export PYTHONIOENCODING=UTF-8
+export LAB_CONFIG_NAME=virtual-offline-pike-ovs
+export CLUSTER_NAME=virtual-offline-pike-ovs
+export REPOSITORY_SUITE=2018.1
+
+export TEST_GROUP=test_mcp_pike_ovs_install
+export RUN_TEMPEST=true
+
+# Offline deploy parameters
+export SALT_MODELS_REF_CHANGE=refs/changes/44/15144/1
+
+export BOOTSTRAP_TIMEOUT=1200
+
+export HOST_APT=10.170.0.226
+export HOST_SALTSTACK=10.170.0.226
+export HOST_ARCHIVE_UBUNTU=10.170.0.226
+export HOST_MIRROR_MCP_MIRANTIS=10.170.0.226
+export HOST_MIRROR_FUEL_INFRA=10.170.0.226
+export HOST_PPA_LAUNCHPAD=10.170.0.226
+
+export SALT_MODELS_SYSTEM_REPOSITORY=https://gerrit.mcp.mirantis.local.test/salt-models/reclass-system
+export SALT_FORMULAS_REPO=https://gerrit.mcp.mirantis.local.test/salt-formulas
+export FORMULA_REPOSITORY="deb [arch=amd64] http://apt.mirantis.local.test/ubuntu-xenial ${REPOSITORY_SUITE} salt extra"
+export FORMULA_GPG="http://apt.mirantis.local.test/public.gpg"
+export SALT_REPOSITORY="deb [arch=amd64] http://apt.mirantis.local.test/ubuntu-xenial/ ${REPOSITORY_SUITE} salt/2016.3 main"
+export SALT_GPG="http://apt.mirantis.local.test/public.gpg"
+export UBUNTU_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/ubuntu xenial main universe restricted"
+export UBUNTU_UPDATES_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/ubuntu xenial-updates main universe restricted"
+export UBUNTU_SECURITY_REPOSITORY="deb http://mirror.mcp.mirantis.local.test/ubuntu xenial-security main universe restricted"
+
+cd tcp_tests
+py.test -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml -k ${TEST_GROUP}
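Note: the HOST_* exports point every upstream hostname at the single offline mirror 10.170.0.226; they are consumed inside the templates through os_env with the public host as fallback, e.g. the placeholder-substitution pattern used in the apt01 user-data below:

    - find /srv/pillar/ -type f -exec sed -i "s/==HOST_APT==/{{ os_env('HOST_APT', 'apt.mirantis.com') }}/g" {} +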
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/salt.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/salt.yaml
new file mode 100644
index 0000000..4aee7a9
--- /dev/null
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/salt.yaml
@@ -0,0 +1,88 @@
+{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_APT01 with context %}
+{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'virtual-offline-pike-ovs/underlay.yaml' import HOSTNAME_PRX01 with context %}
+{% from 'virtual-offline-pike-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'virtual-offline-pike-ovs/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.local.test/salt-models/mcp-virtual-lab') %}
+# For other salt model repository parameters, see shared-salt.yaml
+
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_CONTROL_PREFIX with context %}
+
+#- description: 'Generate nginx cert'
+# cmd: |
+# openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 \
+# -subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=www.gerrit.com" \
+# -keyout ssl-nginx.key -out ssl-nginx.crt;
+# node_name: {{ HOSTNAME_APT01 }}
+# retry: {count: 1, delay: 5}
+# skip_fail: false
+
+- description: Check nginx on APT node is ready
+ cmd: systemctl status nginx;
+ node_name: {{ HOSTNAME_APT01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check dnsmasq on APT node is ready
+ cmd: systemctl status dnsmasq;
+ node_name: {{ HOSTNAME_APT01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CTL01) }}
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CTL02) }}
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CTL03) }}
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CMP01) }}
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CMP02) }}
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_GTW01) }}
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_PRX01) }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+- description: 'Workaround for local_repo_url - point it at the offline image repository structure'
+ cmd: |
+ find /srv/salt/reclass/classes/cluster -type f -exec sed -i 's/local_repo_url: .*/local_repo_url: mirror.mcp.mirantis.local.test/g' {} +
+ find /srv/salt/reclass/classes/cluster -type f -exec sed -i 's/aptly_server_address: .*/aptly_server_address: {{ os_env('HOST_APT') }}/g' {} +
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns"') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+- description: Hack gtw node
+ cmd: salt 'gtw*' cmd.run "ip addr del {{ IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Hack cmp01 node
+ cmd: salt 'cmp01*' cmd.run "ip addr del {{ IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Hack cmp02 node
+ cmd: salt 'cmp02*' cmd.run "ip addr del {{ IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
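Note: the three "Hack" steps strip the devops-reserved control-network addresses (.110, .105, .106 match the ip_reserved offsets in underlay.yaml below) from ens4, presumably so the addressing rendered by the cluster model can take over the interface. Generic form, with <node> and <offset> as placeholders:

    - description: Drop the reserved address so the model can reassign ens4 (sketch)
      cmd: salt '<node>*' cmd.run "ip addr del {{ IPV4_NET_CONTROL_PREFIX }}.<offset>/24 dev ens4; ip addr flush dev ens4";
      node_name: {{ HOSTNAME_CFG01 }}
      retry: {count: 1, delay: 10}
      skip_fail: false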
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/underlay--meta-data.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+ instance-id: iid-local1
+ hostname: {hostname}
+ local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-apt01.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-apt01.yaml
new file mode 100644
index 0000000..16c439e
--- /dev/null
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-apt01.yaml
@@ -0,0 +1,116 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Block access to SSH while node is preparing
+ - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ # Prepare network connection
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+ - sudo ifup ens4
+ - sudo ifup ens5
+ - sudo ifup ens6
+
+ - rm /etc/resolv.conf
+ - touch /etc/resolv.conf
+ - export LOCAL_DNS_IP=$(ifconfig ens3 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1)
+ - echo "nameserver $LOCAL_DNS_IP" >> /etc/resolv.conf;
+ - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+ - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
+ - echo "supersede domain-name-servers $LOCAL_DNS_IP, 8.8.8.8, 172.18.208.44" >> /etc/dhcp/dhclient.conf
+ - export TERM=linux
+ - export LANG=C
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+
+ ############## Cloud repo01 node ##################
+ - which wget >/dev/null || (apt-get update; apt-get install -y wget);
+ - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+ - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+ - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+ - eatmydata apt-get clean && apt-get update
+
+ # Install common packages
+ - eatmydata apt-get install -y salt-minion python-pip git curl tmux byobu iputils-ping traceroute htop tree ntp;
+ - openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 -subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=gerrit.mcp.mirantis.local.test" -keyout /root/ssl-nginx.key -out /root/ssl-nginx.crt;
+ - cd /tmp;
+ - git clone https://github.com/TatyankaLeontovich/salt-formula-nginx;
+ - git clone https://github.com/TatyankaLeontovich/salt-dnsmasq;
+ - git clone https://github.com/TatyankaLeontovich/underpillar;
+ - mkdir -p /srv/pillar/;
+ - mkdir -p /srv/salt;
+ - cd /srv/salt;
+ - ln -s /tmp/salt-formula-nginx/nginx;
+ - ln -s /tmp/salt-dnsmasq/dnsmasq;
+ - cp /tmp/underpillar/pillar/*.sls /srv/pillar/;
+ - cp /tmp/underpillar/states/*.sls /srv/salt/;
+ - cp /srv/pillar/top_localdns.sls /srv/pillar/top.sls;
+ - cp /srv/salt/top_localdns.sls /srv/salt/top.sls;
+ - find /srv/pillar/ -type f -exec sed -i "s/==LOCAL_DNS_IP==/${LOCAL_DNS_IP}/g" {} +
+ - find /srv/pillar/ -type f -exec sed -i "s/==HOST_APT==/{{ os_env('HOST_APT', 'apt.mirantis.com') }}/g" {} +
+ - find /srv/pillar/ -type f -exec sed -i "s/==HOST_SALTSTACK==/{{ os_env('HOST_SALTSTACK', 'repo.saltstack.com') }}/g" {} +
+ - find /srv/pillar/ -type f -exec sed -i "s/==HOST_ARCHIVE_UBUNTU==/{{ os_env('HOST_ARCHIVE_UBUNTU', 'archive.ubuntu.com') }}/g" {} +
+ - find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_MCP_MIRANTIS==/{{ os_env('HOST_MIRROR_MCP_MIRANTIS', 'mirror.mcp.mirantis.net') }}/g" {} +
+ - find /srv/pillar/ -type f -exec sed -i "s/==HOST_MIRROR_FUEL_INFRA==/{{ os_env('HOST_MIRROR_FUEL_INFRA', 'mirror.fuel-infra.org') }}/g" {} +
+ - find /srv/pillar/ -type f -exec sed -i "s/==HOST_PPA_LAUNCHPAD==/{{ os_env('HOST_PPA_LAUNCHPAD', 'ppa.launchpad.net') }}/g" {} +
+ - find /srv/pillar/ -type f -exec sed -i "s/==HOST_GERRIT_MCP_MIRANTIS==/{{ os_env('HOST_GERRIT_MCP_MIRANTIS', 'gerrit.mcp.mirantis.net') }}/g" {} +
+ - salt-call --local --state-output=mixed state.sls dnsmasq;
+ - salt-call --local --state-output=mixed state.sls nginx;
+ ########################################################
+ # Node is ready, allow SSH access
+ - echo "Allow SSH access ..."
+ - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ ########################################################
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+ auto ens4
+ iface ens4 inet dhcp
+ auto ens5
+ iface ens5 inet dhcp
+ auto ens6
+ iface ens6 inet dhcp
+
+ - path: /root/.ssh/config
+ owner: root:root
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 300
+ ServerAliveCountMax 10
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
\ No newline at end of file
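Note: apt01 bootstraps itself masterless: it clones the nginx/dnsmasq formulas and pillar skeleton from GitHub, rewrites the ==HOST_*== placeholders from the environment (public hosts as fallback), and applies the states with salt-call --local, which renders /srv/salt and /srv/pillar without a salt master. The two state runs at the end could equally be one comma-separated call:

    # Masterless apply of both mirror services (sketch)
    - salt-call --local --state-output=mixed state.sls dnsmasq,nginx;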
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..79adad5
--- /dev/null
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data-cfg01.yaml
@@ -0,0 +1,75 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Block access to SSH while node is preparing
+ - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ # Prepare network connection
+ - sudo ifdown ens3
+ - sudo ip r d default || true # remove existing default route to get it from dhcp
+ - sudo ifup ens3
+
+ #- sudo route add default gw {gateway} {interface_name}
+ - rm /etc/resolv.conf
+ - touch /etc/resolv.conf
+ - LOCAL_IP=$(ifconfig ens3 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1 | cut -d"." -f1-3)
+ - export DNS_IP=$LOCAL_IP".122"
+ - echo "nameserver $DNS_IP" >> /etc/resolv.conf;
+ - echo "nameserver $LOCAL_IP.1" >> /etc/resolv.conf;
+ - echo "supersede domain-name-servers $DNS_IP" >> /etc/dhcp/dhclient.conf
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ ############## TCP Cloud cfg01 node ##################
+ - echo "Preparing base OS"
+ - sleep 160;
+ # find /etc/apt/ -type f -exec sed -i "s/ubuntu.com/ubuntu.local.test/g" {} +;
+ ########################################################
+ # Node is ready, allow SSH access
+ - echo "Allow SSH access ..."
+ - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ ########################################################
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
+ - path: /root/.ssh/config
+ owner: root:root
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 300
+ ServerAliveCountMax 10
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data1604.yaml
new file mode 100644
index 0000000..5a02d24
--- /dev/null
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/underlay--user-data1604.yaml
@@ -0,0 +1,72 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Block access to SSH while node is preparing
+ - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ # Prepare network connection
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ - rm /etc/resolv.conf
+ - touch /etc/resolv.conf
+ - LOCAL_IP=$(ifconfig ens3 | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1 | cut -d"." -f1-3)
+ - export DNS_IP=$LOCAL_IP".122"
+ - echo "nameserver $DNS_IP" >> /etc/resolv.conf;
+ - echo "nameserver $LOCAL_IP.1" >> /etc/resolv.conf;
+ - echo "supersede domain-name-servers $DNS_IP" >> /etc/dhcp/dhclient.conf
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ ############## TCP Cloud cfg01 node ##################
+ - echo "Preparing base OS"
+ # find /etc/apt/ -type f -exec sed -i "s/ubuntu.com/ubuntu.local.test/g" {} +;
+ ########################################################
+ # Node is ready, allow SSH access
+ - echo "Allow SSH access ..."
+ - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ ########################################################
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
+ - path: /root/.ssh/config
+ owner: root:root
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 300
+ ServerAliveCountMax 10
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs/underlay.yaml b/tcp_tests/templates/virtual-offline-pike-ovs/underlay.yaml
new file mode 100644
index 0000000..bf8e8f6
--- /dev/null
+++ b/tcp_tests/templates/virtual-offline-pike-ovs/underlay.yaml
@@ -0,0 +1,469 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% import 'virtual-offline-pike-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'virtual-offline-pike-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'virtual-offline-pike-ovs/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+{% import 'virtual-offline-pike-ovs/underlay--user-data-apt01.yaml' as CLOUDINIT_USER_DATA_APT01 with context %}
+
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
+ - &cloudinit_user_data_apt01 {{ CLOUDINIT_USER_DATA_APT01 }}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-offline-pike-ovs') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
+{% set HOSTNAME_APT01 = os_env('HOSTNAME_APT01', 'apt01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', 'virtual-offline-pike-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+ address_pools:
+ private-pool01:
+ net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+ params:
+ ip_reserved:
+ l2_network_device: +1
+ default_{{ HOSTNAME_APT01 }}: +122
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
+
+ admin-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +122
+ l2_network_device: +1
+ default_{{ HOSTNAME_APT01 }}: +122
+ default_{{ HOSTNAME_CFG01 }}: +90
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
+
+ tenant-pool01:
+ net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+ params:
+ ip_reserved:
+ l2_network_device: +1
+ default_{{ HOSTNAME_APT01 }}: +122
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+10, -10]
+
+ external-pool01:
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+ params:
+ ip_reserved:
+ l2_network_device: +1
+ default_{{ HOSTNAME_APT01 }}: +122
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
+
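Note: fuel-devops address pools use signed offsets into the allocated subnet: +N counts from the network address (so default_<hostname>: +122 pins that node to .122 in the carved /24), -N counts back from the end, and dhcp: [+90, -10] bounds the dynamic range between those two offsets. Sketch of the notation, mirroring the private pool above:

    ip_reserved:
      l2_network_device: +1                # gateway/bridge gets .1
      default_{{ HOSTNAME_CFG01 }}: +100   # cfg01 pinned to .100
    ip_ranges:
      dhcp: [+90, -10]                     # leases from .90 to 10 addresses below the top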
+ groups:
+ - name: default
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+ use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+ network_pools:
+ admin: admin-pool01
+ private: private-pool01
+ tenant: tenant-pool01
+ external: external-pool01
+
+ l2_network_devices:
+ private:
+ address_pool: private-pool01
+ dhcp: true
+
+ admin:
+ address_pool: admin-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
+ tenant:
+ address_pool: tenant-pool01
+ dhcp: true
+
+ external:
+ address_pool: external-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
+ group_volumes:
+ - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
+ source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ format: qcow2
+ - name: cfg01_day01_image # Pre-configured day01 image
+ source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+ format: qcow2
+ - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
+ source_image: !os_env MCP_IMAGE_PATH1604
+ format: qcow2
+
+ nodes:
+ - name: {{ HOSTNAME_APT01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_apt01
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ - label: ens5
+ l2_network_device: tenant
+ interface_model: *interface_model
+ - label: ens6
+ l2_network_device: external
+ interface_model: *interface_model
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+ ens5:
+ networks:
+ - tenant
+ ens6:
+ networks:
+ - external
+
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cfg01_day01_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_cfg01
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+
+ - name: {{ HOSTNAME_CTL01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: &interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config: &network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+
+ - name: {{ HOSTNAME_CTL02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                     # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CTL03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                     # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_PRX01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                     # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CMP01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                     # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
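+      # cmp01 is wired to all four networks; the '&all_interfaces' and
+      # '&all_network_config' anchors defined here are reused by cmp02 and gtw01.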
+ interfaces: &all_interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ - label: ens5
+ l2_network_device: tenant
+ interface_model: *interface_model
+ - label: ens6
+ l2_network_device: external
+ interface_model: *interface_model
+ network_config: &all_network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+ ens5:
+ networks:
+ - tenant
+ ens6:
+ networks:
+ - external
+
+ - name: {{ HOSTNAME_CMP02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                     # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_GTW01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 8
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                     # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/openstack.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/openstack.yaml
index 0b3825d..2ed45be 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/openstack.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/openstack.yaml
@@ -51,7 +51,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@keystone:client' state.sls keystone.client
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 2, delay: 5}
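+  # the first keystone.client run can fail transiently right after deployment, so one retry is allowed here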
skip_fail: false
- description: Check keystone service-list
@@ -164,7 +164,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
+ skip_fail: false
- description: Check IP on computes
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml
index 990136b..6cdf0de 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml
@@ -19,6 +19,12 @@
{{ SHARED.MACRO_GENERATE_INVENTORY() }}
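+# OVS-DPDK guests back their memory with hugepages, so 2048 pages (2 MB each
+# by default, i.e. 4 GB) are pre-allocated on the cmp nodes before the
+# minions are bootstrapped.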
+- description: Enable hugepages on cmp nodes
+ cmd: salt 'cmp*' cmd.run "apt-get install -y hugepages; echo 2048 > /proc/sys/vm/nr_hugepages";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
- description: Hack gtw node
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data-cfg01.yaml
index 6448211..da7908d 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data-cfg01.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -44,35 +42,7 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
-
- echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
- - echo "nameserver 8.8.8.8" >> /etc/resolv.conf;
- - which wget >/dev/null || (apt-get update; apt-get install -y wget);
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - apt-get update
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
write_files:
- path: /etc/network/interfaces
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data1604.yaml
index b416c16..3fbb777 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data1604.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data1604.yaml
@@ -18,8 +18,6 @@
expire: False
bootcmd:
- # Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- service sshd restart
@@ -44,37 +42,6 @@
- swapon /swapfile
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
-
- ############## TCP Cloud cfg01 node ##################
- #- sleep 120
- - echo "Preparing base OS"
- - which wget >/dev/null || (apt-get update; apt-get install -y wget)
-
- # Configure Ubuntu mirrors
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
- - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
-
- - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
- - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
- - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
- - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
-
- - apt-get clean
- - eatmydata apt-get update && apt-get -y upgrade
-
- # Install common packages
- - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc hugepages
-
- # Enable on nodes hugepages
- - echo 2048 > /proc/sys/vm/nr_hugepages
-
- ########################################################
- # Node is ready, allow SSH access
- - echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- ########################################################
-
write_files:
- path: /etc/network/interfaces
content: |
diff --git a/tcp_tests/tests/environment/test_generate_model.py b/tcp_tests/tests/environment/test_generate_model.py
new file mode 100644
index 0000000..7fc02bf
--- /dev/null
+++ b/tcp_tests/tests/environment/test_generate_model.py
@@ -0,0 +1,79 @@
+# Copyright 2017 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import pytest
+import os
+
+from tcp_tests import logger
+from tcp_tests import settings
+from tcp_tests.helpers import ext
+
+LOG = logger.logger
+
+
+class TestGenerateModel(object):
+ """Test class that generates and checks a cluster model"""
+
+ @pytest.mark.fail_snapshot
+ def test_generate_model(self, config, underlay, salt_actions):
+ config.salt.salt_master_host = \
+ underlay.host_by_node_role(
+ node_role=ext.UNDERLAY_NODE_ROLES.salt_master)
+
+ commands = underlay.read_template(config.salt_deploy.salt_steps_path)
+ salt_actions.install(commands)
+
+ node_name = underlay.get_target_node_names('cfg01')[0]
+ arch_model_name = 'model_{0}.tar.gz'.format(settings.ENV_NAME)
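+        # classes/service and classes/system are shared reclass classes
+        # (pulled in from salt formulas and the system-level repo), so the
+        # commands below strip them to keep only the generated cluster model.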
+ commands = [
+ {
+ 'description': "Copy generated model to /root/reclass",
+ 'node_name': node_name,
+ 'cmd': ('cp -r /srv/salt/reclass/ /root/;'
+ 'rm -rf /root/reclass/classes/service/;'
+ 'rm -rf /root/reclass/classes/system/;')},
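+            # tar's --warning=no-file-changed flag below keeps the step from
+            # failing if a reclass file changes while it is being read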
+ {
+                'description': ("Archive the model to /root/{0}"
+ .format(arch_model_name)),
+ 'node_name': node_name,
+ 'cmd': ('cd /root/reclass/;'
+ 'tar --warning=no-file-changed -czf /root/{0} ./'
+ .format(arch_model_name))},
+ {
+ 'description': "Download the model to the host",
+ 'node_name': node_name,
+ 'download': {
+ 'remote_path': '/root/',
+ 'remote_filename': arch_model_name,
+ 'local_path': os.getcwd()
+ }},
+ ]
+
+ salt_actions.execute_commands(commands=commands,
+ label="Get the model from the cfg01")
+
+
+# tar_cmd = (
+# 'cp -r /srv/salt/reclass/ /root/;'
+# 'rm -rf /root/reclass/classes/service/;'
+# 'rm -rf /root/reclass/classes/system/;'
+# 'cd /root/reclass/;'
+# 'tar --warning=no-file-changed -czf /root/model_{0}.tar.gz ./'
+# .format(settings.ENV_NAME))
+# with underlay.remote(host=config.salt.salt_master_host) as r:
+# r.check_call(tar_cmd, verbose=True)
+# r.download('model_{0}.tar.gz'.format(settings.ENV_NAME),
+# os.getcwd())
+
+ LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_failover_ceph.py b/tcp_tests/tests/system/test_failover_ceph.py
new file mode 100644
index 0000000..4a68705
--- /dev/null
+++ b/tcp_tests/tests/system/test_failover_ceph.py
@@ -0,0 +1,244 @@
+# Copyright 2017 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import pytest
+
+from tcp_tests import logger
+
+LOG = logger.logger
+
+
+class TestFailoverCeph(object):
+ """Test class for testing MCP ceph failover"""
+
+ def get_ceph_health(self, underlay, node_names):
+ """Get ceph health on the specified nodes
+
+ Returns the dict {<node_name>: <str>, }
+ where <str> is the 'ceph -s' output
+ """
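+        # raise_on_err=False: a node that was just restarted may be
+        # temporarily unreachable, so collect whatever output is available
+        # instead of failing the whole check.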
+ res = {
+ node_name: underlay.check_call("ceph -s",
+ node_name=node_name,
+ raise_on_err=False)['stdout_str']
+ for node_name in node_names
+ }
+ return res
+
+ def show_failed_msg(self, failed):
+ return "There are failed tempest tests:\n\n {0}".format(
+ '\n\n '.join([(name + ': ' + detail)
+ for name, detail in failed.items()]))
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_restart_osd_node(self, func_name, underlay, config,
+ openstack_deployed, ceph_deployed,
+ openstack_actions,
+ rally, show_step):
+ """Test restart ceph osd node
+
+ Scenario:
+ 1. Find ceph osd nodes
+ 2. Check ceph health before restart
+ 3. Restart 1 ceph osd node
+ 4. Check ceph health after restart
+ 5. Run tempest smoke after failover
+ 6. Check tempest report for failed tests
+
+        Requirements:
+ - Salt cluster
+ - OpenStack cluster
+ - Ceph cluster
+ """
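+        # Force a one-shot time sync (ntpd -gq) on all nodes so the ceph
+        # health checks and tempest run are not skewed by clock drift.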
+ openstack_actions._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+ # STEP #1
+ show_step(1)
+ osd_node_names = underlay.get_target_node_names(
+ target='osd')
+
+ # STEP #2
+ show_step(2)
+ # Get the ceph health output before restart
+ health_before = self.get_ceph_health(underlay, osd_node_names)
+        assert all(["OK" in p for n, p in health_before.items()]), (
+            "Ceph health is not ok from node: {0}".format(health_before))
+
+ # STEP #3
+ show_step(3)
+ openstack_actions.warm_restart_nodes('osd01')
+
+ openstack_actions._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+
+ # STEP #4
+ show_step(4)
+ # Get the ceph health output after restart
+ health_after = self.get_ceph_health(underlay, osd_node_names)
+        assert all(["OK" in p for n, p in health_after.items()]), (
+            "Ceph health is not ok from node: {0}".format(health_after))
+
+ rally.run_container()
+
+ # STEP #5
+ show_step(5)
+ results = rally.run_tempest(pattern='set=smoke',
+ conf_name='/var/lib/ceph_mcp.conf',
+ report_prefix=func_name,
+ designate_plugin=False,
+ timeout=1800)
+ # Step #6
+ show_step(6)
+ assert not results['fail'], self.show_failed_msg(results['fail'])
+
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_restart_cmn_node(self, func_name, underlay, config,
+ openstack_deployed, ceph_deployed,
+ common_services_actions,
+ salt_actions, openstack_actions,
+ rally, show_step):
+ """Test restart ceph cmn node
+
+ Scenario:
+ 1. Find ceph cmn nodes
+ 2. Check ceph health before restart
+ 3. Restart 1 ceph cmn node
+ 4. Check ceph health after restart
+ 5. Run tempest smoke after failover
+ 6. Check tempest report for failed tests
+
+        Requirements:
+ - Salt cluster
+ - OpenStack cluster
+ - Ceph cluster
+ """
+ openstack_actions._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+ # STEP #1
+ show_step(1)
+ cmn_node_names = underlay.get_target_node_names(
+ target='cmn')
+
+ # STEP #2
+ show_step(2)
+ # Get the ceph health output before restart
+ health_before = self.get_ceph_health(underlay, cmn_node_names)
+        assert all(["OK" in p for n, p in health_before.items()]), (
+            "Ceph health is not ok from node: {0}".format(health_before))
+
+ # STEP #3
+ show_step(3)
+ openstack_actions.warm_restart_nodes('cmn01')
+
+ openstack_actions._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+
+ # STEP #4
+ show_step(4)
+ # Get the ceph health output after restart
+ health_after = self.get_ceph_health(underlay, cmn_node_names)
+        assert all(["OK" in p for n, p in health_after.items()]), (
+            "Ceph health is not ok from node: {0}".format(health_after))
+
+ rally.run_container()
+
+ # STEP #5
+ show_step(5)
+ results = rally.run_tempest(pattern='set=smoke',
+ conf_name='/var/lib/ceph_mcp.conf',
+ report_prefix=func_name,
+ designate_plugin=False,
+ timeout=1800)
+ # Step #6
+ show_step(6)
+ assert not results['fail'], self.show_failed_msg(results['fail'])
+
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_restart_rgw_node(self, func_name, underlay, config,
+ openstack_deployed, ceph_deployed,
+ common_services_actions,
+ salt_actions, openstack_actions,
+ rally, show_step):
+ """Test restart ceph rgw node
+
+ Scenario:
+ 1. Find ceph rgw nodes
+ 2. Check ceph health before restart
+ 3. Restart 1 ceph rgw node
+ 4. Check ceph health after restart
+ 5. Run tempest smoke after failover
+ 6. Check tempest report for failed tests
+
+        Requirements:
+ - Salt cluster
+ - OpenStack cluster
+ - Ceph cluster
+ """
+ openstack_actions._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+
+ # STEP #1
+ show_step(1)
+ rgw_node_names = underlay.get_target_node_names(
+ target='rgw')
+ if not rgw_node_names:
+            pytest.skip('Skip as there are no rgw nodes in the deployment')
+
+ # STEP #2
+ show_step(2)
+ # Get the ceph health output before restart
+ health_before = self.get_ceph_health(underlay, rgw_node_names)
+        assert all(["OK" in p for n, p in health_before.items()]), (
+            "Ceph health is not ok from node: {0}".format(health_before))
+
+ # STEP #3
+ show_step(3)
+ openstack_actions.warm_restart_nodes('rgw01')
+
+ openstack_actions._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+
+ # STEP #4
+ show_step(4)
+ # Get the ceph health output after restart
+ health_after = self.get_ceph_health(underlay, rgw_node_names)
+        assert all(["OK" in p for n, p in health_after.items()]), (
+            "Ceph health is not ok from node: {0}".format(health_after))
+
+ rally.run_container()
+
+ # STEP #5
+ show_step(5)
+ results = rally.run_tempest(pattern='set=smoke',
+ conf_name='/var/lib/ceph_mcp.conf',
+ designate_plugin=False,
+ report_prefix=func_name,
+ timeout=1800)
+ # Step #6
+ show_step(6)
+ assert not results['fail'], self.show_failed_msg(results['fail'])
+
+ LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_install_cookied_ocata.py b/tcp_tests/tests/system/test_install_cookied_ocata.py
index 22f4b93..0678365 100644
--- a/tcp_tests/tests/system/test_install_cookied_ocata.py
+++ b/tcp_tests/tests/system/test_install_cookied_ocata.py
@@ -15,6 +15,7 @@
import pytest
from tcp_tests import logger
+from tcp_tests import settings
LOG = logger.logger
@@ -34,6 +35,9 @@
3. Setup compute nodes
"""
+ if settings.RUN_TEMPEST:
+ openstack_deployed.run_tempest(pattern=settings.PATTERN)
+ openstack_deployed.download_tempest_report()
LOG.info("*************** DONE **************")
@pytest.mark.grab_versions
@@ -47,6 +51,9 @@
3. Setup compute nodes
"""
+ if settings.RUN_TEMPEST:
+ openstack_deployed.run_tempest(pattern=settings.PATTERN)
+ openstack_deployed.download_tempest_report()
LOG.info("*************** DONE **************")
@pytest.mark.grab_versions
@@ -60,12 +67,18 @@
2. Setup CICD nodes
3. Setup OpenStack
4. Setup StackLight v2
- 5. Get monitoring nodes
- 6. Check that docker services are running
- 7. Check current prometheus targets are UP
- 8. Run SL component tests
- 9. Download SL component tests report
+ 5. Run Tempest for OpenStack cluster
+ 6. Get monitoring nodes
+ 7. Check that docker services are running
+ 8. Check current prometheus targets are UP
+ 9. Run SL component tests
+ 10. Download SL component tests report
"""
+ show_step(5)
+ if settings.RUN_TEMPEST:
+ openstack_deployed.run_tempest(pattern=settings.PATTERN)
+ openstack_deployed.download_tempest_report()
+
expected_service_list = ['monitoring_remote_storage_adapter',
'monitoring_server',
'monitoring_remote_agent',
@@ -73,23 +86,23 @@
'monitoring_alertmanager',
'monitoring_remote_collector',
'monitoring_pushgateway']
- show_step(5)
+ show_step(6)
mon_nodes = sl_deployed.get_monitoring_nodes()
LOG.debug('Mon nodes list {0}'.format(mon_nodes))
- show_step(6)
+ show_step(7)
sl_deployed.check_docker_services(mon_nodes, expected_service_list)
- show_step(7)
+ show_step(8)
sl_deployed.check_prometheus_targets(mon_nodes)
- show_step(8)
+ show_step(9)
# Run SL component tetsts
sl_deployed.run_sl_functional_tests(
'cfg01',
'/root/stacklight-pytest/stacklight_tests/tests/prometheus')
- show_step(9)
+ show_step(10)
# Download report
sl_deployed.download_sl_test_report(
'cfg01',
diff --git a/tcp_tests/tests/system/test_install_mcp11_ovs_newton.py b/tcp_tests/tests/system/test_install_mcp11_ovs_newton.py
index 9235edb..c6a1f8c 100644
--- a/tcp_tests/tests/system/test_install_mcp11_ovs_newton.py
+++ b/tcp_tests/tests/system/test_install_mcp11_ovs_newton.py
@@ -21,7 +21,7 @@
@pytest.mark.deploy
-class TestMcp11Install(object):
+class TestMcp11NewtonInstall(object):
"""Test class for testing mcp11 vxlan deploy"""
@pytest.mark.grab_versions
diff --git a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
index 299c7af..b1adedc 100644
--- a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
+++ b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
@@ -137,9 +137,10 @@
'/root/stacklight-pytest/stacklight_tests/report.xml')
LOG.info("*************** DONE **************")
+ @pytest.mark.grab_versions
@pytest.mark.fail_snapshot
def test_mcp11_pike_dpdk_install(self, underlay, openstack_deployed,
- show_step):
+ show_step, openstack_actions):
"""Test for deploying an mcp dpdk environment and check it
Scenario:
1. Prepare salt on hosts
@@ -147,3 +148,10 @@
3. Setup compute nodes
"""
-        LOG.info("*************** DONE **************")
+ openstack_actions._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+
+ if settings.RUN_TEMPEST:
+ openstack_actions.run_tempest(pattern=settings.PATTERN)
+ openstack_actions.download_tempest_report()
+ LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_install_mcp_sl_os.py b/tcp_tests/tests/system/test_install_mcp_sl_os.py
new file mode 100644
index 0000000..04d769f
--- /dev/null
+++ b/tcp_tests/tests/system/test_install_mcp_sl_os.py
@@ -0,0 +1,79 @@
+# Copyright 2018 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import pytest
+
+from tcp_tests import logger
+from tcp_tests import settings
+
+LOG = logger.logger
+
+
+@pytest.mark.deploy
+class TestMcpInstallStacklightOpenstack(object):
+    """Test class for testing MCP deployment with OpenStack and StackLight"""
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_mcp_os_install(self, underlay, openstack_deployed,
+ openstack_actions):
+ """Test for deploying an mcp environment and check it
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Run tempest
+
+ """
+ openstack_actions._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+
+ if settings.RUN_TEMPEST:
+ openstack_actions.run_tempest(pattern=settings.PATTERN)
+ openstack_actions.download_tempest_report()
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_mcp_sl_os_install(self, underlay, config, openstack_deployed,
+ sl_deployed):
+ """Test for deploying an mcp environment and check it
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+            4. Get monitoring nodes
+            5. Check current prometheus targets are UP
+            6. Run SL component tests
+            7. Download SL component tests report
+ """
+ mon_nodes = sl_deployed.get_monitoring_nodes()
+ LOG.debug('Mon nodes list {0}'.format(mon_nodes))
+
+ sl_deployed.check_prometheus_targets(mon_nodes)
+
+        # Run SL component tests
+ sl_deployed.run_sl_functional_tests(
+ 'cfg01',
+ '/root/stacklight-pytest/stacklight_tests/',
+ 'tests/prometheus',
+ 'test_alerts.py')
+
+ # Download report
+ sl_deployed.download_sl_test_report(
+ 'cfg01',
+ '/root/stacklight-pytest/stacklight_tests/report.xml')
+ LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_offline.py b/tcp_tests/tests/system/test_offline.py
new file mode 100644
index 0000000..e94188a
--- /dev/null
+++ b/tcp_tests/tests/system/test_offline.py
@@ -0,0 +1,154 @@
+# Copyright 2017 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+# import pytest
+
+from tcp_tests import logger
+from tcp_tests.managers.jenkins.client import JenkinsClient
+
+LOG = logger.logger
+
+
+class TestOfflineDeployment(object):
+    """Test class for offline MCP deployment from day01 images"""
+
+ def test_deploy_day1(self, show_step, underlay, common_services_deployed,
+ salt_deployed):
+ """Test for deploying an mcp from day01 images
+
+ Scenario:
+ 1. Approve local ssh key to jenkins
+ 2. Boot CFG and APT virtual machines
+ 3. Setup jq
+            4. Wait for the salt master
+            5. Apply additional MaaS configuration
+            6. Wait for the dhcpd server
+            7. Start commissioning the nodes via MaaS
+            8. Wait for MaaS to commission the nodes
+            9. Start deploying the nodes via MaaS
+            10. Wait for MaaS to deploy the nodes
+ 11. Accept all keys
+ 12. Run deploy OS job
+
+ """
+ # group = hardware._get_default_node_group()
+ nodes = underlay.node_names()
+ LOG.info("Nodes - {}".format(nodes))
+ cfg_node = 'cfg01.offline-ocata-vxlan.local'
+ verbose = True
+
+ # show_step(1)
+ # cmd = ("mkdir -p /var/lib/jenkins/.ssh && "
+ # "ssh-keyscan cfg01 > /var/lib/jenkins/.ssh/known_hosts && "
+ # "chown jenkins /var/lib/jenkins/.ssh/known_hosts")
+ # underlay.check_call(
+ # node_name=cfg_node, verbose=verbose,
+ # cmd=cmd)
+
+ # show_step(2)
+ # underlay.check_call(node_name=cfg_node, verbose=verbose,
+ # cmd='salt-key')
+
+ # show_step(3)
+ # underlay.check_call(node_name=cfg_node, verbose=verbose,
+ # cmd='apt install -y jq')
+
+ show_step(4)
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd="""timeout 300s /bin/bash -c 'while ! salt-call test.ping; do echo "salt master still isnt running"; sleep 10; done'""") # noqa
+
+ show_step(5)
+ underlay.check_call(node_name=cfg_node, verbose=verbose,
+ cmd='salt-call saltutil.sync_all')
+ underlay.check_call(node_name=cfg_node, verbose=verbose,
+ cmd='salt-call state.sls maas.region')
+ underlay.check_call(
+ node_name=cfg_node, verbose=verbose,
+ cmd='maas logout mirantis && '
+ 'maas login mirantis '
+ 'http://localhost/MAAS/api/2.0/ '
+ 'FTvqwe7ybBp68gPar2:5mcctTAXVL8mns4ef4:zrA9LZwu2tMc8BAZpsPUfwWwTyQnAtDN') # noqa
+
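+        # Register a dynamic IP range for MAAS-managed nodes, then enable
+        # DHCP on the fabric VLAN with cfg01 as the primary rack controller.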
+ underlay.check_call(
+ node_name=cfg_node, verbose=verbose,
+ cmd="maas mirantis ipranges create "
+ "type=dynamic start_ip=10.10.191.255 end_ip=10.10.255.254 "
+ "subnet=$(maas mirantis subnets read | jq '.[] | select(.name==\"10.10.0.0/16\") | .id')") # noqa
+ underlay.check_call(node_name=cfg_node, verbose=verbose,
+ cmd="maas mirantis vlan update "
+ "$(maas mirantis subnets read | jq '.[] | select(.name==\"10.10.0.0/16\") | .vlan.fabric_id') " # noqa
+ "0 dhcp_on=True primary_rack='cfg01'")
+
+ underlay.check_call(
+ node_name=cfg_node, verbose=verbose,
+ cmd="ssh-keygen -y -f ~root/.ssh/id_rsa > ~root/.ssh/id_rsa.pub")
+ underlay.check_call(
+ node_name=cfg_node, verbose=verbose,
+ cmd='maas mirantis sshkeys create '
+ 'key="$(cat ~root/.ssh/id_rsa.pub)"')
+
+ show_step(6)
+ underlay.check_call(node_name=cfg_node, verbose=verbose,
+ cmd="""timeout 90s /bin/bash -c 'while ! pidof dhcpd; do echo "dhcpd still isnt running"; sleep 10; done'""") # noqa
+
+ show_step(7)
+ underlay.check_call(node_name=cfg_node, verbose=verbose,
+ cmd='salt-call state.sls maas.machines')
+ show_step(8)
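+        # Poll MaaS until all 10 machines report the 'Ready' status
+        # after commissioning.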
+ cmd = """ timeout 600s bash -c 'hosts=$(maas mirantis nodes read | jq -r ".[] | select(.node_type_name==\\"Machine\\") | select(.status_name==\\"Ready\\") | .hostname "); while ! [ $(echo "$hosts" | wc -w) -eq 10 ]; do echo "Ready hosts:\n$hosts"; sleep 30; hosts=$(maas mirantis nodes read | jq -r ".[] | select(.node_type_name==\\"Machine\\") | select(.status_name==\\"Ready\\") | .hostname "); done ' """ # noqa
+ underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
+ underlay.check_call(node_name=cfg_node, verbose=verbose,
+ cmd='salt-key')
+ show_step(9)
+ underlay.check_call(
+ node_name=cfg_node, verbose=verbose,
+ cmd='salt-call state.sls maas.machines.deploy')
+ show_step(10)
+ underlay.check_call(
+ node_name=cfg_node, verbose=verbose,
+ cmd='salt-call state.sls maas.machines.wait_for_deployed')
+ underlay.check_call(node_name=cfg_node, verbose=verbose,
+ cmd='salt-key')
+
+ show_step(11)
+ underlay.check_call(
+ node_name=cfg_node, verbose=verbose, expected=[0, 1],
+ cmd='salt-key -A -y --include-denied --include-rejected')
+ underlay.check_call(
+ node_name=cfg_node, verbose=verbose,
+ cmd='salt-key')
+
+ salt_api = \
+ salt_deployed.get_pillar(cfg_node, '_param:jenkins_salt_api_url')
+ salt_api = salt_api[0].get(cfg_node)
+
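+        # Trigger the 'deploy_openstack' Jenkins job with its default
+        # parameters, overriding only SALT_MASTER_URL, and poll the build
+        # until it finishes; run_build is expected to return the job name
+        # and build id that wait_end_of_build and build_info consume.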
+ show_step(12)
+ jenkins = JenkinsClient(
+ host='http://172.16.44.33:8081',
+ username='admin',
+ password='r00tme')
+ params = jenkins.make_defults_params('deploy_openstack')
+ params['SALT_MASTER_URL'] = salt_api
+ build = jenkins.run_build('deploy_openstack', params)
+
+ jenkins.wait_end_of_build(
+ name=build[0],
+ build_id=build[1],
+ timeout=60 * 60 * 2)
+
+ assert \
+ jenkins.build_info(
+ name=build[0], build_id=build[1])['result'] == 'SUCCESS', \
+            "Deploy openstack job failed"
diff --git a/tcp_tests/tests/system/test_opencontrail.py b/tcp_tests/tests/system/test_opencontrail.py
index 9e56780..fcd15f3 100644
--- a/tcp_tests/tests/system/test_opencontrail.py
+++ b/tcp_tests/tests/system/test_opencontrail.py
@@ -41,9 +41,9 @@
args='service ntp stop; ntpd -gq; service ntp start')
if settings.RUN_TEMPEST:
- # openstack_deployed.run_tempest(target='ctl01',
- # pattern=settings.PATTERN)
- openstack_deployed.download_tempest_report(stored_node='cfg01')
+ openstack_deployed.run_tempest(target='ctl01',
+ pattern=settings.PATTERN)
+ openstack_deployed.download_tempest_report(stored_node='ctl01')
LOG.info("*************** DONE **************")
# opencontrail.prepare_tests(
diff --git a/tcp_tests/tests/system/test_virtlet_actions.py b/tcp_tests/tests/system/test_virtlet_actions.py
index e0e8011..7fd593e 100644
--- a/tcp_tests/tests/system/test_virtlet_actions.py
+++ b/tcp_tests/tests/system/test_virtlet_actions.py
@@ -90,7 +90,8 @@
k8s_deployed.delete_vm(target_yaml)
@pytest.mark.grab_versions
- @pytest.mark.grab_virtlet_results(name='virtlet_conformance.log')
+ @pytest.mark.grab_virtlet_results(name=['virtlet_conformance.log',
+ 'report.xml'])
@pytest.mark.fail_snapshot
def test_virtlet_conformance(self, show_step, config, k8s_deployed,
virtlet_logs):