Merge "Update *-k8s-calico templates"
diff --git a/tcp_tests/helpers/utils.py b/tcp_tests/helpers/utils.py
index 15f9c8f..46bf9c8 100644
--- a/tcp_tests/helpers/utils.py
+++ b/tcp_tests/helpers/utils.py
@@ -18,6 +18,7 @@
import StringIO
import time
import traceback
+import signal
import jinja2
import paramiko
@@ -444,3 +445,23 @@
.format(top_fixtures_marks))
return top_fixtures_marks
+
+
+class RunLimit(object):
+ def __init__(self, seconds=60, error_message='Timeout'):
+ self.seconds = seconds
+ self.error_message = error_message
+
+ def handle_timeout(self, signum, frame):
+ raise TimeoutException(self.error_message)
+
+ def __enter__(self):
+ signal.signal(signal.SIGALRM, self.handle_timeout)
+ signal.alarm(self.seconds)
+
+ def __exit__(self, exc_type, value, traceback):
+ signal.alarm(0)
+
+
+class TimeoutException(Exception):
+ pass
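A minimal usage sketch of the new RunLimit context manager (illustrative only: signal.SIGALRM is POSIX-specific and must be armed from the main thread; the 10-second sleep stands in for any call that might hang):

    import time

    from tcp_tests.helpers.utils import RunLimit, TimeoutException

    try:
        # Abort the block if it does not finish within 5 seconds.
        with RunLimit(seconds=5, error_message='Operation took too long'):
            time.sleep(10)
    except TimeoutException as e:
        print('Interrupted: {}'.format(e))
    # __exit__ calls signal.alarm(0), so no stray SIGALRM fires later.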
diff --git a/tcp_tests/managers/common_services_manager.py b/tcp_tests/managers/common_services_manager.py
index c62114d..4e1e34a 100644
--- a/tcp_tests/managers/common_services_manager.py
+++ b/tcp_tests/managers/common_services_manager.py
@@ -177,6 +177,7 @@
'keepalived:cluster:instance',
message)
+ # keepalived 'priority' can be the same on multiple nodes
if any([priority == prio
for node, prio in vips[address]['nodes'].items()]):
message = (
@@ -188,10 +189,7 @@
address,
vips[address]['nodes'].keys())
)
- raise exceptions.SaltPillarError(
- minion_id,
- 'keepalived:cluster:instance',
- message)
+ LOG.warning("On {0}, {1}".format(minion_id, message))
# Add data to the vips
vips[address]['nodes'][minion_id] = priority
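The hunk above stops treating duplicate keepalived priorities as fatal: the pillar check still runs, but a repeated priority is only logged. A reduced sketch of that check, with hypothetical pillar data:

    # VIP bookkeeping as built by the manager: address -> {minion: priority}
    vips = {'10.0.0.10': {'nodes': {'ctl01.local': 100, 'ctl02.local': 101}}}
    address, minion_id, priority = '10.0.0.10', 'ctl03.local', 101

    # keepalived 'priority' can legitimately repeat across nodes,
    # so a duplicate is now a warning instead of SaltPillarError.
    if any(priority == prio
           for node, prio in vips[address]['nodes'].items()):
        print('On {0}, duplicate priority {1}'.format(minion_id, priority))
    vips[address]['nodes'][minion_id] = priority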
diff --git a/tcp_tests/managers/envmanager_devops.py b/tcp_tests/managers/envmanager_devops.py
index 80818cf..1e7f66c 100644
--- a/tcp_tests/managers/envmanager_devops.py
+++ b/tcp_tests/managers/envmanager_devops.py
@@ -16,16 +16,21 @@
from devops import error
from devops.helpers import helpers
+from devops.helpers.helpers import ssh_client
from devops import models
from django import db
from oslo_config import cfg
+from paramiko.ssh_exception import (
+ AuthenticationException,
+ BadAuthenticationType)
+
+from tcp_tests.helpers import env_config
+from tcp_tests.helpers import exceptions
+from tcp_tests.helpers import ext
+from tcp_tests import logger
from tcp_tests import settings
from tcp_tests import settings_oslo
-from tcp_tests.helpers import env_config
-from tcp_tests.helpers import ext
-from tcp_tests.helpers import exceptions
-from tcp_tests import logger
LOG = logger.logger
@@ -305,8 +310,29 @@
for node in self.__env.get_nodes(role__in=underlay_node_roles):
LOG.info("Waiting for SSH on node '{0}' / {1} ...".format(
node.name, self.node_ip(node)))
+
+ def _ssh_wait(host,
+ port,
+ username=settings.SSH_NODE_CREDENTIALS['login'],
+ password=settings.SSH_NODE_CREDENTIALS['password'],
+ timeout=0):
+ try:
+ ssh = ssh_client.SSHClient(
+ host=host, port=port,
+ auth=ssh_client.SSHAuth(
+ username=username,
+ password=password))
+ except AuthenticationException:
+ return True
+ except BadAuthenticationType:
+ return True
+ except Exception:
+ return False
+
+ return ssh.execute('echo ok')['exit_code'] == 0
+
helpers.wait(
- lambda: helpers.tcp_ping(self.node_ip(node), 22),
+ lambda: _ssh_wait(self.node_ip(node), 22),
timeout=timeout,
timeout_msg="Node '{}' didn't open SSH in {} sec".format(
node.name, timeout
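The readiness probe changes from a bare TCP ping on port 22 to a real SSH handshake: an authentication error proves sshd is already answering and counts as success, while connection-level failures mean the node is still booting. The same decision in a standalone paramiko sketch (host, port and the root/r00tme credentials are placeholders):

    import paramiko
    from paramiko.ssh_exception import (AuthenticationException,
                                        BadAuthenticationType)

    def ssh_ready(host, port=22, username='root', password='r00tme'):
        """Return True once sshd answers, even with wrong credentials."""
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        try:
            client.connect(host, port=port, username=username,
                           password=password, timeout=5)
        except (AuthenticationException, BadAuthenticationType):
            return True   # daemon is up; only the credentials were rejected
        except Exception:
            return False  # port closed, no banner, host unreachable, ...
        client.close()
        return True       # connected and authenticated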
diff --git a/tcp_tests/managers/jenkins/client.py b/tcp_tests/managers/jenkins/client.py
index f781305..474713c 100644
--- a/tcp_tests/managers/jenkins/client.py
+++ b/tcp_tests/managers/jenkins/client.py
@@ -63,3 +63,6 @@
timeout=timeout,
timeout_msg='Timeout waiting, job {0} are not finished "{1}" build'
' still'.format(name, build_id))
+
+ def get_build_output(self, name, build_id):
+ return self.__client.get_build_console_output(name, build_id)
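The new get_build_output is a thin wrapper over python-jenkins' get_build_console_output. A usage sketch against the underlying library (URL, credentials and build coordinates are placeholders):

    import jenkins  # python-jenkins

    server = jenkins.Jenkins('http://jenkins.local:8080',
                             username='admin', password='admin')
    name, build_id = 'deploy-mcp', 42
    console_log = server.get_build_console_output(name, build_id)
    print(console_log.splitlines()[-1])  # e.g. 'Finished: SUCCESS'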
diff --git a/tcp_tests/managers/openstack_manager.py b/tcp_tests/managers/openstack_manager.py
index 8f6b140..464dc56 100644
--- a/tcp_tests/managers/openstack_manager.py
+++ b/tcp_tests/managers/openstack_manager.py
@@ -95,19 +95,23 @@
self,
target='gtw01', pattern=None,
conf_name='lvm_mcp.conf',
- registry=None):
+ registry=None, node_name=None):
if not registry:
registry = ('{0}/{1}'.format(settings.DOCKER_REGISTRY,
settings.DOCKER_NAME))
- target_name = [node_name for node_name
- in self.__underlay.node_names() if target in node_name]
+ if node_name is None and target is not None:
+ target_name = next(
+ name for name in self.__underlay.node_names()
+ if target in name)
+ else:
+ target_name = node_name
cmd = ("apt-get -y install docker.io")
- with self.__underlay.remote(node_name=target_name[0]) as node_remote:
+ with self.__underlay.remote(node_name=target_name) as node_remote:
result = node_remote.execute(cmd, verbose=True)
cmd_iptables = "iptables --policy FORWARD ACCEPT"
- with self.__underlay.remote(node_name=target_name[0]) as node_remote:
+ with self.__underlay.remote(node_name=target_name) as node_remote:
result = node_remote.execute(cmd_iptables, verbose=True)
with self.__underlay.remote(
@@ -139,9 +143,9 @@
"-v /etc/ssl/certs/:/etc/ssl/certs/ {2} >> image.output"
.format(conf_name, pattern, registry))
LOG.info("Running tempest testing on node {0} using the following "
- "command:\n{1}".format(target_name[0], cmd))
+ "command:\n{1}".format(target_name, cmd))
- with self.__underlay.remote(node_name=target_name[0]) as node_remote:
+ with self.__underlay.remote(node_name=target_name) as node_remote:
result = node_remote.execute(cmd, verbose=True)
LOG.debug("Test execution result is {}".format(result))
return result
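The lookup above now takes the first node whose name contains the target substring via next(), instead of indexing a one-element list, and an explicit node_name bypasses the search entirely. A sketch with hypothetical node names:

    node_names = ['cfg01.mcp.local', 'ctl01.mcp.local', 'gtw01.mcp.local']
    target = 'gtw01'

    # First match wins; next() raises StopIteration when nothing matches.
    target_name = next(name for name in node_names if target in name)
    print(target_name)  # gtw01.mcp.local

    # Variant with a default instead of an exception:
    target_name = next((n for n in node_names if target in n), None)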
diff --git a/tcp_tests/managers/saltmanager.py b/tcp_tests/managers/saltmanager.py
index 06e7d0b..2b06dc3 100644
--- a/tcp_tests/managers/saltmanager.py
+++ b/tcp_tests/managers/saltmanager.py
@@ -198,10 +198,20 @@
'password': settings.SSH_NODE_CREDENTIALS['password']
}
- return [
- host(k, next(i for i in v['ipv4'] if i in pool_net))
- for k, v in hosts.items()
- if next(i for i in v['ipv4'] if i in pool_net)]
+ try:
+ ret = [
+ host(k, next(i for i in v['ipv4'] if i in pool_net))
+ for k, v in hosts.items()
+ if next(i for i in v['ipv4'] if i in pool_net)]
+ LOG.debug("Fetched ssh data from salt grains - {}".format(ret))
+ return ret
+ except StopIteration:
+ msg = ("Can't match nodes ip address with network cidr\n"
+ "Managment network - {net}\n"
+ "Host with address - {host_list}".format(
+ net=pool_net,
+ host_list={k: v['ipv4'] for k, v in hosts.items()}))
+ raise StopIteration(msg)
def service_status(self, tgt, service):
result = self.local(tgt=tgt, fun='service.status', args=service)
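The try/except above replaces an opaque StopIteration from next() with a message listing the management CIDR and every address found in the salt grains. A reduced sketch of the matching step; treating pool_net as a netaddr.IPNetwork (so a plain string supports the in test) is an assumption:

    from netaddr import IPNetwork

    pool_net = IPNetwork('10.167.4.0/24')
    hosts = {'cfg01': {'ipv4': ['10.167.4.15', '172.16.49.66']},
             'ctl01': {'ipv4': ['192.168.0.2']}}  # nothing in pool_net

    try:
        matched = [(k, next(i for i in v['ipv4'] if i in pool_net))
                   for k, v in hosts.items()]
    except StopIteration:
        raise StopIteration(
            "Can't match nodes' IP addresses with the network CIDR\n"
            "Management network - {net}\n"
            "Hosts with addresses - {host_list}".format(
                net=pool_net,
                host_list={k: v['ipv4'] for k, v in hosts.items()}))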
diff --git a/tcp_tests/managers/sl_manager.py b/tcp_tests/managers/sl_manager.py
index 7a94345..ba45e16 100644
--- a/tcp_tests/managers/sl_manager.py
+++ b/tcp_tests/managers/sl_manager.py
@@ -134,7 +134,7 @@
"cd {tests_path}; "
"export VOLUME_STATUS='available';"
"pip install pytest-json;"
- "pytest --json=report.json {reruns} {reruns_delay}"
+ "pytest --json=report.json {reruns} {reruns_delay} "
"-k {skip_tests} {test_to_run}".format(**{
"tests_path": tests_path,
"skip_tests": ("'not " + skip_tests + "'"
diff --git a/tcp_tests/managers/underlay_ssh_manager.py b/tcp_tests/managers/underlay_ssh_manager.py
index fce80cc..2c9ed55 100644
--- a/tcp_tests/managers/underlay_ssh_manager.py
+++ b/tcp_tests/managers/underlay_ssh_manager.py
@@ -175,6 +175,7 @@
else:
ssh_data = ssh
if ssh_data is None:
+ LOG.debug("config_ssh - {}".format(self.config_ssh))
raise Exception('Auth data for node was not found using '
'node_name="{}" , host="{}" , address_pool="{}"'
.format(node_name, host, address_pool))
diff --git a/tcp_tests/report.py b/tcp_tests/report.py
index 46f5995..03530d6 100644
--- a/tcp_tests/report.py
+++ b/tcp_tests/report.py
@@ -88,12 +88,15 @@
LOG.info("Get runs from plan - {}".format(plan_name))
ret = []
plan = t_client.plan(plan_name)
- for e in plan.entries:
- for r in e.runs:
- LOG.info("Run {} #{}".format(r.name, r.id))
- if run_name is not None and r.name != run_name:
- continue
- ret.append(r)
+ if plan:
+ for e in plan.entries:
+ for r in e.runs:
+ LOG.info("Run {} #{}".format(r.name, r.id))
+ if run_name is not None and r.name != run_name:
+ continue
+ ret.append(r)
+ else:
+ LOG.warning("Plan {} is empty".format(plan_name))
return ret
@@ -110,14 +113,11 @@
def get_results(t_client, run):
- _statuses = ('product_failed', 'failed',
- 'prodfailed', 'blocked')
LOG.info("Get results for run - {}".format(run.name))
results = t_client.results(run)
ret = [(run.id, r) for r in results
if r.raw_data()['status_id'] is not None and
- r.raw_data()['defects'] is not None and
- r.status.name.lower() in _statuses]
+ r.raw_data()['defects'] is not None]
for r in ret:
run_id, result = r
test = fetch_test(result.api, result.raw_data()['test_id'], run_id)
@@ -321,14 +321,15 @@
date=datetime.datetime.now().strftime("%a %b %d %H:%M:%S %Y"),
table=get_md_table(table))
plan = t_client.plan(plan_name)
- plan.description = text
- plan.api._post(
- 'update_plan/{}'.format(plan.id),
- {
- 'name': plan.name,
- 'description': plan.description,
- 'milestone_id': plan.milestone.id
- })
+ if plan:
+ plan.description = text
+ plan.api._post(
+ 'update_plan/{}'.format(plan.id),
+ {
+ 'name': plan.name,
+ 'description': plan.description,
+ 'milestone_id': plan.milestone.id
+ })
def create_report(**kwargs):
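Both report.py hunks apply the same guard: t_client.plan() can return a falsy value for a missing or empty plan, so the entries walk and the update_plan POST now run only when a plan object exists. A compact sketch with a stubbed client:

    class StubClient(object):
        def plan(self, plan_name):
            return None  # simulates a TestRail plan that was not found

    t_client, plan_name = StubClient(), 'nightly-2018'
    plan = t_client.plan(plan_name)
    if plan:
        pass  # safe to read plan.entries or POST update_plan/{id}
    else:
        print("Plan {} is empty".format(plan_name))  # the warning path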
diff --git a/tcp_tests/requirements.txt b/tcp_tests/requirements.txt
index d419b33..86ef693 100644
--- a/tcp_tests/requirements.txt
+++ b/tcp_tests/requirements.txt
@@ -1,6 +1,6 @@
# git+git://github.com/openstack/fuel-devops.git@887368d#egg=project[postgre] # Use this requirement for PostgreSQL
libvirt-python>=3.5.0,<4.1.0 # LGPLv2+
-git+git://github.com/openstack/fuel-devops.git@b8c6fe45a12b091619ba43cc14ab6cf05f0cd8f0 # Use this requirement for Sqlite3, or if requirements for PostgreSQL are already installed
+git+git://github.com/openstack/fuel-devops.git@25d4cc67315132b1b27131977b2e07029b3ffbe1 # Use this requirement for Sqlite3, or if requirements for PostgreSQL are already installed
git+git://github.com/dis-xcom/fuel-devops-driver-ironic
paramiko
six
@@ -13,7 +13,7 @@
junit-xml
jinja2>=2.1
jira
-testrail
+testrail<=0.3.8
functools32
python-k8sclient==0.4.0
salt-pepper<=0.5.3
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index 25abbc9..c6a4201 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -293,24 +293,25 @@
default='{}/mirantis/kubernetes/hyperkube-amd64:v1.8.11-9'.format(
settings.DOCKER_REGISTRY)),
ct.Cfg('kubernetes_calico_image', ct.String(),
- default='{}/mirantis/projectcalico/calico/node:latest'.format(
+ default='{}/mirantis/projectcalico/calico/node:v2.6.9'.format(
settings.DOCKER_REGISTRY)),
- ct.Cfg('kubernetes_calicoctl_image', ct.String(),
- default='{}/mirantis/projectcalico/calico/ctl:latest'.format(
+ ct.Cfg('kubernetes_calico_calicoctl_image', ct.String(),
+ default='{}/mirantis/projectcalico/calico/ctl:v1.6.4'.format(
settings.DOCKER_REGISTRY)),
ct.Cfg('kubernetes_calico_cni_image', ct.String(),
- default='{}/mirantis/projectcalico/calico/cni:latest'.format(
+ default='{}/mirantis/projectcalico/calico/cni:v1.11.5'.format(
settings.DOCKER_REGISTRY)),
ct.Cfg('kubernetes_netchecker_enabled', ct.Boolean(),
help="", default=True),
ct.Cfg('kubernetes_netchecker_agent_image', ct.String(),
- default='mirantis/k8s-netchecker-agent:latest'),
+ default='mirantis/k8s-netchecker-agent:v1.2.2'),
ct.Cfg('kubernetes_netchecker_server_image', ct.String(),
- default='mirantis/k8s-netchecker-server:latest'),
+ default='mirantis/k8s-netchecker-server:v1.2.2'),
ct.Cfg('kubernetes_calico_policy_enabled', ct.Boolean(),
help="", default=False),
ct.Cfg('kubernetes_calico_policy_image', ct.String(),
- default='calico/kube-policy-controller:v0.5.4'),
+ default='{}/mirantis/projectcalico/calico/kube-controllers:'
+ 'v1.0.4'.format(settings.DOCKER_REGISTRY)),
ct.Cfg('kubernetes_helm_enabled', ct.Boolean(),
help="", default=False),
ct.Cfg('kubernetes_virtlet_enabled', ct.Boolean(),
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/openstack.yaml
index 596c512..e47f36c 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/openstack.yaml
@@ -245,16 +245,10 @@
retry: {count: 1, delay: 30}
skip_fail: false
-#- description: Allow all tcp
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-
-#- description: Allow all icmp
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
\ No newline at end of file
+- description: Temporary workaround - copy keystonercv3 from ctl01 to gtw01
+ cmd: |
+ rc=`salt "ctl01*" cmd.run 'cat /root/keystonercv3' | grep export`;
+ salt 'gtw01*' cmd.run "echo $rc > /root/keystonercv3";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-environment.yaml
index 39363be..2b693ab 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-environment.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt-context-environment.yaml
@@ -55,9 +55,9 @@
deploy_address: 172.16.49.3
enp3s0f1:
role: single_vlan_ctl
- tenant_address: 10.167.12.105
single_address: 10.167.11.105
enp5s0f0:
+ tenant_address: 10.167.12.105
role: single_ovs_dpdk_prv
dpdk_pci: "0000:05:00.0"
@@ -75,9 +75,9 @@
deploy_address: 172.16.49.31
enp3s0f1:
role: single_vlan_ctl
- tenant_address: 10.167.12.106
single_address: 10.167.11.106
enp5s0f0:
+ tenant_address: 10.167.12.106
role: single_ovs_dpdk_prv
dpdk_pci: "0000:05:00.0"
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml
index 7b3e2f4..e8eb622 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/salt.yaml
@@ -146,3 +146,14 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: true
+
+- description: Temporary workaround - distribute the cfg01 public key to all minions
+ cmd: |
+ ssh-keygen -y -f /root/.ssh/id_rsa > /root/.ssh/id_rsa.pub;
+ pub_key=`cat /root/.ssh/id_rsa.pub`;
+ salt '*' cmd.run "echo $pub_key >> /root/.ssh/authorized_keys";
+ salt '*' cmd.run "service sshd restart";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data-cfg01.yaml
index d00841d..3f4f128 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay--user-data-cfg01.yaml
@@ -56,9 +56,6 @@
# Install common packages
- eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree sshuttle
- # Use sshuttle to allow SSH access to the model-related control network 10.167.4.0/24 on baremetal/VM nodes from cfg01
- - sshuttle -r {{ ETH0_IP_ADDRESS_KVM01 }} 10.167.4.0/24 -D
-
########################################################
# Node is ready, allow SSH access
#- echo "Allow SSH access ..."
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/lab04-physical-inventory.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/lab04-physical-inventory.yaml
index d17abc9..2eb036b 100644
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/lab04-physical-inventory.yaml
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/lab04-physical-inventory.yaml
@@ -9,45 +9,6 @@
role: single_dhcp
# Physical nodes
- cid01.ocata-cicd.local:
- reclass_storage_name: cicd_control_node01
- roles:
- - cicd_control_leader
- - linux_system_codename_xenial
- interfaces:
- enp2s0f0:
- role: single_mgm
- deploy_address: 172.16.49.70
- enp2s0f1:
- role: single_vlan_ctl
- single_address: 10.167.8.91
-
- cid02.ocata-cicd.local:
- reclass_storage_name: cicd_control_node02
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- enp2s0f0:
- role: single_mgm
- deploy_address: 172.16.49.71
- enp2s0f1:
- role: single_vlan_ctl
- single_address: 10.167.8.92
-
- cid03.ocata-cicd.local:
- reclass_storage_name: cicd_control_node03
- roles:
- - cicd_control_manager
- - linux_system_codename_xenial
- interfaces:
- enp2s0f0:
- role: single_mgm
- deploy_address: 172.16.49.72
- enp2s0f1:
- role: single_vlan_ctl
- single_address: 10.167.8.93
-
kvm01.ocata-cicd.local:
reclass_storage_name: infra_kvm_node01
roles:
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-environment.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-environment.yaml
index 9e8ef5d..1411196 100644
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-environment.yaml
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt-context-environment.yaml
@@ -1,6 +1,33 @@
nodes:
# Virtual Control Plane nodes
+ cid01.ocata-cicd.local:
+ reclass_storage_name: cicd_control_node01
+ roles:
+ - cicd_control_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ cid02.ocata-cicd.local:
+ reclass_storage_name: cicd_control_node02
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
+ cid03.ocata-cicd.local:
+ reclass_storage_name: cicd_control_node03
+ roles:
+ - cicd_control_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_ctl
+
ctl01.ocata-cicd.local:
reclass_storage_name: openstack_control_node01
roles:
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml
index 40e8d68..0d2bdda 100644
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/salt.yaml
@@ -57,14 +57,6 @@
retry: {count: 1, delay: 10}
skip_fail: false
-- description: Remove cicd nodes from VCP, because we have baremetal cicd nodes
- cmd: |
- sed -i 's/\-\ system\.salt\.control\.cluster\.cicd\_control\_cluster//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
-
- description: "Workaround for PROD-16973"
cmd: |
set -e;
diff --git a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay.yaml b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay.yaml
index 90c6227..eb56414 100644
--- a/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-ocata-cicd-pipeline/underlay.yaml
@@ -7,9 +7,6 @@
{% set HOSTNAME_KVM01 = os_env('HOSTNAME_KVM01', 'kvm01.' + DOMAIN_NAME) %}
{% set HOSTNAME_KVM02 = os_env('HOSTNAME_KVM02', 'kvm02.' + DOMAIN_NAME) %}
{% set HOSTNAME_KVM03 = os_env('HOSTNAME_KVM03', 'kvm03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMP001 = os_env('HOSTNAME_CMP001', 'cmp001.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
@@ -19,9 +16,6 @@
{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.67') %}
{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.49.68') %}
{% set ETH0_IP_ADDRESS_KVM03 = os_env('ETH0_IP_ADDRESS_KVM03', '172.16.49.69') %}
-{% set ETH0_IP_ADDRESS_CID01 = os_env('ETH0_IP_ADDRESS_CID01', '172.16.49.70') %}
-{% set ETH0_IP_ADDRESS_CID02 = os_env('ETH0_IP_ADDRESS_CID02', '172.16.49.71') %}
-{% set ETH0_IP_ADDRESS_CID03 = os_env('ETH0_IP_ADDRESS_CID03', '172.16.49.72') %}
{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.16.49.73') %}
{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.16.49.74') %}
# {% set ETH0_IP_ADDRESS_CMP003 = os_env('ETH0_IP_ADDRESS_CMP003', '172.16.167.140') %}
@@ -55,9 +49,6 @@
default_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
default_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
default_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
- default_{{ HOSTNAME_CID01 }}: {{ ETH0_IP_ADDRESS_CID01 }}
- default_{{ HOSTNAME_CID02 }}: {{ ETH0_IP_ADDRESS_CID02 }}
- default_{{ HOSTNAME_CID03 }}: {{ ETH0_IP_ADDRESS_CID03 }}
default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
default_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
default_{{ HOSTNAME_CMP003 }}: {{ ETH0_IP_ADDRESS_CMP003 }}
@@ -67,9 +58,6 @@
virtual_{{ HOSTNAME_KVM01 }}: {{ ETH0_IP_ADDRESS_KVM01 }}
virtual_{{ HOSTNAME_KVM02 }}: {{ ETH0_IP_ADDRESS_KVM02 }}
virtual_{{ HOSTNAME_KVM03 }}: {{ ETH0_IP_ADDRESS_KVM03 }}
- virtual_{{ HOSTNAME_CID01 }}: {{ ETH0_IP_ADDRESS_CID01 }}
- virtual_{{ HOSTNAME_CID02 }}: {{ ETH0_IP_ADDRESS_CID02 }}
- virtual_{{ HOSTNAME_CID03 }}: {{ ETH0_IP_ADDRESS_CID03 }}
virtual_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
virtual_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
# virtual_{{ HOSTNAME_CMP003 }}: {{ ETH0_IP_ADDRESS_CMP003 }}
@@ -377,147 +365,6 @@
parents:
- enp9s0f1
- - name: {{ HOSTNAME_CID01 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CID01 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: enp2s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_CID01
- - label: enp2s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_CID01
-
- network_config:
- enp2s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp2s0f1
-
- - name: {{ HOSTNAME_CID02}}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CID02 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: enp2s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_CID02
- - label: enp2s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_CID02
-
- network_config:
- enp2s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp2s0f1
-
- - name: {{ HOSTNAME_CID03 }}
- role: salt_minion
- params:
- ipmi_user: !os_env IPMI_USER
- ipmi_password: !os_env IPMI_PASSWORD
- ipmi_previlegies: OPERATOR
- ipmi_host: !os_env IPMI_HOST_CID03 # hostname or IP address
- ipmi_lan_interface: lanplus
- ipmi_port: 623
-
- root_volume_name: system # see 'volumes' below
- cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 200
-
- # The same as for agent URL, here is an URL to the image that should be
- # used for deploy the node. It should also be accessible from deploying
- # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
- source_image: !os_env IRONIC_SOURCE_IMAGE_URL
- source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
-
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data
-
- interfaces:
- - label: enp2s0f0
- l2_network_device: admin
- mac_address: !os_env ETH0_MAC_ADDRESS_CID03
- - label: enp2s0f1
- mac_address: !os_env ETH1_MAC_ADDRESS_CID03
-
- network_config:
- enp2s0f0:
- networks:
- - admin
- bond0:
- networks:
- - control
- aggregation: active-backup
- parents:
- - enp2s0f1
-
- name: {{ HOSTNAME_CMP001 }}
role: salt_minion
params:
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml
index 35bb116..1f2017c 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-bm-mcp-ovs-dpdk.yaml
@@ -11,7 +11,8 @@
{%- set ENVIRONMENT_CONTEXT_NAMES = ['salt-context-vcp-environment.yaml', 'salt-context-environment.yaml'] %}
{%- set CONTROL_VLAN = os_env('CONTROL_VLAN', '2416') %}
{%- set TENANT_VLAN = os_env('TENANT_VLAN', '2417') %}
-
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+{% set SALT_REPOSITORY = os_env('SALT_REPOSITORY', "deb [arch=amd64] http://mirror.mirantis.com/" + REPOSITORY_SUITE + "/saltstack-2016.3/${DISTRIB_CODENAME} ${DISTRIB_CODENAME} main") %}
{% import 'shared-salt.yaml' as SHARED with context %}
{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL(CONTROL_VLAN=CONTROL_VLAN, TENANT_VLAN=TENANT_VLAN) }}
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/salt.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/salt.yaml
index 04185ea..19cc801 100644
--- a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/salt.yaml
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/salt.yaml
@@ -20,7 +20,32 @@
skip_fail: false
- description: Sync all salt resources on master node
- cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False saltutil.sync_all && sleep 5
+ cmd: sleep 60; salt-call --hard-crash --state-output=mixed --state-verbose=False saltutil.sync_all && sleep 5
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
+
+- description: MaaS auth
+ cmd: maas logout mirantis && maas login mirantis http://localhost:5240/MAAS/api/2.0/ 'FTvqwe7ybBp68gPar2:5mcctTAXVL8mns4ef4:zrA9LZwu2tMc8BAZpsPUfwWwTyQnAtDN'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Set upstream dns in MaaS
+ cmd: sleep 30; maas mirantis maas set-config name=upstream_dns value='10.10.0.15 8.8.8.8 8.8.4.4'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Setup NTP
+ cmd: salt-call --hard-crash --state-output=mixed --state-verbose=False state.sls ntp.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Wait 60 seconds for salt services to come up
+ cmd: sleep 60
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-apt01.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-apt01.yaml
index 6978bd3..838435c 100644
--- a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-apt01.yaml
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-apt01.yaml
@@ -48,6 +48,7 @@
# Node is ready, allow SSH access
- echo "Allow SSH access ..."
- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ - touch /is_cloud_init_finish
########################################################
write_files:
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-cfg01.yaml
index 6978bd3..b850283 100644
--- a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-cfg01.yaml
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data-cfg01.yaml
@@ -19,10 +19,9 @@
bootcmd:
# Block access to SSH while node is preparing
- - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ - cloud-init-per once sudo echo 'sshd:ALL' >> /etc/hosts.deny
# Enable root access
- sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
- - service sshd restart
output:
all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
@@ -39,6 +38,9 @@
- echo "/swapfile none swap defaults 0 0" >> /etc/fstab
# Run user data script from config drive
+ - ifdown --force ens3; ifconfig ens3 down; ip a flush dev ens3; rm -f /var/run/network/ifstate.ens3
+ - ifdown --force ens4; ifconfig ens4 down; ip a flush dev ens4; rm -f /var/run/network/ifstate.ens4
+ - ifdown --force ens5; ifconfig ens5 down; ip a flush dev ens5; rm -f /var/run/network/ifstate.ens5
- cd /root/config-drive && /bin/bash -xe ./user-data
# Enable root access
@@ -47,7 +49,8 @@
########################################################
# Node is ready, allow SSH access
- echo "Allow SSH access ..."
- - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ - "sed -i -e '/sshd:ALL/d' /etc/hosts.deny"
+ - touch /is_cloud_init_finish
########################################################
write_files:
@@ -60,3 +63,13 @@
ServerAliveCountMax 10
StrictHostKeyChecking no
UserKnownHostsFile /dev/null
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data.yaml
index aab7cde..0c365ac 100644
--- a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data.yaml
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay--user-data.yaml
@@ -66,6 +66,7 @@
#- sudo iptables -D INPUT -p tcp --dport 22 -j DROP
- apt-get install linux-generic-hwe-16.04 -y
- reboot
+ - touch /is_cloud_init_finish
########################################################
write_files:
diff --git a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml
index b0568d3..db9c992 100644
--- a/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml
+++ b/tcp_tests/templates/physical-mcp-ocata-offline-ovs/underlay.yaml
@@ -131,7 +131,7 @@
address_pools:
managment-pool01:
- net: {{ os_env('MGMT_ADDRESS_POOL01', '10.11.0.0/24:24') }}
+ net: {{ os_env('MGMT_ADDRESS_POOL01', '10.11.0.0/16:16') }}
params:
ip_reserved:
gateway: +1
@@ -173,7 +173,7 @@
default_{{ HOSTNAME_MTR03 }}: {{ ETH1_IP_ADDRESS_MTR04 }}
admin-pool01:
- net: {{ os_env('DEPLOY_ADDRESS_POOL01', '10.10.0.0/24:24') }}
+ net: {{ os_env('DEPLOY_ADDRESS_POOL01', '10.10.0.0/16:16') }}
params:
ip_reserved:
gateway: +1
diff --git a/tcp_tests/templates/runtest.yml b/tcp_tests/templates/runtest.yml
index 573bd54..c1f1599 100644
--- a/tcp_tests/templates/runtest.yml
+++ b/tcp_tests/templates/runtest.yml
@@ -31,9 +31,6 @@
max_microversion: 2.53
min_compute_nodes: 2
volume_device_name: 'vdc'
- orchestration:
- max_template_size: 5440000
- max_resources_per_stack: 20000
dns_feature_enabled:
# Switch this to designate_admin_api_enabled once [1] is promoted to stable packages
# [1] https://gerrit.mcp.mirantis.net/gitweb?p=salt-formulas/designate.git;a=commit;h=96a3f43f6cf1149559e54a00b5548bdf46333749
@@ -43,11 +40,6 @@
api_v2_quotas: true
api_v2_root_recordsets: true
bug_1573141_fixed: true
- volume-feature-enabled:
- backup: false
- volume:
- storage_protocol: iSCSI
- build_timeout: 300
share:
min_api_microversion: 2.0
max_api_microversion: 2.40
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index 98f18e3..c326db1 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -496,7 +496,7 @@
skip_fail: false
- description: Restart salt-master service
- cmd: systemctl restart salt-master;
+ cmd: service salt-master restart;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
@@ -581,7 +581,7 @@
-C 'I@salt:master' state.sls openssh &&
salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@salt:master' cmd.run "sed -i 's/PasswordAuthentication no/PasswordAuthentication
- yes/' /etc/ssh/sshd_config && service ssh reload"
+ yes/' /etc/ssh/sshd_config && sed -i 's/ClientAliveInterval 300/ClientAliveInterval 3000/' /etc/ssh/sshd_config && service ssh reload"
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 3, delay: 5}
skip_fail: false
@@ -754,7 +754,7 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@linux:system and not cfg01*' state.sls openssh &&
salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@linux:system and not cfg01*' cmd.run "sed -i 's/PasswordAuthentication no/PasswordAuthentication
- yes/' /etc/ssh/sshd_config && service ssh reload"
+ yes/' /etc/ssh/sshd_config && sed -i 's/ClientAliveInterval 300/ClientAliveInterval 3000/' /etc/ssh/sshd_config && service ssh reload"
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
@@ -1058,6 +1058,8 @@
- description: Include class with tempest template into cfg node
cmd: |
sed -i 's/classes\:/classes\:\n- cluster.{{ CLUSTER_NAME }}.infra.runtest/g' /srv/salt/reclass/nodes/_generated/cfg01.{{ DOMAIN_NAME }}.yml;
+ git clone https://gerrit.mcp.mirantis.net/salt-formulas/runtest;
+ cd /root/runtest && git checkout 2468b1f1008ba516fda31e00e588de71447b6fa7 && make install;
salt 'cfg01*' saltutil.refresh_pillar;
salt 'cfg01*' saltutil.sync_all;
node_name: {{ HOSTNAME_CFG01 }}
@@ -1092,16 +1094,6 @@
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Temp WR for PROD-19442
- cmd: |
- apt-get install crudini -y;
- crudini --verbose --set /root/test/tempest.conf validation connect_method floating;
- crudini --verbose --set /root/test/tempest.conf validation run_validation True;
- crudini --verbose --set /root/test/tempest.conf validation image_ssh_user cirros;
- node_name: {{ HOSTNAME_GTW01 }}
- retry: {count: 1, delay: 30}
- skip_fail: true
-
- description: Run tempest from new docker image
cmd: |
docker run -e ARGS="-r {{TEMPEST_PATTERN }} -w 4 {{ EXCLUDE_TEST_ARGS }}" -v /root/test/tempest.conf:/etc/tempest/tempest.conf -v /tmp/:/tmp/ -v /root/test:/root/tempest -v /etc/ssl/certs/:/etc/ssl/certs/ --rm docker-prod-virtual.docker.mirantis.net/mirantis/cicd/ci-tempest /bin/bash -c "run-tempest"
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/openstack.yaml
index c43144a..17819f4 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl-barbican/openstack.yaml
@@ -102,7 +102,6 @@
retry: {count: 3, delay: 5}
skip_fail: false
-
- description: Install cinder
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@cinder:controller' state.sls cinder -b 1
@@ -290,79 +289,6 @@
retry: {count: 1, delay: 30}
skip_fail: false
-# Configure cinder-volume salt-call PROD-13167
-- description: Set disks 01
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 02
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 03
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 01
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 02
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 03
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: create volume_group
- cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install cinder-volume
- cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install crudini
- cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 01
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 02
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 03
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
- description: Install docker.io on gtw
cmd: salt-call cmd.run 'apt-get install docker.io -y'
node_name: {{ HOSTNAME_GTW01 }}
@@ -375,25 +301,6 @@
retry: {count: 1, delay: 30}
skip_fail: false
-- description: Restart cinder volume
- cmd: |
- salt -C 'I@cinder:controller' service.restart cinder-volume;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: create rc file on cfg
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
- description: Install manila-api on first node
cmd: |
salt -C 'I@manila:api and *01*' state.sls manila.api;
@@ -440,7 +347,7 @@
cmd: |
salt 'ctl01*' cmd.run '. /root/keystonercv3; manila type-create default false --create_share_from_snapshot_support true --revert_to_snapshot_support true --mount_snapshot_support true --snapshot_support true --is_public true'
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 3, delay: 5}
skip_fail: false
- description: Create CIFS and NFS share and check it status
@@ -450,7 +357,7 @@
sleep 5;
salt 'ctl01*' cmd.run '. /root/keystonercv3; manila list';
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 3, delay: 5}
skip_fail: false
{{ SHARED.RUN_NEW_TEMPEST() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml
index fab3ece..ed3bd67 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ssl/openstack.yaml
@@ -218,23 +218,6 @@
retry: {count: 10, delay: 30}
skip_fail: false
-
- # Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
- description: Create net04_external
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
'. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
@@ -284,20 +267,6 @@
retry: {count: 1, delay: 30}
skip_fail: false
-#- description: Allow all tcp
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: Allow all icmp
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-
- description: sync time
cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
'service ntp stop; ntpd -gq; service ntp start'
@@ -305,79 +274,6 @@
retry: {count: 1, delay: 30}
skip_fail: false
-# Configure cinder-volume salt-call PROD-13167
-- description: Set disks 01
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 02
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Set disks 03
- cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 01
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 02
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Create partitions 03
- cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: create volume_group
- cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install cinder-volume
- cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Install crudini
- cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 01
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 02
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL02 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Temporary WR set enabled backends value 03
- cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
- node_name: {{ HOSTNAME_CTL03 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
- description: Install docker.io on gtw
cmd: salt-call cmd.run 'apt-get install docker.io -y'
node_name: {{ HOSTNAME_GTW01 }}
@@ -390,25 +286,6 @@
retry: {count: 1, delay: 30}
skip_fail: false
-- description: Restart cinder volume
- cmd: |
- salt -C 'I@cinder:controller' service.restart cinder-volume;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
- skip_fail: false
-
-- description: create rc file on cfg
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
- description: Install manila-api on first node
cmd: |
salt -C 'I@manila:api and *01*' state.sls manila.api;
@@ -455,7 +332,7 @@
cmd: |
salt 'ctl01*' cmd.run '. /root/keystonercv3; manila type-create default false --create_share_from_snapshot_support true --revert_to_snapshot_support true --mount_snapshot_support true --snapshot_support true --is_public true'
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 3, delay: 5}
skip_fail: false
- description: Create CIFS and NFS share and check it status
@@ -465,7 +342,7 @@
sleep 5;
salt 'ctl01*' cmd.run '. /root/keystonercv3; manila list';
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
+ retry: {count: 3, delay: 5}
skip_fail: false
{{ SHARED.RUN_NEW_TEMPEST() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
index f802f06..cd2ab78 100644
--- a/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
@@ -211,23 +211,6 @@
retry: {count: 10, delay: 30}
skip_fail: false
-
- # Upload cirros image
-
-- description: Upload cirros image on ctl01
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Register image in glance
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
- description: Create net04_external
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
'. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
@@ -277,20 +260,6 @@
retry: {count: 1, delay: 30}
skip_fail: false
-#- description: Allow all tcp
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: Allow all icmp
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-
- description: sync time
cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
'service ntp stop; ntpd -gq; service ntp start'
@@ -298,79 +267,6 @@
retry: {count: 1, delay: 30}
skip_fail: false
-# Configure cinder-volume salt-call PROD-13167
-#- description: Set disks 01
-# cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-# node_name: {{ HOSTNAME_CTL01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: Set disks 02
-# cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-# node_name: {{ HOSTNAME_CTL02 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: Set disks 03
-# cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-# node_name: {{ HOSTNAME_CTL03 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: Create partitions 01
-# cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-# node_name: {{ HOSTNAME_CTL01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: Create partitions 02
-# cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-# node_name: {{ HOSTNAME_CTL02 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: Create partitions 03
-# cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-# node_name: {{ HOSTNAME_CTL03 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: create volume_group
-# cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: Install cinder-volume
-# cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-- description: Install crudini
- cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-#- description: Temporary WR set enabled backends value 01
-# cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-# node_name: {{ HOSTNAME_CTL01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: Temporary WR set enabled backends value 02
-# cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-# node_name: {{ HOSTNAME_CTL02 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: Temporary WR set enabled backends value 03
-# cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-# node_name: {{ HOSTNAME_CTL03 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-
- description: Install docker.io on gtw
cmd: salt-call cmd.run 'apt-get install docker.io -y'
node_name: {{ HOSTNAME_GTW01 }}
@@ -383,25 +279,6 @@
retry: {count: 1, delay: 30}
skip_fail: false
-#- description: Restart cinder volume
-# cmd: |
-# salt -C 'I@cinder:controller' service.restart cinder-volume;
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 2, delay: 5}
-# skip_fail: false
-
-- description: create rc file on cfg
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
- description: Install manila-api on first node
cmd: |
salt -C 'I@manila:api and *01*' state.sls manila.api;
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
index bc3f4d3..3f15080 100644
--- a/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
@@ -168,13 +168,6 @@
retry: {count: 1, delay: 5}
skip_fail: false
-#- description: Install gnocchi statsd (optional)
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gnocchi:statsd and *01*' state.sls gnocchi.statsd &&
-# salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@gnocchi:statsd' state.sls gnocchi.statsd
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 5}
-# skip_fail: false
-
- description: Install panko server
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@panko:server and *01*' state.sls panko &&
salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@panko:server' state.sls panko
@@ -216,7 +209,6 @@
retry: {count: 1, delay: 5}
skip_fail: true
-
# Install compute node
- description: Apply formulas for compute node
@@ -287,20 +279,6 @@
retry: {count: 1, delay: 30}
skip_fail: false
-#- description: Allow all tcp
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; openstack security group rule create --proto tcp --dst-port 22 default'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: Allow all icmp
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
-# '. /root/keystonercv3; openstack security group rule create --proto icmp default'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-
- description: sync time
cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
'service ntp stop; ntpd -gq; service ntp start'
@@ -308,86 +286,6 @@
retry: {count: 1, delay: 30}
skip_fail: false
-# Configure cinder-volume salt-call
-#- description: Set disks 01
-# cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-# node_name: {{ HOSTNAME_CTL01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: Set disks 02
-# cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-# node_name: {{ HOSTNAME_CTL02 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: Set disks 03
-# cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
-# node_name: {{ HOSTNAME_CTL03 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: Create partitions 01
-# cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-# node_name: {{ HOSTNAME_CTL01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: Create partitions 02
-# cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-# node_name: {{ HOSTNAME_CTL02 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: Create partitions 03
-# cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
-# node_name: {{ HOSTNAME_CTL03 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: create volume_group
-# cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: Install cinder-volume
-# cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-
-- description: Install crudini
- cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-#- description: Temporary WR set enabled backends value 01
-# cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-# node_name: {{ HOSTNAME_CTL01 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: Temporary WR set enabled backends value 02
-# cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-# node_name: {{ HOSTNAME_CTL02 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: Temporary WR set enabled backends value 03
-# cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
-# node_name: {{ HOSTNAME_CTL03 }}
-# retry: {count: 1, delay: 30}
-# skip_fail: false
-#
-#- description: Restart cinder volume
-# cmd: |
-# salt -C 'I@cinder:controller' service.restart cinder-volume;
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 2, delay: 5}
-# skip_fail: false
-
- description: Install docker.io on gtw
cmd: salt-call cmd.run 'apt-get install docker.io -y'
node_name: {{ HOSTNAME_GTW01 }}
@@ -400,18 +298,6 @@
retry: {count: 1, delay: 30}
skip_fail: false
-- description: create rc file on cfg
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
- description: Install manila-api on first node
cmd: |
salt -C 'I@manila:api and *01*' state.sls manila.api;
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/openstack.yaml b/tcp_tests/templates/virtual-mcp-sl-os/openstack.yaml
index fe35460..362804f 100644
--- a/tcp_tests/templates/virtual-mcp-sl-os/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-sl-os/openstack.yaml
@@ -23,13 +23,13 @@
skip_fail: false
- description: Restart apache due to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl restart apache2"
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "service apache2 restart"
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 15}
skip_fail: false
- description: Check apache status to PROD-10477
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "systemctl status apache2"
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "service apache2 status"
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 15}
skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml b/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml
index 19e74bd..9084d80 100644
--- a/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-sl-os/salt.yaml
@@ -4,6 +4,7 @@
{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
+{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
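+# OVERRIDES is a newline-separated list of 'key: value' pairs; each line must contain exactly one ':' (spaces are stripped before splitting)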
{% import 'shared-salt.yaml' as SHARED with context %}
@@ -17,6 +18,19 @@
{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+{%- if OVERRIDES != '' %}
+{%- for param in OVERRIDES.splitlines() %}
+{%- set key, value = param.replace(' ','').split(':') %}
+- description: Override cluster parameters
+ cmd: |
+ salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name=/srv/salt/reclass/classes/cluster/overrides.yml
+ salt '*' saltutil.refresh_pillar
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+{%- endfor %}
+{%- endif %}
+
{{ SHARED.MACRO_GENERATE_INVENTORY() }}
{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
diff --git a/tcp_tests/templates/virtual-mcp-sl-os/underlay.yaml b/tcp_tests/templates/virtual-mcp-sl-os/underlay.yaml
index c2f7bd1..0782243 100644
--- a/tcp_tests/templates/virtual-mcp-sl-os/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-sl-os/underlay.yaml
@@ -301,84 +301,6 @@
interfaces: *interfaces
network_config: *network_config
- - name: {{ HOSTNAME_MON01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_MON03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 3
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- name: {{ HOSTNAME_PRX01 }}
role: salt_minion
params:
@@ -510,3 +432,81 @@
interfaces: *all_interfaces
network_config: *all_network_config
+
+ - name: {{ HOSTNAME_MON01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
diff --git a/tcp_tests/templates/virtual-mcp-trusty/Readme.txt b/tcp_tests/templates/virtual-mcp-trusty/Readme.txt
new file mode 100644
index 0000000..fc9f978
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-trusty/Readme.txt
@@ -0,0 +1,5 @@
+Template for deploying mitaka models with trusty:
+- virtual-mcp-mitaka-dvr-trusty
+- virtual-mcp-mitaka-ovs-trusty
+
+Used by maintenance team.
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-mcp-trusty/common-services.yaml b/tcp_tests/templates/virtual-mcp-trusty/common-services.yaml
new file mode 100644
index 0000000..7c1d58e
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-trusty/common-services.yaml
@@ -0,0 +1,118 @@
+{% from 'virtual-mcp-trusty/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+# Install support services
+- description: Install keepalived on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster and *01*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Install keepalived
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Check the VIP
+ cmd: |
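+ # Check that the keepalived VIP (openstack_control_address) is raised on one of the cluster nodes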
+ OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
+ echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install glusterfs
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.service
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Setup glusterfs on primary controller
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check the gluster status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check the rabbitmq status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on first server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:master' state.sls galera
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on other servers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:slave' state.sls galera -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check mysql status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+- description: Install haproxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' state.sls haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check haproxy status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.status haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Restart rsyslog
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.restart rsyslog
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install memcached on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@memcached:server' state.sls memcached
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-trusty/openstack.yaml b/tcp_tests/templates/virtual-mcp-trusty/openstack.yaml
new file mode 100644
index 0000000..c1eb756
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-trusty/openstack.yaml
@@ -0,0 +1,302 @@
+{% from 'virtual-mcp-trusty/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-trusty/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'virtual-mcp-trusty/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'virtual-mcp-trusty/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'virtual-mcp-trusty/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+
+# Install OpenStack control services
+
+- description: Install glance on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glance -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 30}
+ skip_fail: false
+
+- description: Install keystone service (note that different fernet keys are created on different nodes)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 15}
+ skip_fail: false
+
+- description: Restart apache due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "service apache2 restart"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Check apache status to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run "service apache2 status"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Mount glusterfs.client volumes (requires the 'keystone' and 'glusterfs' system users to be created)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glance:server' state.sls glusterfs.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Update fernet keys for keystone server on the mounted glusterfs volume
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Populate keystone services/tenants/admins
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:client' state.sls keystone.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check keystone service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack service list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check glance image-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install nova on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nova:controller' state.sls nova -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check nova service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 5}
+ skip_fail: false
+
+
+- description: Install cinder
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@cinder:controller' state.sls cinder -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check cinder list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install neutron service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@neutron:server' state.sls neutron -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install neutron on gtw node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@neutron:gateway' state.sls neutron
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+# install designate
+- description: Install bind
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@bind:server' state.sls bind
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Install designate
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@designate:server' state.sls designate -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 10}
+ skip_fail: true
+
+- description: Check neutron agent-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+
+- description: Install heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@heat:server' state.sls heat -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check heat service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonercv3; openstack orchestration resource type list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 10}
+ skip_fail: false
+
+
+- description: Deploy horizon dashboard
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@horizon:server' state.sls horizon
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Deploy nginx proxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nginx:server' state.sls nginx
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+# Install compute node
+
+- description: Apply formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Re-apply (as in the docs) formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Check IP on computes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+ 'ip a'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 10, delay: 30}
+ skip_fail: false
+
+
+# Upload cirros image
+
+- description: Upload cirros image on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 30}
+ skip_fail: false
+
+- description: Register image in glance
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create net04_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create router
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-create net04_router01'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set gateway
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Add interface
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+#- description: Allow all tcp
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+# '. /root/keystonercv3; openstack security group rule create --proto tcp --dst-port 22 default'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
+#
+#- description: Allow all icmp
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+# '. /root/keystonercv3; openstack security group rule create --proto icmp default'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
+
+- description: sync time
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+ 'service ntp stop; ntpd -gq; service ntp start'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: true
+
+- description: Install docker.io on gtw
+ cmd: salt-call cmd.run 'apt-get install docker.io -y'
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Enable forward policy on gtw
+ cmd: |
+ set -e;
+ iptables --policy FORWARD ACCEPT;
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: create rc file on cfg
+ cmd: scp ctl01:/root/keystonercv3 /root
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Copy rc file
+ cmd: scp /root/keystonercv3 gtw01:/root
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-trusty/salt.yaml b/tcp_tests/templates/virtual-mcp-trusty/salt.yaml
new file mode 100644
index 0000000..db9ba1c
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-trusty/salt.yaml
@@ -0,0 +1,48 @@
+{% from 'virtual-mcp-trusty/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-trusty/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'virtual-mcp-trusty/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# Other salt model repository parameters see in shared-salt.yaml
+{% set OVERRIDES = os_env('OVERRIDES', 'override_example: true') %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "docker" "kibana"') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
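+# Apply each 'key: value' line from OVERRIDES as a reclass cluster parameter, then refresh pillars on all minions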
+{%- if OVERRIDES != '' %}
+{%- for param in OVERRIDES.splitlines() %}
+{%- set key, value = param.replace(' ','').split(':') %}
+- description: Override cluster parameters
+ cmd: |
+ salt-call reclass.cluster_meta_set name='{{ key }}' value='{{ value }}' file_name=/srv/salt/reclass/classes/cluster/overrides.yml
+ salt '*' saltutil.refresh_pillar
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+{%- endfor %}
+{%- endif %}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+- description: WR run linux state to fix hosts
+ cmd: salt "cfg*" state.sls linux
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
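+# linux-image-extra-$(uname -r) provides additional kernel modules for the running kernel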
+- description: Install linux-image-extra package on controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@cinder:controller' cmd.run 'apt -y install linux-image-extra-$(uname -r)'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-trusty/sl.yaml b/tcp_tests/templates/virtual-mcp-trusty/sl.yaml
new file mode 100644
index 0000000..c517c63
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-trusty/sl.yaml
@@ -0,0 +1,176 @@
+{% from 'virtual-mcp-trusty/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
+
+# Install docker swarm
+- description: Configure docker service
+ cmd: salt -C 'I@docker:swarm' state.sls docker.host
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install docker swarm on master node
+ cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Send grains to the swarm slave nodes
+ cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Update mine
+ cmd: salt -C 'I@docker:swarm' mine.update
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Refresh modules
+ cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Rerun swarm on slaves for proper token population
+ cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure slave nodes
+ cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: List registered Docker swarm nodes
+ cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install keepalived on mon nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'mon*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check the VIP on mon nodes
+ cmd: |
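+ # Check that the StackLight VIP is raised on one of the mon nodes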
+ SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+ echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+# Install slv2 infra
+- description: Install telegraf
+ cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Configure Prometheus exporters
+ cmd: salt -C 'I@prometheus:exporters' state.sls prometheus
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure collector
+ cmd: salt -C 'I@fluentd:agent' state.sls fluentd
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install elasticsearch server
+ cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install kibana server
+ cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install elasticsearch client
+ cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install kibana client
+ cmd: salt -C 'I@kibana:client' state.sls kibana.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check influxdb
+ cmd: |
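+ # Apply the influxdb state only if a minion matching 'I@influxdb:server' responds to test.ping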
+ INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
+ if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
+ salt -C 'I@influxdb:server' state.sls influxdb
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+# Collect grains needed to configure the services
+
+- description: Get grains
+ cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Sync modules
+ cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Update mine
+ cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+# Configure the services running in Docker Swarm
+- description: Install prometheus alertmanager
+ cmd: salt -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: run docker state
+ cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: docker ps
+ cmd: salt -C 'I@docker:swarm' dockerng.ps
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure Grafana dashboards and datasources
+ cmd: sleep 30; salt -C 'I@grafana:client' state.sls grafana.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Run salt minion to create cert files
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/virtual-mcp-trusty/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp-trusty/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-trusty/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+ instance-id: iid-local1
+ hostname: {hostname}
+ local-hostname: {hostname}
diff --git a/tcp_tests/templates/virtual-mcp-trusty/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-trusty/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..da0761b
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-trusty/underlay--user-data-cfg01.yaml
@@ -0,0 +1,90 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Block access to SSH while node is preparing
+ - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ - sudo ifdown ens3
+ - sudo ip r d default || true # remove existing default route to get it from dhcp
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ ############## TCP Cloud cfg01 node ##################
+ #- sleep 120
+ - echo "Preparing base OS"
+
+ - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+ - which wget >/dev/null || (apt-get update; apt-get install -y wget);
+
+ # Configure Ubuntu mirrors
+ - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial main restricted universe" > /etc/apt/sources.list
+ - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-updates main restricted universe" >> /etc/apt/sources.list
+ - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ xenial-security main restricted universe" >> /etc/apt/sources.list
+
+ - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+ - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+ - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+ - apt-get clean
+ - apt-get update
+
+ # Install common packages
+ - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree
+
+ ########################################################
+ # Node is ready, allow SSH access
+ - echo "Allow SSH access ..."
+ - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ ########################################################
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
+ - path: /root/.ssh/config
+ owner: root:root
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 300
+ ServerAliveCountMax 10
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/virtual-mcp-trusty/underlay--user-data1404.yaml b/tcp_tests/templates/virtual-mcp-trusty/underlay--user-data1404.yaml
new file mode 100644
index 0000000..a4acaba
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-trusty/underlay--user-data1404.yaml
@@ -0,0 +1,80 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Block access to SSH while node is preparing
+ - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ - sudo ifup eth0
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+
+ ############## TCP Cloud cfg01 node ##################
+ #- sleep 120
+ - echo "Preparing base OS"
+ - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+ # Configure Ubuntu mirrors
+ - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ trusty main restricted universe" > /etc/apt/sources.list
+ - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ trusty-updates main restricted universe" >> /etc/apt/sources.list
+ - echo "deb [arch=amd64] http://mirror.mirantis.com/{{ REPOSITORY_SUITE }}/ubuntu/ trusty-security main restricted universe" >> /etc/apt/sources.list
+
+ - echo "deb [arch=amd64] http://apt.mirantis.com/trusty {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+ - echo "deb http://repo.saltstack.com/apt/ubuntu/14.04/amd64/2016.3 trusty main" > /etc/apt/sources.list.d/saltstack.list;
+ - wget -O - https://repo.saltstack.com/apt/ubuntu/14.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+ - apt-get clean
+ - eatmydata apt-get update && apt-get -y upgrade
+
+ # Install common packages
+ - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc
+
+ ########################################################
+ # Node is ready, allow SSH access
+ - echo "Allow SSH access ..."
+ - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ ########################################################
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ auto eth0
+ iface eth0 inet dhcp
+
diff --git a/tcp_tests/templates/virtual-mcp-trusty/underlay.yaml b/tcp_tests/templates/virtual-mcp-trusty/underlay.yaml
new file mode 100644
index 0000000..878a8cd
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-trusty/underlay.yaml
@@ -0,0 +1,434 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% import 'virtual-mcp-trusty/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'virtual-mcp-trusty/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'virtual-mcp-trusty/underlay--user-data1404.yaml' as CLOUDINIT_USER_DATA1404 with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1404 {{ CLOUDINIT_USER_DATA1404 }}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-trusty') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', 'virtual-mcp-trusty_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+ address_pools:
+ private-pool01:
+ net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +107
+ default_{{ HOSTNAME_MON02 }}: +108
+ default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
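+ # dhcp range bounds are offsets within the pool: +90 from the network address, -10 from the last address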
+
+ admin-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +90
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +107
+ default_{{ HOSTNAME_MON02 }}: +108
+ default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
+
+ tenant-pool01:
+ net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +107
+ default_{{ HOSTNAME_MON02 }}: +108
+ default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+10, -10]
+
+ external-pool01:
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +107
+ default_{{ HOSTNAME_MON02 }}: +108
+ default_{{ HOSTNAME_MON03 }}: +109
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+10, -10]
+
+
+ groups:
+ - name: default
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+ use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+ network_pools:
+ admin: admin-pool01
+ private: private-pool01
+ tenant: tenant-pool01
+ external: external-pool01
+
+ l2_network_devices:
+ private:
+ address_pool: private-pool01
+ dhcp: false
+ forward:
+ mode: route
+
+ admin:
+ address_pool: admin-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
+ tenant:
+ address_pool: tenant-pool01
+ dhcp: false
+
+ external:
+ address_pool: external-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
+
+ group_volumes:
+ - name: cloudimage1404 # This name is used for 'backing_store' option for node volumes.
+ source_image: !os_env IMAGE_PATH1404 # https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
+ format: qcow2
+ - name: cfg01_day01_image # Pre-configured day01 image
+ source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+ format: qcow2
+
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cfg01_day01_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_cfg01
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+
+ - name: {{ HOSTNAME_CTL01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 12288
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1404
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1404
+
+ interfaces: &interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config: &network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+
+ - name: {{ HOSTNAME_CTL02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 12288
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1404
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1404
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CTL03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 12288
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1404
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1404
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_PRX01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1404
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1404
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CMP01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1404
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1404
+
+
+ interfaces: &all_interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ - label: ens5
+ l2_network_device: tenant
+ interface_model: *interface_model
+ - label: ens6
+ l2_network_device: external
+ interface_model: *interface_model
+ network_config: &all_network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+ ens5:
+ networks:
+ - tenant
+ ens6:
+ networks:
+ - external
+
+ - name: {{ HOSTNAME_CMP02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1404
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1404
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_GTW01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1404
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1404
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
diff --git a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
index 59e348b..53dc1c1 100644
--- a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
+++ b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
@@ -260,3 +260,18 @@
)
openstack_actions.download_tempest_report()
LOG.info("*************** DONE **************")
+
+ @pytest.mark.fail_snapshot
+ def test_bm_deploy(self, config, openstack_deployed,
+ openstack_actions):
+ """Test for deploying an mcp environment on baremetal
+
+ """
+ openstack_actions._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+
+ if settings.RUN_TEMPEST:
+ openstack_actions.run_tempest(pattern=settings.PATTERN)
+ openstack_actions.download_tempest_report()
+ LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_install_mcp_sl_os.py b/tcp_tests/tests/system/test_install_mcp_sl_os.py
index d416875..54a92f0 100644
--- a/tcp_tests/tests/system/test_install_mcp_sl_os.py
+++ b/tcp_tests/tests/system/test_install_mcp_sl_os.py
@@ -41,8 +41,9 @@
args='service ntp stop; ntpd -gq; service ntp start')
if settings.RUN_TEMPEST:
- openstack_actions.run_tempest(pattern=settings.PATTERN)
- openstack_actions.download_tempest_report()
+ openstack_actions.run_tempest(pattern=settings.PATTERN,
+ target='cfg01')
+ openstack_actions.download_tempest_report(stored_node='cfg01')
LOG.info("*************** DONE **************")
@pytest.mark.grab_versions
diff --git a/tcp_tests/tests/system/test_offline.py b/tcp_tests/tests/system/test_offline.py
index e94188a..51757fd 100644
--- a/tcp_tests/tests/system/test_offline.py
+++ b/tcp_tests/tests/system/test_offline.py
@@ -12,9 +12,15 @@
# License for the specific language governing permissions and limitations
# under the License.
# import pytest
+import time
+
+from collections import Counter
from tcp_tests import logger
from tcp_tests.managers.jenkins.client import JenkinsClient
+from tcp_tests import settings
+
+from tcp_tests import managers
LOG = logger.logger
@@ -22,119 +28,222 @@
class TestOfflineDeployment(object):
"""docstring for TestOfflineDeployment"""
- def test_deploy_day1(self, show_step, underlay, common_services_deployed,
- salt_deployed):
+ def test_deploy_day1(self, show_step, config, underlay, hardware,
+ common_services_deployed, salt_deployed):
"""Test for deploying an mcp from day01 images
Scenario:
- 1. Approve local ssh key to jenkins
- 2. Boot CFG and APT virtual machines
- 3. Setup jq
- 4. Wait salt master
- 5. Addition config of MaaS
- 6. Wait dhcpd server
- 7. Start comissioning node via MaaS
- 8. Wait of comissioning node by MaaS
- 9. Start deploing node via MaaS
- 10. Wait of deploing node by
- 11. Accept all keys
- 12. Run deploy OS job
+ 1. Wait for salt master
+ 2. Additional configuration of MaaS
+ 3. Wait for dhcpd server
+ 4. Start commissioning nodes via MaaS
+ 5. Wait for nodes to be commissioned by MaaS
+ 6. Start deploying nodes via MaaS
+ 7. Wait for nodes to be deployed by MaaS
+ 8. Accept all keys
+ 9. Configure baremetal nodes after MaaS deployment
+ 10. Run deploy OS job
"""
# group = hardware._get_default_node_group()
nodes = underlay.node_names()
LOG.info("Nodes - {}".format(nodes))
cfg_node = 'cfg01.offline-ocata-vxlan.local'
+ tempest_node = 'gtw01.offline-ocata-vxlan.local'
verbose = True
+ ssh_test_key = config.underlay.ssh_keys[0]['public']
- # show_step(1)
- # cmd = ("mkdir -p /var/lib/jenkins/.ssh && "
- # "ssh-keyscan cfg01 > /var/lib/jenkins/.ssh/known_hosts && "
- # "chown jenkins /var/lib/jenkins/.ssh/known_hosts")
+ show_step(1)
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd="""timeout 300s /bin/bash -c """
+ """'while ! salt-call test.ping; do """
+ """echo "salt master still isnt running"; sleep 10; done'"""
+ ) # noqa
+
+ show_step(2)
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd='salt-call saltutil.sync_all')
+
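+ # Authorize the test SSH key and the salt master's own public key for 'root' and 'ubuntu' on all minions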
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd="salt '*' ssh.set_auth_key root '{}'".format(ssh_test_key))
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd='salt "*" ssh.set_auth_key root '
+ '"$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"')
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd="salt '*' ssh.set_auth_key ubuntu '{}'".format(ssh_test_key))
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd='salt "*" ssh.set_auth_key ubuntu '
+ '"$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"')
+
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd='salt-call state.sls maas.region')
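+ # Re-login to the local MaaS API; the key is in '<consumer>:<token>:<secret>' form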
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd='maas logout mirantis && '
+ 'maas login mirantis '
+ 'http://localhost:5240/MAAS/api/2.0/ '
+ 'FTvqwe7ybBp68gPar2:5mcctTAXVL8mns4ef4:zrA9LZwu2tMc8BAZpsPUfwWwTyQnAtDN' # noqa
+ )
+
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd="maas mirantis maas set-config "
+ "name=upstream_dns value='10.10.0.15 8.8.8.8 8.8.4.4'")
+
# underlay.check_call(
- # node_name=cfg_node, verbose=verbose,
- # cmd=cmd)
+ # node_name=cfg_node,
+ # verbose=verbose,
+ # cmd="maas mirantis ipranges create "
+ # "type=dynamic start_ip=10.10.191.255 end_ip=10.10.255.254 "
+ # "subnet=$(maas mirantis subnets read | jq '.[] | "
+ # "select(.name==\"10.10.0.0/16\") | .id')")
- # show_step(2)
- # underlay.check_call(node_name=cfg_node, verbose=verbose,
- # cmd='salt-key')
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd="maas mirantis vlan update "
+ "$(maas mirantis subnets read | jq '.[] | "
+ "select(.name==\"10.10.0.0/16\") | .vlan.fabric_id') "
+ "0 dhcp_on=True primary_rack='cfg01'")
- # show_step(3)
- # underlay.check_call(node_name=cfg_node, verbose=verbose,
- # cmd='apt install -y jq')
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd="ssh-keygen -y -f ~root/.ssh/id_rsa > ~root/.ssh/id_rsa.pub")
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd='maas mirantis sshkeys create '
+ 'key="$(cat ~root/.ssh/id_rsa.pub)"')
+
+ show_step(3)
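+ # Wait up to 90s for the MaaS dhcpd process to appear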
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd="""timeout 90s /bin/bash -c 'while ! pidof dhcpd; do """
+ """echo "dhcpd still isnt running"; sleep 10; done'""")
show_step(4)
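+ # The maas.machines state enlists and commissions the baremetal
+ # machines described in the MaaS pillar data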
underlay.check_call(
node_name=cfg_node,
verbose=verbose,
- cmd="""timeout 300s /bin/bash -c 'while ! salt-call test.ping; do echo "salt master still isnt running"; sleep 10; done'""") # noqa
-
+ cmd='salt-call state.sls maas.machines')
show_step(5)
- underlay.check_call(node_name=cfg_node, verbose=verbose,
- cmd='salt-call saltutil.sync_all')
- underlay.check_call(node_name=cfg_node, verbose=verbose,
- cmd='salt-call state.sls maas.region')
- underlay.check_call(
- node_name=cfg_node, verbose=verbose,
- cmd='maas logout mirantis && '
- 'maas login mirantis '
- 'http://localhost/MAAS/api/2.0/ '
- 'FTvqwe7ybBp68gPar2:5mcctTAXVL8mns4ef4:zrA9LZwu2tMc8BAZpsPUfwWwTyQnAtDN') # noqa
-
- underlay.check_call(
- node_name=cfg_node, verbose=verbose,
- cmd="maas mirantis ipranges create "
- "type=dynamic start_ip=10.10.191.255 end_ip=10.10.255.254 "
- "subnet=$(maas mirantis subnets read | jq '.[] | select(.name==\"10.10.0.0/16\") | .id')") # noqa
- underlay.check_call(node_name=cfg_node, verbose=verbose,
- cmd="maas mirantis vlan update "
- "$(maas mirantis subnets read | jq '.[] | select(.name==\"10.10.0.0/16\") | .vlan.fabric_id') " # noqa
- "0 dhcp_on=True primary_rack='cfg01'")
-
- underlay.check_call(
- node_name=cfg_node, verbose=verbose,
- cmd="ssh-keygen -y -f ~root/.ssh/id_rsa > ~root/.ssh/id_rsa.pub")
- underlay.check_call(
- node_name=cfg_node, verbose=verbose,
- cmd='maas mirantis sshkeys create '
- 'key="$(cat ~root/.ssh/id_rsa.pub)"')
-
- show_step(6)
- underlay.check_call(node_name=cfg_node, verbose=verbose,
- cmd="""timeout 90s /bin/bash -c 'while ! pidof dhcpd; do echo "dhcpd still isnt running"; sleep 10; done'""") # noqa
-
- show_step(7)
- underlay.check_call(node_name=cfg_node, verbose=verbose,
- cmd='salt-call state.sls maas.machines')
- show_step(8)
- cmd = """ timeout 600s bash -c 'hosts=$(maas mirantis nodes read | jq -r ".[] | select(.node_type_name==\\"Machine\\") | select(.status_name==\\"Ready\\") | .hostname "); while ! [ $(echo "$hosts" | wc -w) -eq 10 ]; do echo "Ready hosts:\n$hosts"; sleep 30; hosts=$(maas mirantis nodes read | jq -r ".[] | select(.node_type_name==\\"Machine\\") | select(.status_name==\\"Ready\\") | .hostname "); done ' """ # noqa
+ cmd = """ timeout 600s bash -c 'hosts=$(maas mirantis nodes read | jq -r ".[] | select(.node_type_name==\\"Machine\\") | select(.status_name==\\"Ready\\") | .hostname "); while ! [ $(echo "$hosts" | wc -w) -eq 10 ]; do echo "Ready hosts:\n$hosts"; sleep 30; hosts=$(maas mirantis nodes read | jq -r ".[] | select(.node_type_name==\\"Machine\\") | select(.status_name==\\"Ready\\") | .hostname "); done ' """ # noqa
underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
- underlay.check_call(node_name=cfg_node, verbose=verbose,
- cmd='salt-key')
- show_step(9)
underlay.check_call(
- node_name=cfg_node, verbose=verbose,
+ node_name=cfg_node, verbose=verbose, cmd='salt-key')
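+ # Assign static IPs to the commissioned machines before deployment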
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd='salt-call state.sls maas.machines.assign_ip')
+ show_step(6)
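+ # Start the OS deployment on all Ready machines via MaaS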
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
cmd='salt-call state.sls maas.machines.deploy')
- show_step(10)
+ show_step(7)
underlay.check_call(
- node_name=cfg_node, verbose=verbose,
+ node_name=cfg_node,
+ verbose=verbose,
cmd='salt-call state.sls maas.machines.wait_for_deployed')
- underlay.check_call(node_name=cfg_node, verbose=verbose,
- cmd='salt-key')
-
- show_step(11)
underlay.check_call(
- node_name=cfg_node, verbose=verbose, expected=[0, 1],
+ node_name=cfg_node, verbose=verbose, cmd='salt-key')
+
+ show_step(8)
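+ # Accept all pending minion keys; exit code 1 is tolerated when there
+ # is nothing left to accept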
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ expected=[0, 1],
cmd='salt-key -A -y --include-denied --include-rejected')
underlay.check_call(
- node_name=cfg_node, verbose=verbose,
- cmd='salt-key')
+ node_name=cfg_node, verbose=verbose, cmd='salt-key')
+
+ show_step(9)
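+ # Refresh pillar data and re-sync modules on the freshly deployed minions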
+ cmd = "salt '*' saltutil.refresh_pillar"
+ underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
+ cmd = "salt '*' saltutil.sync_all"
+ underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
+
+ underlay.check_call(
+ node_name=cfg_node, verbose=verbose, cmd="reclass-salt --top")
+
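+ # Drop a marker file into /run (tmpfs) on every baremetal node; a real
+ # reboot wipes it, which is verified below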
+ cmd = "salt -C " \
+ "'I@salt:control or I@nova:compute or I@neutron:gateway' " \
+ "cmd.run 'touch /run/is_rebooted'"
+ underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
+
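+ # Re-apply the base system states and reboot each node group; --async
+ # keeps the salt master from blocking on the reboot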
+ cmd = "salt --async -C " \
+ "'I@salt:control' cmd.run 'salt-call state.sls " \
+ "linux.system.user,openssh,linux.network;reboot'"
+ underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
+
+ cmd = "salt --async -C " \
+ "'I@nova:compute' cmd.run 'salt-call state.sls " \
+ "linux.system.user,openssh,linux.network;reboot'"
+ underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
+
+ cmd = "salt --async -C " \
+ "'I@neutron:gateway' cmd.run 'salt-call state.sls " \
+ "linux.system.user,openssh,linux.network;reboot'"
+ underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
+
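+ # The fixed sleep below could be replaced with an explicit waiter.
+ # A minimal sketch (untested; assumes devops' helpers.wait is importable
+ # here and that underlay.check_call accepts raise_on_err, as elsewhere
+ # in tcp_tests):
+ # from devops.helpers import helpers
+ # helpers.wait(
+ #     lambda: underlay.check_call(
+ #         node_name=cfg_node, verbose=False, raise_on_err=False,
+ #         cmd="salt -C 'I@salt:control or I@nova:compute or "
+ #             "I@neutron:gateway' test.ping")['exit_code'] == 0,
+ #     interval=30, timeout=600,
+ #     timeout_msg='Rebooted nodes did not answer test.ping in time')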
+ time.sleep(360) # TODO: Add ssh waiter
+
+ cmd = "salt -C " \
+ "'I@salt:control or I@nova:compute or I@neutron:gateway'" \
+ " test.ping"
+ underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
+
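+ # /run is cleared at boot, so a surviving marker means the node never
+ # rebooted; count the per-node answers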
+ cmd = """salt -C """ \
+ """'I@salt:control or I@nova:compute or I@neutron:gateway' """ \
+ """cmd.run '[ -f "/run/is_rebooted" ] && """ \
+ """echo "Has not been rebooted!" || echo "Rebooted"' """
+ ret = underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
+ count = Counter(ret['stdout_str'].split())
+
+ assert count['Rebooted'] == 10, \
+ "All 10 baremetal nodes should have been rebooted"
+
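+ # Re-authorize the SSH keys: the linux.system.user state applied before
+ # the reboot may have rewritten authorized_keys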
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd="salt '*' ssh.set_auth_key root '{}'".format(ssh_test_key))
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd='salt "*" ssh.set_auth_key root '
+ '"$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"')
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd="salt '*' ssh.set_auth_key ubuntu '{}'".format(ssh_test_key))
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd='salt "*" ssh.set_auth_key ubuntu '
+ '"$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"')
salt_api = \
salt_deployed.get_pillar(cfg_node, '_param:jenkins_salt_api_url')
salt_api = salt_api[0].get(cfg_node)
- show_step(12)
+ show_step(10)
jenkins = JenkinsClient(
host='http://172.16.44.33:8081',
username='admin',
@@ -144,11 +253,56 @@
build = jenkins.run_build('deploy_openstack', params)
jenkins.wait_end_of_build(
- name=build[0],
- build_id=build[1],
- timeout=60 * 60 * 2)
+ name=build[0], build_id=build[1], timeout=60 * 60 * 2)
+
+ with open("{path}/cfg01_jenkins_deploy_openstack_console.log".format(
+ path=settings.LOGS_DIR), 'w') as f:
+ LOG.info("Save jenkins console log")
+ console_log = \
+ jenkins.get_build_output('deploy_openstack', build[1])
+ f.write(console_log)
assert \
jenkins.build_info(
name=build[0], build_id=build[1])['result'] == 'SUCCESS', \
"Deploy openstack was failed"
+
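+ # The deploy job may have re-provisioned users on the nodes, so
+ # authorize the keys once more before collecting SSH access data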
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd="salt '*' ssh.set_auth_key root '{}'".format(ssh_test_key))
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd='salt "*" ssh.set_auth_key root '
+ '"$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"')
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd="salt '*' ssh.set_auth_key ubuntu '{}'".format(ssh_test_key))
+ underlay.check_call(
+ node_name=cfg_node,
+ verbose=verbose,
+ cmd='salt "*" ssh.set_auth_key ubuntu '
+ '"$(ssh-keygen -y -f ~/.ssh/id_rsa | cut -d " " -f 2)"')
+
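+ # Extend the underlay SSH config with the newly deployed nodes,
+ # skipping entries that are already present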
+ salt_nodes = salt_deployed.get_ssh_data()
+ nodes_list = \
+ [node for node in salt_nodes
+ if not any(node['node_name'] == n['node_name']
+ for n in config.underlay.ssh)]
+ config.underlay.ssh = config.underlay.ssh + nodes_list
+ underlay.add_config_ssh(nodes_list)
+
+ time.sleep(120) # debug sleep
+ cmd = "salt '*' test.ping"
+ underlay.check_call(node_name=cfg_node, verbose=verbose, cmd=cmd)
+
+ openstack = managers.openstack_manager.OpenstackManager(
+ config, underlay, hardware, salt_deployed)
+
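+ # Optionally run tempest from the gateway node and fetch the report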
+ if settings.RUN_TEMPEST:
+ openstack.run_tempest(
+ pattern=settings.PATTERN,
+ node_name=tempest_node)
+ openstack.download_tempest_report()