Merge "Changes for cookied-bm-contrail40 and cookied-bm-contrail40-nfv models:"
diff --git a/tcp_tests/managers/k8s/ingresses.py b/tcp_tests/managers/k8s/ingresses.py
index 906dc31..5dd353c 100644
--- a/tcp_tests/managers/k8s/ingresses.py
+++ b/tcp_tests/managers/k8s/ingresses.py
@@ -14,6 +14,8 @@
from kubernetes import client
+from devops.helpers import helpers
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
@@ -41,6 +43,12 @@
self._manager.api.delete_namespaced_ingress(
self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
+ def wait_ready(self, timeout=120, interval=2):
+ helpers.wait(
+ lambda: self.read().status.load_balancer.ingress is not None,
+ timeout=timeout, interval=interval)
+ return self
+
class K8sIngressManager(K8sBaseManager):
resource_class = K8sIngress
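
The new K8sIngress.wait_ready() polls the ingress status through devops' helpers.wait until the controller publishes a load-balancer address. A minimal usage sketch, assuming an already-obtained K8sIngress object (the wrapper function and its names are illustrative, not part of this change):

    # Hypothetical helper: block until `ingress` (a K8sIngress) has an
    # address, then return it. helpers.wait raises a timeout error if
    # status.load_balancer.ingress stays None past `timeout` seconds.
    def wait_ingress_address(ingress, timeout=300, interval=5):
        ingress.wait_ready(timeout=timeout, interval=interval)
        lb = ingress.read().status.load_balancer.ingress[0]
        return lb.ip or lb.hostname
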
diff --git a/tcp_tests/managers/k8smanager.py b/tcp_tests/managers/k8smanager.py
index 102da9a..cc0a924 100644
--- a/tcp_tests/managers/k8smanager.py
+++ b/tcp_tests/managers/k8smanager.py
@@ -450,8 +450,6 @@
def get_pod_dom_uuid(self, pod):
uuid_name_map = self.virtlet_execute(
pod.read().spec.node_name, 'virsh list --uuid --name')['stdout']
- LOG.info("HEHEHEH {}".format(uuid_name_map))
- LOG.info("MDAMDMAD {}".format(pod.name))
for line in uuid_name_map:
if line.rstrip().endswith("-{}".format(pod.name)):
return line.split(" ")[0]
@@ -502,6 +500,7 @@
def expose(self, service_type='ClusterIP'):
service_name = "{0}-s{1}".format(self._deployment.name, self._index)
+ self._index += 1
self._svc = self._manager.kubectl.expose(
self._deployment, port=self._port,
service_name=service_name, service_type=service_type)
@@ -518,3 +517,9 @@
def is_service_available(self, svc=None, external=False):
return "Hello Kubernetes!" in self.curl(svc, external=external)
+
+ def delete(self):
+ for svc in self._manager.api.services.list_all(
+ name_prefix="{}-s".format(self._deployment.name)):
+ svc.delete()
+ self._deployment.delete()
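
Because expose() now increments the per-deployment index, every call produces a uniquely numbered service ("<deployment>-s0", "<deployment>-s1", ...), and the new delete() tears all of them down together with the deployment. A hedged sketch, assuming `sample` is an instance of the patched helper class (the variable name is illustrative):

    # Each expose() call registers a distinct service for the same deployment.
    sample.expose(service_type='ClusterIP')     # creates <deployment>-s0
    sample.expose(service_type='LoadBalancer')  # creates <deployment>-s1

    # The exposed endpoint can still be probed through the existing check.
    assert sample.is_service_available(external=True)

    # delete() lists every service with the "<deployment>-s" name prefix,
    # removes each one, then removes the deployment itself.
    sample.delete()
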
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index 037dbd8..d76396a 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -53,6 +53,9 @@
_default_openstack_steps = pkg_resources.resource_filename(
__name__, 'templates/{0}/openstack.yaml'.format(
settings.LAB_CONFIG_NAME))
+_default_openstack_resources_steps = pkg_resources.resource_filename(
+ __name__, 'templates/{0}/post_openstack.yaml'.format(
+ settings.LAB_CONFIG_NAME))
_default_opencontrail_prepare_tests_steps_path = \
pkg_resources.resource_filename(
__name__, 'templates/{0}/opencontrail.yaml'.format(
@@ -203,6 +206,9 @@
ct.Cfg('openstack_steps_path', ct.String(),
help="Path to YAML with steps to deploy openstack",
default=_default_openstack_steps),
+ ct.Cfg('openstack_resources_steps_path', ct.String(),
+           help="Path to YAML with steps to create openstack resources",
+ default=_default_openstack_resources_steps),
ct.Cfg('horizon_host', ct.IPAddress(),
help="", default='0.0.0.0'),
ct.Cfg('horizon_port', ct.String(),
@@ -348,6 +354,8 @@
help="", default=False),
ct.Cfg('kubernetes_metallb_enabled', ct.Boolean(),
help="", default=False),
+ ct.Cfg('kubernetes_ingressnginx_enabled', ct.Boolean(),
+ help="", default=False),
ct.Cfg('kubelet_fail_on_swap', ct.Boolean(),
help="", default=False)
]
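
The added openstack_resources_steps_path option mirrors its siblings: a pkg_resources-resolved default pointing at templates/<LAB_CONFIG_NAME>/post_openstack.yaml, registered through a ct.Cfg entry. A rough illustration of consuming it; the `config` object and the flat attribute access are assumptions modeled on how the sibling openstack_steps_path option is used, not a confirmed API:

    # Hypothetical sketch: resolve and read the post-openstack steps file.
    # `config` is assumed to be the oslo.config-backed settings object that
    # tcp_tests builds from the ct.Cfg definitions above.
    steps_path = config.openstack_resources_steps_path

    with open(steps_path) as f:
        post_openstack_steps = f.read()  # YAML with the resource-creation steps
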
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/openstack.yaml
index 089a255..cc658ba 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/openstack.yaml
@@ -15,7 +15,14 @@
{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=false) }}
+
+- description: WR Install cinder volume
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@cinder:volume' state.sls cinder
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON(INSTALL_GATEWAY=false) }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/sl.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/sl.yaml
index 37082f6..5ba2e3f 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/sl.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail-nfv/sl.yaml
@@ -1,4 +1,5 @@
{% from 'cookied-bm-mcp-ocata-contrail-nfv/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
# Install docker swarm
- description: Configure docker service
@@ -65,36 +66,31 @@
retry: {count: 1, delay: 5}
skip_fail: false
+- description: Install glusterfs client on mon nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:client' state.sls glusterfs.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 15}
+ skip_fail: false
+
# Install slv2 infra
#Launch containers
- description: Install Mongo if target matches
cmd: |
if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.server
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-- description: Configure Alerta if it is exists
+- description: Install Mongo cluster if target matches
cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:alerta' match.pillar 'prometheus:alerta' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:alerta' state.sls prometheus.alerta
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.cluster
fi
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: launch prometheus containers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Check docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
+ retry: {count: 5, delay: 20}
skip_fail: false
- description: Install telegraf
@@ -113,19 +109,31 @@
skip_fail: false
- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server:enabled:true and *01*' state.sls elasticsearch.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install elasticsearch server on all nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server:enabled:true' state.sls elasticsearch.server
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server:enabled:true and *01*' state.sls kibana.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install kibana server on all nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server:enabled:true' state.sls kibana.server
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 30}
skip_fail: false
@@ -141,23 +149,12 @@
INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server and *01*' state.sls influxdb;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb;
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install Prometheus LTS(optional if set in model)
-- description: Prometheus LTS(optional if set in model)
- cmd: |
- PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
- if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
+ skip_fail: false
# Install service for the log collection
- description: Configure fluentd
@@ -186,31 +183,23 @@
retry: {count: 1, delay: 10}
skip_fail: false
-# Collect grains needed to configure the services
+ ######################################
+ ######################################
+ ######################################
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
+- description: Collect Grains
+ cmd: |
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
+- description: Configure prometheus in docker swarm
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
+ retry: {count: 2, delay: 10}
skip_fail: false
- description: Configure Remote Collector in Docker Swarm for Openstack deployments
@@ -219,39 +208,38 @@
retry: {count: 1, delay: 10}
skip_fail: false
-- description: Install sphinx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-
-#- description: Install prometheus alertmanager
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-#- description: run docker state
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: docker ps
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
+- description: launch prometheus containers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
+- description: Check docker ps
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
+
+- description: Configure Grafana dashboards and datasources
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 60}
+ skip_fail: false
+
+- description: Configure Alerta if it exists
+ cmd: |
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:alerta' match.pillar 'prometheus:alerta' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:alerta' state.sls prometheus.alerta
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Run salt minion to create cert files
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion.cert
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
index 013fb29..c6a9449 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/openstack.yaml
@@ -15,7 +15,14 @@
{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
-{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=true) }}
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER(INSTALL_VOLUME=false) }}
+
+- description: WR Install cinder volume
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@cinder:volume' state.sls cinder
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON(INSTALL_GATEWAY=false) }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
index 925c795..0a3867a 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/sl.yaml
@@ -1,4 +1,5 @@
{% from 'cookied-bm-mcp-ocata-contrail/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
# Install docker swarm
- description: Configure docker service
@@ -65,36 +66,31 @@
retry: {count: 1, delay: 5}
skip_fail: false
+- description: Install glusterfs client on mon nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:client' state.sls glusterfs.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 15}
+ skip_fail: false
+
# Install slv2 infra
#Launch containers
- description: Install Mongo if target matches
cmd: |
if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 30}
- skip_fail: false
-
-- description: Configure Alerta if it is exists
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:alerta' match.pillar 'prometheus:alerta' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:alerta' state.sls prometheus.alerta
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.server
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-- description: launch prometheus containers
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
+- description: Install Mongo cluster if target matches
+ cmd: |
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.cluster
+ fi
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Check docker ps
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
+ retry: {count: 5, delay: 20}
skip_fail: false
- description: Install telegraf
@@ -112,29 +108,32 @@
retry: {count: 1, delay: 10}
skip_fail: false
-- description: Configure Prometheus exporters, if pillar 'prometheus:collector' exists on any server
- cmd: |
- if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:collector' match.pillar 'prometheus:collector' ; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:collector' state.sls prometheus.collector
- fi
+- description: Install elasticsearch server on first node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server:enabled:true and *01*' state.sls elasticsearch.server
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server:enabled:true' state.sls elasticsearch.server
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server:enabled:true and *01*' state.sls kibana.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install kibana server on all nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server:enabled:true' state.sls kibana.server
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 30}
skip_fail: false
@@ -150,23 +149,12 @@
INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server and *01*' state.sls influxdb;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb;
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
-
-# Install Prometheus LTS(optional if set in model)
-- description: Prometheus LTS(optional if set in model)
- cmd: |
- PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
- if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
+ skip_fail: false
# Install service for the log collection
- description: Configure fluentd
@@ -195,31 +183,23 @@
retry: {count: 1, delay: 10}
skip_fail: false
-# Collect grains needed to configure the services
+ ######################################
+ ######################################
+ ######################################
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
+- description: Collect Grains
+ cmd: |
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
+- description: Configure prometheus in docker swarm
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
+ retry: {count: 2, delay: 10}
skip_fail: false
- description: Configure Remote Collector in Docker Swarm for Openstack deployments
@@ -228,39 +208,38 @@
retry: {count: 1, delay: 10}
skip_fail: false
-- description: Install sphinx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-
-#- description: Install prometheus alertmanager
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-#- description: run docker state
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-#
-#- description: docker ps
-# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
-# node_name: {{ HOSTNAME_CFG01 }}
-# retry: {count: 1, delay: 10}
-# skip_fail: false
-
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
+- description: launch prometheus containers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 10}
skip_fail: false
-- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
+- description: Check docker ps
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 15}
+ retry: {count: 1, delay: 10}
skip_fail: false
+
+- description: Configure Grafana dashboards and datasources
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 60}
+ skip_fail: false
+
+- description: Configure Alerta if it exists
+ cmd: |
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:alerta' match.pillar 'prometheus:alerta' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:alerta' state.sls prometheus.alerta
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Run salt minion to create cert files
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion.cert
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/_context-cookiecutter-mcp-queens-dvr.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/_context-cookiecutter-mcp-queens-dvr.yaml
index 2dc0a2c..ffc9bab 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/_context-cookiecutter-mcp-queens-dvr.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/_context-cookiecutter-mcp-queens-dvr.yaml
@@ -208,4 +208,15 @@
manila_enabled: 'True'
manila_share_backend: 'lvm'
manila_lvm_volume_name: 'manila-volume'
- manila_lvm_devices: '/dev/loop1'
\ No newline at end of file
+ manila_lvm_devices: '/dev/loop1'
+ openstack_share_address: 172.16.10.68
+ openstack_share_node01_address: 172.16.10.69
+ openstack_share_node02_address: 172.16.10.70
+ openstack_share_node03_address: 172.16.10.71
+ openstack_share_node01_deploy_address: 192.168.10.69
+ openstack_share_node02_deploy_address: 192.168.10.70
+ openstack_share_node03_deploy_address: 192.168.10.71
+ openstack_share_hostname: share
+ openstack_share_node01_hostname: share01
+ openstack_share_node02_hostname: share02
+ openstack_share_node03_hostname: share03
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/_context-environment.yaml
index 0deff14..14a228d 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/_context-environment.yaml
@@ -18,6 +18,7 @@
- openstack_database_leader
- openstack_message_queue
- linux_system_codename_xenial
+ - openstack_dns
interfaces:
ens3:
role: single_dhcp
@@ -32,6 +33,7 @@
- openstack_database
- openstack_message_queue
- linux_system_codename_xenial
+ - openstack_dns
interfaces:
ens3:
role: single_dhcp
@@ -46,6 +48,7 @@
- openstack_database
- openstack_message_queue
- linux_system_codename_xenial
+ - openstack_dns
interfaces:
ens3:
role: single_dhcp
@@ -202,4 +205,26 @@
ens3:
role: single_dhcp
ens4:
+ role: single_ctl
+
+ share02.mcp-queens-dvr.local:
+ reclass_storage_name: openstack_share_node02
+ roles:
+ - openstack_share
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ share03.mcp-queens-dvr.local:
+ reclass_storage_name: openstack_share_node03
+ roles:
+ - openstack_share
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
role: single_ctl
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/salt.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/salt.yaml
index 4b04af1..8c922d2 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/salt.yaml
@@ -15,6 +15,7 @@
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
+
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/sl.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/sl.yaml
index 6cc9fd3..c8875a4 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/sl.yaml
@@ -5,22 +5,22 @@
{{ SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
+{{ SHARED_SL.MACRO_INSTALL_GLUSTERFS_CLIENT() }}
+
{{ SHARED_SL.MACRO_INSTALL_MONGODB() }}
{{ SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
-{{ SHARED_SL.MACRO_INSTALL_PROMETEUS() }}
+{{ SHARED_SL.MACRO_INSTALL_TELEGRAF_AND_PROMETHEUS() }}
+
+{{ SHARED_SL.MACRO_INSTALL_ELASTICSEARCH_AND_KIBANA() }}
{{ SHARED_SL.MACRO_INSTALL_LOG_COLLECTION() }}
{{ SHARED_SL.MACRO_INSTALL_CEILOMETER_COLLECTOR() }}
-{{ SHARED_SL.MACRO_COLLECT_GRAINS() }}
-
{{ SHARED_SL.MACRO_CONFIGURE_SERVICES() }}
-{{ SHARED_SL.MACRO_LAUNCH_CONTAINERS() }}
-
{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/underlay.yaml
index f8c13d4..fe77dbe 100644
--- a/tcp_tests/templates/cookied-mcp-queens-dvr/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/underlay.yaml
@@ -32,6 +32,8 @@
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
{% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_SHARE02 = os_env('HOSTNAME_SHARE02', 'share02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_SHARE03 = os_env('HOSTNAME_SHARE03', 'share03.' + DOMAIN_NAME) %}
template:
devops_settings:
@@ -61,7 +63,9 @@
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +130
+ default_{{ HOSTNAME_SHARE01 }}: +69
+ default_{{ HOSTNAME_SHARE02 }}: +70
+ default_{{ HOSTNAME_SHARE03 }}: +71
ip_ranges:
dhcp: [+90, -10]
@@ -88,7 +92,9 @@
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +130
+ default_{{ HOSTNAME_SHARE01 }}: +69
+ default_{{ HOSTNAME_SHARE02 }}: +70
+ default_{{ HOSTNAME_SHARE03 }}: +71
ip_ranges:
dhcp: [+90, -10]
@@ -115,7 +121,9 @@
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +130
+ default_{{ HOSTNAME_SHARE01 }}: +69
+ default_{{ HOSTNAME_SHARE02 }}: +70
+ default_{{ HOSTNAME_SHARE03 }}: +71
ip_ranges:
dhcp: [+10, -10]
@@ -142,7 +150,9 @@
default_{{ HOSTNAME_MTR03 }}: +88
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +130
+ default_{{ HOSTNAME_SHARE01 }}: +69
+ default_{{ HOSTNAME_SHARE02 }}: +70
+ default_{{ HOSTNAME_SHARE03 }}: +71
ip_ranges:
dhcp: [+130, +220]
@@ -731,3 +741,55 @@
interfaces: *all_interfaces
network_config: *all_network_config
+
+ - name: {{ HOSTNAME_SHARE02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+         - name: iso  # Volume with name 'iso' will be used
+                      # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_SHARE03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+         - name: iso  # Volume with name 'iso' will be used
+                      # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/_context-cookiecutter-mcp-queens-ovs.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/_context-cookiecutter-mcp-queens-ovs.yaml
index d8f2505..3da02ff 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/_context-cookiecutter-mcp-queens-ovs.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/_context-cookiecutter-mcp-queens-ovs.yaml
@@ -209,3 +209,14 @@
manila_share_backend: 'lvm'
manila_lvm_volume_name: 'manila-volume'
manila_lvm_devices: '/dev/loop1'
+ openstack_share_address: 172.16.10.68
+ openstack_share_node01_address: 172.16.10.69
+ openstack_share_node02_address: 172.16.10.70
+ openstack_share_node03_address: 172.16.10.71
+ openstack_share_node01_deploy_address: 192.168.10.69
+ openstack_share_node02_deploy_address: 192.168.10.70
+ openstack_share_node03_deploy_address: 192.168.10.71
+ openstack_share_hostname: share
+ openstack_share_node01_hostname: share01
+ openstack_share_node02_hostname: share02
+ openstack_share_node03_hostname: share03
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/_context-environment.yaml
index 6230f55..2be8edf 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/_context-environment.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/_context-environment.yaml
@@ -224,4 +224,26 @@
ens3:
role: single_dhcp
ens4:
+ role: single_ctl
+
+ share02.mcp-queens-ovs.local:
+ reclass_storage_name: openstack_share_node02
+ roles:
+ - openstack_share
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ share03.mcp-queens-ovs.local:
+ reclass_storage_name: openstack_share_node03
+ roles:
+ - openstack_share
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
role: single_ctl
\ No newline at end of file
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/salt.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/salt.yaml
index e88d8a4..1e102d5 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/salt.yaml
@@ -15,6 +15,7 @@
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='\*') }}
+
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/sl.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/sl.yaml
index 587cb1e..07eb3af 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/sl.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/sl.yaml
@@ -5,22 +5,22 @@
{{ SHARED_SL.MACRO_INSTALL_DOCKER_SWARM() }}
+{{ SHARED_SL.MACRO_INSTALL_GLUSTERFS_CLIENT() }}
+
{{ SHARED_SL.MACRO_INSTALL_MONGODB() }}
{{ SHARED_SL.MACRO_INSTALL_MONGODB_CLUSTER() }}
-{{ SHARED_SL.MACRO_INSTALL_PROMETEUS() }}
+{{ SHARED_SL.MACRO_INSTALL_TELEGRAF_AND_PROMETHEUS() }}
+
+{{ SHARED_SL.MACRO_INSTALL_ELASTICSEARCH_AND_KIBANA() }}
{{ SHARED_SL.MACRO_INSTALL_LOG_COLLECTION() }}
{{ SHARED_SL.MACRO_INSTALL_CEILOMETER_COLLECTOR() }}
-{{ SHARED_SL.MACRO_COLLECT_GRAINS() }}
-
{{ SHARED_SL.MACRO_CONFIGURE_SERVICES() }}
-{{ SHARED_SL.MACRO_LAUNCH_CONTAINERS() }}
-
{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/underlay.yaml
index 0b7b0e8..ac187d1 100644
--- a/tcp_tests/templates/cookied-mcp-queens-ovs/underlay.yaml
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/underlay.yaml
@@ -34,7 +34,8 @@
{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
{% set HOSTNAME_SHARE01 = os_env('HOSTNAME_SHARE01', 'share01.' + DOMAIN_NAME) %}
-
+{% set HOSTNAME_SHARE02 = os_env('HOSTNAME_SHARE02', 'share02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_SHARE03 = os_env('HOSTNAME_SHARE03', 'share03.' + DOMAIN_NAME) %}
template:
devops_settings:
env_name: {{ os_env('ENV_NAME', 'cookied-mcp-queens-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
@@ -65,7 +66,9 @@
default_{{ HOSTNAME_DNS01 }}: +111
default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +130
+ default_{{ HOSTNAME_SHARE01 }}: +69
+ default_{{ HOSTNAME_SHARE02 }}: +70
+ default_{{ HOSTNAME_SHARE03 }}: +71
ip_ranges:
dhcp: [+90, -10]
@@ -94,7 +97,9 @@
default_{{ HOSTNAME_DNS01 }}: +111
default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +130
+ default_{{ HOSTNAME_SHARE01 }}: +69
+ default_{{ HOSTNAME_SHARE02 }}: +70
+ default_{{ HOSTNAME_SHARE03 }}: +71
ip_ranges:
dhcp: [+90, -10]
@@ -123,7 +128,9 @@
default_{{ HOSTNAME_DNS01 }}: +111
default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +130
+ default_{{ HOSTNAME_SHARE01 }}: +69
+ default_{{ HOSTNAME_SHARE02 }}: +70
+ default_{{ HOSTNAME_SHARE03 }}: +71
ip_ranges:
dhcp: [+10, -10]
@@ -152,7 +159,9 @@
default_{{ HOSTNAME_DNS01 }}: +111
default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
- default_{{ HOSTNAME_SHARE01 }}: +130
+ default_{{ HOSTNAME_SHARE01 }}: +69
+ default_{{ HOSTNAME_SHARE02 }}: +70
+ default_{{ HOSTNAME_SHARE03 }}: +71
ip_ranges:
dhcp: [+130, +220]
@@ -792,3 +801,55 @@
interfaces: *all_interfaces
network_config: *all_network_config
+
+ - name: {{ HOSTNAME_SHARE02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+         - name: iso  # Volume with name 'iso' will be used
+                      # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_SHARE03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+         - name: iso  # Volume with name 'iso' will be used
+                      # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
diff --git a/tcp_tests/templates/k8s-ha-calico/k8s.yaml b/tcp_tests/templates/k8s-ha-calico/k8s.yaml
index a3f228e..88075db 100644
--- a/tcp_tests/templates/k8s-ha-calico/k8s.yaml
+++ b/tcp_tests/templates/k8s-ha-calico/k8s.yaml
@@ -45,7 +45,7 @@
- description: Register addons
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@kubernetes:master and *01*' state.sls kubernetes.master.setup
+ -C 'I@kubernetes:master' state.sls kubernetes.master.setup
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/shared-openstack.yaml b/tcp_tests/templates/shared-openstack.yaml
index cec31ae..fb326d0 100644
--- a/tcp_tests/templates/shared-openstack.yaml
+++ b/tcp_tests/templates/shared-openstack.yaml
@@ -322,13 +322,6 @@
retry: {count: 1, delay: 5}
skip_fail: false
-- description: WR https://mirantis.jira.com/browse/PROD-19012
- cmd: |
- salt 'ctl*' cmd.run 'systemctl restart apache2'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: false
-
- description: Check manila-services
cmd: |
salt 'ctl01*' cmd.run '. /root/keystonercv3; manila service-list'
@@ -338,7 +331,7 @@
- description: Create manila type
cmd: |
- salt 'cfg01*' state.sls manila.client
+ salt -C "I@manila:client" state.sls manila.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
diff --git a/tcp_tests/templates/shared-sl.yaml b/tcp_tests/templates/shared-sl.yaml
index d41d1c1..413162b 100644
--- a/tcp_tests/templates/shared-sl.yaml
+++ b/tcp_tests/templates/shared-sl.yaml
@@ -67,6 +67,15 @@
skip_fail: false
{%- endmacro %}
+{%- macro MACRO_INSTALL_GLUSTERFS_CLIENT() %}
+- description: Install glusterfs client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:client' state.sls glusterfs.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 15}
+ skip_fail: false
+{%- endmacro %}
+
{%- macro MACRO_INSTALL_MONGODB() %}
# Install slv2 infra
# Install MongoDB for alerta
@@ -90,7 +99,9 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 5, delay: 20}
skip_fail: false
+{%- endmacro %}
+{%- macro MACRO_INSTALL_TELEGRAF_AND_PROMETHEUS() %}
- description: Install telegraf
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
node_name: {{ HOSTNAME_CFG01 }}
@@ -105,21 +116,35 @@
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
+{%- endmacro %}
+
+{%- macro MACRO_INSTALL_ELASTICSEARCH_AND_KIBANA() %}
+- description: Install elasticsearch server on first node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server:enabled:true and *01*' state.sls elasticsearch.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
- description: Install elasticsearch server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server:enabled:true' state.sls elasticsearch.server
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install kibana server
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server:enabled:true and *01*' state.sls kibana.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install kibana server on all nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server:enabled:true' state.sls kibana.server
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
- description: Install elasticsearch client
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 30}
skip_fail: false
@@ -135,25 +160,12 @@
INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server and *01*' state.sls influxdb;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb;
fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
-{%- endmacro %}
-
-{%- macro MACRO_INSTALL_PROMETEUS() %}
-# Install Prometheus LTS(optional if set in model)
-- description: Prometheus LTS(optional if set in model)
- cmd: |
- PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
- echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
- if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
- salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
- fi
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 5}
- skip_fail: true
+ skip_fail: false
{%- endmacro %}
{%- macro MACRO_INSTALL_LOG_COLLECTION() %}
@@ -187,39 +199,29 @@
skip_fail: false
{%- endmacro %}
-{%- macro MACRO_COLLECT_GRAINS() %}
-# Collect grains needed to configure the services
-
-- description: Get grains
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Sync modules
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 10}
- skip_fail: false
-
-- description: Update mine
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 5, delay: 15}
- skip_fail: false
-{%- endmacro %}
-
{%- macro MACRO_CONFIGURE_SERVICES() %}
-# Configure the services running in Docker Swarm
-- description: Configure prometheus in docker swarm
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus,heka.remote_collector
+# Collect grains needed to configure the services
+- description: Collect Grains
+ cmd: |
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-{%- endmacro %}
-{%- macro MACRO_LAUNCH_CONTAINERS() %}
-# Launch containers
+- description: Configure prometheus in docker swarm
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Configure Remote Collector in Docker Swarm for Openstack deployments
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls heka.remote_collector
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
- description: launch prometheus containers
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
node_name: {{ HOSTNAME_CFG01 }}
@@ -229,24 +231,43 @@
- description: Check docker ps
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
- skip_fail: false
-
-- description: Install sphinx
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
- node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
-- description: Configure Grafana dashboards and datasources
- cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
+- description: "check grafana service"
+ cmd: timeout 60 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:server' cmd.run
+ 'export SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+ curl -sf http://${SL_VIP}:15013/;'
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 10}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Configure Grafana dashboards and datasources
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 60}
+ skip_fail: false
+
+- description: "check grafana service"
+ cmd: timeout 60 salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:server' cmd.run
+ 'export SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+ curl -sf http://${SL_VIP}:15013/;'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Configure Alerta if it exists
+ cmd: |
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:alerta' match.pillar 'prometheus:alerta' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:alerta' state.sls prometheus.alerta
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
skip_fail: false
- description: Run salt minion to create cert files
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion.cert
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
- skip_fail: false
-{%- endmacro %}
\ No newline at end of file
+ skip_fail: true
+{%- endmacro %}
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/post_openstack.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/post_openstack.yaml
new file mode 100644
index 0000000..9f28ba9
--- /dev/null
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/post_openstack.yaml
@@ -0,0 +1,118 @@
+{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+# Install OpenStack control services
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', 'virtual-offline-pike-ovs-dpdk') %}
+{% import 'shared-backup-restore.yaml' as BACKUP with context %}
+{% import 'shared-salt.yaml' as SHARED with context %}
+{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+
+{% set DOCKER_LOCAL_REPO = os_env('DOCKER_LOCAL_REPO', 'deb [arch=amd64] http://mirror.mcp.mirantis.local.test/ubuntu-xenial/docker ' + REPOSITORY_SUITE + ' stable') %}
+
+ # Upload cirros image
+- description: Upload cirros image on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ 'wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 30}
+ skip_fail: false
+
+- description: Create net04_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create router
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-create net04_router01'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set gateway
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Add interface
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Sync time
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+ 'service ntp stop; ntpd -gq; service ntp start'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Enable local docker repo
+ cmd: |
+ set -e;
+ echo "{{ DOCKER_LOCAL_REPO }}" > /etc/apt/sources.list.d/mcp_docker.list;
+ apt-get clean; apt-get update;
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Install docker-ce on gtw
+ cmd: salt-call cmd.run 'apt-get install docker-ce -y'
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Enable forward policy on gtw
+ cmd: |
+ set -e;
+ iptables --policy FORWARD ACCEPT;
+ node_name: {{ HOSTNAME_GTW01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create rc file on cfg
+ cmd: scp ctl01:/root/keystonercv3 /root
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Copy rc file
+ cmd: scp /root/keystonercv3 gtw01:/root
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+{{ BACKUP.MACRO_WR_NGINX_MASTER() }}
+{{ BACKUP.MACRO_BACKUP_BACKUPNINJA() }}
+{{ BACKUP.MACRO_BACKUP_XTRABACKUP() }}
\ No newline at end of file
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml
index 921cd7b..d671337 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/salt.yaml
@@ -3,6 +3,9 @@
{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL01 with context %}
{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL02 with context %}
{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CID01 with context %}
+{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CID02 with context %}
+{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CID03 with context %}
{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CMP01 with context %}
{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CMP02 with context %}
{% from 'virtual-offline-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_GTW01 with context %}
@@ -43,6 +46,9 @@
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CTL01) }}
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CTL02) }}
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CTL03) }}
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CID01) }}
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CID02) }}
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CID03) }}
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CMP01) }}
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CMP02) }}
{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_GTW01) }}
@@ -65,7 +71,7 @@
retry: {count: 1, delay: 10}
skip_fail: false
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "backupninja" "watchdog" "runtest" "auditd" "logrotate" "gnocchi" "manila"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "backupninja" "watchdog" "runtest" "auditd" "logrotate" "gnocchi" "manila" "jenkins" "glusterfs"') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
@@ -88,15 +94,60 @@
retry: {count: 1, delay: 10}
skip_fail: false
-- description: Install watchdog
- cmd: salt -C "I@watchdog:server" state.sls watchdog;
+#- description: Install watchdog
+# cmd: salt -C "I@watchdog:server" state.sls watchdog;
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+
+- description: Workaround for correct access to git repo from jenkins on cfg01 node
+ cmd: |
+ export GIT_SSL_NO_VERIFY=true; git clone --mirror https://gerrit.mcp.mirantis.local.test/mk/mk-pipelines /home/repo/mk/mk-pipelines/;
+ export GIT_SSL_NO_VERIFY=true; git clone --mirror https://gerrit.mcp.mirantis.local.test/mcp-ci/pipeline-library /home/repo/mcp-ci/pipeline-library/;
+ chown -R git:www-data /home/repo/mk/mk-pipelines/*;
+ chown -R git:www-data /home/repo/mcp-ci/pipeline-library/*;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
skip_fail: false
+- description: '*Workaround* to remove apt key until migration to CC'
+ cmd: salt-key -d apt01.virtual-offline-pike-ovs-dpdk -y
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: true
+
+- description: '*Workaround* to stop minion on the apt-like proxy node'
+ cmd: systemctl stop salt-minion.service
+ node_name: {{ HOSTNAME_APT01 }}
+ retry: {count: 1, delay: 1}
+ skip_fail: true
+
- description: Workaround to bring OVS interfaces UP on cmp nodes without a reboot
cmd: |
salt 'cmp*' cmd.run "ifup br-mesh";
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 10}
- skip_fail: false
\ No newline at end of file
+ skip_fail: false
+
+- description: Temporary workaround to pre-populate Jenkins known_hosts with the cfg01 host key
+ cmd: |
+ ssh-keyscan cfg01 > /var/lib/jenkins/.ssh/known_hosts || true;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Enable Jenkins
+ cmd: |
+ systemctl enable jenkins || true;
+ systemctl restart jenkins || true;
+ sleep 5;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Run jenkins.client
+ cmd: |
+ salt-call state.sls jenkins.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 60}
+ skip_fail: false
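+
+# jenkins.client is retried (3 attempts, 60 s apart) because the Jenkins
+# master restarted above may still be initializing when the first run
+# happens.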
diff --git a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay.yaml b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay.yaml
index ab4dbe5..15da576 100644
--- a/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay.yaml
+++ b/tcp_tests/templates/virtual-offline-pike-ovs-dpdk/underlay.yaml
@@ -19,6 +19,9 @@
{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME + '.local') %}
{% set HOSTNAME_APT01 = os_env('HOSTNAME_APT01', 'apt01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID = os_env('HOSTNAME_CID', 'cid.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID01 = os_env('HOSTNAME_CID01', 'cid01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID02 = os_env('HOSTNAME_CID02', 'cid02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CID03 = os_env('HOSTNAME_CID03', 'cid03.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
@@ -45,6 +48,10 @@
l2_network_device: +1
default_{{ HOSTNAME_APT01 }}: +122
default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CID }}: +80
+ default_{{ HOSTNAME_CID01 }}: +91
+ default_{{ HOSTNAME_CID02 }}: +92
+ default_{{ HOSTNAME_CID03 }}: +93
default_{{ HOSTNAME_CTL01 }}: +101
default_{{ HOSTNAME_CTL02 }}: +102
default_{{ HOSTNAME_CTL03 }}: +103
@@ -59,7 +66,7 @@
default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_VS }}: +178
ip_ranges:
- dhcp: [+90, -10]
+ dhcp: [+60, -10]
admin-pool01:
net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
@@ -69,6 +76,10 @@
l2_network_device: +1
default_{{ HOSTNAME_APT01 }}: +122
default_{{ HOSTNAME_CFG01 }}: +90
+ default_{{ HOSTNAME_CID }}: +80
+ default_{{ HOSTNAME_CID01 }}: +91
+ default_{{ HOSTNAME_CID02 }}: +92
+ default_{{ HOSTNAME_CID03 }}: +93
default_{{ HOSTNAME_CTL01 }}: +101
default_{{ HOSTNAME_CTL02 }}: +102
default_{{ HOSTNAME_CTL03 }}: +103
@@ -83,7 +94,7 @@
default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_VS }}: +178
ip_ranges:
- dhcp: [+90, -10]
+ dhcp: [+60, -10]
tenant-pool01:
net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
@@ -666,3 +677,81 @@
interfaces: *interfaces
network_config: *network_config
+
+ - name: {{ HOSTNAME_CID01 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+ memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+          - name: iso  # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CID02 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+ memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+          - name: iso  # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CID03 }}
+ role: salt_minion
+ params:
+ vcpu: {{ os_env('CID_NODE_CPU', 3) }}
+ memory: {{ os_env('CID_NODE_MEMORY', 8192) }}
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: {{ os_env('CID_NODE_VOLUME_SIZE', 150) }}
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+          - name: iso  # Volume with name 'iso' will be used
+                       # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
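+
+# The CID node shape above can be overridden through the environment, e.g.:
+#   export CID_NODE_CPU=4
+#   export CID_NODE_MEMORY=16384
+#   export CID_NODE_VOLUME_SIZE=200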
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml
index c62817b..bcbcd75 100644
--- a/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml
@@ -11,7 +11,7 @@
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "backupninja" "runtest" "logrotate"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "backupninja" "runtest" "logrotate" "jenkins"') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
diff --git a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
index 69d8324..56efe59 100644
--- a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
+++ b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
@@ -16,6 +16,7 @@
from tcp_tests import logger
from tcp_tests import settings
+from tcp_tests.managers.jenkins.client import JenkinsClient
LOG = logger.logger
@@ -280,6 +281,7 @@
@pytest.mark.grab_versions
@pytest.mark.fail_snapshot
+ @pytest.mark.offline_dpdk
def test_mcp_dpdk_ovs_install(self, underlay,
openstack_deployed,
openstack_actions,
@@ -299,3 +301,103 @@
tempest_actions.prepare_and_run_tempest(dpdk=True)
LOG.info("*************** DONE **************")
+
+ @pytest.mark.fail_snapshot
+ @pytest.mark.offline_dpdk
+ def test_pipeline_deploy_os_dpdk(self, show_step,
+ underlay, config, salt_deployed,
+ tempest_actions,
+ openstack_actions):
+        """Deploy CI/CD, then deploy OpenStack with pipelines
+
+ Scenario:
+            1. Prepare salt on hosts
+            2. Connect to Jenkins on the cfg01 node
+            3. Run deploy on the cfg01 node
+            4. Connect to Jenkins on the cid node
+            5. Run DriveTrain (DT) deploy on the cid node
+            6. Run OpenStack deploy with DT
+ """
+ show_step(1)
+ nodes = underlay.node_names()
+ LOG.info("Nodes - {}".format(nodes))
+ show_step(2)
+ cfg_node_name = underlay.get_target_node_names(
+ target='cfg')[0]
+ salt_api = salt_deployed.get_pillar(
+ cfg_node_name, '_param:jenkins_salt_api_url')
+ salt_api = salt_api[0].get(cfg_node_name)
+ jenkins = JenkinsClient(
+ host='http://{}:8081'.format(config.salt.salt_master_host),
+ username='admin',
+ password='r00tme')
+ params = jenkins.make_defults_params('deploy_openstack')
+ params['SALT_MASTER_URL'] = salt_api
+ params['STACK_INSTALL'] = 'core,cicd'
+
+ show_step(3)
+ build = jenkins.run_build('deploy_openstack', params)
+ jenkins.wait_end_of_build(
+ name=build[0],
+ build_id=build[1],
+ timeout=60 * 60 * 4)
+ result = jenkins.build_info(name=build[0],
+ build_id=build[1])['result']
+        assert result == 'SUCCESS', \
+            "Deploy core,cicd failed: {0} {1}".format(
+                jenkins.build_info(name=build[0], build_id=build[1]), result)
+
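+        # The 'core,cicd' deploy has produced a second Jenkins (its admin
+        # password lives in pillar) on the cid nodes; the rest of the test
+        # talks to that instance instead of the cfg01 one.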
+ show_step(4)
+ cid_node = underlay.get_target_node_names(
+ target='cid01')[0]
+ salt_output = salt_deployed.get_pillar(
+ cid_node, 'jenkins:client:master:password')
+ cid_passwd = salt_output[0].get(cid_node)
+
+ pillar = 'keepalived:cluster:instance:cicd_control_vip:address'
+ addresses = salt_deployed.get_pillar('cid01*', pillar)
+ ip = list(set([ip
+ for item in addresses
+ for node, ip in item.items() if ip]))
+ LOG.info('Jenkins ip is {}'.format(ip))
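+        # If no VIP shows up in pillar, restart keepalived on the cid nodes
+        # once and retry the query before giving up; a best-effort guard
+        # against a not-yet-converged keepalived cluster.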
+ try:
+            assert len(ip) > 0, 'failed to find jenkins ip'
+ except AssertionError:
+ salt_deployed._salt.local(
+ tgt='cid*', fun='cmd.run',
+ args='service keepalived restart')
+ addresses = salt_deployed.get_pillar('cid01*', pillar)
+ ip = list(set([ip
+ for item in addresses
+ for node, ip in item.items() if ip]))
+ LOG.info('Jenkins ip is {}'.format(ip))
+            assert len(ip) > 0, 'failed to find jenkins ip: {}'.format(addresses)
+
+ jenkins = JenkinsClient(
+ host='http://{}:8081'.format(ip[0]),
+ username='admin',
+ password=cid_passwd)
+ params['STACK_INSTALL'] = 'ovs,openstack'
+ params['SALT_MASTER_URL'] = 'http://{}:6969'.format(
+ config.salt.salt_master_host)
+ show_step(5)
+ build = jenkins.run_build('deploy_openstack', params)
+ jenkins.wait_end_of_build(
+ name=build[0],
+ build_id=build[1],
+ timeout=60 * 60 * 4)
+ result = jenkins.build_info(name=build[0],
+ build_id=build[1])['result']
+        assert result == 'SUCCESS', \
+            "Deploy openstack failed: {0} {1}".format(
+ jenkins.build_info(name=build[0], build_id=build[1]),
+ result)
+
+ # Prepare resources before test
+        steps_path = config.openstack_deploy.openstack_resources_steps_path
+ commands = underlay.read_template(steps_path)
+ openstack_actions.install(commands)
+
+ if settings.RUN_TEMPEST:
+ tempest_actions.prepare_and_run_tempest()
+ LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_k8s_actions.py b/tcp_tests/tests/system/test_k8s_actions.py
index 7f01cf5..2fddde6 100644
--- a/tcp_tests/tests/system/test_k8s_actions.py
+++ b/tcp_tests/tests/system/test_k8s_actions.py
@@ -16,6 +16,7 @@
import netaddr
import os
import json
+import requests
from tcp_tests import logger
from tcp_tests import settings
@@ -70,7 +71,9 @@
})
show_step(5)
- k8s_deployed.nslookup(hostname, svc.get_ip())
+ dns_svc = k8s_deployed.api.services.get(
+ name='coredns', namespace='kube-system')
+ k8s_deployed.nslookup(hostname, dns_svc.get_ip())
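+        # nslookup queries the coredns service IP as the DNS server here,
+        # since cluster DNS is what serves the record for the test hostname,
+        # not the service that was just exposed.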
show_step(6)
deployment.delete()
@@ -385,3 +388,74 @@
[ns.name for ns in k8s_deployed.api.namespaces.list()]
for namespace in dashboard_namespaces:
assert namespace['objectMeta']['name'] in namespaces_names_list
+
+    @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_k8s_ingress_nginx(self, show_step, config,
+ salt_deployed, k8s_deployed):
+        """Test that ingress-nginx is configured and works with metallb
+
+ Scenario:
+ 1. Setup Kubernetes cluster with metallb
+ 2. Create 2 example deployments and expose them
+            3. Create an ingress with a backend for each deployment's
+               service
+            4. Wait for the ingress to be deployed
+ 5. Try to reach default endpoint
+ 6. Try to reach test1 and test2 deployment services endpoints
+ """
+ show_step(1)
+ if not config.k8s_deploy.kubernetes_metallb_enabled:
+ pytest.skip("Test requires metallb addon enabled")
+ if not config.k8s_deploy.kubernetes_ingressnginx_enabled:
+ pytest.skip("Test requires ingress-nginx addon enabled")
+
+ show_step(2)
+ image = 'nginxdemos/hello:plain-text'
+ port = 80
+ dep1 = k8s_deployed.run_sample_deployment(
+ 'dep-ingress-1', image=image, port=port)
+ dep2 = k8s_deployed.run_sample_deployment(
+ 'dep-ingress-2', image=image, port=port)
+ svc1 = dep1.wait_ready().expose()
+ svc2 = dep2.wait_ready().expose()
+
+ show_step(3)
+ body = {
+ 'apiVersion': 'extensions/v1beta1',
+ 'kind': 'Ingress',
+ 'metadata': {'name': 'ingress-test'},
+ 'spec': {
+ 'rules': [{'http': {
+ 'paths': [{
+ 'backend': {
+ 'serviceName': svc1.name,
+ 'servicePort': port},
+ 'path': '/test1'}, {
+ 'backend': {
+ 'serviceName': svc2.name,
+ 'servicePort': port},
+ 'path': '/test2'
+ }]
+ }}]
+ }
+ }
+ ingress = k8s_deployed.api.ingresses.create(body=body)
+
+ show_step(4)
+ ingress.wait_ready()
+
+ show_step(5)
+ ingress_address = "https://{}".format(
+ ingress.read().status.load_balancer.ingress[0].ip)
+
+ assert requests.get(ingress_address, verify=False).status_code == 404
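+        # ingress-nginx's default backend answers unmatched paths with 404,
+        # so a bare GET on the root is expected to return 404; verify=False
+        # is needed because the controller serves a self-signed certificate
+        # by default.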
+
+ show_step(6)
+ req1 = requests.get(ingress_address + "/test1", verify=False)
+ assert req1.status_code == 200
+ assert 'dep-ingress-1' in req1.text
+
+ req2 = requests.get(ingress_address + "/test2", verify=False)
+ assert req2.status_code == 200
+ assert 'dep-ingress-2' in req2.text