Merge "Add cookied model: dvr-ceph-rgw"
diff --git a/tcp_tests/fixtures/k8s_fixtures.py b/tcp_tests/fixtures/k8s_fixtures.py
index a85473d..fee64cf 100644
--- a/tcp_tests/fixtures/k8s_fixtures.py
+++ b/tcp_tests/fixtures/k8s_fixtures.py
@@ -69,19 +69,40 @@
# Deploy Kubernetes cluster
if not config.k8s.k8s_installed:
+ # Workaround for dhclient not killed on non-dhcp interfaces
+ # see https://mirantis.jira.com/browse/PROD-22473
+ tgt = 'I@kubernetes:pool'
+ LOG.warning('Killing dhclient on every non-dhcp interface '
+ 'on nodes with target={}'.format(tgt))
+ interfaces_pillar = k8s_actions._salt.get_pillar(
+ tgt=tgt, pillar='linux:network:interface')[0]
+
+ for node_name, interfaces in interfaces_pillar.items():
+ for iface_name, iface in interfaces.items():
+ iface_name = iface.get('name', iface_name)
+ default_proto = 'static' if 'address' in iface else 'dhcp'
+ if iface.get('proto', default_proto) != 'dhcp':
+ LOG.warning('Trying to kill dhclient for iface {0} '
+ 'on node {1}'.format(iface_name, node_name))
+ underlay.check_call(
+ cmd='pkill -f "dhclient.*{}"'.format(iface_name),
+ node_name=node_name, raise_on_err=False)
+
+ LOG.warning('Restarting keepalived service on controllers...')
+ k8s_actions._salt.local(tgt='ctl*', fun='cmd.run',
+ args='systemctl restart keepalived.service')
+        # give keepalived some time to enter the MASTER state
+ time.sleep(3)
+ # --- end of workaround
+
+ # install k8s
steps_path = config.k8s_deploy.k8s_steps_path
commands = underlay.read_template(steps_path)
k8s_actions.install(commands)
+
hardware.create_snapshot(ext.SNAPSHOT.k8s_deployed)
salt_deployed.sync_time()
- # Workaround for keepalived hang issue after env revert from snapshot
- # see https://mirantis.jira.com/browse/PROD-12038
- LOG.warning('Restarting keepalived service on controllers...')
- k8s_actions._salt.local(tgt='ctl*', fun='cmd.run',
- args='systemctl restart keepalived.service')
- # give some time to keepalived to enter in MASTER state
- time.sleep(5)
return k8s_actions
@@ -150,7 +171,7 @@
@pytest.fixture(scope='function')
-def cncf_log_helper(request, func_name, underlay, k8s_deployed):
+def k8s_cncf_log_helper(request, func_name, underlay, k8s_deployed):
"""Finalizer to prepare cncf tar.gz and save results from archive"""
cncf_publisher = request.keywords.get('cncf_publisher', None)
@@ -163,7 +184,7 @@
or "{}".format(func_name)
k8s_deployed.extract_file_to_node(
system='k8s', file_path='tmp/sonobuoy',
- pod_name='sonobuoy', pod_namespace='sonobuoy'
+ pod_name='sonobuoy', pod_namespace='heptio-sonobuoy'
)
k8s_deployed.manage_cncf_archive()
k8s_deployed.download_k8s_logs(files)
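
For illustration, this is how the proto-detection rule in the workaround above classifies interfaces; the pillar data here is hypothetical, not taken from a real model:

    # hypothetical 'linux:network:interface' pillar for one node
    interfaces = {
        'ens3': {'enabled': True, 'proto': 'dhcp'},        # skipped: dhcp
        'ens4': {'enabled': True, 'address': '10.1.0.5'},  # matched: address implies static
        'br_ctl': {'name': 'br-ctl', 'proto': 'manual'},   # matched: explicit non-dhcp proto
    }

    for iface_name, iface in interfaces.items():
        iface_name = iface.get('name', iface_name)
        default_proto = 'static' if 'address' in iface else 'dhcp'
        if iface.get('proto', default_proto) != 'dhcp':
            print('pkill -f "dhclient.*{}"'.format(iface_name))
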
diff --git a/tcp_tests/helpers/netchecker.py b/tcp_tests/helpers/netchecker.py
index 24ab271..dc58d9c 100644
--- a/tcp_tests/helpers/netchecker.py
+++ b/tcp_tests/helpers/netchecker.py
@@ -12,11 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import requests
from devops.helpers import helpers
-from k8sclient.client import rest
from tcp_tests import logger
from tcp_tests.helpers import utils
@@ -24,570 +22,195 @@
LOG = logger.logger
+NETCHECKER_NAMESPACE = "netchecker"
+NETCHECKER_SERVICE_PREFIX = "netchecker"
+NETCHECKER_SERVER_PREFIX = "netchecker-server-"
+NETCHECKER_AGENT_PREFIX = "netchecker-agent-"
-NETCHECKER_SERVICE_NAME = "netchecker-service"
-NETCHECKER_CONTAINER_PORT = NETCHECKER_SERVICE_PORT = 8081
-NETCHECKER_NODE_PORT = 31081
-NETCHECKER_REPORT_INTERVAL = 30
-NETCHECKER_SERVER_REPLICAS = 1
-NETCHECKER_PROBEURLS = "http://ipinfo.io"
-NETCHECKER_SVC_CFG = {
- "apiVersion": "v1",
- "kind": "Service",
- "metadata": {
- "name": NETCHECKER_SERVICE_NAME
- },
- "spec": {
- "ports": [
- {
- "nodePort": NETCHECKER_NODE_PORT,
- "port": NETCHECKER_SERVICE_PORT,
- "protocol": "TCP",
- "targetPort": NETCHECKER_CONTAINER_PORT
+class Netchecker(object):
+ def __init__(self, k8sapi, namespace=NETCHECKER_NAMESPACE):
+ self._api = k8sapi
+ self._namespace = namespace
+
+ def get_netchecker_pod_ip(self, prefix=NETCHECKER_SERVER_PREFIX):
+ pods = self._api.pods.list(self._namespace, name_prefix=prefix)
+ assert len(pods) > 0, "No '{}' pods found!".format(prefix)
+ return pods[0].read().status.host_ip
+
+ def get_netchecker_service(self, prefix=NETCHECKER_SERVICE_PREFIX):
+ services = self._api.services.list(self._namespace, name_prefix=prefix)
+ assert len(services) > 0, "No '{}' services found!".format(prefix)
+ return services[0]
+
+ @utils.retry(3, requests.exceptions.RequestException)
+ def get_connectivity_status(self):
+ kube_host_ip = self.get_netchecker_pod_ip()
+
+ net_status_url = 'http://{0}:{1}/api/v1/connectivity_check'.format(
+ kube_host_ip, self.get_service_port())
+
+ response = requests.get(net_status_url, timeout=5)
+ LOG.debug('Connectivity check status: [{0}] {1}'.format(
+ response.status_code, response.text.strip()))
+ return response
+
+ @utils.retry(3, requests.exceptions.RequestException)
+ def wait_netchecker_pods_running(self, prefix):
+ for pod in self._api.pods.list(self._namespace, name_prefix=prefix):
+ pod.wait_running(timeout=600)
+
+ def check_network(self, works):
+ if works:
+ assert self.get_connectivity_status().status_code in (200, 204)
+ else:
+ assert self.get_connectivity_status().status_code == 400
+
+ def wait_check_network(self, works, timeout=60, interval=10):
+ helpers.wait_pass(
+ lambda: self.check_network(works=works),
+ timeout=timeout,
+ interval=interval)
+
+ def kubernetes_block_traffic_namespace(self,
+ namespace=NETCHECKER_NAMESPACE):
+ self._api.namespaces.get(name=namespace).patch({
+ "metadata": {
+ "annotations": {
+ "net.beta.kubernetes.io/network-policy":
+ '{"ingress": {"isolation": "DefaultDeny"}}',
+ }
}
- ],
- "selector": {
- "app": "netchecker-server"
- },
- "type": "NodePort"
- }
-}
+ })
-NETCHECKER_DEPLOYMENT_CFG = {
- "kind": "Deployment",
- "spec": {
- "template": {
+ def calico_allow_netchecker_connections(self):
+ srv_pod_ip = self.get_netchecker_pod_ip()
+
+ body = {
+ "apiVersion": "extensions/v1beta1",
+ "kind": "NetworkPolicy",
+ "metadata": {
+ "name": "access-netchecker",
+ "namespace": self._namespace,
+ },
"spec": {
- "containers": [
+ "ingress": [{
+ "from": [{
+ "ipBlock": {
+ "cidr": srv_pod_ip + "/24"
+ }
+ }]
+ }],
+ "podSelector": {
+ "matchLabels": {
+ "app": "netchecker-server"
+ }
+ }
+ }
+ }
+
+ self._api.networkpolicies.create(namespace=self._namespace, body=body)
+
+ def kubernetes_allow_traffic_from_agents(self):
+ self._api.namespaces.get('default').patch({
+ "metadata": {
+ "labels": {
+ "name": 'default',
+ "net.beta.kubernetes.io/network-policy": None,
+ }
+ }
+ })
+
+ kubernetes_policy = {
+ "apiVersion": "extensions/v1beta1",
+ "kind": "NetworkPolicy",
+ "metadata": {
+ "name": "access-netchecker-agent",
+ "namespace": self._namespace,
+ },
+ "spec": {
+ "ingress": [
{
- "name": "netchecker-server",
- "env": None,
- "imagePullPolicy": "IfNotPresent",
- "image": "mirantis/k8s-netchecker-server:latest",
- "args": [
- "-v=5",
- "-logtostderr",
- "-kubeproxyinit",
- "-endpoint=0.0.0.0:{0}".format(
- NETCHECKER_CONTAINER_PORT)
- ],
- "ports": [
+ "from": [
{
- "containerPort": NETCHECKER_CONTAINER_PORT,
- "hostPort": NETCHECKER_NODE_PORT
+ "namespaceSelector": {
+ "matchLabels": {
+ "name": self._namespace
+ }
+ }
+ },
+ {
+ "podSelector": {
+ "matchLabels": {
+ "app": "netchecker-agent"
+ }
+ }
}
]
}
- ]
- },
- "metadata": {
- "labels": {
- "app": "netchecker-server"
- },
- "name": "netchecker-server"
- }
- },
- "replicas": NETCHECKER_SERVER_REPLICAS
- },
- "apiVersion": "extensions/v1beta1",
- "metadata": {
- "name": "netchecker-server"
- }
-}
-
-NETCHECKER_DS_CFG = [
- {
- "apiVersion": "extensions/v1beta1",
- "kind": "DaemonSet",
- "metadata": {
- "labels": {
- "app": "netchecker-agent"
- },
- "name": "netchecker-agent"
- },
- "spec": {
- "template": {
- "metadata": {
- "labels": {
- "app": "netchecker-agent"
- },
- "name": "netchecker-agent"
- },
- "spec": {
- "tolerations": [
- {
- "key": "node-role.kubernetes.io/master",
- "effect": "NoSchedule"
- }
- ],
- "containers": [
- {
- "env": [
- {
- "name": "MY_POD_NAME",
- "valueFrom": {
- "fieldRef": {
- "fieldPath": "metadata.name"
- }
- }
- },
- {
- "name": "MY_NODE_NAME",
- "valueFrom": {
- "fieldRef": {
- "fieldPath": "spec.nodeName"
- }
- }
- },
- {
- "name": "REPORT_INTERVAL",
- "value": str(NETCHECKER_REPORT_INTERVAL)
- },
- ],
- "image": "mirantis/k8s-netchecker-agent:latest",
- "imagePullPolicy": "IfNotPresent",
- "name": "netchecker-agent",
- "command": ["netchecker-agent"],
- "args": [
- "-v=5",
- "-logtostderr",
- "-probeurls={0}".format(NETCHECKER_PROBEURLS)
- ]
- }
- ],
- }
- },
- "updateStrategy": {
- "type": "RollingUpdate"
- }
- }
- },
- {
- "apiVersion": "extensions/v1beta1",
- "kind": "DaemonSet",
- "metadata": {
- "labels": {
- "app": "netchecker-agent-hostnet"
- },
- "name": "netchecker-agent-hostnet"
- },
- "spec": {
- "template": {
- "metadata": {
- "labels": {
- "app": "netchecker-agent-hostnet"
- },
- "name": "netchecker-agent-hostnet"
- },
- "spec": {
- "tolerations": [
- {
- "key": "node-role.kubernetes.io/master",
- "effect": "NoSchedule"
- }
- ],
- "containers": [
- {
- "env": [
- {
- "name": "MY_POD_NAME",
- "valueFrom": {
- "fieldRef": {
- "fieldPath": "metadata.name"
- }
- }
- },
- {
- "name": "MY_NODE_NAME",
- "valueFrom": {
- "fieldRef": {
- "fieldPath": "spec.nodeName"
- }
- }
- },
- {
- "name": "REPORT_INTERVAL",
- "value": str(NETCHECKER_REPORT_INTERVAL)
- },
- ],
- "image": "mirantis/k8s-netchecker-agent:latest",
- "imagePullPolicy": "IfNotPresent",
- "name": "netchecker-agent",
- "command": ["netchecker-agent"],
- "args": [
- "-v=5",
- "-logtostderr",
- "-probeurls={0}".format(NETCHECKER_PROBEURLS)
- ]
- }
- ],
- "hostNetwork": True,
- "dnsPolicy": "ClusterFirstWithHostNet",
- "updateStrategy": {
- "type": "RollingUpdate"
+ ],
+ "podSelector": {
+ "matchLabels": {
+ "app": "netchecker-server"
}
}
+ }
+ }
+
+ kubernetes_policy_hostnet = {
+ "apiVersion": "extensions/v1beta1",
+ "kind": "NetworkPolicy",
+ "metadata": {
+ "name": "access-netchecker-agent-hostnet",
+ "namespace": self._namespace,
},
- "updateStrategy": {
- "type": "RollingUpdate"
- }
- }
- }
-]
-
-NETCHECKER_BLOCK_POLICY = {
- "kind": "policy",
- "spec": {
- "ingress": [
- {
- "action": "allow"
- },
- {
- "action": "deny",
- "destination": {
- "ports": [
- NETCHECKER_SERVICE_PORT
- ]
- },
- "protocol": "tcp"
- }
- ]
- },
- "apiVersion": "v1",
- "metadata": {
- "name": "deny-netchecker"
- }
-}
-
-
-def start_server(k8s, config, namespace=None,
- deploy_spec=NETCHECKER_DEPLOYMENT_CFG,
- svc_spec=NETCHECKER_SVC_CFG):
- """Start netchecker server in k8s cluster
-
- :param k8s: K8SManager
- :param config: fixture provides oslo.config
- :param namespace: str
- :param deploy_spec: dict
- :param svc_spec: dict
- :return: None
- """
- for container in deploy_spec['spec']['template']['spec']['containers']:
- if container['name'] == 'netchecker-server':
- container['image'] = \
- config.k8s_deploy.kubernetes_netchecker_server_image
- try:
- if k8s.api.deployments.get(name=deploy_spec['metadata']['name'],
- namespace=namespace):
- LOG.debug('Network checker server deployment "{}" '
- 'already exists! Skipping resource '
- 'creation'.format(deploy_spec['metadata']['name']))
- except rest.ApiException as e:
- if e.status == 404:
- n = k8s.check_deploy_create(body=deploy_spec, namespace=namespace)
- k8s.wait_deploy_ready(n.name, namespace=namespace)
- else:
- raise e
- try:
- if k8s.api.services.get(name=svc_spec['metadata']['name']):
- LOG.debug('Network checker server service {} is '
- 'already running! Skipping resource creation'
- '.'.format(svc_spec['metadata']['name']))
- except rest.ApiException as e:
- if e.status == 404:
- k8s.check_service_create(body=svc_spec, namespace=namespace)
- else:
- raise e
-
-
-def start_agent(k8s, config, namespace=None, ds_spec=NETCHECKER_DS_CFG,
- service_namespace=None):
- """Start netchecker agent in k8s cluster
-
- :param k8s: K8SManager
- :param config: fixture provides oslo.config
- :param namespace: str
- :param ds_spec: str
- :return: None
- """
- for ds in ds_spec:
- for container in ds['spec']['template']['spec']['containers']:
- if container['name'] == 'netchecker-agent':
- container['image'] = \
- config.k8s_deploy.kubernetes_netchecker_agent_image
- if service_namespace is not None:
- container['args'].append(
- "-serverendpoint={0}.{1}.svc.cluster.local:{2}".format(
- NETCHECKER_SERVICE_NAME,
- service_namespace,
- NETCHECKER_SERVICE_PORT))
- k8s.check_ds_create(body=ds, namespace=namespace)
- k8s.wait_ds_ready(dsname=ds['metadata']['name'], namespace=namespace)
- k8s.wait_pods_phase(pods=[pod for pod in k8s.api.pods.list()
- if 'netchecker-agent' in pod.name],
- phase='Running',
- timeout=600)
-
-
-@utils.retry(3, requests.exceptions.RequestException)
-def get_connectivity_status(k8sclient,
- netchecker_pod_port=NETCHECKER_NODE_PORT,
- pod_name='netchecker-server', namespace='default'):
-
- netchecker_srv_pod_names = [pod.name for pod in
- k8sclient.pods.list(namespace=namespace)
- if pod_name in pod.name]
-
- assert len(netchecker_srv_pod_names) > 0, \
- "No netchecker-server pods found!"
-
- netchecker_srv_pod = k8sclient.pods.get(name=netchecker_srv_pod_names[0],
- namespace=namespace)
- kube_host_ip = netchecker_srv_pod.status.host_ip
- net_status_url = 'http://{0}:{1}/api/v1/connectivity_check'.format(
- kube_host_ip, netchecker_pod_port)
- response = requests.get(net_status_url, timeout=5)
- LOG.debug('Connectivity check status: [{0}] {1}'.format(
- response.status_code, response.text.strip()))
- return response
-
-
-@utils.retry(3, requests.exceptions.RequestException)
-def get_netchecker_pod_status(k8s,
- pod_name='netchecker-server',
- namespace='default'):
-
- k8s.wait_pods_phase(
- pods=[pod for pod in k8s.api.pods.list(namespace=namespace)
- if pod_name in pod.name], phase='Running', timeout=600)
-
-
-def check_network(k8sclient, netchecker_pod_port,
- namespace='default', works=True):
- if works:
- assert get_connectivity_status(
- k8sclient, namespace=namespace,
- netchecker_pod_port=netchecker_pod_port).status_code in (200, 204)
- else:
- assert get_connectivity_status(
- k8sclient, namespace=namespace,
- netchecker_pod_port=netchecker_pod_port).status_code == 400
-
-
-def wait_check_network(k8sclient, namespace='default', works=True, timeout=300,
- interval=10, netchecker_pod_port=NETCHECKER_NODE_PORT):
- helpers.wait_pass(
- lambda: check_network(
- k8sclient, netchecker_pod_port=netchecker_pod_port,
- namespace=namespace,
- works=works),
- timeout=timeout,
- interval=interval)
-
-
-def calico_block_traffic_on_node(underlay, target_node):
- cmd = "echo '{0}' | calicoctl create -f -".format(NETCHECKER_BLOCK_POLICY)
- underlay.sudo_check_call(cmd, node_name=target_node)
- LOG.info('Blocked traffic to the network checker service from '
- 'containers on node "{}".'.format(target_node))
-
-
-def calico_unblock_traffic_on_node(underlay, target_node):
- cmd = "echo '{0}' | calicoctl delete -f -".format(NETCHECKER_BLOCK_POLICY)
-
- underlay.sudo_check_call(cmd, node_name=target_node)
- LOG.info('Unblocked traffic to the network checker service from '
- 'containers on node "{}".'.format(target_node))
-
-
-def calico_get_version(underlay, target_node):
- raw_version = underlay.sudo_check_call('calicoctl version',
- node_name=target_node)
-
- assert raw_version['exit_code'] == 0 and len(raw_version['stdout']) > 0, \
- "Unable to get calico version!"
-
- if len(raw_version['stdout']) > 1:
- ctl_version = raw_version['stdout'][0].split()[1].strip()
- else:
- ctl_version = raw_version['stdout'][0].strip()
-
- LOG.debug("Calico (calicoctl) version on '{0}': '{1}'".format(target_node,
- ctl_version))
- return ctl_version
-
-
-def kubernetes_block_traffic_namespace(underlay, kube_host_ip, namespace):
- # TODO(apanchenko): do annotation using kubernetes API
- cmd = ('kubectl annotate ns {0} \'net.beta.kubernetes.io/'
- 'network-policy={{"ingress": {{"isolation":'
- ' "DefaultDeny"}}}}\'').format(namespace)
- underlay.sudo_check_call(cmd=cmd, host=kube_host_ip)
-
-
-def calico_allow_netchecker_connections(underlay, k8sclient, kube_host_ip,
- namespace):
- netchecker_srv_pod_names = [pod.name for pod in
- k8sclient.pods.list(namespace=namespace)
- if 'netchecker-server' in pod.name]
-
- assert len(netchecker_srv_pod_names) > 0, \
- "No netchecker-server pods found!"
-
- netchecker_srv_pod = k8sclient.pods.get(name=netchecker_srv_pod_names[0],
- namespace=namespace)
- nc_host_ip = netchecker_srv_pod.status.host_ip
-
- kubernetes_policy = {
- "apiVersion": "extensions/v1beta1",
- "kind": "NetworkPolicy",
- "metadata": {
- "name": "access-netchecker",
- "namespace": namespace,
- },
- "spec": {
- "ingress": [
- {
- "from": [
- {
- "ipBlock": {
- "cidr": nc_host_ip + "/24"
+ "spec": {
+ "ingress": [
+ {
+ "from": [
+ {
+ "namespaceSelector": {
+ "matchLabels": {
+ "name": self._namespace
+ }
+ }
+ },
+ {
+ "podSelector": {
+ "matchLabels": {
+ "app": "netchecker-agent-hostnet"
+ }
+ }
}
- }
- ]
- }
- ],
- "podSelector": {
- "matchLabels": {
- "app": "netchecker-server"
+ ]
+ }
+ ],
+ "podSelector": {
+ "matchLabels": {
+ "app": "netchecker-server"
+ }
}
}
}
- }
- cmd_add_policy = "echo '{0}' | kubectl create -f -".format(
- json.dumps(kubernetes_policy))
- underlay.sudo_check_call(cmd=cmd_add_policy, host=kube_host_ip)
+ self._api.networkpolicies.create(
+ namespace=self._namespace, body=kubernetes_policy)
+ self._api.networkpolicies.create(
+ namespace=self._namespace, body=kubernetes_policy_hostnet)
+ @utils.retry(3, requests.exceptions.RequestException)
+ def get_metric(self):
+ kube_host_ip = self.get_netchecker_pod_ip()
-def kubernetes_allow_traffic_from_agents(underlay, kube_host_ip, namespace):
- # TODO(apanchenko): add network policies using kubernetes API
- label_namespace_cmd = "kubectl label namespace default name=default"
- underlay.sudo_check_call(cmd=label_namespace_cmd, host=kube_host_ip)
- kubernetes_policy = {
- "apiVersion": "extensions/v1beta1",
- "kind": "NetworkPolicy",
- "metadata": {
- "name": "access-netchecker-agent",
- "namespace": namespace,
- },
- "spec": {
- "ingress": [
- {
- "from": [
- {
- "namespaceSelector": {
- "matchLabels": {
- "name": namespace
- }
- }
- },
- {
- "podSelector": {
- "matchLabels": {
- "app": "netchecker-agent"
- }
- }
- }
- ]
- }
- ],
- "podSelector": {
- "matchLabels": {
- "app": "netchecker-server"
- }
- }
- }
- }
+ metrics_url = 'http://{0}:{1}/metrics'.format(
+ kube_host_ip, self.get_service_port())
- kubernetes_policy_hostnet = {
- "apiVersion": "extensions/v1beta1",
- "kind": "NetworkPolicy",
- "metadata": {
- "name": "access-netchecker-agent-hostnet",
- "namespace": namespace,
- },
- "spec": {
- "ingress": [
- {
- "from": [
- {
- "namespaceSelector": {
- "matchLabels": {
- "name": namespace
- }
- }
- },
- {
- "podSelector": {
- "matchLabels": {
- "app": "netchecker-agent-hostnet"
- }
- }
- }
- ]
- }
- ],
- "podSelector": {
- "matchLabels": {
- "app": "netchecker-server"
- }
- }
- }
- }
+ response = requests.get(metrics_url, timeout=30)
+ LOG.debug('Metrics: [{0}] {1}'.format(
+ response.status_code, response.text.strip()))
+ return response
- cmd_add_policy = "echo '{0}' | kubectl create -f -".format(
- json.dumps(kubernetes_policy))
- underlay.sudo_check_call(cmd=cmd_add_policy, host=kube_host_ip)
-
- cmd_add_policy_hostnet = "echo '{0}' | kubectl create -f -".format(
- json.dumps(kubernetes_policy_hostnet))
- underlay.sudo_check_call(cmd=cmd_add_policy_hostnet, host=kube_host_ip)
-
-
-@utils.retry(3, requests.exceptions.RequestException)
-def get_metric(k8sclient, netchecker_pod_port,
- pod_name='netchecker-server', namespace='default'):
-
- netchecker_srv_pod_names = [pod.name for pod in
- k8sclient.pods.list(namespace=namespace)
- if pod_name in pod.name]
-
- assert len(netchecker_srv_pod_names) > 0, \
- "No netchecker-server pods found!"
- netchecker_srv_pod = k8sclient.pods.get(name=netchecker_srv_pod_names[0],
- namespace=namespace)
-
- kube_host_ip = netchecker_srv_pod.status.host_ip
- metrics_url = 'http://{0}:{1}/metrics'.format(
- kube_host_ip, netchecker_pod_port)
- response = requests.get(metrics_url, timeout=30)
- LOG.debug('Metrics: [{0}] {1}'.format(
- response.status_code, response.text.strip()))
- return response
-
-
-def get_service_port(k8sclient, service_name='netchecker',
- namespace='netchecker'):
- full_service_name = [service.name for service
- in k8sclient.services.list(namespace=namespace)
- if service_name in service.name]
- assert len(full_service_name) > 0, "No netchecker service run"
-
- service_details = k8sclient.services.get(name=full_service_name[0],
- namespace=namespace)
-
- LOG.debug('Necthcecker service details {0}'.format(service_details))
- netchecker_port = service_details.spec.ports[0].node_port
- return netchecker_port
+ def get_service_port(self):
+ service_details = self.get_netchecker_service()
+ LOG.debug('Netchecker service details {0}'.format(service_details))
+ return service_details.read().spec.ports[0].node_port
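
For illustration, a minimal sketch of how the rewritten helper is expected to be driven from a test; treating `k8s_deployed.api` as the K8sCluster client from cluster.py below is an assumption here:

    from tcp_tests.helpers import netchecker

    checker = netchecker.Netchecker(k8s_deployed.api)
    checker.wait_netchecker_pods_running(netchecker.NETCHECKER_SERVER_PREFIX)
    checker.wait_netchecker_pods_running(netchecker.NETCHECKER_AGENT_PREFIX)
    checker.wait_check_network(works=True, timeout=300)
    assert checker.get_metric().status_code == 200
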
diff --git a/tcp_tests/managers/k8s/__init__.py b/tcp_tests/managers/k8s/__init__.py
index bd76aa7..9a5c573 100644
--- a/tcp_tests/managers/k8s/__init__.py
+++ b/tcp_tests/managers/k8s/__init__.py
@@ -11,10 +11,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
-import urllib3
-
from tcp_tests.managers.k8s.cluster import K8sCluster
+from tcp_tests.managers.k8s.base import read_yaml_file, read_yaml_str, \
+    read_yaml_url
-__all__ = ['K8sCluster']
-
-urllib3.disable_warnings() # Supress https insecure warning
+__all__ = ['K8sCluster', 'read_yaml_file', 'read_yaml_str', 'read_yaml_url']
diff --git a/tcp_tests/managers/k8s/base.py b/tcp_tests/managers/k8s/base.py
index 7adb41a..6ca6a88 100644
--- a/tcp_tests/managers/k8s/base.py
+++ b/tcp_tests/managers/k8s/base.py
@@ -12,97 +12,140 @@
# License for the specific language governing permissions and limitations
-class K8sBaseResource(object):
- """docstring for K8sBaseResource"""
+import os
+
+import requests
+import yaml
- def __init__(self, manager, data):
+from tcp_tests import logger
+
+LOG = logger.logger
+
+
+class K8sBaseResource(object):
+ resource_type = None
+
+ def __init__(self, manager, name=None, namespace=None, data=None):
self._manager = manager
- self._add_details(data)
+ self._name = name
+ self._namespace = namespace
+ self._read_cache = None
+ if data is not None:
+ self._update_cache(data)
def __repr__(self):
- reprkeys = sorted(k
- for k in self.__dict__.keys()
- if k[0] != '_' and
- k not in ['manager'])
- info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys)
- return "<%s %s>" % (self.__class__.__name__, info)
+ uid = 'unknown-uid'
+ if self._read_cache is not None:
+ uid = self.uid
+ return "<{0} name='{1}' namespace='{2}' uuid='{3}'>".format(
+ self.__class__.__name__, self.name, self.namespace, uid)
@property
- def api_version(self):
- return self._data.api_version
-
- def _add_details(self, data):
- self._data = data
- for k in [k for k in dir(data)
- if not any((k.startswith('_'), k in ('to_dict', 'to_str')))]:
- try:
- setattr(self, k, getattr(data, k))
- except AttributeError:
- # In this case we already defined the attribute on the class
- pass
-
- def __eq__(self, other):
- if not isinstance(other, K8sBaseResource):
- return NotImplemented
- # two resources of different types are not equal
- if not isinstance(other, self.__class__):
- return False
- return self._info == other._info
-
-
-class K8sBaseManager(object):
-
- resource_class = None
-
- def __init__(self, api, namespace):
- self._api = api
- self._namespace = namespace
- self._raw = None
-
- @property
- def api(self):
- return self._api
+ def name(self):
+ return self._name
@property
def namespace(self):
return self._namespace
- def get(self, *args, **kwargs):
- if not hasattr(self, '_get'):
- raise NotImplementedError(
- '{} does not have {}'.format(self, '_get'))
+ @property
+ def uid(self):
+ return self.read(cached=True).metadata.uid
- return self.resource_class(self, self._get(*args, **kwargs))
+ def _update_cache(self, data):
+ self._read_cache = data
+ self._namespace = data.metadata.namespace
+ self._name = data.metadata.name
- def list(self, *args, **kwargs):
- if not hasattr(self, '_list'):
- raise NotImplementedError(
- '{} does not have {}'.format(self, '_list'))
+ def read(self, cached=False, **kwargs):
+ if not cached:
+ self._update_cache(self._read(**kwargs))
+ return self._read_cache
- lst = self._list(*args, **kwargs)
+ def create(self, body, **kwargs):
+ LOG.info("K8S API Creating {0} with body:\n{1}".format(
+ self.resource_type, body))
- return [self.resource_class(self, item) for item in lst.items]
+ self._update_cache(self._create(body, **kwargs))
+ return self
- def create(self, *args, **kwargs):
- if not hasattr(self, '_create'):
- raise NotImplementedError(
- '{} does not have {}'.format(self, '_create'))
- return self.resource_class(self, self._create(*args, **kwargs))
+ def patch(self, body, **kwargs):
- def replace(self, *args, **kwargs):
- if not hasattr(self, '_replace'):
- raise NotImplementedError(
- '{} does not have {}'.format(self, '_replace'))
- return self._replace(*args, **kwargs)
+ LOG.info("K8S API Patching {0} name={1} ns={2} with body:\n{3}".format(
+ self.resource_type, self.name, self.namespace, body))
- def delete(self, *args, **kwargs):
- if not hasattr(self, '_delete'):
- raise NotImplementedError(
- '{} does not have {}'.format(self, '_delete'))
- return self._delete(*args, **kwargs)
+ self._update_cache(self._patch(body, **kwargs))
+ return self
- def deletecollection(self, *args, **kwargs):
- if not hasattr(self, '_deletecollection'):
- raise NotImplementedError(
- '{} does not have {}'.format(self, '_deletecollection'))
- return self._deletecollection(*args, **kwargs)
+ def replace(self, body, **kwargs):
+ self._update_cache(self._replace(body, **kwargs))
+ return self
+
+ def delete(self, **kwargs):
+ self._delete(**kwargs)
+ return self
+
+ def __eq__(self, other):
+ if not isinstance(other, K8sBaseResource):
+ return NotImplemented
+
+ if not isinstance(other, self.__class__):
+ return False
+
+ return self.uid == other.uid
+
+
+class K8sBaseManager(object):
+ resource_class = None
+
+ def __init__(self, cluster):
+ self._cluster = cluster
+
+ @property
+ def resource_type(self):
+ return self.resource_class.resource_type
+
+ def get(self, name=None, namespace=None, data=None):
+ namespace = namespace or self._cluster.default_namespace
+ return self.resource_class(self, name, namespace, data)
+
+ def create(self, name=None, namespace=None, body=None, **kwargs):
+ return self.get(name=name, namespace=namespace).create(body, **kwargs)
+
+ def __resource_from_data(self, data):
+ return self.resource_class(self, data=data)
+
+ def __list_filter(self, items, name_prefix=None):
+ if name_prefix is not None:
+ items = [item for item in items if
+ item.metadata.name.startswith(name_prefix)]
+ return items
+
+ def __list_to_resource(self, items):
+ return [self.__resource_from_data(item) for item in items]
+
+ def list(self, namespace=None, name_prefix=None, **kwargs):
+ namespace = namespace or self._cluster.default_namespace
+ items = self._list(namespace=namespace, **kwargs).items
+ items = self.__list_filter(items, name_prefix=name_prefix)
+ return self.__list_to_resource(items)
+
+ def list_all(self, name_prefix=None, **kwargs):
+ items = self._list_all(**kwargs).items
+ items = self.__list_filter(items, name_prefix=name_prefix)
+ return self.__list_to_resource(items)
+
+
+def read_yaml_str(yaml_str):
+    """Load YAML from a string."""
+ return yaml.safe_load(yaml_str)
+
+
+def read_yaml_file(file_path, *args):
+    """Load YAML from the file at os.path.join(file_path, *args)."""
+ with open(os.path.join(file_path, *args)) as f:
+ return yaml.safe_load(f)
+
+
+def read_yaml_url(yaml_file_url):
+    """Load YAML fetched from the given URL."""
+ return yaml.safe_load(requests.get(yaml_file_url).text)
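
For illustration, how the reworked resource API is meant to be used together with the new YAML helpers; the manifest path and names are hypothetical, and `cluster` stands for a K8sCluster instance:

    body = read_yaml_file('/tmp/manifests/', 'nginx-deployment.yaml')
    deploy = cluster.deployments.create(namespace='default', body=body)

    uid = deploy.uid                   # served from the cache filled by create()
    status = deploy.read().status      # forces a fresh read from the API server

    # prefix filtering replaces the old "if name in pod.name" list comprehensions
    pods = cluster.pods.list(namespace='default', name_prefix='nginx-')

    deploy.delete()
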
diff --git a/tcp_tests/managers/k8s/cluster.py b/tcp_tests/managers/k8s/cluster.py
index 4bda03f..8ffb4d1 100644
--- a/tcp_tests/managers/k8s/cluster.py
+++ b/tcp_tests/managers/k8s/cluster.py
@@ -12,19 +12,14 @@
# License for the specific language governing permissions and limitations
-import base64
-import ssl
-
-from k8sclient.client import api_client
-from k8sclient.client.apis import apiv_api
-from k8sclient.client.apis import apisextensionsvbeta_api
-from k8sclient.client.apis import apisbatchv_api
+import kubernetes
+from kubernetes import client
from tcp_tests.managers.k8s.componentstatuses import \
K8sComponentStatusManager
from tcp_tests.managers.k8s.daemonsets import K8sDaemonSetManager
from tcp_tests.managers.k8s.deployments import K8sDeploymentManager
-from tcp_tests.managers.k8s.endpoints import K8sEndpointManager
+from tcp_tests.managers.k8s.endpoints import K8sEndpointsManager
from tcp_tests.managers.k8s.events import K8sEventManager
from tcp_tests.managers.k8s.horizontalpodautoscalers import \
K8sHorizontalPodAutoscalerManager
@@ -46,65 +41,73 @@
K8sServiceAccountManager
from tcp_tests.managers.k8s.services import K8sServiceManager
from tcp_tests.managers.k8s.replicasets import K8sReplicaSetManager
+from tcp_tests.managers.k8s.networkpolicies import K8sNetworkPolicyManager
class K8sCluster(object):
- """docstring for K8sCluster"""
-
- def __init__(self, schema="https", user=None, password=None,
+ def __init__(self, schema="https", user=None, password=None, ca=None,
host='localhost', port='443', default_namespace='default'):
- if user and password:
- auth_string = '%s:%s' % (user, password)
- auth = base64.encodestring(auth_string.encode()).decode()[:-1]
- auth = "Basic {}".format(auth)
- self._client = api_client.ApiClient(
- '{schema}://{host}:{port}'.format(
- schema=schema, host=host, port=port))
- self._client.set_default_header('Authorization', auth)
- restcli_impl = self._client.RESTClient.IMPL
- restcli_impl.ssl_pool_manager.connection_pool_kw['cert_reqs'] = \
- ssl.CERT_NONE
+ self.default_namespace = default_namespace
- else:
- self._client = api_client.ApiClient(
- '{schema}://{host}:{port}'.format(
- schema=schema, host=host, port=port))
- self._api = apiv_api.ApivApi(self._client)
- self._bapi = apisbatchv_api.ApisbatchvApi(self._client)
- self._eapi = apisextensionsvbeta_api.ApisextensionsvbetaApi(
- self._client)
- self._default_namespace = default_namespace
+ api_server = '{0}://{1}:{2}'.format(schema, host, port)
- self.nodes = K8sNodeManager(self._api, self._default_namespace)
- self.pods = K8sPodManager(self._api, self._default_namespace)
- self.endpoints = K8sEndpointManager(self._api, self._default_namespace)
- self.namespaces = K8sNamespaceManager(self._api,
- self._default_namespace)
- self.services = K8sServiceManager(self._api, self._default_namespace)
- self.serviceaccounts = K8sServiceAccountManager(
- self._api, self._default_namespace)
- self.secrets = K8sSecretManager(self._api, self._default_namespace)
- self.events = K8sEventManager(self._api, self._default_namespace)
- self.limitranges = K8sLimitRangeManager(self._api,
- self._default_namespace)
- self.jobs = K8sJobManager(self._bapi, self._default_namespace)
- self.daemonsets = K8sDaemonSetManager(self._eapi,
- self._default_namespace)
- self.ingresses = K8sIngressManager(self._eapi, self._default_namespace)
- self.deployments = K8sDeploymentManager(self._eapi,
- self._default_namespace)
- self.horizontalpodautoscalers = K8sHorizontalPodAutoscalerManager(
- self._eapi, self._default_namespace)
- self.componentstatuses = K8sComponentStatusManager(
- self._api, self._default_namespace)
- self.resourcequotas = K8sResourceQuotaManager(
- self._api, self._default_namespace)
- self.replicationcontrollers = K8sReplicationControllerManager(
- self._api, self._default_namespace)
- self.pvolumeclaims = K8sPersistentVolumeClaimManager(
- self._api, self._default_namespace)
- self.pvolumes = K8sPersistentVolumeManager(
- self._api, self._default_namespace)
- self.replicasets = K8sReplicaSetManager(
- self._eapi, self._default_namespace
- )
+ config_data = {
+ 'apiVersion': 'v1',
+ 'kind': 'Config',
+ 'preferences': {},
+ 'current-context': 'cluster-remote',
+ 'clusters': [{
+ 'name': 'cluster',
+ 'cluster': {
+ 'server': api_server,
+ 'certificate-authority-data': ca,
+ },
+ }],
+ 'users': [{
+ 'name': 'remote',
+ 'user': {
+ 'password': password,
+ 'username': user,
+ },
+ }],
+ 'contexts': [{
+ 'name': 'cluster-remote',
+ 'context': {
+ 'cluster': 'cluster',
+ 'user': 'remote',
+ },
+ }],
+ }
+
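+        # type.__call__ builds the Configuration directly, bypassing its
+        # metaclass, so this config does not copy or modify the client's
+        # shared default Configuration instance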
+ configuration = type.__call__(client.Configuration)
+ loader = kubernetes.config.kube_config.KubeConfigLoader(config_data)
+ loader.load_and_set(configuration)
+ api_client = client.ApiClient(configuration=configuration)
+
+ self.api_core = client.CoreV1Api(api_client)
+ self.api_apps = client.AppsV1Api(api_client)
+ self.api_extensions = client.ExtensionsV1beta1Api(api_client)
+ self.api_autoscaling = client.AutoscalingV1Api(api_client)
+ self.api_batch = client.BatchV1Api(api_client)
+
+ self.nodes = K8sNodeManager(self)
+ self.pods = K8sPodManager(self)
+ self.endpoints = K8sEndpointsManager(self)
+ self.namespaces = K8sNamespaceManager(self)
+ self.services = K8sServiceManager(self)
+ self.serviceaccounts = K8sServiceAccountManager(self)
+ self.secrets = K8sSecretManager(self)
+ self.events = K8sEventManager(self)
+ self.limitranges = K8sLimitRangeManager(self)
+ self.jobs = K8sJobManager(self)
+ self.daemonsets = K8sDaemonSetManager(self)
+ self.ingresses = K8sIngressManager(self)
+ self.deployments = K8sDeploymentManager(self)
+ self.horizontalpodautoscalers = K8sHorizontalPodAutoscalerManager(self)
+ self.componentstatuses = K8sComponentStatusManager(self)
+ self.resourcequotas = K8sResourceQuotaManager(self)
+ self.replicationcontrollers = K8sReplicationControllerManager(self)
+ self.pvolumeclaims = K8sPersistentVolumeClaimManager(self)
+ self.pvolumes = K8sPersistentVolumeManager(self)
+ self.replicasets = K8sReplicaSetManager(self)
+ self.networkpolicies = K8sNetworkPolicyManager(self)
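
For illustration, a sketch of constructing the new kubeconfig-based client; the credentials, addresses and file paths are placeholders (in the real tests they come from the environment config, which is outside this patch):

    import base64

    with open('/tmp/k8s-ca.crt', 'rb') as f:
        ca = base64.b64encode(f.read()).decode()

    cluster = K8sCluster(user='admin', password='secret', ca=ca,
                         host='10.0.0.10', port='443')
    for ds in cluster.daemonsets.list(namespace='kube-system'):
        print(ds.name)
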
diff --git a/tcp_tests/managers/k8s/componentstatuses.py b/tcp_tests/managers/k8s/componentstatuses.py
index a991576..7a9c27a 100644
--- a/tcp_tests/managers/k8s/componentstatuses.py
+++ b/tcp_tests/managers/k8s/componentstatuses.py
@@ -17,23 +17,21 @@
class K8sComponentStatus(K8sBaseResource):
- """docstring for K8sComponentStatus"""
+ resource_type = 'componentstatus'
- def __repr__(self):
- return "<K8sComponentStatus: %s>" % self.name
-
- @property
- def name(self):
- return self.metadata.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_component_status(self.name, **kwargs)
class K8sComponentStatusManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sComponentStatus
- def _get(self, name, **kwargs):
- return self.api.read_namespaced_component_status(name=name, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_core
- def _list(self, **kwargs):
- return self.api.list_namespaced_component_status(**kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_component_status(**kwargs)
+
+ def _list_all(self, **kwargs):
+ return self._list(None, **kwargs)
diff --git a/tcp_tests/managers/k8s/daemonsets.py b/tcp_tests/managers/k8s/daemonsets.py
index dc2c5f7..15e6076 100644
--- a/tcp_tests/managers/k8s/daemonsets.py
+++ b/tcp_tests/managers/k8s/daemonsets.py
@@ -12,68 +12,45 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sDaemonSet(K8sBaseResource):
- """docstring for K8sDaemonSet"""
+ resource_type = 'daemonset'
- def __repr__(self):
- return "<K8sDaemonSet: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_daemon_set(
+ self.name, self.namespace, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_daemon_set(
+ self.namespace, body, **kwargs)
- @property
- def namespace(self):
- return self.metadata.namespace
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_daemon_set(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_daemon_set(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_daemon_set(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
class K8sDaemonSetManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sDaemonSet
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_daemon_set(
- name=name, namespace=namespace, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_apps
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.list_namespaced_daemon_set(
- namespace=namespace, **kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_namespaced_daemon_set(namespace, **kwargs)
- def _full_list(self, **kwargs):
- return self.api.list_daemon_set(**kwargs)
-
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_daemon_set(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_daemon_set(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _delete(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.delete_namespaced_daemon_set(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _deletecollection(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.deletecollection_namespaced_daemon_set(
- namespace=namespace, **kwargs)
-
- def full_list(self, *args, **kwargs):
- lst = self._full_list(*args, **kwargs)
- return [self.resource_class(self, item) for item in lst.items]
-
- def update(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.patch_namespaced_daemon_set(
- body=body, name=name, namespace=namespace, **kwargs)
+ def _list_all(self, **kwargs):
+ return self.api.list_daemon_set_for_all_namespaces(**kwargs)
diff --git a/tcp_tests/managers/k8s/deployments.py b/tcp_tests/managers/k8s/deployments.py
index 5d47d70..3894ac4 100644
--- a/tcp_tests/managers/k8s/deployments.py
+++ b/tcp_tests/managers/k8s/deployments.py
@@ -12,63 +12,56 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
+from devops.helpers import helpers
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sDeployment(K8sBaseResource):
- """docstring for K8sDeployment"""
+ resource_type = 'deployment'
- def __repr__(self):
- return "<K8sDeployment: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_deployment(
+ self.name, self.namespace, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_deployment(
+ self.namespace, body, **kwargs)
- @property
- def namespace(self):
- return self.metadata.namespace
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_deployment(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_deployment(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_deployment(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
+
+ def is_ready(self):
+ dep = self.read()
+ return dep.status.available_replicas == dep.status.replicas
+
+ def wait_ready(self, timeout=120, interval=5):
+ helpers.wait(lambda: self.is_ready(),
+ timeout=timeout, interval=interval)
+ return self
class K8sDeploymentManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sDeployment
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_deployment(
- name=name, namespace=namespace, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_apps
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.list_namespaced_deployment(
- namespace=namespace, **kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_namespaced_deployment(namespace, **kwargs)
- def _full_list(self, **kwargs):
- return self.api.list_deployment(**kwargs)
-
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_deployment(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_deployment(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _delete(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.delete_namespaced_deployment(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _deletecollection(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.deletecollection_namespaced_deployment(
- namespace=namespace, **kwargs)
-
- def full_list(self, *args, **kwargs):
- lst = self._full_list(*args, **kwargs)
- return [self.resource_class(self, item) for item in lst.items]
+ def _list_all(self, **kwargs):
+ return self.api.list_deployment_for_all_namespaces(**kwargs)
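
For illustration, how the new readiness helpers on K8sDeployment are meant to be used; the deployment name and namespace are hypothetical:

    server = cluster.deployments.get(name='netchecker-server',
                                     namespace='netchecker')
    server.wait_ready(timeout=300)  # waits until available replicas == desired
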
diff --git a/tcp_tests/managers/k8s/endpoints.py b/tcp_tests/managers/k8s/endpoints.py
index ed1066e..0ddb8ae 100644
--- a/tcp_tests/managers/k8s/endpoints.py
+++ b/tcp_tests/managers/k8s/endpoints.py
@@ -12,59 +12,45 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
-class K8sEndpoint(K8sBaseResource):
- """docstring for K8sEndpoint"""
+class K8sEndpoints(K8sBaseResource):
+ resource_type = 'endpoints'
- def __repr__(self):
- return "<K8sEndpoint: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_endpoints(
+ self.name, self.namespace, **kwargs)
+
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_endpoints(
+ self.namespace, body, **kwargs)
+
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_endpoints(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_endpoints(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_endpoints(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
+
+
+class K8sEndpointsManager(K8sBaseManager):
+ resource_class = K8sEndpoints
@property
- def name(self):
- return self.metadata.name
+ def api(self):
+ return self._cluster.api_core
+ def _list(self, namespace, **kwargs):
+ return self.api.list_namespaced_endpoints(namespace, **kwargs)
-class K8sEndpointManager(K8sBaseManager):
- """docstring for ClassName"""
-
- resource_class = K8sEndpoint
-
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_endpoints(
- name=name, namespace=namespace, **kwargs)
-
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.list_namespaced_endpoints(
- namespace=namespace, **kwargs)
-
- def _full_list(self, **kwargs):
- return self.api.list_endpoints(**kwargs)
-
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_endpoints(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_endpoints(
- body=body, name=body, namespace=namespace, **kwargs)
-
- def _delete(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.delete_namespaced_endpoints(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _deletecollection(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.deletecollection_namespaced_endpoints(
- namespace=namespace, **kwargs)
-
- def full_list(self, *args, **kwargs):
- lst = self._full_list(*args, **kwargs)
- return [self.resource_class(self, item) for item in lst.items]
+ def _list_all(self, **kwargs):
+ return self.api.list_endpoints_for_all_namespaces(**kwargs)
diff --git a/tcp_tests/managers/k8s/events.py b/tcp_tests/managers/k8s/events.py
index 099e9e4..a0d865a 100644
--- a/tcp_tests/managers/k8s/events.py
+++ b/tcp_tests/managers/k8s/events.py
@@ -12,58 +12,45 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sEvent(K8sBaseResource):
- """docstring for K8sEvent"""
+ resource_type = 'event'
- def __repr__(self):
- return "<K8sEvent: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_event(
+ self.name, self.namespace, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_event(
+ self.namespace, body, **kwargs)
+
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_event(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_event(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_event(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
class K8sEventManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sEvent
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_event(
- name=name, namespace=namespace, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_core
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.list_namespaced_event(namespace=namespace, **kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_namespaced_event(namespace, **kwargs)
- def _full_list(self, **kwargs):
- return self.api.list_event(**kwargs)
-
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_event(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_event(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _delete(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.delete_namespaced_event(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _deletecollection(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.deletecollection_namespaced_event(
- namespace=namespace, **kwargs)
-
- def full_list(self, *args, **kwargs):
- lst = self._full_list(*args, **kwargs)
- return [self.resource_class(self, item) for item in lst.items]
+ def _list_all(self, **kwargs):
+ return self.api.list_event_for_all_namespaces(**kwargs)
diff --git a/tcp_tests/managers/k8s/horizontalpodautoscalers.py b/tcp_tests/managers/k8s/horizontalpodautoscalers.py
index 6ce78e7..b7842ac 100644
--- a/tcp_tests/managers/k8s/horizontalpodautoscalers.py
+++ b/tcp_tests/managers/k8s/horizontalpodautoscalers.py
@@ -12,59 +12,47 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sHorizontalPodAutoscaler(K8sBaseResource):
- """docstring for K8sHorizontalPodAutoscaler"""
+ resource_type = 'horizontalpodautoscaler'
- def __repr__(self):
- return "<K8sHorizontalPodAutoscaler: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_horizontal_pod_autoscaler(
+ self.name, self.namespace, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_horizontal_pod_autoscaler(
+ self.namespace, body, **kwargs)
+
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_horizontal_pod_autoscaler(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_horizontal_pod_autoscaler(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_horizontal_pod_autoscaler(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
class K8sHorizontalPodAutoscalerManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sHorizontalPodAutoscaler
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_horizontal_pod_autoscaler(
- name=name, namespace=namespace, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_autoscaling
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
+ def _list(self, namespace, **kwargs):
return self.api.list_namespaced_horizontal_pod_autoscaler(
- namespace=namespace, **kwargs)
+ namespace, **kwargs)
- def _full_list(self, **kwargs):
- return self.api.list_horizontal_pod_autoscaler(**kwargs)
-
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_horizontal_pod_autoscaler(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_horizontal_pod_autoscaler(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _delete(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.delete_namespaced_horizontal_pod_autoscaler(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _deletecollection(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.deletecollection_namespaced_horizontal_pod_autoscaler(
- namespace=namespace, **kwargs)
-
- def full_list(self, *args, **kwargs):
- lst = self._full_list(*args, **kwargs)
- return [self.resource_class(self, item) for item in lst.items]
+ def _list_all(self, **kwargs):
+ return self.api.list_horizontal_pod_autoscaler_for_all_namespaces(
+ **kwargs)
diff --git a/tcp_tests/managers/k8s/ingresses.py b/tcp_tests/managers/k8s/ingresses.py
index 81b240e..906dc31 100644
--- a/tcp_tests/managers/k8s/ingresses.py
+++ b/tcp_tests/managers/k8s/ingresses.py
@@ -12,59 +12,45 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sIngress(K8sBaseResource):
- """docstring for K8sIngress"""
+ resource_type = 'ingress'
- def __repr__(self):
- return "<K8sIngress: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_ingress(
+ self.name, self.namespace, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_ingress(
+ self.namespace, body, **kwargs)
+
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_ingress(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_ingress(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_ingress(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
class K8sIngressManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sIngress
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_ingress(
- name=name, namespace=namespace, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_extensions
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.list_namespaced_ingress(
- namespace=namespace, **kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_namespaced_ingress(namespace, **kwargs)
- def _full_list(self, **kwargs):
- return self.api.list_ingress(**kwargs)
-
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_ingress(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_ingress(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _delete(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.delete_namespaced_ingress(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _deletecollection(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.deletecollection_namespaced_ingress(
- namespace=namespace, **kwargs)
-
- def full_list(self, *args, **kwargs):
- lst = self._full_list(*args, **kwargs)
- return [self.resource_class(self, item) for item in lst.items]
+ def _list_all(self, **kwargs):
+ return self.api.list_ingress_for_all_namespaces(**kwargs)
diff --git a/tcp_tests/managers/k8s/jobs.py b/tcp_tests/managers/k8s/jobs.py
index a2dbb81..740a662 100644
--- a/tcp_tests/managers/k8s/jobs.py
+++ b/tcp_tests/managers/k8s/jobs.py
@@ -12,58 +12,45 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sJob(K8sBaseResource):
- """docstring for K8sJob"""
+ resource_type = 'job'
- def __repr__(self):
- return "<K8sJob: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_job(
+ self.name, self.namespace, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_job(
+ self.namespace, body, **kwargs)
+
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_job(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_job(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_job(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
class K8sJobManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sJob
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_job(
- name=name, namespace=namespace, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_batch
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.list_namespaced_job(namespace=namespace, **kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_namespaced_job(namespace, **kwargs)
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_job(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_job(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _delete(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.delete_namespaced_job(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _deletecollection(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.deletecollection_namespaced_job(
- namespace=namespace, **kwargs)
-
- def full_list(self, *args, **kwargs):
- lst = self._full_list(*args, **kwargs)
- return [self.resource_class(self, item) for item in lst.items]
-
- def _full_list(self, **kwargs):
- return self.api.list_job(**kwargs)
+ def _list_all(self, **kwargs):
+ return self.api.list_job_for_all_namespaces(**kwargs)
diff --git a/tcp_tests/managers/k8s/limitranges.py b/tcp_tests/managers/k8s/limitranges.py
index 7136300..3a9b2de 100644
--- a/tcp_tests/managers/k8s/limitranges.py
+++ b/tcp_tests/managers/k8s/limitranges.py
@@ -12,52 +12,45 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sLimitRange(K8sBaseResource):
- """docstring for K8sLimitRange"""
+ resource_type = 'limitrange'
- def __repr__(self):
- return "<K8sLimitRange: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_limit_range(
+ self.name, self.namespace, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_limit_range(
+ self.namespace, body, **kwargs)
+
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_limit_range(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_limit_range(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_limit_range(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
class K8sLimitRangeManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sLimitRange
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_limit_range(
- name=name, namespace=namespace, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_core
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.list_namespaced_limit_range(
- namespace=namespace, **kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_namespaced_limit_range(namespace, **kwargs)
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_limit_range(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_limit_range(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _delete(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.delete_namespaced_limit_range(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _deletecollection(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.deletecollection_namespaced_limit_range(
- namespace=namespace, **kwargs)
+ def _list_all(self, **kwargs):
+ return self.api.list_limit_range_for_all_namespaces(**kwargs)
diff --git a/tcp_tests/managers/k8s/namespaces.py b/tcp_tests/managers/k8s/namespaces.py
index 8c36302..224ae33 100644
--- a/tcp_tests/managers/k8s/namespaces.py
+++ b/tcp_tests/managers/k8s/namespaces.py
@@ -12,40 +12,41 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sNamespace(K8sBaseResource):
- """docstring for ClassName"""
+ resource_type = 'namespace'
- def __repr__(self):
- return "<K8sNamespace: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespace(self.name, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespace(body, **kwargs)
+
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespace(self.name, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespace(self.name, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespace(
+ self.name, client.V1DeleteOptions(), **kwargs)
class K8sNamespaceManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sNamespace
- def _get(self, name, **kwargs):
- return self.api.read_namespaced_namespace(name, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_core
- def _list(self, **kwargs):
- return self.api.list_namespaced_namespace(**kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_namespace(**kwargs)
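+ # Namespaces are cluster-scoped; the namespace argument exists only to
+ # match the common manager interface and is ignored here.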
- def _create(self, body, **kwargs):
- return self.api.create_namespaced_namespace(body, **kwargs)
-
- def _replace(self, body, name, **kwargs):
- return self.api.replace_namespaced_namespace(body, name, **kwargs)
-
- def _delete(self, body, name, **kwargs):
- return self.api.delete_namespaced_namespace(body, name, **kwargs)
-
- def _deletecollection(self, **kwargs):
- return self.api.deletecollection_namespaced_namespace(**kwargs)
+ def _list_all(self, **kwargs):
+ return self._list(None, **kwargs)
diff --git a/tcp_tests/managers/k8s/networkpolicies.py b/tcp_tests/managers/k8s/networkpolicies.py
new file mode 100644
index 0000000..eb92a12
--- /dev/null
+++ b/tcp_tests/managers/k8s/networkpolicies.py
@@ -0,0 +1,56 @@
+# Copyright 2017 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+
+
+from kubernetes import client
+
+from tcp_tests.managers.k8s.base import K8sBaseResource
+from tcp_tests.managers.k8s.base import K8sBaseManager
+
+
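+# Wrapper classes for Kubernetes NetworkPolicy objects, following the same
+# K8sBaseResource/K8sBaseManager pattern as the other resource modules.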
+class K8sNetworkPolicy(K8sBaseResource):
+ resource_type = 'networkpolicy'
+
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_network_policy(
+ self.name, self.namespace, **kwargs)
+
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_network_policy(
+ self.namespace, body, **kwargs)
+
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_network_policy(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_network_policy(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_network_policy(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
+
+
+class K8sNetworkPolicyManager(K8sBaseManager):
+ resource_class = K8sNetworkPolicy
+
+ @property
+ def api(self):
+ return self._cluster.api_extensions
+
+ def _list(self, namespace, **kwargs):
+ return self.api.list_namespaced_network_policy(namespace, **kwargs)
+
+ def _list_all(self, **kwargs):
+ return self.api.list_network_policy_for_all_namespaces(**kwargs)
diff --git a/tcp_tests/managers/k8s/nodes.py b/tcp_tests/managers/k8s/nodes.py
index c6d4dbe..4b0451e 100644
--- a/tcp_tests/managers/k8s/nodes.py
+++ b/tcp_tests/managers/k8s/nodes.py
@@ -12,70 +12,41 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sNode(K8sBaseResource):
- """docstring for ClassName"""
+ resource_type = 'node'
- def __repr__(self):
- return "<K8sNode: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_node(self.name, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_node(body, **kwargs)
- @property
- def labels(self):
- return self.metadata.labels
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_node(self.name, body, **kwargs)
- @labels.setter
- def labels(self, labels):
- current_labels = {
- label: None for label in self.labels
- }
- current_labels.update(labels)
- self.add_labels(labels=current_labels)
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_node(self.name, body, **kwargs)
- def add_labels(self, labels):
- if not isinstance(labels, dict):
- raise TypeError("labels must be a dict!")
- body = {
- "metadata":
- {
- "labels": labels
- }
- }
- self._add_details(self._manager.update(body=body, name=self.name))
-
- def remove_labels(self, list_labels):
- labels = {label: None for label in list_labels}
- self.add_labels(labels=labels)
+ def _delete(self, **kwargs):
+ self._manager.api.delete_node(
+ self.name, client.V1DeleteOptions(), **kwargs)
class K8sNodeManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sNode
- def _get(self, name, **kwargs):
- return self.api.read_namespaced_node(name=name, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_core
- def _list(self, **kwargs):
- return self.api.list_namespaced_node(**kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_node(**kwargs)
- def _create(self, body, **kwargs):
- return self.api.create_namespaced_node(body=body, **kwargs)
-
- def _replace(self, body, name, **kwargs):
- return self.api.replace_namespaced_node(body=body, name=name, **kwargs)
-
- def _delete(self, body, name, **kwargs):
- return self.api.delete_namespaced_node(body=body, name=name, **kwargs)
-
- def _deletecollection(self, **kwargs):
- return self.api.deletecollection_namespaced_node(**kwargs)
-
- def update(self, body, name, **kwargs):
- return self.api.patch_namespaced_node(body=body, name=name, **kwargs)
+ def _list_all(self, **kwargs):
+ return self._list(None, **kwargs)
diff --git a/tcp_tests/managers/k8s/persistentvolumeclaims.py b/tcp_tests/managers/k8s/persistentvolumeclaims.py
index f28f622..b28b869 100644
--- a/tcp_tests/managers/k8s/persistentvolumeclaims.py
+++ b/tcp_tests/managers/k8s/persistentvolumeclaims.py
@@ -12,52 +12,47 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sPersistentVolumeClaim(K8sBaseResource):
- """docstring for K8sPersistentVolumeClaim"""
+ resource_type = 'persistentvolumeclaim'
- def __repr__(self):
- return "<K8sPersistentVolumeClaim: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_persistent_volume_claim(
+ self.name, self.namespace, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_persistent_volume_claim(
+ self.namespace, body, **kwargs)
+
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_persistent_volume_claim(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_persistent_volume_claim(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_persistent_volume_claim(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
class K8sPersistentVolumeClaimManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sPersistentVolumeClaim
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_persistent_volume_claim(
- name=name, namespace=namespace, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_core
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
+ def _list(self, namespace, **kwargs):
return self.api.list_namespaced_persistent_volume_claim(
- namespace=namespace, **kwargs)
+ namespace, **kwargs)
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_persistent_volume_claim(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_persistent_volume_claim(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _delete(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.delete_namespaced_persistent_volume_claim(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _deletecollection(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.deletecollection_namespaced_persistent_volume_claim(
- namespace=namespace, **kwargs)
+ def _list_all(self, **kwargs):
+ return self.api.list_persistent_volume_claim_for_all_namespaces(
+ **kwargs)
diff --git a/tcp_tests/managers/k8s/persistentvolumes.py b/tcp_tests/managers/k8s/persistentvolumes.py
index 8ab7ec2..7424935 100644
--- a/tcp_tests/managers/k8s/persistentvolumes.py
+++ b/tcp_tests/managers/k8s/persistentvolumes.py
@@ -12,46 +12,43 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sPersistentVolume(K8sBaseResource):
- """docstring for K8sPersistentVolume"""
+ resource_type = 'persistentvolume'
- def __repr__(self):
- return "<K8sPersistentVolume: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_persistent_volume(self.name, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_persistent_volume(body, **kwargs)
+
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_persistent_volume(
+ self.name, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_persistent_volume(
+ self.name, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_persistent_volume(
+ self.name, client.V1DeleteOptions(), **kwargs)
class K8sPersistentVolumeManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sPersistentVolume
- def _get(self, name, **kwargs):
- return self.api.read_namespaced_persistent_volume(
- name=name, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_core
- def _list(self, **kwargs):
- return self.api.list_namespaced_persistent_volume(
- **kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_persistent_volume(**kwargs)
- def _create(self, body, **kwargs):
- return self.api.create_namespaced_persistent_volume(
- body, **kwargs)
-
- def _replace(self, body, name, **kwargs):
- return self.api.replace_namespaced_persistent_volume(
- body=body, name=name, **kwargs)
-
- def _delete(self, body, name, **kwargs):
- return self.api.delete_namespaced_persistent_volume(
- body=body, name=name, **kwargs)
-
- def _deletecollection(self, **kwargs):
- return self.api.deletecollection_namespaced_persistent_volume(
- **kwargs)
+ def _list_all(self, **kwargs):
+ return self._list(None, **kwargs)
diff --git a/tcp_tests/managers/k8s/pods.py b/tcp_tests/managers/k8s/pods.py
index 94abc20..98192a6 100644
--- a/tcp_tests/managers/k8s/pods.py
+++ b/tcp_tests/managers/k8s/pods.py
@@ -11,6 +11,9 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
+
+from kubernetes import client
+
from devops.helpers import helpers
from tcp_tests.managers.k8s.base import K8sBaseResource
@@ -18,91 +21,51 @@
class K8sPod(K8sBaseResource):
- """docstring for K8sPod"""
+ resource_type = 'pod'
- def __repr__(self):
- return "<K8sPod: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_pod(
+ self.name, self.namespace, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_pod(
+ self.namespace, body, **kwargs)
- @property
- def phase(self):
- return self.status.phase
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_pod(
+ self.name, self.namespace, body, **kwargs)
- @property
- def namespace(self):
- return self.metadata.namespace
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_pod(
+ self.name, self.namespace, body, **kwargs)
- def wait_phase(self, phase, timeout=60, interval=5):
- """Wait phase of pod_name from namespace while timeout
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_pod(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
- :param list or str: phase
- :param int: timeout
+ def wait_phase(self, phases, timeout=60, interval=3):
+ if isinstance(phases, str):
+ phases = [phases]
- :rtype: None
- """
- if isinstance(phase, str):
- phase = [phase]
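+ # Poll the pod via read() until its phase is in the accepted set
+ # or the timeout expires.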
+ helpers.wait(lambda: self.read().status.phase in phases,
+ timeout=timeout, interval=interval,
+ timeout_msg='Timeout waiting, pod {0} phase is not in '
+ '"{1}"'.format(self.name, phases))
+ return self
- def check():
- self._add_details(self._manager.get(name=self.name,
- namespace=self.namespace))
- return self.phase in phase
-
- helpers.wait(check, timeout=timeout, interval=interval,
- timeout_msg='Timeout waiting({timeout}s), pod {pod_name} '
- 'is not in "{phase}" phase'.format(
- timeout=timeout,
- pod_name=self.name,
- phase=phase))
-
- def wait_running(self, timeout=60, interval=5):
- self.wait_phase(['Running'], timeout=timeout, interval=interval)
+ def wait_running(self, timeout=240, interval=3):
+ return self.wait_phase('Running', timeout=timeout, interval=interval)
class K8sPodManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sPod
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_pod(
- name=name, namespace=namespace, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_core
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.list_namespaced_pod(namespace=namespace, **kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_namespaced_pod(namespace, **kwargs)
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_pod(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_pod(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _delete(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- # NOTE: the following two lines should be deleted after
- # serialization is fixed in python-k8sclient
- if isinstance(body, self.resource_class):
- body = body.swagger_types
- return self.api.delete_namespaced_pod(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _deletecollection(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.deletecollection_namespaced_pod(
- namespace=namespace, **kwargs)
-
- def full_list(self, *args, **kwargs):
- lst = self._full_list(*args, **kwargs)
- return [self.resource_class(self, item) for item in lst.items]
-
- def _full_list(self, **kwargs):
- return self.api.list_pod(**kwargs)
+ def _list_all(self, **kwargs):
+ return self.api.list_pod_for_all_namespaces(**kwargs)
diff --git a/tcp_tests/managers/k8s/replicasets.py b/tcp_tests/managers/k8s/replicasets.py
index 31b6db6..7132157 100644
--- a/tcp_tests/managers/k8s/replicasets.py
+++ b/tcp_tests/managers/k8s/replicasets.py
@@ -12,59 +12,45 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sReplicaSet(K8sBaseResource):
- """docstring for K8sPod"""
+ resource_type = 'replicaset'
- def __repr__(self):
- return "<K8sPod: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_replica_set(
+ self.name, self.namespace, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_replica_set(
+ self.namespace, body, **kwargs)
+
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_replica_set(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_replica_set(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_replica_set(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
class K8sReplicaSetManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sReplicaSet
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_replica_set(
- name=name, namespace=namespace, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_apps
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.list_namespaced_replica_set(namespace=namespace,
- **kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_namespaced_replica_set(namespace, **kwargs)
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_replica_set(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_replica_set(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _delete(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.delete_namespaced_replica_set(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _deletecollection(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.deletecollection_namespaced_replica_set(
- namespace=namespace, **kwargs)
-
- def full_list(self, *args, **kwargs):
- lst = self._full_list(*args, **kwargs)
- return [self.resource_class(self, item) for item in lst.items]
-
- def _full_list(self, **kwargs):
- return self.api.list_replica_set(**kwargs)
+ def _list_all(self, **kwargs):
+ return self.api.list_replica_set_for_all_namespaces(**kwargs)
diff --git a/tcp_tests/managers/k8s/replicationcontrollers.py b/tcp_tests/managers/k8s/replicationcontrollers.py
index 6cf7da4..aabb092 100644
--- a/tcp_tests/managers/k8s/replicationcontrollers.py
+++ b/tcp_tests/managers/k8s/replicationcontrollers.py
@@ -12,59 +12,47 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sReplicationController(K8sBaseResource):
- """docstring for K8sReplicationController"""
+ resource_type = 'replicationcontroller'
- def __repr__(self):
- return "<K8sReplicationController: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_replication_controller(
+ self.name, self.namespace, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_replication_controller(
+ self.namespace, body, **kwargs)
+
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_replication_controller(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_replication_controller(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_replication_controller(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
class K8sReplicationControllerManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sReplicationController
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_replication_controller(
- name=name, namespace=namespace, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_core
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
+ def _list(self, namespace, **kwargs):
return self.api.list_namespaced_replication_controller(
- namespace=namespace, **kwargs)
+ namespace, **kwargs)
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_replication_controller(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_replication_controller(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _delete(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.delete_namespaced_replication_controller(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _deletecollection(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.deletecollection_namespaced_replication_controller(
- namespace=namespace, **kwargs)
-
- def full_list(self, *args, **kwargs):
- lst = self._full_list(*args, **kwargs)
- return [self.resource_class(self, item) for item in lst.items]
-
- def _full_list(self, **kwargs):
- return self.api.list_replication_controller(**kwargs)
+ def _list_all(self, **kwargs):
+ return self.api.list_replication_controller_for_all_namespaces(
+ **kwargs)
diff --git a/tcp_tests/managers/k8s/resourcequotas.py b/tcp_tests/managers/k8s/resourcequotas.py
index 49d81d5..2edf00d 100644
--- a/tcp_tests/managers/k8s/resourcequotas.py
+++ b/tcp_tests/managers/k8s/resourcequotas.py
@@ -12,59 +12,45 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sResourceQuota(K8sBaseResource):
- """docstring for K8sResourceQuota"""
+ resource_type = 'resourcequota'
- def __repr__(self):
- return "<K8sResourceQuota: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_resource_quota(
+ self.name, self.namespace, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_resource_quota(
+ self.namespace, body, **kwargs)
+
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_resource_quota(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_resource_quota(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_resource_quota(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
class K8sResourceQuotaManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sResourceQuota
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_resource_quota(
- name=name, namespace=namespace, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_core
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.list_namespaced_resource_quota(
- namespace=namespace, **kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_namespaced_resource_quota(namespace, **kwargs)
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_resource_quota(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_resource_quota(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _delete(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.delete_namespaced_resource_quota(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _deletecollection(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.deletecollection_namespaced_resource_quota(
- namespace=namespace, **kwargs)
-
- def full_list(self, *args, **kwargs):
- lst = self._full_list(*args, **kwargs)
- return [self.resource_class(self, item) for item in lst.items]
-
- def _full_list(self, **kwargs):
- return self.api.list_resourse_quota(**kwargs)
+ def _list_all(self, **kwargs):
+ return self.api.list_resource_quota_for_all_namespaces(**kwargs)
diff --git a/tcp_tests/managers/k8s/secrets.py b/tcp_tests/managers/k8s/secrets.py
index 355c884..474c119 100644
--- a/tcp_tests/managers/k8s/secrets.py
+++ b/tcp_tests/managers/k8s/secrets.py
@@ -12,59 +12,45 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sSecret(K8sBaseResource):
- """docstring for K8sSecret"""
+ resource_type = 'secret'
- def __repr__(self):
- return "<K8sSecret: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_secret(
+ self.name, self.namespace, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_secret(
+ self.namespace, body, **kwargs)
+
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_secret(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_secret(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_secret(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
class K8sSecretManager(K8sBaseManager):
- """docstring for ClassName"""
-
resource_class = K8sSecret
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_secret(
- name=name, namespace=namespace, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_core
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.list_namespaced_secret(
- namespace=namespace, **kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_namespaced_secret(namespace, **kwargs)
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_secret(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_secret(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _delete(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.delete_namespaced_secret(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _deletecollection(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.deletecollection_namespaced_secret(
- namespace=namespace, **kwargs)
-
- def full_list(self, *args, **kwargs):
- lst = self._full_list(*args, **kwargs)
- return [self.resource_class(self, item) for item in lst.items]
-
- def _full_list(self, **kwargs):
- return self.api.list_secret(**kwargs)
+ def _list_all(self, **kwargs):
+ return self.api.list_secret_for_all_namespaces(**kwargs)
diff --git a/tcp_tests/managers/k8s/serviceaccounts.py b/tcp_tests/managers/k8s/serviceaccounts.py
index bf58b4c..3b779eb 100644
--- a/tcp_tests/managers/k8s/serviceaccounts.py
+++ b/tcp_tests/managers/k8s/serviceaccounts.py
@@ -12,59 +12,45 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sServiceAccount(K8sBaseResource):
- """docstring for K8sServiceAccount"""
+ resource_type = 'serviceaccount'
- def __repr__(self):
- return "<K8sServiceAccount: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_service_account(
+ self.name, self.namespace, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_service_account(
+ self.namespace, body, **kwargs)
+
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_service_account(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_service_account(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_service_account(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
class K8sServiceAccountManager(K8sBaseManager):
- """docstring for K8sServiceAccountManager"""
-
resource_class = K8sServiceAccount
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_service_account(
- name=name, namespace=namespace, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_core
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.list_namespaced_service_account(
- namespace=namespace, **kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_namespaced_service_account(namespace, **kwargs)
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_service_account(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_service_account(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _delete(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.delete_namespaced_service_account(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _deletecollection(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.deletecollection_namespaced_service_account(
- namespace=namespace, **kwargs)
-
- def full_list(self, *args, **kwargs):
- lst = self._full_list(*args, **kwargs)
- return [self.resource_class(self, item) for item in lst.items]
-
- def _full_list(self, **kwargs):
- return self.api.list_service_account(**kwargs)
+ def _list_all(self, **kwargs):
+ return self.api.list_service_account_for_all_namespaces(**kwargs)
diff --git a/tcp_tests/managers/k8s/services.py b/tcp_tests/managers/k8s/services.py
index 97a6be7..6c4a22b 100644
--- a/tcp_tests/managers/k8s/services.py
+++ b/tcp_tests/managers/k8s/services.py
@@ -12,57 +12,51 @@
# License for the specific language governing permissions and limitations
+from kubernetes import client
+
from tcp_tests.managers.k8s.base import K8sBaseResource
from tcp_tests.managers.k8s.base import K8sBaseManager
class K8sService(K8sBaseResource):
- """docstring for K8sService"""
+ resource_type = 'service'
- def __repr__(self):
- return "<K8sService: %s>" % self.name
+ def _read(self, **kwargs):
+ return self._manager.api.read_namespaced_service(
+ self.name, self.namespace, **kwargs)
- @property
- def name(self):
- return self.metadata.name
+ def _create(self, body, **kwargs):
+ return self._manager.api.create_namespaced_service(
+ self.namespace, body, **kwargs)
- @property
- def namespace(self):
- return self.metadata.namespace
+ def _patch(self, body, **kwargs):
+ return self._manager.api.patch_namespaced_service(
+ self.name, self.namespace, body, **kwargs)
+
+ def _replace(self, body, **kwargs):
+ return self._manager.api.replace_namespaced_service(
+ self.name, self.namespace, body, **kwargs)
+
+ def _delete(self, **kwargs):
+ self._manager.api.delete_namespaced_service(
+ self.name, self.namespace, client.V1DeleteOptions(), **kwargs)
+
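+ # external=True returns the LoadBalancer ingress IP; otherwise the ClusterIP.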
+ def get_ip(self, external=False):
+ if external:
+ return self.read().status.load_balancer.ingress[0].ip
+ else:
+ return self.read().spec.cluster_ip
class K8sServiceManager(K8sBaseManager):
- """docstring for K8sServiceManager"""
-
resource_class = K8sService
- def _get(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.read_namespaced_service(
- name=name, namespace=namespace, **kwargs)
+ @property
+ def api(self):
+ return self._cluster.api_core
- def _list(self, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.list_namespaced_service(namespace=namespace, **kwargs)
+ def _list(self, namespace, **kwargs):
+ return self.api.list_namespaced_service(namespace, **kwargs)
- def _create(self, body, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.create_namespaced_service(
- body=body, namespace=namespace, **kwargs)
-
- def _replace(self, body, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.replace_namespaced_service(
- body=body, name=name, namespace=namespace, **kwargs)
-
- def _delete(self, name, namespace=None, **kwargs):
- namespace = namespace or self.namespace
- return self.api.delete_namespaced_service(
- name=name, namespace=namespace, **kwargs)
-
- def full_list(self, *args, **kwargs):
- lst = self._full_list(*args, **kwargs)
- return [self.resource_class(self, item) for item in lst.items]
-
- def _full_list(self, **kwargs):
- return self.api.list_service(**kwargs)
+ def _list_all(self, **kwargs):
+ return self.api.list_service_for_all_namespaces(**kwargs)
diff --git a/tcp_tests/managers/k8smanager.py b/tcp_tests/managers/k8smanager.py
index 9b5588d..a72f2f1 100644
--- a/tcp_tests/managers/k8smanager.py
+++ b/tcp_tests/managers/k8smanager.py
@@ -13,9 +13,6 @@
# under the License.
import os
-import time
-from uuid import uuid4
-import six
import requests
import yaml
@@ -27,7 +24,6 @@
from tcp_tests.helpers.utils import retry
from tcp_tests.managers.execute_commands import ExecuteCommandsMixin
from tcp_tests.managers.k8s import cluster
-from k8sclient.client.rest import ApiException
LOG = logger.logger
@@ -42,9 +38,10 @@
self.__config = config
self.__underlay = underlay
self._salt = salt
- self._api_client = None
- super(K8SManager, self).__init__(
- config=config, underlay=underlay)
+ self._api = None
+ self.kubectl = K8SKubectlCli(self)
+ self.virtlet = K8SVirtlet(self)
+ super(K8SManager, self).__init__(config=config, underlay=underlay)
def install(self, commands):
self.execute_commands(commands,
@@ -75,602 +72,81 @@
"nodes with K8s master").format(k8s_host, k8s_proxy_ip)
return k8s_host
+ def _api_init(self):
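+ # Fetch the base64-encoded CA certificate from a controller node and
+ # use it to build the K8sCluster API client.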
+ ca_result = self.controller_check_call(
+ 'base64 --wrap=0 /etc/kubernetes/ssl/ca-kubernetes.crt')
+
+ self._api = cluster.K8sCluster(
+ user=self.__config.k8s_deploy.kubernetes_admin_user,
+ password=self.__config.k8s_deploy.kubernetes_admin_password,
+ ca=ca_result['stdout'][0],
+ host=self.__config.k8s.kube_host,
+ port=self.__config.k8s.kube_apiserver_port)
+
@property
def api(self):
- if self._api_client is None:
- self._api_client = cluster.K8sCluster(
- user=self.__config.k8s_deploy.kubernetes_admin_user,
- password=self.__config.k8s_deploy.kubernetes_admin_password,
- host=self.__config.k8s.kube_host,
- port=self.__config.k8s.kube_apiserver_port,
- default_namespace='default')
- return self._api_client
+ """
+ :rtype: cluster.K8sCluster
+ """
+ if self._api is None:
+ self._api_init()
+ return self._api
- def ctl_hosts(self):
+ def get_controllers(self):
+ """ Return list of controllers ssh underlays """
return [node for node in self.__config.underlay.ssh if
ext.UNDERLAY_NODE_ROLES.k8s_controller in node['roles']]
+ def get_masters(self):
+ """ Return list of kubernetes masters hosts fqdn """
+ masters_fqdn = self._salt.get_pillar(
+ tgt='I@kubernetes:master', pillar='linux:network:fqdn')
+ return [self.__underlay.host_by_node_name(node_name=v)
+ for pillar in masters_fqdn for k, v in pillar.items()]
+
@property
- def ctl_host(self):
- return self.ctl_hosts()[0]['node_name']
+ def controller_name(self):
+ """ Return node name of controller node that used for all actions """
+ names = [node['node_name'] for node in self.get_controllers()]
+ # sort the names so that the same controller is returned every time
+ names.sort()
+ return names[0]
- def get_pod_phase(self, pod_name, namespace=None):
- return self.api.pods.get(
- name=pod_name, namespace=namespace).phase
-
- def wait_pod_phase(self, pod_name, phase, namespace=None, timeout=60):
- """Wait phase of pod_name from namespace while timeout
-
- :param str: pod_name
- :param str: namespace
- :param list or str: phase
- :param int: timeout
-
- :rtype: None
- """
- if isinstance(phase, str):
- phase = [phase]
-
- def check():
- return self.get_pod_phase(pod_name, namespace) in phase
-
- helpers.wait(check, timeout=timeout,
- timeout_msg='Timeout waiting, pod {pod_name} is not in '
- '"{phase}" phase'.format(
- pod_name=pod_name, phase=phase))
-
- def wait_pods_phase(self, pods, phase, timeout=60):
- """Wait timeout seconds for phase of pods
-
- :param pods: list of K8sPod
- :param phase: list or str
- :param timeout: int
-
- :rtype: None
- """
- if isinstance(phase, str):
- phase = [phase]
-
- def check(pod_name, namespace):
- return self.get_pod_phase(pod_name, namespace) in phase
-
- def check_all_pods():
- return all(check(pod.name, pod.metadata.namespace) for pod in pods)
-
- helpers.wait(
- check_all_pods,
- timeout=timeout,
- timeout_msg='Timeout waiting, pods {0} are not in "{1}" '
- 'phase'.format([pod.name for pod in pods], phase))
-
- def check_pod_create(self, body, namespace=None, timeout=300, interval=5):
- """Check creating sample pod
-
- :param k8s_pod: V1Pod
- :param namespace: str
- :rtype: V1Pod
- """
- LOG.info("Creating pod in k8s cluster")
- if isinstance(body, six.string_types):
- body = yaml.load(body)
- LOG.debug(
- "POD spec to create:\n{}".format(
- yaml.dump(body, default_flow_style=False))
- )
- LOG.debug("Timeout for creation is set to {}".format(timeout))
- LOG.debug("Checking interval is set to {}".format(interval))
- pod = self.api.pods.create(body=body, namespace=namespace)
- pod.wait_running(timeout=timeout, interval=interval)
- LOG.info("Pod '{0}' is created in '{1}' namespace".format(
- pod.name, pod.namespace))
- return self.api.pods.get(name=pod.name, namespace=pod.namespace)
-
- def wait_pod_deleted(self, podname, timeout=60, interval=5):
- helpers.wait(
- lambda: podname not in [pod.name for pod in self.api.pods.list()],
- timeout=timeout,
- interval=interval,
- timeout_msg="Pod deletion timeout reached!"
- )
-
- def check_pod_delete(self, k8s_pod, timeout=300, interval=5,
- namespace=None):
- """Deleting pod from k8s
-
- :param k8s_pod: tcp_tests.managers.k8s.nodes.K8sNode
- :param k8sclient: tcp_tests.managers.k8s.cluster.K8sCluster
- """
- LOG.info("Deleting pod '{}'".format(k8s_pod.name))
- LOG.debug("Pod status:\n{}".format(k8s_pod.status))
- LOG.debug("Timeout for deletion is set to {}".format(timeout))
- LOG.debug("Checking interval is set to {}".format(interval))
- self.api.pods.delete(body=k8s_pod, name=k8s_pod.name,
- namespace=namespace)
- self.wait_pod_deleted(k8s_pod.name, timeout, interval)
- LOG.debug("Pod '{}' is deleted".format(k8s_pod.name))
-
- def check_service_create(self, body, namespace=None):
- """Check creating k8s service
-
- :param body: dict, service spec
- :param namespace: str
- :rtype: K8sService object
- """
- LOG.info("Creating service in k8s cluster")
- LOG.debug(
- "Service spec to create:\n{}".format(
- yaml.dump(body, default_flow_style=False))
- )
- service = self.api.services.create(body=body, namespace=namespace)
- LOG.info("Service '{0}' is created in '{1}' namespace".format(
- service.name, service.namespace))
- return self.api.services.get(name=service.name,
- namespace=service.namespace)
-
- def check_ds_create(self, body, namespace=None):
- """Check creating k8s DaemonSet
-
- :param body: dict, DaemonSet spec
- :param namespace: str
- :rtype: K8sDaemonSet object
- """
- LOG.info("Creating DaemonSet in k8s cluster")
- LOG.debug(
- "DaemonSet spec to create:\n{}".format(
- yaml.dump(body, default_flow_style=False))
- )
- ds = self.api.daemonsets.create(body=body, namespace=namespace)
- LOG.info("DaemonSet '{0}' is created in '{1}' namespace".format(
- ds.name, ds.namespace))
- return self.api.daemonsets.get(name=ds.name, namespace=ds.namespace)
-
- def check_ds_ready(self, dsname, namespace=None):
- """Check if k8s DaemonSet is ready
-
- :param dsname: str, ds name
- :return: bool
- """
- ds = self.api.daemonsets.get(name=dsname, namespace=namespace)
- return (ds.status.current_number_scheduled ==
- ds.status.desired_number_scheduled)
-
- def wait_ds_ready(self, dsname, namespace=None, timeout=60, interval=5):
- """Wait until all pods are scheduled on nodes
-
- :param dsname: str, ds name
- :param timeout: int
- :param interval: int
- """
- helpers.wait(
- lambda: self.check_ds_ready(dsname, namespace=namespace),
- timeout=timeout, interval=interval)
-
- def check_deploy_create(self, body, namespace=None):
- """Check creating k8s Deployment
-
- :param body: dict, Deployment spec
- :param namespace: str
- :rtype: K8sDeployment object
- """
- LOG.info("Creating Deployment in k8s cluster")
- LOG.debug(
- "Deployment spec to create:\n{}".format(
- yaml.dump(body, default_flow_style=False))
- )
- deploy = self.api.deployments.create(body=body, namespace=namespace)
- LOG.info("Deployment '{0}' is created in '{1}' namespace".format(
- deploy.name, deploy.namespace))
- return self.api.deployments.get(name=deploy.name,
- namespace=deploy.namespace)
-
- def check_deploy_ready(self, deploy_name, namespace=None):
- """Check if k8s Deployment is ready
-
- :param deploy_name: str, deploy name
- :return: bool
- """
- deploy = self.api.deployments.get(name=deploy_name,
- namespace=namespace)
- return deploy.status.available_replicas == deploy.status.replicas
-
- def wait_deploy_ready(self, deploy_name, namespace=None, timeout=60,
- interval=5):
- """Wait until all pods are scheduled on nodes
-
- :param deploy_name: str, deploy name
- :param timeout: int
- :param interval: int
- """
- helpers.wait(
- lambda: self.check_deploy_ready(deploy_name, namespace=namespace),
- timeout=timeout, interval=interval)
-
- def check_namespace_create(self, name):
- """Check creating k8s Namespace
-
- :param name: str
- :rtype: K8sNamespace object
- """
- try:
- ns = self.api.namespaces.get(name=name)
- LOG.info("Namespace '{0}' is already exists".format(ns.name))
- except ApiException as e:
- if hasattr(e, "status") and 404 == e.status:
- LOG.info("Creating Namespace in k8s cluster")
- ns = self.api.namespaces.create(
- body={'metadata': {'name': name}})
- LOG.info("Namespace '{0}' is created".format(ns.name))
- # wait 10 seconds until a token for new service account
- # is created
- time.sleep(10)
- ns = self.api.namespaces.get(name=ns.name)
- else:
- raise
- return ns
-
- def create_objects(self, path):
- if isinstance(path, str):
- path = [path]
- params = ' '.join(["-f {}".format(p) for p in path])
- cmd = 'kubectl create {params}'.format(params=params)
- with self.__underlay.remote(
- node_name=self.ctl_host) as remote:
- LOG.info("Running command '{cmd}' on node {node}".format(
- cmd=cmd,
- node=remote.hostname)
- )
- result = remote.check_call(cmd)
- LOG.info(result['stdout'])
-
- def get_running_pods(self, pod_name, namespace=None):
- pods = [pod for pod in self.api.pods.list(namespace=namespace)
- if (pod_name in pod.name and pod.status.phase == 'Running')]
- return pods
-
- def get_pods_number(self, pod_name, namespace=None):
- pods = self.get_running_pods(pod_name, namespace)
- return len(pods)
-
- def get_running_pods_by_ssh(self, pod_name, namespace=None):
- with self.__underlay.remote(
- node_name=self.ctl_host) as remote:
- result = remote.check_call("kubectl get pods --namespace {} |"
- " grep {} | awk '{{print $1 \" \""
- " $3}}'".format(namespace,
- pod_name))['stdout']
- running_pods = [data.strip().split()[0] for data in result
- if data.strip().split()[1] == 'Running']
- return running_pods
-
- def get_pods_restarts(self, pod_name, namespace=None):
- pods = [pod.status.container_statuses[0].restart_count
- for pod in self.get_running_pods(pod_name, namespace)]
- return sum(pods)
-
- def run_conformance(self, timeout=60 * 60, log_out='k8s_conformance.log',
- raise_on_err=True, node_name=None,
- api_server='http://127.0.0.1:8080'):
- if node_name is None:
- node_name = self.ctl_host
- cmd = "set -o pipefail; docker run --net=host -e API_SERVER="\
- "'{api}' {image} | tee '{log}'".format(
- api=api_server, image=self.__config.k8s.k8s_conformance_image,
- log=log_out)
+ def controller_check_call(self, cmd, **kwargs):
+ """ Run command on controller and return result """
+ LOG.info("running cmd on k8s controller: {}".format(cmd))
return self.__underlay.check_call(
- cmd=cmd, node_name=node_name, timeout=timeout,
- raise_on_err=raise_on_err)
+ cmd=cmd, node_name=self.controller_name, **kwargs)
- def get_k8s_masters(self):
- k8s_masters_fqdn = self._salt.get_pillar(tgt='I@kubernetes:master',
- pillar='linux:network:fqdn')
- return [self._K8SManager__underlay.host_by_node_name(node_name=v)
- for pillar in k8s_masters_fqdn for k, v in pillar.items()]
-
- def kubectl_run(self, name, image, port, replicas=None):
- cmd = "kubectl run {0} --image={1} --port={2}".format(
- name, image, port)
- if replicas is not None:
- cmd += " --replicas={}".format(replicas)
- return self.__underlay.check_call(cmd=cmd, node_name=self.ctl_host)
-
- def kubectl_expose(self, resource, name, port, type, target_name=None):
- cmd = "kubectl expose {0} {1} --port={2} --type={3}".format(
- resource, name, port, type)
- if target_name is not None:
- cmd += " --name={}".format(target_name)
- return self.__underlay.check_call(cmd=cmd, node_name=self.ctl_host)
-
- def kubectl_annotate(self, resource, name, annotation):
- with self.__underlay.remote(
- node_name=self.ctl_host) as remote:
- result = remote.check_call(
- "kubectl annotate {0} {1} {2}".format(
- resource, name, annotation
- )
- )
- return result
-
- def get_svc_ip(self, name, namespace='kube-system', external=False):
- cmd = "kubectl get svc {0} -n {1} | awk '{{print ${2}}}' | tail -1".\
- format(name, namespace, 4 if external else 3)
- result = self.__underlay.check_call(cmd, node_name=self.ctl_host)
- return result['stdout'][0].strip()
-
- @retry(300, exception=DevopsCalledProcessError)
- def nslookup(self, host, src):
- self.__underlay.check_call(
- "nslookup {0} {1}".format(host, src), node_name=self.ctl_host)
-
- @retry(300, exception=DevopsCalledProcessError)
- def curl(self, url):
+ def get_keepalived_vip(self):
"""
- Run curl on controller and return stdout
+ Return k8s VIP IP address
- :param url: url to curl
- :return: response string
+ :return: str, IP address
"""
- result = self.__underlay.check_call(
- "curl -s -S \"{}\"".format(url), node_name=self.ctl_host)
- LOG.debug("curl \"{0}\" result: {1}".format(url, result['stdout']))
- return result['stdout']
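+ # The VIP is taken from the _param:cluster_vip_address pillar on nodes
+ # with kubernetes:control enabled.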
+ ctl_vip_pillar = self._salt.get_pillar(
+ tgt="I@kubernetes:control:enabled:True",
+ pillar="_param:cluster_vip_address")[0]
+ return ctl_vip_pillar.values()[0]
-# ---------------------------- Virtlet methods -------------------------------
- def install_jq(self):
- """Install JQuery on node. Required for changing yamls on the fly.
+ def run_sample_deployment(self, name, **kwargs):
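+ # Thin wrapper that returns a K8SSampleDeployment helper for the given name.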
+ return K8SSampleDeployment(self, name, **kwargs)
- :return:
+ def get_pod_ips_from_container(self, pod_name, exclude_local=True,
+ namespace='default'):
+ """ Get ips from container using 'ip a'
+ Required for cni-genie multi-cni cases
+
+ :return: list of IP adresses
"""
- cmd = "apt install jq -y"
- return self.__underlay.check_call(cmd, node_name=self.ctl_host)
+ cmd = "ip a|grep \"inet \"|awk '{{print $2}}'"
+ result = self.kubectl.cli_exec(namespace, pod_name, cmd)['stdout']
+ ips = [line.strip().split('/')[0] for line in result]
+ if exclude_local:
+ ips = [ip for ip in ips if not ip.startswith("127.")]
+ return ips
- def git_clone(self, project, target):
- cmd = "git clone {0} {1}".format(project, target)
- return self.__underlay.check_call(cmd, node_name=self.ctl_host)
-
- def run_vm(self, name=None, yaml_path='~/virtlet/examples/cirros-vm.yaml'):
- if not name:
- name = 'virtlet-vm-{}'.format(uuid4())
- cmd = (
- "kubectl convert -f {0} --local "
- "-o json | jq '.metadata.name|=\"{1}\"' | kubectl create -f -")
- self.__underlay.check_call(cmd.format(yaml_path, name),
- node_name=self.ctl_host)
- return name
-
- def get_vm_info(self, name, jsonpath="{.status.phase}", expected=None):
- cmd = "kubectl get po {} -n default".format(name)
- if jsonpath:
- cmd += " -o jsonpath={}".format(jsonpath)
- return self.__underlay.check_call(
- cmd, node_name=self.ctl_host, expected=expected)
-
- def wait_active_state(self, name, timeout=180):
- helpers.wait(
- lambda: self.get_vm_info(name)['stdout'][0] == 'Running',
- timeout=timeout,
- timeout_msg="VM {} didn't Running state in {} sec. "
- "Current state: ".format(
- name, timeout, self.get_vm_info(name)['stdout'][0]))
-
- def delete_vm(self, name, timeout=180):
- cmd = "kubectl delete po -n default {}".format(name)
- self.__underlay.check_call(cmd, node_name=self.ctl_host)
-
- helpers.wait(
- lambda:
- "Error from server (NotFound):" in
- " ".join(self.get_vm_info(name, expected=[0, 1])['stderr']),
- timeout=timeout,
- timeout_msg="VM {} didn't Running state in {} sec. "
- "Current state: ".format(
- name, timeout, self.get_vm_info(name)['stdout'][0]))
-
- def adjust_cirros_resources(
- self, cpu=2, memory='256',
- target_yaml='virtlet/examples/cirros-vm-exp.yaml'):
- # We will need to change params in case of example change
- cmd = ("cd ~/virtlet/examples && "
- "cp cirros-vm.yaml {2} && "
- "sed -r 's/^(\s*)(VirtletVCPUCount\s*:\s*\"1\"\s*$)/ "
- "\1VirtletVCPUCount: \"{0}\"/' {2} && "
- "sed -r 's/^(\s*)(memory\s*:\s*128Mi\s*$)/\1memory: "
- "{1}Mi/' {2}".format(cpu, memory, target_yaml))
- self.__underlay.check_call(cmd, node_name=self.ctl_host)
-
- def get_domain_name(self, vm_name):
- cmd = ("~/virtlet/examples/virsh.sh list --name | "
- "grep -i {0} ".format(vm_name))
- result = self.__underlay.check_call(cmd, node_name=self.ctl_host)
- return result['stdout'].strip()
-
- def get_vm_cpu_count(self, domain_name):
- cmd = ("~/virtlet/examples/virsh.sh dumpxml {0} | "
- "grep 'cpu' | grep -o '[[:digit:]]*'".format(domain_name))
- result = self.__underlay.check_call(cmd, node_name=self.ctl_host)
- return int(result['stdout'].strip())
-
- def get_vm_memory_count(self, domain_name):
- cmd = ("~/virtlet/examples/virsh.sh dumpxml {0} | "
- "grep 'memory unit' | "
- "grep -o '[[:digit:]]*'".format(domain_name))
- result = self.__underlay.check_call(cmd, node_name=self.ctl_host)
- return int(result['stdout'].strip())
-
- def get_domain_id(self, domain_name):
- cmd = ("virsh dumpxml {} | grep id=\' | "
- "grep -o [[:digit:]]*".format(domain_name))
- result = self.__underlay.check_call(cmd, node_name=self.ctl_host)
- return int(result['stdout'].strip())
-
- def list_vm_volumes(self, domain_name):
- domain_id = self.get_domain_id(domain_name)
- cmd = ("~/virtlet/examples/virsh.sh domblklist {} | "
- "tail -n +3 | awk {{'print $2'}}".format(domain_id))
- result = self.__underlay.check_call(cmd, node_name=self.ctl_host)
- return result['stdout'].strip()
-
- def run_virtlet_conformance(self, timeout=60 * 120,
- log_file='virtlet_conformance.log'):
- if self.__config.k8s.run_extended_virtlet_conformance:
- ci_image = "cloud-images.ubuntu.com/xenial/current/" \
- "xenial-server-cloudimg-amd64-disk1.img"
- cmd = ("set -o pipefail; "
- "docker run --net=host {0} /virtlet-e2e-tests "
- "-include-cloud-init-tests -junitOutput report.xml "
- "-image {2} -sshuser ubuntu -memoryLimit 1024 "
- "-alsologtostderr -cluster-url http://127.0.0.1:8080 "
- "-ginkgo.focus '\[Conformance\]' "
- "| tee {1}".format(
- self.__config.k8s_deploy.kubernetes_virtlet_image,
- log_file, ci_image))
- else:
- cmd = ("set -o pipefail; "
- "docker run --net=host {0} /virtlet-e2e-tests "
- "-junitOutput report.xml "
- "-alsologtostderr -cluster-url http://127.0.0.1:8080 "
- "-ginkgo.focus '\[Conformance\]' "
- "| tee {1}".format(
- self.__config.k8s_deploy.kubernetes_virtlet_image,
- log_file))
- LOG.info("Executing: {}".format(cmd))
- with self.__underlay.remote(
- node_name=self.ctl_host) as remote:
- result = remote.check_call(cmd, timeout=timeout)
- stderr = result['stderr']
- stdout = result['stdout']
- LOG.info("Test results stdout: {}".format(stdout))
- LOG.info("Test results stderr: {}".format(stderr))
- return result
-
- def start_k8s_cncf_verification(self, timeout=60 * 90):
- cncf_cmd = ("curl -L https://raw.githubusercontent.com/cncf/"
- "k8s-conformance/master/sonobuoy-conformance.yaml"
- " | kubectl apply -f -")
- with self.__underlay.remote(
- node_name=self.ctl_host) as remote:
- remote.check_call(cncf_cmd, timeout=60)
- self.wait_pod_phase('sonobuoy', 'Running',
- namespace='sonobuoy', timeout=120)
- wait_cmd = ('kubectl logs -n sonobuoy sonobuoy | '
- 'grep "sonobuoy is now blocking"')
-
- expected = [0, 1]
- helpers.wait(
- lambda: remote.check_call(
- wait_cmd, expected=expected).exit_code == 0,
- interval=30, timeout=timeout,
- timeout_msg="Timeout for CNCF reached."
- )
-
- def extract_file_to_node(self, system='docker',
- container='virtlet',
- file_path='report.xml',
- out_dir='.',
- **kwargs):
- """
- Download file from docker or k8s container to node
-
- :param system: docker or k8s
- :param container: Full name of part of name
- :param file_path: File path in container
- :param kwargs: Used to control pod and namespace
- :param out_dir: Output directory
- :return:
- """
- with self.__underlay.remote(
- node_name=self.ctl_host) as remote:
- if system is 'docker':
- cmd = ("docker ps --all | grep \"{0}\" |"
- " awk '{{print $1}}'".format(container))
- result = remote.check_call(cmd, raise_on_err=False)
- if result['stdout']:
- container_id = result['stdout'][0].strip()
- else:
- LOG.info('No container found, skipping extraction...')
- return
- cmd = "docker start {}".format(container_id)
- remote.check_call(cmd, raise_on_err=False)
- cmd = "docker cp \"{0}:/{1}\" \"{2}\"".format(
- container_id, file_path, out_dir)
- remote.check_call(cmd, raise_on_err=False)
- else:
- # system is k8s
- pod_name = kwargs.get('pod_name')
- pod_namespace = kwargs.get('pod_namespace')
- cmd = 'kubectl cp \"{0}/{1}:/{2}\" \"{3}\"'.format(
- pod_namespace, pod_name, file_path, out_dir)
- remote.check_call(cmd, raise_on_err=False)
-
- def download_k8s_logs(self, files):
- """
- Download JUnit report and conformance logs from cluster
- :param files:
- :return:
- """
- master_host = self.__config.salt.salt_master_host
- with self.__underlay.remote(host=master_host) as r:
- for log_file in files:
- cmd = "rsync -r \"{0}:/root/{1}\" /root/".format(
- self.ctl_host, log_file)
- r.check_call(cmd, raise_on_err=False)
- LOG.info("Downloading the artifact {0}".format(log_file))
- r.download(destination=log_file, target=os.getcwd())
-
- def combine_xunit(self, path, output):
- """
- Function to combine multiple xmls with test results to
- one.
-
- :param path: Path where xmls to combine located
- :param output: Path to xml file where output will stored
- :return:
- """
- with self.__underlay.remote(node_name=self.ctl_host) as r:
- cmd = ("apt-get install python-setuptools -y; "
- "pip install "
- "https://github.com/mogaika/xunitmerge/archive/master.zip")
- LOG.debug('Installing xunitmerge')
- r.check_call(cmd, raise_on_err=False)
- LOG.debug('Merging xunit')
- cmd = ("cd {0}; arg = ''; "
- "for i in $(ls | grep xml); "
- "do arg=\"$arg $i\"; done && "
- "xunitmerge $arg {1}".format(path, output))
- r.check_call(cmd, raise_on_err=False)
-
- def manage_cncf_archive(self):
- """
- Function to untar archive, move files, that we are needs to the
- home folder, prepare it to downloading and clean the trash.
- Will generate files: e2e.log, junit_01.xml, cncf_results.tar.gz
- and version.txt
- :return:
- """
-
- # Namespace and pod name may be hardcoded since this function is
- # very specific for cncf and cncf is not going to change
- # those launch pod name and namespace.
- get_tar_name_cmd = ("kubectl logs -n sonobuoy sonobuoy | "
- "grep 'Results available' | "
- "sed 's/.*\///' | tr -d '\"'")
-
- with self.__underlay.remote(
- node_name=self.ctl_host) as remote:
- tar_name = remote.check_call(get_tar_name_cmd)['stdout'][0].strip()
- untar = "mkdir result && tar -C result -xzf {0}".format(tar_name)
- remote.check_call(untar, raise_on_err=False)
- manage_results = ("mv result/plugins/e2e/results/e2e.log . && "
- "mv result/plugins/e2e/results/junit_01.xml . ;"
- "kubectl version > version.txt")
- remote.check_call(manage_results, raise_on_err=False)
- cleanup_host = "rm -rf result"
- remote.check_call(cleanup_host, raise_on_err=False)
- # This one needed to use download fixture, since I don't know
- # how possible apply fixture arg dynamically from test.
- rename_tar = "mv {0} cncf_results.tar.gz".format(tar_name)
- remote.check_call(rename_tar, raise_on_err=False)
-
- def update_k8s_images(self, tag):
+ def update_k8s_version(self, tag):
"""
Update k8s images tag version in cluster meta and apply required
for update states
@@ -705,73 +181,336 @@
self.execute_commands(
update_commands, label="Updating kubernetes to '{}'".format(tag))
- def get_keepalived_vip(self):
+ def run_conformance(self, timeout=60*60, log_out='k8s_conformance.log',
+ raise_on_err=True, node_name=None,
+ api_server='http://127.0.0.1:8080'):
+ if node_name is None:
+ node_name = self.controller_name
+ cmd = "set -o pipefail; docker run --net=host " \
+ "-e API_SERVER='{api}' {image} | tee '{log}'".format(
+ api=api_server, log=log_out,
+ image=self.__config.k8s.k8s_conformance_image)
+ return self.__underlay.check_call(
+ cmd=cmd, node_name=node_name, timeout=timeout,
+ raise_on_err=raise_on_err)
+
+ def run_virtlet_conformance(self, timeout=60 * 120,
+ log_file='virtlet_conformance.log'):
+ if self.__config.k8s.run_extended_virtlet_conformance:
+ ci_image = "cloud-images.ubuntu.com/xenial/current/" \
+ "xenial-server-cloudimg-amd64-disk1.img"
+ cmd = ("set -o pipefail; "
+ "docker run --net=host {0} /virtlet-e2e-tests "
+ "-include-cloud-init-tests -junitOutput report.xml "
+ "-image {2} -sshuser ubuntu -memoryLimit 1024 "
+ "-alsologtostderr -cluster-url http://127.0.0.1:8080 "
+ "-ginkgo.focus '\[Conformance\]' "
+ "| tee {1}".format(
+ self.__config.k8s_deploy.kubernetes_virtlet_image,
+ log_file, ci_image))
+ else:
+ cmd = ("set -o pipefail; "
+ "docker run --net=host {0} /virtlet-e2e-tests "
+ "-junitOutput report.xml "
+ "-alsologtostderr -cluster-url http://127.0.0.1:8080 "
+ "-ginkgo.focus '\[Conformance\]' "
+ "| tee {1}".format(
+ self.__config.k8s_deploy.kubernetes_virtlet_image,
+ log_file))
+ LOG.info("Executing: {}".format(cmd))
+ with self.__underlay.remote(
+ node_name=self.controller_name) as remote:
+ result = remote.check_call(cmd, timeout=timeout)
+ stderr = result['stderr']
+ stdout = result['stdout']
+ LOG.info("Test results stdout: {}".format(stdout))
+ LOG.info("Test results stderr: {}".format(stderr))
+ return result
+
+ def start_k8s_cncf_verification(self, timeout=60 * 90):
"""
- Return k8s VIP IP address
-
- :return: str, IP address
+ Build sonobuoy using the golang docker image and install it on the system.
+ Then generate the sonobuoy verification manifest with the 'gen' command
+ and wait for the run to complete using the status annotation.
+ TODO (vjigulin): All sonobuoy methods can be improved if we define
+ the correct KUBECONFIG env variable
"""
- ctl_vip_pillar = self._salt.get_pillar(
- tgt="I@kubernetes:control:enabled:True",
- pillar="_param:cluster_vip_address")[0]
- return ctl_vip_pillar.values()[0]
+ cncf_cmd =\
+ 'docker run --network host --name sonobuoy golang:1.11 bash -c ' \
+ '"go get -d -v github.com/heptio/sonobuoy && ' \
+ 'go install -v github.com/heptio/sonobuoy" && ' \
+ 'docker cp sonobuoy:/go/bin/sonobuoy /usr/local/bin/ && ' \
+ 'docker rm sonobuoy && ' \
+ 'sonobuoy gen | kubectl apply -f -'
- def get_sample_deployment(self, name, **kwargs):
- return K8SSampleDeployment(self, name, **kwargs)
+ self.controller_check_call(cncf_cmd, timeout=900)
- def is_pod_exists_with_prefix(self, prefix, namespace, phase='Running'):
- for pod in self.api.pods.list(namespace=namespace):
- if pod.name.startswith(prefix) and pod.phase == phase:
- return True
- return False
+ sonobuoy_pod = self.api.pods.get('sonobuoy', 'heptio-sonobuoy')
+ sonobuoy_pod.wait_running()
- def get_pod_ips_from_container(self, pod_name, exclude_local=True):
- """ Not all containers have 'ip' binary on-board """
- cmd = "kubectl exec {0} ip a|grep \"inet \"|awk '{{print $2}}'".format(
- pod_name)
- result = self.__underlay.check_call(cmd, node_name=self.ctl_host)
- ips = [line.strip().split('/')[0] for line in result['stdout']]
- if exclude_local:
- ips = [ip for ip in ips if not ip.startswith("127.")]
- return ips
+ def sonobuoy_status():
+ annotations = sonobuoy_pod.read().metadata.annotations
+ json_status = annotations['sonobuoy.hept.io/status']
+ status = yaml.safe_load(json_status)['status']
+ if status != 'running':
+ LOG.info("CNCF status: {}".format(json_status))
+ return status
- def create_pod_from_file(self, path, namespace=None):
- with open(path) as f:
- data = f.read()
- return self.check_pod_create(data, namespace=namespace)
+ LOG.info("Waiting for CNCF to complete")
+ helpers.wait(
+ lambda: sonobuoy_status() == 'complete',
+ interval=30, timeout=timeout,
+ timeout_msg="Timeout for CNCF reached."
+ )
+
+ def extract_file_to_node(self, system='docker',
+ container='virtlet',
+ file_path='report.xml',
+ out_dir='.',
+ **kwargs):
+ """
+ Download file from docker or k8s container to node
+
+ :param system: docker or k8s
+ :param container: Full name or part of the name
+ :param file_path: File path in container
+ :param kwargs: Used to control pod and namespace
+ :param out_dir: Output directory
+ :return:
+ """
+ with self.__underlay.remote(node_name=self.controller_name) as remote:
+ if system == 'docker':
+ cmd = ("docker ps --all | grep \"{0}\" |"
+ " awk '{{print $1}}'".format(container))
+ result = remote.check_call(cmd, raise_on_err=False)
+ if result['stdout']:
+ container_id = result['stdout'][0].strip()
+ else:
+ LOG.info('No container found, skipping extraction...')
+ return
+ cmd = "docker start {}".format(container_id)
+ remote.check_call(cmd, raise_on_err=False)
+ cmd = "docker cp \"{0}:/{1}\" \"{2}\"".format(
+ container_id, file_path, out_dir)
+ remote.check_call(cmd, raise_on_err=False)
+ else:
+ # system is k8s
+ pod_name = kwargs.get('pod_name')
+ pod_namespace = kwargs.get('pod_namespace')
+ cmd = 'kubectl cp \"{0}/{1}:/{2}\" \"{3}\"'.format(
+ pod_namespace, pod_name, file_path, out_dir)
+ remote.check_call(cmd, raise_on_err=False)
+
+ def download_k8s_logs(self, files):
+ """
+ Download JUnit report and conformance logs from cluster
+ :param files:
+ :return:
+ """
+ master_host = self.__config.salt.salt_master_host
+ with self.__underlay.remote(host=master_host) as r:
+ for log_file in files:
+ cmd = "rsync -r \"{0}:/root/{1}\" /root/".format(
+ self.controller_name, log_file)
+ r.check_call(cmd, raise_on_err=False)
+ LOG.info("Downloading the artifact {0}".format(log_file))
+ r.download(destination=log_file, target=os.getcwd())
+
+ def combine_xunit(self, path, output):
+ """
+ Function to combine multiple XMLs with test results into
+ one file.
+
+ :param path: Path where the xmls to combine are located
+ :param output: Path to the xml file where output will be stored
+ :return:
+ """
+ with self.__underlay.remote(node_name=self.controller_name) as r:
+ cmd = ("apt-get install python-setuptools -y; "
+ "pip install "
+ "https://github.com/mogaika/xunitmerge/archive/master.zip")
+ LOG.debug('Installing xunitmerge')
+ r.check_call(cmd, raise_on_err=False)
+ LOG.debug('Merging xunit')
+ cmd = ("cd {0}; arg = ''; "
+ "for i in $(ls | grep xml); "
+ "do arg=\"$arg $i\"; done && "
+ "xunitmerge $arg {1}".format(path, output))
+ r.check_call(cmd, raise_on_err=False)
+
+ def manage_cncf_archive(self):
+ """
+ Function to untar the archive, move the files we need to the
+ home folder and prepare them for downloading.
+ Will generate files on controller node:
+ e2e.log, junit_01.xml, cncf_results.tar.gz, version.txt
+ :return:
+ """
+
+ cmd =\
+ "rm -rf cncf_results.tar.gz result && " \
+ "mkdir result && " \
+ "mv *_sonobuoy_*.tar.gz cncf_results.tar.gz && " \
+ "tar -C result -xzf cncf_results.tar.gz && " \
+ "mv result/plugins/e2e/results/e2e.log . ; " \
+ "mv result/plugins/e2e/results/junit_01.xml . ; " \
+ "kubectl version > version.txt"
+
+ self.controller_check_call(cmd, raise_on_err=False)
+
+ @retry(300, exception=DevopsCalledProcessError)
+ def nslookup(self, host, src):
+ """ Run nslookup on controller and return result """
+ return self.controller_check_call("nslookup {0} {1}".format(host, src))
+
+ @retry(300, exception=DevopsCalledProcessError)
+ def curl(self, url):
+ """
+ Run curl on controller and return stdout
+
+ :param url: url to curl
+ :return: response string
+ """
+ result = self.controller_check_call("curl -s -S \"{}\"".format(url))
+ LOG.debug("curl \"{0}\" result: {1}".format(url, result['stdout']))
+ return result['stdout']
-class K8SSampleDeployment:
- def __init__(self, manager, name, replicas=2,
- image='gcr.io/google-samples/node-hello:1.0', port=8080):
- self.manager = manager
- self.name = name
- self.image = image
- self.port = port
- self.replicas = replicas
+class K8SKubectlCli(object):
+ """ Contain kubectl cli commands and api wrappers"""
+ def __init__(self, manager):
+ self._manager = manager
- def run(self):
- self.manager.kubectl_run(self.name, self.image, self.port,
- replicas=self.replicas)
+ def cli_run(self, namespace, name, image, port, replicas=1):
+ cmd = "kubectl -n {0} run {1} --image={2} --port={3} --replicas={4}".\
+ format(namespace, name, image, port, replicas)
+ return self._manager.controller_check_call(cmd)
- def expose(self, service_type='ClusterIP', target_name=None):
- self.manager.kubectl_expose(
- 'deployment', self.name, self.port, service_type, target_name)
+ def run(self, namespace, name, image, port, replicas=1):
+ self.cli_run(namespace, name, image, port, replicas)
+ return self._manager.api.deployments.get(
+ namespace=namespace, name=name)
- def get_svc_ip(self, external=False):
- return self.manager.get_svc_ip(self.name, namespace='default',
- external=external)
+ def cli_expose(self, namespace, resource_type, resource_name,
+ service_name=None, port='', service_type='ClusterIP'):
+ cmd = "kubectl -n {0} expose {1} {2} --port={3} --type={4}".format(
+ namespace, resource_type, resource_name, port, service_type)
+ if service_name is not None:
+ cmd += " --name={}".format(service_name)
+ return self._manager.controller_check_call(cmd)
- def curl(self, external=False):
- url = "http://{0}:{1}".format(
- self.get_svc_ip(external=external), self.port)
+ def expose(self, resource, service_name=None,
+ port='', service_type='ClusterIP'):
+ self.cli_expose(resource.namespace, resource.resource_type,
+ resource.name, service_name=service_name,
+ port=port, service_type=service_type)
+ return self._manager.api.services.get(
+ namespace=resource.namespace, name=service_name or resource.name)
+
+ def cli_exec(self, namespace, pod_name, cmd, container=''):
+ kubectl_cmd = "kubectl -n {0} exec --container={1} {2} -- {3}".format(
+ namespace, container, pod_name, cmd)
+ return self._manager.controller_check_call(kubectl_cmd)
+
+ # named 'execute' because 'exec' is a statement in Python 2
+ def execute(self, pod, cmd, container=''):
+ return self.cli_exec(pod.namespace, pod.name, cmd, container=container)
+
+ def cli_annotate(self, namespace, resource_type, resource_name,
+ annotations, overwrite=False):
+ cmd = "kubectl -n {0} annotate {1} {2} {3}".format(
+ namespace, resource_type, resource_name, annotations)
+ if overwrite:
+ cmd += " --overwrite"
+ return self._manager.controller_check_call(cmd)
+
+ def annotate(self, resource, annotations, overwrite=False):
+ return self.cli_annotate(resource.namespace, resource.resource_type,
+ resource.name, annotations,
+ overwrite=overwrite)
+
+
+class K8SVirtlet(object):
+ """ Contain virtlet-related methods"""
+ def __init__(self, manager, namespace='kube-system'):
+ self._manager = manager
+ self._namespace = namespace
+
+ def get_virtlet_node_pod(self, node_name):
+ for pod in self._manager.api.pods.list(
+ namespace=self._namespace, name_prefix='virtlet-'):
+ if pod.read().spec.node_name == node_name:
+ return pod
+ return None
+
+ def get_pod_dom_uuid(self, pod):
+ uuid_name_map = self.virtlet_execute(
+ pod.read().spec.node_name, 'virsh list --uuid --name')['stdout']
+ LOG.info("HEHEHEH {}".format(uuid_name_map))
+ LOG.info("MDAMDMAD {}".format(pod.name))
+ for line in uuid_name_map:
+ if line.rstrip().endswith("-{}".format(pod.name)):
+ return line.split(" ")[0]
+ raise Exception("Cannot detect uuid for pod {}".format(pod.name))
+
+ def virsh_domstats(self, pod):
+ """ get dict of vm stats """
+ uuid = self.get_pod_dom_uuid(pod)
+ result = self.virtlet_execute(
+ pod.read().spec.node_name, 'virsh domstats {}'.format(uuid))
+ stats = dict()
+ for line in result['stdout']:
+ if '=' in line:
+ vals = line.strip().split('=')
+ stats[vals[0]] = vals[1]
+ return stats
+
+ def virtlet_execute(self, node_name, cmd, container='libvirt'):
+ """ execute command inside virtlet container """
+ pod = self.get_virtlet_node_pod(node_name)
+ return self._manager.kubectl.execute(pod, cmd, container)
+
+
+class K8SSampleDeployment(object):
+ """ Wrapper for deployment run=>expose=>check frequent combination """
+ def __init__(self, manager, name,
+ namespace=None,
+ image='gcr.io/google-samples/node-hello:1.0',
+ port=8080,
+ replicas=2):
+ namespace = namespace or manager.api.default_namespace
+
+ self._manager = manager
+ self._port = port
+ self._deployment = \
+ manager.kubectl.run(namespace, name,
+ image=image, port=port, replicas=replicas)
+ self._index = 1 # used to generate svc name
+ self._svc = None # hold last created svc
+
+ def wait_ready(self, timeout=300, interval=5):
+ self._deployment.wait_ready(timeout=timeout, interval=interval)
+ return self
+
+ def svc(self):
+ """ Return the last exposed service"""
+ return self._svc
+
+ def expose(self, service_type='ClusterIP'):
+ service_name = "{0}-s{1}".format(self._deployment.name, self._index)
+ self._svc = self._manager.kubectl.expose(
+ self._deployment, port=self._port,
+ service_name=service_name, service_type=service_type)
+ return self._svc
+
+ def curl(self, svc=None, external=False):
+ if svc is None:
+ svc = self.svc()
+ url = "http://{0}:{1}".format(svc.get_ip(external), self._port)
if external:
return requests.get(url).text
else:
- return self.manager.curl(url)
+ return self._manager.curl(url)
- def is_service_available(self, external=False):
- return "Hello Kubernetes!" in self.curl(external=external)
-
- def wait_for_ready(self):
- return self.manager.wait_deploy_ready(self.name)
+ def is_service_available(self, svc=None, external=False):
+ return "Hello Kubernetes!" in self.curl(svc, external=external)
diff --git a/tcp_tests/requirements.txt b/tcp_tests/requirements.txt
index ca2d0a1..8233a9b 100644
--- a/tcp_tests/requirements.txt
+++ b/tcp_tests/requirements.txt
@@ -15,10 +15,10 @@
jira
testrail<=0.3.8
functools32
-python-k8sclient==0.4.0
+kubernetes
salt-pepper<=0.5.3
setuptools<=36.2.0
netaddr
mock>=1.2
python-jenkins
-cmd2<0.9
\ No newline at end of file
+cmd2<0.9
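Since python-k8sclient is replaced here by the official kubernetes client, the sonobuoy status annotation polled in start_k8s_cncf_verification() could also be read directly through that client. A minimal sketch, assuming a kubeconfig reachable from the test host (names follow the patch, nothing below is part of it):

    # Sketch only: read the sonobuoy status annotation with the official client.
    import yaml
    from kubernetes import client, config

    def cncf_status():
        config.load_kube_config()  # assumption: kubeconfig is available locally
        pod = client.CoreV1Api().read_namespaced_pod(
            name='sonobuoy', namespace='heptio-sonobuoy')
        annotation = pod.metadata.annotations['sonobuoy.hept.io/status']
        return yaml.safe_load(annotation)['status']  # 'running' or 'complete'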
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/_context-cookiecutter-mcp-queens-dvr.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/_context-cookiecutter-mcp-queens-dvr.yaml
new file mode 100644
index 0000000..f43d3f7
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/_context-cookiecutter-mcp-queens-dvr.yaml
@@ -0,0 +1,183 @@
+default_context:
+ bmk_enabled: 'False'
+ ceph_enabled: 'False'
+ cicd_enabled: 'False'
+ cluster_domain: cookied-mcp-queens-dvr.local
+ cluster_name: cookied-mcp-queens-dvr
+ compute_bond_mode: active-backup
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 172.16.10.0/24
+ control_vlan: '10'
+ cookiecutter_template_branch: master
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+ deploy_network_gateway: 192.168.10.1
+ deploy_network_netmask: 255.255.255.0
+ deploy_network_subnet: 192.168.10.0/24
+ deployment_type: physical
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
+ email_address: ddmitriev@mirantis.com
+ gateway_primary_first_nic: eth1
+ gateway_primary_second_nic: eth2
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: 172.16.10.101
+ infra_kvm01_deploy_address: 192.168.10.101
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 172.16.10.102
+ infra_kvm02_deploy_address: 192.168.10.102
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 172.16.10.103
+ infra_kvm03_deploy_address: 192.168.10.103
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: 172.16.10.100
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ kubernetes_enabled: 'False'
+ local_repositories: 'False'
+ maas_deploy_address: 192.168.10.90
+ maas_hostname: cfg01
+ mcp_version: stable
+ offline_deployment: 'False'
+ opencontrail_enabled: 'False'
+ openstack_benchmark_node01_address: 172.16.10.95
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_cluster_size: compact
+ openstack_compute_count: '2'
+ openstack_compute_rack01_hostname: cmp
+ openstack_compute_rack01_single_subnet: 172.16.10
+ openstack_compute_rack01_tenant_subnet: 10.1.0
+ openstack_control_address: 172.16.10.100
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: 172.16.10.101
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: 172.16.10.102
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: 172.16.10.103
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: 172.16.10.100
+ openstack_database_hostname: ctl
+ openstack_database_node01_address: 172.16.10.101
+ openstack_database_node01_hostname: ctl01
+ openstack_database_node02_address: 172.16.10.102
+ openstack_database_node02_hostname: ctl02
+ openstack_database_node03_address: 172.16.10.103
+ openstack_database_node03_hostname: ctl03
+ openstack_enabled: 'True'
+ openstack_gateway_node01_address: 172.16.10.110
+ openstack_gateway_node01_hostname: gtw01
+ openstack_gateway_node01_tenant_address: 10.1.0.6
+ openstack_gateway_node02_address: 172.16.10.111
+ openstack_gateway_node02_hostname: gtw02
+ openstack_gateway_node02_tenant_address: 10.1.0.7
+ openstack_gateway_node03_address: 172.16.10.112
+ openstack_gateway_node03_hostname: gtw03
+ openstack_gateway_node03_tenant_address: 10.1.0.8
+ openstack_message_queue_address: 172.16.10.100
+ openstack_message_queue_hostname: ctl
+ openstack_message_queue_node01_address: 172.16.10.101
+ openstack_message_queue_node01_hostname: ctl01
+ openstack_message_queue_node02_address: 172.16.10.102
+ openstack_message_queue_node02_hostname: ctl02
+ openstack_message_queue_node03_address: 172.16.10.103
+ openstack_message_queue_node03_hostname: ctl03
+ openstack_network_engine: ovs
+ openstack_neutron_qos: 'False'
+ openstack_neutron_vlan_aware_vms: 'False'
+ openstack_nfv_dpdk_enabled: 'False'
+ openstack_nfv_sriov_enabled: 'False'
+ openstack_nova_compute_nfv_req_enabled: 'False'
+ openstack_ovs_dvr_enabled: 'True'
+ openstack_ovs_encapsulation_type: vxlan
+ openstack_proxy_address: 172.16.10.80
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: 172.16.10.121
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: 172.16.10.122
+ openstack_proxy_node02_hostname: prx02
+ openstack_upgrade_node01_address: 172.16.10.19
+ openstack_version: queens
+ oss_enabled: 'False'
+ oss_node03_address: ${_param:stacklight_monitor_node03_address}
+ oss_webhook_app_id: '24'
+ oss_pushkin_email_sender_password: password
+ oss_pushkin_smtp_port: '587'
+ oss_webhook_login_id: '13'
+ platform: openstack_enabled
+ public_host: ${_param:openstack_proxy_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ backup_private_key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+ k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+ Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+ 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+ lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+ MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+ yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+ dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+ FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+ 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+ g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+ AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+ CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+ H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+ gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+ MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+ lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+ ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+ SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+ HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+ 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+ M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+ erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+ aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+ 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
+ salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
+ salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
+ salt_master_address: 172.16.10.90
+ salt_master_hostname: cfg01
+ salt_master_management_address: 192.168.10.90
+ shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+ stacklight_enabled: 'True'
+ fluentd_enabled: 'True'
+ stacklight_log_address: 172.16.10.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 172.16.10.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 172.16.10.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 172.16.10.63
+ stacklight_log_node03_hostname: log03
+ stacklight_monitor_address: 172.16.10.70
+ stacklight_monitor_hostname: mon
+ stacklight_monitor_node01_address: 172.16.10.71
+ stacklight_monitor_node01_hostname: mon01
+ stacklight_monitor_node02_address: 172.16.10.72
+ stacklight_monitor_node02_hostname: mon02
+ stacklight_monitor_node03_address: 172.16.10.73
+ stacklight_monitor_node03_hostname: mon03
+ stacklight_telemetry_address: 172.16.10.85
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 172.16.10.86
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 172.16.10.87
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 172.16.10.88
+ stacklight_telemetry_node03_hostname: mtr03
+ stacklight_version: '2'
+ stacklight_long_term_storage_type: prometheus
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: 10.1.0.1
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 10.1.0.0/24
+ tenant_vlan: '20'
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'False'
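The default_context above is what the cookiecutter-templates repository consumes when the cluster model is generated. A rough sketch of feeding such a context to cookiecutter by hand, assuming the cookiecutter package and a local clone of the templates (TEMPLATE_DIR and the output path are placeholders, not part of this change):

    # Sketch only: render a cluster model from a default_context file.
    import yaml
    from cookiecutter.main import cookiecutter

    TEMPLATE_DIR = './cookiecutter-templates'  # assumption: local clone

    with open('_context-cookiecutter-mcp-queens-dvr.yaml') as f:
        context = yaml.safe_load(f)['default_context']

    cookiecutter(TEMPLATE_DIR, no_input=True,
                 extra_context=context, output_dir='./model')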
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/_context-environment.yaml
new file mode 100644
index 0000000..0f806cf
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/_context-environment.yaml
@@ -0,0 +1,236 @@
+nodes:
+ cfg01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - infra_kvm
+ - openstack_control_leader
+ - openstack_database_leader
+ - openstack_message_queue
+# - features_designate_pool_manager_database
+# - features_designate_pool_manager
+# - features_designate_pool_manager_keystone
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - infra_kvm
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+# - features_designate_pool_manager_database
+# - features_designate_pool_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl03.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - infra_kvm
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+# - features_designate_pool_manager_database
+# - features_designate_pool_manager
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ prx01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - openstack_proxy
+# - features_designate_pool_manager_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon03.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_log_node01
+ roles:
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_log_node02
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log03.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_log_node03
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_telemetry_node01
+ roles:
+ - stacklight_telemetry_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_telemetry_node02
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr03.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_telemetry_node03
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ # Generator-based computes. For compatibility only
+ cmp<<count>>.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_compute_rack01
+ roles:
+ - openstack_compute
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: bond0_ab_ovs_vxlan_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+ gtw01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_gateway_node01
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: bond0_ab_ovs_vxlan_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+# dns01.mcp11-ovs-dpdk.local:
+# reclass_storage_name: openstack_dns_node01
+# roles:
+# - features_designate_pool_manager_dns
+# - linux_system_codename_xenial
+# classes:
+# - system.linux.system.repo.mcp.extra
+# - system.linux.system.repo.mcp.apt_mirantis.openstack
+# - system.linux.system.repo.mcp.apt_mirantis.ubuntu
+# - system.linux.system.repo.mcp.apt_mirantis.saltstack
+# interfaces:
+# ens3:
+# role: single_dhcp
+# ens4:
+# role: single_ctl
+# single_address: ${_param:openstack_dns_node01_address}
+#
+# dns02.mcp11-ovs-dpdk.local:
+# reclass_storage_name: openstack_dns_node02
+# roles:
+# - features_designate_pool_manager_dns
+# - linux_system_codename_xenial
+# classes:
+# - system.linux.system.repo.mcp.extra
+# - system.linux.system.repo.mcp.apt_mirantis.openstack
+# - system.linux.system.repo.mcp.apt_mirantis.ubuntu
+# - system.linux.system.repo.mcp.apt_mirantis.saltstack
+# interfaces:
+# ens3:
+# role: single_dhcp
+# ens4:
+# role: single_ctl
+# single_address: ${_param:openstack_dns_node02_address}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/core.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/core.yaml
new file mode 100644
index 0000000..92b21a5
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/core.yaml
@@ -0,0 +1,119 @@
+{% from 'cookied-mcp-queens-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-backup-restore.yaml' as BACKUP with context %}
+
+# Install support services
+- description: Install keepalived on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster and *01*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Install keepalived
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Install glusterfs
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.service
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Setup glusterfs on primary controller
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check the gluster status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check the rabbitmq status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on first server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:master' state.sls galera
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on other servers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:slave' state.sls galera -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check mysql status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+- description: Install haproxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' state.sls haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check haproxy status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.status haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Restart rsyslog
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.restart rsyslog
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install memcached on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@memcached:server' state.sls memcached
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check the VIP
+ cmd: |
+ OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
+ echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 10}
+ skip_fail: false
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/openstack.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/openstack.yaml
new file mode 100644
index 0000000..14cf12e
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/openstack.yaml
@@ -0,0 +1,157 @@
+{% from 'cookied-mcp-queens-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-queens-dvr/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-mcp-queens-dvr/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-mcp-queens-dvr/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'cookied-mcp-queens-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-queens-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-queens-dvr/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
+{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+
+# Install OpenStack control services
+
+{%- if OVERRIDE_POLICY != '' %}
+- description: Upload policy override
+ upload:
+ local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+ local_filename: overrides-policy.yml
+ remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/
+ node_name: {{ HOSTNAME_CFG01 }}
+
+- description: Create custom cluster control class
+ cmd: echo -e "classes:\n- cluster.{{ LAB_CONFIG_NAME }}.openstack.control_orig\n$(cat /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml)" > /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml
+ node_name: {{ HOSTNAME_CFG01 }}
+
+- description: Rename control classes
+ cmd: mv /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_orig.yml &&
+ ln -s /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml &&
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+ node_name: {{ HOSTNAME_CFG01 }}
+{%- endif %}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
+
+# install designate
+#- description: Install powerdns
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+# -C 'I@powerdns:server' state.sls powerdns.server
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 5}
+# skip_fail: false
+
+#- description: Install designate
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+# -C 'I@designate:server' state.sls designate -b 1
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 5, delay: 10}
+# skip_fail: false
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
+
+- description: Deploy horizon dashboard
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@horizon:server' state.sls horizon
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Deploy nginx proxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nginx:server' state.sls nginx
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+# Install compute node
+
+- description: Apply formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Re-apply (as in the docs) formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check IP on computes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+ 'ip a'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 10, delay: 30}
+ skip_fail: false
+
+- description: Create net04_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create router
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-create net04_router01'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set gateway
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Add interface
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: sync time
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+ 'service ntp stop; ntpd -gq; service ntp start'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/overrides-policy.yml b/tcp_tests/templates/cookied-mcp-queens-dvr/overrides-policy.yml
new file mode 100644
index 0000000..1f35a6b
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/overrides-policy.yml
@@ -0,0 +1,40 @@
+parameters:
+ nova:
+ controller:
+ policy:
+ context_is_admin: 'role:admin or role:administrator'
+ 'compute:create': 'rule:admin_or_owner'
+ 'compute:create:attach_network':
+ cinder:
+ controller:
+ policy:
+ 'volume:delete': 'rule:admin_or_owner'
+ 'volume:extend':
+ neutron:
+ server:
+ policy:
+ create_subnet: 'rule:admin_or_network_owner'
+ 'get_network:queue_id': 'rule:admin_only'
+ 'create_network:shared':
+ glance:
+ server:
+ policy:
+ publicize_image: "role:admin"
+ add_member:
+ keystone:
+ server:
+ policy:
+ admin_or_token_subject: 'rule:admin_required or rule:token_subject'
+ heat:
+ server:
+ policy:
+ context_is_admin: 'role:admin and is_admin_project:True'
+ deny_stack_user: 'not role:heat_stack_user'
+ deny_everybody: '!'
+ 'cloudformation:ValidateTemplate': 'rule:deny_everybody'
+ 'cloudformation:DescribeStackResources':
+ ceilometer:
+ server:
+ policy:
+ segregation: 'rule:context_is_admin'
+ 'telemetry:get_resource':
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/salt.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/salt.yaml
new file mode 100644
index 0000000..cc98f46
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/salt.yaml
@@ -0,0 +1,31 @@
+{% from 'cookied-mcp-queens-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-queens-dvr/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'cookied-mcp-queens-dvr/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'cookied-mcp-queens-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-queens-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-queens-dvr/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# Other salt model repository parameters see in shared-salt.yaml
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "glusterfs" "jenkins" "maas" "backupninja" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "fluentd" "runtest" "auditd" "logrotate"') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/sl.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/sl.yaml
new file mode 100644
index 0000000..1d323ba
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/sl.yaml
@@ -0,0 +1,256 @@
+{% from 'cookied-mcp-queens-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
+# Install docker swarm
+- description: Configure docker service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install docker swarm on master node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Send grains to the swarm slave nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Update mine
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Refresh modules
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Rerun swarm on slaves for proper token population
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure slave nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: List registered Docker swarm nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install keepalived on mon nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'mon*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check the VIP on mon nodes
+ cmd: |
+ SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+ echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+# Install slv2 infra
+# Install MongoDB for alerta
+- description: Install Mongo if target matches
+ cmd: |
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.server
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+# Create MongoDB cluster
+- description: Create MongoDB cluster if target matches
+ cmd: |
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.cluster
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 20}
+ skip_fail: false
+
+- description: Install telegraf
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
+ cmd: |
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install elasticsearch server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install kibana server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install elasticsearch client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 30}
+ skip_fail: false
+
+- description: Install kibana client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check influxdb
+ cmd: |
+ INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
+ if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+# Install Prometheus LTS(optional if set in model)
+- description: Prometheus LTS(optional if set in model)
+ cmd: |
+ PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "PROMETHEUS rely service presence: ${PROMETHEUS_SERVICE}";
+ if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+# Install service for the log collection
+- description: Configure fluentd
+ cmd: |
+ FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "Fluentd service presence: ${FLUENTD_SERVICE}";
+ if [[ "$FLUENTD_SERVICE" == "true" ]]; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
+ else
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+# Install heka ceilometer collector
+- description: Install heka ceilometer collector if it exists
+ cmd: |
+ CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "Ceilometer service presence: ${CEILO}";
+ if [[ "$CEILO" == "true" ]]; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+# Collect grains needed to configure the services
+
+- description: Get grains
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Sync modules
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Update mine
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 15}
+ skip_fail: false
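+
+# Optional sanity check (illustrative only; assumes grains.items is configured as a mine
+# function by the salt.minion.grains state above):
+#   salt -C 'I@salt:minion' mine.get '*' grains.items --out=txt | head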
+
+# Configure the services running in Docker Swarm
+- description: Configure prometheus in docker swarm
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus,heka.remote_collector
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+# Launch containers
+- description: Launch prometheus containers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Check docker ps
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Install sphinx
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+
+#- description: Install prometheus alertmanager
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+
+#- description: run docker state
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+#
+#- description: docker ps
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+
+- description: Configure Grafana dashboards and datasources
+ cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Run salt minion to create cert files
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+ instance-id: iid-local1
+ hostname: {hostname}
+ local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..da7908d
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--user-data-cfg01.yaml
@@ -0,0 +1,61 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ - sudo ifdown ens3
+ - sudo ip r d default || true # remove existing default route to get it from dhcp
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
+ - path: /root/.ssh/config
+ owner: root:root
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 300
+ ServerAliveCountMax 10
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--user-data1604.yaml
new file mode 100644
index 0000000..3fbb777
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/underlay--user-data1604.yaml
@@ -0,0 +1,50 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
diff --git a/tcp_tests/templates/cookied-mcp-queens-dvr/underlay.yaml b/tcp_tests/templates/cookied-mcp-queens-dvr/underlay.yaml
new file mode 100644
index 0000000..7c1628c
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-dvr/underlay.yaml
@@ -0,0 +1,764 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
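+# The os_env() lookups in this template read environment variables with a fallback default,
+# so the underlay can be tuned from the shell before a run, e.g. (hypothetical values):
+#   export REPOSITORY_SUITE=stable
+#   export LAB_CONFIG_NAME=cookied-mcp-queens-dvr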
+
+{% import 'cookied-mcp-queens-dvr/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-mcp-queens-dvr/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-mcp-queens-dvr/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-queens-dvr') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+#{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
+#{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', 'cookied-mcp-queens-dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+ address_pools:
+ private-pool01:
+ net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_GTW01 }}: +110
+# default_{{ HOSTNAME_DNS01 }}: +111
+# default_{{ HOSTNAME_DNS02 }}: +112
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
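+        # Note: the offsets above are relative to the network base address; for example,
+        # assuming the allocated subnet is 10.60.0.0/24, 'gateway: +1' resolves to
+        # 10.60.0.1 and 'default_cfg01...: +100' to 10.60.0.100 (illustrative only, the
+        # actual subnet is taken from PRIVATE_ADDRESS_POOL01).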
+
+ admin-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +90
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_GTW01 }}: +110
+# default_{{ HOSTNAME_DNS01 }}: +111
+# default_{{ HOSTNAME_DNS02 }}: +112
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
+
+ tenant-pool01:
+ net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_GTW01 }}: +110
+# default_{{ HOSTNAME_DNS01 }}: +111
+# default_{{ HOSTNAME_DNS02 }}: +112
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+10, -10]
+
+ external-pool01:
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_GTW01 }}: +110
+# default_{{ HOSTNAME_DNS01 }}: +111
+# default_{{ HOSTNAME_DNS02 }}: +112
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+10, -10]
+
+
+ groups:
+ - name: default
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+ use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+ network_pools:
+ admin: admin-pool01
+ private: private-pool01
+ tenant: tenant-pool01
+ external: external-pool01
+
+ l2_network_devices:
+ private:
+ address_pool: private-pool01
+ dhcp: false
+ forward:
+ mode: route
+
+ admin:
+ address_pool: admin-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
+ tenant:
+ address_pool: tenant-pool01
+ dhcp: false
+
+ external:
+ address_pool: external-pool01
+ dhcp: false
+ forward:
+ mode: route
+
+
+ group_volumes:
+ - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
+ source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ format: qcow2
+ - name: cfg01_day01_image # Pre-configured day01 image
+ source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+ format: qcow2
+ - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
+ source_image: !os_env MCP_IMAGE_PATH1604
+ format: qcow2
+
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cfg01_day01_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                 # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_cfg01
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+
+ - name: {{ HOSTNAME_CTL01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                 # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: &interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config: &network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
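+      # The &interfaces and &network_config anchors defined on this node are reused by the
+      # other salt_minion nodes below via '*interfaces' and '*network_config'.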
+
+ - name: {{ HOSTNAME_CTL02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                 # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CTL03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                 # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                 # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                 # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                 # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                 # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                 # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                 # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                 # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                 # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                 # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_PRX01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                 # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+
+ - name: {{ HOSTNAME_CMP01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                 # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+
+ interfaces: &all_interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ - label: ens5
+ l2_network_device: tenant
+ interface_model: *interface_model
+ - label: ens6
+ l2_network_device: external
+ interface_model: *interface_model
+ network_config: &all_network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+ ens5:
+ networks:
+ - tenant
+ ens6:
+ networks:
+ - external
+
+ - name: {{ HOSTNAME_CMP02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                 # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_GTW01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                 # for storing the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+# - name: {{ HOSTNAME_DNS01 }}
+# role: salt_minion
+# params:
+# vcpu: !os_env SLAVE_NODE_CPU, 1
+# memory: !os_env SLAVE_NODE_MEMORY, 2048
+# boot:
+# - hd
+# cloud_init_volume_name: iso
+# cloud_init_iface_up: ens3
+# volumes:
+# - name: system
+# capacity: !os_env NODE_VOLUME_SIZE, 150
+# backing_store: mcp_ubuntu_1604_image
+# format: qcow2
+# - name: iso # Volume with name 'iso' will be used
+#                 # for storing the image with cloud-init metadata.
+# capacity: 1
+# format: raw
+# device: cdrom
+# bus: ide
+# cloudinit_meta_data: *cloudinit_meta_data
+# cloudinit_user_data: *cloudinit_user_data_1604
+#
+# interfaces: *all_interfaces
+# network_config: *all_network_config
+#
+# - name: {{ HOSTNAME_DNS02 }}
+# role: salt_minion
+# params:
+# vcpu: !os_env SLAVE_NODE_CPU, 1
+# memory: !os_env SLAVE_NODE_MEMORY, 2048
+# boot:
+# - hd
+# cloud_init_volume_name: iso
+# cloud_init_iface_up: ens3
+# volumes:
+# - name: system
+# capacity: !os_env NODE_VOLUME_SIZE, 150
+# backing_store: mcp_ubuntu_1604_image
+# format: qcow2
+# - name: iso # Volume with name 'iso' will be used
+#                 # for storing the image with cloud-init metadata.
+# capacity: 1
+# format: raw
+# device: cdrom
+# bus: ide
+# cloudinit_meta_data: *cloudinit_meta_data
+# cloudinit_user_data: *cloudinit_user_data_1604
+#
+# interfaces: *all_interfaces
+# network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/_context-cookiecutter-mcp-queens-ovs.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/_context-cookiecutter-mcp-queens-ovs.yaml
new file mode 100644
index 0000000..e6e73a5
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/_context-cookiecutter-mcp-queens-ovs.yaml
@@ -0,0 +1,183 @@
+default_context:
+ bmk_enabled: 'False'
+ ceph_enabled: 'False'
+ cicd_enabled: 'False'
+ cluster_domain: cookied-mcp-queens-ovs.local
+ cluster_name: cookied-mcp-queens-ovs
+ compute_bond_mode: active-backup
+ compute_primary_first_nic: eth1
+ compute_primary_second_nic: eth2
+ context_seed: wUqrwKeBTCpRpVrhK1KwZQv4cjM9VhG7L2vQ0iQsTuMrXASklEBDmJEf6bnPEqcK
+ control_network_netmask: 255.255.255.0
+ control_network_subnet: 172.16.10.0/24
+ control_vlan: '10'
+ cookiecutter_template_branch: master
+ cookiecutter_template_credentials: gerrit
+ cookiecutter_template_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/mk/cookiecutter-templates.git
+ deploy_network_gateway: 192.168.10.1
+ deploy_network_netmask: 255.255.255.0
+ deploy_network_subnet: 192.168.10.0/24
+ deployment_type: physical
+ dns_server01: 172.18.176.6
+ dns_server02: 172.18.208.44
+ email_address: ddmitriev@mirantis.com
+ gateway_primary_first_nic: eth1
+ gateway_primary_second_nic: eth2
+ infra_bond_mode: active-backup
+ infra_deploy_nic: eth0
+ infra_kvm01_control_address: 172.16.10.101
+ infra_kvm01_deploy_address: 192.168.10.101
+ infra_kvm01_hostname: kvm01
+ infra_kvm02_control_address: 172.16.10.102
+ infra_kvm02_deploy_address: 192.168.10.102
+ infra_kvm02_hostname: kvm02
+ infra_kvm03_control_address: 172.16.10.103
+ infra_kvm03_deploy_address: 192.168.10.103
+ infra_kvm03_hostname: kvm03
+ infra_kvm_vip_address: 172.16.10.100
+ infra_primary_first_nic: eth1
+ infra_primary_second_nic: eth2
+ kubernetes_enabled: 'False'
+ local_repositories: 'False'
+ maas_deploy_address: 192.168.10.90
+ maas_hostname: cfg01
+ mcp_version: stable
+ offline_deployment: 'False'
+ opencontrail_enabled: 'False'
+ openstack_benchmark_node01_address: 172.16.10.95
+ openstack_benchmark_node01_hostname: bmk01
+ openstack_cluster_size: compact
+ openstack_compute_count: '2'
+ openstack_compute_rack01_hostname: cmp
+ openstack_compute_rack01_single_subnet: 172.16.10
+ openstack_compute_rack01_tenant_subnet: 10.1.0
+ openstack_control_address: 172.16.10.100
+ openstack_control_hostname: ctl
+ openstack_control_node01_address: 172.16.10.101
+ openstack_control_node01_hostname: ctl01
+ openstack_control_node02_address: 172.16.10.102
+ openstack_control_node02_hostname: ctl02
+ openstack_control_node03_address: 172.16.10.103
+ openstack_control_node03_hostname: ctl03
+ openstack_database_address: 172.16.10.100
+ openstack_database_hostname: ctl
+ openstack_database_node01_address: 172.16.10.101
+ openstack_database_node01_hostname: ctl01
+ openstack_database_node02_address: 172.16.10.102
+ openstack_database_node02_hostname: ctl02
+ openstack_database_node03_address: 172.16.10.103
+ openstack_database_node03_hostname: ctl03
+ openstack_enabled: 'True'
+ openstack_gateway_node01_address: 172.16.10.110
+ openstack_gateway_node01_hostname: gtw01
+ openstack_gateway_node01_tenant_address: 10.1.0.6
+ openstack_gateway_node02_address: 172.16.10.111
+ openstack_gateway_node02_hostname: gtw02
+ openstack_gateway_node02_tenant_address: 10.1.0.7
+ openstack_gateway_node03_address: 172.16.10.112
+ openstack_gateway_node03_hostname: gtw03
+ openstack_gateway_node03_tenant_address: 10.1.0.8
+ openstack_message_queue_address: 172.16.10.100
+ openstack_message_queue_hostname: ctl
+ openstack_message_queue_node01_address: 172.16.10.101
+ openstack_message_queue_node01_hostname: ctl01
+ openstack_message_queue_node02_address: 172.16.10.102
+ openstack_message_queue_node02_hostname: ctl02
+ openstack_message_queue_node03_address: 172.16.10.103
+ openstack_message_queue_node03_hostname: ctl03
+ openstack_network_engine: ovs
+ openstack_neutron_qos: 'False'
+ openstack_neutron_vlan_aware_vms: 'False'
+ openstack_nfv_dpdk_enabled: 'False'
+ openstack_nfv_sriov_enabled: 'False'
+ openstack_nova_compute_nfv_req_enabled: 'False'
+ openstack_ovs_dvr_enabled: 'False'
+ openstack_ovs_encapsulation_type: vxlan
+ openstack_proxy_address: 172.16.10.80
+ openstack_proxy_hostname: prx
+ openstack_proxy_node01_address: 172.16.10.121
+ openstack_proxy_node01_hostname: prx01
+ openstack_proxy_node02_address: 172.16.10.122
+ openstack_proxy_node02_hostname: prx02
+ openstack_upgrade_node01_address: 172.16.10.19
+ openstack_version: queens
+ oss_enabled: 'False'
+ oss_node03_address: ${_param:stacklight_monitor_node03_address}
+ oss_webhook_app_id: '24'
+ oss_pushkin_email_sender_password: password
+ oss_pushkin_smtp_port: '587'
+ oss_webhook_login_id: '13'
+ platform: openstack_enabled
+ public_host: ${_param:openstack_proxy_address}
+ publication_method: email
+ reclass_repository: https://github.com/Mirantis/mk-lab-salt-model.git
+ backup_private_key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKCAQEAxL6/rVgCetsETpZaUmXmkj8cZ1WN0eubH1FvMDOi/La9ZJyT
+ k0C6AYpJnIyEm93pMj5cLm08qRqMW+2pdOhYjcH69yg5MrX5SkRk8jCmIHIYoIbh
+ Qnwbnj3dd3I39ZdfU2FO7u2vlbglVou6ZoQxlJDItuLNtzq6EG+w9eF19e7+OsC6
+ 6iUItp618zfw1l3J/8nKvCGe2RYDf7mJW6XwCl/DwryJmwwzvPgYJ3QMuDD8/HFj
+ lrJ3xjFTXj4b4Ws1XIoy78fFbtiLr4OwqCYkho03u2E5rOOP1qZxZB63sivHMLMO
+ MM5bOAQKbulFNoyALADGYfc7sf0bZ4u9XXDXxQIDAQABAoIBAQCfmc2MJRT97KW1
+ yqpCpX9BrAiymuiNHf+cjEcSZxEUyHkjIRFmJt+9WB0W7ba1anM92vCUiPDojSzH
+ dig9Oi578JxR20NrK8uqv4jUHzrknynzLveVI3CUEcOSnglfJQijbxDFKfOCFPvV
+ FUyE1UATMNBh6+LNfMprgu+exuMWOPnDyUiYQ+WZ0JfuZY8fuaZte4woJJOb9LUu
+ 5rsMG/smIzjpgZ0Z9ZVDMurfq565qhpaXRAqKeIuyht8pacTo31iMQdHB78AvY/3
+ g0z21Gk8k3z0Kr/YFKr2r4FmXY5m/gAUvZly2ZrVQM5XsbTVCzq/JpI5fssNvSbU
+ AKmXzf4RAoGBAOO3d4/cstxERzW6hyOTjZIN1ppR52CsnZTsVPbfd0pCtmzmVZce
+ CtHKdcXSbTwZvvkK09QSWAp3MoSpd0gIOiLU8Wx/R/RIZsu9BlhTS3r3EQLnk72d
+ H/1TTA+j4T/LIYLSojQ1RxvIrHetAD44j732aTwKAHj/SybEAVqNkOB/AoGBAN0u
+ gLcrgqIHGrk4VjWSvlCGymfF40equcx+ud7XhfZDGETUOSahW4dPZ52cjPAkrCBQ
+ MMfcDwSVGsOAjd+mNt11BHUKobnhXwFaWWuyqyn9NmWFbjMbICVh7E3Of5aVN38o
+ lrmo/7LuKMVG7XRwphCv5NkaJmQG4njDyUQWlaW7AoGADCd8wDb9bPhP/LQqBmIX
+ ylXmwHHisaxE9O/wUQT4bwREjGd25gv6c9wkkRx8LBsLsGs9hzI7dMOL9Ly+2x9l
+ SvqmsC3S/1zl77X1Ir2/Z57MT6Vgo1xBmtnZU3Rhz2/eKAdqFPNLClaZrgGT475N
+ HcyLLWMzR0IJFtabY+Puea0CgYA8Zb5wRkldxWLewSuJZZDinGwY+kieAVjLJq/K
+ 0j+ah6fQ48LXcah0wpIgz+cMjHcUO9GWQdk3/x9X03rqX5EL2DBnZYfUIl63F9zj
+ M97ZkHOSNWVqPzX//0Vv2butewG0j3jZKfTo/2/SrxOYgEpYtC9huWpSVi7xm0US
+ erhSkQKBgFIf9JEsfgE57ANhvITZ3ZI0uZXNxZkXQaVg8jvScDi79IIhy9iPzhKC
+ aIIQoDNIlWv1ftCRZ5AlBvVXgvQ/QNrwy48JiQTzWZlb9Ezg8w+olQmSbG6fq7Y+
+ 7r3i+QUZ7RBdOb24QcQ618q54ozNTCB7OywY78ptFzeoBeptiNr1
+ -----END RSA PRIVATE KEY-----
+ backup_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEvr+tWAJ62wROllpSZeaSPxxnVY3R65sfUW8wM6L8tr1knJOTQLoBikmcjISb3ekyPlwubTypGoxb7al06FiNwfr3KDkytflKRGTyMKYgchighuFCfBuePd13cjf1l19TYU7u7a+VuCVWi7pmhDGUkMi24s23OroQb7D14XX17v46wLrqJQi2nrXzN/DWXcn/ycq8IZ7ZFgN/uYlbpfAKX8PCvImbDDO8+BgndAy4MPz8cWOWsnfGMVNePhvhazVcijLvx8Vu2Iuvg7CoJiSGjTe7YTms44/WpnFkHreyK8cwsw4wzls4BApu6UU2jIAsAMZh9zux/Rtni71dcNfF
+ salt_api_password: H0rTPdmktZ8RI7T7y6fjqY0uEbbs7Kwi
+ salt_api_password_hash: $6$lfbIFtMZ$.nTbTDMzs1iYv0WqkZHia8H8Fma963Nv3qyyz1x68jQh0YXK9i907B/hvoG4QHMvfolE7V7vQnFClJ1mVA3Yb.
+ salt_master_address: 172.16.10.90
+ salt_master_hostname: cfg01
+ salt_master_management_address: 192.168.10.90
+ shared_reclass_url: ssh://mcp-jenkins@gerrit.mcp.mirantis.net:29418/salt-models/reclass-system.git
+ fluentd_enabled: 'True'
+ stacklight_enabled: 'True'
+ stacklight_log_address: 172.16.10.60
+ stacklight_log_hostname: log
+ stacklight_log_node01_address: 172.16.10.61
+ stacklight_log_node01_hostname: log01
+ stacklight_log_node02_address: 172.16.10.62
+ stacklight_log_node02_hostname: log02
+ stacklight_log_node03_address: 172.16.10.63
+ stacklight_log_node03_hostname: log03
+ stacklight_monitor_address: 172.16.10.70
+ stacklight_monitor_hostname: mon
+ stacklight_monitor_node01_address: 172.16.10.71
+ stacklight_monitor_node01_hostname: mon01
+ stacklight_monitor_node02_address: 172.16.10.72
+ stacklight_monitor_node02_hostname: mon02
+ stacklight_monitor_node03_address: 172.16.10.73
+ stacklight_monitor_node03_hostname: mon03
+ stacklight_telemetry_address: 172.16.10.85
+ stacklight_telemetry_hostname: mtr
+ stacklight_telemetry_node01_address: 172.16.10.86
+ stacklight_telemetry_node01_hostname: mtr01
+ stacklight_telemetry_node02_address: 172.16.10.87
+ stacklight_telemetry_node02_hostname: mtr02
+ stacklight_telemetry_node03_address: 172.16.10.88
+ stacklight_telemetry_node03_hostname: mtr03
+ stacklight_version: '2'
+ stacklight_long_term_storage_type: influxdb
+ static_ips_on_deploy_network_enabled: 'False'
+ tenant_network_gateway: 10.1.0.1
+ tenant_network_netmask: 255.255.255.0
+ tenant_network_subnet: 10.1.0.0/24
+ tenant_vlan: '20'
+ upstream_proxy_enabled: 'False'
+ use_default_network_scheme: 'False'
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/_context-environment.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/_context-environment.yaml
new file mode 100644
index 0000000..4c7091b
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/_context-environment.yaml
@@ -0,0 +1,204 @@
+nodes:
+ cfg01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: infra_config_node01
+ roles:
+ - infra_config
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_control_node01
+ roles:
+ - infra_kvm
+ - openstack_control_leader
+ - openstack_database_leader
+ - openstack_message_queue
+ # - features_designate_bind9_database
+ # - features_designate_bind9_dns
+ # - features_designate_bind9
+ # - features_designate_bind9_keystone
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_control_node02
+ roles:
+ - infra_kvm
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+ # - features_designate_bind9_database
+ # - features_designate_bind9_dns
+ # - features_designate_bind9
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ ctl03.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_control_node03
+ roles:
+ - infra_kvm
+ - openstack_control
+ - openstack_database
+ - openstack_message_queue
+ # - features_designate_bind9_database
+ # - features_designate_bind9
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ prx01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_proxy_node01
+ roles:
+ - openstack_proxy
+ # - features_designate_bind9_proxy
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_server_node01
+ roles:
+ - stacklightv2_server_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_server_node02
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mon03.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_server_node03
+ roles:
+ - stacklightv2_server
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_log_node01
+ roles:
+ - stacklight_log_leader_v2
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_log_node02
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ log03.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_log_node03
+ roles:
+ - stacklight_log
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_telemetry_node01
+ roles:
+ - stacklight_telemetry_leader
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr02.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_telemetry_node02
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ mtr03.mcp11-ovs-dpdk.local:
+ reclass_storage_name: stacklight_telemetry_node03
+ roles:
+ - stacklight_telemetry
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+
+ # Generator-based computes. For compatibility only
+ cmp<<count>>.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_compute_rack01
+ roles:
+ - openstack_compute
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: bond0_ab_ovs_vxlan_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
+
+ gtw01.mcp11-ovs-dpdk.local:
+ reclass_storage_name: openstack_gateway_node01
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ ens3:
+ role: single_dhcp
+ ens4:
+ role: single_ctl
+ ens5:
+ role: bond0_ab_ovs_vxlan_mesh
+ ens6:
+ role: bond1_ab_ovs_floating
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/core.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/core.yaml
new file mode 100644
index 0000000..afd0d5a
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/core.yaml
@@ -0,0 +1,119 @@
+{% from 'cookied-mcp-queens-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-backup-restore.yaml' as BACKUP with context %}
+
+# Install support services
+- description: Install keepalived on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster and *01*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Install keepalived
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Install glusterfs
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.service
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Setup glusterfs on primary controller
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check the gluster status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' cmd.run 'gluster peer status; gluster volume status' -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check the rabbitmq status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on first server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:master' state.sls galera
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on other servers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:slave' state.sls galera -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check mysql status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+- description: Install haproxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' state.sls haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check haproxy status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.status haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Restart rsyslog
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.restart rsyslog
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install memcached on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@memcached:server' state.sls memcached
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check the VIP
+ cmd: |
+ OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
+ echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 10}
+ skip_fail: false
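+
+# The check above resolves the expected VIP from pillar on the config node and fails
+# (grep returns non-zero) unless one of the keepalived cluster members actually holds it;
+# the retry: {count: 3, delay: 10} gives keepalived time to elect a MASTER.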
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/openstack.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/openstack.yaml
new file mode 100644
index 0000000..26775ce
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/openstack.yaml
@@ -0,0 +1,136 @@
+{% from 'cookied-mcp-queens-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-queens-ovs/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'cookied-mcp-queens-ovs/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'cookied-mcp-queens-ovs/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'cookied-mcp-queens-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-queens-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-queens-ovs/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
+{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+{% import 'shared-openstack.yaml' as SHARED_OPENSTACK with context %}
+
+# Install OpenStack control services
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE(USE_ORCHESTRATE=false) }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_NEUTRON() }}
+
+# install designate
+#- description: Install bind
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+# -C 'I@bind:server' state.sls bind
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 5}
+# skip_fail: false
+
+#- description: Install designate
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+# -C 'I@designate:server' state.sls designate -b 1
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 5, delay: 10}
+# skip_fail: false
+
+{{ SHARED_OPENSTACK.MACRO_INSTALL_HEAT() }}
+
+- description: Deploy horizon dashboard
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@horizon:server' state.sls horizon
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Deploy nginx proxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@nginx:server' state.sls nginx
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+# Install compute node
+
+- description: Apply formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+- description: Re-apply (as in doc) formulas for compute node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' state.apply
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check IP on computes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'cmp*' cmd.run
+ 'ip a'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 10, delay: 30}
+ skip_fail: false
+
+- description: Create net04_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_external
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04_ext {{ IPV4_NET_EXTERNAL_PREFIX }}.0/24 --name net04_ext__subnet --disable-dhcp --allocation-pool start={{ IPV4_NET_EXTERNAL_PREFIX }}.150,end={{ IPV4_NET_EXTERNAL_PREFIX }}.180 --gateway {{ IPV4_NET_EXTERNAL_PREFIX }}.1'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron net-create net04'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create subnet_net04
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron subnet-create net04 {{ IPV4_NET_TENANT_PREFIX }}.0/24 --name net04__subnet --allocation-pool start={{ IPV4_NET_TENANT_PREFIX }}.120,end={{ IPV4_NET_TENANT_PREFIX }}.240'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create router
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-create net04_router01'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set gateway
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-gateway-set net04_router01 net04_ext'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Add interface
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; neutron router-interface-add net04_router01 net04__subnet'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
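+
+# Optional sanity check (illustrative only, not part of the original flow):
+#- description: List the created networks and router ports
+#  cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+#    '. /root/keystonercv3; neutron net-list; neutron router-port-list net04_router01'
+#  node_name: {{ HOSTNAME_CFG01 }}
+#  retry: {count: 1, delay: 30}
+#  skip_fail: true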
+
+- description: Sync time
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+ 'service ntp stop; ntpd -gq; service ntp start'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+{{ SHARED.INSTALL_DOCKER_ON_GTW() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/salt.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/salt.yaml
new file mode 100644
index 0000000..40e2f4e
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/salt.yaml
@@ -0,0 +1,49 @@
+{% from 'cookied-mcp-queens-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-mcp-queens-ovs/underlay.yaml' import HOSTNAME_CMP01 with context %}
+{% from 'cookied-mcp-queens-ovs/underlay.yaml' import HOSTNAME_CMP02 with context %}
+{% from 'cookied-mcp-queens-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'cookied-mcp-queens-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'cookied-mcp-queens-ovs/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
+# For other salt model repository parameters, see shared-salt.yaml
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
+
+{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
+
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "fluentd" "runtest" "maas" "jenkins" "glusterfs" "backupninja" "auditd" "logrotate"') }}
+
+{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
+
+{{ SHARED.MACRO_RUN_SALT_MASTER_UNDERLAY_STATES() }}
+
+{{ SHARED.MACRO_GENERATE_INVENTORY() }}
+
+{{ SHARED.MACRO_NETWORKING_WORKAROUNDS() }}
+
+{{ SHARED.MACRO_BOOTSTRAP_ALL_MINIONS() }}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_SERVICES_ON_CFG()}}
+
+{{SHARED.MACRO_CHECK_SALT_VERSION_ON_NODES()}}
+
+- description: Hack gtw node
+ cmd: salt '{{ HOSTNAME_GTW01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.110/24 dev ens4; ip addr flush dev ens4";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Hack cmp01 node
+ cmd: salt '{{ HOSTNAME_CMP01 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.105/24 dev ens4; ip addr flush dev ens4";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Hack cmp02 node
+ cmd: salt '{{ HOSTNAME_CMP02 }}' cmd.run "ip addr del {{ SHARED.IPV4_NET_CONTROL_PREFIX }}.106/24 dev ens4; ip addr flush dev ens4";
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
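+
+# Note: the three "Hack ..." steps above drop the addresses assigned to ens4 during
+# provisioning so that the control-plane IPs defined in the reclass model can be applied
+# cleanly by the later deployment states (best-effort explanation).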
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/sl.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/sl.yaml
new file mode 100644
index 0000000..748c05e
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/sl.yaml
@@ -0,0 +1,258 @@
+{% from 'cookied-mcp-queens-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
+
+# Install docker swarm
+- description: Configure docker service
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls docker.host
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install docker swarm on master node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Send grains to the swarm slave nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls salt.minion.grains
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Update mine
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' mine.update
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Refresh modules
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Rerun swarm on slaves for proper token population
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure slave nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: List registered Docker swarm nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install keepalived on mon nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'mon*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check the VIP on mon nodes
+ cmd: |
+ SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+ echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
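+
+# The check above passes only if at least one mon node already holds the stacklight
+# VIP managed by keepalived; the final 'grep -B1' also prints which node owns it.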
+
+# Install slv2 infra
+# Install MongoDB for alerta
+- description: Install Mongo if target matches
+ cmd: |
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.server
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+# Create MongoDB cluster
+- description: Create MongoDB cluster if target matches
+ cmd: |
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' match.pillar 'mongodb:server' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@mongodb:server' state.sls mongodb.cluster
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 20}
+ skip_fail: false
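+
+# Note: the 'match.pillar' guard used in the two MongoDB steps above (and the
+# 'test.ping && echo true' guards further below) turns a step into a no-op when no
+# minion carries the corresponding pillar, so the same sl.yaml can serve models with
+# or without the optional services.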
+
+- description: Install telegraf
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
+ cmd: |
+ if salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:exporters' state.sls prometheus
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install elasticsearch server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install kibana server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:server' state.sls kibana.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install elasticsearch client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 30}
+ skip_fail: false
+
+- description: Install kibana client
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kibana:client' state.sls kibana.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check influx db
+ cmd: |
+ INFLUXDB_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
+ if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@influxdb:server' state.sls influxdb
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+# Install Prometheus LTS (optional, if set in the model)
+- description: Prometheus LTS (optional, if set in the model)
+  cmd: |
+    PROMETHEUS_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' test.ping 1>/dev/null 2>&1 && echo true`;
+    echo "PROMETHEUS relay service presence: ${PROMETHEUS_SERVICE}";
+ if [[ "$PROMETHEUS_SERVICE" == "true" ]]; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@prometheus:relay' state.sls prometheus
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+# Install service for the log collection
+- description: Configure fluentd
+ cmd: |
+ FLUENTD_SERVICE=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "Fluentd service presence: ${FLUENTD_SERVICE}";
+ if [[ "$FLUENTD_SERVICE" == "true" ]]; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@fluentd:agent' state.sls fluentd
+ else
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:log_collector' state.sls heka.log_collector
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+# Install heka ceilometer collector
+- description: Install heka ceilometer collector if it exists
+ cmd: |
+ CEILO=`salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "Ceilometer service presence: ${CEILO}";
+ if [[ "$CEILO" == "true" ]]; then
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' state.sls heka.ceilometer_collector;
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@heka:ceilometer_collector:enabled' service.restart ceilometer_collector
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+# Collect grains needed to configure the services
+
+- description: Get grains
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' state.sls salt.minion.grains
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Sync modules
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' saltutil.refresh_modules
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Update mine
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@salt:minion' mine.update; sleep 5;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 15}
+ skip_fail: false
+
+# Configure the services running in Docker Swarm
+- description: Configure prometheus in docker swarm
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' state.sls prometheus,heka.remote_collector
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+# Launch containers
+- description: Launch prometheus containers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master and I@prometheus:server' state.sls docker.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Check docker ps
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm and I@prometheus:server' cmd.run "docker ps"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Install sphinx
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@sphinx:server' state.sls sphinx
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+
+#- description: Install prometheus alertmanager
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+
+#- description: run docker state
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm:role:master' state.sls docker
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+#
+#- description: docker ps
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@docker:swarm' dockerng.ps
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 10}
+# skip_fail: false
+
+- description: Configure Grafana dashboards and datasources
+ cmd: sleep 30; salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@grafana:client' state.sls grafana.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Run salt minion to create cert files
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--meta-data.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+ instance-id: iid-local1
+ hostname: {hostname}
+ local-hostname: {hostname}
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--user-data-cfg01.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--user-data-cfg01.yaml
new file mode 100644
index 0000000..da7908d
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--user-data-cfg01.yaml
@@ -0,0 +1,61 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ - sudo ifdown ens3
+ - sudo ip r d default || true # remove existing default route to get it from dhcp
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ - echo "nameserver 172.18.208.44" >> /etc/resolv.conf;
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
+ - path: /root/.ssh/config
+ owner: root:root
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 300
+ ServerAliveCountMax 10
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--user-data1604.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--user-data1604.yaml
new file mode 100644
index 0000000..3fbb777
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/underlay--user-data1604.yaml
@@ -0,0 +1,50 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
diff --git a/tcp_tests/templates/cookied-mcp-queens-ovs/underlay.yaml b/tcp_tests/templates/cookied-mcp-queens-ovs/underlay.yaml
new file mode 100644
index 0000000..0000cc9
--- /dev/null
+++ b/tcp_tests/templates/cookied-mcp-queens-ovs/underlay.yaml
@@ -0,0 +1,701 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% import 'cookied-mcp-queens-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'cookied-mcp-queens-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'cookied-mcp-queens-ovs/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'cookied-mcp-queens-ovs') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG01 = os_env('HOSTNAME_LOG01', 'log01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG02 = os_env('HOSTNAME_LOG02', 'log02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_LOG03 = os_env('HOSTNAME_LOG03', 'log03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR01 = os_env('HOSTNAME_MTR01', 'mtr01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR02 = os_env('HOSTNAME_MTR02', 'mtr02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MTR03 = os_env('HOSTNAME_MTR03', 'mtr03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', 'cookied-mcp-queens-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+ address_pools:
+ private-pool01:
+ net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
+
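+        # Note (assumed fuel-devops semantics): the '+N' entries in these address pools
+        # are offsets from the network address of the /24 allocated from the
+        # corresponding *_ADDRESS_POOL01 network, so for a 10.60.0.0/24 subnet '+110'
+        # resolves to 10.60.0.110; negative offsets in ip_ranges count back from the
+        # end of the subnet.
+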
+ admin-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +90
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
+
+ tenant-pool01:
+ net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+10, -10]
+
+ external-pool01:
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_MON01 }}: +71
+ default_{{ HOSTNAME_MON02 }}: +72
+ default_{{ HOSTNAME_MON03 }}: +73
+ default_{{ HOSTNAME_LOG01 }}: +61
+ default_{{ HOSTNAME_LOG02 }}: +62
+ default_{{ HOSTNAME_LOG03 }}: +63
+ default_{{ HOSTNAME_MTR01 }}: +86
+ default_{{ HOSTNAME_MTR02 }}: +87
+ default_{{ HOSTNAME_MTR03 }}: +88
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+10, -10]
+
+
+ groups:
+ - name: default
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+ use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
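+        # Note (assumed devops behaviour): the '!os_env VAR, default' tags used in this
+        # file substitute the value of the environment variable VAR, falling back to the
+        # literal after the comma; e.g. exporting CONNECTION_STRING=qemu+ssh://user@host/system
+        # would point the driver at a remote libvirt instance.
+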
+ network_pools:
+ admin: admin-pool01
+ private: private-pool01
+ tenant: tenant-pool01
+ external: external-pool01
+
+ l2_network_devices:
+ private:
+ address_pool: private-pool01
+ dhcp: false
+ forward:
+ mode: route
+
+ admin:
+ address_pool: admin-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
+ tenant:
+ address_pool: tenant-pool01
+ dhcp: false
+
+ external:
+ address_pool: external-pool01
+ dhcp: false
+ forward:
+ mode: route
+
+
+ group_volumes:
+ - name: cloudimage1604 # This name is used for 'backing_store' option for node volumes.
+ source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ format: qcow2
+ - name: cfg01_day01_image # Pre-configured day01 image
+ source_image: {{ os_env('IMAGE_PATH_CFG01_DAY01', os_env('IMAGE_PATH1604')) }} # http://images.mirantis.com/cfg01-day01.qcow2 or fallback to IMAGE_PATH1604
+ format: qcow2
+ - name: mcp_ubuntu_1604_image # Pre-configured image for control plane
+ source_image: !os_env MCP_IMAGE_PATH1604
+ format: qcow2
+
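+  # Usage note (assumption): IMAGE_PATH1604, IMAGE_PATH_CFG01_DAY01 and MCP_IMAGE_PATH1604
+  # must point to locally available qcow2 images before the underlay is created, e.g.:
+  #   export IMAGE_PATH1604=~/images/xenial-server-cloudimg-amd64-disk1.img
+  #   export MCP_IMAGE_PATH1604=~/images/mcp-ubuntu-16.04.qcow2
+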
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cfg01_day01_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_cfg01
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+
+ - name: {{ HOSTNAME_CTL01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: &interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config: &network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+
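+      # The '&interfaces' / '&network_config' anchors defined on ctl01 above are reused
+      # via '*interfaces' / '*network_config' aliases on the other two-NIC nodes below,
+      # so the control, monitoring, log, metering and proxy nodes all share the same
+      # admin+private wiring.
+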
+ - name: {{ HOSTNAME_CTL02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CTL03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 16384
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MON03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_LOG03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_MTR03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_PRX01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: mcp_ubuntu_1604_image
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CMP01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+
+ interfaces: &all_interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ - label: ens5
+ l2_network_device: tenant
+ interface_model: *interface_model
+ - label: ens6
+ l2_network_device: external
+ interface_model: *interface_model
+ network_config: &all_network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+ ens5:
+ networks:
+ - tenant
+ ens6:
+ networks:
+ - external
+
+ - name: {{ HOSTNAME_CMP02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 3
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_GTW01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr.yaml
new file mode 100644
index 0000000..6abc5d0
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-dvr.yaml
@@ -0,0 +1,67 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-mcp-queens-dvr' %}
+# Name of the context file (without the fixed '.yaml' extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-queens-dvr.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+ cmd: |
+ set -e;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
+    # Start compute node addresses from .105, as in the static models
+ sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+ # salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.111' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+ # salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='{{ SHARED.IPV4_NET_CONTROL_PREFIX }}.112' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+ # Workaround of missing reclass.system for dns role
+ # salt-call reclass.cluster_meta_set name='salt_master_host' value='${_param:infra_config_deploy_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ # Add cinder volume on cmp nodes. PROD-20945
+ reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
+ reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
+ reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
+ reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
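+
+# Note (interpretation): with combined roles the ctl nodes also host the database and
+# message-queue services, so the dedicated kvm/database/message_queue storage nodes are
+# dropped from the generated inventory above, while cinder-volume with an LVM-on-loopback
+# backend is added to the compute nodes (PROD-20945).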
+
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+ cmd: |
+ sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-ovs.yaml b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-ovs.yaml
new file mode 100644
index 0000000..e6fc5f0
--- /dev/null
+++ b/tcp_tests/templates/cookied-model-generator/salt_cookied-mcp-queens-ovs.yaml
@@ -0,0 +1,67 @@
+{% from 'cookied-model-generator/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'cookied-model-generator/underlay.yaml' import DOMAIN_NAME with context %}
+
+{% set LAB_CONFIG_NAME = 'cookied-mcp-queens-ovs' %}
+# Name of the context file (without the fixed '.yaml' extension) used to render the Environment model
+{% set ENVIRONMENT_MODEL_INVENTORY_NAME = os_env('ENVIRONMENT_MODEL_INVENTORY_NAME', LAB_CONFIG_NAME) %}
+# Path to the context files used to render Cluster and Environment models
+{%- set CLUSTER_CONTEXT_NAME = '_context-cookiecutter-mcp-queens-ovs.yaml' %}
+{%- set ENVIRONMENT_CONTEXT_NAMES = ['_context-environment.yaml'] %}
+
+{% import 'shared-salt.yaml' as SHARED with context %}
+
+{{ SHARED.MACRO_INSTALL_PACKAGES_ON_NODES(HOSTNAME_CFG01) }}
+
+{{ SHARED.MACRO_INSTALL_FORMULAS('\*') }}
+
+{{ SHARED.MACRO_GENERATE_COOKIECUTTER_MODEL() }}
+
+{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
+
+- description: "Workaround for combined roles: remove unnecessary classes"
+ cmd: |
+ set -e;
+ sed -i '/system.reclass.storage.system.physical_control_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.openstack_database_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.reclass.storage.system.openstack_message_queue_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ # sed -i '/system.reclass.storage.system.stacklight_telemetry_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ sed -i '/system.salt.control.placement.openstack.compact/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/kvm.yml;
+ # sed -i '/system.reclass.storage.system.stacklight_log_cluster/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+    # Start compute node addresses from .105, as in the static models
+ sed -i 's/start: 101/start: 105/g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+
+ # Bind9 services are placed on the first two ctl nodes
+ # salt-call reclass.cluster_meta_set name='openstack_dns_node01_address' value='${_param:openstack_control_node01_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+ # salt-call reclass.cluster_meta_set name='openstack_dns_node02_address' value='${_param:openstack_control_node02_address}' file_name=/srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml
+
+ . /root/venv-reclass-tools/bin/activate;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.infra_kvm_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_database_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node02 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ reclass-tools del-key parameters.reclass.storage.node.openstack_message_queue_node03 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ # reclass-tools del-key parameters.reclass.storage.node.stacklight_log_node01 /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml;
+ # Add cinder volume on cmp nodes. PROD-20945
+ reclass-tools add-key 'classes' 'system.cinder.volume.single' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
+ reclass-tools add-key 'classes' 'system.cinder.volume.backend.lvm' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
+ reclass-tools add-key 'classes' 'system.linux.storage.loopback' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/compute/init.yml --merge ;
+ reclass-tools add-key parameters._param.loopback_device_size '20' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/init.yml;
+
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Temporary workaround for removing cinder-volume from CTL nodes
+ cmd: |
+ sed -i 's/\-\ system\.cinder\.volume\.single//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ sed -i 's/\-\ system\.cinder\.volume\.notification\.messagingv2//g' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/openstack/control.yml;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+{{ SHARED.MACRO_GENERATE_INVENTORY(RERUN_SALTMASTER_STATE=true) }}
diff --git a/tcp_tests/templates/k8s-ha-calico/k8s-update.yaml b/tcp_tests/templates/k8s-ha-calico/k8s-update.yaml
index 1f27c73..7162657 100644
--- a/tcp_tests/templates/k8s-ha-calico/k8s-update.yaml
+++ b/tcp_tests/templates/k8s-ha-calico/k8s-update.yaml
@@ -1,7 +1,7 @@
{% from 'k8s-ha-calico/underlay.yaml' import HOSTNAME_CFG01 with context %}
{% from 'k8s-ha-calico/underlay.yaml' import HOSTNAME_CTL01 with context %}
-# Kubernetes
+# Kubernetes upgrade
- description: Update hypercube image
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@kubernetes:pool' state.sls kubernetes.pool
@@ -9,18 +9,11 @@
retry: {count: 3, delay: 5}
skip_fail: false
-- description: Run whole master to check consistency
+- description: Update the Kubernetes Master nodes and restart the services
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@kubernetes:master' state.sls kubernetes exclude=kubernetes.master.setup
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
- skip_fail: true
-
-# TODO: sync with PROD-20441
-- desciption: Restart kube-apiserver
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@kubernetes:control' service.restart kube-apiserver
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
skip_fail: false
- desciption: Print kubernetes version
diff --git a/tcp_tests/tests/system/test_calico.py b/tcp_tests/tests/system/test_calico.py
index 484aec5..ca5c116 100644
--- a/tcp_tests/tests/system/test_calico.py
+++ b/tcp_tests/tests/system/test_calico.py
@@ -41,13 +41,10 @@
"""
show_step(1)
- k8sclient = k8s_deployed.api
- assert k8sclient.nodes.list() is not None, "Can not get nodes list"
- netchecker_port = netchecker.get_service_port(k8sclient)
+ nch = netchecker.Netchecker(k8s_deployed.api)
show_step(2)
- netchecker.wait_check_network(k8sclient, works=True, timeout=300,
- netchecker_pod_port=netchecker_port)
+ nch.wait_check_network(works=True)
@pytest.mark.fail_snapshot
@pytest.mark.calico_ci
@@ -72,27 +69,19 @@
"""
show_step(1)
- k8sclient = k8s_deployed.api
- assert k8sclient.nodes.list() is not None, "Can not get nodes list"
- netchecker_port = netchecker.get_service_port(k8sclient)
+ nch = netchecker.Netchecker(k8s_deployed.api)
show_step(2)
- netchecker.get_netchecker_pod_status(k8s=k8s_deployed,
- namespace='netchecker')
+ nch.wait_netchecker_pods_running('netchecker-server')
show_step(3)
- netchecker.get_netchecker_pod_status(k8s=k8s_deployed,
- pod_name='netchecker-agent',
- namespace='netchecker')
+ nch.wait_netchecker_pods_running('netchecker-agent')
show_step(4)
- netchecker.wait_check_network(k8sclient, namespace='netchecker',
- netchecker_pod_port=netchecker_port)
+ nch.wait_check_network(works=True)
show_step(5)
- res = netchecker.get_metric(k8sclient,
- netchecker_pod_port=netchecker_port,
- namespace='netchecker')
+ res = nch.get_metric()
assert res.status_code == 200, 'Unexpected response code {}'\
.format(res)
@@ -111,8 +100,9 @@
metric, res.text)
show_step(6)
- first_node = k8sclient.nodes.list()[0]
- first_node_ips = [addr.address for addr in first_node.status.addresses
+ first_node = k8s_deployed.api.nodes.list()[0]
+ first_node_ips = [addr.address for addr in
+ first_node.read().status.addresses
if 'IP' in addr.type]
assert len(first_node_ips) > 0, "Couldn't find first k8s node IP!"
first_node_names = [name for name in underlay.node_names()
@@ -123,13 +113,13 @@
target_pod_ip = None
- for pod in k8sclient.pods.list(namespace='netchecker'):
- LOG.debug('NC pod IP: {0}'.format(pod.status.pod_ip))
- if pod.status.host_ip not in first_node_ips:
+ for pod in k8s_deployed.api.pods.list(namespace='netchecker'):
+ LOG.debug('NC pod IP: {0}'.format(pod.read().status.pod_ip))
+ if pod.read().status.host_ip not in first_node_ips:
continue
# TODO: get pods by daemonset with name 'netchecker-agent'
if 'netchecker-agent-' in pod.name and 'hostnet' not in pod.name:
- target_pod_ip = pod.status.pod_ip
+ target_pod_ip = pod.read().status.pod_ip
assert target_pod_ip is not None, "Could not find netchecker pod IP!"
@@ -154,9 +144,7 @@
'recovered'.format(target_pod_ip, first_node.name))
show_step(8)
- netchecker.wait_check_network(k8sclient, namespace='netchecker',
- netchecker_pod_port=netchecker_port,
- works=True)
+ nch.wait_check_network(works=True)
@pytest.mark.fail_snapshot
@pytest.mark.calico_ci
@@ -183,38 +171,25 @@
"""
show_step(1)
- k8sclient = k8s_deployed.api
- assert k8sclient.nodes.list() is not None, "Can not get nodes list"
- kube_master_nodes = k8s_deployed.get_k8s_masters()
+ kube_master_nodes = k8s_deployed.get_masters()
assert kube_master_nodes, "No k8s masters found in pillars!"
- netchecker_port = netchecker.get_service_port(k8sclient)
+
+ nch = netchecker.Netchecker(k8s_deployed.api)
show_step(2)
- netchecker.wait_check_network(k8sclient, namespace='netchecker',
- works=True, timeout=300,
- netchecker_pod_port=netchecker_port)
+ nch.wait_check_network(works=True)
show_step(3)
- netchecker.kubernetes_block_traffic_namespace(underlay,
- kube_master_nodes[0],
- 'netchecker')
+ nch.kubernetes_block_traffic_namespace()
show_step(4)
- netchecker.calico_allow_netchecker_connections(underlay, k8sclient,
- kube_master_nodes[0],
- 'netchecker')
+ nch.calico_allow_netchecker_connections()
show_step(5)
- netchecker.wait_check_network(k8sclient, namespace='netchecker',
- works=False, timeout=500,
- netchecker_pod_port=netchecker_port)
+ nch.wait_check_network(works=False)
show_step(6)
- netchecker.kubernetes_allow_traffic_from_agents(underlay,
- kube_master_nodes[0],
- 'netchecker')
+ nch.kubernetes_allow_traffic_from_agents()
show_step(7)
- netchecker.wait_check_network(k8sclient, namespace='netchecker',
- works=True, timeout=300,
- netchecker_pod_port=netchecker_port)
+ nch.wait_check_network(works=True)
diff --git a/tcp_tests/tests/system/test_install_k8s.py b/tcp_tests/tests/system/test_install_k8s.py
index ec90863..8066cd9 100644
--- a/tcp_tests/tests/system/test_install_k8s.py
+++ b/tcp_tests/tests/system/test_install_k8s.py
@@ -47,29 +47,22 @@
11. Optionally run k8s e2e tests
"""
- # STEP #5
- # k8s_actions = k8s_deployed
- sl_actions = stacklight_deployed
+
show_step(5)
- k8sclient = k8s_deployed.api
- assert k8sclient.nodes.list() is not None, "Can not get nodes list"
- netchecker_port = netchecker.get_service_port(k8sclient)
+ sl_actions = stacklight_deployed
+ nch = netchecker.Netchecker(k8s_deployed.api)
+
show_step(6)
- netchecker.get_netchecker_pod_status(k8s=k8s_deployed,
- namespace='netchecker')
+ nch.wait_netchecker_pods_running(netchecker.NETCHECKER_SERVER_PREFIX)
show_step(7)
- netchecker.get_netchecker_pod_status(k8s=k8s_deployed,
- pod_name='netchecker-agent',
- namespace='netchecker')
+ nch.wait_netchecker_pods_running(netchecker.NETCHECKER_AGENT_PREFIX)
- # show_step(8)
- netchecker.wait_check_network(k8sclient, namespace='netchecker',
- netchecker_pod_port=netchecker_port)
+ show_step(8)
+ nch.wait_check_network(works=True)
+
show_step(9)
- res = netchecker.get_metric(k8sclient,
- netchecker_pod_port=netchecker_port,
- namespace='netchecker')
+ res = nch.get_metric()
assert res.status_code == 200, 'Unexpected response code {}'\
.format(res)
@@ -144,12 +137,9 @@
7. Optionally run k8s e2e conformance
"""
- k8s_actions = k8s_deployed
- sl_actions = stacklight_deployed
- # STEP #5
+
show_step(5)
- k8sclient = k8s_deployed.api
- assert k8sclient.nodes.list() is not None, "Can not get nodes list"
+ sl_actions = stacklight_deployed
prometheus_client = stacklight_deployed.api
try:
@@ -186,7 +176,7 @@
if config.k8s.k8s_conformance_run:
show_step(7)
- k8s_actions.run_conformance()
+ k8s_deployed.run_conformance()
LOG.info("*************** DONE **************")
@pytest.mark.extract(container_system='docker', extract_from='conformance',
@@ -211,8 +201,8 @@
5. Run conformance if need
"""
- k8s_actions = k8s_deployed
+
if config.k8s.k8s_conformance_run:
show_step(5)
- k8s_actions.run_conformance()
+ k8s_deployed.run_conformance()
LOG.info("*************** DONE **************")
diff --git a/tcp_tests/tests/system/test_k8s_actions.py b/tcp_tests/tests/system/test_k8s_actions.py
index 5216470..6510a67 100644
--- a/tcp_tests/tests/system/test_k8s_actions.py
+++ b/tcp_tests/tests/system/test_k8s_actions.py
@@ -19,6 +19,8 @@
from tcp_tests import logger
from tcp_tests import settings
+from tcp_tests.managers.k8s import read_yaml_file
+
LOG = logger.logger
@@ -38,39 +40,42 @@
3. Expose deployment
4. Annotate service with domain name
5. Try to get service using nslookup
+ 6. Delete service and deployment
"""
+ show_step(1)
if not (config.k8s_deploy.kubernetes_externaldns_enabled and
config.k8s_deploy.kubernetes_coredns_enabled):
- pytest.skip("Test requires Externaldns and coredns addons enabled")
-
- show_step(1)
- k8sclient = k8s_deployed.api
- assert k8sclient.nodes.list() is not None, "Can not get nodes list"
+ pytest.skip("Test requires externaldns and coredns addons enabled")
show_step(2)
- name = 'test-nginx'
- k8s_deployed.kubectl_run(name, 'nginx', '80')
+ deployment = k8s_deployed.run_sample_deployment('test-dep')
show_step(3)
- k8s_deployed.kubectl_expose('deployment', name, '80', 'ClusterIP')
+ svc = deployment.expose()
- hostname = "test.{0}.local.".format(settings.LAB_CONFIG_NAME)
- annotation = "\"external-dns.alpha.kubernetes.io/" \
- "hostname={0}\"".format(hostname)
show_step(4)
- k8s_deployed.kubectl_annotate('service', name, annotation)
+ hostname = "test.{0}.local.".format(settings.LAB_CONFIG_NAME)
+ svc.patch({
+ "metadata": {
+ "annotations": {
+ "external-dns.alpha.kubernetes.io/hostname": hostname
+ }
+ }
+ })
show_step(5)
- dns_host = k8s_deployed.get_svc_ip('coredns')
- k8s_deployed.nslookup(hostname, dns_host)
+ k8s_deployed.nslookup(hostname, svc.get_ip())
+
+ show_step(6)
+ deployment.delete()
@pytest.mark.grab_versions
@pytest.mark.cncf_publisher(name=['e2e.log', 'junit_01.xml', 'version.txt',
'cncf_results.tar.gz'])
@pytest.mark.fail_snapshot
def test_k8s_cncf_certification(self, show_step, config, k8s_deployed,
- cncf_log_helper):
+ k8s_cncf_log_helper):
"""Run cncf e2e suite and provide files needed for pull request
to the CNCF repo
@@ -97,13 +102,13 @@
7. For every version in update chain:
Update cluster to new version, check test sample service
availability, run conformance
+ 8. Delete service and deployment
"""
show_step(5)
- sample = k8s_deployed.get_sample_deployment('test-dep-chain-upgrade')
- sample.run()
+ sample = k8s_deployed.run_sample_deployment('test-dep-chain-upgrade')
sample.expose()
- sample.wait_for_ready()
+ sample.wait_ready()
assert sample.is_service_available()
@@ -114,7 +119,7 @@
chain_versions = config.k8s.k8s_update_chain.split(" ")
for version in chain_versions:
LOG.info("Chain update to '{}' version".format(version))
- k8s_deployed.update_k8s_images(version)
+ k8s_deployed.update_k8s_version(version)
LOG.info("Checking test service availability")
assert sample.is_service_available()
@@ -123,6 +128,11 @@
log_name = "k8s_conformance_{}.log".format(version)
k8s_deployed.run_conformance(log_out=log_name, raise_on_err=False)
+ assert sample.is_service_available()
+
+ show_step(8)
+ sample.delete()
+
@pytest.mark.grap_versions
@pytest.mark.fail_snapshot
def test_k8s_metallb(self, show_step, config, k8s_deployed):
@@ -136,6 +146,7 @@
5. Check services availability from outside of cluster
6. Run conformance
7. Check services availability from outside of cluster
+ 8. Delete deployments
"""
show_step(1)
if not config.k8s_deploy.kubernetes_metallb_enabled:
@@ -143,25 +154,25 @@
show_step(2)
ns = "metallb-system"
- assert k8s_deployed.is_pod_exists_with_prefix("controller", ns)
- assert k8s_deployed.is_pod_exists_with_prefix("speaker", ns)
+ assert \
+ len(k8s_deployed.api.pods.list(ns, name_prefix="controller")) > 0
+ assert \
+ len(k8s_deployed.api.pods.list(ns, name_prefix="speaker")) > 0
show_step(3)
samples = []
for i in range(5):
name = 'test-dep-metallb-{}'.format(i)
- sample = k8s_deployed.get_sample_deployment(name)
- sample.run()
- samples.append(sample)
+ samples.append(k8s_deployed.run_sample_deployment(name))
show_step(4)
for sample in samples:
sample.expose('LoadBalancer')
- for sample in samples:
- sample.wait_for_ready()
+ sample.wait_ready()
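+ # by now metallb should have assigned an external IP to every
+ # LoadBalancer service, so availability is checked both from inside
+ # and from outside of the cluster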
show_step(5)
for sample in samples:
+ assert sample.is_service_available(external=False)
assert sample.is_service_available(external=True)
show_step(6)
@@ -169,11 +180,17 @@
show_step(7)
for sample in samples:
+ assert sample.is_service_available(external=False)
assert sample.is_service_available(external=True)
+ show_step(8)
+ for sample in samples:
+ sample.delete()
+
@pytest.mark.grab_versions
@pytest.mark.fail_snapshot
- def test_k8s_genie_flannel(self, show_step, salt_deployed, k8s_deployed):
+ def test_k8s_genie_flannel(self, show_step, config,
+ salt_deployed, k8s_deployed):
"""Test genie-cni+flannel cni setup
Scenario:
@@ -191,6 +208,7 @@
11. Check pods availability
12. Run conformance
13. Check pods availability
+ 14. Delete pods
"""
show_step(1)
@@ -213,13 +231,14 @@
LOG.info("Calico network: {}".format(calico_network))
show_step(2)
- assert k8s_deployed.is_pod_exists_with_prefix("kube-flannel-",
- "kube-system")
+ assert len(k8s_deployed.api.pods.list(
+ namespace="kube-system", name_prefix="kube-flannel-")) > 0
- data_dir = os.path.join(os.path.dirname(__file__), 'testdata/k8s')
show_step(3)
- flannel_pod = k8s_deployed.create_pod_from_file(
- os.path.join(data_dir, 'pod-sample-flannel.yaml'))
+ data_dir = os.path.join(os.path.dirname(__file__), 'testdata/k8s')
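+ # cni-genie is expected to select the network plugin from the "cni"
+ # annotation of each pod manifest in testdata/k8s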
+ flannel_pod = k8s_deployed.api.pods.create(
+ body=read_yaml_file(data_dir, 'pod-sample-flannel.yaml'))
+ flannel_pod.wait_running()
show_step(4)
flannel_ips = k8s_deployed.get_pod_ips_from_container(flannel_pod.name)
@@ -227,8 +246,9 @@
assert netaddr.IPAddress(flannel_ips[0]) in flannel_network
show_step(5)
- calico_pod = k8s_deployed.create_pod_from_file(
- os.path.join(data_dir, 'pod-sample-calico.yaml'))
+ calico_pod = k8s_deployed.api.pods.create(
+ body=read_yaml_file(data_dir, 'pod-sample-calico.yaml'))
+ calico_pod.wait_running()
show_step(6)
calico_ips = k8s_deployed.get_pod_ips_from_container(calico_pod.name)
@@ -236,8 +256,9 @@
assert netaddr.IPAddress(calico_ips[0]) in calico_network
show_step(7)
- multicni_pod = k8s_deployed.create_pod_from_file(
- os.path.join(data_dir, 'pod-sample-multicni.yaml'))
+ multicni_pod = k8s_deployed.api.pods.create(
+ body=read_yaml_file(data_dir, 'pod-sample-multicni.yaml'))
+ multicni_pod.wait_running()
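+ # the multi-cni pod should get one IP from the flannel network and
+ # one from the calico network, which is verified below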
show_step(8)
multicni_ips = \
@@ -248,8 +269,9 @@
netaddr.IPAddress(multicni_ips[1]) in net
show_step(9)
- nocni_pod = k8s_deployed.create_pod_from_file(
- os.path.join(data_dir, 'pod-sample.yaml'))
+ nocni_pod = k8s_deployed.api.pods.create(
+ body=read_yaml_file(data_dir, 'pod-sample.yaml'))
+ nocni_pod.wait_running()
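+ # a pod without a "cni" annotation is assumed to fall back to genie's
+ # default plugin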
show_step(10)
nocni_ips = k8s_deployed.get_pod_ips_from_container(nocni_pod.name)
@@ -277,3 +299,9 @@
show_step(13)
check_pods_availability()
+
+ show_step(14)
+ flannel_pod.delete()
+ calico_pod.delete()
+ multicni_pod.delete()
+ nocni_pod.delete()
diff --git a/tcp_tests/tests/system/test_virtlet_actions.py b/tcp_tests/tests/system/test_virtlet_actions.py
index 1cf4bee..83fd33a 100644
--- a/tcp_tests/tests/system/test_virtlet_actions.py
+++ b/tcp_tests/tests/system/test_virtlet_actions.py
@@ -12,7 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
import pytest
+import os
+from tcp_tests.managers.k8s import read_yaml_file
from tcp_tests import logger
LOG = logger.logger
@@ -35,16 +37,17 @@
if not config.k8s_deploy.kubernetes_virtlet_enabled:
pytest.skip("Test requires Virtlet addon enabled")
+ data_dir = os.path.join(os.path.dirname(__file__), 'testdata/k8s')
- k8s_deployed.git_clone('https://github.com/Mirantis/virtlet',
- '~/virtlet')
- k8s_deployed.install_jq()
show_step(1)
- vm_name = k8s_deployed.run_vm()
+ vm_pod = k8s_deployed.api.pods.create(
+ body=read_yaml_file(data_dir, 'cirros-vm.yaml'))
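+ # cirros-vm.yaml targets the virtlet.cloud runtime and pins the pod to
+ # nodes labelled extraRuntime=virtlet (see testdata/k8s/cirros-vm.yaml)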
+
show_step(2)
- k8s_deployed.wait_active_state(vm_name, timeout=360)
+ vm_pod.wait_running(timeout=600)
+
show_step(3)
- k8s_deployed.delete_vm(vm_name)
+ vm_pod.delete()
@pytest.mark.grab_versions
@pytest.mark.fail_snapshot
@@ -61,33 +64,27 @@
if not config.k8s_deploy.kubernetes_virtlet_enabled:
pytest.skip("Test requires Virtlet addon enabled")
+ data_dir = os.path.join(os.path.dirname(__file__), 'testdata/k8s')
+ cpu = 2
+ memory_mb = 512
- k8s_deployed.git_clone('https://github.com/Mirantis/virtlet',
- '~/virtlet')
- k8s_deployed.install_jq()
show_step(1)
- target_cpu = 2 # Cores
- target_memory = 256 # Size in MB
- target_memory_kb = target_memory * 1024
- target_yaml = 'virtlet/examples/cirros-vm-exp.yaml'
- k8s_deployed.adjust_cirros_resources(cpu=target_cpu,
- memory=target_memory,
- target_yaml=target_yaml)
+ pod_body = read_yaml_file(data_dir, 'cirros-vm.yaml')
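+ # Virtlet reads the VirtletVCPUCount annotation for the domain vCPU
+ # count; the pod memory limit is applied to the libvirt domain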
+ pod_body['metadata']['annotations']['VirtletVCPUCount'] = str(cpu)
+ pod_body['spec']['containers'][0]['resources']['limits']['memory'] = \
+ '{}Mi'.format(memory_mb)
+
show_step(2)
- vm_name = k8s_deployed.run_vm(target_yaml)
- k8s_deployed.wait_active_state(vm_name, timeout=360)
+ vm_pod = k8s_deployed.api.pods.create(body=pod_body)
+ vm_pod.wait_running(timeout=600)
+
show_step(3)
- domain_name = k8s_deployed.get_domain_name(vm_name)
- cpu = k8s_deployed.get_vm_cpu_count(domain_name)
- mem = k8s_deployed.get_vm_memory_count(domain_name)
- fail_msg = '{0} is not correct memory unit for VM. Correct is {1}'.\
- format(mem, target_memory_kb)
- assert target_memory_kb == mem, fail_msg
- fail_msg = '{0} is not correct cpu cores count for VM. ' \
- 'Correct is {1}'.format(cpu, target_cpu)
- assert target_cpu == cpu, fail_msg
+ stats = k8s_deployed.virtlet.virsh_domstats(vm_pod)
+ assert int(stats['vcpu.current']) == cpu
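+ # virsh domstats reports balloon.maximum in KiB, hence the division
+ # by 1024 to compare with MiB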
+ assert int(stats['balloon.maximum'])/1024 == memory_mb
+
show_step(4)
- k8s_deployed.delete_vm(target_yaml)
+ vm_pod.delete()
@pytest.mark.grab_versions
@pytest.mark.grab_k8s_results(name=['virtlet_conformance.log',
@@ -104,30 +101,3 @@
show_step(1)
k8s_deployed.run_virtlet_conformance()
-
- @pytest.mark.skip(reason="No configuration with ceph and k8s")
- def test_rbd_flexvolume_driver(self, show_step, config, k8s_deployed):
- """Test for deploying a VM with Ceph RBD volume using flexvolumeDriver
-
- Scenario:
- 1. Start VM with prepared yaml from run-ceph.sh scripts
- 2. Check that RBD volume is listed in virsh domblklist for VM
- 3. Destroy VM
-
- """
- # From:
- # https://github.com/Mirantis/virtlet/blob/master/tests/e2e/run_ceph.sh
- if not config.k8s_deploy.kubernetes_virtlet_enabled:
- pytest.skip("Test requires Virtlet addon enabled")
-
- k8s_deployed.git_clone('https://github.com/Mirantis/virtlet',
- '~/virtlet')
- k8s_deployed.install_jq()
-
- target_yaml = "virtlet/tests/e2e/cirros-vm-rbd-volume.yaml"
- vm_name = k8s_deployed.run_vm(target_yaml)
- k8s_deployed.wait_active_state(vm_name)
- domain_name = k8s_deployed.get_domain_name(vm_name)
- vm_volumes_list = k8s_deployed.list_vm_volumes(domain_name)
- assert 'rbd' in vm_volumes_list
- k8s_deployed.delete_vm(target_yaml)
diff --git a/tcp_tests/tests/system/testdata/k8s/cirros-vm.yaml b/tcp_tests/tests/system/testdata/k8s/cirros-vm.yaml
new file mode 100644
index 0000000..5cac75b
--- /dev/null
+++ b/tcp_tests/tests/system/testdata/k8s/cirros-vm.yaml
@@ -0,0 +1,41 @@
+# From virtlet/examples/cirros-vm.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: cirros-vm
+ annotations:
+ # This tells CRI Proxy that this pod belongs to Virtlet runtime
+ kubernetes.io/target-runtime: virtlet.cloud
+ # CirrOS doesn't load nocloud data from SCSI CD-ROM for some reason
+ VirtletDiskDriver: virtio
+ # inject ssh keys via cloud-init
+ VirtletSSHKeys: |
+ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCaJEcFDXEK2ZbX0ZLS1EIYFZRbDAcRfuVjpstSc0De8+sV1aiu+dePxdkuDRwqFtCyk6dEZkssjOkBXtri00MECLkir6FcH3kKOJtbJ6vy3uaJc9w1ERo+wyl6SkAh/+JTJkp7QRXj8oylW5E20LsbnA/dIwWzAF51PPwF7A7FtNg9DnwPqMkxFo1Th/buOMKbP5ZA1mmNNtmzbMpMfJATvVyiv3ccsSJKOiyQr6UG+j7sc/7jMVz5Xk34Vd0l8GwcB0334MchHckmqDB142h/NCWTr8oLakDNvkfC1YneAfAO41hDkUbxPtVBG5M/o7P4fxoqiHEX+ZLfRxDtHB53 me@localhost
+spec:
+ # This nodeAffinity specification tells Kubernetes to run this
+ # pod only on the nodes that have extraRuntime=virtlet label.
+ # This label is used by Virtlet DaemonSet to select nodes
+ # that must have Virtlet runtime
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: extraRuntime
+ operator: In
+ values:
+ - virtlet
+ containers:
+ - name: cirros-vm
+ # This specifies the image to use.
+ # virtlet.cloud/ prefix is used by CRI proxy, the remaining part
+ # of the image name is prepended with https:// and used to download the image
+ image: virtlet.cloud/cirros
+ imagePullPolicy: IfNotPresent
+ # tty and stdin required for `kubectl attach -t` to work
+ tty: true
+ stdin: true
+ resources:
+ limits:
+ # This memory limit is applied to the libvirt domain definition
+ memory: 160Mi