Add OVS arping and OpenContrail ping check
Related-PROD: PROD-27908
(cherry picked from commit 5108e60c0dfbb3ab2d91b1f1914051f6db151991)
(cherry picked from commit 03db7241b15f78ca1e6cca17ff927f30437390d5)
(cherry picked from commit 58000eff4b8ef22a3b3512c3fe0a92958485cd31)
(cherry picked from commit 3ead3e6a2bd38d8aa5a428058f67661a09df71dc)
(cherry picked from commit 90e828b8e47215396c9aaa19e1e26314dae6cf71)
Related-PROD: PROD-28090
(cherry picked from commit 120e22cfddc4a71966cafeac70f0435c7863c2fb)
(cherry picked from commit 29ece3d9a3d9e69e0cf9f3b99d399268295a277a)
Change-Id: Iba15cb0be00f9fc7b6784459a3f0e9039c61d52a
diff --git a/telegraf/files/script/check_opencontrail_ping.py b/telegraf/files/script/check_opencontrail_ping.py
new file mode 100644
index 0000000..4a32c22
--- /dev/null
+++ b/telegraf/files/script/check_opencontrail_ping.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python2
+
+import argparse
+import multiprocessing
+from multiprocessing.pool import ThreadPool
+import requests
+import socket
+import subprocess
+from xml.etree import ElementTree
+
+HOSTNAME = socket.gethostname()
+OPENCONTRAIL_URL = "http://localhost:8085/Snh_ItfReq"
+
+parser = argparse.ArgumentParser()
+parser.add_argument("--host", default="mon")
+parser.add_argument("--port", default="15016")
+parser.add_argument("--processes", type=int, default=multiprocessing.cpu_count() * 2)
+args = parser.parse_args()
+
+PROMETHEUS_QUERY_API = "http://{}:{}/api/v1/query".format(args.host, args.port)
+
+
+def call_process(cmd):
+    """Run *cmd* (whitespace-split, no shell) and wait for it to exit.
+
+    Returns the (stdout, stderr) tuple from Popen.communicate().
+    """
+    p = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    output = p.communicate()
+    return output
+
+
+def instant_query(query):
+    """Run an instant query against the Prometheus HTTP API and return the
+    raw ``data.result`` list from the JSON response."""
+    params = {"query": query}
+    result = requests.get(PROMETHEUS_QUERY_API, params=params).json()["data"]["result"]
+    return result
+
+
+def get_opencontrail_vms():
+    """Query the local OpenContrail vrouter agent introspect (Snh_ItfReq).
+
+    Returns {vm_uuid: [(ip_addr, mdata_ip_addr), ...]} for every interface
+    that carries a vm_uuid in the XML response.
+    """
+    response = requests.get(OPENCONTRAIL_URL)
+    tree = ElementTree.fromstring(response.content)
+    vms = {}
+    for data in tree.iter("ItfSandeshData"):
+        vm_uuid = data.find("vm_uuid").text
+        if not vm_uuid:
+            # Interface not bound to a VM -- skip it.
+            continue
+        ip_addr = data.find("ip_addr").text
+        mdata_ip_addr = data.find("mdata_ip_addr").text
+        addr = (ip_addr, mdata_ip_addr)
+        if vm_uuid in vms:
+            vms[vm_uuid].append(addr)
+        else:
+            vms[vm_uuid] = [addr]
+    return vms
+
+
+def check_output(data):
+    """Parse ping output and return 1 when no packets were lost, else 0.
+
+    *data* is the (stdout, stderr) tuple from call_process().  Scans for the
+    iputils summary line, e.g. "3 packets transmitted, 3 received, ...".
+    """
+    stdout = data[0]
+    transmitted = 0
+    received = -1  # sentinel: never equals transmitted when no summary line was found
+    for line in stdout.split("\n"):
+        if 'transmitted' in line:
+            transmitted = int(line.split()[0])
+            received = int(line.split()[3])
+    return 1 if received == transmitted else 0
+
+
+def gather():
+    """Ping the metadata IP of every active local VM and print the results
+    in InfluxDB line protocol for the telegraf exec input.
+
+    Active VMs are taken from Prometheus (libvirt_domain_info_state == "1"
+    for this host); their addresses come from the vrouter introspect.
+    """
+    hosted_active_vms = set()
+    query = 'libvirt_domain_info_state{host="%s"}' % HOSTNAME
+    metrics = instant_query(query)
+    for metric in metrics:
+        instance_uuid = metric["metric"].get("instance_uuid", "")
+        instance_state = metric["value"][1]
+        if instance_uuid and instance_state == "1":
+            hosted_active_vms.add(instance_uuid)
+    checks = []
+    thread_pool = ThreadPool(args.processes)
+    vms = get_opencontrail_vms()
+    for instance in hosted_active_vms:
+        if instance not in vms:
+            # Known to libvirt but unknown to the vrouter: not pingable.
+            print "instance_ping,id=%s valid=0" % instance
+            continue
+        addresses = vms[instance]
+        for ip, mdata_ip in addresses:
+            # Only link-local metadata addresses are reachable from the host.
+            # NOTE(review): an instance with mixed addresses emits both
+            # valid=0 and valid=1 lines -- confirm consumers handle this.
+            if not mdata_ip.startswith('169.254'):
+                print "instance_ping,id=%s valid=0" % instance
+                continue
+            print "instance_ping,id=%s valid=1" % instance
+            cmd = "ping -c3 -i0.2 -W1 %s" % mdata_ip
+            # Pings run in parallel on the thread pool; results are read below.
+            result = thread_pool.apply_async(call_process, (cmd,))
+            checks.append({"instance_uuid": instance, "result": result, "ip": ip, "mdata_ip": mdata_ip})
+    thread_pool.close()
+    thread_pool.join()
+    for check in checks:
+        output = check["result"].get()
+        print "instance_ping,ip_address=%(ip_address)s,mdata_ip=%(mdata_ip)s,id=%(id)s success=%(success)s" % \
+            {
+                'mdata_ip': check['mdata_ip'],
+                'ip_address': check['ip'],
+                'id': check['instance_uuid'],
+                'success': check_output(output),
+            }
+
+if __name__ == "__main__":
+    # Liveness metric for telegraf; any failure inside gather() is swallowed
+    # on purpose so the exec input reports check_up=0 instead of erroring.
+    try:
+        gather()
+        print "instance_ping check_up=1"
+    except Exception:
+        print "instance_ping check_up=0"
diff --git a/telegraf/files/script/check_ovs_arping.py b/telegraf/files/script/check_ovs_arping.py
new file mode 100644
index 0000000..9a650be
--- /dev/null
+++ b/telegraf/files/script/check_ovs_arping.py
@@ -0,0 +1,356 @@
+#!/usr/bin/env python2
+
+import argparse
+from ctypes import cdll
+import multiprocessing
+from multiprocessing.pool import ThreadPool
+import os
+import re
+import select
+import socket
+import struct
+import subprocess
+import time
+
+import requests
+
+
+parser = argparse.ArgumentParser()
+parser.add_argument("--host", default="mon")
+parser.add_argument("--port", default="15016")
+parser.add_argument("--arp_count", default=3, type=int)
+parser.add_argument("--timeout", default=0.5, type=float)
+parser.add_argument("--processes", default=multiprocessing.cpu_count(), type=int)
+args = parser.parse_args()
+
+PROMETHEUS_QUERY_API = "http://{}:{}/api/v1/query".format(args.host, args.port)
+
+libc = cdll.LoadLibrary('libc.so.6')
+setns = libc.setns
+
+HOSTNAME = socket.gethostname()
+
+# socket timeout
+TIMEOUT = args.timeout
+
+# Ethernet
+TYPEFRAME = 0x0806
+
+# ARP packet
+TYPEHRD = 1
+
+# protocol ip
+PROTOCOLTYPE = 0x0800
+
+# ARP default
+PACKETSIZE = 42
+
+ARPREQUEST = 1
+ARPREPLY = 2
+
+
+class ARPSendPacket(object):
+    """Builds a raw 42-byte Ethernet + ARP request frame.
+
+    All fields are Python 2 byte strings; str(packet) yields the wire frame.
+    """
+
+    @staticmethod
+    def _convert_mac(mac):
+        # "aa:bb:cc:dd:ee:ff" -> 6 raw bytes.
+        macbin = ''
+        for l in re.split(r':', mac):
+            macbin += chr(int('0x' + l, 16))
+        return macbin
+
+    def __init__(self, eth_dest, eth_src, ip_sedr, mac_sedr, ip_recvr, mac_recvr):
+        # ethernet header
+        self._eth_dest = self._convert_mac(eth_dest)
+        self._eth_src = self._convert_mac(eth_src)
+        self._type_frame = struct.pack('H', socket.htons(TYPEFRAME))
+
+        # ARP packet
+        self._type_hrd = struct.pack('H', socket.htons(TYPEHRD))
+        self._type_pro = struct.pack('H', socket.htons(PROTOCOLTYPE))
+        self._mac_len = struct.pack('B', struct.calcsize('6B'))
+        self._op = struct.pack('H', socket.htons(ARPREQUEST))
+        self._mac_sedr = self._convert_mac(mac_sedr)
+        self._ip_sedr = socket.inet_aton(ip_sedr)
+        self._mac_recvr = self._convert_mac(mac_recvr)
+        self._ip_recvr = socket.inet_aton(ip_recvr)
+        self._ip_len = struct.pack('B', len(self._ip_sedr))
+
+    def __str__(self):
+        # Field order follows the Ethernet frame / ARP packet wire layout.
+        return self._eth_dest + self._eth_src + self._type_frame + \
+            self._type_hrd + self._type_pro + self._mac_len + \
+            self._ip_len + self._op + self._mac_sedr + \
+            self._ip_sedr + self._mac_recvr + self._ip_recvr
+
+
+class Namespace(object):
+    """Context manager that enters a network namespace via setns(2) and
+    restores the process's original namespace on exit."""
+
+    @staticmethod
+    def _get_ns_path(nspath=None, nsname=None, nspid=None):
+        # Resolve a namespace reference to its /var/run/netns or /proc path;
+        # an explicit nspath wins only when neither nsname nor nspid is given.
+        if nsname:
+            nspath = '/var/run/netns/%s' % nsname
+        elif nspid:
+            nspath = '/proc/%d/ns/net' % nspid
+        return nspath
+
+    def __init__(self, nsname=None, nspath=None, nspid=None):
+        # Remember our own namespace so __exit__ can switch back.
+        self.mypath = self._get_ns_path(nspid=os.getpid())
+        self.targetpath = self._get_ns_path(nspath,
+                                            nsname=nsname,
+                                            nspid=nspid)
+        if not self.targetpath:
+            raise ValueError('invalid namespace')
+
+    def __enter__(self):
+        # Keep our own ns fd open for the duration of the context.
+        self.myns = open(self.mypath)
+        with open(self.targetpath) as fd:
+            setns(fd.fileno(), 0)
+
+    def __exit__(self, *args):
+        setns(self.myns.fileno(), 0)
+        self.myns.close()
+
+
+def call_process(cmd):
+    """Run *cmd* (whitespace-split, no shell) and wait for it to exit.
+
+    Returns the (stdout, stderr) tuple from Popen.communicate().
+    """
+    p = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    output = p.communicate()
+    return output
+
+
+def recv_arp_packets(soc, ip_bin, ip_recvrs, timeout):
+ time_left = timeout
+ packets = set()
+ ips_left = ip_recvrs.copy()
+ while True:
+ time_start = time.time()
+ srecv = select.select([soc], [], [], time_left)
+ time_received = time.time()
+ time_left -= time_received - time_start
+ if not srecv[0]:
+ return packets
+ try:
+ data = soc.recv(PACKETSIZE)
+ except OSError:
+ return packets
+ if len(data) == PACKETSIZE and ord(data[21]) == ARPREPLY:
+ ip_to = data[38:42]
+ if ip_to == ip_bin:
+ ip_from = socket.inet_ntoa(data[28:32])
+ if ip_from in ips_left:
+ packets.add(data)
+ ips_left.discard(ip_from)
+ if time_left <= 0 or len(ips_left) == 0:
+ return packets
+
+
+def make_subnet_arping(namespace, interface, mac_sedr, ip_sedr, ip_recvrs):
+ with Namespace(nsname=namespace):
+ soc = socket.socket(socket.PF_PACKET, socket.SOCK_RAW)
+ try:
+ soc.bind((interface, TYPEFRAME))
+ except OSError:
+ return set(), ip_recvrs
+ for ip_recvr in ip_recvrs:
+ bc_packet = ARPSendPacket('ff:ff:ff:ff:ff:ff', mac_sedr, ip_sedr, mac_sedr, ip_recvr, '00:00:00:00:00:00')
+ try:
+ soc.send(bc_packet.__str__())
+ except OSError:
+ return set(), ip_recvrs
+ packets = recv_arp_packets(soc, socket.inet_aton(ip_sedr), ip_recvrs, TIMEOUT)
+ res_addrs = set()
+ for packet in packets:
+ ip_from = packet[28:32]
+ mac_from = packet[6:12]
+ res_addrs.add((ip_from, mac_from))
+ for _ in range(args.arp_count - 1):
+ tmp_res_addrs = set()
+ str_ips = set()
+ for addr in res_addrs:
+ ip_str = socket.inet_ntoa(addr[0])
+ str_ips.add(ip_str)
+ mac_str = ':'.join(x.encode('hex') for x in addr[1])
+ uc_packet = ARPSendPacket(mac_str, mac_sedr, ip_sedr, mac_sedr, ip_str, mac_str)
+ try:
+ soc.send(uc_packet.__str__())
+ except OSError:
+ return set(), ip_recvrs
+ packets = recv_arp_packets(soc, socket.inet_aton(ip_sedr), str_ips, TIMEOUT)
+ for packet in packets:
+ ip_from = packet[28:32]
+ mac_from = packet[6:12]
+ tmp_res_addrs.add((ip_from, mac_from))
+ res_addrs = tmp_res_addrs
+ ips = set()
+ for addr in res_addrs:
+ ips.add(socket.inet_ntoa(addr[0]))
+ soc.close()
+ return ips, ip_recvrs.difference(ips)
+
+
+def instant_query(expression):
+    """Run an instant query against the Prometheus HTTP API and return the
+    raw ``data.result`` list from the JSON response."""
+    params = {"query": expression}
+    result = requests.get(PROMETHEUS_QUERY_API, params=params).json()["data"]["result"]
+    return result
+
+
+def parse_ip_a(data):
+    """Parse ``ip address`` output into a dict:
+    {ifname: {"mac": str, "addresses": [(ip, prefixlen), ...]}}.
+    The loopback interface and the leading empty entry are dropped.
+    """
+
+    # Expected data format:
+    # 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
+    #     link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
+    #     inet 127.0.0.1/8 scope host lo
+    #        valid_lft forever preferred_lft forever
+    #     inet6 ::1/128 scope host
+    #        valid_lft forever preferred_lft forever
+
+    interfaces = {}
+    name = ""
+    mac = ""
+    addresses = []
+    for line in data.split('\n'):
+        # An interface header line flushes the previous interface's data.
+        m = re.match(r"(\d+):\s?(.+):", line)
+        if m:
+            interfaces[name] = {"addresses": addresses, "mac": mac}
+            mac = ""
+            addresses = []
+            name = m.group(2)
+        m = re.match(r"\s+link/\w+\s([0-9a-f:]+)", line)
+        if m:
+            mac = m.group(1)
+        m = re.match(r"\s+inet\s([0-9.]+)/(\d+)", line)
+        if m:
+            addresses.append((m.group(1), m.group(2)))
+    # Flush the last interface, then drop the dummy "" entry and loopback.
+    interfaces[name] = {"addresses": addresses, "mac": mac}
+    interfaces.pop("", None)
+    interfaces.pop("lo", None)
+    return interfaces
+
+
+def get_ip4_network(ip, mask):
+    """Return the dotted-quad network address of *ip* under prefix length *mask*."""
+    host_bits = 32 - int(mask)
+    netmask = socket.inet_ntoa(struct.pack('!I', (1 << 32) - (1 << host_bits)))
+    # Byte-wise AND of address and netmask.
+    subnet = ".".join(map(str, [i & m for i, m in zip(map(int, ip.split(".")), map(int, netmask.split(".")))]))
+    return subnet
+
+
+def gather():
+    """Collect per-instance ARP reachability metrics and print them in
+    InfluxDB line protocol for the telegraf exec input.
+
+    Flow: find networks whose DHCP agent runs on this host, intersect with
+    DHCP-enabled subnets and active instances (all via Prometheus), read the
+    qdhcp namespace interfaces, then arping every matching port in parallel.
+    """
+    # Networks whose DHCP agent is scheduled on this host.
+    hosted_networks = set()
+    dhcp_agents = instant_query("openstack_neutron_agent_dhcp_metadata")
+    for agent in dhcp_agents:
+        host = agent["metric"]["hostname"]
+        if host == HOSTNAME:
+            network_id = agent["metric"]["network_id"]
+            hosted_networks.add(network_id)
+
+    # DHCP-enabled subnets, plus the hosted networks containing one.
+    target_networks = set()
+    dhcp_subnets = set()
+    dhcp_enabled = instant_query("openstack_neutron_subnet_enable_dhcp")
+    for net in dhcp_enabled:
+        enabled = net["value"][1]
+        if enabled == "1":
+            subnet_id = net["metric"]["id"]
+            dhcp_subnets.add(subnet_id)
+            network_id = net["metric"]["network_id"]
+            if network_id in hosted_networks:
+                target_networks.add(network_id)
+
+    # Instances with status value "0" (per the exporter's encoding --
+    # presumably the active state; confirm against the nova exporter).
+    active_instances = set()
+    instances = instant_query("openstack_nova_instance_status")
+    for instance in instances:
+        status = instance["value"][1]
+        if status == "0":
+            instance_id = instance["metric"]["id"]
+            active_instances.add(instance_id)
+
+    # Dump interface/address data of every qdhcp namespace in parallel.
+    thread_pool = ThreadPool(multiprocessing.cpu_count())
+    ns_checks = []
+    for network_id in target_networks:
+        cmd = "ip netns exec qdhcp-{} ip address".format(network_id)
+        result = thread_pool.apply_async(call_process, (cmd,))
+        ns_checks.append({"result": result, "network_id": network_id})
+    thread_pool.close()
+    thread_pool.join()
+
+    namespaces_ifs = {}
+    for ns_check in ns_checks:
+        network_id = ns_check["network_id"]
+        out, err = ns_check["result"].get()
+        namespaces_ifs[network_id] = parse_ip_a(out)
+
+    # Group target IPs per "<ns> <ifname> <mac> <src_ip>" key so a single
+    # arping job covers a whole subnet; mapping keeps per-IP metadata.
+    ports = instant_query("openstack_neutron_port_metadata")
+    check_map = {}
+    mapping = {}
+    for port in ports:
+        network_id = port["metric"].get("network_id", "")
+        if network_id not in hosted_networks:
+            continue
+        subnet_id = port["metric"].get("subnet_id", "")
+        if subnet_id not in dhcp_subnets:
+            continue
+        device_id = port["metric"].get("device_id", "")
+        if device_id not in active_instances:
+            continue
+        ip_address = port["metric"].get("ip_address", "")
+        # Crude IPv4-only filter: skip IPv6 and malformed addresses.
+        if len(ip_address.split('.')) != 4:
+            continue
+
+        ns_ifs = namespaces_ifs[network_id]
+        if len(ns_ifs) == 0:
+            continue
+        # NOTE(review): takes an arbitrary interface of the namespace;
+        # assumes a qdhcp namespace has exactly one tap device -- confirm.
+        name = ns_ifs.keys()[0]
+        interface = ns_ifs[name]
+        mac = interface.get("mac", "")
+        addresses = interface.get("addresses", [])
+        ns = "qdhcp-{}".format(network_id)
+        for addr in addresses:
+            a_ip = addr[0]
+            mask = addr[1]
+            net = get_ip4_network(a_ip, mask)
+            target_net = get_ip4_network(ip_address, mask)
+            if net == target_net:
+                # Source address on the same subnet as the port found.
+                id = ns + " " + name + " " + mac + " " + a_ip
+                if id not in check_map:
+                    check_map[id] = set()
+                check_map[id].add(ip_address)
+                id_long = id + " " + ip_address
+                mapping[id_long] = {"network_id": network_id, "device_id": device_id, "ip": ip_address}
+                break
+    # Fan out one arping job per (namespace, interface, source ip) group.
+    checks = []
+    pool = ThreadPool(args.processes)
+    for id in check_map:
+        ns, name, mac, a_ip = id.split(' ')
+        result = pool.apply_async(make_subnet_arping, (ns, name, mac, a_ip, check_map[id]))
+        checks.append({"result": result, "id": id})
+    pool.close()
+    pool.join()
+    for check in checks:
+        id = check["id"]
+        try:
+            passed, failed = check["result"].get()
+        except IOError:
+            # I/O failure in the worker (e.g. namespace vanished): count
+            # every IP of this group as failed.
+            passed, failed = set(), check_map[id]
+        for ip in passed:
+            id_long = id + " " + ip
+            res = mapping[id_long]
+            print "instance_arping,network_id=%(network_id)s,ip_address=%(ip_address)s,id=%(id)s success=%(success)s" % \
+                {
+                    'network_id': res['network_id'],
+                    'ip_address': res['ip'],
+                    'id': res['device_id'],
+                    'success': "1"
+                }
+        for ip in failed:
+            id_long = id + " " + ip
+            res = mapping[id_long]
+            print "instance_arping,network_id=%(network_id)s,ip_address=%(ip_address)s,id=%(id)s success=%(success)s" % \
+                {
+                    'network_id': res['network_id'],
+                    'ip_address': res['ip'],
+                    'id': res['device_id'],
+                    'success': "0"
+                }
+if __name__ == "__main__":
+    # Liveness metric for telegraf; failures are swallowed on purpose so the
+    # exec input reports check_up=0 instead of erroring.
+    try:
+        gather()
+        print "instance_arping check_up=1"
+    except Exception:
+        print "instance_arping check_up=0"
diff --git a/telegraf/init.sls b/telegraf/init.sls
index 7345d3b..99cbb0f 100644
--- a/telegraf/init.sls
+++ b/telegraf/init.sls
@@ -5,3 +5,4 @@
{%- if pillar.telegraf.remote_agent is defined %}
- telegraf.remote_agent
{%- endif %}
+ - telegraf.script
diff --git a/telegraf/meta/prometheus.yml b/telegraf/meta/prometheus.yml
index c93b7f3..4e77e67 100644
--- a/telegraf/meta/prometheus.yml
+++ b/telegraf/meta/prometheus.yml
@@ -27,6 +27,82 @@
summary: "The {{ $labels.host }} node is down"
description: "The {{ $labels.host }} node is unreachable at {{ $labels.url }}, the Telegraf and Fluentd targets on the {{ $labels.host }} node are down."
{%- endraw %}
+{%- if pillar.neutron is defined %}
+ {%- if pillar.neutron.get('gateway', {}).get('enabled', False) == True or (pillar.neutron.get('compute',{}).get('enabled', False) == True and pillar.neutron.get('compute',{}).get('dhcp_agent_enabled', False) == True) %}
+ OVSInstanceArpingCheckDown:
+ if: instance_arping_check_up == 0
+ for: 2m
+ labels:
+ severity: major
+ service: ovs
+ annotations:
+ summary: "The OVS instance arping check is down"
+ {%- raw %}
+ description: "The OVS instance arping check on the {{ $labels.host }} node is down for 2 minutes."
+ {%- endraw %}
+ {%- endif %}
+{%- endif %}
+{%- if pillar.opencontrail is defined %}
+ {%- if pillar.opencontrail.get('compute', {}).get('enabled', False) == True %}
+ OpencontrailInstancePingCheckDown:
+ if: instance_ping_check_up == 0
+ for: 2m
+ labels:
+ severity: major
+ service: contrail
+ annotations:
+ summary: "The Opencontrail instance ping check is down"
+ {%- raw %}
+ description: "The Opencontrail instance ping check on the {{ $labels.host }} node is down for 2 minutes."
+ {%- endraw %}
+ {%- endif %}
+{%- endif %}
+{%- if pillar.neutron is defined %}
+ {%- if pillar.neutron.get('gateway', {}).get('enabled', False) == True or (pillar.neutron.get('compute',{}).get('enabled', False) == True and pillar.neutron.get('compute',{}).get('dhcp_agent_enabled', False) == True) %}
+ recording:
+ instance_id:instance_arping_success:
+ query: >-
+ avg(instance_arping_success) by (id)
+ instance_id:instance_arping_success:avg10m:for10m:
+ query: >-
+ avg_over_time(instance_id:instance_arping_success[10m]) and instance_id:instance_arping_success and instance_id:instance_arping_success offset 10m
+ total:instance_id:instance_arping_success:avg10m:for10m:
+ query: >-
+ count(instance_id:instance_arping_success:avg10m:for10m)
+ total:instance_id:instance_arping_success:avg10m:for10m:eq0:
+ query: >-
+ count(instance_id:instance_arping_success:avg10m:for10m == 0)
+ total:openstack_nova_instance_failed:
+ query: >-
+ count(instance_id:instance_arping_success:avg10m:for10m == 0 or on(id) openstack_nova_instance_status == 2)
+ total:openstack_nova_instance_all:
+ query: >-
+ count(instance_id:instance_arping_success:avg10m:for10m or on(id) openstack_nova_instance_status)
+ {%- endif %}
+{%- endif %}
+{%- if pillar.opencontrail is defined %}
+ {%- if pillar.opencontrail.get('compute', {}).get('enabled', False) == True %}
+ recording:
+ instance_id:instance_ping_success:
+ query: >-
+ avg(instance_ping_success) by (id) * on(id) instance_ping_valid or on(id) instance_ping_valid
+ instance_id:instance_ping_success:avg10m:for10m:
+ query: >-
+ avg_over_time(instance_id:instance_ping_success[10m]) and instance_id:instance_ping_success and instance_id:instance_ping_success offset 10m
+ total:instance_id:instance_ping_success:avg10m:for10m:
+ query: >-
+ count(instance_id:instance_ping_success:avg10m:for10m)
+ total:instance_id:instance_ping_success:avg10m:for10m:eq0:
+ query: >-
+ count(instance_id:instance_ping_success:avg10m:for10m == 0)
+ total:openstack_nova_instance_failed:
+ query: >-
+ count(instance_id:instance_ping_success:avg10m:for10m == 0 or on(id) openstack_nova_instance_status == 2)
+ total:openstack_nova_instance_all:
+ query: >-
+ count(instance_id:instance_ping_success:avg10m:for10m or on(id) openstack_nova_instance_status)
+ {%- endif %}
+{%- endif %}
{%- if address is defined %}
target:
static:
diff --git a/telegraf/meta/telegraf.yml b/telegraf/meta/telegraf.yml
index 2965166..d05b4fd 100644
--- a/telegraf/meta/telegraf.yml
+++ b/telegraf/meta/telegraf.yml
@@ -1,3 +1,27 @@
+{%- if pillar.neutron is defined %}
+ {%- if pillar.neutron.get('gateway', {}).get('enabled', False) == True or (pillar.neutron.get('compute',{}).get('enabled', False) == True and pillar.neutron.get('compute',{}).get('dhcp_agent_enabled', False) == True) %}
+ {%- set prometheus_address = pillar._param.stacklight_monitor_address %}
+agent:
+ input:
+ ovs_arping_check:
+ template: telegraf/files/input/exec.conf
+ commands: "/usr/local/bin/check_ovs_arping.py --host {{ prometheus_address }} --port 15016"
+ interval: 45s
+ {%- endif %}
+{%- endif %}
+
+{%- if pillar.opencontrail is defined %}
+ {%- if pillar.opencontrail.get('compute', {}).get('enabled', False) == True %}
+ {%- set prometheus_address = pillar._param.stacklight_monitor_address %}
+agent:
+ input:
+ opencontrail_ping_check:
+ template: telegraf/files/input/exec.conf
+ commands: "/usr/local/bin/check_opencontrail_ping.py --host {{ prometheus_address }} --port 15016"
+ interval: 45s
+ {%- endif %}
+{%- endif %}
+
{%- if pillar.telegraf.remote_agent is defined %}
{%- set addresses = [] %}
{%- for node_name, node_grains in salt['mine.get']('*', 'grains.items').items() %}
diff --git a/telegraf/script.sls b/telegraf/script.sls
new file mode 100644
index 0000000..f809924
--- /dev/null
+++ b/telegraf/script.sls
@@ -0,0 +1,26 @@
+{%- if pillar.neutron is defined %}
+ {%- if pillar.neutron.get('gateway', {}).get('enabled', False) == True or (pillar.neutron.get('compute',{}).get('enabled', False) == True and pillar.neutron.get('compute',{}).get('dhcp_agent_enabled', False) == True) %}
+
+ovs_arping_check_telegraf_script:
+ file.managed:
+ - name: /usr/local/bin/check_ovs_arping.py
+ - source: salt://telegraf/files/script/check_ovs_arping.py
+ - template: jinja
+ - mode: 755
+
+ {%- endif %}
+{%- endif %}
+
+{%- if pillar.opencontrail is defined %}
+ {%- if pillar.opencontrail.get('compute', {}).get('enabled', False) == True %}
+ {%- set prometheus_address = pillar._param.stacklight_monitor_address %}
+
+opencontrail_ping_check_telegraf_script:
+ file.managed:
+ - name: /usr/local/bin/check_opencontrail_ping.py
+ - source: salt://telegraf/files/script/check_opencontrail_ping.py
+ - template: jinja
+ - mode: 755
+
+ {%- endif %}
+{%- endif %}