Merge pull request #26 from simonpasquier/extend-glusterfs-metrics
Extend GlusterFS metrics
diff --git a/collectd/files/plugin/collectd_glusterfs.py b/collectd/files/plugin/collectd_glusterfs.py
index 1b57984..fbcfc1f 100644
--- a/collectd/files/plugin/collectd_glusterfs.py
+++ b/collectd/files/plugin/collectd_glusterfs.py
@@ -24,6 +24,30 @@
peer_re = re.compile(r'^Hostname: (?P<peer>.+)$', re.MULTILINE)
state_re = re.compile(r'^State: (?P<state>.+)$', re.MULTILINE)
+vol_status_re = re.compile(r'\n\s*\n', re.MULTILINE)
+vol_block_re = re.compile(r'^-+', re.MULTILINE)
+volume_re = re.compile(r'^Status of volume:\s+(?P<volume>.+)', re.MULTILINE)
+brick_server_re = re.compile(r'^Brick\s*:\s*Brick\s*(?P<peer>[^:]+)',
+ re.MULTILINE)
+disk_free_re = re.compile(
+ r'^Disk Space Free\s*:\s+(?P<disk_free>[.\d]+)(?P<unit>\S+)',
+ re.MULTILINE)
+disk_total_re = re.compile(
+ r'^Total Disk Space\s*:\s+(?P<disk_total>[.\d]+)(?P<unit>\S+)',
+ re.MULTILINE)
+inode_free_re = re.compile(r'^Free Inodes\s*:\s+(?P<inode_free>\d+)',
+ re.MULTILINE)
+inode_count_re = re.compile(r'^Inode Count\s*:\s+(?P<inode_count>\d+)',
+ re.MULTILINE)
+
+
+def convert_to_bytes(v, unit):
+ try:
+ i = ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB').index(unit)
+ except ValueError:
+ i = 1
+ return float(v) * (1024 ** i)
+
class GlusterfsPlugin(base.Base):
@@ -32,11 +56,11 @@
self.plugin = NAME
def itermetrics(self):
- # Collect peers' status
+ # Collect peers' metrics
out, err = self.execute([GLUSTER_BINARY, 'peer', 'status'],
shell=False)
if not out:
- raise base.CheckException("Failed to execute gluster")
+ raise base.CheckException("Failed to execute 'gluster peer'")
total = 0
total_by_state = {
@@ -79,6 +103,105 @@
}
}
+ # Collect volumes' metrics
+ out, err = self.execute(
+ [GLUSTER_BINARY, 'volume', 'status', 'all', 'detail'],
+ shell=False)
+ if not out:
+ raise base.CheckException("Failed to execute 'gluster volume'")
+
+ for vol_block in vol_status_re.split(out):
+ volume_m = volume_re.search(vol_block)
+ if not volume_m:
+ continue
+ volume = volume_m.group('volume')
+ for line in vol_block_re.split(vol_block):
+ peer_m = brick_server_re.search(line)
+ if not peer_m:
+ continue
+                # 'volume' is already bound above for this status block
+ peer = peer_m.group('peer')
+ disk_free_m = disk_free_re.search(line)
+ disk_total_m = disk_total_re.search(line)
+ inode_free_m = inode_free_re.search(line)
+ inode_count_m = inode_count_re.search(line)
+ if disk_free_m and disk_total_m:
+ free = convert_to_bytes(
+ disk_free_m.group('disk_free'),
+ disk_free_m.group('unit'))
+ total = convert_to_bytes(
+ disk_total_m.group('disk_total'),
+ disk_total_m.group('unit'))
+ used = total - free
+ yield {
+ 'type_instance': 'space_free',
+ 'values': free,
+ 'meta': {
+ 'volume': volume,
+ 'peer': peer,
+ }
+ }
+ yield {
+ 'type_instance': 'space_percent_free',
+ 'values': free * 100.0 / total,
+ 'meta': {
+ 'volume': volume,
+ 'peer': peer,
+ }
+ }
+ yield {
+ 'type_instance': 'space_used',
+ 'values': used,
+ 'meta': {
+ 'volume': volume,
+ 'peer': peer,
+ }
+ }
+ yield {
+ 'type_instance': 'space_percent_used',
+ 'values': used * 100.0 / total,
+ 'meta': {
+ 'volume': volume,
+ 'peer': peer,
+ }
+ }
+ if inode_free_m and inode_count_m:
+ free = int(inode_free_m.group('inode_free'))
+ total = int(inode_count_m.group('inode_count'))
+ used = total - free
+ yield {
+ 'type_instance': 'inodes_free',
+ 'values': free,
+ 'meta': {
+ 'volume': volume,
+ 'peer': peer,
+ }
+ }
+ yield {
+ 'type_instance': 'inodes_percent_free',
+ 'values': free * 100.0 / total,
+ 'meta': {
+ 'volume': volume,
+ 'peer': peer,
+ }
+ }
+ yield {
+ 'type_instance': 'inodes_used',
+ 'values': used,
+ 'meta': {
+ 'volume': volume,
+ 'peer': peer,
+ }
+ }
+ yield {
+ 'type_instance': 'inodes_percent_used',
+ 'values': used * 100.0 / total,
+ 'meta': {
+ 'volume': volume,
+ 'peer': peer,
+ }
+ }
+
plugin = GlusterfsPlugin(collectd)