Merge "Fix decoders and filters for hostname-free metrics"
diff --git a/heka/files/lua/common/lma_utils.lua b/heka/files/lua/common/lma_utils.lua
index b310ec4..349e731 100644
--- a/heka/files/lua/common/lma_utils.lua
+++ b/heka/files/lua/common/lma_utils.lua
@@ -113,7 +113,6 @@
         Type = 'bulk_metric', -- prepended with 'heka.sandbox'
         Severity = label_to_severity_map.INFO,
         Fields = {
-            hostname = hostname,
             source = source,
             type = m_type or metric_type['GAUGE']
       }
diff --git a/heka/files/lua/decoders/collectd.lua b/heka/files/lua/decoders/collectd.lua
index ef5f27d..8647cd8 100644
--- a/heka/files/lua/decoders/collectd.lua
+++ b/heka/files/lua/decoders/collectd.lua
@@ -32,25 +32,19 @@
 }
 
--- The following table keeps a list of metrics from plugin where the
--- hostname is not relevant.
+-- The following table keeps a list of plugins for which Fields[hostname]
+-- shouldn't be set by default.
 local hostname_free = {
-    -- Add "metric_source = true" to skip the hostname for all metrics
-    -- from the metric_source
-    -- Add "metric_source = { list of metrics } to skip hostname for a
-    -- subset of metrics. The list of metrics is referenced through the
-    -- field 'type_instance'.
-    hypervisor_stats = {
-        total_free_disk_GB = true,
-        total_free_ram_MB = true,
-        total_free_vcpus = true,
-        total_used_disk_GB = true,
-        total_used_ram_MB = true,
-        total_used_vcpus = true,
-        total_running_instances = true,
-        total_running_tasks = true,
-    },
+    ceph_mon = true,
+    ceph_pool = true,
     check_openstack_api = true,
+    cinder = true,
+    glance = true,
     http_check = true,
+    hypervisor_stats = true,
+    keystone = true,
+    neutron = true,
+    nova = true,
+    pacemaker = true,
 }
 
 -- this is needed for the libvirt metrics because in that case, collectd sends
@@ -100,27 +94,21 @@
                 }
             }
 
+            -- Check whether Fields[hostname] should be added to the metric message
+            if not hostname_free[metric_source] then
+                msg['Fields']['hostname'] = sample['host']
+            end
+
             -- Normalize metric name, unfortunately collectd plugins aren't
             -- always consistent on metric namespaces so we need a few if/else
             -- statements to cover all cases.
-
-            -- Check if hostname is needed or not
-            local add_hostname = true
-            if hostname_free[metric_source] == true then
-                add_hostname = false
-            elseif hostname_free[metric_source] and
-                hostname_free[metric_source][sample['type_instance']] then
-                add_hostname = false
-            end
-
-            if add_hostname then
-                msg['Fields']['hostname'] = sample['host']
-                table.insert(msg['Fields']['tag_fields'], 'hostname')
-            end
-
             if sample['meta'] and sample['meta']['service_check'] then
                 msg['Fields']['name'] = sample['meta']['service_check'] .. sep .. 'check'
                 msg['Fields']['details'] = sample['meta']['failure']
+                if sample['meta']['local_check'] then
+                    -- if the check is local to the node, add the hostname
+                    msg['Fields']['hostname'] = sample['host']
+                end
             elseif metric_source == 'memory' or metric_source == 'contextswitch' or
                    metric_source == 'entropy' or metric_source == 'load' or
                    metric_source == 'swap' or metric_source == 'uptime' then
@@ -252,6 +240,9 @@
                 else
                     msg['Fields']['name'] = msg['Fields']['name'] .. sample['type_instance']
                 end
+                if sample['meta'] and sample['meta']['host'] then
+                    msg['Fields']['hostname'] = sample['meta']['host']
+                end
                 if sample['meta'] and sample['meta']['aggregate'] then
                     msg['Fields']['aggregate'] = sample['meta']['aggregate']
                     table.insert(msg['Fields']['tag_fields'], 'aggregate')
@@ -470,6 +461,13 @@
                 end
                 msg['Fields']['name'] = replace_dot_by_sep(msg['Fields']['name'])
 
+                -- Check whether the hostname field should be kept or not
+                -- (e.g. for cluster metrics, discard_hostname == true)
+                if sample['meta'] and sample['meta']['discard_hostname'] then
+                    msg['Fields']['hostname'] = nil
+                    sample['meta']['discard_hostname'] = nil
+                end
+
                 -- add meta fields as tag_fields
                 for k, v in pairs(sample['meta'] or {}) do
                     if tostring(k) ~= '0' then
@@ -480,6 +478,9 @@
             end
 
             if not skip_it then
+                if msg['Fields']['hostname'] then
+                    table.insert(msg['Fields']['tag_fields'], 'hostname')
+                end
                 utils.inject_tags(msg)
                 -- Before injecting the message we need to check that tag_fields is not an
                 -- empty table otherwise the protobuf encoder fails to encode the table.
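
To summarize the decoder changes above: a sample only gets Fields[hostname] when its plugin is not listed in hostname_free, and the meta flags local_check, host and discard_hostname override that default. The snippet below is a minimal standalone sketch of that resolution logic, not part of the patch; the hostname_free subset and the helper name hostname_for are illustrative only, and in the real decoder each meta flag is only honoured in its own plugin branch.

    local hostname_free = { hypervisor_stats = true, pacemaker = true }  -- subset, for illustration

    local function hostname_for(metric_source, sample)
        local host
        -- hostname-free plugins don't get Fields[hostname] by default
        if not hostname_free[metric_source] then
            host = sample['host']
        end
        local meta = sample['meta']
        if meta then
            if meta['local_check'] then
                -- service checks local to the node keep their hostname
                host = sample['host']
            end
            if meta['host'] then
                -- some plugins report the relevant host through meta
                host = meta['host']
            end
            if meta['discard_hostname'] then
                -- cluster-level metrics explicitly drop the hostname
                host = nil
            end
        end
        return host
    end

Only when the resulting value is set is 'hostname' also appended to tag_fields before the message is injected.
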
diff --git a/heka/files/lua/decoders/metric.lua b/heka/files/lua/decoders/metric.lua
index 0994a00..9b26158 100644
--- a/heka/files/lua/decoders/metric.lua
+++ b/heka/files/lua/decoders/metric.lua
@@ -68,7 +68,6 @@
             end
             fields['tag_fields'] = tag_fields
             fields['name'] = metric.name
-            fields['hostname'] = msg.Hostname
 
             new_msg.Type = metric_type
             new_msg.Fields = fields
diff --git a/heka/files/lua/filters/hdd_errors_counter.lua b/heka/files/lua/filters/hdd_errors_counter.lua
index 6e5146c..f34da24 100644
--- a/heka/files/lua/filters/hdd_errors_counter.lua
+++ b/heka/files/lua/filters/hdd_errors_counter.lua
@@ -76,9 +76,12 @@
 
     local delta_sec = (ns - (enter_at or 0)) / 1e9
     for dev, value in pairs(error_counters) do
-        -- Don`t send values from first ticker interval
+        -- Don't send values at the first ticker interval
         if enter_at ~= nil then
-            utils.add_to_bulk_metric("hdd_errors_rate", value / delta_sec, {device=dev})
+            utils.add_to_bulk_metric(
+                "hdd_errors_rate",
+                value / delta_sec,
+                {device=dev, hostname=hostname})
         end
         error_counters[dev] = 0
     end
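
This filter now passes hostname explicitly because the metric decoder no longer injects it. The line below is a sketch of the declaration the filter is assumed to have near its top; read_config is the standard Heka sandbox API, but the exact configuration option name is an assumption.

    -- read the node's hostname from the filter configuration
    local hostname = read_config('hostname') or error('hostname must be specified')
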
diff --git a/heka/files/lua/filters/heka_monitoring.lua b/heka/files/lua/filters/heka_monitoring.lua
index c40f0c3..c61251f 100644
--- a/heka/files/lua/filters/heka_monitoring.lua
+++ b/heka/files/lua/filters/heka_monitoring.lua
@@ -16,7 +16,7 @@
 require 'math'
 local utils  = require 'lma_utils'
 
-function process_table(typ, array)
+function process_table(typ, array, tags)
     -- NOTE: It has been written for "filters" and "decoders". If we need to
     -- use it to collect metrics from other components  of the Heka pipeline,
     -- we need to ensure that JSON provides names and table with
@@ -41,10 +41,7 @@
             -- strip off the '_decoder'/'_filter' suffix
             local name = v['Name']:gsub("_" .. typ, "")
 
-            local tags = {
-                ['type'] = typ,
-                ['name'] = name,
-            }
+            tags['name'] = name
 
             utils.add_to_bulk_metric('hekad_msg_count', v.ProcessMessageCount.value, tags)
             utils.add_to_bulk_metric('hekad_msg_avg_duration', v.ProcessMessageAvgDuration.value, tags)
@@ -76,7 +73,8 @@
 
     for k, v in pairs(data) do
         if k == "filters" or k == "decoders" then
-            process_table(singularize(k), v)
+            local typ = singularize(k)
+            process_table(typ, v, {hostname=hostname, ['type']=typ})
         end
     end
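
With this change every hekad_msg_count and hekad_msg_avg_duration metric carries hostname, type and name tags. As an illustration (the 'collectd' name is only an example), the call issued for a decoder entry is equivalent to:

    utils.add_to_bulk_metric('hekad_msg_count', v.ProcessMessageCount.value,
        {hostname = hostname, ['type'] = 'decoder', name = 'collectd'})

Since process_table() overwrites tags['name'] on each iteration, callers should pass a fresh tags table per call, as the loop above does.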