Diffstat (limited to 'meta-linaro/recipes-extra/ganglia/ganglia/gmond-example.conf')
-rw-r--r--  meta-linaro/recipes-extra/ganglia/ganglia/gmond-example.conf  383
1 file changed, 383 insertions(+), 0 deletions(-)
diff --git a/meta-linaro/recipes-extra/ganglia/ganglia/gmond-example.conf b/meta-linaro/recipes-extra/ganglia/ganglia/gmond-example.conf
new file mode 100644
index 0000000..a94b4b5
--- /dev/null
+++ b/meta-linaro/recipes-extra/ganglia/ganglia/gmond-example.conf
@@ -0,0 +1,383 @@
/* This configuration is as close to 2.5.x default behavior as possible.
   The values closely match ./gmond/metric.h definitions in 2.5.x */
globals {
  daemonize = yes
  setuid = yes
  user = nobody
  debug_level = 0
  max_udp_msg_len = 1472
  mute = no
  deaf = yes
  allow_extra_data = yes
  host_dmax = 86400 /*secs. Expires (removes from web interface) hosts in 1 day */
  host_tmax = 20 /*secs */
  cleanup_threshold = 300 /*secs */
  gexec = no
  # By default gmond will use reverse DNS resolution when displaying your hostname.
  # Uncommenting the following value will override that behavior.
  # override_hostname = "mywebserver.domain.com"
  # If you are not using multicast this value should be set to something other than 0.
  # Otherwise, if you restart the aggregator gmond, you will get empty graphs. 60 seconds is reasonable.
  send_metadata_interval = 20 /*secs */

}

/*
 * The cluster attributes specified will be used as part of the <CLUSTER>
 * tag that will wrap all hosts collected by this instance.
 */
cluster {
  name = "unspecified"
  owner = "unspecified"
  latlong = "unspecified"
  url = "unspecified"
}

/* The host section describes attributes of the host, like the location */
host {
  location = "unspecified"
}

/* Feel free to specify as many udp_send_channels as you like. Gmond
   used to only support having a single channel */
udp_send_channel {
  #bind_hostname = yes # Highly recommended, soon to be default.
                       # This option tells gmond to use a source address
                       # that resolves to the machine's hostname. Without
                       # this, the metrics may appear to come from any
                       # interface and the DNS names associated with
                       # those IPs will be used to create the RRDs.
  mcast_join = 239.2.11.71
  port = 8649
  ttl = 1
}
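
/* A sketch of an additional unicast send channel: gmond accepts a plain
   "host =" target in place of a multicast group. The hostname below is a
   placeholder for illustration only, not part of this example config. */
#udp_send_channel {
#  host = aggregator.example.com
#  port = 8649
#}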

/* You can specify as many udp_recv_channels as you like as well. */
udp_recv_channel {
  mcast_join = 239.2.11.71
  port = 8649
  bind = 239.2.11.71
  retry_bind = true
  # Size of the UDP buffer. If you are handling lots of metrics you really
  # should bump it up to e.g. 10MB or even higher.
  # buffer = 10485760
}
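
/* A minimal unicast receive channel sketch, for use with a "host =" send
   channel like the one above instead of multicast; it listens on the same
   default port 8649 used elsewhere in this file. */
#udp_recv_channel {
#  port = 8649
#}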

/* You can specify as many tcp_accept_channels as you like to share
   an xml description of the state of the cluster */
tcp_accept_channel {
  port = 8649
  # If you want to gzip XML output
  gzip_output = no
}
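
/* A sketch of an access-controlled TCP channel. gmond acl blocks can
   restrict who may poll the XML state; the address below is illustrative
   (allow localhost only, deny everything else). */
#tcp_accept_channel {
#  port = 8649
#  acl {
#    default = "deny"
#    access {
#      ip = 127.0.0.1
#      mask = 32
#      action = "allow"
#    }
#  }
#}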

/* Channel to receive sFlow datagrams */
#udp_recv_channel {
#  port = 6343
#}

/* Optional sFlow settings */
#sflow {
#  udp_port = 6343
#  accept_vm_metrics = yes
#  accept_jvm_metrics = yes
#  multiple_jvm_instances = no
#  accept_http_metrics = yes
#  multiple_http_instances = no
#  accept_memcache_metrics = yes
#  multiple_memcache_instances = no
#}

/* Each metrics module that is referenced by gmond must be specified and
   loaded. If the module has been statically linked with gmond, it does
   not require a load path. However all dynamically loadable modules must
   include a load path. */
modules {
  module {
    name = "core_metrics"
  }
  module {
    name = "cpu_module"
    path = "modcpu.so"
  }
  module {
    name = "disk_module"
    path = "moddisk.so"
  }
  module {
    name = "load_module"
    path = "modload.so"
  }
  module {
    name = "mem_module"
    path = "modmem.so"
  }
  module {
    name = "net_module"
    path = "modnet.so"
  }
  module {
    name = "proc_module"
    path = "modproc.so"
  }
  module {
    name = "sys_module"
    path = "modsys.so"
  }
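  /* A hypothetical dynamically loaded module, shown for illustration only;
     "example_module", modexample.so and the params string are placeholders,
     not modules shipped with this recipe. gmond passes the params string to
     the module's initialization routine. */
  #module {
  #  name = "example_module"
  #  path = "modexample.so"
  #  params = "opaque-init-string"
  #}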
}

/* The old internal 2.5.x metric array has been replaced by the following
   collection_group directives. What follows is the default behavior for
   collecting and sending metrics that is as close to 2.5.x behavior as
   possible. */
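
/* For reference, the general shape of a group: collect_every sets the
   sampling interval, time_threshold caps how long gmond waits before
   (re)sending a metric, and a metric's value_threshold forces an early
   send when the sampled value changes by more than that amount. The
   group below is a hypothetical illustration, not one of the defaults. */
#collection_group {
#  collect_every = 30     /* sample every 30 secs */
#  time_threshold = 300   /* send at least every 300 secs */
#  metric {
#    name = "some_metric"
#    value_threshold = "1.0"  /* send early on a change larger than 1.0 */
#  }
#}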

/* This collection group will cause a heartbeat (or beacon) to be sent every
   20 seconds. In the heartbeat is the GMOND_STARTED data which expresses
   the age of the running gmond. */
collection_group {
  collect_once = yes
  time_threshold = 20
  metric {
    name = "heartbeat"
  }
}

/* This collection group will send general info about this host every
   1200 secs.
   This information doesn't change between reboots and is only collected
   once. */
collection_group {
  collect_once = yes
  time_threshold = 1200
  metric {
    name = "cpu_num"
    title = "CPU Count"
  }
  metric {
    name = "cpu_speed"
    title = "CPU Speed"
  }
  metric {
    name = "mem_total"
    title = "Memory Total"
  }
  /* Should this be here? Swap can be added/removed between reboots. */
  metric {
    name = "swap_total"
    title = "Swap Space Total"
  }
  metric {
    name = "boottime"
    title = "Last Boot Time"
  }
  metric {
    name = "machine_type"
    title = "Machine Type"
  }
  metric {
    name = "os_name"
    title = "Operating System"
  }
  metric {
    name = "os_release"
    title = "Operating System Release"
  }
  metric {
    name = "location"
    title = "Location"
  }
}

/* This collection group will send the status of gexecd for this host
   every 300 secs. */
/* Unlike 2.5.x the default behavior is to report gexecd OFF. */
collection_group {
  collect_once = yes
  time_threshold = 300
  metric {
    name = "gexec"
    title = "Gexec Status"
  }
}

/* This collection group will collect the CPU status info every 20 secs.
   The time threshold is set to 90 seconds. In honesty, this
   time_threshold could be set significantly higher to reduce
   unnecessary network chatter. */
collection_group {
  collect_every = 20
  time_threshold = 90
  /* CPU status */
  metric {
    name = "cpu_user"
    value_threshold = "1.0"
    title = "CPU User"
  }
  metric {
    name = "cpu_system"
    value_threshold = "1.0"
    title = "CPU System"
  }
  metric {
    name = "cpu_idle"
    value_threshold = "5.0"
    title = "CPU Idle"
  }
  metric {
    name = "cpu_nice"
    value_threshold = "1.0"
    title = "CPU Nice"
  }
  metric {
    name = "cpu_aidle"
    value_threshold = "5.0"
    title = "CPU aidle"
  }
  metric {
    name = "cpu_wio"
    value_threshold = "1.0"
    title = "CPU wio"
  }
  metric {
    name = "cpu_steal"
    value_threshold = "1.0"
    title = "CPU steal"
  }
  /* The next two metrics are optional if you want more detail...
     ... since they are accounted for in cpu_system.
  metric {
    name = "cpu_intr"
    value_threshold = "1.0"
    title = "CPU intr"
  }
  metric {
    name = "cpu_sintr"
    value_threshold = "1.0"
    title = "CPU sintr"
  }
  */
}

collection_group {
  collect_every = 20
  time_threshold = 90
  /* Load Averages */
  metric {
    name = "load_one"
    value_threshold = "1.0"
    title = "One Minute Load Average"
  }
  metric {
    name = "load_five"
    value_threshold = "1.0"
    title = "Five Minute Load Average"
  }
  metric {
    name = "load_fifteen"
    value_threshold = "1.0"
    title = "Fifteen Minute Load Average"
  }
}

/* This group collects the number of running and total processes */
collection_group {
  collect_every = 80
  time_threshold = 950
  metric {
    name = "proc_run"
    value_threshold = "1.0"
    title = "Total Running Processes"
  }
  metric {
    name = "proc_total"
    value_threshold = "1.0"
    title = "Total Processes"
  }
}

/* This collection group grabs the volatile memory metrics every 40 secs and
   sends them at least every 180 secs. This time_threshold can be increased
   significantly to reduce unneeded network traffic. */
collection_group {
  collect_every = 40
  time_threshold = 180
  metric {
    name = "mem_free"
    value_threshold = "1024.0"
    title = "Free Memory"
  }
  metric {
    name = "mem_shared"
    value_threshold = "1024.0"
    title = "Shared Memory"
  }
  metric {
    name = "mem_buffers"
    value_threshold = "1024.0"
    title = "Memory Buffers"
  }
  metric {
    name = "mem_cached"
    value_threshold = "1024.0"
    title = "Cached Memory"
  }
  metric {
    name = "swap_free"
    value_threshold = "1024.0"
    title = "Free Swap Space"
  }
}

collection_group {
  collect_every = 40
  time_threshold = 300
  metric {
    name = "bytes_out"
    value_threshold = 4096
    title = "Bytes Sent"
  }
  metric {
    name = "bytes_in"
    value_threshold = 4096
    title = "Bytes Received"
  }
  metric {
    name = "pkts_in"
    value_threshold = 256
    title = "Packets Received"
  }
  metric {
    name = "pkts_out"
    value_threshold = 256
    title = "Packets Sent"
  }
}

/* Different from the 2.5.x default, since the old config made no sense */
collection_group {
  collect_every = 1800
  time_threshold = 3600
  metric {
    name = "disk_total"
    value_threshold = 1.0
    title = "Total Disk Space"
  }
}

collection_group {
  collect_every = 40
  time_threshold = 180
  metric {
    name = "disk_free"
    value_threshold = 1.0
    title = "Disk Space Available"
  }
  metric {
    name = "part_max_used"
    value_threshold = 1.0
    title = "Maximum Disk Space Used"
  }
}

include ("/etc/conf.d/*.conf")