diff options
Diffstat (limited to 'scripts/pybootchartgui/pybootchartgui')
-rw-r--r-- | scripts/pybootchartgui/pybootchartgui/__init__.py | 0 | ||||
-rw-r--r-- | scripts/pybootchartgui/pybootchartgui/batch.py | 23 | ||||
-rw-r--r-- | scripts/pybootchartgui/pybootchartgui/draw.py | 355 | ||||
-rw-r--r-- | scripts/pybootchartgui/pybootchartgui/gui.py | 273 | ||||
-rw-r--r-- | scripts/pybootchartgui/pybootchartgui/main.py | 71 | ||||
-rw-r--r-- | scripts/pybootchartgui/pybootchartgui/parsing.py | 223 | ||||
-rw-r--r-- | scripts/pybootchartgui/pybootchartgui/process_tree.py | 270 | ||||
-rw-r--r-- | scripts/pybootchartgui/pybootchartgui/samples.py | 93 | ||||
-rw-r--r-- | scripts/pybootchartgui/pybootchartgui/tests/parser_test.py | 93 | ||||
-rw-r--r-- | scripts/pybootchartgui/pybootchartgui/tests/process_tree_test.py | 78 |
10 files changed, 1479 insertions, 0 deletions
diff --git a/scripts/pybootchartgui/pybootchartgui/__init__.py b/scripts/pybootchartgui/pybootchartgui/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/scripts/pybootchartgui/pybootchartgui/__init__.py | |||
diff --git a/scripts/pybootchartgui/pybootchartgui/batch.py b/scripts/pybootchartgui/pybootchartgui/batch.py new file mode 100644 index 0000000000..bd67c9350e --- /dev/null +++ b/scripts/pybootchartgui/pybootchartgui/batch.py | |||
@@ -0,0 +1,23 @@ | |||
1 | import cairo | ||
2 | |||
3 | import draw | ||
4 | |||
def render(res, format, filename):
    """Render parsed bootchart results to *filename* in the given format.

    res      -- parse result tuple, forwarded to draw.extents()/draw.render()
    format   -- one of "png", "pdf" or "svg"
    filename -- output file path

    Returns 10 (an error status) after printing a message when the format
    is unknown; returns None on success.
    """
    # Per-format pair of (surface factory, post-render writer).  PNG needs
    # an explicit write_to_png(); PDF/SVG surfaces write on finalization.
    handlers = {
        "png": (lambda w,h: cairo.ImageSurface(cairo.FORMAT_ARGB32,w,h), lambda sfc: sfc.write_to_png(filename)),
        "pdf": (lambda w,h: cairo.PDFSurface(filename, w, h), lambda sfc: 0),
        "svg": (lambda w,h: cairo.SVGSurface(filename, w, h), lambda sfc: 0)
    }

    # 'dict.has_key' is deprecated (and removed in Python 3); use 'in'.
    if format not in handlers:
        print("Unknown format '%s'." % format)
        return 10

    make_surface, write_surface = handlers[format]
    w,h = draw.extents(*res)
    # Never produce an image narrower than the minimum chart width.
    w = max(w, draw.MIN_IMG_W)
    surface = make_surface(w,h)
    ctx = cairo.Context(surface)
    draw.render(ctx, *res)
    write_surface(surface)
23 | |||
diff --git a/scripts/pybootchartgui/pybootchartgui/draw.py b/scripts/pybootchartgui/pybootchartgui/draw.py new file mode 100644 index 0000000000..249cd2ef81 --- /dev/null +++ b/scripts/pybootchartgui/pybootchartgui/draw.py | |||
@@ -0,0 +1,355 @@ | |||
1 | import cairo | ||
2 | import math | ||
3 | import re | ||
4 | |||
# Process tree background color.
BACK_COLOR = (1.0, 1.0, 1.0, 1.0)

WHITE = (1.0, 1.0, 1.0, 1.0)
# Process tree border color.
BORDER_COLOR = (0.63, 0.63, 0.63, 1.0)
# Second tick line color.
TICK_COLOR = (0.92, 0.92, 0.92, 1.0)
# 5-second tick line color.
TICK_COLOR_BOLD = (0.86, 0.86, 0.86, 1.0)
# Text color.
TEXT_COLOR = (0.0, 0.0, 0.0, 1.0)

# Font family
FONT_NAME = "Bitstream Vera Sans"
# Title text font.
TITLE_FONT_SIZE = 18
# Default text font.
TEXT_FONT_SIZE = 12
# Axis label font.
AXIS_FONT_SIZE = 11
# Legend font.
LEGEND_FONT_SIZE = 12

# CPU load chart color.
CPU_COLOR = (0.40, 0.55, 0.70, 1.0)
# IO wait chart color.
IO_COLOR = (0.76, 0.48, 0.48, 0.5)
# Disk throughput color.
DISK_TPUT_COLOR = (0.20, 0.71, 0.20, 1.0)
# Files-open chart color.  (Comment was a copy-paste of "CPU load chart
# color"; the constant is clearly for the file-open series.)
FILE_OPEN_COLOR = (0.20, 0.71, 0.71, 1.0)

# Process border color.
PROC_BORDER_COLOR = (0.71, 0.71, 0.71, 1.0)
# Waiting process color.
PROC_COLOR_D = (0.76, 0.48, 0.48, 0.125)
# Running process color.
PROC_COLOR_R = CPU_COLOR
# Sleeping process color.
PROC_COLOR_S = (0.94, 0.94, 0.94, 1.0)
# Stopped process color.
PROC_COLOR_T = (0.94, 0.50, 0.50, 1.0)
# Zombie process color.
PROC_COLOR_Z = (0.71, 0.71, 0.71, 1.0)
# Dead process color.
PROC_COLOR_X = (0.71, 0.71, 0.71, 0.125)
# Paging process color.
PROC_COLOR_W = (0.71, 0.71, 0.71, 0.125)

# Process label color.
PROC_TEXT_COLOR = (0.19, 0.19, 0.19, 1.0)
# Process label font.
PROC_TEXT_FONT_SIZE = 12

# Signature color.
SIG_COLOR = (0.0, 0.0, 0.0, 0.3125)
# Signature font.
SIG_FONT_SIZE = 14
# Signature text.
SIGNATURE = "http://code.google.com/p/pybootchartgui"

# Process dependency line color.
DEP_COLOR = (0.75, 0.75, 0.75, 1.0)
# Process dependency line stroke.
DEP_STROKE = 1.0

# Process description date format.
DESC_TIME_FORMAT = "mm:ss.SSS"

# Process states
STATE_UNDEFINED = 0
STATE_RUNNING = 1
STATE_SLEEPING = 2
STATE_WAITING = 3
STATE_STOPPED = 4
STATE_ZOMBIE = 5

# Indexed by get_proc_state()'s return value (index 0 = undefined state).
STATE_COLORS = [(0,0,0,0), PROC_COLOR_R, PROC_COLOR_S, PROC_COLOR_D, PROC_COLOR_T, PROC_COLOR_Z, PROC_COLOR_X, PROC_COLOR_W]
84 | |||
# Convert ps process state to an int
def get_proc_state(flag):
    """Map a single ps state flag character (one of R,S,D,T,Z,X,W) to its
    1-based STATE_* integer code; raises ValueError for anything else."""
    state_flags = "RSDTZXW"
    return 1 + state_flags.index(flag)
88 | |||
89 | |||
def draw_text(ctx, text, color, x, y):
    """Show *text* at position (x, y) using the RGBA tuple *color*."""
    red, green, blue, alpha = color
    ctx.set_source_rgba(red, green, blue, alpha)
    ctx.move_to(x, y)
    ctx.show_text(text)
94 | |||
95 | |||
def draw_fill_rect(ctx, color, rect):
    """Fill the rectangle *rect* = (x, y, w, h) with RGBA *color*."""
    rx, ry, rw, rh = rect
    ctx.set_source_rgba(*color)
    ctx.rectangle(rx, ry, rw, rh)
    ctx.fill()
100 | |||
101 | |||
def draw_rect(ctx, color, rect):
    """Stroke the outline of rectangle *rect* = (x, y, w, h) in *color*."""
    rx, ry, rw, rh = rect
    ctx.set_source_rgba(*color)
    ctx.rectangle(rx, ry, rw, rh)
    ctx.stroke()
106 | |||
107 | |||
def draw_legend_box(ctx, label, fill_color, x, y, s):
    """Draw an s-by-s legend swatch whose bottom edge sits at *y*, with
    *label* printed to its right."""
    swatch = (x, y - s, s, s)
    draw_fill_rect(ctx, fill_color, swatch)
    draw_rect(ctx, PROC_BORDER_COLOR, swatch)
    draw_text(ctx, label, TEXT_COLOR, x + s + 5, y)
112 | |||
113 | |||
def draw_legend_line(ctx, label, fill_color, x, y, s):
    """Draw a line-style legend key (a short bar with a centered dot) and
    print *label* to its right."""
    draw_fill_rect(ctx, fill_color, (x, y - s/2, s + 1, 3))
    dot_x = x + (s + 1)/2.0
    dot_y = y - (s - 3)/2.0
    ctx.arc(dot_x, dot_y, 2.5, 0, 2.0 * math.pi)
    ctx.fill()
    draw_text(ctx, label, TEXT_COLOR, x + s + 5, y)
119 | |||
120 | |||
def draw_label_in_box(ctx, color, label, x, y, w, maxx):
    """Draw *label* centered inside a box of width *w* starting at *x*;
    when it does not fit, place it beside the box instead (to the right,
    or to the left if that would run past *maxx*)."""
    text_w = ctx.text_extents(label)[2]
    # Preferred position: centered within the box.
    pos_x = x + w / 2 - text_w / 2
    if text_w + 10 > w:
        # Too wide for the box: put it just to the right of the box.
        pos_x = x + w + 5
    if pos_x + text_w > maxx:
        # Would cross the chart's right edge: flip to the left side.
        pos_x = x - text_w - 5
    draw_text(ctx, label, color, pos_x, y)
129 | |||
130 | |||
def draw_5sec_labels(ctx, rect, sec_w):
    """Write a time label ("Ns") above every fifth second tick of the
    chart rectangle *rect*, where *sec_w* is the pixel width of one second."""
    ctx.set_font_size(AXIS_FONT_SIZE)
    for offset in range(0, rect[2] + 1, sec_w):
        seconds = offset / sec_w
        if seconds % 5 != 0:
            continue
        label = "%ds" % seconds
        text_w = ctx.text_extents(label)[2]
        # Center the label horizontally on its tick, just above the box.
        draw_text(ctx, label, TEXT_COLOR, rect[0] + offset - text_w/2, rect[1] - 2)
138 | |||
139 | |||
def draw_box_ticks(ctx, rect, sec_w):
    """Draw the chart border plus one vertical tick line per second;
    every fifth tick uses the bolder tick color."""
    draw_rect(ctx, BORDER_COLOR, tuple(rect))

    ctx.set_line_cap(cairo.LINE_CAP_SQUARE)
    for offset in range(sec_w, rect[2] + 1, sec_w):
        bold = ((offset / sec_w) % 5 == 0)
        ctx.set_source_rgba(*(TICK_COLOR_BOLD if bold else TICK_COLOR))
        # Keep the tick inside the border (1px inset top and bottom).
        ctx.move_to(rect[0] + offset, rect[1] + 1)
        ctx.line_to(rect[0] + offset, rect[1] + rect[3] - 1)
        ctx.stroke()
    ctx.set_line_cap(cairo.LINE_CAP_BUTT)
155 | |||
def draw_chart(ctx, color, fill, chart_bounds, data, proc_tree):
    """Plot one (time, value) series inside *chart_bounds*.

    ctx          -- cairo context
    color        -- RGBA tuple for the line (and the fill, if requested)
    fill         -- True: close the path down to the chart base and fill;
                    False: stroke the polyline only
    chart_bounds -- (x, y, w, h) rectangle of the chart area
    data         -- non-empty list of (time, value) samples
    proc_tree    -- supplies start_time, used to left-shift sample times
    """
    ctx.set_line_width(0.5)
    x_shift = proc_tree.start_time
    x_scale = proc_tree.duration  # NOTE(review): assigned but never used

    def transform_point_coords(point, x_base, y_base, xscale, yscale, x_trans, y_trans):
        # Map a sample into device coordinates.  The y axis is flipped
        # (bigger values drawn higher), anchored at the chart base line
        # y_trans + bar_h (bar_h is the module-level chart height).
        x = (point[0] - x_base) * xscale + x_trans
        y = (point[1] - y_base) * -yscale + y_trans + bar_h
        return x, y

    # Scale so the largest sample spans the full chart width/height.
    # NOTE(review): xscale divides by the max *absolute* time, while the x
    # transform subtracts x_shift first -- verify the intended alignment.
    xscale = float(chart_bounds[2]) / max(x for (x,y) in data)
    yscale = float(chart_bounds[3]) / max(y for (x,y) in data)

    first = transform_point_coords(data[0], x_shift, 0, xscale, yscale, chart_bounds[0], chart_bounds[1])
    last = transform_point_coords(data[-1], x_shift, 0, xscale, yscale, chart_bounds[0], chart_bounds[1])

    ctx.set_source_rgba(*color)
    ctx.move_to(*first)
    for point in data:
        x, y = transform_point_coords(point, x_shift, 0, xscale, yscale, chart_bounds[0], chart_bounds[1])
        ctx.line_to(x, y)
    if fill:
        # Outline first, then close the polygon along the chart base line
        # (first/last x at base height) and fill it.
        ctx.stroke_preserve()
        ctx.line_to(last[0], chart_bounds[1]+bar_h)
        ctx.line_to(first[0], chart_bounds[1]+bar_h)
        ctx.line_to(first[0], first[1])
        ctx.fill()
    else:
        ctx.stroke()
    ctx.set_line_width(1.0)
186 | |||
# Vertical space reserved for the title/header area.
header_h = 280
# Height of the CPU and disk charts.
bar_h = 55
# offsets
off_x, off_y = 10, 10
sec_w = 25   # the width of a second
proc_h = 16  # the height of a process
leg_s = 10   # legend swatch size
MIN_IMG_W = 800


def extents(headers, cpu_stats, disk_stats, proc_tree):
    """Return the (width, height) the rendered chart will occupy.

    Width follows the boot duration (sec_w pixels per second, duration in
    1/100 s units); height is the header area plus one row per process.
    Only proc_tree is consulted; the other arguments keep the signature
    parallel to render().
    """
    width = 2*off_x + proc_tree.duration * sec_w / 100
    height = 2*off_y + header_h + proc_h * proc_tree.num_proc
    return (width, height)
201 | |||
#
# Render the chart.
#
def render(ctx, headers, cpu_stats, disk_stats, proc_tree):
    """Render the complete bootchart into the cairo context *ctx*.

    Layout, top to bottom: title/header text, CPU+I/O-wait chart, disk
    utilization/throughput chart, per-process bar chart, and a signature
    line in the bottom-left corner.
    """
    (w, h) = extents(headers, cpu_stats, disk_stats, proc_tree)

    ctx.set_line_width(1.0)
    ctx.select_font_face(FONT_NAME)
    draw_fill_rect(ctx, WHITE, (0, 0, max(w, MIN_IMG_W), h))
    # From here on, w is the inner chart width (margins excluded).
    w -= 2*off_x
    # draw the title and headers
    curr_y = draw_header(ctx, headers, off_x, proc_tree.duration)

    # render bar legend
    ctx.set_font_size(LEGEND_FONT_SIZE)

    draw_legend_box(ctx, "CPU (user+sys)", CPU_COLOR, off_x, curr_y+20, leg_s)
    draw_legend_box(ctx, "I/O (wait)", IO_COLOR, off_x + 120, curr_y+20, leg_s)

    # render I/O wait
    chart_rect = (off_x, curr_y+30, w, bar_h)
    draw_box_ticks(ctx, chart_rect, sec_w)
    # The I/O series includes CPU time, so the CPU chart painted on top
    # leaves only the wait portion visible.
    draw_chart(ctx, IO_COLOR, True, chart_rect, [(sample.time, sample.user + sample.sys + sample.io) for sample in cpu_stats], proc_tree)
    # render CPU load
    draw_chart(ctx, CPU_COLOR, True, chart_rect, [(sample.time, sample.user + sample.sys) for sample in cpu_stats], proc_tree)

    curr_y = curr_y + 30 + bar_h

    # render second chart
    draw_legend_line(ctx, "Disk throughput", DISK_TPUT_COLOR, off_x, curr_y+20, leg_s)
    draw_legend_box(ctx, "Disk utilization", IO_COLOR, off_x + 120, curr_y+20, leg_s)

    # render I/O utilization
    chart_rect = (off_x, curr_y+30, w, bar_h)
    draw_box_ticks(ctx, chart_rect, sec_w)
    draw_chart(ctx, IO_COLOR, True, chart_rect, [(sample.time, sample.util) for sample in disk_stats], proc_tree)

    # render disk throughput
    max_sample = max(disk_stats, key=lambda s: s.tput)
    draw_chart(ctx, DISK_TPUT_COLOR, False, chart_rect, [(sample.time, sample.tput) for sample in disk_stats], proc_tree)

    # Annotate the throughput peak with its value.
    pos_x = off_x + ((max_sample.time - proc_tree.start_time) * w / proc_tree.duration)

    # Nudge the label right/down when the peak sits near the left edge so
    # the text stays inside the chart.
    shift_x, shift_y = -20, 20
    if (pos_x < off_x + 245):
        shift_x, shift_y = 5, 40

    label = "%dMB/s" % round((max_sample.tput) / 1024.0)
    draw_text(ctx, label, DISK_TPUT_COLOR, pos_x + shift_x, curr_y + shift_y)


    # draw process boxes
    draw_process_bar_chart(ctx, proc_tree, curr_y + bar_h, w, h)

    ctx.set_font_size(SIG_FONT_SIZE)
    draw_text(ctx, SIGNATURE, SIG_COLOR, off_x + 5, h - off_y - 5)
258 | |||
def draw_process_bar_chart(ctx, proc_tree, curr_y, w, h):
    """Draw the process-state legend, the ticked chart area, and one
    horizontal bar per process (depth-first over the process tree)."""
    draw_legend_box(ctx, "Running (%cpu)", PROC_COLOR_R, off_x , curr_y + 45, leg_s)
    draw_legend_box(ctx, "Unint.sleep (I/O)", PROC_COLOR_D, off_x+120, curr_y + 45, leg_s)
    draw_legend_box(ctx, "Sleeping", PROC_COLOR_S, off_x+240, curr_y + 45, leg_s)
    draw_legend_box(ctx, "Zombie", PROC_COLOR_Z, off_x+360, curr_y + 45, leg_s)

    # Chart area spans from below the legend down to the bottom margin.
    chart_rect = [off_x, curr_y+60, w, h - 2 * off_y - (curr_y+60) + proc_h]
    ctx.set_font_size(PROC_TEXT_FONT_SIZE)

    draw_box_ticks(ctx, chart_rect, sec_w)
    draw_5sec_labels(ctx, chart_rect, sec_w)

    y = curr_y+60
    for root in proc_tree.process_tree:
        draw_processes_recursively(ctx, root, proc_tree, y, proc_h, chart_rect)
        # Each subtree occupies one row of proc_h per node.
        y = y + proc_h * proc_tree.num_nodes([root])
275 | |||
276 | |||
def draw_header(ctx, headers, off_x, duration):
    """Draw the chart title and the informational header lines.

    Returns the y coordinate of the last line drawn so the caller can lay
    out content below the header.
    """
    dur = duration / 100.0  # assumes duration is in 1/100 s units -- TODO confirm
    # (header key, display title, value formatter) triples, drawn in order.
    toshow = [
        ('system.uname', 'uname', lambda s: s),
        ('system.release', 'release', lambda s: s),
        ('system.cpu', 'CPU', lambda s: re.sub('model name\s*:\s*', '', s, 1)),
        ('system.kernel.options', 'kernel options', lambda s: s),
        ('pseudo.header', 'time', lambda s: '%02d:%05.2f' % (math.floor(dur/60), dur - 60 * math.floor(dur/60)))
    ]

    # NOTE(review): font_extents() is read before the title font size is
    # set, so the first baseline uses the previous font's height.
    header_y = ctx.font_extents()[2] + 10
    ctx.set_font_size(TITLE_FONT_SIZE)
    draw_text(ctx, headers['title'], TEXT_COLOR, off_x, header_y)
    ctx.set_font_size(TEXT_FONT_SIZE)

    for (headerkey, headertitle, mangle) in toshow:
        header_y += ctx.font_extents()[2]
        # NOTE(review): headers.get() yields None for a missing key, which
        # would make this concatenation raise TypeError -- verify the
        # parser always supplies these keys.
        txt = headertitle + ': ' + mangle(headers.get(headerkey))
        draw_text(ctx, txt, TEXT_COLOR, off_x, header_y)

    return header_y
298 | |||
def draw_processes_recursively(ctx, proc, proc_tree, y, proc_h, rect) :
    """Draw *proc*'s bar on the row at *y*, then its children below it,
    connecting each child back to the parent with a dependency line.

    Returns the (x, y) of this process's bar so the caller can attach its
    own connecting line.
    """
    # Horizontal extent = the process lifetime mapped onto the chart width.
    x = rect[0] + ((proc.start_time - proc_tree.start_time) * rect[2] / proc_tree.duration)
    w = ((proc.duration) * rect[2] / proc_tree.duration)

    draw_process_activity_colors(ctx, proc, proc_tree, x, y, w, proc_h, rect)
    draw_rect(ctx, PROC_BORDER_COLOR, (x, y, w, proc_h))
    draw_label_in_box(ctx, PROC_TEXT_COLOR, proc.cmd, x, y + proc_h - 4, w, rect[0] + rect[2])

    next_y = y + proc_h
    for child in proc.child_list:
        child_x, child_y = draw_processes_recursively(ctx, child, proc_tree, next_y, proc_h, rect)
        draw_process_connecting_lines(ctx, x, y, child_x, child_y, proc_h)
        # Skip past the child's entire subtree: one row per node.
        next_y = next_y + proc_h * proc_tree.num_nodes([child])

    return x, y
314 | |||
315 | |||
def draw_process_activity_colors(ctx, proc, proc_tree, x, y, w, proc_h, rect):
    """Color one process bar according to its sampled states.

    The bar is first filled with the sleeping color; each sample then
    paints a slice in its state's color (sleeping slices are skipped,
    since they already match the background fill).
    """
    draw_fill_rect(ctx, PROC_COLOR_S, (x, y, w, proc_h))

    last_tx = -1
    for sample in proc.samples :
        tx = rect[0] + round(((sample.time - proc_tree.start_time) * rect[2] / proc_tree.duration))
        tw = round(proc_tree.sample_period * rect[2] / float(proc_tree.duration))
        if last_tx != -1 and abs(last_tx - tx) <= tw:
            # Snap this slice to the end of the previous one so rounding
            # never leaves a gap or an overlap between adjacent slices.
            tw -= last_tx - tx
            tx = last_tx

        last_tx = tx + tw
        state = get_proc_state( sample.state )

        color = STATE_COLORS[state]
        if state == STATE_RUNNING:
            # Shade by CPU usage: alpha is the sampled user+sys fraction.
            alpha = sample.cpu_sample.user + sample.cpu_sample.sys
            color = tuple(list(PROC_COLOR_R[0:3]) + [alpha])
        elif state == STATE_SLEEPING:
            continue

        draw_fill_rect(ctx, color, (tx, y, tw, proc_h))
338 | |||
339 | |||
def draw_process_connecting_lines(ctx, px, py, x, y, proc_h):
    """Draw a dashed parent/child dependency line from the parent bar at
    (px, py) to the child bar at (x, y)."""
    ctx.set_source_rgba(*DEP_COLOR)
    ctx.set_dash([2,2])
    if abs(px - x) < 3:
        # Parent and child start at (almost) the same x: detour the line
        # slightly to the left so it remains visible beside the bars.
        dep_off_x = 3
        dep_off_y = proc_h / 4
        ctx.move_to(x, y + proc_h / 2)
        ctx.line_to(px - dep_off_x, y + proc_h / 2)
        ctx.line_to(px - dep_off_x, py - dep_off_y)
        ctx.line_to(px, py - dep_off_y)
    else:
        # Simple L-shaped connector: across to the parent's x, then up.
        ctx.move_to(x, y + proc_h / 2)
        ctx.line_to(px, y + proc_h / 2)
        ctx.line_to(px, py)
    ctx.stroke()
    ctx.set_dash([])
diff --git a/scripts/pybootchartgui/pybootchartgui/gui.py b/scripts/pybootchartgui/pybootchartgui/gui.py new file mode 100644 index 0000000000..87081e30eb --- /dev/null +++ b/scripts/pybootchartgui/pybootchartgui/gui.py | |||
@@ -0,0 +1,273 @@ | |||
1 | import gobject | ||
2 | import gtk | ||
3 | import gtk.gdk | ||
4 | import gtk.keysyms | ||
5 | |||
6 | import draw | ||
7 | |||
class PyBootchartWidget(gtk.DrawingArea):
    """Drawing area that renders the bootchart and supports zooming,
    keyboard/mouse panning and scrollbar (adjustment) integration."""

    __gsignals__ = {
        'expose-event': 'override',
        'clicked' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_STRING, gtk.gdk.Event)),
        'position-changed' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT, gobject.TYPE_INT)),
        'set-scroll-adjustments' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gtk.Adjustment, gtk.Adjustment))
    }

    def __init__(self, res):
        # res is the parse-result tuple, forwarded unchanged to draw.render().
        gtk.DrawingArea.__init__(self)

        self.res = res

        self.set_flags(gtk.CAN_FOCUS)

        self.add_events(gtk.gdk.BUTTON_PRESS_MASK | gtk.gdk.BUTTON_RELEASE_MASK)
        self.connect("button-press-event", self.on_area_button_press)
        self.connect("button-release-event", self.on_area_button_release)
        self.add_events(gtk.gdk.POINTER_MOTION_MASK | gtk.gdk.POINTER_MOTION_HINT_MASK | gtk.gdk.BUTTON_RELEASE_MASK)
        self.connect("motion-notify-event", self.on_area_motion_notify)
        self.connect("scroll-event", self.on_area_scroll_event)
        self.connect('key-press-event', self.on_key_press_event)

        self.connect('set-scroll-adjustments', self.on_set_scroll_adjustments)
        self.connect("size-allocate", self.on_allocation_size_changed)
        self.connect("position-changed", self.on_position_changed)

        # Current zoom factor and top-left corner in chart coordinates.
        self.zoom_ratio = 1.0
        self.x, self.y = 0.0, 0.0

        # Natural (zoom 1.0) size of the rendered chart.
        self.chart_width, self.chart_height = draw.extents(*res)
        self.hadj = None
        self.vadj = None

    def do_expose_event(self, event):
        cr = self.window.cairo_create()

        # set a clip region for the expose event
        cr.rectangle(
            event.area.x, event.area.y,
            event.area.width, event.area.height
        )
        cr.clip()
        self.draw(cr, self.get_allocation())
        return False

    def draw(self, cr, rect):
        # White background, then the chart scaled/translated according to
        # the current zoom and pan position.
        cr.set_source_rgba(1.0, 1.0, 1.0, 1.0)
        cr.paint()
        cr.scale(self.zoom_ratio, self.zoom_ratio)
        cr.translate(-self.x, -self.y)
        draw.render(cr, *self.res)

    def position_changed(self):
        self.emit("position-changed", self.x, self.y)

    # Multiplicative zoom step for keys/wheel/toolbar.
    ZOOM_INCREMENT = 1.25

    def zoom_image(self, zoom_ratio):
        self.zoom_ratio = zoom_ratio
        # Re-derive the scrollbar ranges for the new zoom level.
        self._set_scroll_adjustments(self.hadj, self.vadj)
        self.queue_draw()

    def zoom_to_rect(self, rect):
        # Fit the chart width into the given allocation rectangle.
        zoom_ratio = float(rect.width)/float(self.chart_width)
        self.zoom_image(zoom_ratio)
        self.x = 0
        self.position_changed()

    def on_zoom_in(self, action):
        self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT)

    def on_zoom_out(self, action):
        self.zoom_image(self.zoom_ratio / self.ZOOM_INCREMENT)

    def on_zoom_fit(self, action):
        self.zoom_to_rect(self.get_allocation())

    def on_zoom_100(self, action):
        self.zoom_image(1.0)

    # Pan step for the arrow keys, in device pixels.
    POS_INCREMENT = 100

    def on_key_press_event(self, widget, event):
        # Arrow keys pan by a fixed number of screen pixels (hence the
        # division by zoom_ratio); PageUp/PageDown zoom.
        if event.keyval == gtk.keysyms.Left:
            self.x -= self.POS_INCREMENT/self.zoom_ratio
        elif event.keyval == gtk.keysyms.Right:
            self.x += self.POS_INCREMENT/self.zoom_ratio
        elif event.keyval == gtk.keysyms.Up:
            self.y -= self.POS_INCREMENT/self.zoom_ratio
        elif event.keyval == gtk.keysyms.Down:
            self.y += self.POS_INCREMENT/self.zoom_ratio
        elif event.keyval == gtk.keysyms.Page_Up:
            self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT)
        elif event.keyval == gtk.keysyms.Page_Down:
            self.zoom_image(self.zoom_ratio / self.ZOOM_INCREMENT)
        else:
            return False
        self.queue_draw()
        self.position_changed()
        return True

    def on_area_button_press(self, area, event):
        # Start of a drag-pan: remember where the pointer went down.
        if event.button == 2 or event.button == 1:
            area.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.FLEUR))
            self.prevmousex = event.x
            self.prevmousey = event.y
        if event.type not in (gtk.gdk.BUTTON_PRESS, gtk.gdk.BUTTON_RELEASE):
            return False
        return False

    def on_area_button_release(self, area, event):
        # End of a drag-pan: restore the cursor and forget the anchor.
        if event.button == 2 or event.button == 1:
            area.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.ARROW))
            self.prevmousex = None
            self.prevmousey = None
            return True
        return False

    def on_area_scroll_event(self, area, event):
        # Mouse wheel zooms in/out.
        if event.direction == gtk.gdk.SCROLL_UP:
            self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT)
            return True
        if event.direction == gtk.gdk.SCROLL_DOWN:
            self.zoom_image(self.zoom_ratio / self.ZOOM_INCREMENT)
            return True
        return False

    def on_area_motion_notify(self, area, event):
        state = event.state
        if state & gtk.gdk.BUTTON2_MASK or state & gtk.gdk.BUTTON1_MASK:
            x, y = int(event.x), int(event.y)
            # pan the image
            self.x += (self.prevmousex - x)/self.zoom_ratio
            self.y += (self.prevmousey - y)/self.zoom_ratio
            self.queue_draw()
            self.prevmousex = x
            self.prevmousey = y
            self.position_changed()
        return True

    def on_set_scroll_adjustments(self, area, hadj, vadj):
        self._set_scroll_adjustments(hadj, vadj)

    def on_allocation_size_changed(self, widget, allocation):
        # Visible area changed: keep the scrollbar page sizes in sync.
        self.hadj.page_size = allocation.width
        self.hadj.page_increment = allocation.width * 0.9
        self.vadj.page_size = allocation.height
        self.vadj.page_increment = allocation.height * 0.9

    def _set_scroll_adjustments(self, hadj, vadj):
        # Substitute fresh zeroed adjustments when none are supplied.
        if hadj == None:
            hadj = gtk.Adjustment(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
        if vadj == None:
            vadj = gtk.Adjustment(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)

        # Detach from previously tracked adjustments before swapping.
        if self.hadj != None and hadj != self.hadj:
            self.hadj.disconnect(self.hadj_changed_signal_id)
        if self.vadj != None and vadj != self.vadj:
            self.vadj.disconnect(self.vadj_changed_signal_id)

        if hadj != None:
            self.hadj = hadj
            self._set_adj_upper(self.hadj, self.zoom_ratio * self.chart_width)
            self.hadj_changed_signal_id = self.hadj.connect('value-changed', self.on_adjustments_changed)

        if vadj != None:
            self.vadj = vadj
            self._set_adj_upper(self.vadj, self.zoom_ratio * self.chart_height)
            self.vadj_changed_signal_id = self.vadj.connect('value-changed', self.on_adjustments_changed)

    def _set_adj_upper(self, adj, upper):
        changed = False
        value_changed = False

        if adj.upper != upper:
            adj.upper = upper
            changed = True

        # Clamp the current value into the new scrollable range.
        max_value = max(0.0, upper - adj.page_size)
        if adj.value > max_value:
            adj.value = max_value
            value_changed = True

        if changed:
            adj.changed()
        if value_changed:
            adj.value_changed()

    def on_adjustments_changed(self, adj):
        # A scrollbar moved: convert back to chart coordinates and redraw.
        self.x = self.hadj.value / self.zoom_ratio
        self.y = self.vadj.value / self.zoom_ratio
        self.queue_draw()

    def on_position_changed(self, widget, x, y):
        # Keep the scrollbars in sync with programmatic pans.
        self.hadj.value = x * self.zoom_ratio
        self.vadj.value = y * self.zoom_ratio

# Register the signal so gtk.ScrolledWindow can drive this widget.
PyBootchartWidget.set_set_scroll_adjustments_signal('set-scroll-adjustments')
207 | |||
class PyBootchartWindow(gtk.Window):
    """Top-level application window: a zoom toolbar above a scrolled
    PyBootchartWidget."""

    # UIManager markup describing the zoom toolbar.
    ui = '''
    <ui>
        <toolbar name="ToolBar">
            <toolitem action="ZoomIn"/>
            <toolitem action="ZoomOut"/>
            <toolitem action="ZoomFit"/>
            <toolitem action="Zoom100"/>
        </toolbar>
    </ui>
    '''

    def __init__(self, res):
        # res is the parse-result tuple, handed to the chart widget.
        gtk.Window.__init__(self)

        window = self
        window.set_title('Bootchart')
        window.set_default_size(512, 512)
        vbox = gtk.VBox()
        window.add(vbox)

        self.widget = PyBootchartWidget(res)

        # Create a UIManager instance
        uimanager = self.uimanager = gtk.UIManager()

        # Add the accelerator group to the toplevel window
        accelgroup = uimanager.get_accel_group()
        window.add_accel_group(accelgroup)

        # Create an ActionGroup
        actiongroup = gtk.ActionGroup('Actions')
        self.actiongroup = actiongroup

        # Create actions: each toolbar button delegates to the widget.
        actiongroup.add_actions((
            ('ZoomIn', gtk.STOCK_ZOOM_IN, None, None, None, self.widget.on_zoom_in),
            ('ZoomOut', gtk.STOCK_ZOOM_OUT, None, None, None, self.widget.on_zoom_out),
            ('ZoomFit', gtk.STOCK_ZOOM_FIT, 'Fit Width', None, None, self.widget.on_zoom_fit),
            ('Zoom100', gtk.STOCK_ZOOM_100, None, None, None, self.widget.on_zoom_100),
        ))

        # Add the actiongroup to the uimanager
        uimanager.insert_action_group(actiongroup, 0)

        # Add a UI description
        uimanager.add_ui_from_string(self.ui)

        # Scrolled window
        scrolled = gtk.ScrolledWindow()
        scrolled.add(self.widget)

        # Create a Toolbar
        toolbar = uimanager.get_widget('/ToolBar')
        vbox.pack_start(toolbar, False)
        vbox.pack_start(scrolled)

        self.set_focus(self.widget)

        self.show_all()
269 | |||
def show(res):
    """Open an interactive bootchart window for the parse results *res*
    and run the GTK main loop until the window is closed."""
    window = PyBootchartWindow(res)
    window.connect('destroy', gtk.main_quit)
    gtk.main()
diff --git a/scripts/pybootchartgui/pybootchartgui/main.py b/scripts/pybootchartgui/pybootchartgui/main.py new file mode 100644 index 0000000000..bf50afb6c3 --- /dev/null +++ b/scripts/pybootchartgui/pybootchartgui/main.py | |||
@@ -0,0 +1,71 @@ | |||
1 | import sys | ||
2 | import os | ||
3 | import optparse | ||
4 | |||
5 | import parsing | ||
6 | import gui | ||
7 | import batch | ||
8 | |||
9 | def _mk_options_parser(): | ||
10 | """Make an options parser.""" | ||
11 | usage = "%prog [options] PATH, ..., PATH" | ||
12 | version = "%prog v0.0.0" | ||
13 | parser = optparse.OptionParser(usage, version=version) | ||
14 | parser.add_option("-i", "--interactive", action="store_true", dest="interactive", default=False, | ||
15 | help="start in active mode") | ||
16 | parser.add_option("-f", "--format", dest="format", default = None, | ||
17 | help="image format (...); default format ...") | ||
18 | parser.add_option("-o", "--output", dest="output", metavar="PATH", default=None, | ||
19 | help="output path (file or directory) where charts are stored") | ||
20 | parser.add_option("-n", "--no-prune", action="store_false", dest="prune", default=True, | ||
21 | help="do not prune the process tree") | ||
22 | parser.add_option("-q", "--quiet", action="store_true", dest="quiet", default=False, | ||
23 | help="suppress informational messages") | ||
24 | parser.add_option("--very-quiet", action="store_true", dest="veryquiet", default=False, | ||
25 | help="suppress all messages except errors") | ||
26 | parser.add_option("--verbose", action="store_true", dest="verbose", default=False, | ||
27 | help="print all messages") | ||
28 | return parser | ||
29 | |||
30 | def _get_filename(paths, options): | ||
31 | """Construct a usable filename for outputs based on the paths and options given on the commandline.""" | ||
32 | dir = "" | ||
33 | file = "bootchart" | ||
34 | if options.output != None and not(os.path.isdir(options.output)): | ||
35 | return options.output | ||
36 | if options.output != None: | ||
37 | dir = options.output | ||
38 | if len(paths) == 1: | ||
39 | if os.path.isdir(paths[0]): | ||
40 | file = os.path.split(paths[0])[-1] | ||
41 | elif os.path.splitext(paths[0])[1] in [".tar", ".tgz", ".tar.gz"]: | ||
42 | file = os.path.splitext(paths[0])[0] | ||
43 | return os.path.join(dir, file + "." + options.format) | ||
44 | |||
def main(argv=None):
    """Command-line entry point.

    Parses options, parses the bootchart logs from the given paths, then
    either opens the interactive GUI or renders an image in batch mode.
    Returns a process exit status: 0 on success, 2 on usage/parse errors.
    """
    try:
        if argv is None:
            argv = sys.argv[1:]

        parser = _mk_options_parser()
        options, args = parser.parse_args(argv)

        if len(args) == 0:
            parser.error("insufficient arguments, expected at least one path.")
            return 2

        res = parsing.parse(args, options.prune)
        # Without an output format there is nothing to write, so fall
        # back to the interactive GUI in that case too.
        if options.interactive or options.format == None:
            gui.show(res)
        else:
            filename = _get_filename(args, options)
            batch.render(res, options.format, filename)
            # print-as-function with a single argument behaves identically
            # on Python 2 and 3 (the old code used a print statement).
            print("bootchart written to %s" % filename)
        return 0
    except parsing.ParseError as ex:
        # 'except X as e' (Python 2.6+) replaces the Python-2-only
        # 'except X, e' comma form.
        print("Parse error: %s" % ex)
        return 2


if __name__ == '__main__':
    sys.exit(main())
diff --git a/scripts/pybootchartgui/pybootchartgui/parsing.py b/scripts/pybootchartgui/pybootchartgui/parsing.py new file mode 100644 index 0000000000..a350a3eb08 --- /dev/null +++ b/scripts/pybootchartgui/pybootchartgui/parsing.py | |||
@@ -0,0 +1,223 @@ | |||
1 | from __future__ import with_statement | ||
2 | |||
3 | import os | ||
4 | import string | ||
5 | import re | ||
6 | import tarfile | ||
7 | from collections import defaultdict | ||
8 | |||
9 | from samples import * | ||
10 | from process_tree import ProcessTree | ||
11 | |||
class ParseError(Exception):
    """Signals a failure while parsing bootchart log data."""

    def __init__(self, value):
        # The message is kept on .value; __str__ echoes it verbatim.
        self.value = value

    def __str__(self):
        return self.value
19 | |||
20 | def _parse_headers(file): | ||
21 | """Parses the headers of the bootchart.""" | ||
22 | def parse((headers,last), line): | ||
23 | if '=' in line: last,value = map(string.strip, line.split('=', 1)) | ||
24 | else: value = line.strip() | ||
25 | headers[last] += value | ||
26 | return headers,last | ||
27 | return reduce(parse, file.read().split('\n'), (defaultdict(str),''))[0] | ||
28 | |||
29 | def _parse_timed_blocks(file): | ||
30 | """Parses (ie., splits) a file into so-called timed-blocks. A | ||
31 | timed-block consists of a timestamp on a line by itself followed | ||
32 | by zero or more lines of data for that point in time.""" | ||
33 | def parse(block): | ||
34 | lines = block.split('\n') | ||
35 | if not lines: | ||
36 | raise ParseError('expected a timed-block consisting a timestamp followed by data lines') | ||
37 | try: | ||
38 | return (int(lines[0]), lines[1:]) | ||
39 | except ValueError: | ||
40 | raise ParseError("expected a timed-block, but timestamp '%s' is not an integer" % lines[0]) | ||
41 | blocks = file.read().split('\n\n') | ||
42 | return [parse(block) for block in blocks if block.strip()] | ||
43 | |||
def _parse_proc_ps_log(file):
    """Parse a proc_ps.log into per-process samples and aggregate stats.

    Each timed block holds one /proc/<pid>/stat line per running process.
    See proc(5) for the field layout:

    {pid, comm, state, ppid, pgrp, session, tty_nr, tpgid, flags, minflt, cminflt, majflt, cmajflt, utime, stime,
     cutime, cstime, priority, nice, 0, itrealvalue, starttime, vsize, rss, rlim, startcode, endcode, startstack,
     kstkesp, kstkeip}

    Returns a ProcessStats covering every PID seen in the log.
    """
    processMap = {}
    ltime = 0          # timestamp of the previous block, for CPU deltas
    timed_blocks = _parse_timed_blocks(file)
    for time, lines in timed_blocks:
        for line in lines:
            tokens = line.split(' ')

            # comm may itself contain spaces; locate the token that closes
            # the parenthesised command name so later fields index correctly.
            offset = [index for index, token in enumerate(tokens[1:]) if token.endswith(')')][0]
            pid, cmd, state, ppid = int(tokens[0]), ' '.join(tokens[1:2+offset]), tokens[2+offset], int(tokens[3+offset])
            userCpu, sysCpu, stime= int(tokens[13+offset]), int(tokens[14+offset]), int(tokens[21+offset])

            if processMap.has_key(pid):
                process = processMap[pid]
                process.cmd = cmd.replace('(', '').replace(')', '') # why rename after latest name??
            else:
                # First sighting: start time is the earlier of the sample
                # time and the kernel-reported process start time.
                process = Process(pid, cmd, ppid, min(time, stime))
                processMap[pid] = process

            # A CPU sample needs a previous reading to diff against, so the
            # very first observation of a process produces no sample.
            if process.last_user_cpu_time is not None and process.last_sys_cpu_time is not None and ltime is not None:
                userCpuLoad, sysCpuLoad = process.calc_load(userCpu, sysCpu, time - ltime)
                cpuSample = CPUSample('null', userCpuLoad, sysCpuLoad, 0.0)
                process.samples.append(ProcessSample(time, state, cpuSample))

            process.last_user_cpu_time = userCpu
            process.last_sys_cpu_time = sysCpu
        ltime = time

    # Average distance between consecutive samples (integer floor
    # division under Python 2, where this module runs).
    startTime = timed_blocks[0][0]
    avgSampleLength = (ltime - startTime)/(len(timed_blocks)-1)

    # Link children to parents, then derive per-process duration/activity.
    for process in processMap.values():
        process.set_parent(processMap)

    for process in processMap.values():
        process.calc_stats(avgSampleLength)

    return ProcessStats(processMap.values(), avgSampleLength, startTime, ltime)
89 | |||
def _parse_proc_stat_log(file):
    """Turn a proc_stat.log into a list of normalized CPUSample objects.

    Only the aggregate "cpu" line of each timed block is used; any
    further statistics lines in the block are ignored.  Each sample is
    the delta against the previous block, normalized so that
    user + system + idle + iowait sums to (at most) 1.
    """
    samples = []
    prev = None
    for time, lines in _parse_timed_blocks(file):
        # CPU counters: {user, nice, system, idle, io_wait, irq, softirq}
        current = [int(field) for field in lines[0].split()[1:]]
        if prev:
            user = float((current[0] + current[1]) - (prev[0] + prev[1]))
            system = float((current[2] + current[5] + current[6]) - (prev[2] + prev[5] + prev[6]))
            idle = float(current[3] - prev[3])
            iowait = float(current[4] - prev[4])

            # Guard against a zero total so the divisions are safe.
            total = max(user + system + idle + iowait, 1)
            samples.append(CPUSample(time, user / total, system / total, iowait / total))
        prev = current
    return samples
109 | |||
110 | |||
def _parse_proc_disk_stat_log(file, numCpu):
    """
    Parse file for disk stats, but only look at the whole disks, eg. sda,
    not sda1, sda2 etc. The format of relevant lines should be:
    {major minor name rio rmerge rsect ruse wio wmerge wsect wuse running use aveq}
    """
    # Whole-disk device names only (hda, sdb, ...); partition names have
    # trailing digits and so fail the end-of-string anchor.
    DISK_REGEX = 'hd.$|sd.$'

    def is_relevant_line(line):
        return len(line.split()) == 14 and re.match(DISK_REGEX, line.split()[2])

    # One aggregated DiskStatSample per timed block: the raw counters of
    # all whole disks are summed together.
    disk_stat_samples = []

    for time, lines in _parse_timed_blocks(file):
        sample = DiskStatSample(time)
        relevant_tokens = [line.split() for line in lines if is_relevant_line(line)]

        for tokens in relevant_tokens:
            # sectors read, sectors written, time spent doing I/O
            disk, rsect, wsect, use = tokens[2], int(tokens[5]), int(tokens[9]), int(tokens[12])
            sample.add_diskdata([rsect, wsect, use])

        disk_stat_samples.append(sample)

    disk_stats = []
    for sample1, sample2 in zip(disk_stat_samples[:-1], disk_stat_samples[1:]):
        # NOTE: both the interval and the counter deltas below are computed
        # "backwards" (earlier minus later), so the two negative signs
        # cancel and throughput/utilization come out positive.  Do not
        # "fix" one of them without the other.
        interval = sample1.time - sample2.time
        sums = [ a - b for a, b in zip(sample1.diskdata, sample2.diskdata) ]
        readTput = sums[0] / 2.0 * 100.0 / interval
        writeTput = sums[1] / 2.0 * 100.0 / interval
        # presumably 'use' is milliseconds of I/O time, scaled to the
        # sample clock and spread over CPUs -- TODO confirm units.
        util = float( sums[2] ) / 10 / interval / numCpu
        # Clamp to the [0, 1] range.
        util = max(0.0, min(1.0, util))
        disk_stats.append(DiskSample(sample2.time, readTput, writeTput, util))

    return disk_stats
145 | |||
146 | |||
def get_num_cpus(headers):
    """Get the number of CPUs from the system.cpu header property. As the
    CPU utilization graphs are relative, the number of CPUs currently makes
    no difference."""
    if headers is None:
        return 1
    model = headers.get("system.cpu")
    if model is None:
        return 1
    # The CPU count is encoded in trailing parentheses, e.g. "... (2)".
    match = re.match(".*\\((\\d+)\\)", model)
    return int(match.group(1)) if match else 1
160 | |||
class ParserState:
    """Mutable accumulator for the four data sets a bootchart needs."""

    def __init__(self):
        # Each field stays None until the corresponding log is parsed.
        self.headers = None
        self.disk_stats = None
        self.ps_stats = None
        self.cpu_stats = None

    def valid(self):
        """True once all four inputs have been seen."""
        parts = (self.headers, self.disk_stats, self.ps_stats, self.cpu_stats)
        return all(part != None for part in parts)
170 | |||
171 | |||
172 | _relevant_files = set(["header", "proc_diskstats.log", "proc_ps.log", "proc_stat.log"]) | ||
173 | |||
def _do_parse(state, name, file):
    """Dispatch *file* to the parser matching *name* and store the result
    on *state*; unrecognized names leave state untouched.  Returns state."""
    if name == "header":
        state.headers = _parse_headers(file)
    elif name == "proc_ps.log":
        state.ps_stats = _parse_proc_ps_log(file)
    elif name == "proc_stat.log":
        state.cpu_stats = _parse_proc_stat_log(file)
    elif name == "proc_diskstats.log":
        # Disk utilization is scaled by the CPU count from the headers
        # (if they have been parsed already).
        state.disk_stats = _parse_proc_disk_stat_log(file, get_num_cpus(state.headers))
    return state
184 | |||
def parse_file(state, filename):
    """Parse one on-disk log file into *state*.  Files whose basename is
    not one of the recognized log names are silently ignored."""
    name = os.path.basename(filename)
    if name not in _relevant_files:
        return state
    with open(filename, "rb") as log:
        return _do_parse(state, name, log)
191 | |||
192 | def parse_paths(state, paths): | ||
193 | for path in paths: | ||
194 | root,extension = os.path.splitext(path) | ||
195 | if not(os.path.exists(path)): | ||
196 | print "warning: path '%s' does not exist, ignoring." % path | ||
197 | continue | ||
198 | if os.path.isdir(path): | ||
199 | files = [ f for f in [os.path.join(path, f) for f in os.listdir(path)] if os.path.isfile(f) ] | ||
200 | files.sort() | ||
201 | state = parse_paths(state, files) | ||
202 | elif extension in [".tar", ".tgz", ".tar.gz"]: | ||
203 | tf = None | ||
204 | try: | ||
205 | tf = tarfile.open(path, 'r:*') | ||
206 | for name in tf.getnames(): | ||
207 | state = _do_parse(state, name, tf.extractfile(name)) | ||
208 | except tarfile.ReadError, error: | ||
209 | raise ParseError("error: could not read tarfile '%s': %s." % (path, error)) | ||
210 | finally: | ||
211 | if tf != None: | ||
212 | tf.close() | ||
213 | else: | ||
214 | state = parse_file(state, path) | ||
215 | return state | ||
216 | |||
def parse(paths, prune):
    """Parse all *paths* and return the (headers, cpu_stats, disk_stats,
    proc_tree) tuple the renderers consume.  Raises ParseError when the
    inputs do not add up to a complete bootchart."""
    state = parse_paths(ParserState(), paths)
    if not state.valid():
        raise ParseError("empty state: '%s' does not contain a valid bootchart" % ", ".join(paths))
    # The profiled application's name steers the logger merge below.
    app = state.headers.get("profile.process")
    tree = ProcessTree(state.ps_stats, app, prune)
    return (state.headers, state.cpu_stats, state.disk_stats, tree)
diff --git a/scripts/pybootchartgui/pybootchartgui/process_tree.py b/scripts/pybootchartgui/pybootchartgui/process_tree.py new file mode 100644 index 0000000000..bde29ebda8 --- /dev/null +++ b/scripts/pybootchartgui/pybootchartgui/process_tree.py | |||
@@ -0,0 +1,270 @@ | |||
class ProcessTree:
    """ProcessTree encapsulates a process tree. The tree is built from log files
    retrieved during the boot process. When building the process tree, it is
    pruned and merged in order to be able to visualize it in a comprehensible
    manner.

    The following pruning techniques are used:

    * idle processes that keep running during the last process sample
      (which is a heuristic for a background processes) are removed,
    * short-lived processes (i.e. processes that only live for the
      duration of two samples or less) are removed,
    * the processes used by the boot logger are removed,
    * exploders (i.e. processes that are known to spawn huge meaningless
      process subtrees) have their subtrees merged together,
    * siblings (i.e. processes with the same command line living
      concurrently -- thread heuristic) are merged together,
    * process runs (unary trees with processes sharing the command line)
      are merged together.

    """
    LOGGER_PROC = 'bootchartd'
    EXPLODER_PROCESSES = set(['hwup'])

    def __init__(self, psstats, monitoredApp, prune, for_testing = False):
        """Build (and optionally prune/merge) the tree from *psstats*.

        monitoredApp -- command name of the profiled application, used by
                        the logger merge; may be None.
        prune        -- apply the aggressive pruning/merging passes.
        for_testing  -- stop right after the raw tree is built, so unit
                        tests can drive the merge passes individually.
        """
        self.process_tree = []
        self.psstats = psstats
        self.process_list = sorted(psstats.process_list, key = lambda p: p.pid)
        self.sample_period = psstats.sample_period

        self.build()
        self.update_ppids_for_daemons(self.process_list)

        self.start_time = self.get_start_time(self.process_tree)
        self.end_time = self.get_end_time(self.process_tree)
        self.duration = self.end_time - self.start_time

        if for_testing:
            return

        # print 'proc_tree before prune: num_proc=%i, duration=%i' % (self.num_nodes(self.process_list), self.duration)

        # NOTE: prints are parenthesized -- identical output on Python 2
        # (single argument), and valid syntax on Python 3.
        removed = self.merge_logger(self.process_tree, self.LOGGER_PROC, monitoredApp, False)
        print("Merged %i logger processes" % removed)

        if prune:
            removed = self.prune(self.process_tree, None)
            print("Pruned %i processes" % removed)
            removed = self.merge_exploders(self.process_tree, self.EXPLODER_PROCESSES)
            print("Pruned %i exploders" % removed)
            removed = self.merge_siblings(self.process_tree)
            print("Pruned %i threads" % removed)
            removed = self.merge_runs(self.process_tree)
            print("Pruned %i runs" % removed)

        self.sort(self.process_tree)

        # Recompute the time window: pruning may have dropped the extremes.
        self.start_time = self.get_start_time(self.process_tree)
        self.end_time = self.get_end_time(self.process_tree)
        self.duration = self.end_time - self.start_time

        self.num_proc = self.num_nodes(self.process_tree)

    def build(self):
        """Build the process tree from the list of top samples."""
        self.process_tree = []
        for proc in self.process_list:
            if not proc.parent:
                self.process_tree.append(proc)
            else:
                proc.parent.child_list.append(proc)

    def sort(self, process_subtree):
        """Sort process tree (recursively, by pid)."""
        for p in process_subtree:
            p.child_list.sort(key = lambda p: p.pid)
            self.sort(p.child_list)

    def num_nodes(self, process_list):
        """Counts the number of nodes in the specified process tree."""
        nodes = 0
        for proc in process_list:
            nodes = nodes + self.num_nodes(proc.child_list)
        return nodes + len(process_list)

    def get_start_time(self, process_subtree):
        """Returns the start time of the process subtree. This is the start
        time of the earliest process.

        """
        # Sentinel: effectively +infinity for an empty subtree.
        if not process_subtree:
            return 100000000
        return min( [min(proc.start_time, self.get_start_time(proc.child_list)) for proc in process_subtree] )

    def get_end_time(self, process_subtree):
        """Returns the end time of the process subtree. This is the end time
        of the last collected sample.

        """
        # Sentinel: effectively -infinity for an empty subtree.
        if not process_subtree:
            return -100000000
        return max( [max(proc.start_time + proc.duration, self.get_end_time(proc.child_list)) for proc in process_subtree] )

    def get_max_pid(self, process_subtree):
        """Returns the max PID found in the process tree."""
        if not process_subtree:
            return -100000000
        return max( [max(proc.pid, self.get_max_pid(proc.child_list)) for proc in process_subtree] )

    def update_ppids_for_daemons(self, process_list):
        """Fedora hack: when loading the system services from rc, runuser(1)
        is used. This sets the PPID of all daemons to 1, skewing
        the process tree. Try to detect this and set the PPID of
        these processes the PID of rc.

        """
        rcstartpid = -1
        rcendpid = -1
        rcproc = None
        for p in process_list:
            if p.cmd == "rc" and p.ppid == 1:
                rcproc = p
                rcstartpid = p.pid
                rcendpid = self.get_max_pid(p.child_list)
        if rcstartpid != -1 and rcendpid != -1:
            # Re-parent processes spawned during the rc window to rc.
            for p in process_list:
                if p.pid > rcstartpid and p.pid < rcendpid and p.ppid == 1:
                    p.ppid = rcstartpid
                    p.parent = rcproc
            for p in process_list:
                p.child_list = []
            self.build()

    def prune(self, process_subtree, parent):
        """Prunes the process tree by removing idle processes and processes
        that only live for the duration of a single top sample. Sibling
        processes with the same command line (i.e. threads) are merged
        together. This filters out sleepy background processes, short-lived
        processes and bootcharts' analysis tools.
        """
        def is_idle_background_process_without_children(p):
            process_end = p.start_time + p.duration
            return not p.active and \
                   process_end >= self.start_time + self.duration and \
                   p.start_time > self.start_time and \
                   p.duration > 0.9 * self.duration and \
                   self.num_nodes(p.child_list) == 0

        num_removed = 0
        idx = 0
        while idx < len(process_subtree):
            p = process_subtree[idx]
            # Root processes with children are never pruned themselves.
            if parent != None or len(p.child_list) == 0:

                prune = False
                if is_idle_background_process_without_children(p):
                    prune = True
                elif p.duration <= 2 * self.sample_period:
                    # short-lived process
                    prune = True

                if prune:
                    # Splice the children into the pruned node's place and
                    # re-examine the same index.
                    process_subtree.pop(idx)
                    for c in p.child_list:
                        process_subtree.insert(idx, c)
                    num_removed += 1
                    continue
                else:
                    num_removed += self.prune(p.child_list, p)
            else:
                num_removed += self.prune(p.child_list, p)
            idx += 1

        return num_removed

    def merge_logger(self, process_subtree, logger_proc, monitored_app, app_tree):
        """Merges the logger's process subtree. The logger will typically
        spawn lots of sleep and cat processes, thus polluting the
        process tree.

        """
        num_removed = 0
        for p in process_subtree:
            is_app_tree = app_tree
            if logger_proc == p.cmd and not app_tree:
                is_app_tree = True
                num_removed += self.merge_logger(p.child_list, logger_proc, monitored_app, is_app_tree)
                # don't remove the logger itself
                continue

            # The monitored application's own subtree must survive.
            if app_tree and monitored_app != None and monitored_app == p.cmd:
                is_app_tree = False

            if is_app_tree:
                for child in p.child_list:
                    self.__merge_processes(p, child)
                    num_removed += 1
                p.child_list = []
            else:
                num_removed += self.merge_logger(p.child_list, logger_proc, monitored_app, is_app_tree)
        return num_removed

    def merge_exploders(self, process_subtree, processes):
        """Merges specific process subtrees (used for processes which usually
        spawn huge meaningless process trees).

        """
        num_removed = 0
        for p in process_subtree:
            # BUG FIX: the original tested "processes in processes" (the
            # name set against itself), which can never select an exploder;
            # the intent is to match the process command against the set.
            if p.cmd in processes and len(p.child_list) > 0:
                # NOTE(review): getProcessMap is not defined on this class
                # in this file -- presumably it should collect the subtree
                # into a pid->process map; confirm before relying on the
                # exploder merge.
                subtreemap = self.getProcessMap(p.child_list)
                for child in subtreemap.values():
                    self.__merge_processes(p, child)
                num_removed += len(subtreemap)
                p.child_list = []
                p.cmd += " (+)"
            else:
                num_removed += self.merge_exploders(p.child_list, processes)
        return num_removed

    def merge_siblings(self,process_subtree):
        """Merges thread processes. Sibling processes with the same command
        line are merged together.

        """
        num_removed = 0
        idx = 0
        while idx < len(process_subtree)-1:
            p = process_subtree[idx]
            nextp = process_subtree[idx+1]
            if nextp.cmd == p.cmd:
                # Absorb the sibling, then step back so the merged node is
                # compared against its new successor as well.
                process_subtree.pop(idx+1)
                idx -= 1
                num_removed += 1
                p.child_list.extend(nextp.child_list)
                self.__merge_processes(p, nextp)
            num_removed += self.merge_siblings(p.child_list)
            idx += 1
        if len(process_subtree) > 0:
            p = process_subtree[-1]
            num_removed += self.merge_siblings(p.child_list)
        return num_removed

    def merge_runs(self, process_subtree):
        """Merges process runs. Single child processes which share the same
        command line with the parent are merged.

        """
        num_removed = 0
        idx = 0
        while idx < len(process_subtree):
            p = process_subtree[idx]
            if len(p.child_list) == 1 and p.child_list[0].cmd == p.cmd:
                # Collapse the child into the parent and re-examine the
                # same node (the grandchild may share the name too).
                child = p.child_list[0]
                p.child_list = list(child.child_list)
                self.__merge_processes(p, child)
                num_removed += 1
                continue
            num_removed += self.merge_runs(p.child_list)
            idx += 1
        return num_removed

    def __merge_processes(self, p1, p2):
        """Merges two process samples: p1 absorbs p2's samples and their
        combined time span."""
        p1.samples.extend(p2.samples)
        p1time = p1.start_time
        p2time = p2.start_time
        p1.start_time = min(p1time, p2time)
        pendtime = max(p1time + p1.duration, p2time + p2.duration)
        p1.duration = pendtime - p1.start_time
diff --git a/scripts/pybootchartgui/pybootchartgui/samples.py b/scripts/pybootchartgui/pybootchartgui/samples.py new file mode 100644 index 0000000000..c94b30d032 --- /dev/null +++ b/scripts/pybootchartgui/pybootchartgui/samples.py | |||
@@ -0,0 +1,93 @@ | |||
class DiskStatSample:
    """Accumulated raw disk counters for a single point in time."""

    def __init__(self, time):
        self.time = time
        # Running totals over all disks folded into this sample.
        self.diskdata = [0, 0, 0]

    def add_diskdata(self, new_diskdata):
        self.diskdata = [total + delta for total, delta in zip(self.diskdata, new_diskdata)]
7 | |||
class CPUSample:
    """One CPU utilization reading: user, system and I/O load at *time*."""

    def __init__(self, time, user, sys, io):
        self.time = time
        self.user = user
        self.sys = sys
        self.io = io

    def __str__(self):
        fields = (self.time, self.user, self.sys, self.io)
        return "\t".join(str(field) for field in fields)
17 | |||
class ProcessSample:
    """A process observation (scheduler state plus CPU usage) at *time*."""

    def __init__(self, time, state, cpu_sample):
        self.time = time
        self.state = state
        self.cpu_sample = cpu_sample

    def __str__(self):
        return "%s\t%s\t%s" % (self.time, self.state, self.cpu_sample)
26 | |||
class ProcessStats:
    """Everything extracted from proc_ps.log: the processes plus the
    sampling interval and the overall observation window."""

    def __init__(self, process_list, sample_period, start_time, end_time):
        self.process_list = process_list
        self.sample_period = sample_period
        self.start_time = start_time
        self.end_time = end_time
33 | |||
class Process:
    """A single process observed during boot, with its samples and its
    position in the process tree."""

    def __init__(self, pid, cmd, ppid, start_time):
        self.pid = pid
        # The command arrives wrapped in parentheses from /proc/<pid>/stat.
        self.cmd = cmd.strip('(').strip(')')
        self.ppid = ppid
        self.start_time = start_time
        self.samples = []        # ProcessSample objects, in time order
        self.parent = None
        self.child_list = []

        self.duration = 0
        self.active = None       # computed later by calc_stats()

        # Previous cumulative CPU counters, used by calc_load() deltas.
        self.last_user_cpu_time = None
        self.last_sys_cpu_time = None

    def __str__(self):
        return " ".join([str(self.pid), self.cmd, str(self.ppid), '[ ' + str(len(self.samples)) + ' samples ]' ])

    def calc_stats(self, samplePeriod):
        """Derive start_time, duration and the 'active' flag from the
        collected samples."""
        if self.samples:
            firstSample = self.samples[0]
            lastSample = self.samples[-1]
            self.start_time = min(firstSample.time, self.start_time)
            self.duration = lastSample.time - self.start_time + samplePeriod

        # A process counts as active when more than two samples show CPU
        # usage; uninterruptible ('D') sleeps count as activity too.
        activeCount = sum( [1 for sample in self.samples if sample.cpu_sample and sample.cpu_sample.sys + sample.cpu_sample.user + sample.cpu_sample.io > 0.0] )
        activeCount = activeCount + sum( [1 for sample in self.samples if sample.state == 'D'] )
        self.active = (activeCount>2)

    def calc_load(self, userCpu, sysCpu, interval):
        """Return (user, sys) CPU load over *interval*, normalized so the
        pair never sums past 1.0.  Requires last_*_cpu_time to be set."""
        userCpuLoad = float(userCpu - self.last_user_cpu_time) / interval
        sysCpuLoad = float(sysCpu - self.last_sys_cpu_time) / interval
        cpuLoad = userCpuLoad + sysCpuLoad
        # normalize
        if cpuLoad > 1.0:
            userCpuLoad = userCpuLoad / cpuLoad
            sysCpuLoad = sysCpuLoad / cpuLoad
        return (userCpuLoad, sysCpuLoad)

    def set_parent(self, processMap):
        """Resolve self.parent from a pid->Process map; warn (except for
        init, pid 1) when the parent pid is unknown."""
        if self.ppid != None:
            self.parent = processMap.get(self.ppid)
            if self.parent == None and self.pid > 1:
                # Parenthesized print: same output on Python 2, valid on 3.
                print("warning: no parent for pid '%i' with ppid '%i'" % (self.pid,self.ppid))

    def get_end_time(self):
        """End of this process's lifetime on the chart's time axis."""
        return self.start_time + self.duration
82 | |||
class DiskSample:
    """Derived disk throughput and utilization at one point in time."""

    def __init__(self, time, read, write, util):
        self.time = time
        self.read = read
        self.write = write
        self.util = util
        # Total throughput is simply read plus write.
        self.tput = read + write

    def __str__(self):
        fields = (self.time, self.read, self.write, self.util)
        return "\t".join(str(field) for field in fields)
93 | |||
diff --git a/scripts/pybootchartgui/pybootchartgui/tests/parser_test.py b/scripts/pybootchartgui/pybootchartgui/tests/parser_test.py new file mode 100644 index 0000000000..574c2c7a2b --- /dev/null +++ b/scripts/pybootchartgui/pybootchartgui/tests/parser_test.py | |||
@@ -0,0 +1,93 @@ | |||
1 | import sys, os, re, struct, operator, math | ||
2 | from collections import defaultdict | ||
3 | import unittest | ||
4 | |||
5 | sys.path.insert(0, os.getcwd()) | ||
6 | |||
7 | import parsing | ||
8 | |||
9 | debug = False | ||
10 | |||
def floatEq(f1, f2):
    """Approximate float equality with a fixed absolute tolerance."""
    return abs(f1 - f2) < 0.00001
13 | |||
class TestBCParser(unittest.TestCase):
    """Exercises the parsers against the canned logs in examples/1,
    cross-checking values with pre-computed extract files (produced by
    the original Java implementation)."""

    def setUp(self):
        self.name = "My first unittest"
        # All fixture files live under the example bootchart directory.
        self.rootdir = '../examples/1'

    def mk_fname(self,f):
        """Return the path of fixture file *f* under rootdir."""
        return os.path.join(self.rootdir, f)

    def testParseHeader(self):
        # The example header yields six entries and reports 2 CPUs.
        state = parsing.parse_file(parsing.ParserState(), self.mk_fname('header'))
        self.assertEqual(6, len(state.headers))
        self.assertEqual(2, parsing.get_num_cpus(state.headers))

    def test_parseTimedBlocks(self):
        # The example disk log is expected to yield 141 disk samples.
        state = parsing.parse_file(parsing.ParserState(), self.mk_fname('proc_diskstats.log'))
        self.assertEqual(141, len(state.disk_stats))

    def testParseProcPsLog(self):
        """Each parsed process must match the pid/cmd/ppid/sample-count
        columns of the extract file, in pid order."""
        state = parsing.parse_file(parsing.ParserState(), self.mk_fname('proc_ps.log'))
        samples = state.ps_stats
        processes = samples.process_list
        sorted_processes = sorted(processes, key=lambda p: p.pid )

        for index, line in enumerate(open(self.mk_fname('extract2.proc_ps.log'))):
            tokens = line.split();
            process = sorted_processes[index]
            if debug:
                print tokens[0:4]
                print process.pid, process.cmd, process.ppid, len(process.samples)
                print '-------------------'

            self.assertEqual(tokens[0], str(process.pid))
            self.assertEqual(tokens[1], str(process.cmd))
            self.assertEqual(tokens[2], str(process.ppid))
            self.assertEqual(tokens[3], str(len(process.samples)))


    def testparseProcDiskStatLog(self):
        """Disk samples must match the extract within float tolerance;
        a 2-CPU header is injected since it scales utilization."""
        state_with_headers = parsing.parse_file(parsing.ParserState(), self.mk_fname('header'))
        state_with_headers.headers['system.cpu'] = 'xxx (2)'
        samples = parsing.parse_file(state_with_headers, self.mk_fname('proc_diskstats.log')).disk_stats
        self.assertEqual(141, len(samples))

        for index, line in enumerate(open(self.mk_fname('extract.proc_diskstats.log'))):
            tokens = line.split('\t')
            sample = samples[index]
            if debug:
                print line.rstrip(),
                print sample
                print '-------------------'

            self.assertEqual(tokens[0], str(sample.time))
            self.assert_(floatEq(float(tokens[1]), sample.read))
            self.assert_(floatEq(float(tokens[2]), sample.write))
            self.assert_(floatEq(float(tokens[3]), sample.util))

    def testparseProcStatLog(self):
        """CPU samples must match the extract within float tolerance."""
        samples = parsing.parse_file(parsing.ParserState(), self.mk_fname('proc_stat.log')).cpu_stats
        self.assertEqual(141, len(samples))

        for index, line in enumerate(open(self.mk_fname('extract.proc_stat.log'))):
            tokens = line.split('\t')
            sample = samples[index]
            if debug:
                print line.rstrip()
                print sample
                print '-------------------'
            self.assert_(floatEq(float(tokens[0]), sample.time))
            self.assert_(floatEq(float(tokens[1]), sample.user))
            self.assert_(floatEq(float(tokens[2]), sample.sys))
            self.assert_(floatEq(float(tokens[3]), sample.io))

    def testParseLogDir(self):
        # End-to-end: parsing the directory returns the 4-element result
        # tuple (headers, cpu_stats, disk_stats, proc_tree).
        res = parsing.parse([self.rootdir], False)
        self.assertEqual(4, len(res))
90 | |||
# Run the suite when executed directly.
if __name__ == '__main__':
    unittest.main()
93 | |||
diff --git a/scripts/pybootchartgui/pybootchartgui/tests/process_tree_test.py b/scripts/pybootchartgui/pybootchartgui/tests/process_tree_test.py new file mode 100644 index 0000000000..971e125eab --- /dev/null +++ b/scripts/pybootchartgui/pybootchartgui/tests/process_tree_test.py | |||
@@ -0,0 +1,78 @@ | |||
1 | import sys | ||
2 | import os | ||
3 | import unittest | ||
4 | |||
5 | sys.path.insert(0, os.getcwd()) | ||
6 | |||
7 | import parsing | ||
8 | import process_tree | ||
9 | |||
class TestProcessTree(unittest.TestCase):
    """Validate ProcessTree construction and each of its post-processing
    passes (merge_logger, prune, merge_exploders, merge_siblings,
    merge_runs) against reference extracts produced by the original
    Java bootchart renderer."""

    def setUp(self):
        self.name = "Process tree unittest"
        # NOTE(review): path is relative -- tests must be run from the
        # pybootchartgui package directory for the example data to resolve.
        self.rootdir = '../examples/1'
        self.ps_stats = parsing.parse_file(parsing.ParserState(), self.mk_fname('proc_ps.log')).ps_stats
        self.processtree = process_tree.ProcessTree(self.ps_stats, None, False, for_testing = True)

    def mk_fname(self, f):
        """Return the path of file *f* inside the example log directory."""
        return os.path.join(self.rootdir, f)

    def flatten(self, process_tree):
        """Return every process in *process_tree* as a flat list in
        depth-first (pre-order) traversal order -- the order used by the
        Java reference extracts."""
        flattened = []
        for p in process_tree:
            flattened.append(p)
            flattened.extend(self.flatten(p.child_list))
        return flattened

    def checkAgainstJavaExtract(self, filename, process_tree):
        """Compare the flattened *process_tree*, process by process, with
        the tab-separated reference file *filename* (columns: pid, cmd,
        start time, duration, child count, sample count)."""
        extract = open(filename)
        try:
            for expected, actual in zip(extract, self.flatten(process_tree)):
                tokens = expected.split('\t')
                self.assertEqual(int(tokens[0]), actual.pid)
                self.assertEqual(tokens[1], actual.cmd)
                # Reference times carry a factor of 10 relative to the
                # parsed values.
                self.assertEqual(long(tokens[2]), 10 * actual.start_time)
                # Durations only need to agree within 5 units.  Compare the
                # absolute difference: the original one-sided test
                # (expected - actual < 5) passed unconditionally whenever
                # the parsed duration exceeded the reference value.
                self.assert_(abs(long(tokens[3]) - 10 * actual.duration) < 5, "duration")
                self.assertEqual(int(tokens[4]), len(actual.child_list))
                self.assertEqual(int(tokens[5]), len(actual.samples))
        finally:
            # Don't leak the extract file handle across tests.
            extract.close()

    def _process_tree_after(self, pass_count):
        """Apply the first *pass_count* post-processing passes to the tree
        (in their canonical order) and return the resulting process list.
        Each test below exercises one more pass than the previous one."""
        tree = self.processtree
        # Lambdas re-read tree.process_tree at call time, so each pass sees
        # the tree as left by the previous one (matching the original code).
        passes = (
            lambda: tree.merge_logger(tree.process_tree, 'bootchartd', None, False),
            lambda: tree.prune(tree.process_tree, None),
            lambda: tree.merge_exploders(tree.process_tree, set(['hwup'])),
            lambda: tree.merge_siblings(tree.process_tree),
            lambda: tree.merge_runs(tree.process_tree),
        )
        for apply_pass in passes[:pass_count]:
            apply_pass()
        return tree.process_tree

    def testBuild(self):
        """Freshly built tree (no passes) matches extract 1."""
        self.checkAgainstJavaExtract(self.mk_fname('extract.processtree.1.log'),
                                     self._process_tree_after(0))

    def testMergeLogger(self):
        """Tree after merge_logger matches extract 2."""
        self.checkAgainstJavaExtract(self.mk_fname('extract.processtree.2.log'),
                                     self._process_tree_after(1))

    def testPrune(self):
        """Tree after merge_logger + prune matches extract 3b."""
        self.checkAgainstJavaExtract(self.mk_fname('extract.processtree.3b.log'),
                                     self._process_tree_after(2))

    def testMergeExploders(self):
        """Tree after passes up to merge_exploders matches extract 3c."""
        self.checkAgainstJavaExtract(self.mk_fname('extract.processtree.3c.log'),
                                     self._process_tree_after(3))

    def testMergeSiblings(self):
        """Tree after passes up to merge_siblings matches extract 3d."""
        self.checkAgainstJavaExtract(self.mk_fname('extract.processtree.3d.log'),
                                     self._process_tree_after(4))

    def testMergeRuns(self):
        """Tree after all five passes matches extract 3e."""
        self.checkAgainstJavaExtract(self.mk_fname('extract.processtree.3e.log'),
                                     self._process_tree_after(5))
76 | |||
# Allow this test module to be run directly from the command line.
if __name__ == '__main__':
    unittest.main()