50 changes: 47 additions & 3 deletions pybootchartgui/draw.py
@@ -308,16 +308,60 @@ def render_charts(ctx, options, clip, trace, curr_y, w, h, sec_w):
if clip_visible (clip, chart_rect):
draw_box_ticks (ctx, chart_rect, sec_w)
draw_annotations (ctx, proc_tree, trace.times, chart_rect)

cpu_stats = trace.cpu_stats
# Backwards compatible: cpu_stats can be List[CPUSample] or
# {'all': List[CPUSample], 'per_cpu': {idx: List[CPUSample]}}
if isinstance(cpu_stats, dict):
all_cpu = cpu_stats.get('all', [])
per_cpu = cpu_stats.get('per_cpu', {})
else:
all_cpu = cpu_stats
per_cpu = {}

draw_chart (ctx, IO_COLOR, True, chart_rect, \
-        [(sample.time, sample.user + sample.sys + sample.io) for sample in trace.cpu_stats], \
[(sample.time, sample.user + sample.sys + sample.io) for sample in all_cpu], \
proc_tree, None)
# render CPU load
draw_chart (ctx, CPU_COLOR, True, chart_rect, \
-        [(sample.time, sample.user + sample.sys) for sample in trace.cpu_stats], \
[(sample.time, sample.user + sample.sys) for sample in all_cpu], \
proc_tree, None)

curr_y = curr_y + 30 + bar_h

# If per-cpu data is available, render each core as a separate line chart
# (filled CPU and filled I/O wait) stacked vertically.
if per_cpu:
# deterministic per-core colors
def core_color(idx, alpha=1.0):
# Spread colors across hue wheel.
h = ((idx * 0.61803398875) % 1.0)
r, g, b = colorsys.hsv_to_rgb(h, 0.45, 0.90)
return (r, g, b, alpha)
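        # For reference (not part of this patch): multiples of the golden-ratio
        # conjugate taken modulo 1 are roughly evenly distributed, so successive
        # cores get well-separated hues: core 0 -> h=0.00, 1 -> 0.62, 2 -> 0.24, 3 -> 0.85.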

ctx.set_font_size(LEGEND_FONT_SIZE)
# render a small legend header once
draw_text(ctx, "Per-core CPU utilization", TEXT_COLOR, off_x, curr_y+20)

ordered = sorted(per_cpu.items(), key=lambda kv: kv[0])
for idx, series in ordered:
chart_rect = (off_x, curr_y+30, w, bar_h)
if clip_visible(clip, chart_rect):
draw_box_ticks(ctx, chart_rect, sec_w)
draw_annotations(ctx, proc_tree, trace.times, chart_rect)
c = core_color(idx, 0.85)
io_c = tuple(list(c[0:3]) + [0.35])
draw_chart(ctx, io_c, True, chart_rect,
[(s.time, s.user + s.sys + s.io) for s in series],
proc_tree, None)
draw_chart(ctx, c, True, chart_rect,
[(s.time, s.user + s.sys) for s in series],
proc_tree, None)
# core label
draw_text(ctx, f"cpu{idx}", TEXT_COLOR, off_x + 5, curr_y + 30 + 15)

curr_y = curr_y + 30 + bar_h

# render second chart
draw_legend_line(ctx, "Disk throughput", DISK_TPUT_COLOR, off_x, curr_y+20, leg_s)
draw_legend_box(ctx, "Disk utilization", IO_COLOR, off_x + 120, curr_y+20, leg_s)
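The same list-or-dict check reappears in the parser tests below; other consumers of trace.cpu_stats could share a small helper along these lines (a sketch only — normalize_cpu_stats is not part of this patch):

def normalize_cpu_stats(cpu_stats):
    """Return (all_cpu, per_cpu) for either shape of trace.cpu_stats:
    the legacy List[CPUSample], or the new
    {'all': List[CPUSample], 'per_cpu': Dict[int, List[CPUSample]]}."""
    if isinstance(cpu_stats, dict):
        return cpu_stats.get('all', []), cpu_stats.get('per_cpu', {})
    return cpu_stats, {}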
@@ -469,7 +513,7 @@ def draw_header (ctx, headers, duration):
toshow = [
('system.uname', 'uname', lambda s: s),
('system.release', 'release', lambda s: s),
-        ('system.cpu', 'CPU', lambda s: re.sub('model name\s*:\s*', '', s, 1)),
('system.cpu', 'CPU', lambda s: re.sub(r'model name\s*:\s*', '', s, 1)),
('system.kernel.options', 'kernel options', lambda s: s),
]
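For context on the raw-string regex changes in this hunk and the parsing.py hunks below: '\s' inside a plain string literal is an invalid escape sequence that Python has warned about since 3.6 and reports as a SyntaxWarning from 3.12 onward; the r'' prefix avoids the warning without changing the compiled pattern. A minimal illustration, with an invented cpuinfo line:

import re

cpuinfo = "model name\t: Intel(R) Core(TM) i7-8650U"
# Same match behaviour as the old non-raw pattern, but no escape-sequence warning.
print(re.sub(r'model name\s*:\s*', '', cpuinfo, 1))   # -> Intel(R) Core(TM) i7-8650U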

105 changes: 84 additions & 21 deletions pybootchartgui/parsing.py
@@ -421,37 +421,100 @@ def _parse_taskstats_log(writer, file):
return ProcessStats (writer, processMap, len (timed_blocks), avgSampleLength, startTime, ltime)

def _parse_proc_stat_log(file):
-    samples = []
-    ltimes = None
"""Parse /proc/stat samples.

Historically this returned only overall CPU utilization samples based on
the first 'cpu' line. This function now also parses per-core lines
(cpu0, cpu1, ...).

Return value (backwards compatible):
- If only the aggregate 'cpu' line is found: List[CPUSample]
- Otherwise: {'all': List[CPUSample], 'per_cpu': Dict[int, List[CPUSample]]}
"""
all_samples = []
per_cpu_samples = defaultdict(list)

# last observed raw times
last_all = None
last_per_cpu = {}

for time, lines in _parse_timed_blocks(file):
        # skip empty lines
if not lines:
continue
-        tokens = lines[0].split()
-        if len(tokens) < 8:

# Find all cpu lines in this block. Format:
# cpu user nice system idle iowait irq softirq [steal [guest [guest_nice]]]
cpu_lines = []
for line in lines:
if not line:
continue
if line.startswith('cpu'):
cpu_lines.append(line)

if not cpu_lines:
continue
-        # CPU times {user, nice, system, idle, io_wait, irq, softirq}
-        times = [ int(token) for token in tokens[1:] ]
-        if ltimes:
-            user = float((times[0] + times[1]) - (ltimes[0] + ltimes[1]))
-            system = float((times[2] + times[5] + times[6]) - (ltimes[2] + ltimes[5] + ltimes[6]))
-            idle = float(times[3] - ltimes[3])
-            iowait = float(times[4] - ltimes[4])
-
-            aSum = max(user + system + idle + iowait, 1)
-            samples.append( CPUSample(time, user/aSum, system/aSum, iowait/aSum) )
def to_times(tokens):
# Need at least user..softirq
if len(tokens) < 8:
return None
try:
return [int(t) for t in tokens[1:]]
except ValueError:
return None

def mk_sample(now, prev):
user = float((now[0] + now[1]) - (prev[0] + prev[1]))
system = float((now[2] + now[5] + now[6]) - (prev[2] + prev[5] + prev[6]))
idle = float(now[3] - prev[3])
iowait = float(now[4] - prev[4])
aSum = max(user + system + idle + iowait, 1.0)
return CPUSample(time, user / aSum, system / aSum, iowait / aSum)

# Parse aggregate line (exactly 'cpu') if present
for line in cpu_lines:
tokens = line.split()
if not tokens:
continue
name = tokens[0]
if name == 'cpu':
now = to_times(tokens)
if now is None:
continue
if last_all is not None:
all_samples.append(mk_sample(now, last_all))
last_all = now
break

# Parse per-core lines
for line in cpu_lines:
tokens = line.split()
if not tokens:
continue
name = tokens[0]
if len(name) > 3 and name.startswith('cpu') and name[3:].isdigit():
idx = int(name[3:])
now = to_times(tokens)
if now is None:
continue
prev = last_per_cpu.get(idx)
if prev is not None:
per_cpu_samples[idx].append(mk_sample(now, prev))
last_per_cpu[idx] = now

# Backwards compatibility: old code expects a list of CPUSample
if not per_cpu_samples:
return all_samples

-        ltimes = times
-        # skip the rest of statistics lines
-    return samples
return {'all': all_samples, 'per_cpu': dict(per_cpu_samples)}
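# Illustration only, not part of the patch: the delta arithmetic in mk_sample,
# worked through with invented jiffy values for one core across two snapshots.
#          user nice system idle iowait irq softirq
# prev_t = [100,  0,    50,  800,    50,   0,      0]
# now_t  = [160,  0,    70,  900,    70,   0,      0]
# user   = (160 + 0) - (100 + 0)       = 60
# system = (70 + 0 + 0) - (50 + 0 + 0) = 20
# idle   = 900 - 800                   = 100
# iowait = 70 - 50                     = 20
# total  = 60 + 20 + 100 + 20          = 200
# -> CPUSample(time, user=0.30, sys=0.10, io=0.10) for this interval.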

def _parse_proc_disk_stat_log(file, numCpu):
"""
Parse file for disk stats, but only look at the whole device, eg. sda,
not sda1, sda2 etc. The format of relevant lines should be:
{major minor name rio rmerge rsect ruse wio wmerge wsect wuse running use aveq}
"""
-    disk_regex_re = re.compile ('^([hsv]d.|mtdblock\d|mmcblk\d|cciss/c\d+d\d+.*)$')
disk_regex_re = re.compile (r'^([hsv]d.|mtdblock\d|mmcblk\d|cciss/c\d+d\d+.*)$')

# this gets called an awful lot.
def is_relevant_line(linetokens):
@@ -520,8 +583,8 @@ def _parse_proc_meminfo_log(file):
# [ 0.039993] calling migration_init+0x0/0x6b @ 1
# [ 0.039993] initcall migration_init+0x0/0x6b returned 1 after 0 usecs
def _parse_dmesg(writer, file):
-    timestamp_re = re.compile ("^\[\s*(\d+\.\d+)\s*]\s+(.*)$")
-    split_re = re.compile ("^(\S+)\s+([\S\+_-]+) (.*)$")
timestamp_re = re.compile (r"^\[\s*(\d+\.\d+)\s*]\s+(.*)$")
split_re = re.compile (r"^(\S+)\s+([\S\+_-]+) (.*)$")
processMap = {}
idx = 0
inc = 1.0 / 1000000
@@ -566,7 +629,7 @@ def _parse_dmesg(writer, file):
# print "foo: '%s' '%s' '%s'" % (type, func, rest)
if type == "calling":
ppid = kernel.pid
-            p = re.match ("\@ (\d+)", rest)
p = re.match (r"\@ (\d+)", rest)
if p is not None:
ppid = float (p.group(1)) // 1000
# print "match: '%s' ('%g') at '%s'" % (func, ppid, time_ms)
8 changes: 7 additions & 1 deletion pybootchartgui/tests/parser_test.py
@@ -83,7 +83,13 @@ def testparseProcDiskStatLog(self):

def testparseProcStatLog(self):
trace = parsing.Trace(writer, args, options)
-        samples = parsing.parse_file(writer, trace, self.mk_fname('proc_stat.log')).cpu_stats
cpu_stats = parsing.parse_file(writer, trace, self.mk_fname('proc_stat.log')).cpu_stats
# Backwards compat: cpu_stats can be List[CPUSample] or
# {'all': List[CPUSample], 'per_cpu': {idx: List[CPUSample]}}
if isinstance(cpu_stats, dict):
samples = cpu_stats.get('all', [])
else:
samples = cpu_stats
self.assertEqual(141, len(samples))

stat_data = open(self.mk_fname('extract.proc_stat.log'))
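If the proc_stat.log fixture is later extended with per-core cpuN lines, a follow-up assertion could look roughly like the sketch below; this is not part of the patch and assumes every timed block in the fixture reports every core:

        if isinstance(cpu_stats, dict):
            per_cpu = cpu_stats['per_cpu']
            self.assertTrue(per_cpu)
            for idx, series in per_cpu.items():
                # One sample per timed block after the first, matching the aggregate series.
                self.assertEqual(len(samples), len(series))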