Mirror of https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git
Synced 2025-10-29 15:28:50 +01:00

Commit f6dfaef42e: "Fixed MTP to work with TWRP"
50820 changed files with 20846062 additions and 0 deletions
tools/perf/scripts/python/Perf-Trace-Util/Context.c (new file, 88 lines)
@@ -0,0 +1,88 @@
/*
 * Context.c.  Python interfaces for perf script.
 *
 * Copyright (C) 2010 Tom Zanussi <tzanussi@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <Python.h>

#include "../../../perf.h"
#include "../../../util/trace-event.h"

PyMODINIT_FUNC initperf_trace_context(void);

static PyObject *perf_trace_context_common_pc(PyObject *obj, PyObject *args)
{
    static struct scripting_context *scripting_context;
    PyObject *context;
    int retval;

    if (!PyArg_ParseTuple(args, "O", &context))
        return NULL;

    scripting_context = PyCObject_AsVoidPtr(context);
    retval = common_pc(scripting_context);

    return Py_BuildValue("i", retval);
}

static PyObject *perf_trace_context_common_flags(PyObject *obj,
                                                 PyObject *args)
{
    static struct scripting_context *scripting_context;
    PyObject *context;
    int retval;

    if (!PyArg_ParseTuple(args, "O", &context))
        return NULL;

    scripting_context = PyCObject_AsVoidPtr(context);
    retval = common_flags(scripting_context);

    return Py_BuildValue("i", retval);
}

static PyObject *perf_trace_context_common_lock_depth(PyObject *obj,
                                                      PyObject *args)
{
    static struct scripting_context *scripting_context;
    PyObject *context;
    int retval;

    if (!PyArg_ParseTuple(args, "O", &context))
        return NULL;

    scripting_context = PyCObject_AsVoidPtr(context);
    retval = common_lock_depth(scripting_context);

    return Py_BuildValue("i", retval);
}

static PyMethodDef ContextMethods[] = {
    { "common_pc", perf_trace_context_common_pc, METH_VARARGS,
      "Get the common preempt count event field value."},
    { "common_flags", perf_trace_context_common_flags, METH_VARARGS,
      "Get the common flags event field value."},
    { "common_lock_depth", perf_trace_context_common_lock_depth,
      METH_VARARGS, "Get the common lock depth event field value."},
    { NULL, NULL, 0, NULL}
};

PyMODINIT_FUNC initperf_trace_context(void)
{
    (void) Py_InitModule("perf_trace_context", ContextMethods);
}
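For orientation only (not part of this commit): a minimal sketch of how a perf script consumes the module above. The script name, handler, and output format here are hypothetical; such a script would normally be generated with "perf script -g python" and run via "perf script -s".

# illustrative-context-demo.py -- assumes it runs under perf script (Python 2)
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] +
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import common_pc, common_flags, common_lock_depth
from Core import trace_flag_str

def trace_unhandled(event_name, context, event_fields_dict):
    # 'context' is the opaque cookie that Context.c unwraps with PyCObject_AsVoidPtr()
    print "%s: pc=%d flags=%s lock_depth=%d" % (
        event_name, common_pc(context),
        trace_flag_str(common_flags(context)), common_lock_depth(context))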
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py (new file, 122 lines)
@@ -0,0 +1,122 @@
# Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.

from collections import defaultdict

def autodict():
    return defaultdict(autodict)

flag_fields = autodict()
symbolic_fields = autodict()

def define_flag_field(event_name, field_name, delim):
    flag_fields[event_name][field_name]['delim'] = delim

def define_flag_value(event_name, field_name, value, field_str):
    flag_fields[event_name][field_name]['values'][value] = field_str

def define_symbolic_field(event_name, field_name):
    # nothing to do, really
    pass

def define_symbolic_value(event_name, field_name, value, field_str):
    symbolic_fields[event_name][field_name]['values'][value] = field_str

def flag_str(event_name, field_name, value):
    string = ""

    if flag_fields[event_name][field_name]:
        print_delim = 0
        keys = flag_fields[event_name][field_name]['values'].keys()
        keys.sort()
        for idx in keys:
            if not value and not idx:
                string += flag_fields[event_name][field_name]['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and flag_fields[event_name][field_name]['delim']:
                    string += " " + flag_fields[event_name][field_name]['delim'] + " "
                string += flag_fields[event_name][field_name]['values'][idx]
                print_delim = 1
                value &= ~idx

    return string

def symbol_str(event_name, field_name, value):
    string = ""

    if symbolic_fields[event_name][field_name]:
        keys = symbolic_fields[event_name][field_name]['values'].keys()
        keys.sort()
        for idx in keys:
            if not value and not idx:
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
            if (value == idx):
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break

    return string

trace_flags = { 0x00: "NONE", \
                0x01: "IRQS_OFF", \
                0x02: "IRQS_NOSUPPORT", \
                0x04: "NEED_RESCHED", \
                0x08: "HARDIRQ", \
                0x10: "SOFTIRQ" }

def trace_flag_str(value):
    string = ""
    print_delim = 0

    keys = trace_flags.keys()

    for idx in keys:
        if not value and not idx:
            string += "NONE"
            break

        if idx and (value & idx) == idx:
            if print_delim:
                string += " | ";
            string += trace_flags[idx]
            print_delim = 1
            value &= ~idx

    return string


def taskState(state):
    states = {
        0 : "R",
        1 : "S",
        2 : "D",
        64: "DEAD"
    }

    if state not in states:
        return "Unknown"

    return states[state]


class EventHeaders:
    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm, common_callchain):
        self.cpu = common_cpu
        self.secs = common_secs
        self.nsecs = common_nsecs
        self.pid = common_pid
        self.comm = common_comm
        self.callchain = common_callchain

    def ts(self):
        return (self.secs * (10 ** 9)) + self.nsecs

    def ts_format(self):
        return "%d.%d" % (self.secs, int(self.nsecs / 1000))
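A hedged usage sketch for the helpers above (assumes Core.py is importable as the scripts below arrange via sys.path; the event and field names are made up):

from Core import define_flag_field, define_flag_value, flag_str, taskState

# Hypothetical flag field whose set bits are joined with "|"
define_flag_field("demo__event", "flags", "|")
define_flag_value("demo__event", "flags", 0x1, "READ")
define_flag_value("demo__event", "flags", 0x2, "WRITE")

print flag_str("demo__event", "flags", 0x3)   # -> "READ | WRITE"
print taskState(2)                            # -> "D"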
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py (new executable file, 94 lines)
@@ -0,0 +1,94 @@
# EventClass.py
#
# This is a library defining some event type classes, which could
# be used by other scripts to analyze perf samples.
#
# Currently there are just a few classes defined as examples:
# PerfEvent is the base class for all perf event samples, PebsEvent
# is a HW-based Intel x86 PEBS event, and users could add more SW/HW
# event classes based on their requirements.

import struct

# Event types, user could add more here
EVTYPE_GENERIC  = 0
EVTYPE_PEBS     = 1     # Basic PEBS event
EVTYPE_PEBS_LL  = 2     # PEBS event with load latency info
EVTYPE_IBS      = 3

#
# Currently we don't have a good way to tell the event type other than by
# the size of the raw buffer: a raw PEBS event with load latency data is
# 176 bytes, while a plain PEBS event is 144 bytes.
#
def create_event(name, comm, dso, symbol, raw_buf):
    if (len(raw_buf) == 144):
        event = PebsEvent(name, comm, dso, symbol, raw_buf)
    elif (len(raw_buf) == 176):
        event = PebsNHM(name, comm, dso, symbol, raw_buf)
    else:
        event = PerfEvent(name, comm, dso, symbol, raw_buf)

    return event

class PerfEvent(object):
    event_num = 0
    def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
        self.name = name
        self.comm = comm
        self.dso = dso
        self.symbol = symbol
        self.raw_buf = raw_buf
        self.ev_type = ev_type
        PerfEvent.event_num += 1

    def show(self):
        print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)

#
# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
# contains the context info when that event happened: the EFLAGS and
# linear IP info, as well as all the registers.
#
class PebsEvent(PerfEvent):
    pebs_num = 0
    def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
        tmp_buf = raw_buf[0:80]
        flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
        self.flags = flags
        self.ip = ip
        self.ax = ax
        self.bx = bx
        self.cx = cx
        self.dx = dx
        self.si = si
        self.di = di
        self.bp = bp
        self.sp = sp

        PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
        PebsEvent.pebs_num += 1
        del tmp_buf

#
# Intel Nehalem and Westmere support PEBS plus Load Latency info, which lies
# in the four 64-bit words written after the PEBS data:
#       Status: records the IA32_PERF_GLOBAL_STATUS register value
#       DLA:    Data Linear Address (EIP)
#       DSE:    Data Source Encoding, where the latency happens, hit or miss
#               in L1/L2/L3 or IO operations
#       LAT:    the actual latency in cycles
#
class PebsNHM(PebsEvent):
    pebs_nhm_num = 0
    def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
        tmp_buf = raw_buf[144:176]
        status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
        self.status = status
        self.dla = dla
        self.dse = dse
        self.lat = lat

        PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
        PebsNHM.pebs_nhm_num += 1
        del tmp_buf
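A quick sketch of the size-based dispatch above, using a synthetic raw buffer (purely illustrative; real buffers come from perf samples):

import struct
from EventClass import create_event, EVTYPE_PEBS

# 18 unsigned 64-bit words == 144 bytes, so this is classified as a plain PEBS event
fake_raw = struct.pack('Q' * 18, *range(18))
ev = create_event("cycles:pp", "demo-comm", "demo.so", "demo_sym", fake_raw)
assert ev.ev_type == EVTYPE_PEBS
ev.show()                              # prints the PMU event summary line
print "ip=%#x sp=%#x" % (ev.ip, ev.sp)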
@@ -0,0 +1,184 @@
# SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.


try:
    import wx
except ImportError:
    raise ImportError, "You need to install the wxpython lib for this script"


class RootFrame(wx.Frame):
    Y_OFFSET = 100
    RECT_HEIGHT = 100
    RECT_SPACE = 50
    EVENT_MARKING_WIDTH = 5

    def __init__(self, sched_tracer, title, parent = None, id = -1):
        wx.Frame.__init__(self, parent, id, title)

        (self.screen_width, self.screen_height) = wx.GetDisplaySize()
        self.screen_width -= 10
        self.screen_height -= 10
        self.zoom = 0.5
        self.scroll_scale = 20
        self.sched_tracer = sched_tracer
        self.sched_tracer.set_root_win(self)
        (self.ts_start, self.ts_end) = sched_tracer.interval()
        self.update_width_virtual()
        self.nr_rects = sched_tracer.nr_rectangles() + 1
        self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))

        # whole window panel
        self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))

        # scrollable container
        self.scroll = wx.ScrolledWindow(self.panel)
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
        self.scroll.EnableScrolling(True, True)
        self.scroll.SetFocus()

        # scrollable drawing area
        self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
        self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
        self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        self.scroll.Fit()
        self.Fit()

        self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)

        self.txt = None

        self.Show(True)

    def us_to_px(self, val):
        return val / (10 ** 3) * self.zoom

    def px_to_us(self, val):
        return (val / self.zoom) * (10 ** 3)

    def scroll_start(self):
        (x, y) = self.scroll.GetViewStart()
        return (x * self.scroll_scale, y * self.scroll_scale)

    def scroll_start_us(self):
        (x, y) = self.scroll_start()
        return self.px_to_us(x)

    def paint_rectangle_zone(self, nr, color, top_color, start, end):
        offset_px = self.us_to_px(start - self.ts_start)
        width_px = self.us_to_px(end - self.ts_start)

        offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
        width_py = RootFrame.RECT_HEIGHT

        dc = self.dc

        if top_color is not None:
            (r, g, b) = top_color
            top_color = wx.Colour(r, g, b)
            brush = wx.Brush(top_color, wx.SOLID)
            dc.SetBrush(brush)
            dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
            width_py -= RootFrame.EVENT_MARKING_WIDTH
            offset_py += RootFrame.EVENT_MARKING_WIDTH

        (r, g, b) = color
        color = wx.Colour(r, g, b)
        brush = wx.Brush(color, wx.SOLID)
        dc.SetBrush(brush)
        dc.DrawRectangle(offset_px, offset_py, width_px, width_py)

    def update_rectangles(self, dc, start, end):
        start += self.ts_start
        end += self.ts_start
        self.sched_tracer.fill_zone(start, end)

    def on_paint(self, event):
        dc = wx.PaintDC(self.scroll_panel)
        self.dc = dc

        width = min(self.width_virtual, self.screen_width)
        (x, y) = self.scroll_start()
        start = self.px_to_us(x)
        end = self.px_to_us(x + width)
        self.update_rectangles(dc, start, end)

    def rect_from_ypixel(self, y):
        y -= RootFrame.Y_OFFSET
        rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
        height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)

        if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
            return -1

        return rect

    def update_summary(self, txt):
        if self.txt:
            self.txt.Destroy()
        self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))


    def on_mouse_down(self, event):
        (x, y) = event.GetPositionTuple()
        rect = self.rect_from_ypixel(y)
        if rect == -1:
            return

        t = self.px_to_us(x) + self.ts_start

        self.sched_tracer.mouse_down(rect, t)


    def update_width_virtual(self):
        self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)

    def __zoom(self, x):
        self.update_width_virtual()
        (xpos, ypos) = self.scroll.GetViewStart()
        xpos = self.us_to_px(x) / self.scroll_scale
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
        self.Refresh()

    def zoom_in(self):
        x = self.scroll_start_us()
        self.zoom *= 2
        self.__zoom(x)

    def zoom_out(self):
        x = self.scroll_start_us()
        self.zoom /= 2
        self.__zoom(x)


    def on_key_press(self, event):
        key = event.GetRawKeyCode()
        if key == ord("+"):
            self.zoom_in()
            return
        if key == ord("-"):
            self.zoom_out()
            return

        key = event.GetKeyCode()
        (x, y) = self.scroll.GetViewStart()
        if key == wx.WXK_RIGHT:
            self.scroll.Scroll(x + 1, y)
        elif key == wx.WXK_LEFT:
            self.scroll.Scroll(x - 1, y)
        elif key == wx.WXK_DOWN:
            self.scroll.Scroll(x, y + 1)
        elif key == wx.WXK_UP:
            self.scroll.Scroll(x, y - 1)
@@ -0,0 +1,86 @@
# Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.

import errno, os

FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)

NSECS_PER_SEC = 1000000000

def avg(total, n):
    return total / n

def nsecs(secs, nsecs):
    return secs * NSECS_PER_SEC + nsecs

def nsecs_secs(nsecs):
    return nsecs / NSECS_PER_SEC

def nsecs_nsecs(nsecs):
    return nsecs % NSECS_PER_SEC

def nsecs_str(nsecs):
    str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
    return str

def add_stats(dict, key, value):
    if not dict.has_key(key):
        dict[key] = (value, value, value, 1)
    else:
        min, max, avg, count = dict[key]
        if value < min:
            min = value
        if value > max:
            max = value
        avg = (avg + value) / 2
        dict[key] = (min, max, avg, count + 1)

def clear_term():
    print("\x1b[H\x1b[2J")

audit_package_warned = False

try:
    import audit
    machine_to_id = {
        'x86_64': audit.MACH_86_64,
        'alpha' : audit.MACH_ALPHA,
        'ia64'  : audit.MACH_IA64,
        'ppc'   : audit.MACH_PPC,
        'ppc64' : audit.MACH_PPC64,
        's390'  : audit.MACH_S390,
        's390x' : audit.MACH_S390X,
        'i386'  : audit.MACH_X86,
        'i586'  : audit.MACH_X86,
        'i686'  : audit.MACH_X86,
    }
    try:
        machine_to_id['armeb'] = audit.MACH_ARMEB
    except:
        pass
    machine_id = machine_to_id[os.uname()[4]]
except:
    if not audit_package_warned:
        audit_package_warned = True
        print "Install the audit-libs-python package to get syscall names"

def syscall_name(id):
    try:
        return audit.audit_syscall_to_name(id, machine_id)
    except:
        return str(id)

def strerror(nr):
    try:
        return errno.errorcode[abs(nr)]
    except:
        return "Unknown %d errno" % nr
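A hedged example of the stats helpers above (standalone, assumes Util.py is importable; key and values are made up):

from Util import add_stats, nsecs

waits = {}
t0 = nsecs(10, 500)          # 10 s + 500 ns, expressed in nanoseconds
t1 = nsecs(10, 2500)
add_stats(waits, ("demo-key",), t1 - t0)
add_stats(waits, ("demo-key",), 3 * (t1 - t0))
min_ns, max_ns, avg_ns, count = waits[("demo-key",)]
print "count=%d min=%d max=%d running-avg=%d ns" % (count, min_ns, max_ns, avg_ns)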
@@ -0,0 +1,8 @@
#!/bin/bash

#
# event_analyzing_sample.py can cover all types of perf samples, including
# tracepoints, so there are no special record requirements; just record
# whatever you want to analyze.
#
perf record $@
@@ -0,0 +1,3 @@
#!/bin/bash
# description: analyze all perf samples
perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/event_analyzing_sample.py
@@ -0,0 +1,3 @@
#!/bin/bash
(perf record -e raw_syscalls:sys_exit $@ || \
 perf record -e syscalls:sys_exit $@) 2> /dev/null
tools/perf/scripts/python/bin/failed-syscalls-by-pid-report (new file, 10 lines)
@@ -0,0 +1,10 @@
#!/bin/bash
# description: system-wide failed syscalls, by pid
# args: [comm]
if [ $# -gt 0 ] ; then
    if ! expr match "$1" "-" > /dev/null ; then
        comm=$1
        shift
    fi
fi
perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/failed-syscalls-by-pid.py $comm
tools/perf/scripts/python/bin/futex-contention-record (new file, 2 lines)
@@ -0,0 +1,2 @@
#!/bin/bash
perf record -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex $@
tools/perf/scripts/python/bin/futex-contention-report (new file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/bash
# description: futex contention measurement

perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/futex-contention.py
tools/perf/scripts/python/bin/net_dropmonitor-record (new executable file, 2 lines)
@@ -0,0 +1,2 @@
#!/bin/bash
perf record -e skb:kfree_skb $@
tools/perf/scripts/python/bin/net_dropmonitor-report (new executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/bash
# description: display a table of dropped frames

perf script -s "$PERF_EXEC_PATH"/scripts/python/net_dropmonitor.py $@
tools/perf/scripts/python/bin/netdev-times-record (new file, 8 lines)
@@ -0,0 +1,8 @@
#!/bin/bash
perf record -e net:net_dev_xmit -e net:net_dev_queue \
    -e net:netif_receive_skb -e net:netif_rx \
    -e skb:consume_skb -e skb:kfree_skb \
    -e skb:skb_copy_datagram_iovec -e napi:napi_poll \
    -e irq:irq_handler_entry -e irq:irq_handler_exit \
    -e irq:softirq_entry -e irq:softirq_exit \
    -e irq:softirq_raise $@
tools/perf/scripts/python/bin/netdev-times-report (new file, 5 lines)
@@ -0,0 +1,5 @@
#!/bin/bash
# description: display the packet processing path and per-stage timing
# args: [tx] [rx] [dev=] [debug]

perf script -s "$PERF_EXEC_PATH"/scripts/python/netdev-times.py $@
tools/perf/scripts/python/bin/sched-migration-record (new file, 2 lines)
@@ -0,0 +1,2 @@
#!/bin/bash
perf record -m 16384 -e sched:sched_wakeup -e sched:sched_wakeup_new -e sched:sched_switch -e sched:sched_migrate_task $@
tools/perf/scripts/python/bin/sched-migration-report (new file, 3 lines)
@@ -0,0 +1,3 @@
#!/bin/bash
# description: sched migration overview
perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/sched-migration.py
tools/perf/scripts/python/bin/sctop-record (new file, 3 lines)
@@ -0,0 +1,3 @@
#!/bin/bash
(perf record -e raw_syscalls:sys_enter $@ || \
 perf record -e syscalls:sys_enter $@) 2> /dev/null
tools/perf/scripts/python/bin/sctop-report (new file, 24 lines)
@@ -0,0 +1,24 @@
#!/bin/bash
# description: syscall top
# args: [comm] [interval]
n_args=0
for i in "$@"
do
    if expr match "$i" "-" > /dev/null ; then
        break
    fi
    n_args=$(( $n_args + 1 ))
done
if [ "$n_args" -gt 2 ] ; then
    echo "usage: sctop-report [comm] [interval]"
    exit
fi
if [ "$n_args" -gt 1 ] ; then
    comm=$1
    interval=$2
    shift 2
elif [ "$n_args" -gt 0 ] ; then
    interval=$1
    shift
fi
perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/sctop.py $comm $interval
@@ -0,0 +1,3 @@
#!/bin/bash
(perf record -e raw_syscalls:sys_enter $@ || \
 perf record -e syscalls:sys_enter $@) 2> /dev/null
tools/perf/scripts/python/bin/syscall-counts-by-pid-report (new file, 10 lines)
@@ -0,0 +1,10 @@
#!/bin/bash
# description: system-wide syscall counts, by pid
# args: [comm]
if [ $# -gt 0 ] ; then
    if ! expr match "$1" "-" > /dev/null ; then
        comm=$1
        shift
    fi
fi
perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/syscall-counts-by-pid.py $comm
tools/perf/scripts/python/bin/syscall-counts-record (new file, 3 lines)
@@ -0,0 +1,3 @@
#!/bin/bash
(perf record -e raw_syscalls:sys_enter $@ || \
 perf record -e syscalls:sys_enter $@) 2> /dev/null
tools/perf/scripts/python/bin/syscall-counts-report (new file, 10 lines)
@@ -0,0 +1,10 @@
#!/bin/bash
# description: system-wide syscall counts
# args: [comm]
if [ $# -gt 0 ] ; then
    if ! expr match "$1" "-" > /dev/null ; then
        comm=$1
        shift
    fi
fi
perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/syscall-counts.py $comm
tools/perf/scripts/python/check-perf-trace.py (new file, 82 lines)
@@ -0,0 +1,82 @@
# perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc.  Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.

import os
import sys

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from Core import *
from perf_trace_context import *

unhandled = autodict()

def trace_begin():
    print "trace_begin"
    pass

def trace_end():
    print_unhandled()

def irq__softirq_entry(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, vec):
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)

    print_uncommon(context)

    print "vec=%s\n" % \
        (symbol_str("irq__softirq_entry", "vec", vec)),

def kmem__kmalloc(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, call_site, ptr, bytes_req, bytes_alloc,
        gfp_flags):
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)

    print_uncommon(context)

    print "call_site=%u, ptr=%u, bytes_req=%u, " \
        "bytes_alloc=%u, gfp_flags=%s\n" % \
        (call_site, ptr, bytes_req, bytes_alloc,
        flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),

def trace_unhandled(event_name, context, event_fields_dict):
    try:
        unhandled[event_name] += 1
    except TypeError:
        unhandled[event_name] = 1

def print_header(event_name, cpu, secs, nsecs, pid, comm):
    print "%-20s %5u %05u.%09u %8u %-20s " % \
        (event_name, cpu, secs, nsecs, pid, comm),

# print trace fields not included in handler args
def print_uncommon(context):
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
        % (common_pc(context), trace_flag_str(common_flags(context)), \
        common_lock_depth(context))

def print_unhandled():
    keys = unhandled.keys()
    if not keys:
        return

    print "\nunhandled events:\n\n",

    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "-----------"),

    for event_name in keys:
        print "%-40s %10d\n" % (event_name, unhandled[event_name])
tools/perf/scripts/python/event_analyzing_sample.py (new file, 189 lines)
@@ -0,0 +1,189 @@
# event_analyzing_sample.py: general event handler in python
#
# Current perf report is already very powerful with the annotation integrated,
# and this script is not trying to be as powerful as perf report, but
# providing end user/developer a flexible way to analyze the events other
# than trace points.
#
# The 2 database related functions in this script just show how to gather
# the basic information, and users can modify and write their own functions
# according to their specific requirement.
#
# The first function "show_general_events" just does a basic grouping for all
# generic events with the help of sqlite, and the 2nd one "show_pebs_ll" is
# for an x86 HW PMU event: PEBS with load latency data.
#

import os
import sys
import math
import struct
import sqlite3

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from EventClass import *

#
# If perf.data has a large number of samples, the insert operation will be
# very time consuming (about 10+ minutes for 10000 samples) if the .db
# database is on disk.  Move the .db file to a RAM-based FS to speed up
# the handling, which cuts the time down to several seconds.
#
con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None

def trace_begin():
    print "In trace_begin:\n"

    #
    # Create several tables at the start: pebs_ll is for PEBS data with
    # load latency info, while gen_events is for general events.
    #
    con.execute("""
        create table if not exists gen_events (
            name text,
            symbol text,
            comm text,
            dso text
        );""")
    con.execute("""
        create table if not exists pebs_ll (
            name text,
            symbol text,
            comm text,
            dso text,
            flags integer,
            ip integer,
            status integer,
            dse integer,
            dla integer,
            lat integer
        );""")

#
# Create an event object and insert it into the database so that the user
# can do more analysis with simple database commands.
#
def process_event(param_dict):
    event_attr = param_dict["attr"]
    sample = param_dict["sample"]
    raw_buf = param_dict["raw_buf"]
    comm = param_dict["comm"]
    name = param_dict["ev_name"]

    # Symbol and dso info are not always resolved
    if (param_dict.has_key("dso")):
        dso = param_dict["dso"]
    else:
        dso = "Unknown_dso"

    if (param_dict.has_key("symbol")):
        symbol = param_dict["symbol"]
    else:
        symbol = "Unknown_symbol"

    # Create the event object and insert it into the right table in the database
    event = create_event(name, comm, dso, symbol, raw_buf)
    insert_db(event)

def insert_db(event):
    if event.ev_type == EVTYPE_GENERIC:
        con.execute("insert into gen_events values(?, ?, ?, ?)",
            (event.name, event.symbol, event.comm, event.dso))
    elif event.ev_type == EVTYPE_PEBS_LL:
        event.ip &= 0x7fffffffffffffff
        event.dla &= 0x7fffffffffffffff
        con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
            (event.name, event.symbol, event.comm, event.dso, event.flags,
            event.ip, event.status, event.dse, event.dla, event.lat))

def trace_end():
    print "In trace_end:\n"
    # We show the basic info for the 2 types of event classes
    show_general_events()
    show_pebs_ll()
    con.close()

#
# As the event count may be very large, we can't show the histogram on a
# linear scale; use a log2 scale instead.
#

def num2sym(num):
    # Each number will have at least one '#'
    snum = '#' * (int)(math.log(num, 2) + 1)
    return snum

def show_general_events():

    # Check the total record number in the table
    count = con.execute("select count(*) from gen_events")
    for t in count:
        print "There is %d records in gen_events table" % t[0]
        if t[0] == 0:
            return

    print "Statistics about the general events grouped by thread/symbol/dso: \n"

    # Group by thread
    commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
    print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
    for row in commq:
        print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))

    # Group by symbol
    print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
    symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
    for row in symbolq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))

    # Group by dso
    print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
    dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
    for row in dsoq:
        print "%40s %8d %s" % (row[0], row[1], num2sym(row[1]))

#
# This function just shows the basic info, and we could do more with the
# data in the tables, like checking the function parameters when some
# big latency events happen.
#
def show_pebs_ll():

    count = con.execute("select count(*) from pebs_ll")
    for t in count:
        print "There is %d records in pebs_ll table" % t[0]
        if t[0] == 0:
            return

    print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"

    # Group by thread
    commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
    print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
    for row in commq:
        print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))

    # Group by symbol
    print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
    symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
    for row in symbolq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))

    # Group by dse
    dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
    print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
    for row in dseq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))

    # Group by latency
    latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
    print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
    for row in latq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))

def trace_unhandled(event_name, context, event_fields_dict):
    print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
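Since the script leaves its tables behind in /dev/shm/perf.db, follow-up queries can be run outside perf. A hedged sketch (table and column names as created above; the 100-cycle latency threshold is arbitrary):

import sqlite3

con = sqlite3.connect("/dev/shm/perf.db")
# Top 10 symbols by sample count, then any load-latency samples above 100 cycles
for row in con.execute("select symbol, count(*) from gen_events "
                       "group by symbol order by count(*) desc limit 10"):
    print "%40s %8d" % (row[0], row[1])
for row in con.execute("select comm, symbol, lat from pebs_ll "
                       "where lat > 100 order by lat desc"):
    print "%16s %32s lat=%d" % (row[0], row[1], row[2])
con.close()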
tools/perf/scripts/python/failed-syscalls-by-pid.py (new file, 78 lines)
@@ -0,0 +1,78 @@
# failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.

import os
import sys

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from Util import *

usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";

for_comm = None
for_pid = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    try:
        for_pid = int(sys.argv[1])
    except:
        for_comm = sys.argv[1]

syscalls = autodict()

def trace_begin():
    print "Press control+C to stop and show the summary"

def trace_end():
    print_error_totals()

def raw_syscalls__sys_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, id, ret):
    if (for_comm and common_comm != for_comm) or \
       (for_pid and common_pid != for_pid ):
        return

    if ret < 0:
        try:
            syscalls[common_comm][common_pid][id][ret] += 1
        except TypeError:
            syscalls[common_comm][common_pid][id][ret] = 1

def syscalls__sys_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, ret):
    raw_syscalls__sys_exit(**locals())

def print_error_totals():
    if for_comm is not None:
        print "\nsyscall errors for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall errors:\n\n",

    print "%-30s %10s\n" % ("comm [pid]", "count"),
    print "%-30s %10s\n" % ("------------------------------", \
        "----------"),

    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            for id in id_keys:
                print " syscall: %-16s\n" % syscall_name(id),
                ret_keys = syscalls[comm][pid][id].keys()
                for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
                    print " err = %-20s %10d\n" % (strerror(ret), val),
tools/perf/scripts/python/futex-contention.py (new file, 50 lines)
@@ -0,0 +1,50 @@
# futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention

import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *

process_names = {}
thread_thislock = {}
thread_blocktime = {}

lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping

def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
        nr, uaddr, op, val, utime, uaddr2, val3):
    cmd = op & FUTEX_CMD_MASK
    if cmd != FUTEX_WAIT:
        return # we don't care about originators of WAKE events

    process_names[tid] = comm
    thread_thislock[tid] = uaddr
    thread_blocktime[tid] = nsecs(s, ns)

def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
        nr, ret):
    if thread_blocktime.has_key(tid):
        elapsed = nsecs(s, ns) - thread_blocktime[tid]
        add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
        del thread_blocktime[tid]
        del thread_thislock[tid]

def trace_begin():
    print "Press control+C to stop and show the summary"

def trace_end():
    for (tid, lock) in lock_waits:
        min, max, avg, count = lock_waits[tid, lock]
        print "%s[%d] lock %x contended %d times, %d avg ns" % \
            (process_names[tid], tid, lock, count, avg)
tools/perf/scripts/python/net_dropmonitor.py (new executable file, 75 lines)
@@ -0,0 +1,75 @@
# Monitor the system for dropped packets and produce a report of drop locations and counts

import os
import sys

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from Util import *

drop_log = {}
kallsyms = []

def get_kallsyms_table():
    global kallsyms

    try:
        f = open("/proc/kallsyms", "r")
    except:
        return

    for line in f:
        loc = int(line.split()[0], 16)
        name = line.split()[2]
        kallsyms.append((loc, name))
    kallsyms.sort()

def get_sym(sloc):
    loc = int(sloc)

    # Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start
    #            kallsyms[i][0] > loc for all end <= i < len(kallsyms)
    start, end = -1, len(kallsyms)
    while end != start + 1:
        pivot = (start + end) // 2
        if loc < kallsyms[pivot][0]:
            end = pivot
        else:
            start = pivot

    # Now (start == -1 or kallsyms[start][0] <= loc)
    # and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0])
    if start >= 0:
        symloc, name = kallsyms[start]
        return (name, loc - symloc)
    else:
        return (None, 0)

def print_drop_table():
    print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
    for i in drop_log.keys():
        (sym, off) = get_sym(i)
        if sym == None:
            sym = i
        print "%25s %25s %25s" % (sym, off, drop_log[i])


def trace_begin():
    print "Starting trace (Ctrl-C to dump results)"

def trace_end():
    print "Gathering kallsyms data"
    get_kallsyms_table()
    print_drop_table()

# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
        skbaddr, location, protocol):
    slocation = str(location)
    try:
        drop_log[slocation] = drop_log[slocation] + 1
    except:
        drop_log[slocation] = 1
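The hand-rolled binary search in get_sym() above is equivalent to a bisect lookup; a hedged standalone sketch of the same idea (addresses and names here are made up):

import bisect

def lookup(addrs, names, loc):
    # addrs is a sorted list of symbol start addresses, names the matching symbol names
    i = bisect.bisect_right(addrs, loc) - 1
    if i >= 0:
        return (names[i], loc - addrs[i])
    return (None, 0)

addrs = [0x1000, 0x2000]
names = ["sym_a", "sym_b"]
print lookup(addrs, names, 0x1010)   # -> ('sym_a', 16)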
tools/perf/scripts/python/netdev-times.py (new file, 464 lines; listing truncated below)
@@ -0,0 +1,464 @@
# Display the processing path of packets and the processing time.
# It helps us investigate networking or network devices.
#
# options
# tx: show only the tx chart
# rx: show only the rx chart
# dev=: show only things related to the specified device
# debug: work in debug mode. It shows buffer status.

import os
import sys

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from Util import *

all_event_list = []; # insert all tracepoint events related to this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
              # which raise the NET_RX softirq
net_rx_dic = {}; # key is cpu and value includes the time of the NET_RX softirq-entry
                 # and a list which stacks receive events
receive_hunk_list = []; # a list which includes a sequence of receive events
rx_skb_list = []; # received packet list for matching
                  # skb_copy_datagram_iovec

buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
                       # tx_xmit_list
of_count_rx_skb_list = 0; # overflow count

tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count

tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count

tx_free_list = []; # list of packets which are freed

# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;

# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5

# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
    return (dst - src) / 1000000.0

# Display the process of transmitting a packet
def print_transmit(hunk):
    if dev != 0 and hunk['dev'].find(dev) < 0:
        return
    print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
        (hunk['dev'], hunk['len'],
        nsecs_secs(hunk['queue_t']),
        nsecs_nsecs(hunk['queue_t'])/1000,
        diff_msec(hunk['queue_t'], hunk['xmit_t']),
        diff_msec(hunk['xmit_t'], hunk['free_t']))

# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"

# Display the process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
    show_hunk = 0
    irq_list = hunk['irq_list']
    cpu = irq_list[0]['cpu']
    base_t = irq_list[0]['irq_ent_t']
    # check if this hunk should be shown
    if dev != 0:
        for i in range(len(irq_list)):
            if irq_list[i]['name'].find(dev) >= 0:
                show_hunk = 1
                break
    else:
        show_hunk = 1
    if show_hunk == 0:
        return

    print "%d.%06dsec cpu=%d" % \
        (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
    for i in range(len(irq_list)):
        print PF_IRQ_ENTRY % \
            (diff_msec(base_t, irq_list[i]['irq_ent_t']),
            irq_list[i]['irq'], irq_list[i]['name'])
        print PF_JOINT
        irq_event_list = irq_list[i]['event_list']
        for j in range(len(irq_event_list)):
            irq_event = irq_event_list[j]
            if irq_event['event'] == 'netif_rx':
                print PF_NET_RX % \
                    (diff_msec(base_t, irq_event['time']),
                    irq_event['skbaddr'])
                print PF_JOINT
    print PF_SOFT_ENTRY % \
        diff_msec(base_t, hunk['sirq_ent_t'])
    print PF_JOINT
    event_list = hunk['event_list']
    for i in range(len(event_list)):
        event = event_list[i]
        if event['event_name'] == 'napi_poll':
            print PF_NAPI_POLL % \
                (diff_msec(base_t, event['event_t']), event['dev'])
            if i == len(event_list) - 1:
                print ""
            else:
                print PF_JOINT
        else:
            print PF_NET_RECV % \
                (diff_msec(base_t, event['event_t']), event['skbaddr'],
                event['len'])
            if 'comm' in event.keys():
                print PF_WJOINT
                print PF_CPY_DGRAM % \
                    (diff_msec(base_t, event['comm_t']),
                    event['pid'], event['comm'])
            elif 'handle' in event.keys():
                print PF_WJOINT
                if event['handle'] == "kfree_skb":
                    print PF_KFREE_SKB % \
                        (diff_msec(base_t,
                        event['comm_t']),
                        event['location'])
                elif event['handle'] == "consume_skb":
                    print PF_CONS_SKB % \
                        diff_msec(base_t,
                        event['comm_t'])
            print PF_JOINT

def trace_begin():
    global show_tx
    global show_rx
    global dev
    global debug

    for i in range(len(sys.argv)):
        if i == 0:
            continue
        arg = sys.argv[i]
        if arg == 'tx':
            show_tx = 1
        elif arg =='rx':
            show_rx = 1
        elif arg.find('dev=',0, 4) >= 0:
            dev = arg[4:]
        elif arg == 'debug':
            debug = 1
    if show_tx == 0 and show_rx == 0:
        show_tx = 1
        show_rx = 1

def trace_end():
    # order all events in time
    all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
        b[EINFO_IDX_TIME]))
    # process all events
    for i in range(len(all_event_list)):
        event_info = all_event_list[i]
        name = event_info[EINFO_IDX_NAME]
        if name == 'irq__softirq_exit':
            handle_irq_softirq_exit(event_info)
        elif name == 'irq__softirq_entry':
            handle_irq_softirq_entry(event_info)
        elif name == 'irq__softirq_raise':
            handle_irq_softirq_raise(event_info)
        elif name == 'irq__irq_handler_entry':
            handle_irq_handler_entry(event_info)
        elif name == 'irq__irq_handler_exit':
            handle_irq_handler_exit(event_info)
        elif name == 'napi__napi_poll':
            handle_napi_poll(event_info)
        elif name == 'net__netif_receive_skb':
            handle_netif_receive_skb(event_info)
        elif name == 'net__netif_rx':
            handle_netif_rx(event_info)
        elif name == 'skb__skb_copy_datagram_iovec':
            handle_skb_copy_datagram_iovec(event_info)
        elif name == 'net__net_dev_queue':
            handle_net_dev_queue(event_info)
        elif name == 'net__net_dev_xmit':
            handle_net_dev_xmit(event_info)
        elif name == 'skb__kfree_skb':
            handle_kfree_skb(event_info)
        elif name == 'skb__consume_skb':
            handle_consume_skb(event_info)
    # display receive hunks
    if show_rx:
        for i in range(len(receive_hunk_list)):
            print_receive(receive_hunk_list[i])
    # display transmit hunks
    if show_tx:
        print " dev len Qdisc " \
            " netdevice free"
        for i in range(len(tx_free_list)):
            print_transmit(tx_free_list[i])
    if debug:
        print "debug buffer status"
        print "----------------------------"
        print "xmit Qdisc:remain:%d overflow:%d" % \
            (len(tx_queue_list), of_count_tx_queue_list)
        print "xmit netdevice:remain:%d overflow:%d" % \
            (len(tx_xmit_list), of_count_tx_xmit_list)
        print "receive:remain:%d overflow:%d" % \
            (len(rx_skb_list), of_count_rx_skb_list)

# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)

def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)

def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)

def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
        callchain, irq, irq_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
        irq, irq_name)
    all_event_list.append(event_info)

def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, irq, ret):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
    all_event_list.append(event_info)

def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
        napi, dev_name)
    all_event_list.append(event_info)

def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
        skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
        skbaddr, skblen, dev_name)
    all_event_list.append(event_info)

def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
        skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
        skbaddr, skblen, dev_name)
    all_event_list.append(event_info)

def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm, callchain,
        skbaddr, skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
        skbaddr, skblen, dev_name)
    all_event_list.append(event_info)

def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm, callchain,
        skbaddr, skblen, rc, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
        skbaddr, skblen, rc ,dev_name)
    all_event_list.append(event_info)

def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
        skbaddr, protocol, location):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
        skbaddr, protocol, location)
    all_event_list.append(event_info)

def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
        skbaddr)
    all_event_list.append(event_info)

def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm, callchain,
        skbaddr, skblen):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
        skbaddr, skblen)
    all_event_list.append(event_info)

def handle_irq_handler_entry(event_info):
    (name, context, cpu, time, pid, comm, irq, irq_name) = event_info
    if cpu not in irq_dic.keys():
        irq_dic[cpu] = []
    irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
    irq_dic[cpu].append(irq_record)

def handle_irq_handler_exit(event_info):
    (name, context, cpu, time, pid, comm, irq, ret) = event_info
    if cpu not in irq_dic.keys():
        return
    irq_record = irq_dic[cpu].pop()
    if irq != irq_record['irq']:
        return
    irq_record.update({'irq_ext_t':time})
    # if an irq doesn't include a NET_RX softirq, drop it.
    if 'event_list' in irq_record.keys():
        irq_dic[cpu].append(irq_record)

def handle_irq_softirq_raise(event_info):
    (name, context, cpu, time, pid, comm, vec) = event_info
    if cpu not in irq_dic.keys() \
       or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time':time, 'event':'sirq_raise'})
    irq_record.update({'event_list':irq_event_list})
    irq_dic[cpu].append(irq_record)

def handle_irq_softirq_entry(event_info):
    (name, context, cpu, time, pid, comm, vec) = event_info
    net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}

def handle_irq_softirq_exit(event_info):
    (name, context, cpu, time, pid, comm, vec) = event_info
    irq_list = []
    event_list = 0
    if cpu in irq_dic.keys():
        irq_list = irq_dic[cpu]
        del irq_dic[cpu]
    if cpu in net_rx_dic.keys():
        sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
        event_list = net_rx_dic[cpu]['event_list']
        del net_rx_dic[cpu]
    if irq_list == [] or event_list == 0:
        return
    rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
        'irq_list':irq_list, 'event_list':event_list}
    # merge information related to a NET_RX softirq
    receive_hunk_list.append(rec_data)

def handle_napi_poll(event_info):
    (name, context, cpu, time, pid, comm, napi, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        event_list = net_rx_dic[cpu]['event_list']
        rec_data = {'event_name':'napi_poll',
            'dev':dev_name, 'event_t':time}
        event_list.append(rec_data)

def handle_netif_rx(event_info):
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    if cpu not in irq_dic.keys() \
       or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time':time, 'event':'netif_rx',
        'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
    irq_record.update({'event_list':irq_event_list})
    irq_dic[cpu].append(irq_record)

def handle_netif_receive_skb(event_info):
    global of_count_rx_skb_list

    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        rec_data = {'event_name':'netif_receive_skb',
            'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
        event_list = net_rx_dic[cpu]['event_list']
        event_list.append(rec_data)
        rx_skb_list.insert(0, rec_data)
        if len(rx_skb_list) > buffer_budget:
            rx_skb_list.pop()
            of_count_rx_skb_list += 1

def handle_net_dev_queue(event_info):
    global of_count_tx_queue_list

    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
    tx_queue_list.insert(0, skb)
    if len(tx_queue_list) > buffer_budget:
        tx_queue_list.pop()
        of_count_tx_queue_list += 1

def handle_net_dev_xmit(event_info):
    global of_count_tx_xmit_list

    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, rc, dev_name) = event_info
    if rc == 0: # NETDEV_TX_OK
        for i in range(len(tx_queue_list)):
            skb = tx_queue_list[i]
            if skb['skbaddr'] == skbaddr:
                skb['xmit_t'] = time
                tx_xmit_list.insert(0, skb)
                del tx_queue_list[i]
                if len(tx_xmit_list) > buffer_budget:
                    tx_xmit_list.pop()
                    of_count_tx_xmit_list += 1
                return

def handle_kfree_skb(event_info):
    (name, context, cpu, time, pid, comm,
        skbaddr, protocol, location) = event_info
    for i in range(len(tx_queue_list)):
        skb = tx_queue_list[i]
        if skb['skbaddr'] == skbaddr:
            del tx_queue_list[i]
            return
    for i in range(len(tx_xmit_list)):
        skb = tx_xmit_list[i]
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return
    for i in range(len(rx_skb_list)):
        rec_data = rx_skb_list[i]
        if rec_data['skbaddr'] == skbaddr:
            rec_data.update({'handle':"kfree_skb",
                'comm':comm, 'pid':pid, 'comm_t':time})
|
||||
del rx_skb_list[i]
|
||||
return
|
||||
|
||||
def handle_consume_skb(event_info):
|
||||
(name, context, cpu, time, pid, comm, skbaddr) = event_info
|
||||
for i in range(len(tx_xmit_list)):
|
||||
skb = tx_xmit_list[i]
|
||||
if skb['skbaddr'] == skbaddr:
|
||||
skb['free_t'] = time
|
||||
tx_free_list.append(skb)
|
||||
del tx_xmit_list[i]
|
||||
return
|
||||
|
||||
def handle_skb_copy_datagram_iovec(event_info):
|
||||
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
|
||||
for i in range(len(rx_skb_list)):
|
||||
rec_data = rx_skb_list[i]
|
||||
if skbaddr == rec_data['skbaddr']:
|
||||
rec_data.update({'handle':"skb_copy_datagram_iovec",
|
||||
'comm':comm, 'pid':pid, 'comm_t':time})
|
||||
del rx_skb_list[i]
|
||||
return
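
The handlers above follow one pattern: the raw per-event callbacks only buffer tuples into all_event_list, and the handle_* routines later pair hard-irq records (irq_dic, keyed by CPU) with the NET_RX softirq events raised inside them. A minimal, self-contained sketch of that pairing idea follows; the names (demo_irq_entry and friends) and the simplified "append straight to a result list" step are illustrative assumptions, not the script's exact flow:

irq_dic = {}            # cpu -> stack of open hard-irq records
receive_hunk_list = []  # records kept because they raised NET_RX work

def demo_irq_entry(cpu, t, irq):
    irq_dic.setdefault(cpu, []).append({'irq': irq, 'irq_ent_t': t})

def demo_softirq_raise(cpu, t):
    # attach the softirq-raise event to the hard irq currently open on this cpu
    if irq_dic.get(cpu):
        irq_dic[cpu][-1].setdefault('event_list', []).append(
            {'time': t, 'event': 'sirq_raise'})

def demo_irq_exit(cpu, t):
    # keep the record only if it collected softirq events, as the real handler does;
    # the real script pushes it back onto irq_dic and merges it at softirq exit
    rec = irq_dic[cpu].pop()
    rec['irq_ext_t'] = t
    if 'event_list' in rec:
        receive_hunk_list.append(rec)

demo_irq_entry(0, 100, 42)
demo_softirq_raise(0, 110)
demo_irq_exit(0, 120)
print(receive_hunk_list)   # one hunk: irq 42 with its sirq_raise event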
460  tools/perf/scripts/python/sched-migration.py  Normal file
@@ -0,0 +1,460 @@
#!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.


import os
import sys

from collections import defaultdict
from UserList import UserList

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from SchedGui import *


threads = { 0 : "idle"}

def thread_name(pid):
    return "%s:%d" % (threads[pid], pid)

class RunqueueEventUnknown:
    @staticmethod
    def color():
        return None

    def __repr__(self):
        return "unknown"

class RunqueueEventSleep:
    @staticmethod
    def color():
        return (0, 0, 0xff)

    def __init__(self, sleeper):
        self.sleeper = sleeper

    def __repr__(self):
        return "%s gone to sleep" % thread_name(self.sleeper)

class RunqueueEventWakeup:
    @staticmethod
    def color():
        return (0xff, 0xff, 0)

    def __init__(self, wakee):
        self.wakee = wakee

    def __repr__(self):
        return "%s woke up" % thread_name(self.wakee)

class RunqueueEventFork:
    @staticmethod
    def color():
        return (0, 0xff, 0)

    def __init__(self, child):
        self.child = child

    def __repr__(self):
        return "new forked task %s" % thread_name(self.child)

class RunqueueMigrateIn:
    @staticmethod
    def color():
        return (0, 0xf0, 0xff)

    def __init__(self, new):
        self.new = new

    def __repr__(self):
        return "task migrated in %s" % thread_name(self.new)

class RunqueueMigrateOut:
    @staticmethod
    def color():
        return (0xff, 0, 0xff)

    def __init__(self, old):
        self.old = old

    def __repr__(self):
        return "task migrated out %s" % thread_name(self.old)

class RunqueueSnapshot:
    def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
        self.tasks = tuple(tasks)
        self.event = event

    def sched_switch(self, prev, prev_state, next):
        event = RunqueueEventUnknown()

        if taskState(prev_state) == "R" and next in self.tasks \
           and prev in self.tasks:
            return self

        if taskState(prev_state) != "R":
            event = RunqueueEventSleep(prev)

        next_tasks = list(self.tasks[:])
        if prev in self.tasks:
            if taskState(prev_state) != "R":
                next_tasks.remove(prev)
        elif taskState(prev_state) == "R":
            next_tasks.append(prev)

        if next not in next_tasks:
            next_tasks.append(next)

        return RunqueueSnapshot(next_tasks, event)

    def migrate_out(self, old):
        if old not in self.tasks:
            return self
        next_tasks = [task for task in self.tasks if task != old]

        return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))

    def __migrate_in(self, new, event):
        if new in self.tasks:
            self.event = event
            return self
        next_tasks = self.tasks[:] + tuple([new])

        return RunqueueSnapshot(next_tasks, event)

    def migrate_in(self, new):
        return self.__migrate_in(new, RunqueueMigrateIn(new))

    def wake_up(self, new):
        return self.__migrate_in(new, RunqueueEventWakeup(new))

    def wake_up_new(self, new):
        return self.__migrate_in(new, RunqueueEventFork(new))

    def load(self):
        """ Provide the number of tasks on the runqueue.
            Don't count idle"""
        return len(self.tasks) - 1

    def __repr__(self):
        ret = self.tasks.__repr__()
        ret += self.origin_tostring()

        return ret

class TimeSlice:
    def __init__(self, start, prev):
        self.start = start
        self.prev = prev
        self.end = start
        # cpus that triggered the event
        self.event_cpus = []
        if prev is not None:
            self.total_load = prev.total_load
            self.rqs = prev.rqs.copy()
        else:
            self.rqs = defaultdict(RunqueueSnapshot)
            self.total_load = 0

    def __update_total_load(self, old_rq, new_rq):
        diff = new_rq.load() - old_rq.load()
        self.total_load += diff

    def sched_switch(self, ts_list, prev, prev_state, next, cpu):
        old_rq = self.prev.rqs[cpu]
        new_rq = old_rq.sched_switch(prev, prev_state, next)

        if old_rq is new_rq:
            return

        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def migrate(self, ts_list, new, old_cpu, new_cpu):
        if old_cpu == new_cpu:
            return
        old_rq = self.prev.rqs[old_cpu]
        out_rq = old_rq.migrate_out(new)
        self.rqs[old_cpu] = out_rq
        self.__update_total_load(old_rq, out_rq)

        new_rq = self.prev.rqs[new_cpu]
        in_rq = new_rq.migrate_in(new)
        self.rqs[new_cpu] = in_rq
        self.__update_total_load(new_rq, in_rq)

        ts_list.append(self)

        if old_rq is not out_rq:
            self.event_cpus.append(old_cpu)
        self.event_cpus.append(new_cpu)

    def wake_up(self, ts_list, pid, cpu, fork):
        old_rq = self.prev.rqs[cpu]
        if fork:
            new_rq = old_rq.wake_up_new(pid)
        else:
            new_rq = old_rq.wake_up(pid)

        if new_rq is old_rq:
            return
        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def next(self, t):
        self.end = t
        return TimeSlice(t, self)

class TimeSliceList(UserList):
    def __init__(self, arg = []):
        self.data = arg

    def get_time_slice(self, ts):
        if len(self.data) == 0:
            slice = TimeSlice(ts, TimeSlice(-1, None))
        else:
            slice = self.data[-1].next(ts)
        return slice

    def find_time_slice(self, ts):
        start = 0
        end = len(self.data)
        found = -1
        searching = True
        while searching:
            if start == end or start == end - 1:
                searching = False

            i = (end + start) / 2
            if self.data[i].start <= ts and self.data[i].end >= ts:
                found = i
                end = i
                continue

            if self.data[i].end < ts:
                start = i

            elif self.data[i].start > ts:
                end = i

        return found

    def set_root_win(self, win):
        self.root_win = win

    def mouse_down(self, cpu, t):
        idx = self.find_time_slice(t)
        if idx == -1:
            return

        ts = self[idx]
        rq = ts.rqs[cpu]
        raw = "CPU: %d\n" % cpu
        raw += "Last event : %s\n" % rq.event.__repr__()
        raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
        raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
        raw += "Load = %d\n" % rq.load()
        for t in rq.tasks:
            raw += "%s \n" % thread_name(t)

        self.root_win.update_summary(raw)

    def update_rectangle_cpu(self, slice, cpu):
        rq = slice.rqs[cpu]

        if slice.total_load != 0:
            load_rate = rq.load() / float(slice.total_load)
        else:
            load_rate = 0

        red_power = int(0xff - (0xff * load_rate))
        color = (0xff, red_power, red_power)

        top_color = None

        if cpu in slice.event_cpus:
            top_color = rq.event.color()

        self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)

    def fill_zone(self, start, end):
        i = self.find_time_slice(start)
        if i == -1:
            return

        for i in xrange(i, len(self.data)):
            timeslice = self.data[i]
            if timeslice.start > end:
                return

            for cpu in timeslice.rqs:
                self.update_rectangle_cpu(timeslice, cpu)

    def interval(self):
        if len(self.data) == 0:
            return (0, 0)

        return (self.data[0].start, self.data[-1].end)

    def nr_rectangles(self):
        last_ts = self.data[-1]
        max_cpu = 0
        for cpu in last_ts.rqs:
            if cpu > max_cpu:
                max_cpu = cpu
        return max_cpu


class SchedEventProxy:
    def __init__(self):
        self.current_tsk = defaultdict(lambda : -1)
        self.timeslices = TimeSliceList()

    def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
                     next_comm, next_pid, next_prio):
        """ Ensure the task we sched out this cpu is really the one
            we logged. Otherwise we may have missed traces """

        on_cpu_task = self.current_tsk[headers.cpu]

        if on_cpu_task != -1 and on_cpu_task != prev_pid:
            print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
                (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)

        threads[prev_pid] = prev_comm
        threads[next_pid] = next_comm
        self.current_tsk[headers.cpu] = next_pid

        ts = self.timeslices.get_time_slice(headers.ts())
        ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)

    def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)

    def wake_up(self, headers, comm, pid, success, target_cpu, fork):
        if success == 0:
            return
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.wake_up(self.timeslices, pid, target_cpu, fork)


def trace_begin():
    global parser
    parser = SchedEventProxy()

def trace_end():
    app = wx.App(False)
    timeslices = parser.timeslices
    frame = RootFrame(timeslices, "Migration")
    app.MainLoop()

def sched__sched_stat_runtime(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, comm, pid, runtime, vruntime):
    pass

def sched__sched_stat_iowait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, comm, pid, delay):
    pass

def sched__sched_stat_sleep(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, comm, pid, delay):
    pass

def sched__sched_stat_wait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, comm, pid, delay):
    pass

def sched__sched_process_fork(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, parent_comm, parent_pid, child_comm, child_pid):
    pass

def sched__sched_process_wait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, comm, pid, prio):
    pass

def sched__sched_process_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, comm, pid, prio):
    pass

def sched__sched_process_free(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, comm, pid, prio):
    pass

def sched__sched_migrate_task(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, comm, pid, prio, orig_cpu,
        dest_cpu):
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
                           common_pid, common_comm, common_callchain)
    parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)

def sched__sched_switch(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm, common_callchain,
        prev_comm, prev_pid, prev_prio, prev_state,
        next_comm, next_pid, next_prio):

    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
                           common_pid, common_comm, common_callchain)
    parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
                        next_comm, next_pid, next_prio)

def sched__sched_wakeup_new(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, comm, pid, prio, success,
        target_cpu):
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
                           common_pid, common_comm, common_callchain)
    parser.wake_up(headers, comm, pid, success, target_cpu, 1)

def sched__sched_wakeup(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, comm, pid, prio, success,
        target_cpu):
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
                           common_pid, common_comm, common_callchain)
    parser.wake_up(headers, comm, pid, success, target_cpu, 0)

def sched__sched_wait_task(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, comm, pid, prio):
    pass

def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, ret):
    pass

def sched__sched_kthread_stop(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, comm, pid):
    pass

def trace_unhandled(event_name, context, event_fields_dict):
    pass
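
sched-migration.py builds its view out of copy-on-event time slices: every scheduler event closes the current TimeSlice and opens a new one whose per-CPU runqueue snapshots start as a copy of the previous slice, so only the CPU that saw the event changes. A stripped-down sketch of that idea, using made-up names (DemoSlice, on_event) rather than the script's classes:

from collections import defaultdict

class DemoSlice(object):
    def __init__(self, start, prev=None):
        self.start = start
        self.end = start
        # copy per-cpu state from the previous slice so unchanged CPUs carry over
        self.rqs = prev.rqs.copy() if prev else defaultdict(tuple)

    def close(self, t):
        self.end = t
        return DemoSlice(t, self)

slices = [DemoSlice(0)]

def on_event(t, cpu, tasks):
    new = slices[-1].close(t)     # previous slice now spans [prev.start, t)
    new.rqs[cpu] = tuple(tasks)   # only the event's CPU changes in the new slice
    slices.append(new)

on_event(10, 0, ('idle', 'foo'))
on_event(25, 1, ('idle', 'bar'))
print([(s.start, s.end, dict(s.rqs)) for s in slices[:-1]])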
80  tools/perf/scripts/python/sctop.py  Normal file
@@ -0,0 +1,80 @@
# system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.

import os, sys, thread, time

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from Util import *

usage = "perf script -s sctop.py [comm] [interval]\n";

for_comm = None
default_interval = 3
interval = default_interval

if len(sys.argv) > 3:
    sys.exit(usage)

if len(sys.argv) > 2:
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif len(sys.argv) > 1:
    try:
        interval = int(sys.argv[1])
    except ValueError:
        for_comm = sys.argv[1]
        interval = default_interval

syscalls = autodict()

def trace_begin():
    thread.start_new_thread(print_syscall_totals, (interval,))
    pass

def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, id, args):
    if for_comm is not None:
        if common_comm != for_comm:
            return
    try:
        syscalls[id] += 1
    except TypeError:
        syscalls[id] = 1

def syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    raw_syscalls__sys_enter(**locals())

def print_syscall_totals(interval):
    while 1:
        clear_term()
        if for_comm is not None:
            print "\nsyscall events for %s:\n\n" % (for_comm),
        else:
            print "\nsyscall events:\n\n",

        print "%-40s %10s\n" % ("event", "count"),
        print "%-40s %10s\n" % ("----------------------------------------", \
                                "----------"),

        for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
                              reverse = True):
            try:
                print "%-40s %10d\n" % (syscall_name(id), val),
            except TypeError:
                pass
        syscalls.clear()
        time.sleep(interval)
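
sctop.py counts events with autodict() from the bundled Util helper, falling back from a TypeError to an initial count of 1. For readers without the perf tree, a rough standard-library equivalent of the same counting-and-sorting step might look like this (defaultdict here is an assumed stand-in, not what the script uses):

from collections import defaultdict

syscall_counts = defaultdict(int)

def count_sys_enter(syscall_id):
    # equivalent of: try: syscalls[id] += 1 / except TypeError: syscalls[id] = 1
    syscall_counts[syscall_id] += 1

for sid in (0, 1, 1, 3, 1):
    count_sys_enter(sid)

# sort by count, descending, like print_syscall_totals() does
for sid, val in sorted(syscall_counts.items(), key=lambda kv: kv[1], reverse=True):
    print("%-10d %10d" % (sid, val))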
74  tools/perf/scripts/python/syscall-counts-by-pid.py  Normal file
@@ -0,0 +1,74 @@
# system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.

import os, sys

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from Util import syscall_name

usage = "perf script -s syscall-counts-by-pid.py [comm]\n";

for_comm = None
for_pid = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    try:
        for_pid = int(sys.argv[1])
    except:
        for_comm = sys.argv[1]

syscalls = autodict()

def trace_begin():
    print "Press control+C to stop and show the summary"

def trace_end():
    print_syscall_totals()

def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, id, args):

    if (for_comm and common_comm != for_comm) or \
       (for_pid and common_pid != for_pid):
        return
    try:
        syscalls[common_comm][common_pid][id] += 1
    except TypeError:
        syscalls[common_comm][common_pid][id] = 1

def syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    raw_syscalls__sys_enter(**locals())

def print_syscall_totals():
    if for_comm is not None:
        print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall events by comm/pid:\n\n",

    print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
                            "----------"),

    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            for id, val in sorted(syscalls[comm][pid].iteritems(), \
                    key = lambda(k, v): (v, k), reverse = True):
                print "  %-38s %10d\n" % (syscall_name(id), val),
64  tools/perf/scripts/python/syscall-counts.py  Normal file
@@ -0,0 +1,64 @@
# system call counts
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.

import os
import sys

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from Util import syscall_name

usage = "perf script -s syscall-counts.py [comm]\n";

for_comm = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    for_comm = sys.argv[1]

syscalls = autodict()

def trace_begin():
    print "Press control+C to stop and show the summary"

def trace_end():
    print_syscall_totals()

def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, id, args):
    if for_comm is not None:
        if common_comm != for_comm:
            return
    try:
        syscalls[id] += 1
    except TypeError:
        syscalls[id] = 1

def syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    raw_syscalls__sys_enter(**locals())

def print_syscall_totals():
    if for_comm is not None:
        print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall events:\n\n",

    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
                            "-----------"),

    for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
                          reverse = True):
        print "%-40s %10d\n" % (syscall_name(id), val),