Fixed MTP to work with TWRP

This commit is contained in:
awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

View file

@ -0,0 +1,118 @@
/*
 * Trace files that want to automate creation of all tracepoints defined
 * in their file should include this file. The following are macros that the
 * trace file may define:
 *
 * TRACE_SYSTEM defines the system the tracepoint is for
 *
 * TRACE_INCLUDE_FILE if the file name is something other than TRACE_SYSTEM.h
 * This macro may be defined to tell define_trace.h what file to include.
 * Note, leave off the ".h".
 *
 * TRACE_INCLUDE_PATH if the path is something other than core kernel include/trace
 * then this macro can define the path to use. Note, the path is relative to
 * define_trace.h, not the file including it. Full path names for out of tree
 * modules must be used.
 */
#ifdef CREATE_TRACE_POINTS
/* Prevent recursion */
#undef CREATE_TRACE_POINTS
#include <linux/stringify.h>
/*
 * Remap the whole TRACE_EVENT() macro family so that re-reading the trace
 * header below emits the actual tracepoint instantiations (DEFINE_TRACE*)
 * instead of the declarations produced on the header's first, normal read.
 */
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
DEFINE_TRACE(name)
#undef TRACE_EVENT_CONDITION
/* A conditional event instantiates the same tracepoint as a plain one. */
#define TRACE_EVENT_CONDITION(name, proto, args, cond, tstruct, assign, print) \
TRACE_EVENT(name, \
PARAMS(proto), \
PARAMS(args), \
PARAMS(tstruct), \
PARAMS(assign), \
PARAMS(print))
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct, \
assign, print, reg, unreg) \
DEFINE_TRACE_FN(name, reg, unreg)
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
DEFINE_TRACE(name)
#undef DEFINE_EVENT_FN
#define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg) \
DEFINE_TRACE_FN(name, reg, unreg)
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
DEFINE_TRACE(name)
#undef DEFINE_EVENT_CONDITION
#define DEFINE_EVENT_CONDITION(template, name, proto, args, cond) \
DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
#undef DECLARE_TRACE
#define DECLARE_TRACE(name, proto, args) \
DEFINE_TRACE(name)
/*
 * Build the #include line for the trace header being processed, honoring
 * the optional TRACE_INCLUDE_FILE / TRACE_INCLUDE_PATH overrides described
 * in the header comment above. UNDEF_* markers remember which defaults we
 * supplied here so only those are undefined at the end.
 */
#undef TRACE_INCLUDE
#undef __TRACE_INCLUDE
#ifndef TRACE_INCLUDE_FILE
# define TRACE_INCLUDE_FILE TRACE_SYSTEM
# define UNDEF_TRACE_INCLUDE_FILE
#endif
#ifndef TRACE_INCLUDE_PATH
# define __TRACE_INCLUDE(system) <trace/events/system.h>
# define UNDEF_TRACE_INCLUDE_PATH
#else
# define __TRACE_INCLUDE(system) __stringify(TRACE_INCLUDE_PATH/system.h)
#endif
# define TRACE_INCLUDE(system) __TRACE_INCLUDE(system)
/* Let the trace headers be reread */
#define TRACE_HEADER_MULTI_READ
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/* Make all open coded DECLARE_TRACE nops */
#undef DECLARE_TRACE
#define DECLARE_TRACE(name, proto, args)
/* Generate the per-event ftrace glue (formats, probes) when enabled. */
#ifdef CONFIG_EVENT_TRACING
#include <trace/ftrace.h>
#endif
/* Tear down our remappings so the next trace header starts clean. */
#undef TRACE_EVENT
#undef TRACE_EVENT_FN
#undef TRACE_EVENT_CONDITION
#undef DECLARE_EVENT_CLASS
#undef DEFINE_EVENT
#undef DEFINE_EVENT_FN
#undef DEFINE_EVENT_PRINT
#undef DEFINE_EVENT_CONDITION
#undef TRACE_HEADER_MULTI_READ
#undef DECLARE_TRACE
/* Only undef what we defined in this file */
#ifdef UNDEF_TRACE_INCLUDE_FILE
# undef TRACE_INCLUDE_FILE
# undef UNDEF_TRACE_INCLUDE_FILE
#endif
#ifdef UNDEF_TRACE_INCLUDE_PATH
# undef TRACE_INCLUDE_PATH
# undef UNDEF_TRACE_INCLUDE_PATH
#endif
/* We may be processing more files */
#define CREATE_TRACE_POINTS
#endif /* CREATE_TRACE_POINTS */

154
include/trace/events/9p.h Normal file
View file

@ -0,0 +1,154 @@
/*
 * Tracepoints for the 9P (Plan 9 filesystem protocol) client:
 * request transmit, response receive, and raw protocol dumps.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM 9p
#if !defined(_TRACE_9P_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_9P_H
#include <linux/tracepoint.h>
/*
 * Map a 9P message type code to its symbolic name so trace output shows
 * e.g. "P9_TREAD" rather than a raw number. Covers both the 9P2000.L
 * (P9_TLERROR..P9_RUNLINKAT) and legacy 9P2000 (P9_TVERSION..) opcodes.
 */
#define show_9p_op(type) \
__print_symbolic(type, \
{ P9_TLERROR, "P9_TLERROR" }, \
{ P9_RLERROR, "P9_RLERROR" }, \
{ P9_TSTATFS, "P9_TSTATFS" }, \
{ P9_RSTATFS, "P9_RSTATFS" }, \
{ P9_TLOPEN, "P9_TLOPEN" }, \
{ P9_RLOPEN, "P9_RLOPEN" }, \
{ P9_TLCREATE, "P9_TLCREATE" }, \
{ P9_RLCREATE, "P9_RLCREATE" }, \
{ P9_TSYMLINK, "P9_TSYMLINK" }, \
{ P9_RSYMLINK, "P9_RSYMLINK" }, \
{ P9_TMKNOD, "P9_TMKNOD" }, \
{ P9_RMKNOD, "P9_RMKNOD" }, \
{ P9_TRENAME, "P9_TRENAME" }, \
{ P9_RRENAME, "P9_RRENAME" }, \
{ P9_TREADLINK, "P9_TREADLINK" }, \
{ P9_RREADLINK, "P9_RREADLINK" }, \
{ P9_TGETATTR, "P9_TGETATTR" }, \
{ P9_RGETATTR, "P9_RGETATTR" }, \
{ P9_TSETATTR, "P9_TSETATTR" }, \
{ P9_RSETATTR, "P9_RSETATTR" }, \
{ P9_TXATTRWALK, "P9_TXATTRWALK" }, \
{ P9_RXATTRWALK, "P9_RXATTRWALK" }, \
{ P9_TXATTRCREATE, "P9_TXATTRCREATE" }, \
{ P9_RXATTRCREATE, "P9_RXATTRCREATE" }, \
{ P9_TREADDIR, "P9_TREADDIR" }, \
{ P9_RREADDIR, "P9_RREADDIR" }, \
{ P9_TFSYNC, "P9_TFSYNC" }, \
{ P9_RFSYNC, "P9_RFSYNC" }, \
{ P9_TLOCK, "P9_TLOCK" }, \
{ P9_RLOCK, "P9_RLOCK" }, \
{ P9_TGETLOCK, "P9_TGETLOCK" }, \
{ P9_RGETLOCK, "P9_RGETLOCK" }, \
{ P9_TLINK, "P9_TLINK" }, \
{ P9_RLINK, "P9_RLINK" }, \
{ P9_TMKDIR, "P9_TMKDIR" }, \
{ P9_RMKDIR, "P9_RMKDIR" }, \
{ P9_TRENAMEAT, "P9_TRENAMEAT" }, \
{ P9_RRENAMEAT, "P9_RRENAMEAT" }, \
{ P9_TUNLINKAT, "P9_TUNLINKAT" }, \
{ P9_RUNLINKAT, "P9_RUNLINKAT" }, \
{ P9_TVERSION, "P9_TVERSION" }, \
{ P9_RVERSION, "P9_RVERSION" }, \
{ P9_TAUTH, "P9_TAUTH" }, \
{ P9_RAUTH, "P9_RAUTH" }, \
{ P9_TATTACH, "P9_TATTACH" }, \
{ P9_RATTACH, "P9_RATTACH" }, \
{ P9_TERROR, "P9_TERROR" }, \
{ P9_RERROR, "P9_RERROR" }, \
{ P9_TFLUSH, "P9_TFLUSH" }, \
{ P9_RFLUSH, "P9_RFLUSH" }, \
{ P9_TWALK, "P9_TWALK" }, \
{ P9_RWALK, "P9_RWALK" }, \
{ P9_TOPEN, "P9_TOPEN" }, \
{ P9_ROPEN, "P9_ROPEN" }, \
{ P9_TCREATE, "P9_TCREATE" }, \
{ P9_RCREATE, "P9_RCREATE" }, \
{ P9_TREAD, "P9_TREAD" }, \
{ P9_RREAD, "P9_RREAD" }, \
{ P9_TWRITE, "P9_TWRITE" }, \
{ P9_RWRITE, "P9_RWRITE" }, \
{ P9_TCLUNK, "P9_TCLUNK" }, \
{ P9_RCLUNK, "P9_RCLUNK" }, \
{ P9_TREMOVE, "P9_TREMOVE" }, \
{ P9_RREMOVE, "P9_RREMOVE" }, \
{ P9_TSTAT, "P9_TSTAT" }, \
{ P9_RSTAT, "P9_RSTAT" }, \
{ P9_TWSTAT, "P9_TWSTAT" }, \
{ P9_RWSTAT, "P9_RWSTAT" })
/*
 * 9p_client_req - a request message was issued by the client.
 * Records the client pointer (as an opaque id), message type and tag.
 * NOTE(review): the pointer is printed via %lu with a (long) cast — a
 * signed/unsigned format mismatch tolerated by trace printk; verify
 * against the current upstream definition before relying on it.
 */
TRACE_EVENT(9p_client_req,
TP_PROTO(struct p9_client *clnt, int8_t type, int tag),
TP_ARGS(clnt, type, tag),
TP_STRUCT__entry(
__field( void *, clnt )
__field( __u8, type )
__field( __u32, tag )
),
TP_fast_assign(
__entry->clnt = clnt;
__entry->type = type;
__entry->tag = tag;
),
TP_printk("client %lu request %s tag %d",
(long)__entry->clnt, show_9p_op(__entry->type),
__entry->tag)
);
/*
 * 9p_client_res - a response message was received; also records the
 * error code extracted from the reply (0 on success).
 */
TRACE_EVENT(9p_client_res,
TP_PROTO(struct p9_client *clnt, int8_t type, int tag, int err),
TP_ARGS(clnt, type, tag, err),
TP_STRUCT__entry(
__field( void *, clnt )
__field( __u8, type )
__field( __u32, tag )
__field( __u32, err )
),
TP_fast_assign(
__entry->clnt = clnt;
__entry->type = type;
__entry->tag = tag;
__entry->err = err;
),
TP_printk("client %lu response %s tag %d err %d",
(long)__entry->clnt, show_9p_op(__entry->type),
__entry->tag, __entry->err)
);
/* dump 32 bytes of protocol data */
#define P9_PROTO_DUMP_SZ 32
/*
 * 9p_protocol_dump - snapshot the first P9_PROTO_DUMP_SZ bytes of a PDU's
 * payload, printed as two 16-byte hex rows. The PDU must have at least
 * P9_PROTO_DUMP_SZ bytes in sdata (memcpy below does not bound-check).
 */
TRACE_EVENT(9p_protocol_dump,
TP_PROTO(struct p9_client *clnt, struct p9_fcall *pdu),
TP_ARGS(clnt, pdu),
TP_STRUCT__entry(
__field( void *, clnt )
__field( __u8, type )
__field( __u16, tag )
__array( unsigned char, line, P9_PROTO_DUMP_SZ )
),
TP_fast_assign(
__entry->clnt = clnt;
__entry->type = pdu->id;
__entry->tag = pdu->tag;
memcpy(__entry->line, pdu->sdata, P9_PROTO_DUMP_SZ);
),
TP_printk("clnt %lu %s(tag = %d)\n%.3x: %16ph\n%.3x: %16ph\n",
(unsigned long)__entry->clnt, show_9p_op(__entry->type),
__entry->tag, 0, __entry->line, 16, __entry->line + 16)
);
#endif /* _TRACE_9P_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

319
include/trace/events/asoc.h Normal file
View file

@ -0,0 +1,319 @@
/*
 * Tracepoints for ASoC (ALSA System on Chip): card bias-level changes,
 * DAPM power walks and path resolution, jack detection, and register
 * cache synchronisation.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM asoc
#if !defined(_TRACE_ASOC_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_ASOC_H
#include <linux/ktime.h>
#include <linux/tracepoint.h>
/* Placeholder path name for a direct (unnamed) DAPM connection. */
#define DAPM_DIRECT "(direct)"
/* Forward declarations only — events store names/ints, never the structs. */
struct snd_soc_jack;
struct snd_soc_codec;
struct snd_soc_card;
struct snd_soc_dapm_widget;
struct snd_soc_dapm_path;
/* Template: card name plus one integer value (e.g. a bias level). */
DECLARE_EVENT_CLASS(snd_soc_card,
TP_PROTO(struct snd_soc_card *card, int val),
TP_ARGS(card, val),
TP_STRUCT__entry(
__string( name, card->name )
__field( int, val )
),
TP_fast_assign(
__assign_str(name, card->name);
__entry->val = val;
),
TP_printk("card=%s val=%d", __get_str(name), (int)__entry->val)
);
/* Bias-level transition started; val is the target level. */
DEFINE_EVENT(snd_soc_card, snd_soc_bias_level_start,
TP_PROTO(struct snd_soc_card *card, int val),
TP_ARGS(card, val)
);
/* Bias-level transition completed. */
DEFINE_EVENT(snd_soc_card, snd_soc_bias_level_done,
TP_PROTO(struct snd_soc_card *card, int val),
TP_ARGS(card, val)
);
/* Template: card name only, for begin/end style markers. */
DECLARE_EVENT_CLASS(snd_soc_dapm_basic,
TP_PROTO(struct snd_soc_card *card),
TP_ARGS(card),
TP_STRUCT__entry(
__string( name, card->name )
),
TP_fast_assign(
__assign_str(name, card->name);
),
TP_printk("card=%s", __get_str(name))
);
/* A DAPM power sequence began on this card. */
DEFINE_EVENT(snd_soc_dapm_basic, snd_soc_dapm_start,
TP_PROTO(struct snd_soc_card *card),
TP_ARGS(card)
);
/* The DAPM power sequence finished. */
DEFINE_EVENT(snd_soc_dapm_basic, snd_soc_dapm_done,
TP_PROTO(struct snd_soc_card *card),
TP_ARGS(card)
);
/* Template: DAPM widget name plus one integer value. */
DECLARE_EVENT_CLASS(snd_soc_dapm_widget,
TP_PROTO(struct snd_soc_dapm_widget *w, int val),
TP_ARGS(w, val),
TP_STRUCT__entry(
__string( name, w->name )
__field( int, val )
),
TP_fast_assign(
__assign_str(name, w->name);
__entry->val = val;
),
TP_printk("widget=%s val=%d", __get_str(name),
(int)__entry->val)
);
/* Widget power state decided; val is the new power value. */
DEFINE_EVENT(snd_soc_dapm_widget, snd_soc_dapm_widget_power,
TP_PROTO(struct snd_soc_dapm_widget *w, int val),
TP_ARGS(w, val)
);
/* Widget event callback about to run; val is the event flags. */
DEFINE_EVENT(snd_soc_dapm_widget, snd_soc_dapm_widget_event_start,
TP_PROTO(struct snd_soc_dapm_widget *w, int val),
TP_ARGS(w, val)
);
/* Widget event callback finished. */
DEFINE_EVENT(snd_soc_dapm_widget, snd_soc_dapm_widget_event_done,
TP_PROTO(struct snd_soc_dapm_widget *w, int val),
TP_ARGS(w, val)
);
/*
 * snd_soc_dapm_walk_done - a DAPM graph walk completed; records the
 * power/path/neighbour check counters accumulated in card->dapm_stats.
 */
TRACE_EVENT(snd_soc_dapm_walk_done,
TP_PROTO(struct snd_soc_card *card),
TP_ARGS(card),
TP_STRUCT__entry(
__string( name, card->name )
__field( int, power_checks )
__field( int, path_checks )
__field( int, neighbour_checks )
),
TP_fast_assign(
__assign_str(name, card->name);
__entry->power_checks = card->dapm_stats.power_checks;
__entry->path_checks = card->dapm_stats.path_checks;
__entry->neighbour_checks = card->dapm_stats.neighbour_checks;
),
TP_printk("%s: checks %d power, %d path, %d neighbour",
__get_str(name), (int)__entry->power_checks,
(int)__entry->path_checks, (int)__entry->neighbour_checks)
);
/*
 * snd_soc_dapm_output_path - one step of an output-path walk: widget ->
 * path -> sink. The leading '*' in the output marks a connected path
 * with a valid sink.
 */
TRACE_EVENT(snd_soc_dapm_output_path,
TP_PROTO(struct snd_soc_dapm_widget *widget,
struct snd_soc_dapm_path *path),
TP_ARGS(widget, path),
TP_STRUCT__entry(
__string( wname, widget->name )
__string( pname, path->name ? path->name : DAPM_DIRECT)
__string( psname, path->sink->name )
__field( int, path_sink )
__field( int, path_connect )
),
TP_fast_assign(
__assign_str(wname, widget->name);
__assign_str(pname, path->name ? path->name : DAPM_DIRECT);
__assign_str(psname, path->sink->name);
__entry->path_connect = path->connect;
__entry->path_sink = (long)path->sink;
),
TP_printk("%c%s -> %s -> %s",
(int) __entry->path_sink &&
(int) __entry->path_connect ? '*' : ' ',
__get_str(wname), __get_str(pname), __get_str(psname))
);
/*
 * snd_soc_dapm_input_path - mirror of the output-path event for walks
 * toward the source side: widget <- path <- source.
 */
TRACE_EVENT(snd_soc_dapm_input_path,
TP_PROTO(struct snd_soc_dapm_widget *widget,
struct snd_soc_dapm_path *path),
TP_ARGS(widget, path),
TP_STRUCT__entry(
__string( wname, widget->name )
__string( pname, path->name ? path->name : DAPM_DIRECT)
__string( psname, path->source->name )
__field( int, path_source )
__field( int, path_connect )
),
TP_fast_assign(
__assign_str(wname, widget->name);
__assign_str(pname, path->name ? path->name : DAPM_DIRECT);
__assign_str(psname, path->source->name);
__entry->path_connect = path->connect;
__entry->path_source = (long)path->source;
),
TP_printk("%c%s <- %s <- %s",
(int) __entry->path_source &&
(int) __entry->path_connect ? '*' : ' ',
__get_str(wname), __get_str(pname), __get_str(psname))
);
/*
 * snd_soc_dapm_connected - result of a connected-paths query;
 * stream 0 is playback, non-zero is capture.
 */
TRACE_EVENT(snd_soc_dapm_connected,
TP_PROTO(int paths, int stream),
TP_ARGS(paths, stream),
TP_STRUCT__entry(
__field( int, paths )
__field( int, stream )
),
TP_fast_assign(
__entry->paths = paths;
__entry->stream = stream;
),
TP_printk("%s: found %d paths",
__entry->stream ? "capture" : "playback", __entry->paths)
);
/* snd_soc_jack_irq - jack detection interrupt fired (name of the jack). */
TRACE_EVENT(snd_soc_jack_irq,
TP_PROTO(const char *name),
TP_ARGS(name),
TP_STRUCT__entry(
__string( name, name )
),
TP_fast_assign(
__assign_str(name, name);
),
TP_printk("%s", __get_str(name))
);
/*
 * snd_soc_jack_report - jack status reported: val is the new status
 * restricted to the bits in mask.
 */
TRACE_EVENT(snd_soc_jack_report,
TP_PROTO(struct snd_soc_jack *jack, int mask, int val),
TP_ARGS(jack, mask, val),
TP_STRUCT__entry(
__string( name, jack->jack->name )
__field( int, mask )
__field( int, val )
),
TP_fast_assign(
__assign_str(name, jack->jack->name);
__entry->mask = mask;
__entry->val = val;
),
TP_printk("jack=%s %x/%x", __get_str(name), (int)__entry->val,
(int)__entry->mask)
);
/* snd_soc_jack_notify - jack notifier chain invoked with status val. */
TRACE_EVENT(snd_soc_jack_notify,
TP_PROTO(struct snd_soc_jack *jack, int val),
TP_ARGS(jack, val),
TP_STRUCT__entry(
__string( name, jack->jack->name )
__field( int, val )
),
TP_fast_assign(
__assign_str(name, jack->jack->name);
__entry->val = val;
),
TP_printk("jack=%s %x", __get_str(name), (int)__entry->val)
);
/*
 * snd_soc_cache_sync - register cache sync progress for a codec;
 * type/status are free-form strings supplied by the caller.
 */
TRACE_EVENT(snd_soc_cache_sync,
TP_PROTO(struct snd_soc_codec *codec, const char *type,
const char *status),
TP_ARGS(codec, type, status),
TP_STRUCT__entry(
__string( name, codec->component.name)
__string( status, status )
__string( type, type )
__field( int, id )
),
TP_fast_assign(
__assign_str(name, codec->component.name);
__assign_str(status, status);
__assign_str(type, type);
__entry->id = codec->component.id;
),
TP_printk("codec=%s.%d type=%s status=%s", __get_str(name),
(int)__entry->id, __get_str(type), __get_str(status))
);
#endif /* _TRACE_ASOC_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,482 @@
/*
 * Tracepoints for bcache (the block-layer SSD cache): request lifecycle,
 * read/write cache decisions, btree and journal operations, allocator
 * activity, and background writeback.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcache
#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHE_H
#include <linux/tracepoint.h>
/*
 * Template: a bcache device plus the bio being serviced. orig_sector is
 * bi_sector minus 16; NOTE(review): presumably this strips the bcache
 * superblock offset to recover the backing-device sector — confirm
 * against drivers/md/bcache before relying on it.
 */
DECLARE_EVENT_CLASS(bcache_request,
TP_PROTO(struct bcache_device *d, struct bio *bio),
TP_ARGS(d, bio),
TP_STRUCT__entry(
__field(dev_t, dev )
__field(unsigned int, orig_major )
__field(unsigned int, orig_minor )
__field(sector_t, sector )
__field(dev_t, orig_sector )
__field(unsigned int, nr_sector )
__array(char, rwbs, 6 )
),
TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev;
__entry->orig_major = d->disk->major;
__entry->orig_minor = d->disk->first_minor;
__entry->sector = bio->bi_iter.bi_sector;
__entry->orig_sector = bio->bi_iter.bi_sector - 16;
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
),
TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->rwbs, (unsigned long long)__entry->sector,
__entry->nr_sector, __entry->orig_major, __entry->orig_minor,
(unsigned long long)__entry->orig_sector)
);
/* Template: decode a bkey into inode:offset, length and dirty flag. */
DECLARE_EVENT_CLASS(bkey,
TP_PROTO(struct bkey *k),
TP_ARGS(k),
TP_STRUCT__entry(
__field(u32, size )
__field(u32, inode )
__field(u64, offset )
__field(bool, dirty )
),
TP_fast_assign(
__entry->inode = KEY_INODE(k);
__entry->offset = KEY_OFFSET(k);
__entry->size = KEY_SIZE(k);
__entry->dirty = KEY_DIRTY(k);
),
TP_printk("%u:%llu len %u dirty %u", __entry->inode,
__entry->offset, __entry->size, __entry->dirty)
);
/* Template: identify a btree node by the bucket its key points at. */
DECLARE_EVENT_CLASS(btree_node,
TP_PROTO(struct btree *b),
TP_ARGS(b),
TP_STRUCT__entry(
__field(size_t, bucket )
),
TP_fast_assign(
__entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
),
TP_printk("bucket %zu", __entry->bucket)
);
/* request.c */
/* A request entered bcache's request path. */
DEFINE_EVENT(bcache_request, bcache_request_start,
TP_PROTO(struct bcache_device *d, struct bio *bio),
TP_ARGS(d, bio)
);
/* The request completed. */
DEFINE_EVENT(bcache_request, bcache_request_end,
TP_PROTO(struct bcache_device *d, struct bio *bio),
TP_ARGS(d, bio)
);
/* Template: bio-only events (device, sector, length, rwbs flags). */
DECLARE_EVENT_CLASS(bcache_bio,
TP_PROTO(struct bio *bio),
TP_ARGS(bio),
TP_STRUCT__entry(
__field(dev_t, dev )
__field(sector_t, sector )
__field(unsigned int, nr_sector )
__array(char, rwbs, 6 )
),
TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev;
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
),
TP_printk("%d,%d %s %llu + %u",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
(unsigned long long)__entry->sector, __entry->nr_sector)
);
/* IO bypassed the cache because it was detected as sequential. */
DEFINE_EVENT(bcache_bio, bcache_bypass_sequential,
TP_PROTO(struct bio *bio),
TP_ARGS(bio)
);
/* IO bypassed the cache because the cache device was congested. */
DEFINE_EVENT(bcache_bio, bcache_bypass_congested,
TP_PROTO(struct bio *bio),
TP_ARGS(bio)
);
/* bcache_read - a read was serviced; records cache hit and bypass flags. */
TRACE_EVENT(bcache_read,
TP_PROTO(struct bio *bio, bool hit, bool bypass),
TP_ARGS(bio, hit, bypass),
TP_STRUCT__entry(
__field(dev_t, dev )
__field(sector_t, sector )
__field(unsigned int, nr_sector )
__array(char, rwbs, 6 )
__field(bool, cache_hit )
__field(bool, bypass )
),
TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev;
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
__entry->cache_hit = hit;
__entry->bypass = bypass;
),
TP_printk("%d,%d %s %llu + %u hit %u bypass %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->rwbs, (unsigned long long)__entry->sector,
__entry->nr_sector, __entry->cache_hit, __entry->bypass)
);
/*
 * bcache_write - a write was serviced; records the cache-set UUID, the
 * target inode, and whether writeback or bypass was chosen.
 * NOTE(review): the format says "hit %u" but prints the writeback flag —
 * label/value mismatch inherited from the original; confirm upstream
 * before changing the string (tools may parse it).
 */
TRACE_EVENT(bcache_write,
TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio,
bool writeback, bool bypass),
TP_ARGS(c, inode, bio, writeback, bypass),
TP_STRUCT__entry(
__array(char, uuid, 16 )
__field(u64, inode )
__field(sector_t, sector )
__field(unsigned int, nr_sector )
__array(char, rwbs, 6 )
__field(bool, writeback )
__field(bool, bypass )
),
TP_fast_assign(
memcpy(__entry->uuid, c->sb.set_uuid, 16);
__entry->inode = inode;
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
__entry->writeback = writeback;
__entry->bypass = bypass;
),
TP_printk("%pU inode %llu %s %llu + %u hit %u bypass %u",
__entry->uuid, __entry->inode,
__entry->rwbs, (unsigned long long)__entry->sector,
__entry->nr_sector, __entry->writeback, __entry->bypass)
);
/* A cache read failed and is being retried against the backing device. */
DEFINE_EVENT(bcache_bio, bcache_read_retry,
TP_PROTO(struct bio *bio),
TP_ARGS(bio)
);
/* A key was inserted into the cache. */
DEFINE_EVENT(bkey, bcache_cache_insert,
TP_PROTO(struct bkey *k),
TP_ARGS(k)
);
/* Journal */
/* Template: events identified only by the cache set's UUID. */
DECLARE_EVENT_CLASS(cache_set,
TP_PROTO(struct cache_set *c),
TP_ARGS(c),
TP_STRUCT__entry(
__array(char, uuid, 16 )
),
TP_fast_assign(
memcpy(__entry->uuid, c->sb.set_uuid, 16);
),
TP_printk("%pU", __entry->uuid)
);
/* A journaled key is being replayed during recovery. */
DEFINE_EVENT(bkey, bcache_journal_replay_key,
TP_PROTO(struct bkey *k),
TP_ARGS(k)
);
/* The journal ran out of space. */
DEFINE_EVENT(cache_set, bcache_journal_full,
TP_PROTO(struct cache_set *c),
TP_ARGS(c)
);
/* The current journal entry filled up. */
DEFINE_EVENT(cache_set, bcache_journal_entry_full,
TP_PROTO(struct cache_set *c),
TP_ARGS(c)
);
/* A journal write bio was submitted. */
DEFINE_EVENT(bcache_bio, bcache_journal_write,
TP_PROTO(struct bio *bio),
TP_ARGS(bio)
);
/* Btree */
/* Btree node cache was exhausted; a node is being cannibalized. */
DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize,
TP_PROTO(struct cache_set *c),
TP_ARGS(c)
);
/* A btree node was read from disk. */
DEFINE_EVENT(btree_node, bcache_btree_read,
TP_PROTO(struct btree *b),
TP_ARGS(b)
);
/*
 * bcache_btree_write - a btree node is being written; captures block and
 * key count, though only the bucket appears in the printed output.
 */
TRACE_EVENT(bcache_btree_write,
TP_PROTO(struct btree *b),
TP_ARGS(b),
TP_STRUCT__entry(
__field(size_t, bucket )
__field(unsigned, block )
__field(unsigned, keys )
),
TP_fast_assign(
__entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
__entry->block = b->written;
__entry->keys = b->keys.set[b->keys.nsets].data->keys;
),
TP_printk("bucket %zu", __entry->bucket)
);
/* A new btree node was allocated. */
DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
TP_PROTO(struct btree *b),
TP_ARGS(b)
);
/* Btree node allocation failed. */
DEFINE_EVENT(cache_set, bcache_btree_node_alloc_fail,
TP_PROTO(struct cache_set *c),
TP_ARGS(c)
);
/* A btree node was freed. */
DEFINE_EVENT(btree_node, bcache_btree_node_free,
TP_PROTO(struct btree *b),
TP_ARGS(b)
);
/* bcache_btree_gc_coalesce - GC merged this many adjacent btree nodes. */
TRACE_EVENT(bcache_btree_gc_coalesce,
TP_PROTO(unsigned nodes),
TP_ARGS(nodes),
TP_STRUCT__entry(
__field(unsigned, nodes )
),
TP_fast_assign(
__entry->nodes = nodes;
),
TP_printk("coalesced %u nodes", __entry->nodes)
);
/* Garbage collection started on the cache set. */
DEFINE_EVENT(cache_set, bcache_gc_start,
TP_PROTO(struct cache_set *c),
TP_ARGS(c)
);
/* Garbage collection finished. */
DEFINE_EVENT(cache_set, bcache_gc_end,
TP_PROTO(struct cache_set *c),
TP_ARGS(c)
);
/* GC is copying (moving) the data for this key. */
DEFINE_EVENT(bkey, bcache_gc_copy,
TP_PROTO(struct bkey *k),
TP_ARGS(k)
);
/* GC copy raced with a concurrent update of the same key. */
DEFINE_EVENT(bkey, bcache_gc_copy_collision,
TP_PROTO(struct bkey *k),
TP_ARGS(k)
);
/*
 * bcache_btree_insert_key - a key was inserted into a btree node;
 * records the node (bucket number), level, the decoded key, and the
 * caller-supplied op/status codes.
 */
TRACE_EVENT(bcache_btree_insert_key,
TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status),
TP_ARGS(b, k, op, status),
TP_STRUCT__entry(
__field(u64, btree_node )
__field(u32, btree_level )
__field(u32, inode )
__field(u64, offset )
__field(u32, size )
__field(u8, dirty )
__field(u8, op )
__field(u8, status )
),
TP_fast_assign(
__entry->btree_node = PTR_BUCKET_NR(b->c, &b->key, 0);
__entry->btree_level = b->level;
__entry->inode = KEY_INODE(k);
__entry->offset = KEY_OFFSET(k);
__entry->size = KEY_SIZE(k);
__entry->dirty = KEY_DIRTY(k);
__entry->op = op;
__entry->status = status;
),
TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u",
__entry->status, __entry->op,
__entry->btree_node, __entry->btree_level,
__entry->inode, __entry->offset,
__entry->size, __entry->dirty)
);
/* Template: btree node split/compact events (bucket plus key count). */
DECLARE_EVENT_CLASS(btree_split,
TP_PROTO(struct btree *b, unsigned keys),
TP_ARGS(b, keys),
TP_STRUCT__entry(
__field(size_t, bucket )
__field(unsigned, keys )
),
TP_fast_assign(
__entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
__entry->keys = keys;
),
TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
);
/* A btree node was split in two. */
DEFINE_EVENT(btree_split, bcache_btree_node_split,
TP_PROTO(struct btree *b, unsigned keys),
TP_ARGS(b, keys)
);
/* A btree node was compacted in place. */
DEFINE_EVENT(btree_split, bcache_btree_node_compact,
TP_PROTO(struct btree *b, unsigned keys),
TP_ARGS(b, keys)
);
/* A new btree root was installed. */
DEFINE_EVENT(btree_node, bcache_btree_set_root,
TP_PROTO(struct btree *b),
TP_ARGS(b)
);
/*
 * bcache_keyscan - a key-range scan finished; reports how many keys were
 * found between start inode:offset and end inode:offset.
 */
TRACE_EVENT(bcache_keyscan,
TP_PROTO(unsigned nr_found,
unsigned start_inode, uint64_t start_offset,
unsigned end_inode, uint64_t end_offset),
TP_ARGS(nr_found,
start_inode, start_offset,
end_inode, end_offset),
TP_STRUCT__entry(
__field(__u32, nr_found )
__field(__u32, start_inode )
__field(__u64, start_offset )
__field(__u32, end_inode )
__field(__u64, end_offset )
),
TP_fast_assign(
__entry->nr_found = nr_found;
__entry->start_inode = start_inode;
__entry->start_offset = start_offset;
__entry->end_inode = end_inode;
__entry->end_offset = end_offset;
),
TP_printk("found %u keys from %u:%llu to %u:%llu", __entry->nr_found,
__entry->start_inode, __entry->start_offset,
__entry->end_inode, __entry->end_offset)
);
/* Allocator */
/*
 * bcache_invalidate - a bucket was invalidated for reuse; records the
 * in-use sector count and the bucket's starting sector on the cache dev.
 */
TRACE_EVENT(bcache_invalidate,
TP_PROTO(struct cache *ca, size_t bucket),
TP_ARGS(ca, bucket),
TP_STRUCT__entry(
__field(unsigned, sectors )
__field(dev_t, dev )
__field(__u64, offset )
),
TP_fast_assign(
__entry->dev = ca->bdev->bd_dev;
__entry->offset = bucket << ca->set->bucket_bits;
__entry->sectors = GC_SECTORS_USED(&ca->buckets[bucket]);
),
TP_printk("invalidated %u sectors at %d,%d sector=%llu",
__entry->sectors, MAJOR(__entry->dev),
MINOR(__entry->dev), __entry->offset)
);
/* bcache_alloc - a bucket was allocated; offset is its starting sector. */
TRACE_EVENT(bcache_alloc,
TP_PROTO(struct cache *ca, size_t bucket),
TP_ARGS(ca, bucket),
TP_STRUCT__entry(
__field(dev_t, dev )
__field(__u64, offset )
),
TP_fast_assign(
__entry->dev = ca->bdev->bd_dev;
__entry->offset = bucket << ca->set->bucket_bits;
),
TP_printk("allocated %d,%d sector=%llu", MAJOR(__entry->dev),
MINOR(__entry->dev), __entry->offset)
);
/*
 * bcache_alloc_fail - bucket allocation from the given reserve failed;
 * snapshots the free/free_inc fifo fill levels and prio_blocked count.
 */
TRACE_EVENT(bcache_alloc_fail,
TP_PROTO(struct cache *ca, unsigned reserve),
TP_ARGS(ca, reserve),
TP_STRUCT__entry(
__field(dev_t, dev )
__field(unsigned, free )
__field(unsigned, free_inc )
__field(unsigned, blocked )
),
TP_fast_assign(
__entry->dev = ca->bdev->bd_dev;
__entry->free = fifo_used(&ca->free[reserve]);
__entry->free_inc = fifo_used(&ca->free_inc);
__entry->blocked = atomic_read(&ca->set->prio_blocked);
),
TP_printk("alloc fail %d,%d free %u free_inc %u blocked %u",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->free,
__entry->free_inc, __entry->blocked)
);
/* Background writeback */
/* Writeback is flushing the data behind this key to the backing device. */
DEFINE_EVENT(bkey, bcache_writeback,
TP_PROTO(struct bkey *k),
TP_ARGS(k)
);
/* Writeback raced with a concurrent update of the same key. */
DEFINE_EVENT(bkey, bcache_writeback_collision,
TP_PROTO(struct bkey *k),
TP_ARGS(k)
);
#endif /* _TRACE_BCACHE_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,674 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM block
#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BLOCK_H
#include <linux/blktrace_api.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/tracepoint.h>
#define RWBS_LEN 8
DECLARE_EVENT_CLASS(block_buffer,
TP_PROTO(struct buffer_head *bh),
TP_ARGS(bh),
TP_STRUCT__entry (
__field( dev_t, dev )
__field( sector_t, sector )
__field( size_t, size )
),
TP_fast_assign(
__entry->dev = bh->b_bdev->bd_dev;
__entry->sector = bh->b_blocknr;
__entry->size = bh->b_size;
),
TP_printk("%d,%d sector=%llu size=%zu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->sector, __entry->size
)
);
/**
* block_touch_buffer - mark a buffer accessed
* @bh: buffer_head being touched
*
* Called from touch_buffer().
*/
DEFINE_EVENT(block_buffer, block_touch_buffer,
TP_PROTO(struct buffer_head *bh),
TP_ARGS(bh)
);
/**
* block_dirty_buffer - mark a buffer dirty
* @bh: buffer_head being dirtied
*
* Called from mark_buffer_dirty().
*/
DEFINE_EVENT(block_buffer, block_dirty_buffer,
TP_PROTO(struct buffer_head *bh),
TP_ARGS(bh)
);
DECLARE_EVENT_CLASS(block_rq_with_error,
TP_PROTO(struct request_queue *q, struct request *rq),
TP_ARGS(q, rq),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__field( int, errors )
__array( char, rwbs, RWBS_LEN )
__dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
),
TP_fast_assign(
__entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
__entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
0 : blk_rq_pos(rq);
__entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
0 : blk_rq_sectors(rq);
__entry->errors = rq->errors;
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
blk_dump_cmd(__get_str(cmd), rq);
),
TP_printk("%d,%d %s (%s) %llu + %u [%d]",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->rwbs, __get_str(cmd),
(unsigned long long)__entry->sector,
__entry->nr_sector, __entry->errors)
);
/**
* block_rq_abort - abort block operation request
* @q: queue containing the block operation request
* @rq: block IO operation request
*
* Called immediately after pending block IO operation request @rq in
* queue @q is aborted. The fields in the operation request @rq
* can be examined to determine which device and sectors the pending
* operation would access.
*/
DEFINE_EVENT(block_rq_with_error, block_rq_abort,
TP_PROTO(struct request_queue *q, struct request *rq),
TP_ARGS(q, rq)
);
/**
* block_rq_requeue - place block IO request back on a queue
* @q: queue holding operation
* @rq: block IO operation request
*
* The block operation request @rq is being placed back into queue
* @q. For some reason the request was not completed and needs to be
* put back in the queue.
*/
DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
TP_PROTO(struct request_queue *q, struct request *rq),
TP_ARGS(q, rq)
);
/**
 * block_rq_complete - block IO operation completed by device driver
 * @q: queue containing the block operation request
 * @rq: block operations request
 * @nr_bytes: number of completed bytes
 *
 * The block_rq_complete tracepoint event indicates that some portion
 * of operation request has been completed by the device driver. If
 * the @rq->bio is %NULL, then there is absolutely no additional work to
 * do for the request. If @rq->bio is non-NULL then there is
 * additional work required to complete the request.
 */
TRACE_EVENT(block_rq_complete,
TP_PROTO(struct request_queue *q, struct request *rq,
unsigned int nr_bytes),
TP_ARGS(q, rq, nr_bytes),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__field( int, errors )
__array( char, rwbs, RWBS_LEN )
__dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
),
TP_fast_assign(
__entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
__entry->sector = blk_rq_pos(rq);
/* completed size in 512-byte sectors, derived from @nr_bytes
 * (not the request's total size) */
__entry->nr_sector = nr_bytes >> 9;
__entry->errors = rq->errors;
/* rwbs flags are likewise sized by the completed bytes */
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes);
blk_dump_cmd(__get_str(cmd), rq);
),
TP_printk("%d,%d %s (%s) %llu + %u [%d]",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->rwbs, __get_str(cmd),
(unsigned long long)__entry->sector,
__entry->nr_sector, __entry->errors)
);
/* Event class shared by block_rq_insert and block_rq_issue. Records the
 * device, sector range, payload byte count, rwbs flags, raw command
 * buffer and the issuing task's comm. For BLOCK_PC (passthrough)
 * requests sector/nr_sector are forced to 0 and bytes carries the
 * payload size; for filesystem requests it is the other way around. */
DECLARE_EVENT_CLASS(block_rq,
TP_PROTO(struct request_queue *q, struct request *rq),
TP_ARGS(q, rq),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__field( unsigned int, bytes )
__array( char, rwbs, RWBS_LEN )
__array( char, comm, TASK_COMM_LEN )
__dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
),
TP_fast_assign(
__entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
__entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
0 : blk_rq_pos(rq);
__entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
0 : blk_rq_sectors(rq);
__entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
blk_rq_bytes(rq) : 0;
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
blk_dump_cmd(__get_str(cmd), rq);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->rwbs, __entry->bytes, __get_str(cmd),
(unsigned long long)__entry->sector,
__entry->nr_sector, __entry->comm)
);
/**
 * block_rq_insert - insert block operation request into queue
 * @q: target queue
 * @rq: block IO operation request
 *
 * Called immediately before block operation request @rq is inserted
 * into queue @q. The fields in the operation request @rq struct can
 * be examined to determine which device and sectors the pending
 * operation would access.
 */
DEFINE_EVENT(block_rq, block_rq_insert,
TP_PROTO(struct request_queue *q, struct request *rq),
TP_ARGS(q, rq)
);
/**
 * block_rq_issue - issue pending block IO request operation to device driver
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * Called when block operation request @rq from queue @q is sent to a
 * device driver for processing.
 */
DEFINE_EVENT(block_rq, block_rq_issue,
TP_PROTO(struct request_queue *q, struct request *rq),
TP_ARGS(q, rq)
);
/**
 * block_bio_bounce - used bounce buffer when processing block operation
 * @q: queue holding the block operation
 * @bio: block operation
 *
 * A bounce buffer was used to handle the block operation @bio in @q.
 * This occurs when hardware limitations prevent a direct transfer of
 * data between the @bio data memory area and the IO device. Use of a
 * bounce buffer requires extra copying of data and decreases
 * performance.
 */
TRACE_EVENT(block_bio_bounce,
TP_PROTO(struct request_queue *q, struct bio *bio),
TP_ARGS(q, bio),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__array( char, rwbs, RWBS_LEN )
__array( char, comm, TASK_COMM_LEN )
),
TP_fast_assign(
/* bi_bdev may be NULL here, unlike in most other bio events */
__entry->dev = bio->bi_bdev ?
bio->bi_bdev->bd_dev : 0;
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio);
blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
TP_printk("%d,%d %s %llu + %u [%s]",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
(unsigned long long)__entry->sector,
__entry->nr_sector, __entry->comm)
);
/**
 * block_bio_complete - completed all work on the block operation
 * @q: queue holding the block operation
 * @bio: block operation completed
 * @error: io error value
 *
 * This tracepoint indicates there is no further work to do on this
 * block IO operation @bio.
 */
TRACE_EVENT(block_bio_complete,
TP_PROTO(struct request_queue *q, struct bio *bio, int error),
TP_ARGS(q, bio, error),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned, nr_sector )
__field( int, error )
__array( char, rwbs, RWBS_LEN)
),
TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev;
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio);
__entry->error = error;
blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
),
TP_printk("%d,%d %s %llu + %u [%d]",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
(unsigned long long)__entry->sector,
__entry->nr_sector, __entry->error)
);
/* Event class for bio-merge events (back/front merge below). The @rq
 * argument is accepted for the call sites' benefit but nothing from it
 * is recorded -- only the incoming @bio and the current task. */
DECLARE_EVENT_CLASS(block_bio_merge,
TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
TP_ARGS(q, rq, bio),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__array( char, rwbs, RWBS_LEN )
__array( char, comm, TASK_COMM_LEN )
),
TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev;
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio);
blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
TP_printk("%d,%d %s %llu + %u [%s]",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
(unsigned long long)__entry->sector,
__entry->nr_sector, __entry->comm)
);
/**
 * block_bio_backmerge - merging block operation to the end of an existing operation
 * @q: queue holding operation
 * @rq: request bio is being merged into
 * @bio: new block operation to merge
 *
 * Merging block request @bio to the end of an existing block request
 * in queue @q.
 */
DEFINE_EVENT(block_bio_merge, block_bio_backmerge,
TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
TP_ARGS(q, rq, bio)
);
/**
 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
 * @q: queue holding operation
 * @rq: request bio is being merged into
 * @bio: new block operation to merge
 *
 * Merging block IO operation @bio to the beginning of an existing block
 * operation in queue @q.
 */
DEFINE_EVENT(block_bio_merge, block_bio_frontmerge,
TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
TP_ARGS(q, rq, bio)
);
/**
 * block_bio_queue - putting new block IO operation in queue
 * @q: queue holding operation
 * @bio: new block operation
 *
 * About to place the block IO operation @bio into queue @q.
 */
TRACE_EVENT(block_bio_queue,
TP_PROTO(struct request_queue *q, struct bio *bio),
TP_ARGS(q, bio),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__array( char, rwbs, RWBS_LEN )
__array( char, comm, TASK_COMM_LEN )
),
TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev;
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio);
blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
TP_printk("%d,%d %s %llu + %u [%s]",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
(unsigned long long)__entry->sector,
__entry->nr_sector, __entry->comm)
);
/* Event class for request-struct allocation events (block_getrq /
 * block_sleeprq). @bio may be NULL, so every field access is guarded
 * and falls back to 0. @rw is accepted but not recorded. */
DECLARE_EVENT_CLASS(block_get_rq,
TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
TP_ARGS(q, bio, rw),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__array( char, rwbs, RWBS_LEN )
__array( char, comm, TASK_COMM_LEN )
),
TP_fast_assign(
__entry->dev = bio ? bio->bi_bdev->bd_dev : 0;
__entry->sector = bio ? bio->bi_iter.bi_sector : 0;
__entry->nr_sector = bio ? bio_sectors(bio) : 0;
/* note: sized by nr_sector here, not bi_size as in other classes */
blk_fill_rwbs(__entry->rwbs,
bio ? bio->bi_rw : 0, __entry->nr_sector);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
TP_printk("%d,%d %s %llu + %u [%s]",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
(unsigned long long)__entry->sector,
__entry->nr_sector, __entry->comm)
);
/**
 * block_getrq - get a free request entry in queue for block IO operations
 * @q: queue for operations
 * @bio: pending block IO operation
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * A request struct for queue @q has been allocated to handle the
 * block IO operation @bio.
 */
DEFINE_EVENT(block_get_rq, block_getrq,
TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
TP_ARGS(q, bio, rw)
);
/**
 * block_sleeprq - waiting to get a free request entry in queue for block IO operation
 * @q: queue for operation
 * @bio: pending block IO operation
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * In the case where a request struct cannot be provided for queue @q
 * the process needs to wait for an request struct to become
 * available. This tracepoint event is generated each time the
 * process goes to sleep waiting for request struct become available.
 */
DEFINE_EVENT(block_get_rq, block_sleeprq,
TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
TP_ARGS(q, bio, rw)
);
/**
 * block_plug - keep operations requests in request queue
 * @q: request queue to plug
 *
 * Plug the request queue @q. Do not allow block operation requests
 * to be sent to the device driver. Instead, accumulate requests in
 * the queue to improve throughput performance of the block device.
 */
/* Only the current task's comm is recorded; @q itself is not logged. */
TRACE_EVENT(block_plug,
TP_PROTO(struct request_queue *q),
TP_ARGS(q),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
),
TP_fast_assign(
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
TP_printk("[%s]", __entry->comm)
);
/* Event class for queue-unplug events: records the queue depth (number
 * of pending requests) and the current task's comm. @q and @explicit
 * are accepted but not recorded. */
DECLARE_EVENT_CLASS(block_unplug,
TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
TP_ARGS(q, depth, explicit),
TP_STRUCT__entry(
__field( int, nr_rq )
__array( char, comm, TASK_COMM_LEN )
),
TP_fast_assign(
__entry->nr_rq = depth;
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
);
/**
 * block_unplug - release of operations requests in request queue
 * @q: request queue to unplug
 * @depth: number of requests just added to the queue
 * @explicit: whether this was an explicit unplug, or one from schedule()
 *
 * Unplug request queue @q because device driver is scheduled to work
 * on elements in the request queue.
 */
DEFINE_EVENT(block_unplug, block_unplug,
TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
TP_ARGS(q, depth, explicit)
);
/**
 * block_split - split a single bio struct into two bio structs
 * @q: queue containing the bio
 * @bio: block operation being split
 * @new_sector: The starting sector for the new bio
 *
 * The bio request @bio in request queue @q needs to be split into two
 * bio requests. The newly created @bio request starts at
 * @new_sector. This split may be required due to hardware limitation
 * such as operation crossing device boundaries in a RAID system.
 */
TRACE_EVENT(block_split,
TP_PROTO(struct request_queue *q, struct bio *bio,
unsigned int new_sector),
TP_ARGS(q, bio, new_sector),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( sector_t, sector )
__field( sector_t, new_sector )
__array( char, rwbs, RWBS_LEN )
__array( char, comm, TASK_COMM_LEN )
),
TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev;
__entry->sector = bio->bi_iter.bi_sector;
__entry->new_sector = new_sector;
blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
TP_printk("%d,%d %s %llu / %llu [%s]",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
(unsigned long long)__entry->sector,
(unsigned long long)__entry->new_sector,
__entry->comm)
);
/**
 * block_bio_remap - map request for a logical device to the raw device
 * @q: queue holding the operation
 * @bio: revised operation
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * An operation for a logical device has been mapped to the
 * raw block device.
 */
TRACE_EVENT(block_bio_remap,
TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
sector_t from),
TP_ARGS(q, bio, dev, from),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__field( dev_t, old_dev )
__field( sector_t, old_sector )
__array( char, rwbs, RWBS_LEN)
),
TP_fast_assign(
/* dev/sector come from the already-remapped @bio; the @dev and
 * @from parameters carry the pre-remap ("old") identity */
__entry->dev = bio->bi_bdev->bd_dev;
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio);
__entry->old_dev = dev;
__entry->old_sector = from;
blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
),
TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
(unsigned long long)__entry->sector,
__entry->nr_sector,
MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
(unsigned long long)__entry->old_sector)
);
/**
 * block_rq_remap - map request for a block operation request
 * @q: queue holding the operation
 * @rq: block IO operation request
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * The block operation request @rq in @q has been remapped. The block
 * operation request @rq holds the current information and @from hold
 * the original sector.
 */
TRACE_EVENT(block_rq_remap,
TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
sector_t from),
TP_ARGS(q, rq, dev, from),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__field( dev_t, old_dev )
__field( sector_t, old_sector )
__field( unsigned int, nr_bios )
__array( char, rwbs, RWBS_LEN)
),
TP_fast_assign(
__entry->dev = disk_devt(rq->rq_disk);
__entry->sector = blk_rq_pos(rq);
__entry->nr_sector = blk_rq_sectors(rq);
__entry->old_dev = dev;
__entry->old_sector = from;
/* number of bios attached to the remapped request */
__entry->nr_bios = blk_rq_count_bios(rq);
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
),
TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
(unsigned long long)__entry->sector,
__entry->nr_sector,
MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
(unsigned long long)__entry->old_sector, __entry->nr_bios)
);
#endif /* _TRACE_BLOCK_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

1178
include/trace/events/btrfs.h Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,133 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM compaction
#if !defined(_TRACE_COMPACTION_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_COMPACTION_H
#include <linux/types.h>
#include <linux/list.h>
#include <linux/tracepoint.h>
#include <trace/events/gfpflags.h>
/* Event class for compaction page-isolation statistics: how many pages
 * were scanned vs. actually taken in one isolation pass. */
DECLARE_EVENT_CLASS(mm_compaction_isolate_template,
TP_PROTO(unsigned long nr_scanned,
unsigned long nr_taken),
TP_ARGS(nr_scanned, nr_taken),
TP_STRUCT__entry(
__field(unsigned long, nr_scanned)
__field(unsigned long, nr_taken)
),
TP_fast_assign(
__entry->nr_scanned = nr_scanned;
__entry->nr_taken = nr_taken;
),
TP_printk("nr_scanned=%lu nr_taken=%lu",
__entry->nr_scanned,
__entry->nr_taken)
);
/* Isolation of pages to be migrated away (the "migrate" scanner). */
DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_migratepages,
TP_PROTO(unsigned long nr_scanned,
unsigned long nr_taken),
TP_ARGS(nr_scanned, nr_taken)
);
/* Isolation of free pages to be used as migration targets. */
DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_freepages,
TP_PROTO(unsigned long nr_scanned,
unsigned long nr_taken),
TP_ARGS(nr_scanned, nr_taken)
);
/* Records the outcome of one migrate_pages() call during compaction:
 * how many pages migrated successfully and how many failed. */
TRACE_EVENT(mm_compaction_migratepages,
TP_PROTO(unsigned long nr_all,
int migrate_rc,
struct list_head *migratepages),
TP_ARGS(nr_all, migrate_rc, migratepages),
TP_STRUCT__entry(
__field(unsigned long, nr_migrated)
__field(unsigned long, nr_failed)
),
TP_fast_assign(
unsigned long nr_failed = 0;
struct list_head *page_lru;
/*
 * migrate_pages() returns either a non-negative number
 * with the number of pages that failed migration, or an
 * error code, in which case we need to count the remaining
 * pages manually
 */
if (migrate_rc >= 0)
nr_failed = migrate_rc;
else
list_for_each(page_lru, migratepages)
nr_failed++;
__entry->nr_migrated = nr_all - nr_failed;
__entry->nr_failed = nr_failed;
),
TP_printk("nr_migrated=%lu nr_failed=%lu",
__entry->nr_migrated,
__entry->nr_failed)
);
/* Marks the start of a compaction run, recording the zone boundaries
 * and the starting positions of the migrate and free scanners (all as
 * raw unsigned long pfn-style values supplied by the caller). */
TRACE_EVENT(mm_compaction_begin,
TP_PROTO(unsigned long zone_start, unsigned long migrate_start,
unsigned long free_start, unsigned long zone_end),
TP_ARGS(zone_start, migrate_start, free_start, zone_end),
TP_STRUCT__entry(
__field(unsigned long, zone_start)
__field(unsigned long, migrate_start)
__field(unsigned long, free_start)
__field(unsigned long, zone_end)
),
TP_fast_assign(
__entry->zone_start = zone_start;
__entry->migrate_start = migrate_start;
__entry->free_start = free_start;
__entry->zone_end = zone_end;
),
TP_printk("zone_start=%lu migrate_start=%lu free_start=%lu zone_end=%lu",
__entry->zone_start,
__entry->migrate_start,
__entry->free_start,
__entry->zone_end)
);
/* Marks the end of a compaction run; @status is the caller-supplied
 * compaction result code, recorded verbatim. */
TRACE_EVENT(mm_compaction_end,
TP_PROTO(int status),
TP_ARGS(status),
TP_STRUCT__entry(
__field(int, status)
),
TP_fast_assign(
__entry->status = status;
),
TP_printk("status=%d", __entry->status)
);
#endif /* _TRACE_COMPACTION_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,58 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM context_tracking
#if !defined(_TRACE_CONTEXT_TRACKING_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_CONTEXT_TRACKING_H
#include <linux/tracepoint.h>
/* Event class for user/kernel context-tracking transitions. The dummy
 * int exists only because the TRACE_EVENT machinery requires at least
 * one argument; nothing meaningful is recorded or printed. */
DECLARE_EVENT_CLASS(context_tracking_user,
TP_PROTO(int dummy),
TP_ARGS(dummy),
TP_STRUCT__entry(
__field( int, dummy )
),
TP_fast_assign(
__entry->dummy = dummy;
),
TP_printk("%s", "")
);
/**
 * user_enter - called when the kernel resumes to userspace
 * @dummy: dummy arg to make trace event macro happy
 *
 * This event occurs when the kernel resumes to userspace after
 * an exception or a syscall.
 */
DEFINE_EVENT(context_tracking_user, user_enter,
TP_PROTO(int dummy),
TP_ARGS(dummy)
);
/**
 * user_exit - called when userspace enters the kernel
 * @dummy: dummy arg to make trace event macro happy
 *
 * This event occurs when userspace enters the kernel through
 * an exception or a syscall.
 */
DEFINE_EVENT(context_tracking_user, user_exit,
TP_PROTO(int dummy),
TP_ARGS(dummy)
);
#endif /* _TRACE_CONTEXT_TRACKING_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,139 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM cpufreq_interactive
#if !defined(_TRACE_CPUFREQ_INTERACTIVE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_CPUFREQ_INTERACTIVE_H
#include <linux/tracepoint.h>
/* Event class for cpufreq-interactive frequency-set events: records the
 * CPU id, the requested target frequency, and the frequency actually
 * applied by the driver. */
DECLARE_EVENT_CLASS(set,
TP_PROTO(u32 cpu_id, unsigned long targfreq,
unsigned long actualfreq),
TP_ARGS(cpu_id, targfreq, actualfreq),
TP_STRUCT__entry(
__field( u32, cpu_id )
__field(unsigned long, targfreq )
__field(unsigned long, actualfreq )
),
TP_fast_assign(
__entry->cpu_id = (u32) cpu_id;
__entry->targfreq = targfreq;
__entry->actualfreq = actualfreq;
),
TP_printk("cpu=%u targ=%lu actual=%lu",
__entry->cpu_id, __entry->targfreq,
__entry->actualfreq)
);
/* Ordinary governor speed change. */
DEFINE_EVENT(set, cpufreq_interactive_setspeed,
TP_PROTO(u32 cpu_id, unsigned long targfreq,
unsigned long actualfreq),
TP_ARGS(cpu_id, targfreq, actualfreq)
);
/* Exynos-only variants: frequency clamps coming from the PM QoS
 * min/max constraints, for the big-CPU cluster and (on MP-CPUFREQ
 * configs) the KFC/little cluster. Same record layout as "set". */
#ifdef CONFIG_ARCH_EXYNOS
DEFINE_EVENT(set, cpufreq_interactive_cpu_min_qos,
TP_PROTO(u32 cpu_id, unsigned long targfreq,
unsigned long actualfreq),
TP_ARGS(cpu_id, targfreq, actualfreq)
);
DEFINE_EVENT(set, cpufreq_interactive_cpu_max_qos,
TP_PROTO(u32 cpu_id, unsigned long targfreq,
unsigned long actualfreq),
TP_ARGS(cpu_id, targfreq, actualfreq)
);
#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
DEFINE_EVENT(set, cpufreq_interactive_kfc_min_qos,
TP_PROTO(u32 cpu_id, unsigned long targfreq,
unsigned long actualfreq),
TP_ARGS(cpu_id, targfreq, actualfreq)
);
DEFINE_EVENT(set, cpufreq_interactive_kfc_max_qos,
TP_PROTO(u32 cpu_id, unsigned long targfreq,
unsigned long actualfreq),
TP_ARGS(cpu_id, targfreq, actualfreq)
);
#endif /* CONFIG_ARM_EXYNOS_MP_CPUFREQ */
#endif /* CONFIG_ARCH_EXYNOS */
/* Event class for load-evaluation decisions: CPU id, measured load,
 * current target and actual frequencies, and the newly chosen target. */
DECLARE_EVENT_CLASS(loadeval,
TP_PROTO(unsigned long cpu_id, unsigned long load,
unsigned long curtarg, unsigned long curactual,
unsigned long newtarg),
TP_ARGS(cpu_id, load, curtarg, curactual, newtarg),
TP_STRUCT__entry(
__field(unsigned long, cpu_id )
__field(unsigned long, load )
__field(unsigned long, curtarg )
__field(unsigned long, curactual )
__field(unsigned long, newtarg )
),
TP_fast_assign(
__entry->cpu_id = cpu_id;
__entry->load = load;
__entry->curtarg = curtarg;
__entry->curactual = curactual;
__entry->newtarg = newtarg;
),
TP_printk("cpu=%lu load=%lu cur=%lu actual=%lu targ=%lu",
__entry->cpu_id, __entry->load, __entry->curtarg,
__entry->curactual, __entry->newtarg)
);
/* A new target frequency was selected. */
DEFINE_EVENT(loadeval, cpufreq_interactive_target,
TP_PROTO(unsigned long cpu_id, unsigned long load,
unsigned long curtarg, unsigned long curactual,
unsigned long newtarg),
TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
);
/* Evaluation ran but the CPU is already at the chosen frequency. */
DEFINE_EVENT(loadeval, cpufreq_interactive_already,
TP_PROTO(unsigned long cpu_id, unsigned long load,
unsigned long curtarg, unsigned long curactual,
unsigned long newtarg),
TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
);
/* Evaluation ran but the change is deferred (not applied yet). */
DEFINE_EVENT(loadeval, cpufreq_interactive_notyet,
TP_PROTO(unsigned long cpu_id, unsigned long load,
unsigned long curtarg, unsigned long curactual,
unsigned long newtarg),
TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
);
/* Boost engaged; @s is a caller-supplied label stored as a dynamic
 * string and printed verbatim. */
TRACE_EVENT(cpufreq_interactive_boost,
TP_PROTO(const char *s),
TP_ARGS(s),
TP_STRUCT__entry(
__string(s, s)
),
TP_fast_assign(
__assign_str(s, s);
),
TP_printk("%s", __get_str(s))
);
/* Boost released; mirrors cpufreq_interactive_boost. */
TRACE_EVENT(cpufreq_interactive_unboost,
TP_PROTO(const char *s),
TP_ARGS(s),
TP_STRUCT__entry(
__string(s, s)
),
TP_fast_assign(
__assign_str(s, s);
),
TP_printk("%s", __get_str(s))
);
#endif /* _TRACE_CPUFREQ_INTERACTIVE_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

866
include/trace/events/ext3.h Normal file
View file

@ -0,0 +1,866 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ext3
#if !defined(_TRACE_EXT3_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_EXT3_H
#include <linux/tracepoint.h>
/* Snapshot of an inode's mode/uid/gid/block count taken when ext3
 * frees it. */
TRACE_EVENT(ext3_free_inode,
TP_PROTO(struct inode *inode),
TP_ARGS(inode),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( umode_t, mode )
__field( uid_t, uid )
__field( gid_t, gid )
__field( blkcnt_t, blocks )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->mode = inode->i_mode;
__entry->uid = i_uid_read(inode);
__entry->gid = i_gid_read(inode);
__entry->blocks = inode->i_blocks;
),
TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->mode, __entry->uid, __entry->gid,
(unsigned long) __entry->blocks)
);
/* Records a request for a new inode: the parent directory's inode
 * number and the requested mode. */
TRACE_EVENT(ext3_request_inode,
TP_PROTO(struct inode *dir, int mode),
TP_ARGS(dir, mode),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, dir )
__field( umode_t, mode )
),
TP_fast_assign(
__entry->dev = dir->i_sb->s_dev;
__entry->dir = dir->i_ino;
__entry->mode = mode;
),
TP_printk("dev %d,%d dir %lu mode 0%o",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->dir, __entry->mode)
);
/* Records a newly allocated inode along with its parent directory's
 * inode number and the creation mode. */
TRACE_EVENT(ext3_allocate_inode,
TP_PROTO(struct inode *inode, struct inode *dir, int mode),
TP_ARGS(inode, dir, mode),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( ino_t, dir )
__field( umode_t, mode )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->dir = dir->i_ino;
__entry->mode = mode;
),
TP_printk("dev %d,%d ino %lu dir %lu mode 0%o",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
(unsigned long) __entry->dir, __entry->mode)
);
/* Records an inode being evicted, including its link count at that
 * moment (0 means the inode is going away for good). */
TRACE_EVENT(ext3_evict_inode,
TP_PROTO(struct inode *inode),
TP_ARGS(inode),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( int, nlink )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->nlink = inode->i_nlink;
),
TP_printk("dev %d,%d ino %lu nlink %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino, __entry->nlink)
);
/* Records the drop decision for an inode; @drop is the caller-supplied
 * verdict (whether the inode will be dropped), logged verbatim. */
TRACE_EVENT(ext3_drop_inode,
TP_PROTO(struct inode *inode, int drop),
TP_ARGS(inode, drop),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( int, drop )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->drop = drop;
),
TP_printk("dev %d,%d ino %lu drop %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino, __entry->drop)
);
/* Records an inode being marked dirty; @IP is the caller's return
 * address, printed symbolically via %pF for attribution. */
TRACE_EVENT(ext3_mark_inode_dirty,
TP_PROTO(struct inode *inode, unsigned long IP),
TP_ARGS(inode, IP),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field(unsigned long, ip )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->ip = IP;
),
TP_printk("dev %d,%d ino %lu caller %pF",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino, (void *)__entry->ip)
);
/* Records entry into ext3's write_begin path: file position, length
 * and the write flags passed by the caller. */
TRACE_EVENT(ext3_write_begin,
TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
unsigned int flags),
TP_ARGS(inode, pos, len, flags),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( loff_t, pos )
__field( unsigned int, len )
__field( unsigned int, flags )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->pos = pos;
__entry->len = len;
__entry->flags = flags;
),
TP_printk("dev %d,%d ino %lu pos %llu len %u flags %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
(unsigned long long) __entry->pos, __entry->len,
__entry->flags)
);
/* Event class for the three journaling-mode write_end variants below:
 * records position, requested length and bytes actually copied. */
DECLARE_EVENT_CLASS(ext3__write_end,
TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
unsigned int copied),
TP_ARGS(inode, pos, len, copied),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( loff_t, pos )
__field( unsigned int, len )
__field( unsigned int, copied )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->pos = pos;
__entry->len = len;
__entry->copied = copied;
),
TP_printk("dev %d,%d ino %lu pos %llu len %u copied %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
(unsigned long long) __entry->pos, __entry->len,
__entry->copied)
);
/* write_end in data=ordered mode. */
DEFINE_EVENT(ext3__write_end, ext3_ordered_write_end,
TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
unsigned int copied),
TP_ARGS(inode, pos, len, copied)
);
/* write_end in data=writeback mode. */
DEFINE_EVENT(ext3__write_end, ext3_writeback_write_end,
TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
unsigned int copied),
TP_ARGS(inode, pos, len, copied)
);
/* write_end in data=journal mode. */
DEFINE_EVENT(ext3__write_end, ext3_journalled_write_end,
TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
unsigned int copied),
TP_ARGS(inode, pos, len, copied)
);
/* Event class for per-page operations: identifies the page by its
 * owning inode, device and page index (via page->mapping->host). */
DECLARE_EVENT_CLASS(ext3__page_op,
TP_PROTO(struct page *page),
TP_ARGS(page),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( pgoff_t, index )
),
TP_fast_assign(
__entry->index = page->index;
__entry->ino = page->mapping->host->i_ino;
__entry->dev = page->mapping->host->i_sb->s_dev;
),
TP_printk("dev %d,%d ino %lu page_index %lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino, __entry->index)
);
/* writepage in data=ordered mode. */
DEFINE_EVENT(ext3__page_op, ext3_ordered_writepage,
TP_PROTO(struct page *page),
TP_ARGS(page)
);
/* writepage in data=writeback mode. */
DEFINE_EVENT(ext3__page_op, ext3_writeback_writepage,
TP_PROTO(struct page *page),
TP_ARGS(page)
);
/* writepage in data=journal mode. */
DEFINE_EVENT(ext3__page_op, ext3_journalled_writepage,
TP_PROTO(struct page *page),
TP_ARGS(page)
);
/* Page read. */
DEFINE_EVENT(ext3__page_op, ext3_readpage,
TP_PROTO(struct page *page),
TP_ARGS(page)
);
/* Page release. */
DEFINE_EVENT(ext3__page_op, ext3_releasepage,
TP_PROTO(struct page *page),
TP_ARGS(page)
);
/* Records invalidation of part of a page: byte offset and length of
 * the invalidated range within the page. */
TRACE_EVENT(ext3_invalidatepage,
TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
TP_ARGS(page, offset, length),
TP_STRUCT__entry(
__field( pgoff_t, index )
__field( unsigned int, offset )
__field( unsigned int, length )
__field( ino_t, ino )
__field( dev_t, dev )
),
TP_fast_assign(
__entry->index = page->index;
__entry->offset = offset;
__entry->length = length;
__entry->ino = page->mapping->host->i_ino;
__entry->dev = page->mapping->host->i_sb->s_dev;
),
TP_printk("dev %d,%d ino %lu page_index %lu offset %u length %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->index, __entry->offset, __entry->length)
);
/* Records a discard of @count blocks starting at block @blk on the
 * filesystem identified by @sb. */
TRACE_EVENT(ext3_discard_blocks,
TP_PROTO(struct super_block *sb, unsigned long blk,
unsigned long count),
TP_ARGS(sb, blk, count),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( unsigned long, blk )
__field( unsigned long, count )
),
TP_fast_assign(
__entry->dev = sb->s_dev;
__entry->blk = blk;
__entry->count = count;
),
TP_printk("dev %d,%d blk %lu count %lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->blk, __entry->count)
);
/* Records a block-allocation request: how many blocks are wanted and
 * the goal (preferred starting) block. */
TRACE_EVENT(ext3_request_blocks,
TP_PROTO(struct inode *inode, unsigned long goal,
unsigned long count),
TP_ARGS(inode, goal, count),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( unsigned long, count )
__field( unsigned long, goal )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->count = count;
__entry->goal = goal;
),
TP_printk("dev %d,%d ino %lu count %lu goal %lu ",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->count, __entry->goal)
);
/* Records the result of a block allocation: the block actually
 * granted alongside the original count and goal. */
TRACE_EVENT(ext3_allocate_blocks,
TP_PROTO(struct inode *inode, unsigned long goal,
unsigned long count, unsigned long block),
TP_ARGS(inode, goal, count, block),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( unsigned long, block )
__field( unsigned long, count )
__field( unsigned long, goal )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->block = block;
__entry->count = count;
__entry->goal = goal;
),
TP_printk("dev %d,%d ino %lu count %lu block %lu goal %lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->count, __entry->block,
__entry->goal)
);
/* Records freeing of @count blocks starting at @block, plus the owning
 * inode's mode for context. */
TRACE_EVENT(ext3_free_blocks,
TP_PROTO(struct inode *inode, unsigned long block,
unsigned long count),
TP_ARGS(inode, block, count),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( umode_t, mode )
__field( unsigned long, block )
__field( unsigned long, count )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->mode = inode->i_mode;
__entry->block = block;
__entry->count = count;
),
TP_printk("dev %d,%d ino %lu mode 0%o block %lu count %lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->mode, __entry->block, __entry->count)
);
/* Records entry to fsync/fdatasync for a file, including the parent
 * directory's inode number (taken via the dentry) and the datasync
 * flag. */
TRACE_EVENT(ext3_sync_file_enter,
TP_PROTO(struct file *file, int datasync),
TP_ARGS(file, datasync),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( ino_t, parent )
__field( int, datasync )
),
TP_fast_assign(
struct dentry *dentry = file->f_path.dentry;
__entry->dev = dentry->d_inode->i_sb->s_dev;
__entry->ino = dentry->d_inode->i_ino;
__entry->datasync = datasync;
__entry->parent = dentry->d_parent->d_inode->i_ino;
),
/* NOTE(review): parent is cast to unsigned long but printed with
 * %ld; %lu would be strictly correct. Format kept as-is since the
 * printk string is part of the trace ABI. */
TP_printk("dev %d,%d ino %lu parent %ld datasync %d ",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
(unsigned long) __entry->parent, __entry->datasync)
);
/* Records exit from fsync/fdatasync with its return value. */
TRACE_EVENT(ext3_sync_file_exit,
TP_PROTO(struct inode *inode, int ret),
TP_ARGS(inode, ret),
TP_STRUCT__entry(
__field( int, ret )
__field( ino_t, ino )
__field( dev_t, dev )
),
TP_fast_assign(
__entry->ret = ret;
__entry->ino = inode->i_ino;
__entry->dev = inode->i_sb->s_dev;
),
TP_printk("dev %d,%d ino %lu ret %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->ret)
);
/* Records a filesystem-wide sync and whether it waits for completion. */
TRACE_EVENT(ext3_sync_fs,
TP_PROTO(struct super_block *sb, int wait),
TP_ARGS(sb, wait),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( int, wait )
),
TP_fast_assign(
__entry->dev = sb->s_dev;
__entry->wait = wait;
),
TP_printk("dev %d,%d wait %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->wait)
);
/* Records insertion of a block-reservation window: its start and end
 * block numbers from the rsv_window embedded in @rsv_node. */
TRACE_EVENT(ext3_rsv_window_add,
TP_PROTO(struct super_block *sb,
struct ext3_reserve_window_node *rsv_node),
TP_ARGS(sb, rsv_node),
TP_STRUCT__entry(
__field( unsigned long, start )
__field( unsigned long, end )
__field( dev_t, dev )
),
TP_fast_assign(
__entry->dev = sb->s_dev;
__entry->start = rsv_node->rsv_window._rsv_start;
__entry->end = rsv_node->rsv_window._rsv_end;
),
TP_printk("dev %d,%d start %lu end %lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->start, __entry->end)
);
TRACE_EVENT(ext3_discard_reservation,
TP_PROTO(struct inode *inode,
struct ext3_reserve_window_node *rsv_node),
TP_ARGS(inode, rsv_node),
TP_STRUCT__entry(
__field( unsigned long, start )
__field( unsigned long, end )
__field( ino_t, ino )
__field( dev_t, dev )
),
TP_fast_assign(
__entry->start = rsv_node->rsv_window._rsv_start;
__entry->end = rsv_node->rsv_window._rsv_end;
__entry->ino = inode->i_ino;
__entry->dev = inode->i_sb->s_dev;
),
TP_printk("dev %d,%d ino %lu start %lu end %lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long)__entry->ino, __entry->start,
__entry->end)
);
TRACE_EVENT(ext3_alloc_new_reservation,
TP_PROTO(struct super_block *sb, unsigned long goal),
TP_ARGS(sb, goal),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( unsigned long, goal )
),
TP_fast_assign(
__entry->dev = sb->s_dev;
__entry->goal = goal;
),
TP_printk("dev %d,%d goal %lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->goal)
);
TRACE_EVENT(ext3_reserved,
TP_PROTO(struct super_block *sb, unsigned long block,
struct ext3_reserve_window_node *rsv_node),
TP_ARGS(sb, block, rsv_node),
TP_STRUCT__entry(
__field( unsigned long, block )
__field( unsigned long, start )
__field( unsigned long, end )
__field( dev_t, dev )
),
TP_fast_assign(
__entry->block = block;
__entry->start = rsv_node->rsv_window._rsv_start;
__entry->end = rsv_node->rsv_window._rsv_end;
__entry->dev = sb->s_dev;
),
TP_printk("dev %d,%d block %lu, start %lu end %lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->block, __entry->start, __entry->end)
);
/*
 * ext3_forget - a buffer for the given block is being forgotten from the
 * journal; is_metadata distinguishes metadata from data blocks.
 */
TRACE_EVENT(ext3_forget,
TP_PROTO(struct inode *inode, int is_metadata, unsigned long block),
TP_ARGS(inode, is_metadata, block),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( umode_t, mode )
__field( int, is_metadata )
__field( unsigned long, block )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->mode = inode->i_mode;
__entry->is_metadata = is_metadata;
__entry->block = block;
),
TP_printk("dev %d,%d ino %lu mode 0%o is_metadata %d block %lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->mode, __entry->is_metadata, __entry->block)
);
/*
 * ext3_read_block_bitmap - the block bitmap of a block group is read
 * from disk.
 */
TRACE_EVENT(ext3_read_block_bitmap,
TP_PROTO(struct super_block *sb, unsigned int group),
TP_ARGS(sb, group),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( __u32, group )
),
TP_fast_assign(
__entry->dev = sb->s_dev;
__entry->group = group;
),
TP_printk("dev %d,%d group %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->group)
);
/*
 * ext3_direct_IO_enter - start of a direct I/O request: file offset,
 * length in bytes and read/write direction.
 */
TRACE_EVENT(ext3_direct_IO_enter,
TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw),
TP_ARGS(inode, offset, len, rw),
TP_STRUCT__entry(
__field( ino_t, ino )
__field( dev_t, dev )
__field( loff_t, pos )
__field( unsigned long, len )
__field( int, rw )
),
TP_fast_assign(
__entry->ino = inode->i_ino;
__entry->dev = inode->i_sb->s_dev;
__entry->pos = offset;
__entry->len = len;
__entry->rw = rw;
),
TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
(unsigned long long) __entry->pos, __entry->len,
__entry->rw)
);
/*
 * ext3_direct_IO_exit - completion of a direct I/O request; ret is the
 * byte count or negative errno returned to the caller.
 */
TRACE_EVENT(ext3_direct_IO_exit,
TP_PROTO(struct inode *inode, loff_t offset, unsigned long len,
int rw, int ret),
TP_ARGS(inode, offset, len, rw, ret),
TP_STRUCT__entry(
__field( ino_t, ino )
__field( dev_t, dev )
__field( loff_t, pos )
__field( unsigned long, len )
__field( int, rw )
__field( int, ret )
),
TP_fast_assign(
__entry->ino = inode->i_ino;
__entry->dev = inode->i_sb->s_dev;
__entry->pos = offset;
__entry->len = len;
__entry->rw = rw;
__entry->ret = ret;
),
TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d ret %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
(unsigned long long) __entry->pos, __entry->len,
__entry->rw, __entry->ret)
);
/*
 * ext3_unlink_enter - unlink() entry; records the victim inode, its size
 * and the parent directory inode.
 */
TRACE_EVENT(ext3_unlink_enter,
TP_PROTO(struct inode *parent, struct dentry *dentry),
TP_ARGS(parent, dentry),
TP_STRUCT__entry(
__field( ino_t, parent )
__field( ino_t, ino )
__field( loff_t, size )
__field( dev_t, dev )
),
TP_fast_assign(
__entry->parent = parent->i_ino;
__entry->ino = dentry->d_inode->i_ino;
__entry->size = dentry->d_inode->i_size;
__entry->dev = dentry->d_inode->i_sb->s_dev;
),
/* NOTE(review): "%ld" vs (unsigned long) cast for parent — format kept
 * as-is because it is part of the exported event format. */
TP_printk("dev %d,%d ino %lu size %lld parent %ld",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
(unsigned long long)__entry->size,
(unsigned long) __entry->parent)
);
/*
 * ext3_unlink_exit - unlink() completion with its return value.
 */
TRACE_EVENT(ext3_unlink_exit,
TP_PROTO(struct dentry *dentry, int ret),
TP_ARGS(dentry, ret),
TP_STRUCT__entry(
__field( ino_t, ino )
__field( dev_t, dev )
__field( int, ret )
),
TP_fast_assign(
__entry->ino = dentry->d_inode->i_ino;
__entry->dev = dentry->d_inode->i_sb->s_dev;
__entry->ret = ret;
),
TP_printk("dev %d,%d ino %lu ret %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->ret)
);
/*
 * ext3__truncate - shared layout for truncate enter/exit events:
 * dev/ino plus the inode's current block count.
 */
DECLARE_EVENT_CLASS(ext3__truncate,
TP_PROTO(struct inode *inode),
TP_ARGS(inode),
TP_STRUCT__entry(
__field( ino_t, ino )
__field( dev_t, dev )
__field( blkcnt_t, blocks )
),
TP_fast_assign(
__entry->ino = inode->i_ino;
__entry->dev = inode->i_sb->s_dev;
__entry->blocks = inode->i_blocks;
),
TP_printk("dev %d,%d ino %lu blocks %lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino, (unsigned long) __entry->blocks)
);
/* Truncate entry point. */
DEFINE_EVENT(ext3__truncate, ext3_truncate_enter,
TP_PROTO(struct inode *inode),
TP_ARGS(inode)
);
/* Truncate completion. */
DEFINE_EVENT(ext3__truncate, ext3_truncate_exit,
TP_PROTO(struct inode *inode),
TP_ARGS(inode)
);
/*
 * ext3_get_blocks_enter - block-mapping request: logical block, length
 * and whether allocation (create) is allowed.
 */
TRACE_EVENT(ext3_get_blocks_enter,
TP_PROTO(struct inode *inode, unsigned long lblk,
unsigned long len, int create),
TP_ARGS(inode, lblk, len, create),
TP_STRUCT__entry(
__field( ino_t, ino )
__field( dev_t, dev )
__field( unsigned long, lblk )
__field( unsigned long, len )
__field( int, create )
),
TP_fast_assign(
__entry->ino = inode->i_ino;
__entry->dev = inode->i_sb->s_dev;
__entry->lblk = lblk;
__entry->len = len;
__entry->create = create;
),
TP_printk("dev %d,%d ino %lu lblk %lu len %lu create %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->lblk, __entry->len, __entry->create)
);
/*
 * ext3_get_blocks_exit - block-mapping result: the physical block the
 * logical block was mapped to, and the return value (mapped count or
 * negative errno).
 */
TRACE_EVENT(ext3_get_blocks_exit,
TP_PROTO(struct inode *inode, unsigned long lblk,
unsigned long pblk, unsigned long len, int ret),
TP_ARGS(inode, lblk, pblk, len, ret),
TP_STRUCT__entry(
__field( ino_t, ino )
__field( dev_t, dev )
__field( unsigned long, lblk )
__field( unsigned long, pblk )
__field( unsigned long, len )
__field( int, ret )
),
TP_fast_assign(
__entry->ino = inode->i_ino;
__entry->dev = inode->i_sb->s_dev;
__entry->lblk = lblk;
__entry->pblk = pblk;
__entry->len = len;
__entry->ret = ret;
),
TP_printk("dev %d,%d ino %lu lblk %lu pblk %lu len %lu ret %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->lblk, __entry->pblk,
__entry->len, __entry->ret)
);
/*
 * ext3_load_inode - an inode is read from disk into the inode cache.
 */
TRACE_EVENT(ext3_load_inode,
TP_PROTO(struct inode *inode),
TP_ARGS(inode),
TP_STRUCT__entry(
__field( ino_t, ino )
__field( dev_t, dev )
),
TP_fast_assign(
__entry->ino = inode->i_ino;
__entry->dev = inode->i_sb->s_dev;
),
TP_printk("dev %d,%d ino %lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino)
);
#endif /* _TRACE_EXT3_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

2487
include/trace/events/ext4.h Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,312 @@
/*
* AUTHOR: JK Kim <jk.man.kim@>
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM exynos
#if !defined(_TRACE_EXYNOS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_EXYNOS_H
#include <linux/tracepoint.h>
#include <linux/regulator/driver.h>
#include <soc/samsung/exynos-devfreq.h>
#include "../../../drivers/soc/samsung/pwrcal/pwrcal-vclk.h"
/* Build one {value, "name"} pair from a domain tag, e.g. APL ->
 * { APL_EXYNOS_FREQ, "APL" }. */
#define freq_name(flag) { flag##_EXYNOS_FREQ, #flag }
/* Translate a *_EXYNOS_FREQ flag into its printable domain name for
 * TP_printk(); unknown values are printed in hex by __print_symbolic. */
#define show_freq_name(val) \
__print_symbolic(val, \
freq_name(APL), \
freq_name(ATL), \
freq_name(INT), \
freq_name(MIF), \
freq_name(ISP), \
freq_name(DISP))
/*
 * exynos_regulator_{in,on,out} share one record layout and one format
 * string, so they are expressed as a single event class with three
 * DEFINE_EVENT() instances. Event names, fields and output are identical
 * to the previous three open-coded TRACE_EVENT()s; only the generated
 * code is smaller (one assign/print implementation instead of three).
 *
 * @name:    regulator name, copied into the ring buffer
 * @rdev:    regulator device; only desc->vsel_reg is recorded
 * @voltage: voltage value as passed by the caller (units per caller)
 */
DECLARE_EVENT_CLASS(exynos_regulator,
TP_PROTO(char *name, struct regulator_dev *rdev, unsigned int voltage),
TP_ARGS(name, rdev, voltage),
TP_STRUCT__entry(
__string(name, name)
__field(unsigned int, vsel_reg)
__field(unsigned int, voltage)
),
TP_fast_assign(
__assign_str(name, name);
__entry->vsel_reg = rdev->desc->vsel_reg;
__entry->voltage = voltage;
),
TP_printk("name: %s vsel_reg: %u voltage: %u",
__get_str(name), __entry->vsel_reg, __entry->voltage)
);
/* Entry into the regulator voltage-set path. */
DEFINE_EVENT(exynos_regulator, exynos_regulator_in,
TP_PROTO(char *name, struct regulator_dev *rdev, unsigned int voltage),
TP_ARGS(name, rdev, voltage)
);
/* Regulator programmed (mid-point of the transition). */
DEFINE_EVENT(exynos_regulator, exynos_regulator_on,
TP_PROTO(char *name, struct regulator_dev *rdev, unsigned int voltage),
TP_ARGS(name, rdev, voltage)
);
/* Exit from the regulator voltage-set path. */
DEFINE_EVENT(exynos_regulator, exynos_regulator_out,
TP_PROTO(char *name, struct regulator_dev *rdev, unsigned int voltage),
TP_ARGS(name, rdev, voltage)
);
/*
 * exynos_thermal - snapshot of the thermal driver state: driver context
 * pointer, current temperature reading, sensor name and current maximum
 * cooling level.
 * NOTE(review): "%p" exposes a raw kernel pointer in the trace output on
 * kernels that do not hash %p — confirm this is acceptable.
 */
TRACE_EVENT(exynos_thermal,
TP_PROTO(void * data, unsigned int temp, char *name, unsigned int maxcooling),
TP_ARGS(data, temp, name, maxcooling),
TP_STRUCT__entry(
__field(void *,data)
__field(unsigned int, temp)
__string(name, name)
__field(unsigned int, maxcooling)
),
TP_fast_assign(
__entry->data = data;
__entry->temp = temp;
__assign_str(name, name);
__entry->maxcooling = maxcooling;
),
TP_printk("(struct ...)data: %p temp: %u name: %s maxcooling:%u",
__entry->data, __entry->temp, __get_str(name), __entry->maxcooling)
);
/*
 * exynos_clk_{in,on,out} are three identical events (vclk pointer plus
 * its name), so they are defined through one event class. Event names,
 * record layout and printed output are unchanged from the previous three
 * open-coded TRACE_EVENT()s.
 *
 * @vclk: virtual-clock object (stored as an opaque pointer)
 * @name: clock name, copied into the ring buffer
 */
DECLARE_EVENT_CLASS(exynos_clk,
TP_PROTO(struct vclk *vclk, const char *name),
TP_ARGS(vclk, name),
TP_STRUCT__entry(
__field(void *, vclk)
__string(name, name)
),
TP_fast_assign(
__entry->vclk = vclk;
__assign_str(name, name);
),
TP_printk("name: %s vclk: %p",
__get_str(name), __entry->vclk)
);
/* Entry into the clock operation. */
DEFINE_EVENT(exynos_clk, exynos_clk_in,
TP_PROTO(struct vclk *vclk, const char *name),
TP_ARGS(vclk, name)
);
/* Clock operation applied. */
DEFINE_EVENT(exynos_clk, exynos_clk_on,
TP_PROTO(struct vclk *vclk, const char *name),
TP_ARGS(vclk, name)
);
/* Exit from the clock operation. */
DEFINE_EVENT(exynos_clk, exynos_clk_out,
TP_PROTO(struct vclk *vclk, const char *name),
TP_ARGS(vclk, name)
);
/*
 * exynos_freq_{in,out} share one layout (domain flag + frequency), so
 * they are defined through a single event class; names, fields and
 * output are identical to the previous two TRACE_EVENT()s.
 *
 * @flag: frequency-domain tag, decoded by show_freq_name()
 * @freq: frequency value as passed by the caller
 */
DECLARE_EVENT_CLASS(exynos_freq,
TP_PROTO(u32 flag, u32 freq),
TP_ARGS(flag, freq),
TP_STRUCT__entry(
__field(u32, flag)
__field(u32, freq)
),
TP_fast_assign(
__entry->flag = flag;
__entry->freq = freq;
),
TP_printk("name: %s freq: %u",
show_freq_name(__entry->flag), __entry->freq)
);
/* Entry into the frequency transition. */
DEFINE_EVENT(exynos_freq, exynos_freq_in,
TP_PROTO(u32 flag, u32 freq),
TP_ARGS(flag, freq)
);
/* Exit from the frequency transition. */
DEFINE_EVENT(exynos_freq, exynos_freq_out,
TP_PROTO(u32 flag, u32 freq),
TP_ARGS(flag, freq)
);
/*
 * exynos_clockevent - log programming of a clock event: the computed
 * clc value, the delta, and the absolute next-event time.
 * NOTE(review): next_event is dereferenced as a long long through a
 * void * — this assumes the caller passes a pointer to a 64-bit value
 * (e.g. a ktime_t) with suitable alignment; confirm at the call sites.
 */
TRACE_EVENT(exynos_clockevent,
TP_PROTO(long long clc, int64_t delta, void *next_event),
TP_ARGS(clc, delta, next_event),
TP_STRUCT__entry(
__field(long long, clc)
__field(int64_t, delta)
__field(long long, next_event)
),
TP_fast_assign(
__entry->clc = clc;
__entry->delta = delta;
__entry->next_event = *((long long *)next_event);
),
TP_printk("clc: %lld delta: %lld next_event:%lld",
__entry->clc, __entry->delta, __entry->next_event)
);
/*
 * exynos_smc_{in,out} bracket a secure monitor call and record the same
 * four values, so they are defined through one event class; names,
 * fields and output are identical to the previous two TRACE_EVENT()s.
 *
 * @cmd:        SMC function identifier (printed in hex)
 * @arg1..arg3: raw SMC arguments
 */
DECLARE_EVENT_CLASS(exynos_smc,
TP_PROTO(unsigned long cmd, unsigned long arg1, unsigned long arg2, unsigned long arg3),
TP_ARGS(cmd, arg1, arg2, arg3),
TP_STRUCT__entry(
__field(unsigned long, cmd)
__field(unsigned long, arg1)
__field(unsigned long, arg2)
__field(unsigned long, arg3)
),
TP_fast_assign(
__entry->cmd = cmd;
__entry->arg1 = arg1;
__entry->arg2 = arg2;
__entry->arg3 = arg3;
),
TP_printk("cmd: %lx arg1: %lu arg2: %lu arg3: %lu",
__entry->cmd, __entry->arg1, __entry->arg2, __entry->arg3)
);
/* Before issuing the SMC. */
DEFINE_EVENT(exynos_smc, exynos_smc_in,
TP_PROTO(unsigned long cmd, unsigned long arg1, unsigned long arg2, unsigned long arg3),
TP_ARGS(cmd, arg1, arg2, arg3)
);
/* After the SMC returned. */
DEFINE_EVENT(exynos_smc, exynos_smc_out,
TP_PROTO(unsigned long cmd, unsigned long arg1, unsigned long arg2, unsigned long arg3),
TP_ARGS(cmd, arg1, arg2, arg3)
);
/*
 * exynos_cpuidle_{in,out} both record only the idle-state index, so they
 * are defined through one event class; names, fields and output are
 * identical to the previous two TRACE_EVENT()s.
 *
 * @idx: cpuidle state index being entered/exited
 */
DECLARE_EVENT_CLASS(exynos_cpuidle,
TP_PROTO(unsigned int idx),
TP_ARGS(idx),
TP_STRUCT__entry(
__field(unsigned int, idx)
),
TP_fast_assign(
__entry->idx = idx;
),
TP_printk("idx: %u", __entry->idx)
);
/* CPU enters the idle state. */
DEFINE_EVENT(exynos_cpuidle, exynos_cpuidle_in,
TP_PROTO(unsigned int idx),
TP_ARGS(idx)
);
/* CPU leaves the idle state. */
DEFINE_EVENT(exynos_cpuidle, exynos_cpuidle_out,
TP_PROTO(unsigned int idx),
TP_ARGS(idx)
);
/* This file can get included multiple times, TRACE_HEADER_MULTI_READ at top */
#endif /* _TRACE_EXYNOS_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

1025
include/trace/events/f2fs.h Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,128 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM fence
#if !defined(_TRACE_FENCE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_FENCE_H
#include <linux/tracepoint.h>
struct fence;
/*
 * fence_annotate_wait_on - record that one fence waits on another.
 *
 * Fix: the "timeline" string was reserved from ->get_driver_name();
 * it must come from ->get_timeline_name(), matching both the
 * __assign_str() below (so the reserved length fits the string actually
 * copied) and the waiting_timeline field of the other fence.
 */
TRACE_EVENT(fence_annotate_wait_on,
/* fence: the fence waiting on f1, f1: the fence to be waited on. */
TP_PROTO(struct fence *fence, struct fence *f1),
TP_ARGS(fence, f1),
TP_STRUCT__entry(
__string(driver, fence->ops->get_driver_name(fence))
__string(timeline, fence->ops->get_timeline_name(fence))
__field(unsigned int, context)
__field(unsigned int, seqno)
__string(waiting_driver, f1->ops->get_driver_name(f1))
__string(waiting_timeline, f1->ops->get_timeline_name(f1))
__field(unsigned int, waiting_context)
__field(unsigned int, waiting_seqno)
),
TP_fast_assign(
__assign_str(driver, fence->ops->get_driver_name(fence))
__assign_str(timeline, fence->ops->get_timeline_name(fence))
__entry->context = fence->context;
__entry->seqno = fence->seqno;
__assign_str(waiting_driver, f1->ops->get_driver_name(f1))
__assign_str(waiting_timeline, f1->ops->get_timeline_name(f1))
__entry->waiting_context = f1->context;
__entry->waiting_seqno = f1->seqno;
),
TP_printk("driver=%s timeline=%s context=%u seqno=%u " \
"waits on driver=%s timeline=%s context=%u seqno=%u",
__get_str(driver), __get_str(timeline), __entry->context,
__entry->seqno,
__get_str(waiting_driver), __get_str(waiting_timeline),
__entry->waiting_context, __entry->waiting_seqno)
);
/*
 * fence - common layout for the fence lifecycle events below: driver
 * name, timeline name, context and sequence number of the fence.
 */
DECLARE_EVENT_CLASS(fence,
TP_PROTO(struct fence *fence),
TP_ARGS(fence),
TP_STRUCT__entry(
__string(driver, fence->ops->get_driver_name(fence))
__string(timeline, fence->ops->get_timeline_name(fence))
__field(unsigned int, context)
__field(unsigned int, seqno)
),
TP_fast_assign(
__assign_str(driver, fence->ops->get_driver_name(fence))
__assign_str(timeline, fence->ops->get_timeline_name(fence))
__entry->context = fence->context;
__entry->seqno = fence->seqno;
),
TP_printk("driver=%s timeline=%s context=%u seqno=%u",
__get_str(driver), __get_str(timeline), __entry->context,
__entry->seqno)
);
/* Lifecycle instances of the fence class: emit, init, destroy,
 * enable-signaling, signaled, and the wait start/end bracket. */
DEFINE_EVENT(fence, fence_emit,
TP_PROTO(struct fence *fence),
TP_ARGS(fence)
);
DEFINE_EVENT(fence, fence_init,
TP_PROTO(struct fence *fence),
TP_ARGS(fence)
);
DEFINE_EVENT(fence, fence_destroy,
TP_PROTO(struct fence *fence),
TP_ARGS(fence)
);
DEFINE_EVENT(fence, fence_enable_signal,
TP_PROTO(struct fence *fence),
TP_ARGS(fence)
);
DEFINE_EVENT(fence, fence_signaled,
TP_PROTO(struct fence *fence),
TP_ARGS(fence)
);
DEFINE_EVENT(fence, fence_wait_start,
TP_PROTO(struct fence *fence),
TP_ARGS(fence)
);
DEFINE_EVENT(fence, fence_wait_end,
TP_PROTO(struct fence *fence),
TP_ARGS(fence)
);
#endif /* _TRACE_FENCE_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,96 @@
/*
* Events for filesystem locks
*
* Copyright 2013 Jeff Layton <jlayton@poochiereds.net>
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM filelock
#if !defined(_TRACE_FILELOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_FILELOCK_H
#include <linux/tracepoint.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/kdev_t.h>
/* Decode a file_lock fl_flags bitmask into "FL_A|FL_B|..." for
 * TP_printk(). */
#define show_fl_flags(val) \
__print_flags(val, "|", \
{ FL_POSIX, "FL_POSIX" }, \
{ FL_FLOCK, "FL_FLOCK" }, \
{ FL_DELEG, "FL_DELEG" }, \
{ FL_ACCESS, "FL_ACCESS" }, \
{ FL_EXISTS, "FL_EXISTS" }, \
{ FL_LEASE, "FL_LEASE" }, \
{ FL_CLOSE, "FL_CLOSE" }, \
{ FL_SLEEP, "FL_SLEEP" }, \
{ FL_DOWNGRADE_PENDING, "FL_DOWNGRADE_PENDING" }, \
{ FL_UNLOCK_PENDING, "FL_UNLOCK_PENDING" }, \
{ FL_OFDLCK, "FL_OFDLCK" })
/* Decode the lock type (read/write/unlock) into its F_* name. */
#define show_fl_type(val) \
__print_symbolic(val, \
{ F_RDLCK, "F_RDLCK" }, \
{ F_WRLCK, "F_WRLCK" }, \
{ F_UNLCK, "F_UNLCK" })
/*
 * filelock_lease - common layout for the lease tracepoints below.
 *
 * @inode: inode the lease applies to; dev/ino are always recorded
 * @fl:    the file_lock involved, may be NULL — every fl_* field then
 *         falls back to NULL/0
 *
 * Cleanup: "__entry->fl = fl ? fl : NULL" was a no-op ternary (a NULL fl
 * already yields NULL), so the pointer is stored directly.
 */
DECLARE_EVENT_CLASS(filelock_lease,
TP_PROTO(struct inode *inode, struct file_lock *fl),
TP_ARGS(inode, fl),
TP_STRUCT__entry(
__field(struct file_lock *, fl)
__field(unsigned long, i_ino)
__field(dev_t, s_dev)
__field(struct file_lock *, fl_next)
__field(fl_owner_t, fl_owner)
__field(unsigned int, fl_flags)
__field(unsigned char, fl_type)
__field(unsigned long, fl_break_time)
__field(unsigned long, fl_downgrade_time)
),
TP_fast_assign(
__entry->fl = fl;
__entry->s_dev = inode->i_sb->s_dev;
__entry->i_ino = inode->i_ino;
__entry->fl_next = fl ? fl->fl_next : NULL;
__entry->fl_owner = fl ? fl->fl_owner : NULL;
__entry->fl_flags = fl ? fl->fl_flags : 0;
__entry->fl_type = fl ? fl->fl_type : 0;
__entry->fl_break_time = fl ? fl->fl_break_time : 0;
__entry->fl_downgrade_time = fl ? fl->fl_downgrade_time : 0;
),
TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_next=0x%p fl_owner=0x%p fl_flags=%s fl_type=%s fl_break_time=%lu fl_downgrade_time=%lu",
__entry->fl, MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
__entry->i_ino, __entry->fl_next, __entry->fl_owner,
show_fl_flags(__entry->fl_flags),
show_fl_type(__entry->fl_type),
__entry->fl_break_time, __entry->fl_downgrade_time)
);
/* Instances of filelock_lease covering lease break (non-blocking, block,
 * unblock), lease add/delete and lease timeout. */
DEFINE_EVENT(filelock_lease, break_lease_noblock, TP_PROTO(struct inode *inode, struct file_lock *fl),
TP_ARGS(inode, fl));
DEFINE_EVENT(filelock_lease, break_lease_block, TP_PROTO(struct inode *inode, struct file_lock *fl),
TP_ARGS(inode, fl));
DEFINE_EVENT(filelock_lease, break_lease_unblock, TP_PROTO(struct inode *inode, struct file_lock *fl),
TP_ARGS(inode, fl));
DEFINE_EVENT(filelock_lease, generic_add_lease, TP_PROTO(struct inode *inode, struct file_lock *fl),
TP_ARGS(inode, fl));
DEFINE_EVENT(filelock_lease, generic_delete_lease, TP_PROTO(struct inode *inode, struct file_lock *fl),
TP_ARGS(inode, fl));
DEFINE_EVENT(filelock_lease, time_out_leases, TP_PROTO(struct inode *inode, struct file_lock *fl),
TP_ARGS(inode, fl));
#endif /* _TRACE_FILELOCK_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,58 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM filemap
#if !defined(_TRACE_FILEMAP_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_FILEMAP_H
#include <linux/types.h>
#include <linux/tracepoint.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/device.h>
#include <linux/kdev_t.h>
/*
 * mm_filemap_op_page_cache - common layout for page-cache add/delete
 * events: the page pointer, owning inode, page index and device.
 */
DECLARE_EVENT_CLASS(mm_filemap_op_page_cache,
TP_PROTO(struct page *page),
TP_ARGS(page),
TP_STRUCT__entry(
__field(struct page *, page)
__field(unsigned long, i_ino)
__field(unsigned long, index)
__field(dev_t, s_dev)
),
TP_fast_assign(
__entry->page = page;
__entry->i_ino = page->mapping->host->i_ino;
__entry->index = page->index;
/* Inodes without a superblock (e.g. raw block-device inodes) carry
 * the device number in i_rdev instead. */
if (page->mapping->host->i_sb)
__entry->s_dev = page->mapping->host->i_sb->s_dev;
else
__entry->s_dev = page->mapping->host->i_rdev;
),
TP_printk("dev %d:%d ino %lx page=%p pfn=%lu ofs=%lu",
MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
__entry->i_ino,
__entry->page,
page_to_pfn(__entry->page),
__entry->index << PAGE_SHIFT)
);
/* Page removed from the page cache. */
DEFINE_EVENT(mm_filemap_op_page_cache, mm_filemap_delete_from_page_cache,
TP_PROTO(struct page *page),
TP_ARGS(page)
);
/* Page inserted into the page cache. */
DEFINE_EVENT(mm_filemap_op_page_cache, mm_filemap_add_to_page_cache,
TP_PROTO(struct page *page),
TP_ARGS(page)
);
#endif /* _TRACE_FILEMAP_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,42 @@
/*
* The order of these masks is important. Matching masks will be seen
* first and the left over flags will end up showing by themselves.
*
* For example, if we have GFP_KERNEL before GFP_USER we wil get:
*
* GFP_KERNEL|GFP_HARDWALL
*
* Thus most bits set go first.
*/
/* Decode a gfp_t allocation mask into "GFP_A|GFP_B|..."; composite masks
 * (GFP_KERNEL, GFP_USER, ...) are listed before their component bits so
 * they match first (see the ordering note above). A zero mask prints
 * "GFP_NOWAIT". */
#define show_gfp_flags(flags) \
(flags) ? __print_flags(flags, "|", \
{(unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \
{(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \
{(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \
{(unsigned long)GFP_USER, "GFP_USER"}, \
{(unsigned long)GFP_TEMPORARY, "GFP_TEMPORARY"}, \
{(unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \
{(unsigned long)GFP_NOFS, "GFP_NOFS"}, \
{(unsigned long)GFP_ATOMIC, "GFP_ATOMIC"}, \
{(unsigned long)GFP_NOIO, "GFP_NOIO"}, \
{(unsigned long)__GFP_HIGH, "GFP_HIGH"}, \
{(unsigned long)__GFP_WAIT, "GFP_WAIT"}, \
{(unsigned long)__GFP_IO, "GFP_IO"}, \
{(unsigned long)__GFP_COLD, "GFP_COLD"}, \
{(unsigned long)__GFP_NOWARN, "GFP_NOWARN"}, \
{(unsigned long)__GFP_REPEAT, "GFP_REPEAT"}, \
{(unsigned long)__GFP_NOFAIL, "GFP_NOFAIL"}, \
{(unsigned long)__GFP_NORETRY, "GFP_NORETRY"}, \
{(unsigned long)__GFP_COMP, "GFP_COMP"}, \
{(unsigned long)__GFP_ZERO, "GFP_ZERO"}, \
{(unsigned long)__GFP_NOMEMALLOC, "GFP_NOMEMALLOC"}, \
{(unsigned long)__GFP_MEMALLOC, "GFP_MEMALLOC"}, \
{(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \
{(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \
{(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \
{(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"}, \
{(unsigned long)__GFP_NOTRACK, "GFP_NOTRACK"}, \
{(unsigned long)__GFP_NO_KSWAPD, "GFP_NO_KSWAPD"}, \
{(unsigned long)__GFP_OTHER_NODE, "GFP_OTHER_NODE"} \
) : "GFP_NOWAIT"

View file

@ -0,0 +1,56 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM gpio
#if !defined(_TRACE_GPIO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_GPIO_H
#include <linux/tracepoint.h>
/*
 * gpio_direction - a GPIO's direction was set; in is nonzero for input,
 * err is the return value of the direction change.
 */
TRACE_EVENT(gpio_direction,
TP_PROTO(unsigned gpio, int in, int err),
TP_ARGS(gpio, in, err),
TP_STRUCT__entry(
__field(unsigned, gpio)
__field(int, in)
__field(int, err)
),
TP_fast_assign(
__entry->gpio = gpio;
__entry->in = in;
__entry->err = err;
),
TP_printk("%u %3s (%d)", __entry->gpio,
__entry->in ? "in" : "out", __entry->err)
);
/*
 * gpio_value - a GPIO value was read (get nonzero) or written; value is
 * the level involved.
 */
TRACE_EVENT(gpio_value,
TP_PROTO(unsigned gpio, int get, int value),
TP_ARGS(gpio, get, value),
TP_STRUCT__entry(
__field(unsigned, gpio)
__field(int, get)
__field(int, value)
),
TP_fast_assign(
__entry->gpio = gpio;
__entry->get = get;
__entry->value = value;
),
TP_printk("%u %3s %d", __entry->gpio,
__entry->get ? "get" : "set", __entry->value)
);
#endif /* if !defined(_TRACE_GPIO_H) || defined(TRACE_HEADER_MULTI_READ) */
/* This part must be outside protection */
#include <trace/define_trace.h>

143
include/trace/events/gpu.h Normal file
View file

@ -0,0 +1,143 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM gpu
#if !defined(_TRACE_GPU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_GPU_H
#include <linux/tracepoint.h>
#include <linux/time.h>
/* Whole seconds of a nanosecond timestamp, rounded to the nearest
 * microsecond before the division. */
#define show_secs_from_ns(ns) \
({ \
u64 t = ns + (NSEC_PER_USEC / 2); \
do_div(t, NSEC_PER_SEC); \
t; \
})
/* Microsecond remainder of a nanosecond timestamp; the statement
 * expression's value is "rem" (the result of the final assignment).
 * NOTE(review): the quotient computed by the last do_div is discarded —
 * presumably intentional, only the sub-second part is wanted. */
#define show_usecs_from_ns(ns) \
({ \
u64 t = ns + (NSEC_PER_USEC / 2) ; \
u32 rem; \
do_div(t, NSEC_PER_USEC); \
rem = do_div(t, USEC_PER_SEC); \
})
/*
* The gpu_sched_switch event indicates that a switch from one GPU context to
* another occurred on one of the GPU hardware blocks.
*
* The gpu_name argument identifies the GPU hardware block. Each independently
* scheduled GPU hardware block should have a different name. This may be used
* in different ways for different GPUs. For example, if a GPU includes
* multiple processing cores it may use names "GPU 0", "GPU 1", etc. If a GPU
* includes a separately scheduled 2D and 3D hardware block, it might use the
* names "2D" and "3D".
*
* The timestamp argument is the timestamp at which the switch occurred on the
* GPU. These timestamps are in units of nanoseconds and must use
* approximately the same time as sched_clock, though they need not come from
* any CPU clock. The timestamps for a single hardware block must be
* monotonically nondecreasing. This means that if a variable compensation
* offset is used to translate from some other clock to the sched_clock, then
* care must be taken when increasing that offset, and doing so may result in
* multiple events with the same timestamp.
*
* The next_ctx_id argument identifies the next context that was running on
* the GPU hardware block. A value of 0 indicates that the hardware block
* will be idle.
*
* The next_prio argument indicates the priority of the next context at the
* time of the event. The exact numeric values may mean different things for
* different GPUs, but they should follow the rule that lower values indicate a
* higher priority.
*
* The next_job_id argument identifies the batch of work that the GPU will be
* working on. This should correspond to a job_id that was previously traced
* as a gpu_job_enqueue event when the batch of work was created.
*/
/*
 * gpu_sched_switch - context switch on a GPU hardware block; semantics
 * of each argument are described in the comment block above.
 */
TRACE_EVENT(gpu_sched_switch,
TP_PROTO(const char *gpu_name, u64 timestamp,
u32 next_ctx_id, s32 next_prio, u32 next_job_id),
TP_ARGS(gpu_name, timestamp, next_ctx_id, next_prio, next_job_id),
TP_STRUCT__entry(
__string( gpu_name, gpu_name )
__field( u64, timestamp )
__field( u32, next_ctx_id )
__field( s32, next_prio )
__field( u32, next_job_id )
),
TP_fast_assign(
__assign_str(gpu_name, gpu_name);
__entry->timestamp = timestamp;
__entry->next_ctx_id = next_ctx_id;
__entry->next_prio = next_prio;
__entry->next_job_id = next_job_id;
),
/* Timestamp printed as seconds.microseconds via the helper macros. */
TP_printk("gpu_name=%s ts=%llu.%06lu next_ctx_id=%lu next_prio=%ld "
"next_job_id=%lu",
__get_str(gpu_name),
(unsigned long long)show_secs_from_ns(__entry->timestamp),
(unsigned long)show_usecs_from_ns(__entry->timestamp),
(unsigned long)__entry->next_ctx_id,
(long)__entry->next_prio,
(unsigned long)__entry->next_job_id)
);
/*
* The gpu_job_enqueue event indicates that a batch of work has been queued up
* to be processed by the GPU. This event is not intended to indicate that
* the batch of work has been submitted to the GPU hardware, but rather that
* it has been submitted to the GPU kernel driver.
*
* This event should be traced on the thread that initiated the work being
* queued. For example, if a batch of work is submitted to the kernel by a
* userland thread, the event should be traced on that thread.
*
* The ctx_id field identifies the GPU context in which the batch of work
* being queued is to be run.
*
* The job_id field identifies the batch of work being queued within the given
* GPU context. The first batch of work submitted for a given GPU context
* should have a job_id of 0, and each subsequent batch of work should
* increment the job_id by 1.
*
* The type field identifies the type of the job being enqueued. The job
* types may be different for different GPU hardware. For example, a GPU may
* differentiate between "2D", "3D", and "compute" jobs.
*/
/*
 * gpu_job_enqueue - a batch of GPU work was queued to the kernel driver;
 * see the comment block above for the ctx_id/job_id/type contract.
 */
TRACE_EVENT(gpu_job_enqueue,
TP_PROTO(u32 ctx_id, u32 job_id, const char *type),
TP_ARGS(ctx_id, job_id, type),
TP_STRUCT__entry(
__field( u32, ctx_id )
__field( u32, job_id )
__string( type, type )
),
TP_fast_assign(
__entry->ctx_id = ctx_id;
__entry->job_id = job_id;
__assign_str(type, type);
),
TP_printk("ctx_id=%lu job_id=%lu type=%s",
(unsigned long)__entry->ctx_id,
(unsigned long)__entry->job_id,
__get_str(type))
);
#undef show_secs_from_ns
#undef show_usecs_from_ns
#endif /* _TRACE_GPU_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,253 @@
/*
* include/trace/events/host1x.h
*
* host1x event logging to ftrace.
*
* Copyright (c) 2010-2013, NVIDIA Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM host1x
#if !defined(_TRACE_HOST1X_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_HOST1X_H
#include <linux/ktime.h>
#include <linux/tracepoint.h>
/*
 * host1x - minimal event class recording only a channel name.
 * NOTE(review): the name is stored as a raw const char * (not copied),
 * so it must point at storage that outlives the trace buffer read —
 * presumably a static channel name; confirm at the call sites.
 */
DECLARE_EVENT_CLASS(host1x,
TP_PROTO(const char *name),
TP_ARGS(name),
TP_STRUCT__entry(__field(const char *, name)),
TP_fast_assign(__entry->name = name;),
TP_printk("name=%s", __entry->name)
);
/* Channel lifecycle and CDMA begin/end instances of the class above. */
DEFINE_EVENT(host1x, host1x_channel_open,
TP_PROTO(const char *name),
TP_ARGS(name)
);
DEFINE_EVENT(host1x, host1x_channel_release,
TP_PROTO(const char *name),
TP_ARGS(name)
);
DEFINE_EVENT(host1x, host1x_cdma_begin,
TP_PROTO(const char *name),
TP_ARGS(name)
);
DEFINE_EVENT(host1x, host1x_cdma_end,
TP_PROTO(const char *name),
TP_ARGS(name)
);
/*
 * host1x_cdma_push - two opcode words pushed to a channel's command DMA.
 */
TRACE_EVENT(host1x_cdma_push,
TP_PROTO(const char *name, u32 op1, u32 op2),
TP_ARGS(name, op1, op2),
TP_STRUCT__entry(
__field(const char *, name)
__field(u32, op1)
__field(u32, op2)
),
TP_fast_assign(
__entry->name = name;
__entry->op1 = op1;
__entry->op2 = op2;
),
TP_printk("name=%s, op1=%08x, op2=%08x",
__entry->name, __entry->op1, __entry->op2)
);
/*
 * host1x_cdma_push_gather - a gather buffer pushed to command DMA; when
 * cmdbuf is non-NULL its contents (words u32s starting at offset) are
 * copied into the event and hex-dumped.
 * NOTE(review): the bool field and the dynamic array share the name
 * "cmdbuf"; the generated field names differ (__data_loc_cmdbuf), but
 * the duplication is confusing — verify the exported format is sane.
 */
TRACE_EVENT(host1x_cdma_push_gather,
TP_PROTO(const char *name, u32 mem_id,
u32 words, u32 offset, void *cmdbuf),
TP_ARGS(name, mem_id, words, offset, cmdbuf),
TP_STRUCT__entry(
__field(const char *, name)
__field(u32, mem_id)
__field(u32, words)
__field(u32, offset)
__field(bool, cmdbuf)
__dynamic_array(u32, cmdbuf, words)
),
TP_fast_assign(
if (cmdbuf) {
memcpy(__get_dynamic_array(cmdbuf), cmdbuf+offset,
words * sizeof(u32));
}
__entry->cmdbuf = cmdbuf;
__entry->name = name;
__entry->mem_id = mem_id;
__entry->words = words;
__entry->offset = offset;
),
TP_printk("name=%s, mem_id=%08x, words=%u, offset=%d, contents=[%s]",
__entry->name, __entry->mem_id,
__entry->words, __entry->offset,
__print_hex(__get_dynamic_array(cmdbuf),
__entry->cmdbuf ? __entry->words * 4 : 0))
);
/*
 * host1x_channel_submit - a job is submitted to a channel: counts of
 * command buffers, relocations and wait checks, plus the syncpoint it
 * will increment and by how much.
 */
TRACE_EVENT(host1x_channel_submit,
TP_PROTO(const char *name, u32 cmdbufs, u32 relocs, u32 waitchks,
u32 syncpt_id, u32 syncpt_incrs),
TP_ARGS(name, cmdbufs, relocs, waitchks, syncpt_id, syncpt_incrs),
TP_STRUCT__entry(
__field(const char *, name)
__field(u32, cmdbufs)
__field(u32, relocs)
__field(u32, waitchks)
__field(u32, syncpt_id)
__field(u32, syncpt_incrs)
),
TP_fast_assign(
__entry->name = name;
__entry->cmdbufs = cmdbufs;
__entry->relocs = relocs;
__entry->waitchks = waitchks;
__entry->syncpt_id = syncpt_id;
__entry->syncpt_incrs = syncpt_incrs;
),
/* NOTE(review): waitchks is u32 but printed with %d; cosmetic only,
 * format kept as-is since it is exported to userspace. */
TP_printk("name=%s, cmdbufs=%u, relocs=%u, waitchks=%d,"
"syncpt_id=%u, syncpt_incrs=%u",
__entry->name, __entry->cmdbufs, __entry->relocs, __entry->waitchks,
__entry->syncpt_id, __entry->syncpt_incrs)
);
/*
 * host1x_channel_submitted - job accepted by hardware; records the
 * syncpoint base and the max value it will reach.
 */
TRACE_EVENT(host1x_channel_submitted,
TP_PROTO(const char *name, u32 syncpt_base, u32 syncpt_max),
TP_ARGS(name, syncpt_base, syncpt_max),
TP_STRUCT__entry(
__field(const char *, name)
__field(u32, syncpt_base)
__field(u32, syncpt_max)
),
TP_fast_assign(
__entry->name = name;
__entry->syncpt_base = syncpt_base;
__entry->syncpt_max = syncpt_max;
),
TP_printk("name=%s, syncpt_base=%d, syncpt_max=%d",
__entry->name, __entry->syncpt_base, __entry->syncpt_max)
);
/*
 * host1x_channel_submit_complete - job finished; count of completed
 * items against the syncpoint threshold waited on.
 */
TRACE_EVENT(host1x_channel_submit_complete,
TP_PROTO(const char *name, int count, u32 thresh),
TP_ARGS(name, count, thresh),
TP_STRUCT__entry(
__field(const char *, name)
__field(int, count)
__field(u32, thresh)
),
TP_fast_assign(
__entry->name = name;
__entry->count = count;
__entry->thresh = thresh;
),
TP_printk("name=%s, count=%d, thresh=%d",
__entry->name, __entry->count, __entry->thresh)
);
/*
 * host1x_wait_cdma - command DMA stalled waiting for the given event id.
 */
TRACE_EVENT(host1x_wait_cdma,
TP_PROTO(const char *name, u32 eventid),
TP_ARGS(name, eventid),
TP_STRUCT__entry(
__field(const char *, name)
__field(u32, eventid)
),
TP_fast_assign(
__entry->name = name;
__entry->eventid = eventid;
),
TP_printk("name=%s, event=%d", __entry->name, __entry->eventid)
);
/*
 * host1x_syncpt_load_min - the cached minimum of a syncpoint was
 * refreshed from hardware.
 */
TRACE_EVENT(host1x_syncpt_load_min,
TP_PROTO(u32 id, u32 val),
TP_ARGS(id, val),
TP_STRUCT__entry(
__field(u32, id)
__field(u32, val)
),
TP_fast_assign(
__entry->id = id;
__entry->val = val;
),
TP_printk("id=%d, val=%d", __entry->id, __entry->val)
);
/*
 * host1x_syncpt_wait_check - validation of a wait check embedded in a
 * command buffer: its location, the syncpoint/threshold it waits on and
 * the current syncpoint minimum.
 */
TRACE_EVENT(host1x_syncpt_wait_check,
TP_PROTO(void *mem_id, u32 offset, u32 syncpt_id, u32 thresh, u32 min),
TP_ARGS(mem_id, offset, syncpt_id, thresh, min),
TP_STRUCT__entry(
__field(void *, mem_id)
__field(u32, offset)
__field(u32, syncpt_id)
__field(u32, thresh)
__field(u32, min)
),
TP_fast_assign(
__entry->mem_id = mem_id;
__entry->offset = offset;
__entry->syncpt_id = syncpt_id;
__entry->thresh = thresh;
__entry->min = min;
),
TP_printk("mem_id=%p, offset=%05x, id=%d, thresh=%d, current=%d",
__entry->mem_id, __entry->offset,
__entry->syncpt_id, __entry->thresh,
__entry->min)
);
#endif /* _TRACE_HOST1X_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,29 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hotplug_governor
#if !defined(_TRACE_HOTPLUG_GOVERNOR_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_HOTPLUG_GOVERNOR_H
#include <linux/tracepoint.h>
/*
 * Hotplug-governor decision: governor event id plus the requested
 * max/min number of online CPUs it derived from that event.
 */
TRACE_EVENT(exynos_hpgov_governor_update,
TP_PROTO(int event, int req_cpu_max, int req_cpu_min),
TP_ARGS(event, req_cpu_max, req_cpu_min),
TP_STRUCT__entry(
__field(int, event)
__field(int, req_cpu_max)
__field(int, req_cpu_min)
),
TP_fast_assign(
__entry->event = event;
__entry->req_cpu_max = req_cpu_max;
__entry->req_cpu_min = req_cpu_min;
),
TP_printk("event=%d req_cpu_max=%d req_cpu_min=%d",
__entry->event, __entry->req_cpu_max, __entry->req_cpu_min)
);
#endif /* _TRACE_HOTPLUG_GOVERNOR_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,384 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hswadsp
#if !defined(_TRACE_HSWADSP_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_HSWADSP_H
#include <linux/types.h>
#include <linux/ktime.h>
#include <linux/tracepoint.h>
struct sst_hsw;
struct sst_hsw_stream;
struct sst_hsw_ipc_stream_free_req;
struct sst_hsw_ipc_volume_req;
struct sst_hsw_ipc_stream_alloc_req;
struct sst_hsw_audio_data_format_ipc;
struct sst_hsw_ipc_stream_info_reply;
struct sst_hsw_ipc_device_config_req;
/* Event class: SST DSP interrupt status/mask register snapshot. */
DECLARE_EVENT_CLASS(sst_irq,
TP_PROTO(uint32_t status, uint32_t mask),
TP_ARGS(status, mask),
TP_STRUCT__entry(
__field( unsigned int, status )
__field( unsigned int, mask )
),
TP_fast_assign(
__entry->status = status;
__entry->mask = mask;
),
TP_printk("status 0x%8.8x mask 0x%8.8x",
(unsigned int)__entry->status, (unsigned int)__entry->mask)
);

/* IRQ raised while the IPC link is busy. */
DEFINE_EVENT(sst_irq, sst_irq_busy,
TP_PROTO(unsigned int status, unsigned int mask),
TP_ARGS(status, mask)
);

/* IRQ handling completed. */
DEFINE_EVENT(sst_irq, sst_irq_done,
TP_PROTO(unsigned int status, unsigned int mask),
TP_ARGS(status, mask)
);

/* Event class: named IPC message with a raw 32-bit payload/header value. */
DECLARE_EVENT_CLASS(ipc,
TP_PROTO(const char *name, int val),
TP_ARGS(name, val),
TP_STRUCT__entry(
__string( name, name )
__field( unsigned int, val )
),
TP_fast_assign(
__assign_str(name, name);
__entry->val = val;
),
TP_printk("%s 0x%8.8x", __get_str(name), (unsigned int)__entry->val)
);

/* Host -> DSP IPC request. */
DEFINE_EVENT(ipc, ipc_request,
TP_PROTO(const char *name, int val),
TP_ARGS(name, val)
);

/* DSP -> host IPC reply. */
DEFINE_EVENT(ipc, ipc_reply,
TP_PROTO(const char *name, int val),
TP_ARGS(name, val)
);

/* Reply received for a message still marked pending. */
DEFINE_EVENT(ipc, ipc_pending_reply,
TP_PROTO(const char *name, int val),
TP_ARGS(name, val)
);

/* Unsolicited notification from the DSP. */
DEFINE_EVENT(ipc, ipc_notification,
TP_PROTO(const char *name, int val),
TP_ARGS(name, val)
);

/* IPC error condition. */
DEFINE_EVENT(ipc, ipc_error,
TP_PROTO(const char *name, int val),
TP_ARGS(name, val)
);

/* Event class: audio stream position update (stream id + byte position). */
DECLARE_EVENT_CLASS(stream_position,
TP_PROTO(unsigned int id, unsigned int pos),
TP_ARGS(id, pos),
TP_STRUCT__entry(
__field( unsigned int, id )
__field( unsigned int, pos )
),
TP_fast_assign(
__entry->id = id;
__entry->pos = pos;
),
TP_printk("id %d position 0x%x",
(unsigned int)__entry->id, (unsigned int)__entry->pos)
);

/* Read (capture) position reported by the DSP. */
DEFINE_EVENT(stream_position, stream_read_position,
TP_PROTO(unsigned int id, unsigned int pos),
TP_ARGS(id, pos)
);

/* Write (playback) position reported by the DSP. */
DEFINE_EVENT(stream_position, stream_write_position,
TP_PROTO(unsigned int id, unsigned int pos),
TP_ARGS(id, pos)
);
/* Ring-buffer page-table setup captured from the stream's alloc request. */
TRACE_EVENT(hsw_stream_buffer,
TP_PROTO(struct sst_hsw_stream *stream),
TP_ARGS(stream),
TP_STRUCT__entry(
__field( int, id )
__field( int, pt_addr )
__field( int, num_pages )
__field( int, ring_size )
__field( int, ring_offset )
__field( int, first_pfn )
),
TP_fast_assign(
__entry->id = stream->host_id;
__entry->pt_addr = stream->request.ringinfo.ring_pt_address;
__entry->num_pages = stream->request.ringinfo.num_pages;
__entry->ring_size = stream->request.ringinfo.ring_size;
__entry->ring_offset = stream->request.ringinfo.ring_offset;
__entry->first_pfn = stream->request.ringinfo.ring_first_pfn;
),
TP_printk("stream %d ring addr 0x%x pages %d size 0x%x offset 0x%x PFN 0x%x",
(int) __entry->id, (int)__entry->pt_addr,
(int)__entry->num_pages, (int)__entry->ring_size,
(int)__entry->ring_offset, (int)__entry->first_pfn)
);

/* Firmware's reply to a stream allocation: hw ids and register addresses. */
TRACE_EVENT(hsw_stream_alloc_reply,
TP_PROTO(struct sst_hsw_stream *stream),
TP_ARGS(stream),
TP_STRUCT__entry(
__field( int, id )
__field( int, stream_id )
__field( int, mixer_id )
__field( int, peak0 )
__field( int, peak1 )
__field( int, vol0 )
__field( int, vol1 )
),
TP_fast_assign(
__entry->id = stream->host_id;
__entry->stream_id = stream->reply.stream_hw_id;
__entry->mixer_id = stream->reply.mixer_hw_id;
__entry->peak0 = stream->reply.peak_meter_register_address[0];
__entry->peak1 = stream->reply.peak_meter_register_address[1];
__entry->vol0 = stream->reply.volume_register_address[0];
__entry->vol1 = stream->reply.volume_register_address[1];
),
TP_printk("stream %d hw id %d mixer %d peak 0x%x:0x%x vol 0x%x,0x%x",
(int) __entry->id, (int) __entry->stream_id, (int)__entry->mixer_id,
(int)__entry->peak0, (int)__entry->peak1,
(int)__entry->vol0, (int)__entry->vol1)
);

/* Mixer info reply: peak-meter and volume register addresses for the mixer. */
TRACE_EVENT(hsw_mixer_info_reply,
TP_PROTO(struct sst_hsw_ipc_stream_info_reply *reply),
TP_ARGS(reply),
TP_STRUCT__entry(
__field( int, mixer_id )
__field( int, peak0 )
__field( int, peak1 )
__field( int, vol0 )
__field( int, vol1 )
),
TP_fast_assign(
__entry->mixer_id = reply->mixer_hw_id;
__entry->peak0 = reply->peak_meter_register_address[0];
__entry->peak1 = reply->peak_meter_register_address[1];
__entry->vol0 = reply->volume_register_address[0];
__entry->vol1 = reply->volume_register_address[1];
),
TP_printk("mixer id %d peak 0x%x:0x%x vol 0x%x,0x%x",
(int)__entry->mixer_id,
(int)__entry->peak0, (int)__entry->peak1,
(int)__entry->vol0, (int)__entry->vol1)
);

/* Audio data format programmed for a stream (rate, depth, channel map...). */
TRACE_EVENT(hsw_stream_data_format,
TP_PROTO(struct sst_hsw_stream *stream,
struct sst_hsw_audio_data_format_ipc *req),
TP_ARGS(stream, req),
TP_STRUCT__entry(
__field( uint32_t, id )
__field( uint32_t, frequency )
__field( uint32_t, bitdepth )
__field( uint32_t, map )
__field( uint32_t, config )
__field( uint32_t, style )
__field( uint8_t, ch_num )
__field( uint8_t, valid_bit )
),
TP_fast_assign(
__entry->id = stream->host_id;
__entry->frequency = req->frequency;
__entry->bitdepth = req->bitdepth;
__entry->map = req->map;
__entry->config = req->config;
__entry->style = req->style;
__entry->ch_num = req->ch_num;
__entry->valid_bit = req->valid_bit;
),
TP_printk("stream %d freq %d depth %d map 0x%x config 0x%x style 0x%x ch %d bits %d",
(int) __entry->id, (uint32_t)__entry->frequency,
(uint32_t)__entry->bitdepth, (uint32_t)__entry->map,
(uint32_t)__entry->config, (uint32_t)__entry->style,
(uint8_t)__entry->ch_num, (uint8_t)__entry->valid_bit)
);
/* Stream allocation request sent to the DSP (path/type/format selectors). */
TRACE_EVENT(hsw_stream_alloc_request,
TP_PROTO(struct sst_hsw_stream *stream,
struct sst_hsw_ipc_stream_alloc_req *req),
TP_ARGS(stream, req),
TP_STRUCT__entry(
__field( uint32_t, id )
__field( uint8_t, path_id )
__field( uint8_t, stream_type )
__field( uint8_t, format_id )
),
TP_fast_assign(
__entry->id = stream->host_id;
__entry->path_id = req->path_id;
__entry->stream_type = req->stream_type;
__entry->format_id = req->format_id;
),
TP_printk("stream %d path %d type %d format %d",
(int) __entry->id, (uint8_t)__entry->path_id,
(uint8_t)__entry->stream_type, (uint8_t)__entry->format_id)
);

/* Stream free request: host id plus the hardware stream id being released. */
TRACE_EVENT(hsw_stream_free_req,
TP_PROTO(struct sst_hsw_stream *stream,
struct sst_hsw_ipc_stream_free_req *req),
TP_ARGS(stream, req),
TP_STRUCT__entry(
__field( int, id )
__field( int, stream_id )
),
TP_fast_assign(
__entry->id = stream->host_id;
__entry->stream_id = req->stream_id;
),
TP_printk("stream %d hw id %d",
(int) __entry->id, (int) __entry->stream_id)
);

/* Volume change request: channel mask, target level and fade curve. */
TRACE_EVENT(hsw_volume_req,
TP_PROTO(struct sst_hsw_stream *stream,
struct sst_hsw_ipc_volume_req *req),
TP_ARGS(stream, req),
TP_STRUCT__entry(
__field( int, id )
__field( uint32_t, channel )
__field( uint32_t, target_volume )
__field( uint64_t, curve_duration )
__field( uint32_t, curve_type )
),
TP_fast_assign(
__entry->id = stream->host_id;
__entry->channel = req->channel;
__entry->target_volume = req->target_volume;
__entry->curve_duration = req->curve_duration;
__entry->curve_type = req->curve_type;
),
TP_printk("stream %d chan 0x%x vol %d duration %llu type %d",
(int) __entry->id, (uint32_t) __entry->channel,
(uint32_t)__entry->target_volume,
(uint64_t)__entry->curve_duration,
(uint32_t)__entry->curve_type)
);

/* SSP device configuration request (interface, clock, mode, divider). */
TRACE_EVENT(hsw_device_config_req,
TP_PROTO(struct sst_hsw_ipc_device_config_req *req),
TP_ARGS(req),
TP_STRUCT__entry(
__field( uint32_t, ssp )
__field( uint32_t, clock_freq )
__field( uint32_t, mode )
__field( uint16_t, clock_divider )
),
TP_fast_assign(
__entry->ssp = req->ssp_interface;
__entry->clock_freq = req->clock_frequency;
__entry->mode = req->mode;
__entry->clock_divider = req->clock_divider;
),
TP_printk("SSP %d Freq %d mode %d div %d",
(uint32_t)__entry->ssp,
(uint32_t)__entry->clock_freq, (uint32_t)__entry->mode,
(uint32_t)__entry->clock_divider)
);
#endif /* _TRACE_HSWADSP_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

372
include/trace/events/i2c.h Normal file
View file

@ -0,0 +1,372 @@
/* I2C and SMBUS message transfer tracepoints
*
* Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM i2c
#if !defined(_TRACE_I2C_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_I2C_H
#include <linux/i2c.h>
#include <linux/tracepoint.h>
/*
* drivers/i2c/i2c-core.c
*/
extern void i2c_transfer_trace_reg(void);
extern void i2c_transfer_trace_unreg(void);
/*
* __i2c_transfer() write request
*/
/*
 * Write message passed into __i2c_transfer().  The payload is copied into
 * a dynamic array sized by msg->len, so the trace record owns its data.
 * The reg/unreg callbacks keep the core trace hooks enabled only while a
 * listener is attached.
 */
TRACE_EVENT_FN(i2c_write,
TP_PROTO(const struct i2c_adapter *adap, const struct i2c_msg *msg,
int num),
TP_ARGS(adap, msg, num),
TP_STRUCT__entry(
__field(int, adapter_nr )
__field(__u16, msg_nr )
__field(__u16, addr )
__field(__u16, flags )
__field(__u16, len )
__dynamic_array(__u8, buf, msg->len) ),
TP_fast_assign(
__entry->adapter_nr = adap->nr;
__entry->msg_nr = num;
__entry->addr = msg->addr;
__entry->flags = msg->flags;
__entry->len = msg->len;
memcpy(__get_dynamic_array(buf), msg->buf, msg->len);
),
TP_printk("i2c-%d #%u a=%03x f=%04x l=%u [%*phD]",
__entry->adapter_nr,
__entry->msg_nr,
__entry->addr,
__entry->flags,
__entry->len,
__entry->len, __get_dynamic_array(buf)
),
i2c_transfer_trace_reg,
i2c_transfer_trace_unreg);
/*
 * __i2c_transfer() read request: only the header is recorded here; the
 * data returned by the device is traced separately by i2c_reply.
 */
TRACE_EVENT_FN(i2c_read,
TP_PROTO(const struct i2c_adapter *adap, const struct i2c_msg *msg,
int num),
TP_ARGS(adap, msg, num),
TP_STRUCT__entry(
__field(int, adapter_nr )
__field(__u16, msg_nr )
__field(__u16, addr )
__field(__u16, flags )
__field(__u16, len )
),
TP_fast_assign(
__entry->adapter_nr = adap->nr;
__entry->msg_nr = num;
__entry->addr = msg->addr;
__entry->flags = msg->flags;
__entry->len = msg->len;
),
TP_printk("i2c-%d #%u a=%03x f=%04x l=%u",
__entry->adapter_nr,
__entry->msg_nr,
__entry->addr,
__entry->flags,
__entry->len
),
i2c_transfer_trace_reg,
i2c_transfer_trace_unreg);
/*
 * __i2c_transfer() read reply: the data the device actually returned,
 * copied into a dynamic array like i2c_write.
 */
TRACE_EVENT_FN(i2c_reply,
TP_PROTO(const struct i2c_adapter *adap, const struct i2c_msg *msg,
int num),
TP_ARGS(adap, msg, num),
TP_STRUCT__entry(
__field(int, adapter_nr )
__field(__u16, msg_nr )
__field(__u16, addr )
__field(__u16, flags )
__field(__u16, len )
__dynamic_array(__u8, buf, msg->len) ),
TP_fast_assign(
__entry->adapter_nr = adap->nr;
__entry->msg_nr = num;
__entry->addr = msg->addr;
__entry->flags = msg->flags;
__entry->len = msg->len;
memcpy(__get_dynamic_array(buf), msg->buf, msg->len);
),
TP_printk("i2c-%d #%u a=%03x f=%04x l=%u [%*phD]",
__entry->adapter_nr,
__entry->msg_nr,
__entry->addr,
__entry->flags,
__entry->len,
__entry->len, __get_dynamic_array(buf)
),
i2c_transfer_trace_reg,
i2c_transfer_trace_unreg);
/*
 * __i2c_transfer() final result: number of messages attempted and the
 * return code (number completed, or negative errno).
 */
TRACE_EVENT_FN(i2c_result,
TP_PROTO(const struct i2c_adapter *adap, int num, int ret),
TP_ARGS(adap, num, ret),
TP_STRUCT__entry(
__field(int, adapter_nr )
__field(__u16, nr_msgs )
__field(__s16, ret )
),
TP_fast_assign(
__entry->adapter_nr = adap->nr;
__entry->nr_msgs = num;
__entry->ret = ret;
),
TP_printk("i2c-%d n=%u ret=%d",
__entry->adapter_nr,
__entry->nr_msgs,
__entry->ret
),
i2c_transfer_trace_reg,
i2c_transfer_trace_unreg);
/*
* i2c_smbus_xfer() write data or procedure call request
*/
/*
 * SMBus write (or procedure-call request): fires only for writes and for
 * the two PROC_CALL protocols (the condition mirrors smbus_read's inverse).
 * The switch below derives the payload length from the protocol; block
 * protocols take it from data->block[0] (+1 for the length byte itself).
 */
TRACE_EVENT_CONDITION(smbus_write,
TP_PROTO(const struct i2c_adapter *adap,
u16 addr, unsigned short flags,
char read_write, u8 command, int protocol,
const union i2c_smbus_data *data),
TP_ARGS(adap, addr, flags, read_write, command, protocol, data),
TP_CONDITION(read_write == I2C_SMBUS_WRITE ||
protocol == I2C_SMBUS_PROC_CALL ||
protocol == I2C_SMBUS_BLOCK_PROC_CALL),
TP_STRUCT__entry(
__field(int, adapter_nr )
__field(__u16, addr )
__field(__u16, flags )
__field(__u8, command )
__field(__u8, len )
__field(__u32, protocol )
__array(__u8, buf, I2C_SMBUS_BLOCK_MAX + 2) ),
TP_fast_assign(
__entry->adapter_nr = adap->nr;
__entry->addr = addr;
__entry->flags = flags;
__entry->command = command;
__entry->protocol = protocol;
switch (protocol) {
case I2C_SMBUS_BYTE_DATA:
__entry->len = 1;
goto copy;
case I2C_SMBUS_WORD_DATA:
case I2C_SMBUS_PROC_CALL:
__entry->len = 2;
goto copy;
case I2C_SMBUS_BLOCK_DATA:
case I2C_SMBUS_BLOCK_PROC_CALL:
case I2C_SMBUS_I2C_BLOCK_DATA:
__entry->len = data->block[0] + 1;
copy:
memcpy(__entry->buf, data->block, __entry->len);
break;
case I2C_SMBUS_QUICK:
case I2C_SMBUS_BYTE:
case I2C_SMBUS_I2C_BLOCK_BROKEN:
default:
__entry->len = 0;
}
),
TP_printk("i2c-%d a=%03x f=%04x c=%x %s l=%u [%*phD]",
__entry->adapter_nr,
__entry->addr,
__entry->flags,
__entry->command,
__print_symbolic(__entry->protocol,
{ I2C_SMBUS_QUICK, "QUICK" },
{ I2C_SMBUS_BYTE, "BYTE" },
{ I2C_SMBUS_BYTE_DATA, "BYTE_DATA" },
{ I2C_SMBUS_WORD_DATA, "WORD_DATA" },
{ I2C_SMBUS_PROC_CALL, "PROC_CALL" },
{ I2C_SMBUS_BLOCK_DATA, "BLOCK_DATA" },
{ I2C_SMBUS_I2C_BLOCK_BROKEN, "I2C_BLOCK_BROKEN" },
{ I2C_SMBUS_BLOCK_PROC_CALL, "BLOCK_PROC_CALL" },
{ I2C_SMBUS_I2C_BLOCK_DATA, "I2C_BLOCK_DATA" }),
__entry->len,
__entry->len, __entry->buf
));
/*
* i2c_smbus_xfer() read data request
*/
/*
 * SMBus read request: exact logical complement of smbus_write's condition.
 * NOTE(review): the "buf" array is declared but never filled or printed
 * here (the returned data is traced by smbus_reply) -- it appears to be
 * reserved ring-buffer space only; confirm it is intentional.
 */
TRACE_EVENT_CONDITION(smbus_read,
TP_PROTO(const struct i2c_adapter *adap,
u16 addr, unsigned short flags,
char read_write, u8 command, int protocol),
TP_ARGS(adap, addr, flags, read_write, command, protocol),
TP_CONDITION(!(read_write == I2C_SMBUS_WRITE ||
protocol == I2C_SMBUS_PROC_CALL ||
protocol == I2C_SMBUS_BLOCK_PROC_CALL)),
TP_STRUCT__entry(
__field(int, adapter_nr )
__field(__u16, flags )
__field(__u16, addr )
__field(__u8, command )
__field(__u32, protocol )
__array(__u8, buf, I2C_SMBUS_BLOCK_MAX + 2) ),
TP_fast_assign(
__entry->adapter_nr = adap->nr;
__entry->addr = addr;
__entry->flags = flags;
__entry->command = command;
__entry->protocol = protocol;
),
TP_printk("i2c-%d a=%03x f=%04x c=%x %s",
__entry->adapter_nr,
__entry->addr,
__entry->flags,
__entry->command,
__print_symbolic(__entry->protocol,
{ I2C_SMBUS_QUICK, "QUICK" },
{ I2C_SMBUS_BYTE, "BYTE" },
{ I2C_SMBUS_BYTE_DATA, "BYTE_DATA" },
{ I2C_SMBUS_WORD_DATA, "WORD_DATA" },
{ I2C_SMBUS_PROC_CALL, "PROC_CALL" },
{ I2C_SMBUS_BLOCK_DATA, "BLOCK_DATA" },
{ I2C_SMBUS_I2C_BLOCK_BROKEN, "I2C_BLOCK_BROKEN" },
{ I2C_SMBUS_BLOCK_PROC_CALL, "BLOCK_PROC_CALL" },
{ I2C_SMBUS_I2C_BLOCK_DATA, "I2C_BLOCK_DATA" })
));
/*
* i2c_smbus_xfer() read data or procedure call reply
*/
/*
 * SMBus reply for a read (or procedure call): fires only when the
 * transfer direction was READ.  Unlike smbus_write, I2C_SMBUS_BYTE
 * carries one byte of returned data, hence it falls into the len=1 case.
 */
TRACE_EVENT_CONDITION(smbus_reply,
TP_PROTO(const struct i2c_adapter *adap,
u16 addr, unsigned short flags,
char read_write, u8 command, int protocol,
const union i2c_smbus_data *data),
TP_ARGS(adap, addr, flags, read_write, command, protocol, data),
TP_CONDITION(read_write == I2C_SMBUS_READ),
TP_STRUCT__entry(
__field(int, adapter_nr )
__field(__u16, addr )
__field(__u16, flags )
__field(__u8, command )
__field(__u8, len )
__field(__u32, protocol )
__array(__u8, buf, I2C_SMBUS_BLOCK_MAX + 2) ),
TP_fast_assign(
__entry->adapter_nr = adap->nr;
__entry->addr = addr;
__entry->flags = flags;
__entry->command = command;
__entry->protocol = protocol;
switch (protocol) {
case I2C_SMBUS_BYTE:
case I2C_SMBUS_BYTE_DATA:
__entry->len = 1;
goto copy;
case I2C_SMBUS_WORD_DATA:
case I2C_SMBUS_PROC_CALL:
__entry->len = 2;
goto copy;
case I2C_SMBUS_BLOCK_DATA:
case I2C_SMBUS_BLOCK_PROC_CALL:
case I2C_SMBUS_I2C_BLOCK_DATA:
__entry->len = data->block[0] + 1;
copy:
memcpy(__entry->buf, data->block, __entry->len);
break;
case I2C_SMBUS_QUICK:
case I2C_SMBUS_I2C_BLOCK_BROKEN:
default:
__entry->len = 0;
}
),
TP_printk("i2c-%d a=%03x f=%04x c=%x %s l=%u [%*phD]",
__entry->adapter_nr,
__entry->addr,
__entry->flags,
__entry->command,
__print_symbolic(__entry->protocol,
{ I2C_SMBUS_QUICK, "QUICK" },
{ I2C_SMBUS_BYTE, "BYTE" },
{ I2C_SMBUS_BYTE_DATA, "BYTE_DATA" },
{ I2C_SMBUS_WORD_DATA, "WORD_DATA" },
{ I2C_SMBUS_PROC_CALL, "PROC_CALL" },
{ I2C_SMBUS_BLOCK_DATA, "BLOCK_DATA" },
{ I2C_SMBUS_I2C_BLOCK_BROKEN, "I2C_BLOCK_BROKEN" },
{ I2C_SMBUS_BLOCK_PROC_CALL, "BLOCK_PROC_CALL" },
{ I2C_SMBUS_I2C_BLOCK_DATA, "I2C_BLOCK_DATA" }),
__entry->len,
__entry->len, __entry->buf
));
/*
* i2c_smbus_xfer() result
*/
/*
 * Final status of an i2c_smbus_xfer() call: direction, protocol and the
 * (0 or negative errno) result code.
 */
TRACE_EVENT(smbus_result,
TP_PROTO(const struct i2c_adapter *adap,
u16 addr, unsigned short flags,
char read_write, u8 command, int protocol,
int res),
TP_ARGS(adap, addr, flags, read_write, command, protocol, res),
TP_STRUCT__entry(
__field(int, adapter_nr )
__field(__u16, addr )
__field(__u16, flags )
__field(__u8, read_write )
__field(__u8, command )
__field(__s16, res )
__field(__u32, protocol )
),
TP_fast_assign(
__entry->adapter_nr = adap->nr;
__entry->addr = addr;
__entry->flags = flags;
__entry->read_write = read_write;
__entry->command = command;
__entry->protocol = protocol;
__entry->res = res;
),
TP_printk("i2c-%d a=%03x f=%04x c=%x %s %s res=%d",
__entry->adapter_nr,
__entry->addr,
__entry->flags,
__entry->command,
__print_symbolic(__entry->protocol,
{ I2C_SMBUS_QUICK, "QUICK" },
{ I2C_SMBUS_BYTE, "BYTE" },
{ I2C_SMBUS_BYTE_DATA, "BYTE_DATA" },
{ I2C_SMBUS_WORD_DATA, "WORD_DATA" },
{ I2C_SMBUS_PROC_CALL, "PROC_CALL" },
{ I2C_SMBUS_BLOCK_DATA, "BLOCK_DATA" },
{ I2C_SMBUS_I2C_BLOCK_BROKEN, "I2C_BLOCK_BROKEN" },
{ I2C_SMBUS_BLOCK_PROC_CALL, "BLOCK_PROC_CALL" },
{ I2C_SMBUS_I2C_BLOCK_DATA, "I2C_BLOCK_DATA" }),
__entry->read_write == I2C_SMBUS_WRITE ? "wr" : "rd",
__entry->res
));
#endif /* _TRACE_I2C_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,148 @@
#undef TRACE_SYSTEM
/*
 * NOTE(review): TRACE_SYSTEM must expand to a valid C identifier because
 * the tracing macros token-paste it; "intel-sst" contains '-'.  Upstream
 * resolved this by using "intel_sst" together with
 * "#define TRACE_INCLUDE_FILE intel-sst" -- confirm against this tree's
 * tracing infrastructure before changing.
 */
#define TRACE_SYSTEM intel-sst
#if !defined(_TRACE_INTEL_SST_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_INTEL_SST_H
#include <linux/types.h>
#include <linux/ktime.h>
#include <linux/tracepoint.h>
/* Event class: raw 32-bit IPC message word exchanged with the SST DSP. */
DECLARE_EVENT_CLASS(sst_ipc_msg,
TP_PROTO(unsigned int val),
TP_ARGS(val),
TP_STRUCT__entry(
__field( unsigned int, val )
),
TP_fast_assign(
__entry->val = val;
),
TP_printk("0x%8.8x", (unsigned int)__entry->val)
);

/* Message sent host -> DSP. */
DEFINE_EVENT(sst_ipc_msg, sst_ipc_msg_tx,
TP_PROTO(unsigned int val),
TP_ARGS(val)
);

/* Message received DSP -> host. */
DEFINE_EVENT(sst_ipc_msg, sst_ipc_msg_rx,
TP_PROTO(unsigned int val),
TP_ARGS(val)
);

/* Event class: single mailbox word access (byte offset and value). */
DECLARE_EVENT_CLASS(sst_ipc_mailbox,
TP_PROTO(unsigned int offset, unsigned int val),
TP_ARGS(offset, val),
TP_STRUCT__entry(
__field( unsigned int, offset )
__field( unsigned int, val )
),
TP_fast_assign(
__entry->offset = offset;
__entry->val = val;
),
TP_printk(" 0x%4.4x = 0x%8.8x",
(unsigned int)__entry->offset, (unsigned int)__entry->val)
);

/* Word read from the inbox mailbox. */
DEFINE_EVENT(sst_ipc_mailbox, sst_ipc_inbox_rdata,
TP_PROTO(unsigned int offset, unsigned int val),
TP_ARGS(offset, val)
);

/* Word written to the inbox mailbox. */
DEFINE_EVENT(sst_ipc_mailbox, sst_ipc_inbox_wdata,
TP_PROTO(unsigned int offset, unsigned int val),
TP_ARGS(offset, val)
);

/* Word read from the outbox mailbox. */
DEFINE_EVENT(sst_ipc_mailbox, sst_ipc_outbox_rdata,
TP_PROTO(unsigned int offset, unsigned int val),
TP_ARGS(offset, val)
);

/* Word written to the outbox mailbox. */
DEFINE_EVENT(sst_ipc_mailbox, sst_ipc_outbox_wdata,
TP_PROTO(unsigned int offset, unsigned int val),
TP_ARGS(offset, val)
);

/* Event class: bulk mailbox transfer, recording only the size in bytes. */
DECLARE_EVENT_CLASS(sst_ipc_mailbox_info,
TP_PROTO(unsigned int size),
TP_ARGS(size),
TP_STRUCT__entry(
__field( unsigned int, size )
),
TP_fast_assign(
__entry->size = size;
),
TP_printk("Mailbox bytes 0x%8.8x", (unsigned int)__entry->size)
);

/* Bulk read from the inbox. */
DEFINE_EVENT(sst_ipc_mailbox_info, sst_ipc_inbox_read,
TP_PROTO(unsigned int size),
TP_ARGS(size)
);

/* Bulk write to the inbox. */
DEFINE_EVENT(sst_ipc_mailbox_info, sst_ipc_inbox_write,
TP_PROTO(unsigned int size),
TP_ARGS(size)
);

/* Bulk read from the outbox. */
DEFINE_EVENT(sst_ipc_mailbox_info, sst_ipc_outbox_read,
TP_PROTO(unsigned int size),
TP_ARGS(size)
);

/* Bulk write to the outbox. */
DEFINE_EVENT(sst_ipc_mailbox_info, sst_ipc_outbox_write,
TP_PROTO(unsigned int size),
TP_ARGS(size)
);
#endif /* _TRACE_INTEL_SST_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,162 @@
/*
* iommu trace points
*
* Copyright (C) 2013 Shuah Khan <shuah.kh@samsung.com>
*
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iommu
#if !defined(_TRACE_IOMMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IOMMU_H
#include <linux/tracepoint.h>
#include <linux/pci.h>
struct device;
/* Event class: a device joining or leaving an IOMMU group. */
DECLARE_EVENT_CLASS(iommu_group_event,
TP_PROTO(int group_id, struct device *dev),
TP_ARGS(group_id, dev),
TP_STRUCT__entry(
__field(int, gid)
__string(device, dev_name(dev))
),
TP_fast_assign(
__entry->gid = group_id;
__assign_str(device, dev_name(dev));
),
TP_printk("IOMMU: groupID=%d device=%s",
__entry->gid, __get_str(device)
)
);

/* Device added to an IOMMU group. */
DEFINE_EVENT(iommu_group_event, add_device_to_group,
TP_PROTO(int group_id, struct device *dev),
TP_ARGS(group_id, dev)
);

/* Device removed from an IOMMU group. */
DEFINE_EVENT(iommu_group_event, remove_device_from_group,
TP_PROTO(int group_id, struct device *dev),
TP_ARGS(group_id, dev)
);

/* Event class: per-device domain attach/detach. */
DECLARE_EVENT_CLASS(iommu_device_event,
TP_PROTO(struct device *dev),
TP_ARGS(dev),
TP_STRUCT__entry(
__string(device, dev_name(dev))
),
TP_fast_assign(
__assign_str(device, dev_name(dev));
),
TP_printk("IOMMU: device=%s", __get_str(device)
)
);

/* Device attached to an IOMMU domain. */
DEFINE_EVENT(iommu_device_event, attach_device_to_domain,
TP_PROTO(struct device *dev),
TP_ARGS(dev)
);

/* Device detached from an IOMMU domain. */
DEFINE_EVENT(iommu_device_event, detach_device_from_domain,
TP_PROTO(struct device *dev),
TP_ARGS(dev)
);

/*
 * Event class: IOMMU map/unmap of a physical range at an IO virtual address.
 * NOTE(review): "size" is a size_t in the prototype but stored in an int
 * field and printed with %x -- mappings larger than INT_MAX would be
 * truncated in the trace; confirm whether that range is possible here.
 */
DECLARE_EVENT_CLASS(iommu_map_unmap,
TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
TP_ARGS(iova, paddr, size),
TP_STRUCT__entry(
__field(u64, iova)
__field(u64, paddr)
__field(int, size)
),
TP_fast_assign(
__entry->iova = iova;
__entry->paddr = paddr;
__entry->size = size;
),
TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=0x%x",
__entry->iova, __entry->paddr, __entry->size
)
);

/* Range mapped into the IOMMU. */
DEFINE_EVENT(iommu_map_unmap, map,
TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
TP_ARGS(iova, paddr, size)
);

/* Range unmapped; the printout omits paddr, which is meaningless here. */
DEFINE_EVENT_PRINT(iommu_map_unmap, unmap,
TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
TP_ARGS(iova, paddr, size),
TP_printk("IOMMU: iova=0x%016llx size=0x%x",
__entry->iova, __entry->size
)
);
/* Event class: IOMMU fault, recording driver/device names and fault info. */
DECLARE_EVENT_CLASS(iommu_error,
TP_PROTO(struct device *dev, unsigned long iova, int flags),
TP_ARGS(dev, iova, flags),
TP_STRUCT__entry(
__string(device, dev_name(dev))
__string(driver, dev_driver_string(dev))
__field(u64, iova)
__field(int, flags)
),
TP_fast_assign(
__assign_str(device, dev_name(dev));
__assign_str(driver, dev_driver_string(dev));
__entry->iova = iova;
__entry->flags = flags;
),
TP_printk("IOMMU:%s %s iova=0x%016llx flags=0x%04x",
__get_str(driver), __get_str(device),
__entry->iova, __entry->flags
)
);

/* IO page fault reported by the IOMMU for a device access. */
DEFINE_EVENT(iommu_error, io_page_fault,
TP_PROTO(struct device *dev, unsigned long iova, int flags),
TP_ARGS(dev, iova, flags)
);
#endif /* _TRACE_IOMMU_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

277
include/trace/events/ion.h Normal file
View file

@ -0,0 +1,277 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ion
#if !defined(_TRACE_ION_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_ION_H
#include <linux/types.h>
#include <linux/tracepoint.h>
/*
 * Decode ION buffer flag bits for trace output; a zero flags word is
 * reported as "noncached".  Bit positions mirror the ION UAPI flags.
 * NOTE(review): dir_string() does not parenthesize its "dir" argument,
 * so it is only safe when expanded with a simple expression (as the
 * TP_printk below does) -- confirm no other callers exist.
 */
#define show_buffer_flags(flags) \
(flags) ? __print_flags(flags, "|", \
{(unsigned long) (1 << 0), "cached"}, \
{(unsigned long) (1 << 1), "needsync"}, \
{(unsigned long) (1 << 2), "kmap"}, \
{(unsigned long) (1 << 3), "nozeroed"}, \
{(unsigned long) (1 << 11), "shrink"}, \
{(unsigned long) (1 << 12), "migrated"}, \
{(unsigned long) (1 << 13), "ready"} \
) : "noncached"
/* Human-readable DMA direction for trace output. */
#define dir_string(dir) \
dir == DMA_BIDIRECTIONAL ? "bidirectional" : \
dir == DMA_TO_DEVICE ? "to_device" : \
dir == DMA_FROM_DEVICE ? "from_device" : \
dir == DMA_NONE ? "none" : "invalid"
/*
 * Event class: ION buffer allocation parameters.
 * NOTE(review): client_name is stored as a raw pointer, not copied --
 * assumes the client name string outlives the trace buffer; confirm.
 */
DECLARE_EVENT_CLASS(ion_alloc,
TP_PROTO(const char *client_name,
unsigned long buffer_id,
size_t len,
size_t align,
unsigned int heap_id_mask,
unsigned int flags),
TP_ARGS(client_name, buffer_id, len, align, heap_id_mask, flags),
TP_STRUCT__entry(
__field( const char *, client_name )
__field( unsigned long, buffer_id )
__field( size_t, len )
__field( size_t, align )
__field( unsigned int, heap_id_mask )
__field( unsigned int, flags )
),
TP_fast_assign(
__entry->client_name = client_name;
__entry->buffer_id = buffer_id;
__entry->len = len;
__entry->align = align;
__entry->heap_id_mask = heap_id_mask;
__entry->flags = flags;
),
TP_printk("client=%s, buffer=%08lx, len=%zd, "
"align=%zd, heap_id_mask=%d, flags=%#x(%s)",
__entry->client_name,
__entry->buffer_id,
__entry->len,
__entry->align,
__entry->heap_id_mask,
show_buffer_flags(__entry->flags)
)
);

/* Allocation entered. */
DEFINE_EVENT(ion_alloc, ion_alloc_start,
TP_PROTO(const char *client_name,
unsigned long buffer_id,
size_t len,
size_t align,
unsigned int heap_id_mask,
unsigned int flags),
TP_ARGS(client_name, buffer_id, len, align, heap_id_mask, flags)
);

/* Allocation succeeded. */
DEFINE_EVENT(ion_alloc, ion_alloc_end,
TP_PROTO(const char *client_name,
unsigned long buffer_id,
size_t len,
size_t align,
unsigned int heap_id_mask,
unsigned int flags),
TP_ARGS(client_name, buffer_id, len, align, heap_id_mask, flags)
);

/* Allocation failed. */
DEFINE_EVENT(ion_alloc, ion_alloc_fail,
TP_PROTO(const char *client_name,
unsigned long buffer_id,
size_t len,
size_t align,
unsigned int heap_id_mask,
unsigned int flags),
TP_ARGS(client_name, buffer_id, len, align, heap_id_mask, flags)
);

/* Event class: buffer free; shrinker flags frees driven by memory pressure. */
DECLARE_EVENT_CLASS(ion_free,
TP_PROTO(unsigned long buffer_id, size_t len, bool shrinker),
TP_ARGS(buffer_id, len, shrinker),
TP_STRUCT__entry(
__field( unsigned long, buffer_id )
__field( size_t, len )
__field( bool, shrinker )
),
TP_fast_assign(
__entry->buffer_id = buffer_id;
__entry->len = len;
__entry->shrinker = shrinker;
),
TP_printk("buffer=%08lx, len=%zd, shrinker=%s",
__entry->buffer_id,
__entry->len,
__entry->shrinker ? "yes" : "no"
)
);

/* Free entered. */
DEFINE_EVENT(ion_free, ion_free_start,
TP_PROTO(unsigned long buffer_id, size_t len, bool shrinker),
TP_ARGS(buffer_id, len, shrinker)
);

/* Free completed. */
DEFINE_EVENT(ion_free, ion_free_end,
TP_PROTO(unsigned long buffer_id, size_t len, bool shrinker),
TP_ARGS(buffer_id, len, shrinker)
);

/* Event class: buffer mmap; faultmap marks lazy (fault-driven) mapping. */
DECLARE_EVENT_CLASS(ion_mmap,
TP_PROTO(unsigned long buffer_id, size_t len, bool faultmap),
TP_ARGS(buffer_id, len, faultmap),
TP_STRUCT__entry(
__field( unsigned long, buffer_id )
__field( size_t, len )
__field( bool, faultmap )
),
TP_fast_assign(
__entry->buffer_id = buffer_id;
__entry->len = len;
__entry->faultmap = faultmap;
),
TP_printk("buffer=%08lx, len=%zd, faultmap=%s",
__entry->buffer_id,
__entry->len,
__entry->faultmap ? "yes" : "no"
)
);

/* mmap entered. */
DEFINE_EVENT(ion_mmap, ion_mmap_start,
TP_PROTO(unsigned long buffer_id, size_t len, bool faultmap),
TP_ARGS(buffer_id, len, faultmap)
);

/* mmap completed. */
DEFINE_EVENT(ion_mmap, ion_mmap_end,
TP_PROTO(unsigned long buffer_id, size_t len, bool faultmap),
TP_ARGS(buffer_id, len, faultmap)
);

/* Shrinker pass over ION pools: pages asked for vs. pages actually freed. */
TRACE_EVENT(ion_shrink,
TP_PROTO(unsigned long nr_to_scan, unsigned long freed),
TP_ARGS(nr_to_scan, freed),
TP_STRUCT__entry(
__field( unsigned long, nr_to_scan )
__field( unsigned long, freed )
),
TP_fast_assign(
__entry->nr_to_scan = nr_to_scan;
__entry->freed = freed;
),
TP_printk("nr_to_scan=%lu, freed=%lu",
__entry->nr_to_scan,
__entry->freed
)
);
/*
 * Event class: cache maintenance (sync) on an ION buffer region.
 * NOTE(review): TP_printk passes &__entry->caller to %ps, i.e. the address
 * of the ring-buffer field rather than the recorded caller value; that
 * would symbolize the wrong address in-kernel.  It likely should be
 * (void *)__entry->caller -- confirm against the trace output before fixing.
 */
DECLARE_EVENT_CLASS(ion_sync,
TP_PROTO(unsigned long caller,
struct device *dev,
enum dma_data_direction dir,
size_t size,
void *vaddr,
off_t offset,
bool flush_all),
TP_ARGS(caller, dev, dir, size, vaddr, offset, flush_all),
TP_STRUCT__entry(
__field( unsigned long, caller )
__field( struct device *, dev )
__field( enum dma_data_direction, dir )
__field( size_t, size )
__field( void *, vaddr )
__field( off_t, offset )
__field( bool, flush_all )
),
TP_fast_assign(
__entry->caller = caller;
__entry->dev = dev;
__entry->dir = dir;
__entry->size = size;
__entry->vaddr = vaddr;
__entry->offset = offset;
__entry->flush_all = flush_all;
),
TP_printk("caller=%ps, dev=%s, dir=%s, size=%zd, "
"va=%p, offs=%ld, all=%s",
&__entry->caller,
dev_name(__entry->dev),
dir_string(__entry->dir),
__entry->size,
__entry->vaddr,
__entry->offset,
__entry->flush_all ? "yes" : "no"
)
);

/* Sync operation entered. */
DEFINE_EVENT(ion_sync, ion_sync_start,
TP_PROTO(unsigned long caller,
struct device *dev,
enum dma_data_direction dir,
size_t size,
void *vaddr,
off_t offset,
bool flush_all),
TP_ARGS(caller, dev, dir, size, vaddr, offset, flush_all)
);

/* Sync operation completed. */
DEFINE_EVENT(ion_sync, ion_sync_end,
TP_PROTO(unsigned long caller,
struct device *dev,
enum dma_data_direction dir,
size_t size,
void *vaddr,
off_t offset,
bool flush_all),
TP_ARGS(caller, dev, dir, size, vaddr, offset, flush_all)
);
#endif /* _TRACE_ION_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,89 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ipi
#if !defined(_TRACE_IPI_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IPI_H
#include <linux/tracepoint.h>
/**
* ipi_raise - called when a smp cross call is made
*
* @mask: mask of recipient CPUs for the IPI
* @reason: string identifying the IPI purpose
*
* It is necessary for @reason to be a static string declared with
* __tracepoint_string.
*/
/*
 * IPI raised to a set of CPUs.  The reason string is stored by pointer,
 * which is why the kernel-doc above requires a __tracepoint_string.
 */
TRACE_EVENT(ipi_raise,
TP_PROTO(const struct cpumask *mask, const char *reason),
TP_ARGS(mask, reason),
TP_STRUCT__entry(
__bitmask(target_cpus, nr_cpumask_bits)
__field(const char *, reason)
),
TP_fast_assign(
__assign_bitmask(target_cpus, cpumask_bits(mask), nr_cpumask_bits);
__entry->reason = reason;
),
TP_printk("target_mask=%s (%s)", __get_bitmask(target_cpus), __entry->reason)
);

/* Event class shared by ipi_entry/ipi_exit: reason pointer only. */
DECLARE_EVENT_CLASS(ipi_handler,
TP_PROTO(const char *reason),
TP_ARGS(reason),
TP_STRUCT__entry(
__field(const char *, reason)
),
TP_fast_assign(
__entry->reason = reason;
),
TP_printk("(%s)", __entry->reason)
);

/**
 * ipi_entry - called immediately before the IPI handler
 *
 * @reason: string identifying the IPI purpose
 *
 * It is necessary for @reason to be a static string declared with
 * __tracepoint_string, ideally the same as used with trace_ipi_raise
 * for that IPI.
 */
DEFINE_EVENT(ipi_handler, ipi_entry,
TP_PROTO(const char *reason),
TP_ARGS(reason)
);

/**
 * ipi_exit - called immediately after the IPI handler returns
 *
 * @reason: string identifying the IPI purpose
 *
 * It is necessary for @reason to be a static string declared with
 * __tracepoint_string, ideally the same as used with trace_ipi_raise for
 * that IPI.
 */
DEFINE_EVENT(ipi_handler, ipi_exit,
TP_PROTO(const char *reason),
TP_ARGS(reason)
);
#endif /* _TRACE_IPI_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

188
include/trace/events/irq.h Normal file
View file

@ -0,0 +1,188 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM irq
#if !defined(_TRACE_IRQ_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IRQ_H
#include <linux/tracepoint.h>
struct irqaction;
struct softirq_action;
struct tasklet_struct;
/* Map a softirq vector number to its name for __print_symbolic(). */
#define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }
#define show_softirq_name(val) \
__print_symbolic(val, \
softirq_name(HI), \
softirq_name(TIMER), \
softirq_name(NET_TX), \
softirq_name(NET_RX), \
softirq_name(BLOCK), \
softirq_name(BLOCK_IOPOLL), \
softirq_name(TASKLET), \
softirq_name(SCHED), \
softirq_name(HRTIMER), \
softirq_name(RCU))
/**
* irq_handler_entry - called immediately before the irq action handler
* @irq: irq number
* @action: pointer to struct irqaction
*
* The struct irqaction pointed to by @action contains various
* information about the handler, including the device name,
* @action->name, and the device id, @action->dev_id. When used in
* conjunction with the irq_handler_exit tracepoint, we can figure
* out irq handler latencies.
*/
TRACE_EVENT(irq_handler_entry,
TP_PROTO(int irq, struct irqaction *action),
TP_ARGS(irq, action),
TP_STRUCT__entry(
__field( int, irq )
__string( name, action->name )
),
TP_fast_assign(
__entry->irq = irq;
__assign_str(name, action->name);
),
TP_printk("irq=%d name=%s", __entry->irq, __get_str(name))
);
/**
* irq_handler_exit - called immediately after the irq action handler returns
* @irq: irq number
* @action: pointer to struct irqaction
* @ret: return value
*
* If the @ret value is set to IRQ_HANDLED, then we know that the corresponding
* @action->handler scuccessully handled this irq. Otherwise, the irq might be
* a shared irq line, or the irq was not handled successfully. Can be used in
* conjunction with the irq_handler_entry to understand irq handler latencies.
*/
TRACE_EVENT(irq_handler_exit,
TP_PROTO(int irq, struct irqaction *action, int ret),
TP_ARGS(irq, action, ret),
TP_STRUCT__entry(
__field( int, irq )
__field( int, ret )
),
TP_fast_assign(
__entry->irq = irq;
__entry->ret = ret;
),
TP_printk("irq=%d ret=%s",
__entry->irq, __entry->ret ? "handled" : "unhandled")
);
DECLARE_EVENT_CLASS(softirq,
TP_PROTO(unsigned int vec_nr),
TP_ARGS(vec_nr),
TP_STRUCT__entry(
__field( unsigned int, vec )
),
TP_fast_assign(
__entry->vec = vec_nr;
),
TP_printk("vec=%u [action=%s]", __entry->vec,
show_softirq_name(__entry->vec))
);
/**
* softirq_entry - called immediately before the softirq handler
* @vec_nr: softirq vector number
*
* When used in combination with the softirq_exit tracepoint
* we can determine the softirq handler routine.
*/
DEFINE_EVENT(softirq, softirq_entry,
TP_PROTO(unsigned int vec_nr),
TP_ARGS(vec_nr)
);
/**
* softirq_exit - called immediately after the softirq handler returns
* @vec_nr: softirq vector number
*
* When used in combination with the softirq_entry tracepoint
* we can determine the softirq handler routine.
*/
DEFINE_EVENT(softirq, softirq_exit,
TP_PROTO(unsigned int vec_nr),
TP_ARGS(vec_nr)
);
/**
* softirq_raise - called immediately when a softirq is raised
* @vec_nr: softirq vector number
*
* When used in combination with the softirq_entry tracepoint
* we can determine the softirq raise to run latency.
*/
DEFINE_EVENT(softirq, softirq_raise,
TP_PROTO(unsigned int vec_nr),
TP_ARGS(vec_nr)
);
/* tasklet traces */
TRACE_EVENT(tasklet_entry,
TP_PROTO(struct tasklet_struct *t),
TP_ARGS(t),
TP_STRUCT__entry(
__field( void *, function)
__field( unsigned long, data )
),
TP_fast_assign(
__entry->function = t->func;
__entry->data = t->data;
),
TP_printk("function=%pf data=%lx", __entry->function, __entry->data)
);
TRACE_EVENT(tasklet_exit,
TP_PROTO(struct tasklet_struct *t),
TP_ARGS(t),
TP_STRUCT__entry(
__field( void *, function)
),
TP_fast_assign(
__entry->function = t->func;
),
TP_printk("function=%pf", __entry->function)
);
#endif /* _TRACE_IRQ_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

194
include/trace/events/jbd.h Normal file
View file

@ -0,0 +1,194 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM jbd
#if !defined(_TRACE_JBD_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_JBD_H
#include <linux/jbd.h>
#include <linux/tracepoint.h>
/* Journal checkpoint finished on @journal; @result is the outcome code. */
TRACE_EVENT(jbd_checkpoint,
TP_PROTO(journal_t *journal, int result),
TP_ARGS(journal, result),
TP_STRUCT__entry(
__field(	dev_t,	dev			)
__field(	int,	result			)
),
TP_fast_assign(
__entry->dev		= journal->j_fs_dev->bd_dev;
__entry->result		= result;
),
TP_printk("dev %d,%d result %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->result)
);
/*
 * jbd_commit - event class for the commit-phase events below; records the
 * journal's filesystem device and the transaction tid.
 */
DECLARE_EVENT_CLASS(jbd_commit,
TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
TP_ARGS(journal, commit_transaction),
TP_STRUCT__entry(
__field(	dev_t,	dev			)
__field(	int,	transaction		)
),
TP_fast_assign(
__entry->dev		= journal->j_fs_dev->bd_dev;
__entry->transaction	= commit_transaction->t_tid;
),
TP_printk("dev %d,%d transaction %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->transaction)
);
DEFINE_EVENT(jbd_commit, jbd_start_commit,
TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
TP_ARGS(journal, commit_transaction)
);
DEFINE_EVENT(jbd_commit, jbd_commit_locking,
TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
TP_ARGS(journal, commit_transaction)
);
DEFINE_EVENT(jbd_commit, jbd_commit_flushing,
TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
TP_ARGS(journal, commit_transaction)
);
DEFINE_EVENT(jbd_commit, jbd_commit_logging,
TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
TP_ARGS(journal, commit_transaction)
);
/* Transaction dropped from the journal; same fields as the commit class. */
TRACE_EVENT(jbd_drop_transaction,
TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
TP_ARGS(journal, commit_transaction),
TP_STRUCT__entry(
__field(	dev_t,	dev			)
__field(	int,	transaction		)
),
TP_fast_assign(
__entry->dev		= journal->j_fs_dev->bd_dev;
__entry->transaction	= commit_transaction->t_tid;
),
TP_printk("dev %d,%d transaction %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->transaction)
);
/* Commit completed; also records the journal head (j_tail_sequence). */
TRACE_EVENT(jbd_end_commit,
TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
TP_ARGS(journal, commit_transaction),
TP_STRUCT__entry(
__field(	dev_t,	dev			)
__field(	int,	transaction		)
__field(	int,	head			)
),
TP_fast_assign(
__entry->dev		= journal->j_fs_dev->bd_dev;
__entry->transaction	= commit_transaction->t_tid;
__entry->head		= journal->j_tail_sequence;
),
TP_printk("dev %d,%d transaction %d head %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->transaction, __entry->head)
);
TRACE_EVENT(jbd_do_submit_data,
TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
TP_ARGS(journal, commit_transaction),
TP_STRUCT__entry(
__field(	dev_t,	dev			)
__field(	int,	transaction		)
),
TP_fast_assign(
__entry->dev		= journal->j_fs_dev->bd_dev;
__entry->transaction	= commit_transaction->t_tid;
),
TP_printk("dev %d,%d transaction %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->transaction)
);
/*
 * Journal tail moved: tid range [tail_sequence, first_tid), new tail block
 * @block_nr, @freed blocks released.
 */
TRACE_EVENT(jbd_cleanup_journal_tail,
TP_PROTO(journal_t *journal, tid_t first_tid,
unsigned long block_nr, unsigned long freed),
TP_ARGS(journal, first_tid, block_nr, freed),
TP_STRUCT__entry(
__field(	dev_t,	dev			)
__field(	tid_t,	tail_sequence		)
__field(	tid_t,	first_tid		)
__field(unsigned long,	block_nr		)
__field(unsigned long,	freed			)
),
TP_fast_assign(
__entry->dev		= journal->j_fs_dev->bd_dev;
__entry->tail_sequence	= journal->j_tail_sequence;
__entry->first_tid	= first_tid;
__entry->block_nr	= block_nr;
__entry->freed		= freed;
),
TP_printk("dev %d,%d from %u to %u offset %lu freed %lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->tail_sequence, __entry->first_tid,
__entry->block_nr, __entry->freed)
);
/* Journal superblock submitted for write; @write_op is the submitted op. */
TRACE_EVENT(journal_write_superblock,
TP_PROTO(journal_t *journal, int write_op),
TP_ARGS(journal, write_op),
TP_STRUCT__entry(
__field(	dev_t,	dev			)
__field(	  int,	write_op		)
),
TP_fast_assign(
__entry->dev		= journal->j_fs_dev->bd_dev;
__entry->write_op	= write_op;
),
TP_printk("dev %d,%d write_op %x", MAJOR(__entry->dev),
MINOR(__entry->dev), __entry->write_op)
);
#endif /* _TRACE_JBD_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

385
include/trace/events/jbd2.h Normal file
View file

@ -0,0 +1,385 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM jbd2
#if !defined(_TRACE_JBD2_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_JBD2_H
#include <linux/jbd2.h>
#include <linux/tracepoint.h>
struct transaction_chp_stats_s;
struct transaction_run_stats_s;
/* Journal checkpoint finished on @journal; @result is the outcome code. */
TRACE_EVENT(jbd2_checkpoint,
TP_PROTO(journal_t *journal, int result),
TP_ARGS(journal, result),
TP_STRUCT__entry(
__field(	dev_t,	dev			)
__field(	int,	result			)
),
TP_fast_assign(
__entry->dev		= journal->j_fs_dev->bd_dev;
__entry->result		= result;
),
TP_printk("dev %d,%d result %d",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->result)
);
/*
 * jbd2_commit - event class for the commit-phase events below; unlike the
 * jbd variant it also records whether the commit was synchronous.
 */
DECLARE_EVENT_CLASS(jbd2_commit,
TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
TP_ARGS(journal, commit_transaction),
TP_STRUCT__entry(
__field(	dev_t,	dev			)
__field(	char,	sync_commit		  )
__field(	int,	transaction		  )
),
TP_fast_assign(
__entry->dev		= journal->j_fs_dev->bd_dev;
__entry->sync_commit = commit_transaction->t_synchronous_commit;
__entry->transaction	= commit_transaction->t_tid;
),
TP_printk("dev %d,%d transaction %d sync %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->transaction, __entry->sync_commit)
);
DEFINE_EVENT(jbd2_commit, jbd2_start_commit,
TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
TP_ARGS(journal, commit_transaction)
);
DEFINE_EVENT(jbd2_commit, jbd2_commit_locking,
TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
TP_ARGS(journal, commit_transaction)
);
DEFINE_EVENT(jbd2_commit, jbd2_commit_flushing,
TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
TP_ARGS(journal, commit_transaction)
);
DEFINE_EVENT(jbd2_commit, jbd2_commit_logging,
TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
TP_ARGS(journal, commit_transaction)
);
DEFINE_EVENT(jbd2_commit, jbd2_drop_transaction,
TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
TP_ARGS(journal, commit_transaction)
);
/* Commit completed; also records the journal head (j_tail_sequence). */
TRACE_EVENT(jbd2_end_commit,
TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
TP_ARGS(journal, commit_transaction),
TP_STRUCT__entry(
__field(	dev_t,	dev			)
__field(	char,	sync_commit		  )
__field(	int,	transaction		  )
__field(	int,	head			  )
),
TP_fast_assign(
__entry->dev		= journal->j_fs_dev->bd_dev;
__entry->sync_commit = commit_transaction->t_synchronous_commit;
__entry->transaction	= commit_transaction->t_tid;
__entry->head		= journal->j_tail_sequence;
),
TP_printk("dev %d,%d transaction %d sync %d head %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->transaction, __entry->sync_commit, __entry->head)
);
TRACE_EVENT(jbd2_submit_inode_data,
TP_PROTO(struct inode *inode),
TP_ARGS(inode),
TP_STRUCT__entry(
__field(	dev_t,	dev			)
__field(	ino_t,	ino			)
),
TP_fast_assign(
__entry->dev	= inode->i_sb->s_dev;
__entry->ino	= inode->i_ino;
),
TP_printk("dev %d,%d ino %lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino)
);
/*
 * jbd2 handle lifetime events: start/extend/stats are keyed by
 * (dev, tid, type, line_no) which identify the code site holding the handle.
 */
TRACE_EVENT(jbd2_handle_start,
TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
unsigned int line_no, int requested_blocks),
TP_ARGS(dev, tid, type, line_no, requested_blocks),
TP_STRUCT__entry(
__field(		dev_t,	dev		)
__field(	unsigned long,	tid		)
__field(	 unsigned int,	type		)
__field(	 unsigned int,	line_no		)
__field(		  int,	requested_blocks)
),
TP_fast_assign(
__entry->dev		  = dev;
__entry->tid		  = tid;
__entry->type		  = type;
__entry->line_no	  = line_no;
__entry->requested_blocks = requested_blocks;
),
TP_printk("dev %d,%d tid %lu type %u line_no %u "
"requested_blocks %d",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
__entry->type, __entry->line_no, __entry->requested_blocks)
);
TRACE_EVENT(jbd2_handle_extend,
TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
unsigned int line_no, int buffer_credits,
int requested_blocks),
TP_ARGS(dev, tid, type, line_no, buffer_credits, requested_blocks),
TP_STRUCT__entry(
__field(		dev_t,	dev		)
__field(	unsigned long,	tid		)
__field(	 unsigned int,	type		)
__field(	 unsigned int,	line_no		)
__field(		  int,	buffer_credits  )
__field(		  int,	requested_blocks)
),
TP_fast_assign(
__entry->dev		  = dev;
__entry->tid		  = tid;
__entry->type		  = type;
__entry->line_no	  = line_no;
__entry->buffer_credits   = buffer_credits;
__entry->requested_blocks = requested_blocks;
),
TP_printk("dev %d,%d tid %lu type %u line_no %u "
"buffer_credits %d requested_blocks %d",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
__entry->type, __entry->line_no, __entry->buffer_credits,
__entry->requested_blocks)
);
TRACE_EVENT(jbd2_handle_stats,
TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
unsigned int line_no, int interval, int sync,
int requested_blocks, int dirtied_blocks),
TP_ARGS(dev, tid, type, line_no, interval, sync,
requested_blocks, dirtied_blocks),
TP_STRUCT__entry(
__field(		dev_t,	dev		)
__field(	unsigned long,	tid		)
__field(	 unsigned int,	type		)
__field(	 unsigned int,	line_no		)
__field(		  int,	interval	)
__field(		  int,	sync		)
__field(		  int,	requested_blocks)
__field(		  int,	dirtied_blocks	)
),
TP_fast_assign(
__entry->dev		  = dev;
__entry->tid		  = tid;
__entry->type		  = type;
__entry->line_no	  = line_no;
__entry->interval	  = interval;
__entry->sync		  = sync;
__entry->requested_blocks = requested_blocks;
__entry->dirtied_blocks	  = dirtied_blocks;
),
TP_printk("dev %d,%d tid %lu type %u line_no %u interval %d "
"sync %d requested_blocks %d dirtied_blocks %d",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
__entry->type, __entry->line_no, __entry->interval,
__entry->sync, __entry->requested_blocks,
__entry->dirtied_blocks)
);
/*
 * Per-transaction run statistics; the raw counters are stored and only
 * converted with jiffies_to_msecs() at print time.
 */
TRACE_EVENT(jbd2_run_stats,
TP_PROTO(dev_t dev, unsigned long tid,
struct transaction_run_stats_s *stats),
TP_ARGS(dev, tid, stats),
TP_STRUCT__entry(
__field(		dev_t,	dev		)
__field(	unsigned long,	tid		)
__field(	unsigned long,	wait		)
__field(	unsigned long,	request_delay	)
__field(	unsigned long,	running		)
__field(	unsigned long,	locked		)
__field(	unsigned long,	flushing	)
__field(	unsigned long,	logging		)
__field(		__u32,	handle_count	)
__field(		__u32,	blocks		)
__field(		__u32,	blocks_logged	)
),
TP_fast_assign(
__entry->dev		= dev;
__entry->tid		= tid;
__entry->wait		= stats->rs_wait;
__entry->request_delay	= stats->rs_request_delay;
__entry->running	= stats->rs_running;
__entry->locked		= stats->rs_locked;
__entry->flushing	= stats->rs_flushing;
__entry->logging	= stats->rs_logging;
__entry->handle_count	= stats->rs_handle_count;
__entry->blocks		= stats->rs_blocks;
__entry->blocks_logged	= stats->rs_blocks_logged;
),
TP_printk("dev %d,%d tid %lu wait %u request_delay %u running %u "
"locked %u flushing %u logging %u handle_count %u "
"blocks %u blocks_logged %u",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
jiffies_to_msecs(__entry->wait),
jiffies_to_msecs(__entry->request_delay),
jiffies_to_msecs(__entry->running),
jiffies_to_msecs(__entry->locked),
jiffies_to_msecs(__entry->flushing),
jiffies_to_msecs(__entry->logging),
__entry->handle_count, __entry->blocks,
__entry->blocks_logged)
);
TRACE_EVENT(jbd2_checkpoint_stats,
TP_PROTO(dev_t dev, unsigned long tid,
struct transaction_chp_stats_s *stats),
TP_ARGS(dev, tid, stats),
TP_STRUCT__entry(
__field(		dev_t,	dev		)
__field(	unsigned long,	tid		)
__field(	unsigned long,	chp_time	)
__field(		__u32,	forced_to_close	)
__field(		__u32,	written		)
__field(		__u32,	dropped		)
),
TP_fast_assign(
__entry->dev		= dev;
__entry->tid		= tid;
__entry->chp_time	= stats->cs_chp_time;
__entry->forced_to_close= stats->cs_forced_to_close;
__entry->written	= stats->cs_written;
__entry->dropped	= stats->cs_dropped;
),
TP_printk("dev %d,%d tid %lu chp_time %u forced_to_close %u "
"written %u dropped %u",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
jiffies_to_msecs(__entry->chp_time),
__entry->forced_to_close, __entry->written, __entry->dropped)
);
/*
 * Log tail updated: tid range [tail_sequence, first_tid), new tail block
 * @block_nr, @freed blocks released.
 */
TRACE_EVENT(jbd2_update_log_tail,
TP_PROTO(journal_t *journal, tid_t first_tid,
unsigned long block_nr, unsigned long freed),
TP_ARGS(journal, first_tid, block_nr, freed),
TP_STRUCT__entry(
__field(	dev_t,	dev			)
__field(	tid_t,	tail_sequence		)
__field(	tid_t,	first_tid		)
__field(unsigned long,	block_nr		)
__field(unsigned long,	freed			)
),
TP_fast_assign(
__entry->dev		= journal->j_fs_dev->bd_dev;
__entry->tail_sequence	= journal->j_tail_sequence;
__entry->first_tid	= first_tid;
__entry->block_nr	= block_nr;
__entry->freed		= freed;
),
TP_printk("dev %d,%d from %u to %u offset %lu freed %lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->tail_sequence, __entry->first_tid,
__entry->block_nr, __entry->freed)
);
/* Journal superblock submitted for write; @write_op is the submitted op. */
TRACE_EVENT(jbd2_write_superblock,
TP_PROTO(journal_t *journal, int write_op),
TP_ARGS(journal, write_op),
TP_STRUCT__entry(
__field(	dev_t,	dev			)
__field(	  int,	write_op		)
),
TP_fast_assign(
__entry->dev		= journal->j_fs_dev->bd_dev;
__entry->write_op	= write_op;
),
TP_printk("dev %d,%d write_op %x", MAJOR(__entry->dev),
MINOR(__entry->dev), __entry->write_op)
);
/* Time (in ms) spent stalled waiting for a journal buffer lock. */
TRACE_EVENT(jbd2_lock_buffer_stall,
TP_PROTO(dev_t dev, unsigned long stall_ms),
TP_ARGS(dev, stall_ms),
TP_STRUCT__entry(
__field(        dev_t, dev	)
__field(unsigned long, stall_ms	)
),
TP_fast_assign(
__entry->dev		= dev;
__entry->stall_ms	= stall_ms;
),
TP_printk("dev %d,%d stall_ms %lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->stall_ms)
);
#endif /* _TRACE_JBD2_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

311
include/trace/events/kmem.h Normal file
View file

@ -0,0 +1,311 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kmem
#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KMEM_H
#include <linux/types.h>
#include <linux/tracepoint.h>
#include <trace/events/gfpflags.h>
/*
 * kmem_alloc - event class for kmalloc/kmem_cache_alloc: call site,
 * returned pointer, requested vs actually-allocated bytes, and gfp flags.
 */
DECLARE_EVENT_CLASS(kmem_alloc,
TP_PROTO(unsigned long call_site,
const void *ptr,
size_t bytes_req,
size_t bytes_alloc,
gfp_t gfp_flags),
TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),
TP_STRUCT__entry(
__field(	unsigned long,	call_site	)
__field(	const void *,	ptr		)
__field(	size_t,		bytes_req	)
__field(	size_t,		bytes_alloc	)
__field(	gfp_t,		gfp_flags	)
),
TP_fast_assign(
__entry->call_site	= call_site;
__entry->ptr		= ptr;
__entry->bytes_req	= bytes_req;
__entry->bytes_alloc	= bytes_alloc;
__entry->gfp_flags	= gfp_flags;
),
TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
__entry->call_site,
__entry->ptr,
__entry->bytes_req,
__entry->bytes_alloc,
show_gfp_flags(__entry->gfp_flags))
);
DEFINE_EVENT(kmem_alloc, kmalloc,
TP_PROTO(unsigned long call_site, const void *ptr,
size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);
DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,
TP_PROTO(unsigned long call_site, const void *ptr,
size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);
/* Same as kmem_alloc but additionally records the NUMA node requested. */
DECLARE_EVENT_CLASS(kmem_alloc_node,
TP_PROTO(unsigned long call_site,
const void *ptr,
size_t bytes_req,
size_t bytes_alloc,
gfp_t gfp_flags,
int node),
TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),
TP_STRUCT__entry(
__field(	unsigned long,	call_site	)
__field(	const void *,	ptr		)
__field(	size_t,		bytes_req	)
__field(	size_t,		bytes_alloc	)
__field(	gfp_t,		gfp_flags	)
__field(	int,		node		)
),
TP_fast_assign(
__entry->call_site	= call_site;
__entry->ptr		= ptr;
__entry->bytes_req	= bytes_req;
__entry->bytes_alloc	= bytes_alloc;
__entry->gfp_flags	= gfp_flags;
__entry->node		= node;
),
TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
__entry->call_site,
__entry->ptr,
__entry->bytes_req,
__entry->bytes_alloc,
show_gfp_flags(__entry->gfp_flags),
__entry->node)
);
DEFINE_EVENT(kmem_alloc_node, kmalloc_node,
TP_PROTO(unsigned long call_site, const void *ptr,
size_t bytes_req, size_t bytes_alloc,
gfp_t gfp_flags, int node),
TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);
DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,
TP_PROTO(unsigned long call_site, const void *ptr,
size_t bytes_req, size_t bytes_alloc,
gfp_t gfp_flags, int node),
TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);
/* kmem_free - event class for kfree/kmem_cache_free: call site + pointer. */
DECLARE_EVENT_CLASS(kmem_free,
TP_PROTO(unsigned long call_site, const void *ptr),
TP_ARGS(call_site, ptr),
TP_STRUCT__entry(
__field(	unsigned long,	call_site	)
__field(	const void *,	ptr		)
),
TP_fast_assign(
__entry->call_site	= call_site;
__entry->ptr		= ptr;
),
TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
);
DEFINE_EVENT(kmem_free, kfree,
TP_PROTO(unsigned long call_site, const void *ptr),
TP_ARGS(call_site, ptr)
);
DEFINE_EVENT(kmem_free, kmem_cache_free,
TP_PROTO(unsigned long call_site, const void *ptr),
TP_ARGS(call_site, ptr)
);
TRACE_EVENT(mm_page_free,
TP_PROTO(struct page *page, unsigned int order),
TP_ARGS(page, order),
TP_STRUCT__entry(
__field(	struct page *,	page		)
__field(	unsigned int,	order		)
),
TP_fast_assign(
__entry->page		= page;
__entry->order		= order;
),
TP_printk("page=%p pfn=%lu order=%d",
__entry->page,
page_to_pfn(__entry->page),
__entry->order)
);
TRACE_EVENT(mm_page_free_batched,
TP_PROTO(struct page *page, int cold),
TP_ARGS(page, cold),
TP_STRUCT__entry(
__field(	struct page *,	page		)
__field(	int,		cold		)
),
TP_fast_assign(
__entry->page		= page;
__entry->cold		= cold;
),
TP_printk("page=%p pfn=%lu order=0 cold=%d",
__entry->page,
page_to_pfn(__entry->page),
__entry->cold)
);
/* @page may be NULL on allocation failure; pfn is printed as 0 then. */
TRACE_EVENT(mm_page_alloc,
TP_PROTO(struct page *page, unsigned int order,
gfp_t gfp_flags, int migratetype),
TP_ARGS(page, order, gfp_flags, migratetype),
TP_STRUCT__entry(
__field(	struct page *,	page		)
__field(	unsigned int,	order		)
__field(	gfp_t,		gfp_flags	)
__field(	int,		migratetype	)
),
TP_fast_assign(
__entry->page		= page;
__entry->order		= order;
__entry->gfp_flags	= gfp_flags;
__entry->migratetype	= migratetype;
),
TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
__entry->page,
__entry->page ? page_to_pfn(__entry->page) : 0,
__entry->order,
__entry->migratetype,
show_gfp_flags(__entry->gfp_flags))
);
/*
 * mm_page - event class for zone-locked alloc / pcpu drain; note
 * percpu_refill is printed as (order == 0), not stored separately.
 */
DECLARE_EVENT_CLASS(mm_page,
TP_PROTO(struct page *page, unsigned int order, int migratetype),
TP_ARGS(page, order, migratetype),
TP_STRUCT__entry(
__field(	struct page *,	page		)
__field(	unsigned int,	order		)
__field(	int,		migratetype	)
),
TP_fast_assign(
__entry->page		= page;
__entry->order		= order;
__entry->migratetype	= migratetype;
),
TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
__entry->page,
__entry->page ? page_to_pfn(__entry->page) : 0,
__entry->order,
__entry->migratetype,
__entry->order == 0)
);
DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,
TP_PROTO(struct page *page, unsigned int order, int migratetype),
TP_ARGS(page, order, migratetype)
);
DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain,
TP_PROTO(struct page *page, unsigned int order, int migratetype),
TP_ARGS(page, order, migratetype),
TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
__entry->page, page_to_pfn(__entry->page),
__entry->order, __entry->migratetype)
);
/*
 * External fragmentation fallback: an allocation fell back from its
 * preferred migratetype; change_ownership records whether the pageblock's
 * migratetype now matches the allocation's.
 */
TRACE_EVENT(mm_page_alloc_extfrag,
TP_PROTO(struct page *page,
int alloc_order, int fallback_order,
int alloc_migratetype, int fallback_migratetype),
TP_ARGS(page,
alloc_order, fallback_order,
alloc_migratetype, fallback_migratetype),
TP_STRUCT__entry(
__field(	struct page *,	page			)
__field(	int,		alloc_order		)
__field(	int,		fallback_order		)
__field(	int,		alloc_migratetype	)
__field(	int,		fallback_migratetype	)
__field(	int,		change_ownership	)
),
TP_fast_assign(
__entry->page			= page;
__entry->alloc_order		= alloc_order;
__entry->fallback_order		= fallback_order;
__entry->alloc_migratetype	= alloc_migratetype;
__entry->fallback_migratetype	= fallback_migratetype;
__entry->change_ownership	= (alloc_migratetype ==
get_pageblock_migratetype(page));
),
TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
__entry->page,
page_to_pfn(__entry->page),
__entry->alloc_order,
__entry->fallback_order,
pageblock_order,
__entry->alloc_migratetype,
__entry->fallback_migratetype,
__entry->fallback_order < pageblock_order,
__entry->change_ownership)
);
#endif /* _TRACE_KMEM_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

343
include/trace/events/kvm.h Normal file
View file

@ -0,0 +1,343 @@
#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_MAIN_H
#include <linux/tracepoint.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
/* Map KVM_EXIT_* codes to their names for symbolic trace output. */
#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }
#define kvm_trace_exit_reason						\
ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL),	\
ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN),	\
ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR),	\
ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL),	\
ERSN(S390_UCONTROL), ERSN(WATCHDOG), ERSN(S390_TSCH)
/*
 * Exit to userspace: on error (@errno < 0) prints "restart"/"error" and the
 * positive errno; otherwise prints the symbolic exit reason.
 */
TRACE_EVENT(kvm_userspace_exit,
	    TP_PROTO(__u32 reason, int errno),
	    TP_ARGS(reason, errno),
TP_STRUCT__entry(
__field(	__u32,		reason		)
__field(	int,		errno		)
),
TP_fast_assign(
__entry->reason		= reason;
__entry->errno		= errno;
),
TP_printk("reason %s (%d)",
__entry->errno < 0 ?
(__entry->errno == -EINTR ? "restart" : "error") :
__print_symbolic(__entry->reason, kvm_trace_exit_reason),
__entry->errno < 0 ? -__entry->errno : __entry->reason)
);
#if defined(CONFIG_HAVE_KVM_IRQFD)
TRACE_EVENT(kvm_set_irq,
	TP_PROTO(unsigned int gsi, int level, int irq_source_id),
	TP_ARGS(gsi, level, irq_source_id),
TP_STRUCT__entry(
__field(	unsigned int,	gsi		)
__field(	int,		level		)
__field(	int,		irq_source_id	)
),
TP_fast_assign(
__entry->gsi		= gsi;
__entry->level		= level;
__entry->irq_source_id	= irq_source_id;
),
TP_printk("gsi %u level %d source %d",
__entry->gsi, __entry->level, __entry->irq_source_id)
);
#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */
#if defined(__KVM_HAVE_IOAPIC)
/* Delivery-mode field (bits 8-10 of the redirection entry) to name. */
#define kvm_deliver_mode		\
{0x0, "Fixed"},			\
{0x1, "LowPrio"},		\
{0x2, "SMI"},			\
{0x3, "Res3"},			\
{0x4, "NMI"},			\
{0x5, "INIT"},			\
{0x6, "SIPI"},			\
{0x7, "ExtINT"}
/* @e is the raw 64-bit IOAPIC redirection entry; fields decoded at print. */
TRACE_EVENT(kvm_ioapic_set_irq,
	    TP_PROTO(__u64 e, int pin, bool coalesced),
	    TP_ARGS(e, pin, coalesced),
TP_STRUCT__entry(
__field(	__u64,		e		)
__field(	int,		pin		)
__field(	bool,		coalesced	)
),
TP_fast_assign(
__entry->e		= e;
__entry->pin		= pin;
__entry->coalesced	= coalesced;
),
TP_printk("pin %u dst %x vec=%u (%s|%s|%s%s)%s",
__entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
__print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
(__entry->e & (1<<11)) ? "logical" : "physical",
(__entry->e & (1<<15)) ? "level" : "edge",
(__entry->e & (1<<16)) ? "|masked" : "",
__entry->coalesced ? " (coalesced)" : "")
);
TRACE_EVENT(kvm_ioapic_delayed_eoi_inj,
	    TP_PROTO(__u64 e),
	    TP_ARGS(e),
TP_STRUCT__entry(
__field(	__u64,		e		)
),
TP_fast_assign(
__entry->e		= e;
),
TP_printk("dst %x vec=%u (%s|%s|%s%s)",
(u8)(__entry->e >> 56), (u8)__entry->e,
__print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
(__entry->e & (1<<11)) ? "logical" : "physical",
(__entry->e & (1<<15)) ? "level" : "edge",
(__entry->e & (1<<16)) ? "|masked" : "")
);
TRACE_EVENT(kvm_msi_set_irq,
	    TP_PROTO(__u64 address, __u64 data),
	    TP_ARGS(address, data),
TP_STRUCT__entry(
__field(	__u64,		address		)
__field(	__u64,		data		)
),
TP_fast_assign(
__entry->address	= address;
__entry->data		= data;
),
TP_printk("dst %u vec %x (%s|%s|%s%s)",
(u8)(__entry->address >> 12), (u8)__entry->data,
__print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
(__entry->address & (1<<2)) ? "logical" : "physical",
(__entry->data & (1<<15)) ? "level" : "edge",
(__entry->address & (1<<3)) ? "|rh" : "")
);
#define kvm_irqchips						\
{KVM_IRQCHIP_PIC_MASTER,	"PIC master"},		\
{KVM_IRQCHIP_PIC_SLAVE,		"PIC slave"},		\
{KVM_IRQCHIP_IOAPIC,		"IOAPIC"}
#endif /* defined(__KVM_HAVE_IOAPIC) */
#if defined(CONFIG_HAVE_KVM_IRQFD)
/*
 * kvm_ack_irq: the printk is chosen at preprocessing time — symbolic
 * irqchip names when kvm_irqchips is defined (IOAPIC case above), plain
 * numeric output otherwise.
 */
TRACE_EVENT(kvm_ack_irq,
	TP_PROTO(unsigned int irqchip, unsigned int pin),
	TP_ARGS(irqchip, pin),
TP_STRUCT__entry(
__field(	unsigned int,	irqchip		)
__field(	unsigned int,	pin		)
),
TP_fast_assign(
__entry->irqchip	= irqchip;
__entry->pin		= pin;
),
#ifdef kvm_irqchips
TP_printk("irqchip %s pin %u",
__print_symbolic(__entry->irqchip, kvm_irqchips),
 __entry->pin)
#else
TP_printk("irqchip %d pin %u", __entry->irqchip, __entry->pin)
#endif
);
#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */
#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
#define KVM_TRACE_MMIO_READ 1
#define KVM_TRACE_MMIO_WRITE 2
#define kvm_trace_symbol_mmio \
{ KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
{ KVM_TRACE_MMIO_READ, "read" }, \
{ KVM_TRACE_MMIO_WRITE, "write" }
TRACE_EVENT(kvm_mmio,
	TP_PROTO(int type, int len, u64 gpa, u64 val),
	TP_ARGS(type, len, gpa, val),
TP_STRUCT__entry(
__field(	u32,	type		)
__field(	u32,	len		)
__field(	u64,	gpa		)
__field(	u64,	val		)
),
TP_fast_assign(
__entry->type		= type;
__entry->len		= len;
__entry->gpa		= gpa;
__entry->val		= val;
),
TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
__print_symbolic(__entry->type, kvm_trace_symbol_mmio),
__entry->len, __entry->gpa, __entry->val)
);
#define kvm_fpu_load_symbol	\
{0, "unload"},		\
{1, "load"}
TRACE_EVENT(kvm_fpu,
	TP_PROTO(int load),
	TP_ARGS(load),
TP_STRUCT__entry(
__field(	u32,	        load		)
),
TP_fast_assign(
__entry->load		= load;
),
TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
);
/* hva is reconstructed from the slot's base gfn and userspace address. */
TRACE_EVENT(kvm_age_page,
	TP_PROTO(ulong gfn, int level, struct kvm_memory_slot *slot, int ref),
	TP_ARGS(gfn, level, slot, ref),
TP_STRUCT__entry(
__field(	u64,	hva		)
__field(	u64,	gfn		)
__field(	u8,	level		)
__field(	u8,	referenced	)
),
TP_fast_assign(
__entry->gfn		= gfn;
__entry->level		= level;
__entry->hva		= ((gfn - slot->base_gfn) <<
PAGE_SHIFT) + slot->userspace_addr;
__entry->referenced	= ref;
),
TP_printk("hva %llx gfn %llx level %u %s",
__entry->hva, __entry->gfn, __entry->level,
__entry->referenced ? "YOUNG" : "OLD")
);
#ifdef CONFIG_KVM_ASYNC_PF
DECLARE_EVENT_CLASS(kvm_async_get_page_class,
TP_PROTO(u64 gva, u64 gfn),
TP_ARGS(gva, gfn),
TP_STRUCT__entry(
__field(__u64, gva)
__field(u64, gfn)
),
TP_fast_assign(
__entry->gva = gva;
__entry->gfn = gfn;
),
TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
);
DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,
TP_PROTO(u64 gva, u64 gfn),
TP_ARGS(gva, gfn)
);
DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,
TP_PROTO(u64 gva, u64 gfn),
TP_ARGS(gva, gfn)
);
DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,
TP_PROTO(u64 token, u64 gva),
TP_ARGS(token, gva),
TP_STRUCT__entry(
__field(__u64, token)
__field(__u64, gva)
),
TP_fast_assign(
__entry->token = token;
__entry->gva = gva;
),
TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
);
DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,
TP_PROTO(u64 token, u64 gva),
TP_ARGS(token, gva)
);
DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
TP_PROTO(u64 token, u64 gva),
TP_ARGS(token, gva)
);
TRACE_EVENT(
kvm_async_pf_completed,
TP_PROTO(unsigned long address, u64 gva),
TP_ARGS(address, gva),
TP_STRUCT__entry(
__field(unsigned long, address)
__field(u64, gva)
),
TP_fast_assign(
__entry->address = address;
__entry->gva = gva;
),
TP_printk("gva %#llx address %#lx",  __entry->gva,
__entry->address)
);
#endif
#endif /* _TRACE_KVM_MAIN_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,86 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM lock
#if !defined(_TRACE_LOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_LOCK_H
#include <linux/lockdep.h>
#include <linux/tracepoint.h>
#ifdef CONFIG_LOCKDEP
/*
 * lock_acquire: @flags packs two bits — bit 0 set for a trylock, bit 1 set
 * for a read acquisition; decoded as "try "/"read " prefixes at print time.
 */
TRACE_EVENT(lock_acquire,
TP_PROTO(struct lockdep_map *lock, unsigned int subclass,
int trylock, int read, int check,
struct lockdep_map *next_lock, unsigned long ip),
TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip),
TP_STRUCT__entry(
__field(unsigned int, flags)
__string(name, lock->name)
__field(void *, lockdep_addr)
),
TP_fast_assign(
__entry->flags = (trylock ? 1 : 0) | (read ? 2 : 0);
__assign_str(name, lock->name);
__entry->lockdep_addr = lock;
),
TP_printk("%p %s%s%s", __entry->lockdep_addr,
(__entry->flags & 1) ? "try " : "",
(__entry->flags & 2) ? "read " : "",
__get_str(name))
);
/* lock - event class for release/contended/acquired: lockdep map + name. */
DECLARE_EVENT_CLASS(lock,
TP_PROTO(struct lockdep_map *lock, unsigned long ip),
TP_ARGS(lock, ip),
TP_STRUCT__entry(
__string(	name,	lock->name	)
__field(	void *, lockdep_addr	)
),
TP_fast_assign(
__assign_str(name, lock->name);
__entry->lockdep_addr = lock;
),
TP_printk("%p %s",  __entry->lockdep_addr, __get_str(name))
);
DEFINE_EVENT(lock, lock_release,
TP_PROTO(struct lockdep_map *lock, unsigned long ip),
TP_ARGS(lock, ip)
);
#ifdef CONFIG_LOCK_STAT
DEFINE_EVENT(lock, lock_contended,
TP_PROTO(struct lockdep_map *lock, unsigned long ip),
TP_ARGS(lock, ip)
);
DEFINE_EVENT(lock, lock_acquired,
TP_PROTO(struct lockdep_map *lock, unsigned long ip),
TP_ARGS(lock, ip)
);
#endif
#endif
#endif /* _TRACE_LOCK_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,69 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mce
#if !defined(_TRACE_MCE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_MCE_H
#include <linux/ktime.h>
#include <linux/tracepoint.h>
#include <asm/mce.h>
/*
 * mce_record - dump all fields of a machine-check record (struct mce)
 * when an MCE is logged.
 */
TRACE_EVENT(mce_record,
TP_PROTO(struct mce *m),
TP_ARGS(m),
TP_STRUCT__entry(
__field( u64, mcgcap )
__field( u64, mcgstatus )
__field( u64, status )
__field( u64, addr )
__field( u64, misc )
__field( u64, ip )
__field( u64, tsc )
__field( u64, walltime )
__field( u32, cpu )
__field( u32, cpuid )
__field( u32, apicid )
__field( u32, socketid )
__field( u8, cs )
__field( u8, bank )
__field( u8, cpuvendor )
),
TP_fast_assign(
__entry->mcgcap = m->mcgcap;
__entry->mcgstatus = m->mcgstatus;
__entry->status = m->status;
__entry->addr = m->addr;
__entry->misc = m->misc;
__entry->ip = m->ip;
__entry->tsc = m->tsc;
__entry->walltime = m->time;
/* extcpu is the logical CPU the error was reported on */
__entry->cpu = m->extcpu;
__entry->cpuid = m->cpuid;
__entry->apicid = m->apicid;
__entry->socketid = m->socketid;
__entry->cs = m->cs;
__entry->bank = m->bank;
__entry->cpuvendor = m->cpuvendor;
),
TP_printk("CPU: %d, MCGc/s: %llx/%llx, MC%d: %016Lx, ADDR/MISC: %016Lx/%016Lx, RIP: %02x:<%016Lx>, TSC: %llx, PROCESSOR: %u:%x, TIME: %llu, SOCKET: %u, APIC: %x",
__entry->cpu,
__entry->mcgcap, __entry->mcgstatus,
__entry->bank, __entry->status,
__entry->addr, __entry->misc,
__entry->cs, __entry->ip,
__entry->tsc,
__entry->cpuvendor, __entry->cpuid,
__entry->walltime,
__entry->socketid,
__entry->apicid)
);
#endif /* _TRACE_MCE_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

223
include/trace/events/mfc.h Normal file
View file

@ -0,0 +1,223 @@
/*
* Copyright (C) 2013 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mfc
#if !defined(_TRACE_MFC_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_MFC_H
#include <linux/types.h>
#include <linux/tracepoint.h>
/*
 * mfc_node - template for MFC (video codec) instance open/close events:
 * context number, total instance count, instance type, DRM (secure) flag.
 */
DECLARE_EVENT_CLASS(mfc_node,
TP_PROTO(int ctx_num,
int num_inst,
unsigned int type,
int is_drm),
TP_ARGS(ctx_num, num_inst, type, is_drm),
TP_STRUCT__entry(
__field( int, ctx_num )
__field( int, num_inst )
__field( unsigned int, type )
__field( int, is_drm )
),
TP_fast_assign(
__entry->ctx_num = ctx_num;
__entry->num_inst = num_inst;
__entry->type = type;
__entry->is_drm = is_drm;
),
TP_printk("ctx[%d] total inst=%d, type=%d, %s",
__entry->ctx_num,
__entry->num_inst,
__entry->type,
__entry->is_drm ? "drm" : "normal"
)
);
DEFINE_EVENT(mfc_node, mfc_node_open,
TP_PROTO(int ctx_num,
int num_inst,
unsigned int type,
int is_drm),
TP_ARGS(ctx_num, num_inst, type, is_drm)
);
DEFINE_EVENT(mfc_node, mfc_node_close,
TP_PROTO(int ctx_num,
int num_inst,
unsigned int type,
int is_drm),
TP_ARGS(ctx_num, num_inst, type, is_drm)
);
/*
 * mfc_loadfw - template bracketing MFC firmware load (start/end) with the
 * reserved firmware region size and actual firmware size.
 */
DECLARE_EVENT_CLASS(mfc_loadfw,
TP_PROTO(size_t fw_region_size,
int fw_size),
TP_ARGS(fw_region_size, fw_size),
TP_STRUCT__entry(
__field( size_t, fw_region_size )
__field( int, fw_size )
),
TP_fast_assign(
__entry->fw_region_size = fw_region_size;
__entry->fw_size = fw_size;
),
/* NOTE(review): %ld for a size_t field — %zu would be the exact specifier */
TP_printk("FW region: %ld, size: %d",
__entry->fw_region_size,
__entry->fw_size
)
);
DEFINE_EVENT(mfc_loadfw, mfc_loadfw_start,
TP_PROTO(size_t fw_region_size,
int fw_size),
TP_ARGS(fw_region_size, fw_size)
);
DEFINE_EVENT(mfc_loadfw, mfc_loadfw_end,
TP_PROTO(size_t fw_region_size,
int fw_size),
TP_ARGS(fw_region_size, fw_size)
);
/*
 * mfc_dcpp - template bracketing DRM content path protection setup
 * (start/end): context, SMC support flag, and DRM firmware load status.
 */
DECLARE_EVENT_CLASS(mfc_dcpp,
TP_PROTO(int ctx_num,
int is_support_smc,
int drm_fw_status),
TP_ARGS(ctx_num, is_support_smc, drm_fw_status),
TP_STRUCT__entry(
__field( int, ctx_num )
__field( int, is_support_smc )
__field( int, drm_fw_status )
),
TP_fast_assign(
__entry->ctx_num = ctx_num;
__entry->is_support_smc = is_support_smc;
__entry->drm_fw_status = drm_fw_status;
),
TP_printk("ctx[%d] support drm=%d, drm fw %s",
__entry->ctx_num,
__entry->is_support_smc,
__entry->drm_fw_status ? "loaded" : "not-loaded"
)
);
DEFINE_EVENT(mfc_dcpp, mfc_dcpp_start,
TP_PROTO(int ctx_num,
int is_support_smc,
int drm_fw_status),
TP_ARGS(ctx_num, is_support_smc, drm_fw_status)
);
DEFINE_EVENT(mfc_dcpp, mfc_dcpp_end,
TP_PROTO(int ctx_num,
int is_support_smc,
int drm_fw_status),
TP_ARGS(ctx_num, is_support_smc, drm_fw_status)
);
/*
 * mfc_frame - template for per-frame processing events (start / top-half /
 * bottom-half): context, interrupt reason code, frame type, DRM flag.
 */
DECLARE_EVENT_CLASS(mfc_frame,
TP_PROTO(int ctx_num,
int reason,
int type,
int is_drm),
TP_ARGS(ctx_num, reason, type, is_drm),
TP_STRUCT__entry(
__field( int, ctx_num )
__field( int, reason )
__field( int, type )
__field( int, is_drm )
),
TP_fast_assign(
__entry->ctx_num = ctx_num;
__entry->reason = reason;
__entry->type = type;
__entry->is_drm = is_drm;
),
TP_printk("ctx[%d] reason=%d, type=%d, %s",
__entry->ctx_num,
__entry->reason,
__entry->type,
__entry->is_drm ? "drm" : "normal"
)
);
DEFINE_EVENT(mfc_frame, mfc_frame_start,
TP_PROTO(int ctx_num,
int reason,
int type,
int is_drm),
TP_ARGS(ctx_num, reason, type, is_drm)
);
DEFINE_EVENT(mfc_frame, mfc_frame_top,
TP_PROTO(int ctx_num,
int reason,
int type,
int is_drm),
TP_ARGS(ctx_num, reason, type, is_drm)
);
DEFINE_EVENT(mfc_frame, mfc_frame_bottom,
TP_PROTO(int ctx_num,
int reason,
int type,
int is_drm),
TP_ARGS(ctx_num, reason, type, is_drm)
);
#endif /* _TRACE_MFC_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,80 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM migrate
#if !defined(_TRACE_MIGRATE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_MIGRATE_H
#include <linux/tracepoint.h>
#define MIGRATE_MODE \
{MIGRATE_ASYNC, "MIGRATE_ASYNC"}, \
{MIGRATE_SYNC_LIGHT, "MIGRATE_SYNC_LIGHT"}, \
{MIGRATE_SYNC, "MIGRATE_SYNC"}
#define MIGRATE_REASON \
{MR_COMPACTION, "compaction"}, \
{MR_MEMORY_FAILURE, "memory_failure"}, \
{MR_MEMORY_HOTPLUG, "memory_hotplug"}, \
{MR_SYSCALL, "syscall_or_cpuset"}, \
{MR_MEMPOLICY_MBIND, "mempolicy_mbind"}, \
{MR_NUMA_MISPLACED, "numa_misplaced"}, \
{MR_CMA, "cma"}
/*
 * mm_migrate_pages - summary of a page-migration batch: success/failure
 * counts plus symbolic migrate mode and reason.
 */
TRACE_EVENT(mm_migrate_pages,
TP_PROTO(unsigned long succeeded, unsigned long failed,
enum migrate_mode mode, int reason),
TP_ARGS(succeeded, failed, mode, reason),
TP_STRUCT__entry(
__field( unsigned long, succeeded)
__field( unsigned long, failed)
__field( enum migrate_mode, mode)
__field( int, reason)
),
TP_fast_assign(
__entry->succeeded = succeeded;
__entry->failed = failed;
__entry->mode = mode;
__entry->reason = reason;
),
TP_printk("nr_succeeded=%lu nr_failed=%lu mode=%s reason=%s",
__entry->succeeded,
__entry->failed,
__print_symbolic(__entry->mode, MIGRATE_MODE),
__print_symbolic(__entry->reason, MIGRATE_REASON))
);
/*
 * mm_numa_migrate_ratelimit - a NUMA balancing migration was rate-limited;
 * records the task and how many pages were held back for dst_nid.
 */
TRACE_EVENT(mm_numa_migrate_ratelimit,
TP_PROTO(struct task_struct *p, int dst_nid, unsigned long nr_pages),
TP_ARGS(p, dst_nid, nr_pages),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN)
__field( pid_t, pid)
__field( int, dst_nid)
__field( unsigned long, nr_pages)
),
TP_fast_assign(
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
__entry->dst_nid = dst_nid;
__entry->nr_pages = nr_pages;
),
TP_printk("comm=%s pid=%d dst_nid=%d nr_pages=%lu",
__entry->comm,
__entry->pid,
__entry->dst_nid,
__entry->nr_pages)
);
#endif /* _TRACE_MIGRATE_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,91 @@
/*
* Copyright (C) 2013 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mmc
#if !defined(_TRACE_MMC_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_MMC_H
#include <linux/tracepoint.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/core.h>
/*
* Unconditional logging of mmc block erase operations,
* including cmd, address, size
*/
/*
 * mmc_blk_erase_class - template bracketing an MMC block erase
 * (start/end) with command, address, and size.
 */
DECLARE_EVENT_CLASS(mmc_blk_erase_class,
TP_PROTO(unsigned int cmd, unsigned int addr, unsigned int size),
TP_ARGS(cmd, addr, size),
TP_STRUCT__entry(
__field(unsigned int, cmd)
__field(unsigned int, addr)
__field(unsigned int, size)
),
TP_fast_assign(
__entry->cmd = cmd;
__entry->addr = addr;
__entry->size = size;
),
TP_printk("cmd=%u,addr=0x%08x,size=0x%08x",
__entry->cmd, __entry->addr, __entry->size)
);
DEFINE_EVENT(mmc_blk_erase_class, mmc_blk_erase_start,
TP_PROTO(unsigned int cmd, unsigned int addr, unsigned int size),
TP_ARGS(cmd, addr, size));
DEFINE_EVENT(mmc_blk_erase_class, mmc_blk_erase_end,
TP_PROTO(unsigned int cmd, unsigned int addr, unsigned int size),
TP_ARGS(cmd, addr, size));
/*
* Logging of start of read or write mmc block operation,
* including cmd, address, size
*/
/*
 * mmc_blk_rw_class - template for MMC block read/write start/end; size is
 * taken from data->blocks.  The conditional variants fire only for
 * multi-block read/write commands with non-NULL data.
 */
DECLARE_EVENT_CLASS(mmc_blk_rw_class,
TP_PROTO(unsigned int cmd, unsigned int addr, struct mmc_data *data),
TP_ARGS(cmd, addr, data),
TP_STRUCT__entry(
__field(unsigned int, cmd)
__field(unsigned int, addr)
__field(unsigned int, size)
),
TP_fast_assign(
__entry->cmd = cmd;
__entry->addr = addr;
__entry->size = data->blocks;
),
TP_printk("cmd=%u,addr=0x%08x,size=0x%08x",
__entry->cmd, __entry->addr, __entry->size)
);
DEFINE_EVENT_CONDITION(mmc_blk_rw_class, mmc_blk_rw_start,
TP_PROTO(unsigned int cmd, unsigned int addr, struct mmc_data *data),
TP_ARGS(cmd, addr, data),
TP_CONDITION(((cmd == MMC_READ_MULTIPLE_BLOCK) ||
(cmd == MMC_WRITE_MULTIPLE_BLOCK)) &&
data));
DEFINE_EVENT_CONDITION(mmc_blk_rw_class, mmc_blk_rw_end,
TP_PROTO(unsigned int cmd, unsigned int addr, struct mmc_data *data),
TP_ARGS(cmd, addr, data),
TP_CONDITION(((cmd == MMC_READ_MULTIPLE_BLOCK) ||
(cmd == MMC_WRITE_MULTIPLE_BLOCK)) &&
data));
#endif /* _TRACE_MMC_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,133 @@
/*
* Because linux/module.h has tracepoints in the header, and ftrace.h
* used to include this file, define_trace.h includes linux/module.h
* But we do not want the module.h to override the TRACE_SYSTEM macro
* variable that define_trace.h is processing, so we only set it
* when module events are being processed, which would happen when
* CREATE_TRACE_POINTS is defined.
*/
#ifdef CREATE_TRACE_POINTS
#undef TRACE_SYSTEM
#define TRACE_SYSTEM module
#endif
#if !defined(_TRACE_MODULE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_MODULE_H
#include <linux/tracepoint.h>
#ifdef CONFIG_MODULES
struct module;
#define show_module_flags(flags) __print_flags(flags, "", \
{ (1UL << TAINT_PROPRIETARY_MODULE), "P" }, \
{ (1UL << TAINT_OOT_MODULE), "O" }, \
{ (1UL << TAINT_FORCED_MODULE), "F" }, \
{ (1UL << TAINT_CRAP), "C" }, \
{ (1UL << TAINT_UNSIGNED_MODULE), "E" })
/* module_load - a module was loaded; logs its name and taint flags. */
TRACE_EVENT(module_load,
TP_PROTO(struct module *mod),
TP_ARGS(mod),
TP_STRUCT__entry(
__field( unsigned int, taints )
__string( name, mod->name )
),
TP_fast_assign(
__entry->taints = mod->taints;
__assign_str(name, mod->name);
),
TP_printk("%s %s", __get_str(name), show_module_flags(__entry->taints))
);
/* module_free - a module is being freed (unloaded); logs its name. */
TRACE_EVENT(module_free,
TP_PROTO(struct module *mod),
TP_ARGS(mod),
TP_STRUCT__entry(
__string( name, mod->name )
),
TP_fast_assign(
__assign_str(name, mod->name);
),
TP_printk("%s", __get_str(name))
);
#ifdef CONFIG_MODULE_UNLOAD
/* trace_module_get/put are only used if CONFIG_MODULE_UNLOAD is defined */
/*
 * module_refcnt - template for module_get/module_put; the net refcount is
 * derived from the per-cpu incs/decs counters at trace time.
 */
DECLARE_EVENT_CLASS(module_refcnt,
TP_PROTO(struct module *mod, unsigned long ip),
TP_ARGS(mod, ip),
TP_STRUCT__entry(
__field( unsigned long, ip )
__field( int, refcnt )
__string( name, mod->name )
),
TP_fast_assign(
__entry->ip = ip;
__entry->refcnt = __this_cpu_read(mod->refptr->incs) - __this_cpu_read(mod->refptr->decs);
__assign_str(name, mod->name);
),
TP_printk("%s call_site=%pf refcnt=%d",
__get_str(name), (void *)__entry->ip, __entry->refcnt)
);
DEFINE_EVENT(module_refcnt, module_get,
TP_PROTO(struct module *mod, unsigned long ip),
TP_ARGS(mod, ip)
);
DEFINE_EVENT(module_refcnt, module_put,
TP_PROTO(struct module *mod, unsigned long ip),
TP_ARGS(mod, ip)
);
#endif /* CONFIG_MODULE_UNLOAD */
/*
 * module_request - userspace modprobe was requested for a module name;
 * wait indicates a synchronous request, ip is the call site.
 */
TRACE_EVENT(module_request,
TP_PROTO(char *name, bool wait, unsigned long ip),
TP_ARGS(name, wait, ip),
TP_STRUCT__entry(
__field( unsigned long, ip )
__field( bool, wait )
__string( name, name )
),
TP_fast_assign(
__entry->ip = ip;
__entry->wait = wait;
__assign_str(name, name);
),
TP_printk("%s wait=%d call_site=%pf",
__get_str(name), (int)__entry->wait, (void *)__entry->ip)
);
#endif /* CONFIG_MODULES */
#endif /* _TRACE_MODULE_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,38 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM napi
#if !defined(_TRACE_NAPI_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_NAPI_H_
#include <linux/netdevice.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#define NO_DEV "(no_device)"
/*
 * napi_poll - a NAPI poll ran; logs the napi struct pointer and the device
 * name (NO_DEV placeholder when napi->dev is NULL, e.g. netpoll).
 */
TRACE_EVENT(napi_poll,
TP_PROTO(struct napi_struct *napi),
TP_ARGS(napi),
TP_STRUCT__entry(
__field( struct napi_struct *, napi)
__string( dev_name, napi->dev ? napi->dev->name : NO_DEV)
),
TP_fast_assign(
__entry->napi = napi;
__assign_str(dev_name, napi->dev ? napi->dev->name : NO_DEV);
),
TP_printk("napi poll on napi struct %p for device %s",
__entry->napi, __get_str(dev_name))
);
#undef NO_DEV
#endif /* _TRACE_NAPI_H_ */
/* This part must be outside protection */
#include <trace/define_trace.h>

242
include/trace/events/net.h Normal file
View file

@ -0,0 +1,242 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM net
#if !defined(_TRACE_NET_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_NET_H
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tracepoint.h>
/*
 * net_dev_start_xmit - snapshot of an skb just before the driver's
 * ndo_start_xmit: VLAN, protocol, checksum, offsets and GSO metadata.
 */
TRACE_EVENT(net_dev_start_xmit,
TP_PROTO(const struct sk_buff *skb, const struct net_device *dev),
TP_ARGS(skb, dev),
TP_STRUCT__entry(
__string( name, dev->name )
__field( u16, queue_mapping )
__field( const void *, skbaddr )
__field( bool, vlan_tagged )
__field( u16, vlan_proto )
__field( u16, vlan_tci )
__field( u16, protocol )
__field( u8, ip_summed )
__field( unsigned int, len )
__field( unsigned int, data_len )
__field( int, network_offset )
__field( bool, transport_offset_valid)
__field( int, transport_offset)
__field( u8, tx_flags )
__field( u16, gso_size )
__field( u16, gso_segs )
__field( u16, gso_type )
),
TP_fast_assign(
__assign_str(name, dev->name);
__entry->queue_mapping = skb->queue_mapping;
__entry->skbaddr = skb;
__entry->vlan_tagged = vlan_tx_tag_present(skb);
__entry->vlan_proto = ntohs(skb->vlan_proto);
__entry->vlan_tci = vlan_tx_tag_get(skb);
__entry->protocol = ntohs(skb->protocol);
__entry->ip_summed = skb->ip_summed;
__entry->len = skb->len;
__entry->data_len = skb->data_len;
__entry->network_offset = skb_network_offset(skb);
__entry->transport_offset_valid =
skb_transport_header_was_set(skb);
__entry->transport_offset = skb_transport_offset(skb);
__entry->tx_flags = skb_shinfo(skb)->tx_flags;
__entry->gso_size = skb_shinfo(skb)->gso_size;
__entry->gso_segs = skb_shinfo(skb)->gso_segs;
__entry->gso_type = skb_shinfo(skb)->gso_type;
),
TP_printk("dev=%s queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d len=%u data_len=%u network_offset=%d transport_offset_valid=%d transport_offset=%d tx_flags=%d gso_size=%d gso_segs=%d gso_type=%#x",
__get_str(name), __entry->queue_mapping, __entry->skbaddr,
__entry->vlan_tagged, __entry->vlan_proto, __entry->vlan_tci,
__entry->protocol, __entry->ip_summed, __entry->len,
__entry->data_len,
__entry->network_offset, __entry->transport_offset_valid,
__entry->transport_offset, __entry->tx_flags,
__entry->gso_size, __entry->gso_segs, __entry->gso_type)
);
/*
 * net_dev_xmit - result of a transmit attempt; skb_len is passed in
 * separately because the skb may already be freed on success.
 */
TRACE_EVENT(net_dev_xmit,
TP_PROTO(struct sk_buff *skb,
int rc,
struct net_device *dev,
unsigned int skb_len),
TP_ARGS(skb, rc, dev, skb_len),
TP_STRUCT__entry(
__field( void *, skbaddr )
__field( unsigned int, len )
__field( int, rc )
__string( name, dev->name )
),
TP_fast_assign(
__entry->skbaddr = skb;
__entry->len = skb_len;
__entry->rc = rc;
__assign_str(name, dev->name);
),
TP_printk("dev=%s skbaddr=%p len=%u rc=%d",
__get_str(name), __entry->skbaddr, __entry->len, __entry->rc)
);
/*
 * net_dev_template - shared template for queue/receive events that only
 * need the skb pointer, its length, and the device name.
 */
DECLARE_EVENT_CLASS(net_dev_template,
TP_PROTO(struct sk_buff *skb),
TP_ARGS(skb),
TP_STRUCT__entry(
__field( void *, skbaddr )
__field( unsigned int, len )
__string( name, skb->dev->name )
),
TP_fast_assign(
__entry->skbaddr = skb;
__entry->len = skb->len;
__assign_str(name, skb->dev->name);
),
TP_printk("dev=%s skbaddr=%p len=%u",
__get_str(name), __entry->skbaddr, __entry->len)
) /* NOTE(review): no trailing ';' here — matches upstream net.h, but verify */
DEFINE_EVENT(net_dev_template, net_dev_queue,
TP_PROTO(struct sk_buff *skb),
TP_ARGS(skb)
);
DEFINE_EVENT(net_dev_template, netif_receive_skb,
TP_PROTO(struct sk_buff *skb),
TP_ARGS(skb)
);
DEFINE_EVENT(net_dev_template, netif_rx,
TP_PROTO(struct sk_buff *skb),
TP_ARGS(skb)
);
/*
 * net_dev_rx_verbose_template - detailed receive-path snapshot shared by
 * the napi_gro_*/netif_rx*/netif_receive_skb entry events: VLAN, hash,
 * lengths, MAC header position, and GSO metadata.
 */
DECLARE_EVENT_CLASS(net_dev_rx_verbose_template,
TP_PROTO(const struct sk_buff *skb),
TP_ARGS(skb),
TP_STRUCT__entry(
__string( name, skb->dev->name )
__field( unsigned int, napi_id )
__field( u16, queue_mapping )
__field( const void *, skbaddr )
__field( bool, vlan_tagged )
__field( u16, vlan_proto )
__field( u16, vlan_tci )
__field( u16, protocol )
__field( u8, ip_summed )
__field( u32, hash )
__field( bool, l4_hash )
__field( unsigned int, len )
__field( unsigned int, data_len )
__field( unsigned int, truesize )
__field( bool, mac_header_valid)
__field( int, mac_header )
__field( unsigned char, nr_frags )
__field( u16, gso_size )
__field( u16, gso_type )
),
TP_fast_assign(
__assign_str(name, skb->dev->name);
/* napi_id only exists on the skb with busy-poll support compiled in */
#ifdef CONFIG_NET_RX_BUSY_POLL
__entry->napi_id = skb->napi_id;
#else
__entry->napi_id = 0;
#endif
__entry->queue_mapping = skb->queue_mapping;
__entry->skbaddr = skb;
__entry->vlan_tagged = vlan_tx_tag_present(skb);
__entry->vlan_proto = ntohs(skb->vlan_proto);
__entry->vlan_tci = vlan_tx_tag_get(skb);
__entry->protocol = ntohs(skb->protocol);
__entry->ip_summed = skb->ip_summed;
__entry->hash = skb->hash;
__entry->l4_hash = skb->l4_hash;
__entry->len = skb->len;
__entry->data_len = skb->data_len;
__entry->truesize = skb->truesize;
__entry->mac_header_valid = skb_mac_header_was_set(skb);
__entry->mac_header = skb_mac_header(skb) - skb->data;
__entry->nr_frags = skb_shinfo(skb)->nr_frags;
__entry->gso_size = skb_shinfo(skb)->gso_size;
__entry->gso_type = skb_shinfo(skb)->gso_type;
),
TP_printk("dev=%s napi_id=%#x queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d hash=0x%08x l4_hash=%d len=%u data_len=%u truesize=%u mac_header_valid=%d mac_header=%d nr_frags=%d gso_size=%d gso_type=%#x",
__get_str(name), __entry->napi_id, __entry->queue_mapping,
__entry->skbaddr, __entry->vlan_tagged, __entry->vlan_proto,
__entry->vlan_tci, __entry->protocol, __entry->ip_summed,
__entry->hash, __entry->l4_hash, __entry->len,
__entry->data_len, __entry->truesize,
__entry->mac_header_valid, __entry->mac_header,
__entry->nr_frags, __entry->gso_size, __entry->gso_type)
);
DEFINE_EVENT(net_dev_rx_verbose_template, napi_gro_frags_entry,
TP_PROTO(const struct sk_buff *skb),
TP_ARGS(skb)
);
DEFINE_EVENT(net_dev_rx_verbose_template, napi_gro_receive_entry,
TP_PROTO(const struct sk_buff *skb),
TP_ARGS(skb)
);
DEFINE_EVENT(net_dev_rx_verbose_template, netif_receive_skb_entry,
TP_PROTO(const struct sk_buff *skb),
TP_ARGS(skb)
);
DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_entry,
TP_PROTO(const struct sk_buff *skb),
TP_ARGS(skb)
);
DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_ni_entry,
TP_PROTO(const struct sk_buff *skb),
TP_ARGS(skb)
);
#endif /* _TRACE_NET_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,37 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM nmi
#if !defined(_TRACE_NMI_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_NMI_H
#include <linux/ktime.h>
#include <linux/tracepoint.h>
/*
 * nmi_handler - an NMI handler ran; records the handler function, its
 * runtime in nanoseconds, and whether it claimed the NMI.
 */
TRACE_EVENT(nmi_handler,
TP_PROTO(void *handler, s64 delta_ns, int handled),
TP_ARGS(handler, delta_ns, handled),
TP_STRUCT__entry(
__field( void *, handler )
__field( s64, delta_ns)
__field( int, handled )
),
TP_fast_assign(
__entry->handler = handler;
__entry->delta_ns = delta_ns;
__entry->handled = handled;
),
TP_printk("%ps() delta_ns: %lld handled: %d",
__entry->handler,
__entry->delta_ns,
__entry->handled)
);
#endif /* _TRACE_NMI_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,33 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM oom
#if !defined(_TRACE_OOM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_OOM_H
#include <linux/tracepoint.h>
/*
 * oom_score_adj_update - a task's oom_score_adj changed; logs pid, comm,
 * and the new value read from task->signal.
 */
TRACE_EVENT(oom_score_adj_update,
TP_PROTO(struct task_struct *task),
TP_ARGS(task),
TP_STRUCT__entry(
__field( pid_t, pid)
__array( char, comm, TASK_COMM_LEN )
__field( short, oom_score_adj)
),
TP_fast_assign(
__entry->pid = task->pid;
memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
__entry->oom_score_adj = task->signal->oom_score_adj;
),
TP_printk("pid=%d comm=%s oom_score_adj=%hd",
__entry->pid, __entry->comm, __entry->oom_score_adj)
);
#endif
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,87 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM pagemap
#if !defined(_TRACE_PAGEMAP_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_PAGEMAP_H
#include <linux/tracepoint.h>
#include <linux/mm.h>
#define PAGEMAP_MAPPED 0x0001u
#define PAGEMAP_ANONYMOUS 0x0002u
#define PAGEMAP_FILE 0x0004u
#define PAGEMAP_SWAPCACHE 0x0008u
#define PAGEMAP_SWAPBACKED 0x0010u
#define PAGEMAP_MAPPEDDISK 0x0020u
#define PAGEMAP_BUFFERS 0x0040u
#define trace_pagemap_flags(page) ( \
(PageAnon(page) ? PAGEMAP_ANONYMOUS : PAGEMAP_FILE) | \
(page_mapped(page) ? PAGEMAP_MAPPED : 0) | \
(PageSwapCache(page) ? PAGEMAP_SWAPCACHE : 0) | \
(PageSwapBacked(page) ? PAGEMAP_SWAPBACKED : 0) | \
(PageMappedToDisk(page) ? PAGEMAP_MAPPEDDISK : 0) | \
(page_has_private(page) ? PAGEMAP_BUFFERS : 0) \
)
/*
 * mm_lru_insertion - a page was inserted on an LRU list; flags are the
 * PAGEMAP_* bits computed by trace_pagemap_flags() above.
 */
TRACE_EVENT(mm_lru_insertion,
TP_PROTO(
struct page *page,
int lru
),
TP_ARGS(page, lru),
TP_STRUCT__entry(
__field(struct page *, page )
__field(unsigned long, pfn )
__field(int, lru )
__field(unsigned long, flags )
),
TP_fast_assign(
__entry->page = page;
__entry->pfn = page_to_pfn(page);
__entry->lru = lru;
__entry->flags = trace_pagemap_flags(page);
),
/* Flag format is based on page-types.c formatting for pagemap */
TP_printk("page=%p pfn=%lu lru=%d flags=%s%s%s%s%s%s",
__entry->page,
__entry->pfn,
__entry->lru,
__entry->flags & PAGEMAP_MAPPED ? "M" : " ",
__entry->flags & PAGEMAP_ANONYMOUS ? "a" : "f",
__entry->flags & PAGEMAP_SWAPCACHE ? "s" : " ",
__entry->flags & PAGEMAP_SWAPBACKED ? "b" : " ",
__entry->flags & PAGEMAP_MAPPEDDISK ? "d" : " ",
__entry->flags & PAGEMAP_BUFFERS ? "B" : " ")
);
/* mm_lru_activate - a page was moved to the active LRU list. */
TRACE_EVENT(mm_lru_activate,
TP_PROTO(struct page *page),
TP_ARGS(page),
TP_STRUCT__entry(
__field(struct page *, page )
__field(unsigned long, pfn )
),
TP_fast_assign(
__entry->page = page;
__entry->pfn = page_to_pfn(page);
),
/* Flag format is based on page-types.c formatting for pagemap */
TP_printk("page=%p pfn=%lu", __entry->page, __entry->pfn)
);
#endif /* _TRACE_PAGEMAP_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,538 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM power
#if !defined(_TRACE_POWER_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_POWER_H
#include <linux/ktime.h>
#include <linux/pm_qos.h>
#include <linux/tracepoint.h>
#include <linux/ftrace_event.h>
#define TPS(x) tracepoint_string(x)
/*
 * cpu - template for per-CPU state events (idle state, frequency):
 * a state value and the CPU id it applies to.
 */
DECLARE_EVENT_CLASS(cpu,
TP_PROTO(unsigned int state, unsigned int cpu_id),
TP_ARGS(state, cpu_id),
TP_STRUCT__entry(
__field( u32, state )
__field( u32, cpu_id )
),
TP_fast_assign(
__entry->state = state;
__entry->cpu_id = cpu_id;
),
TP_printk("state=%d cpu_id=%lu", (int)__entry->state,
(unsigned long)__entry->cpu_id)
);
/* cpu_idle - CPU enters (state = target C-state) or exits (PWR_EVENT_EXIT) idle. */
DEFINE_EVENT(cpu, cpu_idle,
TP_PROTO(unsigned int state, unsigned int cpu_id),
TP_ARGS(state, cpu_id)
);
/*
 * pstate_sample - intel_pstate governor sample: busy estimates, selected
 * P-state, raw MPERF/APERF counters and the resulting frequency.
 */
TRACE_EVENT(pstate_sample,
TP_PROTO(u32 core_busy,
u32 scaled_busy,
u32 state,
u64 mperf,
u64 aperf,
u32 freq
),
TP_ARGS(core_busy,
scaled_busy,
state,
mperf,
aperf,
freq
),
TP_STRUCT__entry(
__field(u32, core_busy)
__field(u32, scaled_busy)
__field(u32, state)
__field(u64, mperf)
__field(u64, aperf)
__field(u32, freq)
),
TP_fast_assign(
__entry->core_busy = core_busy;
__entry->scaled_busy = scaled_busy;
__entry->state = state;
__entry->mperf = mperf;
__entry->aperf = aperf;
__entry->freq = freq;
),
TP_printk("core_busy=%lu scaled=%lu state=%lu mperf=%llu aperf=%llu freq=%lu ",
(unsigned long)__entry->core_busy,
(unsigned long)__entry->scaled_busy,
(unsigned long)__entry->state,
(unsigned long long)__entry->mperf,
(unsigned long long)__entry->aperf,
(unsigned long)__entry->freq
)
);
/* This file can get included multiple times, TRACE_HEADER_MULTI_READ at top */
#ifndef _PWR_EVENT_AVOID_DOUBLE_DEFINING
#define _PWR_EVENT_AVOID_DOUBLE_DEFINING
#define PWR_EVENT_EXIT -1
#endif
#define pm_verb_symbolic(event) \
__print_symbolic(event, \
{ PM_EVENT_SUSPEND, "suspend" }, \
{ PM_EVENT_RESUME, "resume" }, \
{ PM_EVENT_FREEZE, "freeze" }, \
{ PM_EVENT_QUIESCE, "quiesce" }, \
{ PM_EVENT_HIBERNATE, "hibernate" }, \
{ PM_EVENT_THAW, "thaw" }, \
{ PM_EVENT_RESTORE, "restore" }, \
{ PM_EVENT_RECOVER, "recover" })
/* cpu_frequency - a CPU changed frequency; state is the new frequency. */
DEFINE_EVENT(cpu, cpu_frequency,
TP_PROTO(unsigned int frequency, unsigned int cpu_id),
TP_ARGS(frequency, cpu_id)
);
/*
 * cpu_frequency_limits - the cpufreq policy min/max limits changed for a
 * CPU; records both limits and the CPU id.
 */
TRACE_EVENT(cpu_frequency_limits,
TP_PROTO(unsigned int max_freq, unsigned int min_freq,
unsigned int cpu_id),
TP_ARGS(max_freq, min_freq, cpu_id),
TP_STRUCT__entry(
__field( u32, min_freq )
__field( u32, max_freq )
__field( u32, cpu_id )
),
TP_fast_assign(
__entry->min_freq = min_freq;
/* BUGFIX: was "= min_freq", which made the traced max always equal the min */
__entry->max_freq = max_freq;
__entry->cpu_id = cpu_id;
),
TP_printk("min=%lu max=%lu cpu_id=%lu",
(unsigned long)__entry->min_freq,
(unsigned long)__entry->max_freq,
(unsigned long)__entry->cpu_id)
);
/*
 * device_pm_callback_start - a device PM callback is about to run; records
 * device/driver/parent names, the pm_ops phase string, and the PM event.
 */
TRACE_EVENT(device_pm_callback_start,
TP_PROTO(struct device *dev, const char *pm_ops, int event),
TP_ARGS(dev, pm_ops, event),
TP_STRUCT__entry(
__string(device, dev_name(dev))
__string(driver, dev_driver_string(dev))
__string(parent, dev->parent ? dev_name(dev->parent) : "none")
__string(pm_ops, pm_ops ? pm_ops : "none ")
__field(int, event)
),
TP_fast_assign(
__assign_str(device, dev_name(dev));
__assign_str(driver, dev_driver_string(dev));
__assign_str(parent,
dev->parent ? dev_name(dev->parent) : "none");
__assign_str(pm_ops, pm_ops ? pm_ops : "none ");
__entry->event = event;
),
TP_printk("%s %s, parent: %s, %s[%s]", __get_str(driver),
__get_str(device), __get_str(parent), __get_str(pm_ops),
pm_verb_symbolic(__entry->event))
);
/* device_pm_callback_end - the callback returned; error is its result. */
TRACE_EVENT(device_pm_callback_end,
TP_PROTO(struct device *dev, int error),
TP_ARGS(dev, error),
TP_STRUCT__entry(
__string(device, dev_name(dev))
__string(driver, dev_driver_string(dev))
__field(int, error)
),
TP_fast_assign(
__assign_str(device, dev_name(dev));
__assign_str(driver, dev_driver_string(dev));
__entry->error = error;
),
TP_printk("%s %s, err=%d",
__get_str(driver), __get_str(device), __entry->error)
);
/*
 * suspend_resume - marks begin/end of a suspend/resume phase.  action must
 * be a string with static lifetime (only the pointer is stored).
 */
TRACE_EVENT(suspend_resume,
TP_PROTO(const char *action, int val, bool start),
TP_ARGS(action, val, start),
TP_STRUCT__entry(
__field(const char *, action)
__field(int, val)
__field(bool, start)
),
TP_fast_assign(
__entry->action = action;
__entry->val = val;
__entry->start = start;
),
TP_printk("%s[%u] %s", __entry->action, (unsigned int)__entry->val,
(__entry->start)?"begin":"end")
);
/* wakeup_source - template for wakeup source activate/deactivate events. */
DECLARE_EVENT_CLASS(wakeup_source,
TP_PROTO(const char *name, unsigned int state),
TP_ARGS(name, state),
TP_STRUCT__entry(
__string( name, name )
__field( u64, state )
),
TP_fast_assign(
__assign_str(name, name);
__entry->state = state;
),
TP_printk("%s state=0x%lx", __get_str(name),
(unsigned long)__entry->state)
);
DEFINE_EVENT(wakeup_source, wakeup_source_activate,
TP_PROTO(const char *name, unsigned int state),
TP_ARGS(name, state)
);
DEFINE_EVENT(wakeup_source, wakeup_source_deactivate,
TP_PROTO(const char *name, unsigned int state),
TP_ARGS(name, state)
);
/*
* The clock events are used for clock enable/disable and for
* clock rate change
*/
/*
 * clock - template for clock enable/disable/set_rate; state carries either
 * the on/off state or the new rate depending on the event.
 */
DECLARE_EVENT_CLASS(clock,
TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
TP_ARGS(name, state, cpu_id),
TP_STRUCT__entry(
__string( name, name )
__field( u64, state )
__field( u64, cpu_id )
),
TP_fast_assign(
__assign_str(name, name);
__entry->state = state;
__entry->cpu_id = cpu_id;
),
TP_printk("%s state=%lu cpu_id=%lu", __get_str(name),
(unsigned long)__entry->state, (unsigned long)__entry->cpu_id)
);
DEFINE_EVENT(clock, clock_enable,
TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
TP_ARGS(name, state, cpu_id)
);
DEFINE_EVENT(clock, clock_disable,
TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
TP_ARGS(name, state, cpu_id)
);
DEFINE_EVENT(clock, clock_set_rate,
TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
TP_ARGS(name, state, cpu_id)
);
/* clock_set_parent - a clock was reparented; logs child and new parent. */
TRACE_EVENT(clock_set_parent,
TP_PROTO(const char *name, const char *parent_name),
TP_ARGS(name, parent_name),
TP_STRUCT__entry(
__string( name, name )
__string( parent_name, parent_name )
),
TP_fast_assign(
__assign_str(name, name);
__assign_str(parent_name, parent_name);
),
TP_printk("%s parent=%s", __get_str(name), __get_str(parent_name))
);
/*
* The power domain events are used for power domains transitions
*/
/* power_domain - template for power-domain state transitions. */
DECLARE_EVENT_CLASS(power_domain,
TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
TP_ARGS(name, state, cpu_id),
TP_STRUCT__entry(
__string( name, name )
__field( u64, state )
__field( u64, cpu_id )
),
TP_fast_assign(
__assign_str(name, name);
__entry->state = state;
__entry->cpu_id = cpu_id;
),
TP_printk("%s state=%lu cpu_id=%lu", __get_str(name),
(unsigned long)__entry->state, (unsigned long)__entry->cpu_id)
);
DEFINE_EVENT(power_domain, power_domain_target,
TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
TP_ARGS(name, state, cpu_id)
);
/*
* The pm qos events are used for pm qos update
*/
/*
 * pm_qos_request - template for add/update/remove of a PM QoS request on
 * one of the global constraint classes.
 */
DECLARE_EVENT_CLASS(pm_qos_request,
TP_PROTO(int pm_qos_class, s32 value),
TP_ARGS(pm_qos_class, value),
TP_STRUCT__entry(
__field( int, pm_qos_class )
__field( s32, value )
),
TP_fast_assign(
__entry->pm_qos_class = pm_qos_class;
__entry->value = value;
),
TP_printk("pm_qos_class=%s value=%d",
__print_symbolic(__entry->pm_qos_class,
{ PM_QOS_CPU_DMA_LATENCY, "CPU_DMA_LATENCY" },
{ PM_QOS_NETWORK_LATENCY, "NETWORK_LATENCY" },
{ PM_QOS_NETWORK_THROUGHPUT, "NETWORK_THROUGHPUT" }),
__entry->value)
);
DEFINE_EVENT(pm_qos_request, pm_qos_add_request,
TP_PROTO(int pm_qos_class, s32 value),
TP_ARGS(pm_qos_class, value)
);
DEFINE_EVENT(pm_qos_request, pm_qos_update_request,
TP_PROTO(int pm_qos_class, s32 value),
TP_ARGS(pm_qos_class, value)
);
DEFINE_EVENT(pm_qos_request, pm_qos_remove_request,
TP_PROTO(int pm_qos_class, s32 value),
TP_ARGS(pm_qos_class, value)
);
/* pm_qos_update_request_timeout - update variant carrying a timeout (us). */
TRACE_EVENT(pm_qos_update_request_timeout,
TP_PROTO(int pm_qos_class, s32 value, unsigned long timeout_us),
TP_ARGS(pm_qos_class, value, timeout_us),
TP_STRUCT__entry(
__field( int, pm_qos_class )
__field( s32, value )
__field( unsigned long, timeout_us )
),
TP_fast_assign(
__entry->pm_qos_class = pm_qos_class;
__entry->value = value;
__entry->timeout_us = timeout_us;
),
TP_printk("pm_qos_class=%s value=%d, timeout_us=%ld",
__print_symbolic(__entry->pm_qos_class,
{ PM_QOS_CPU_DMA_LATENCY, "CPU_DMA_LATENCY" },
{ PM_QOS_NETWORK_LATENCY, "NETWORK_LATENCY" },
{ PM_QOS_NETWORK_THROUGHPUT, "NETWORK_THROUGHPUT" }),
__entry->value, __entry->timeout_us)
);
/*
 * pm_qos_update - template for the aggregated-target recomputation after a
 * request changes; logs the action and previous/current aggregate values.
 */
DECLARE_EVENT_CLASS(pm_qos_update,
TP_PROTO(enum pm_qos_req_action action, int prev_value, int curr_value),
TP_ARGS(action, prev_value, curr_value),
TP_STRUCT__entry(
__field( enum pm_qos_req_action, action )
__field( int, prev_value )
__field( int, curr_value )
),
TP_fast_assign(
__entry->action = action;
__entry->prev_value = prev_value;
__entry->curr_value = curr_value;
),
TP_printk("action=%s prev_value=%d curr_value=%d",
__print_symbolic(__entry->action,
{ PM_QOS_ADD_REQ, "ADD_REQ" },
{ PM_QOS_UPDATE_REQ, "UPDATE_REQ" },
{ PM_QOS_REMOVE_REQ, "REMOVE_REQ" }),
__entry->prev_value, __entry->curr_value)
);
DEFINE_EVENT(pm_qos_update, pm_qos_update_target,
TP_PROTO(enum pm_qos_req_action action, int prev_value, int curr_value),
TP_ARGS(action, prev_value, curr_value)
);
/* Same event with hex formatting, since flags values are bitmasks. */
DEFINE_EVENT_PRINT(pm_qos_update, pm_qos_update_flags,
TP_PROTO(enum pm_qos_req_action action, int prev_value, int curr_value),
TP_ARGS(action, prev_value, curr_value),
TP_printk("action=%s prev_value=0x%x curr_value=0x%x",
__print_symbolic(__entry->action,
{ PM_QOS_ADD_REQ, "ADD_REQ" },
{ PM_QOS_UPDATE_REQ, "UPDATE_REQ" },
{ PM_QOS_REMOVE_REQ, "REMOVE_REQ" }),
__entry->prev_value, __entry->curr_value)
);
/*
 * dev_pm_qos_request - template for per-device PM QoS request
 * add/update/remove: device name, request type, and new value.
 */
DECLARE_EVENT_CLASS(dev_pm_qos_request,
TP_PROTO(const char *name, enum dev_pm_qos_req_type type,
s32 new_value),
TP_ARGS(name, type, new_value),
TP_STRUCT__entry(
__string( name, name )
__field( enum dev_pm_qos_req_type, type )
__field( s32, new_value )
),
TP_fast_assign(
__assign_str(name, name);
__entry->type = type;
__entry->new_value = new_value;
),
TP_printk("device=%s type=%s new_value=%d",
__get_str(name),
__print_symbolic(__entry->type,
{ DEV_PM_QOS_RESUME_LATENCY, "DEV_PM_QOS_RESUME_LATENCY" },
{ DEV_PM_QOS_FLAGS, "DEV_PM_QOS_FLAGS" }),
__entry->new_value)
);
DEFINE_EVENT(dev_pm_qos_request, dev_pm_qos_add_request,
TP_PROTO(const char *name, enum dev_pm_qos_req_type type,
s32 new_value),
TP_ARGS(name, type, new_value)
);
DEFINE_EVENT(dev_pm_qos_request, dev_pm_qos_update_request,
TP_PROTO(const char *name, enum dev_pm_qos_req_type type,
s32 new_value),
TP_ARGS(name, type, new_value)
);
DEFINE_EVENT(dev_pm_qos_request, dev_pm_qos_remove_request,
TP_PROTO(const char *name, enum dev_pm_qos_req_type type,
s32 new_value),
TP_ARGS(name, type, new_value)
);
/* for kernel/notifier.c */
/*
 * notifier_pm_suspend - a PM notifier callback is invoked; records the
 * callback function pointer and the notification value.
 */
TRACE_EVENT(notifier_pm_suspend,
TP_PROTO(struct notifier_block *nb, unsigned long val),
TP_ARGS(nb, val),
TP_STRUCT__entry(
__field( void *, function )
__field( unsigned long, val )
),
TP_fast_assign(
__entry->function = nb->notifier_call;
__entry->val = val;
),
TP_printk("nb->function=%pf val=%lu", __entry->function, __entry->val)
);
#endif /* _TRACE_POWER_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,67 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM power
#if !defined(_TRACE_POWER_CPU_MIGRATE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_POWER_CPU_MIGRATE_H
#include <linux/tracepoint.h>
/*
 * Shared prototype/argument lists for the cpu_migrate event family:
 * a timestamp plus the hardware id of the CPU involved.
 */
#define __cpu_migrate_proto \
TP_PROTO(u64 timestamp, \
u32 cpu_hwid)
#define __cpu_migrate_args \
TP_ARGS(timestamp, \
cpu_hwid)
/*
 * Event class recording the timestamp and CPU hardware id of one
 * migration step; the hwid is printed as fixed-width hex.
 */
DECLARE_EVENT_CLASS(cpu_migrate,
__cpu_migrate_proto,
__cpu_migrate_args,
TP_STRUCT__entry(
__field(u64, timestamp )
__field(u32, cpu_hwid )
),
TP_fast_assign(
__entry->timestamp = timestamp;
__entry->cpu_hwid = cpu_hwid;
),
TP_printk("timestamp=%llu cpu_hwid=0x%08lX",
(unsigned long long)__entry->timestamp,
(unsigned long)__entry->cpu_hwid
)
);
/* Helper stamping out the begin/finish/current events from the class. */
#define __define_cpu_migrate_event(name) \
DEFINE_EVENT(cpu_migrate, cpu_migrate_##name, \
__cpu_migrate_proto, \
__cpu_migrate_args \
)
__define_cpu_migrate_event(begin);
__define_cpu_migrate_event(finish);
__define_cpu_migrate_event(current);
/*
 * Fix: the original "#undef __define_cpu_migrate" undefined a name that
 * was never defined, leaving the real helper macro defined.  Undef the
 * macro actually defined above so it cannot leak into later includes.
 */
#undef __define_cpu_migrate_event
#undef __cpu_migrate_proto
#undef __cpu_migrate_args
/* This file can get included multiple times, TRACE_HEADER_MULTI_READ at top */
#ifndef _PWR_CPU_MIGRATE_EVENT_AVOID_DOUBLE_DEFINING
#define _PWR_CPU_MIGRATE_EVENT_AVOID_DOUBLE_DEFINING
/*
 * Set from_phys_cpu and to_phys_cpu to CPU_MIGRATE_ALL_CPUS to indicate
 * a whole-cluster migration:
 */
#define CPU_MIGRATE_ALL_CPUS 0x80000000U
#endif
#endif /* _TRACE_POWER_CPU_MIGRATE_H */
/* This part must be outside protection */
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE power_cpu_migrate
#include <trace/define_trace.h>

View file

@ -0,0 +1,28 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM printk
#if !defined(_TRACE_PRINTK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_PRINTK_H
#include <linux/tracepoint.h>
/*
 * Fired for console output: copies the message text into a dynamic
 * array of len + 1 bytes and explicitly NUL-terminates it, since the
 * source buffer is length-delimited rather than a C string.
 */
TRACE_EVENT(console,
TP_PROTO(const char *text, size_t len),
TP_ARGS(text, len),
TP_STRUCT__entry(
__dynamic_array(char, msg, len + 1)
),
TP_fast_assign(
memcpy(__get_dynamic_array(msg), text, len);
((char *)__get_dynamic_array(msg))[len] = 0;
),
TP_printk("%s", __get_str(msg))
);
#endif /* _TRACE_PRINTK_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,315 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM random
#if !defined(_TRACE_RANDOM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RANDOM_H
#include <linux/writeback.h>
#include <linux/tracepoint.h>
/*
 * Records how many bytes of device-provided data were mixed in, plus
 * the caller's return address (printed as a symbol via %pF).
 */
TRACE_EVENT(add_device_randomness,
TP_PROTO(int bytes, unsigned long IP),
TP_ARGS(bytes, IP),
TP_STRUCT__entry(
__field( int, bytes )
__field(unsigned long, IP )
),
TP_fast_assign(
__entry->bytes = bytes;
__entry->IP = IP;
),
TP_printk("bytes %d caller %pF",
__entry->bytes, (void *)__entry->IP)
);
/*
 * Class for pool-mixing events.  Only the pool_name pointer is stored,
 * so callers must pass a string that stays valid (a static name).
 */
DECLARE_EVENT_CLASS(random__mix_pool_bytes,
TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
TP_ARGS(pool_name, bytes, IP),
TP_STRUCT__entry(
__field( const char *, pool_name )
__field( int, bytes )
__field(unsigned long, IP )
),
TP_fast_assign(
__entry->pool_name = pool_name;
__entry->bytes = bytes;
__entry->IP = IP;
),
TP_printk("%s pool: bytes %d caller %pF",
__entry->pool_name, __entry->bytes, (void *)__entry->IP)
);
/* Locked and lockless variants of the pool-mixing event. */
DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes,
TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
TP_ARGS(pool_name, bytes, IP)
);
DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock,
TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
TP_ARGS(pool_name, bytes, IP)
);
/*
 * Entropy credited to a pool: the increment (bits), the resulting
 * entropy_count, the running entropy_total, and the caller address.
 */
TRACE_EVENT(credit_entropy_bits,
TP_PROTO(const char *pool_name, int bits, int entropy_count,
int entropy_total, unsigned long IP),
TP_ARGS(pool_name, bits, entropy_count, entropy_total, IP),
TP_STRUCT__entry(
__field( const char *, pool_name )
__field( int, bits )
__field( int, entropy_count )
__field( int, entropy_total )
__field(unsigned long, IP )
),
TP_fast_assign(
__entry->pool_name = pool_name;
__entry->bits = bits;
__entry->entropy_count = entropy_count;
__entry->entropy_total = entropy_total;
__entry->IP = IP;
),
TP_printk("%s pool: bits %d entropy_count %d entropy_total %d "
"caller %pF", __entry->pool_name, __entry->bits,
__entry->entropy_count, __entry->entropy_total,
(void *)__entry->IP)
);
/* Entropy pushed from the input pool to a named output pool. */
TRACE_EVENT(push_to_pool,
TP_PROTO(const char *pool_name, int pool_bits, int input_bits),
TP_ARGS(pool_name, pool_bits, input_bits),
TP_STRUCT__entry(
__field( const char *, pool_name )
__field( int, pool_bits )
__field( int, input_bits )
),
TP_fast_assign(
__entry->pool_name = pool_name;
__entry->pool_bits = pool_bits;
__entry->input_bits = input_bits;
),
TP_printk("%s: pool_bits %d input_pool_bits %d",
__entry->pool_name, __entry->pool_bits,
__entry->input_bits)
);
/* Entropy debited (consumed) from a named pool. */
TRACE_EVENT(debit_entropy,
TP_PROTO(const char *pool_name, int debit_bits),
TP_ARGS(pool_name, debit_bits),
TP_STRUCT__entry(
__field( const char *, pool_name )
__field( int, debit_bits )
),
TP_fast_assign(
__entry->pool_name = pool_name;
__entry->debit_bits = debit_bits;
),
TP_printk("%s: debit_bits %d", __entry->pool_name,
__entry->debit_bits)
);
/* Input-device randomness added; records resulting input-pool bits. */
TRACE_EVENT(add_input_randomness,
TP_PROTO(int input_bits),
TP_ARGS(input_bits),
TP_STRUCT__entry(
__field( int, input_bits )
),
TP_fast_assign(
__entry->input_bits = input_bits;
),
TP_printk("input_pool_bits %d", __entry->input_bits)
);
/*
 * Disk randomness added; dev is decoded to major,minor with the
 * MAJOR()/MINOR() macros at print time.
 */
TRACE_EVENT(add_disk_randomness,
TP_PROTO(dev_t dev, int input_bits),
TP_ARGS(dev, input_bits),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( int, input_bits )
),
TP_fast_assign(
__entry->dev = dev;
__entry->input_bits = input_bits;
),
TP_printk("dev %d,%d input_pool_bits %d", MAJOR(__entry->dev),
MINOR(__entry->dev), __entry->input_bits)
);
/*
 * Transfer of entropy from the input pool into a secondary pool:
 * bits actually transferred, bits requested, and the entropy levels
 * of the destination pool and the input pool.
 */
TRACE_EVENT(xfer_secondary_pool,
TP_PROTO(const char *pool_name, int xfer_bits, int request_bits,
int pool_entropy, int input_entropy),
TP_ARGS(pool_name, xfer_bits, request_bits, pool_entropy,
input_entropy),
TP_STRUCT__entry(
__field( const char *, pool_name )
__field( int, xfer_bits )
__field( int, request_bits )
__field( int, pool_entropy )
__field( int, input_entropy )
),
TP_fast_assign(
__entry->pool_name = pool_name;
__entry->xfer_bits = xfer_bits;
__entry->request_bits = request_bits;
__entry->pool_entropy = pool_entropy;
__entry->input_entropy = input_entropy;
),
TP_printk("pool %s xfer_bits %d request_bits %d pool_entropy %d "
"input_entropy %d", __entry->pool_name, __entry->xfer_bits,
__entry->request_bits, __entry->pool_entropy,
__entry->input_entropy)
);
/* Class for get_random_bytes-style calls: byte count + caller address. */
DECLARE_EVENT_CLASS(random__get_random_bytes,
TP_PROTO(int nbytes, unsigned long IP),
TP_ARGS(nbytes, IP),
TP_STRUCT__entry(
__field( int, nbytes )
__field(unsigned long, IP )
),
TP_fast_assign(
__entry->nbytes = nbytes;
__entry->IP = IP;
),
TP_printk("nbytes %d caller %pF", __entry->nbytes, (void *)__entry->IP)
);
/* Software and arch-backed variants share the same record layout. */
DEFINE_EVENT(random__get_random_bytes, get_random_bytes,
TP_PROTO(int nbytes, unsigned long IP),
TP_ARGS(nbytes, IP)
);
DEFINE_EVENT(random__get_random_bytes, get_random_bytes_arch,
TP_PROTO(int nbytes, unsigned long IP),
TP_ARGS(nbytes, IP)
);
/*
 * Class for entropy extraction: pool name, bytes extracted, remaining
 * entropy_count, and the caller address.
 */
DECLARE_EVENT_CLASS(random__extract_entropy,
TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
unsigned long IP),
TP_ARGS(pool_name, nbytes, entropy_count, IP),
TP_STRUCT__entry(
__field( const char *, pool_name )
__field( int, nbytes )
__field( int, entropy_count )
__field(unsigned long, IP )
),
TP_fast_assign(
__entry->pool_name = pool_name;
__entry->nbytes = nbytes;
__entry->entropy_count = entropy_count;
__entry->IP = IP;
),
TP_printk("%s pool: nbytes %d entropy_count %d caller %pF",
__entry->pool_name, __entry->nbytes, __entry->entropy_count,
(void *)__entry->IP)
);
/* Kernel-side and userspace-read extraction events. */
DEFINE_EVENT(random__extract_entropy, extract_entropy,
TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
unsigned long IP),
TP_ARGS(pool_name, nbytes, entropy_count, IP)
);
DEFINE_EVENT(random__extract_entropy, extract_entropy_user,
TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
unsigned long IP),
TP_ARGS(pool_name, nbytes, entropy_count, IP)
);
/*
 * Blocking /dev/random read: bits delivered, bits still needed, and the
 * entropy remaining in the blocking and input pools.
 *
 * Fix: the original TP_printk passed __entry->got_bits twice, so the
 * "still_needed_bits" column silently repeated got_bits and need_bits
 * (assigned in TP_fast_assign) was never printed.  Pass need_bits as
 * the second argument, matching the format string.
 */
TRACE_EVENT(random_read,
TP_PROTO(int got_bits, int need_bits, int pool_left, int input_left),
TP_ARGS(got_bits, need_bits, pool_left, input_left),
TP_STRUCT__entry(
__field( int, got_bits )
__field( int, need_bits )
__field( int, pool_left )
__field( int, input_left )
),
TP_fast_assign(
__entry->got_bits = got_bits;
__entry->need_bits = need_bits;
__entry->pool_left = pool_left;
__entry->input_left = input_left;
),
TP_printk("got_bits %d still_needed_bits %d "
"blocking_pool_entropy_left %d input_entropy_left %d",
__entry->got_bits, __entry->need_bits, __entry->pool_left,
__entry->input_left)
);
/*
 * Non-blocking /dev/urandom read: bits delivered plus the entropy left
 * in the nonblocking and input pools.
 */
TRACE_EVENT(urandom_read,
TP_PROTO(int got_bits, int pool_left, int input_left),
TP_ARGS(got_bits, pool_left, input_left),
TP_STRUCT__entry(
__field( int, got_bits )
__field( int, pool_left )
__field( int, input_left )
),
TP_fast_assign(
__entry->got_bits = got_bits;
__entry->pool_left = pool_left;
__entry->input_left = input_left;
),
TP_printk("got_bits %d nonblocking_pool_entropy_left %d "
"input_entropy_left %d", __entry->got_bits,
__entry->pool_left, __entry->input_left)
);
#endif /* _TRACE_RANDOM_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

740
include/trace/events/rcu.h Normal file
View file

@ -0,0 +1,740 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rcu
#if !defined(_TRACE_RCU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RCU_H
#include <linux/tracepoint.h>
/*
 * Tracepoint for start/end markers used for utilization calculations.
 * By convention, the string is of the following forms:
 *
 * "Start <activity>" -- Mark the start of the specified activity,
 * such as "context switch". Nesting is permitted.
 * "End <activity>" -- Mark the end of the specified activity.
 *
 * An "@" character within "<activity>" is a comment character: Data
 * reduction scripts will ignore the "@" and the remainder of the line.
 *
 * Note: only the string pointer is recorded, so the argument must stay
 * valid after the call -- in practice a string literal.
 */
TRACE_EVENT(rcu_utilization,
TP_PROTO(const char *s),
TP_ARGS(s),
TP_STRUCT__entry(
__field(const char *, s)
),
TP_fast_assign(
__entry->s = s;
),
TP_printk("%s", __entry->s)
);
/* Everything below is compiled in only when CONFIG_RCU_TRACE is set. */
#ifdef CONFIG_RCU_TRACE
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
/*
 * Tracepoint for grace-period events. Takes a string identifying the
 * RCU flavor, the grace-period number, and a string identifying the
 * grace-period-related event as follows:
 *
 * "AccReadyCB": CPU acclerates new callbacks to RCU_NEXT_READY_TAIL.
 * "AccWaitCB": CPU accelerates new callbacks to RCU_WAIT_TAIL.
 * "newreq": Request a new grace period.
 * "start": Start a grace period.
 * "cpustart": CPU first notices a grace-period start.
 * "cpuqs": CPU passes through a quiescent state.
 * "cpuonl": CPU comes online.
 * "cpuofl": CPU goes offline.
 * "reqwait": GP kthread sleeps waiting for grace-period request.
 * "reqwaitsig": GP kthread awakened by signal from reqwait state.
 * "fqswait": GP kthread waiting until time to force quiescent states.
 * "fqsstart": GP kthread starts forcing quiescent states.
 * "fqsend": GP kthread done forcing quiescent states.
 * "fqswaitsig": GP kthread awakened by signal from fqswait state.
 * "end": End a grace period.
 * "cpuend": CPU first notices a grace-period end.
 */
TRACE_EVENT(rcu_grace_period,
TP_PROTO(const char *rcuname, unsigned long gpnum, const char *gpevent),
TP_ARGS(rcuname, gpnum, gpevent),
TP_STRUCT__entry(
__field(const char *, rcuname)
__field(unsigned long, gpnum)
__field(const char *, gpevent)
),
TP_fast_assign(
__entry->rcuname = rcuname;
__entry->gpnum = gpnum;
__entry->gpevent = gpevent;
),
TP_printk("%s %lu %s",
__entry->rcuname, __entry->gpnum, __entry->gpevent)
);
/*
 * Tracepoint for future grace-period events, including those for no-callbacks
 * CPUs. The caller should pull the data from the rcu_node structure,
 * other than rcuname, which comes from the rcu_state structure, and event,
 * which is one of the following:
 *
 * "Startleaf": Request a nocb grace period based on leaf-node data.
 * "Startedleaf": Leaf-node start proved sufficient.
 * "Startedleafroot": Leaf-node start proved sufficient after checking root.
 * "Startedroot": Requested a nocb grace period based on root-node data.
 * "StartWait": Start waiting for the requested grace period.
 * "ResumeWait": Resume waiting after signal.
 * "EndWait": Complete wait.
 * "Cleanup": Clean up rcu_node structure after previous GP.
 * "CleanupMore": Clean up, and another no-CB GP is needed.
 *
 * All string arguments here are recorded by pointer only.
 */
TRACE_EVENT(rcu_future_grace_period,
TP_PROTO(const char *rcuname, unsigned long gpnum, unsigned long completed,
unsigned long c, u8 level, int grplo, int grphi,
const char *gpevent),
TP_ARGS(rcuname, gpnum, completed, c, level, grplo, grphi, gpevent),
TP_STRUCT__entry(
__field(const char *, rcuname)
__field(unsigned long, gpnum)
__field(unsigned long, completed)
__field(unsigned long, c)
__field(u8, level)
__field(int, grplo)
__field(int, grphi)
__field(const char *, gpevent)
),
TP_fast_assign(
__entry->rcuname = rcuname;
__entry->gpnum = gpnum;
__entry->completed = completed;
__entry->c = c;
__entry->level = level;
__entry->grplo = grplo;
__entry->grphi = grphi;
__entry->gpevent = gpevent;
),
TP_printk("%s %lu %lu %lu %u %d %d %s",
__entry->rcuname, __entry->gpnum, __entry->completed,
__entry->c, __entry->level, __entry->grplo, __entry->grphi,
__entry->gpevent)
);
/*
 * Tracepoint for grace-period-initialization events. These are
 * distinguished by the type of RCU, the new grace-period number, the
 * rcu_node structure level, the starting and ending CPU covered by the
 * rcu_node structure, and the mask of CPUs that will be waited for.
 * All but the type of RCU are extracted from the rcu_node structure.
 */
TRACE_EVENT(rcu_grace_period_init,
TP_PROTO(const char *rcuname, unsigned long gpnum, u8 level,
int grplo, int grphi, unsigned long qsmask),
TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask),
TP_STRUCT__entry(
__field(const char *, rcuname)
__field(unsigned long, gpnum)
__field(u8, level)
__field(int, grplo)
__field(int, grphi)
__field(unsigned long, qsmask)
),
TP_fast_assign(
__entry->rcuname = rcuname;
__entry->gpnum = gpnum;
__entry->level = level;
__entry->grplo = grplo;
__entry->grphi = grphi;
__entry->qsmask = qsmask;
),
TP_printk("%s %lu %u %d %d %lx",
__entry->rcuname, __entry->gpnum, __entry->level,
__entry->grplo, __entry->grphi, __entry->qsmask)
);
/*
 * Tracepoint for RCU no-CBs CPU callback handoffs. This event is intended
 * to assist debugging of these handoffs.
 *
 * The first argument is the name of the RCU flavor, and the second is
 * the number of the offloaded CPU are extracted. The third and final
 * argument is a string as follows:
 *
 * "WakeEmpty": Wake rcuo kthread, first CB to empty list.
 * "WakeEmptyIsDeferred": Wake rcuo kthread later, first CB to empty list.
 * "WakeOvf": Wake rcuo kthread, CB list is huge.
 * "WakeOvfIsDeferred": Wake rcuo kthread later, CB list is huge.
 * "WakeNot": Don't wake rcuo kthread.
 * "WakeNotPoll": Don't wake rcuo kthread because it is polling.
 * "DeferredWake": Carried out the "IsDeferred" wakeup.
 * "Poll": Start of new polling cycle for rcu_nocb_poll.
 * "Sleep": Sleep waiting for CBs for !rcu_nocb_poll.
 * "WokeEmpty": rcuo kthread woke to find empty list.
 * "WokeNonEmpty": rcuo kthread woke to find non-empty list.
 * "WaitQueue": Enqueue partially done, timed wait for it to complete.
 * "WokeQueue": Partial enqueue now complete.
 */
TRACE_EVENT(rcu_nocb_wake,
TP_PROTO(const char *rcuname, int cpu, const char *reason),
TP_ARGS(rcuname, cpu, reason),
TP_STRUCT__entry(
__field(const char *, rcuname)
__field(int, cpu)
__field(const char *, reason)
),
TP_fast_assign(
__entry->rcuname = rcuname;
__entry->cpu = cpu;
__entry->reason = reason;
),
TP_printk("%s %d %s", __entry->rcuname, __entry->cpu, __entry->reason)
);
/*
 * Tracepoint for tasks blocking within preemptible-RCU read-side
 * critical sections. Track the type of RCU (which one day might
 * include SRCU), the grace-period number that the task is blocking
 * (the current or the next), and the task's PID.
 *
 * Note: the prototype order is (rcuname, pid, gpnum) but the record
 * stores gpnum before pid; TP_printk prints name, gpnum, pid.
 */
TRACE_EVENT(rcu_preempt_task,
TP_PROTO(const char *rcuname, int pid, unsigned long gpnum),
TP_ARGS(rcuname, pid, gpnum),
TP_STRUCT__entry(
__field(const char *, rcuname)
__field(unsigned long, gpnum)
__field(int, pid)
),
TP_fast_assign(
__entry->rcuname = rcuname;
__entry->gpnum = gpnum;
__entry->pid = pid;
),
TP_printk("%s %lu %d",
__entry->rcuname, __entry->gpnum, __entry->pid)
);
/*
 * Tracepoint for tasks that blocked within a given preemptible-RCU
 * read-side critical section exiting that critical section. Track the
 * type of RCU (which one day might include SRCU) and the task's PID.
 */
TRACE_EVENT(rcu_unlock_preempted_task,
TP_PROTO(const char *rcuname, unsigned long gpnum, int pid),
TP_ARGS(rcuname, gpnum, pid),
TP_STRUCT__entry(
__field(const char *, rcuname)
__field(unsigned long, gpnum)
__field(int, pid)
),
TP_fast_assign(
__entry->rcuname = rcuname;
__entry->gpnum = gpnum;
__entry->pid = pid;
),
TP_printk("%s %lu %d", __entry->rcuname, __entry->gpnum, __entry->pid)
);
/*
 * Tracepoint for quiescent-state-reporting events. These are
 * distinguished by the type of RCU, the grace-period number, the
 * mask of quiescent lower-level entities, the rcu_node structure level,
 * the starting and ending CPU covered by the rcu_node structure, and
 * whether there are any blocked tasks blocking the current grace period.
 * All but the type of RCU are extracted from the rcu_node structure.
 *
 * NOTE(review): gp_tasks arrives as an int but is stored in a u8 field,
 * so callers are expected to pass only small (boolean-like) values.
 */
TRACE_EVENT(rcu_quiescent_state_report,
TP_PROTO(const char *rcuname, unsigned long gpnum,
unsigned long mask, unsigned long qsmask,
u8 level, int grplo, int grphi, int gp_tasks),
TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks),
TP_STRUCT__entry(
__field(const char *, rcuname)
__field(unsigned long, gpnum)
__field(unsigned long, mask)
__field(unsigned long, qsmask)
__field(u8, level)
__field(int, grplo)
__field(int, grphi)
__field(u8, gp_tasks)
),
TP_fast_assign(
__entry->rcuname = rcuname;
__entry->gpnum = gpnum;
__entry->mask = mask;
__entry->qsmask = qsmask;
__entry->level = level;
__entry->grplo = grplo;
__entry->grphi = grphi;
__entry->gp_tasks = gp_tasks;
),
TP_printk("%s %lu %lx>%lx %u %d %d %u",
__entry->rcuname, __entry->gpnum,
__entry->mask, __entry->qsmask, __entry->level,
__entry->grplo, __entry->grphi, __entry->gp_tasks)
);
/*
 * Tracepoint for quiescent states detected by force_quiescent_state().
 * These trace events include the type of RCU, the grace-period number
 * that was blocked by the CPU, the CPU itself, and the type of quiescent
 * state, which can be "dti" for dyntick-idle mode, "ofl" for CPU offline,
 * or "kick" when kicking a CPU that has been in dyntick-idle mode for
 * too long.
 */
TRACE_EVENT(rcu_fqs,
TP_PROTO(const char *rcuname, unsigned long gpnum, int cpu, const char *qsevent),
TP_ARGS(rcuname, gpnum, cpu, qsevent),
TP_STRUCT__entry(
__field(const char *, rcuname)
__field(unsigned long, gpnum)
__field(int, cpu)
__field(const char *, qsevent)
),
TP_fast_assign(
__entry->rcuname = rcuname;
__entry->gpnum = gpnum;
__entry->cpu = cpu;
__entry->qsevent = qsevent;
),
TP_printk("%s %lu %d %s",
__entry->rcuname, __entry->gpnum,
__entry->cpu, __entry->qsevent)
);
#endif /* #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) */
/*
 * Tracepoint for dyntick-idle entry/exit events. These take a string
 * as argument: "Start" for entering dyntick-idle mode, "End" for
 * leaving it, "--=" for events moving towards idle, and "++=" for events
 * moving away from idle. "Error on entry: not idle task" and "Error on
 * exit: not idle task" indicate that a non-idle task is erroneously
 * toying with the idle loop.
 *
 * These events also take a pair of numbers, which indicate the nesting
 * depth before and after the event of interest. Note that task-related
 * events use the upper bits of each number, while interrupt-related
 * events use the lower bits.
 */
TRACE_EVENT(rcu_dyntick,
TP_PROTO(const char *polarity, long long oldnesting, long long newnesting),
TP_ARGS(polarity, oldnesting, newnesting),
TP_STRUCT__entry(
__field(const char *, polarity)
__field(long long, oldnesting)
__field(long long, newnesting)
),
TP_fast_assign(
__entry->polarity = polarity;
__entry->oldnesting = oldnesting;
__entry->newnesting = newnesting;
),
/* nesting counters printed in hex so the task/irq bit split is visible */
TP_printk("%s %llx %llx", __entry->polarity,
__entry->oldnesting, __entry->newnesting)
);
/*
 * Tracepoint for RCU preparation for idle, the goal being to get RCU
 * processing done so that the current CPU can shut off its scheduling
 * clock and enter dyntick-idle mode. One way to accomplish this is
 * to drain all RCU callbacks from this CPU, and the other is to have
 * done everything RCU requires for the current grace period. In this
 * latter case, the CPU will be awakened at the end of the current grace
 * period in order to process the remainder of its callbacks.
 *
 * These tracepoints take a string as argument:
 *
 * "No callbacks": Nothing to do, no callbacks on this CPU.
 * "In holdoff": Nothing to do, holding off after unsuccessful attempt.
 * "Begin holdoff": Attempt failed, don't retry until next jiffy.
 * "Dyntick with callbacks": Entering dyntick-idle despite callbacks.
 * "Dyntick with lazy callbacks": Entering dyntick-idle w/lazy callbacks.
 * "More callbacks": Still more callbacks, try again to clear them out.
 * "Callbacks drained": All callbacks processed, off to dyntick idle!
 * "Timer": Timer fired to cause CPU to continue processing callbacks.
 * "Demigrate": Timer fired on wrong CPU, woke up correct CPU.
 * "Cleanup after idle": Idle exited, timer canceled.
 */
TRACE_EVENT(rcu_prep_idle,
TP_PROTO(const char *reason),
TP_ARGS(reason),
TP_STRUCT__entry(
__field(const char *, reason)
),
TP_fast_assign(
__entry->reason = reason;
),
TP_printk("%s", __entry->reason)
);
/*
 * Tracepoint for the registration of a single RCU callback function.
 * The first argument is the type of RCU, the second argument is
 * a pointer to the RCU callback itself, the third element is the
 * number of lazy callbacks queued, and the fourth element is the
 * total number of callbacks queued.
 *
 * The callback's function pointer is captured separately from rhp so
 * %pf can resolve it to a symbol at print time.
 */
TRACE_EVENT(rcu_callback,
TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen_lazy,
long qlen),
TP_ARGS(rcuname, rhp, qlen_lazy, qlen),
TP_STRUCT__entry(
__field(const char *, rcuname)
__field(void *, rhp)
__field(void *, func)
__field(long, qlen_lazy)
__field(long, qlen)
),
TP_fast_assign(
__entry->rcuname = rcuname;
__entry->rhp = rhp;
__entry->func = rhp->func;
__entry->qlen_lazy = qlen_lazy;
__entry->qlen = qlen;
),
TP_printk("%s rhp=%p func=%pf %ld/%ld",
__entry->rcuname, __entry->rhp, __entry->func,
__entry->qlen_lazy, __entry->qlen)
);
/*
 * Tracepoint for the registration of a single RCU callback of the special
 * kfree() form. The first argument is the RCU type, the second argument
 * is a pointer to the RCU callback, the third argument is the offset
 * of the callback within the enclosing RCU-protected data structure,
 * the fourth argument is the number of lazy callbacks queued, and the
 * fifth argument is the total number of callbacks queued.
 */
TRACE_EVENT(rcu_kfree_callback,
TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
long qlen_lazy, long qlen),
TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),
TP_STRUCT__entry(
__field(const char *, rcuname)
__field(void *, rhp)
__field(unsigned long, offset)
__field(long, qlen_lazy)
__field(long, qlen)
),
TP_fast_assign(
__entry->rcuname = rcuname;
__entry->rhp = rhp;
__entry->offset = offset;
__entry->qlen_lazy = qlen_lazy;
__entry->qlen = qlen;
),
/* "func=" here shows the kfree offset, not a function pointer */
TP_printk("%s rhp=%p func=%ld %ld/%ld",
__entry->rcuname, __entry->rhp, __entry->offset,
__entry->qlen_lazy, __entry->qlen)
);
/*
 * Tracepoint for marking the beginning rcu_do_batch, performed to start
 * RCU callback invocation. The first argument is the RCU flavor,
 * the second is the number of lazy callbacks queued, the third is
 * the total number of callbacks queued, and the fourth argument is
 * the current RCU-callback batch limit.
 */
TRACE_EVENT(rcu_batch_start,
TP_PROTO(const char *rcuname, long qlen_lazy, long qlen, long blimit),
TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
TP_STRUCT__entry(
__field(const char *, rcuname)
__field(long, qlen_lazy)
__field(long, qlen)
__field(long, blimit)
),
TP_fast_assign(
__entry->rcuname = rcuname;
__entry->qlen_lazy = qlen_lazy;
__entry->qlen = qlen;
__entry->blimit = blimit;
),
TP_printk("%s CBs=%ld/%ld bl=%ld",
__entry->rcuname, __entry->qlen_lazy, __entry->qlen,
__entry->blimit)
);
/*
 * Tracepoint for the invocation of a single RCU callback function.
 * The first argument is the type of RCU, and the second argument is
 * a pointer to the RCU callback itself.
 */
TRACE_EVENT(rcu_invoke_callback,
TP_PROTO(const char *rcuname, struct rcu_head *rhp),
TP_ARGS(rcuname, rhp),
TP_STRUCT__entry(
__field(const char *, rcuname)
__field(void *, rhp)
__field(void *, func)
),
TP_fast_assign(
__entry->rcuname = rcuname;
__entry->rhp = rhp;
__entry->func = rhp->func;
),
TP_printk("%s rhp=%p func=%pf",
__entry->rcuname, __entry->rhp, __entry->func)
);
/*
 * Tracepoint for the invocation of a single RCU callback of the special
 * kfree() form. The first argument is the RCU flavor, the second
 * argument is a pointer to the RCU callback, and the third argument
 * is the offset of the callback within the enclosing RCU-protected
 * data structure.
 */
TRACE_EVENT(rcu_invoke_kfree_callback,
TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset),
TP_ARGS(rcuname, rhp, offset),
TP_STRUCT__entry(
__field(const char *, rcuname)
__field(void *, rhp)
__field(unsigned long, offset)
),
TP_fast_assign(
__entry->rcuname = rcuname;
__entry->rhp = rhp;
__entry->offset = offset;
),
TP_printk("%s rhp=%p func=%ld",
__entry->rcuname, __entry->rhp, __entry->offset)
);
/*
 * Tracepoint for exiting rcu_do_batch after RCU callbacks have been
 * invoked. The first argument is the name of the RCU flavor,
 * the second argument is number of callbacks actually invoked,
 * the third argument (cb) is whether or not any of the callbacks that
 * were ready to invoke at the beginning of this batch are still
 * queued, the fourth argument (nr) is the return value of need_resched(),
 * the fifth argument (iit) is 1 if the current task is the idle task,
 * and the sixth argument (risk) is the return value from
 * rcu_is_callbacks_kthread().
 *
 * The four flags are rendered as single characters (C/S/I/R or '.').
 */
TRACE_EVENT(rcu_batch_end,
TP_PROTO(const char *rcuname, int callbacks_invoked,
char cb, char nr, char iit, char risk),
TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
TP_STRUCT__entry(
__field(const char *, rcuname)
__field(int, callbacks_invoked)
__field(char, cb)
__field(char, nr)
__field(char, iit)
__field(char, risk)
),
TP_fast_assign(
__entry->rcuname = rcuname;
__entry->callbacks_invoked = callbacks_invoked;
__entry->cb = cb;
__entry->nr = nr;
__entry->iit = iit;
__entry->risk = risk;
),
TP_printk("%s CBs-invoked=%d idle=%c%c%c%c",
__entry->rcuname, __entry->callbacks_invoked,
__entry->cb ? 'C' : '.',
__entry->nr ? 'S' : '.',
__entry->iit ? 'I' : '.',
__entry->risk ? 'R' : '.')
);
/*
 * Tracepoint for rcutorture readers. The first argument is the name
 * of the RCU flavor from rcutorture's viewpoint and the second argument
 * is the callback address.
 */
TRACE_EVENT(rcu_torture_read,
TP_PROTO(const char *rcutorturename, struct rcu_head *rhp,
unsigned long secs, unsigned long c_old, unsigned long c),
TP_ARGS(rcutorturename, rhp, secs, c_old, c),
TP_STRUCT__entry(
__field(const char *, rcutorturename)
__field(struct rcu_head *, rhp)
__field(unsigned long, secs)
__field(unsigned long, c_old)
__field(unsigned long, c)
),
TP_fast_assign(
__entry->rcutorturename = rcutorturename;
__entry->rhp = rhp;
__entry->secs = secs;
__entry->c_old = c_old;
__entry->c = c;
),
TP_printk("%s torture read %p %luus c: %lu %lu",
__entry->rcutorturename, __entry->rhp,
__entry->secs, __entry->c_old, __entry->c)
);
/*
 * Tracepoint for _rcu_barrier() execution. The string "s" describes
 * the _rcu_barrier phase:
 * "Begin": _rcu_barrier() started.
 * "Check": _rcu_barrier() checking for piggybacking.
 * "EarlyExit": _rcu_barrier() piggybacked, thus early exit.
 * "Inc1": _rcu_barrier() piggyback check counter incremented.
 * "OfflineNoCB": _rcu_barrier() found callback on never-online CPU
 * "OnlineNoCB": _rcu_barrier() found online no-CBs CPU.
 * "OnlineQ": _rcu_barrier() found online CPU with callbacks.
 * "OnlineNQ": _rcu_barrier() found online CPU, no callbacks.
 * "IRQ": An rcu_barrier_callback() callback posted on remote CPU.
 * "CB": An rcu_barrier_callback() invoked a callback, not the last.
 * "LastCB": An rcu_barrier_callback() invoked the last callback.
 * "Inc2": _rcu_barrier() piggyback check counter incremented.
 * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
 * is the count of remaining callbacks, and "done" is the piggybacking count.
 */
TRACE_EVENT(rcu_barrier,
TP_PROTO(const char *rcuname, const char *s, int cpu, int cnt, unsigned long done),
TP_ARGS(rcuname, s, cpu, cnt, done),
TP_STRUCT__entry(
__field(const char *, rcuname)
__field(const char *, s)
__field(int, cpu)
__field(int, cnt)
__field(unsigned long, done)
),
TP_fast_assign(
__entry->rcuname = rcuname;
__entry->s = s;
__entry->cpu = cpu;
__entry->cnt = cnt;
__entry->done = done;
),
TP_printk("%s %s cpu %d remaining %d # %lu",
__entry->rcuname, __entry->s, __entry->cpu, __entry->cnt,
__entry->done)
);
#else /* #ifdef CONFIG_RCU_TRACE */
/*
 * When CONFIG_RCU_TRACE is off, the trace_rcu_*() calls in the RCU core
 * must still compile; these stubs expand each call site to a no-op
 * while keeping the argument lists syntactically checked away.
 */
#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
qsmask) do { } while (0)
#define trace_rcu_future_grace_period(rcuname, gpnum, completed, c, \
level, grplo, grphi, event) \
do { } while (0)
#define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0)
#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \
grplo, grphi, gp_tasks) do { } \
while (0)
#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0)
#define trace_rcu_prep_idle(reason) do { } while (0)
#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
do { } while (0)
#define trace_rcu_batch_start(rcuname, qlen_lazy, qlen, blimit) \
do { } while (0)
#define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
do { } while (0)
#define trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
do { } while (0)
#define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0)
#endif /* #else #ifdef CONFIG_RCU_TRACE */
#endif /* _TRACE_RCU_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,252 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM regmap
#if !defined(_TRACE_REGMAP_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_REGMAP_H
#include <linux/ktime.h>
#include <linux/tracepoint.h>
/* Forward declarations: only pointers are used in the prototypes. */
struct device;
struct regmap;
/*
 * Log register events
 */
/* Class for single-register accesses: device name, register, value. */
DECLARE_EVENT_CLASS(regmap_reg,
TP_PROTO(struct device *dev, unsigned int reg,
unsigned int val),
TP_ARGS(dev, reg, val),
TP_STRUCT__entry(
__string( name, dev_name(dev) )
__field( unsigned int, reg )
__field( unsigned int, val )
),
TP_fast_assign(
__assign_str(name, dev_name(dev));
__entry->reg = reg;
__entry->val = val;
),
TP_printk("%s reg=%x val=%x", __get_str(name),
(unsigned int)__entry->reg,
(unsigned int)__entry->val)
);
/* Instances of regmap_reg: write, hardware read, and cache read. */
DEFINE_EVENT(regmap_reg, regmap_reg_write,
TP_PROTO(struct device *dev, unsigned int reg,
unsigned int val),
TP_ARGS(dev, reg, val)
);
DEFINE_EVENT(regmap_reg, regmap_reg_read,
TP_PROTO(struct device *dev, unsigned int reg,
unsigned int val),
TP_ARGS(dev, reg, val)
);
DEFINE_EVENT(regmap_reg, regmap_reg_read_cache,
TP_PROTO(struct device *dev, unsigned int reg,
unsigned int val),
TP_ARGS(dev, reg, val)
);
DECLARE_EVENT_CLASS(regmap_block,
TP_PROTO(struct device *dev, unsigned int reg, int count),
TP_ARGS(dev, reg, count),
TP_STRUCT__entry(
__string( name, dev_name(dev) )
__field( unsigned int, reg )
__field( int, count )
),
TP_fast_assign(
__assign_str(name, dev_name(dev));
__entry->reg = reg;
__entry->count = count;
),
TP_printk("%s reg=%x count=%d", __get_str(name),
(unsigned int)__entry->reg,
(int)__entry->count)
);
DEFINE_EVENT(regmap_block, regmap_hw_read_start,
TP_PROTO(struct device *dev, unsigned int reg, int count),
TP_ARGS(dev, reg, count)
);
DEFINE_EVENT(regmap_block, regmap_hw_read_done,
TP_PROTO(struct device *dev, unsigned int reg, int count),
TP_ARGS(dev, reg, count)
);
DEFINE_EVENT(regmap_block, regmap_hw_write_start,
TP_PROTO(struct device *dev, unsigned int reg, int count),
TP_ARGS(dev, reg, count)
);
DEFINE_EVENT(regmap_block, regmap_hw_write_done,
TP_PROTO(struct device *dev, unsigned int reg, int count),
TP_ARGS(dev, reg, count)
);
/*
 * Log regcache_sync() activity: device name, the cache type in use and
 * a start/stop status string.
 *
 * Fix: the entry previously declared both __string(type, type) and
 * __field(int, type).  The int field was never assigned in
 * TP_fast_assign, so uninitialized ring-buffer bytes were exported to
 * userspace under a duplicate member name.  Drop the unused int field
 * (matches the upstream kernel fix).
 */
TRACE_EVENT(regcache_sync,
TP_PROTO(struct device *dev, const char *type,
const char *status),
TP_ARGS(dev, type, status),
TP_STRUCT__entry(
__string( name, dev_name(dev) )
__string( status, status )
__string( type, type )
),
TP_fast_assign(
__assign_str(name, dev_name(dev));
__assign_str(status, status);
__assign_str(type, type);
),
TP_printk("%s type=%s status=%s", __get_str(name),
__get_str(type), __get_str(status))
);
DECLARE_EVENT_CLASS(regmap_bool,
TP_PROTO(struct device *dev, bool flag),
TP_ARGS(dev, flag),
TP_STRUCT__entry(
__string( name, dev_name(dev) )
__field( int, flag )
),
TP_fast_assign(
__assign_str(name, dev_name(dev));
__entry->flag = flag;
),
TP_printk("%s flag=%d", __get_str(name),
(int)__entry->flag)
);
DEFINE_EVENT(regmap_bool, regmap_cache_only,
TP_PROTO(struct device *dev, bool flag),
TP_ARGS(dev, flag)
);
DEFINE_EVENT(regmap_bool, regmap_cache_bypass,
TP_PROTO(struct device *dev, bool flag),
TP_ARGS(dev, flag)
);
DECLARE_EVENT_CLASS(regmap_async,
TP_PROTO(struct device *dev),
TP_ARGS(dev),
TP_STRUCT__entry(
__string( name, dev_name(dev) )
),
TP_fast_assign(
__assign_str(name, dev_name(dev));
),
TP_printk("%s", __get_str(name))
);
DEFINE_EVENT(regmap_block, regmap_async_write_start,
TP_PROTO(struct device *dev, unsigned int reg, int count),
TP_ARGS(dev, reg, count)
);
DEFINE_EVENT(regmap_async, regmap_async_io_complete,
TP_PROTO(struct device *dev),
TP_ARGS(dev)
);
DEFINE_EVENT(regmap_async, regmap_async_complete_start,
TP_PROTO(struct device *dev),
TP_ARGS(dev)
);
DEFINE_EVENT(regmap_async, regmap_async_complete_done,
TP_PROTO(struct device *dev),
TP_ARGS(dev)
);
TRACE_EVENT(regcache_drop_region,
TP_PROTO(struct device *dev, unsigned int from,
unsigned int to),
TP_ARGS(dev, from, to),
TP_STRUCT__entry(
__string( name, dev_name(dev) )
__field( unsigned int, from )
__field( unsigned int, to )
),
TP_fast_assign(
__assign_str(name, dev_name(dev));
__entry->from = from;
__entry->to = to;
),
TP_printk("%s %u-%u", __get_str(name), (unsigned int)__entry->from,
(unsigned int)__entry->to)
);
#endif /* _TRACE_REGMAP_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,148 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM regulator
#if !defined(_TRACE_REGULATOR_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_REGULATOR_H
#include <linux/ktime.h>
#include <linux/tracepoint.h>
/*
* Events which just log themselves and the regulator name for enable/disable
* type tracking.
*/
DECLARE_EVENT_CLASS(regulator_basic,
TP_PROTO(const char *name),
TP_ARGS(name),
TP_STRUCT__entry(
__string( name, name )
),
TP_fast_assign(
__assign_str(name, name);
),
TP_printk("name=%s", __get_str(name))
);
DEFINE_EVENT(regulator_basic, regulator_enable,
TP_PROTO(const char *name),
TP_ARGS(name)
);
DEFINE_EVENT(regulator_basic, regulator_enable_delay,
TP_PROTO(const char *name),
TP_ARGS(name)
);
DEFINE_EVENT(regulator_basic, regulator_enable_complete,
TP_PROTO(const char *name),
TP_ARGS(name)
);
DEFINE_EVENT(regulator_basic, regulator_disable,
TP_PROTO(const char *name),
TP_ARGS(name)
);
DEFINE_EVENT(regulator_basic, regulator_disable_delay,
TP_PROTO(const char *name),
TP_ARGS(name)
);
DEFINE_EVENT(regulator_basic, regulator_disable_complete,
TP_PROTO(const char *name),
TP_ARGS(name)
);
/*
* Events that take a range of numerical values, mostly for voltages
* and so on.
*/
DECLARE_EVENT_CLASS(regulator_range,
TP_PROTO(const char *name, int min, int max),
TP_ARGS(name, min, max),
TP_STRUCT__entry(
__string( name, name )
__field( int, min )
__field( int, max )
),
TP_fast_assign(
__assign_str(name, name);
__entry->min = min;
__entry->max = max;
),
TP_printk("name=%s (%d-%d)", __get_str(name),
(int)__entry->min, (int)__entry->max)
);
DEFINE_EVENT(regulator_range, regulator_set_voltage,
TP_PROTO(const char *name, int min, int max),
TP_ARGS(name, min, max)
);
/*
* Events that take a single value, mostly for readback and refcounts.
*/
DECLARE_EVENT_CLASS(regulator_value,
TP_PROTO(const char *name, unsigned int val),
TP_ARGS(name, val),
TP_STRUCT__entry(
__string( name, name )
__field( unsigned int, val )
),
TP_fast_assign(
__assign_str(name, name);
__entry->val = val;
),
/*
 * val is stored as unsigned int and printed with %u; the previous
 * (int) cast mismatched the %u conversion and could misrender values
 * above INT_MAX.  Cast to unsigned int to match the format.
 */
TP_printk("name=%s, val=%u", __get_str(name),
(unsigned int)__entry->val)
);
DEFINE_EVENT(regulator_value, regulator_set_voltage_complete,
TP_PROTO(const char *name, unsigned int value),
TP_ARGS(name, value)
);
#endif /* _TRACE_REGULATOR_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

100
include/trace/events/rpm.h Normal file
View file

@ -0,0 +1,100 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rpm
#if !defined(_TRACE_RUNTIME_POWER_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RUNTIME_POWER_H
#include <linux/ktime.h>
#include <linux/tracepoint.h>
struct device;
/*
* The rpm_internal events are used for tracing some important
* runtime pm internal functions.
*/
/*
 * Event class for runtime-PM internals (rpm_suspend/rpm_resume/rpm_idle):
 * snapshots the caller-supplied RPM flags plus the device's power state
 * counters at the time of the call.
 */
DECLARE_EVENT_CLASS(rpm_internal,
TP_PROTO(struct device *dev, int flags),
TP_ARGS(dev, flags),
TP_STRUCT__entry(
__string( name, dev_name(dev) )
__field( int, flags )
__field( int , usage_count )
__field( int , disable_depth )
__field( int , runtime_auto )
__field( int , request_pending )
__field( int , irq_safe )
__field( int , child_count )
),
TP_fast_assign(
__assign_str(name, dev_name(dev));
__entry->flags = flags;
/* usage_count and child_count are atomics; read them atomically */
__entry->usage_count = atomic_read(
&dev->power.usage_count);
__entry->disable_depth = dev->power.disable_depth;
__entry->runtime_auto = dev->power.runtime_auto;
__entry->request_pending = dev->power.request_pending;
__entry->irq_safe = dev->power.irq_safe;
__entry->child_count = atomic_read(
&dev->power.child_count);
),
TP_printk("%s flags-%x cnt-%-2d dep-%-2d auto-%-1d p-%-1d"
" irq-%-1d child-%d",
__get_str(name), __entry->flags,
__entry->usage_count,
__entry->disable_depth,
__entry->runtime_auto,
__entry->request_pending,
__entry->irq_safe,
__entry->child_count
)
);
DEFINE_EVENT(rpm_internal, rpm_suspend,
TP_PROTO(struct device *dev, int flags),
TP_ARGS(dev, flags)
);
DEFINE_EVENT(rpm_internal, rpm_resume,
TP_PROTO(struct device *dev, int flags),
TP_ARGS(dev, flags)
);
DEFINE_EVENT(rpm_internal, rpm_idle,
TP_PROTO(struct device *dev, int flags),
TP_ARGS(dev, flags)
);
TRACE_EVENT(rpm_return_int,
TP_PROTO(struct device *dev, unsigned long ip, int ret),
TP_ARGS(dev, ip, ret),
TP_STRUCT__entry(
__string( name, dev_name(dev))
__field( unsigned long, ip )
__field( int, ret )
),
TP_fast_assign(
__assign_str(name, dev_name(dev));
__entry->ip = ip;
__entry->ret = ret;
),
TP_printk("%pS:%s ret=%d", (void *)__entry->ip, __get_str(name),
__entry->ret)
);
#endif /* _TRACE_RUNTIME_POWER_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,831 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched
#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H
#include <linux/sched.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>
/*
* Tracepoint for calling kthread_stop, performed to end a kthread:
*/
TRACE_EVENT(sched_kthread_stop,
TP_PROTO(struct task_struct *t),
TP_ARGS(t),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
),
TP_fast_assign(
memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
__entry->pid = t->pid;
),
TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
/*
* Tracepoint for the return value of the kthread stopping:
*/
TRACE_EVENT(sched_kthread_stop_ret,
TP_PROTO(int ret),
TP_ARGS(ret),
TP_STRUCT__entry(
__field( int, ret )
),
TP_fast_assign(
__entry->ret = ret;
),
TP_printk("ret=%d", __entry->ret)
);
/*
* Tracepoint for waking up a task:
*/
/*
 * Event class shared by sched_wakeup and sched_wakeup_new: records the
 * woken task's comm/pid/prio, whether the wakeup succeeded, and the CPU
 * the task is queued on at wakeup time.
 * __perf_task(p) marks 'p' as the task of interest for perf's
 * per-task event filtering; it is a no-op for ordinary tracing.
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,
TP_PROTO(struct task_struct *p, int success),
TP_ARGS(__perf_task(p), success),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, prio )
__field( int, success )
__field( int, target_cpu )
),
TP_fast_assign(
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
__entry->prio = p->prio;
__entry->success = success;
__entry->target_cpu = task_cpu(p);
),
TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
__entry->comm, __entry->pid, __entry->prio,
__entry->success, __entry->target_cpu)
);
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
TP_PROTO(struct task_struct *p, int success),
TP_ARGS(p, success));
/*
* Tracepoint for waking up a new task:
*/
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
TP_PROTO(struct task_struct *p, int success),
TP_ARGS(p, success));
#ifdef CREATE_TRACE_POINTS
/*
 * Report the state the scheduler should trace for @p.  Under
 * CONFIG_PREEMPT a preempted task is, for tracing purposes, still a
 * running task; TASK_STATE_MAX is OR'd in as a marker so the consumer
 * can tell "preempted" apart from a genuine TASK_RUNNING.
 */
static inline long __trace_sched_switch_state(struct task_struct *p)
{
#ifdef CONFIG_PREEMPT
	if (preempt_count() & PREEMPT_ACTIVE)
		return TASK_RUNNING | TASK_STATE_MAX;
#endif
	return p->state;
}
#endif
/*
* Tracepoint for task switches, performed by the scheduler:
*/
TRACE_EVENT(sched_switch,
TP_PROTO(struct task_struct *prev,
struct task_struct *next),
TP_ARGS(prev, next),
TP_STRUCT__entry(
__array( char, prev_comm, TASK_COMM_LEN )
__field( pid_t, prev_pid )
__field( int, prev_prio )
__field( long, prev_state )
__array( char, next_comm, TASK_COMM_LEN )
__field( pid_t, next_pid )
__field( int, next_prio )
),
TP_fast_assign(
memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
__entry->prev_pid = prev->pid;
__entry->prev_prio = prev->prio;
/* __trace_sched_switch_state() may OR in TASK_STATE_MAX to mark
 * a preempted (still-running) task; decoded below in TP_printk. */
__entry->prev_state = __trace_sched_switch_state(prev);
memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
__entry->next_pid = next->pid;
__entry->next_prio = next->prio;
),
/* The low bits of prev_state are decoded to the usual one-letter
 * state codes (S/D/T/t/Z/X/x/K/W/P), "R" when no bit is set; the
 * TASK_STATE_MAX marker bit renders as a trailing "+" (preempted). */
TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
__entry->prev_state & (TASK_STATE_MAX-1) ?
__print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
{ 16, "Z" }, { 32, "X" }, { 64, "x" },
{ 128, "K" }, { 256, "W" }, { 512, "P" }) : "R",
__entry->prev_state & TASK_STATE_MAX ? "+" : "",
__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
/*
* Tracepoint for a task being migrated:
*/
TRACE_EVENT(sched_migrate_task,
TP_PROTO(struct task_struct *p, int dest_cpu),
TP_ARGS(p, dest_cpu),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, prio )
__field( int, orig_cpu )
__field( int, dest_cpu )
),
TP_fast_assign(
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
__entry->prio = p->prio;
__entry->orig_cpu = task_cpu(p);
__entry->dest_cpu = dest_cpu;
),
TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
__entry->comm, __entry->pid, __entry->prio,
__entry->orig_cpu, __entry->dest_cpu)
);
/*
* Tracepoint for a CPU going offline/online:
*/
TRACE_EVENT(sched_cpu_hotplug,
TP_PROTO(int affected_cpu, int error, int status),
TP_ARGS(affected_cpu, error, status),
TP_STRUCT__entry(
__field( int, affected_cpu )
__field( int, error )
__field( int, status )
),
TP_fast_assign(
__entry->affected_cpu = affected_cpu;
__entry->error = error;
__entry->status = status;
),
TP_printk("cpu %d %s error=%d", __entry->affected_cpu,
__entry->status ? "online" : "offline", __entry->error)
);
DECLARE_EVENT_CLASS(sched_process_template,
TP_PROTO(struct task_struct *p),
TP_ARGS(p),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, prio )
),
TP_fast_assign(
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
__entry->prio = p->prio;
),
TP_printk("comm=%s pid=%d prio=%d",
__entry->comm, __entry->pid, __entry->prio)
);
/*
* Tracepoint for freeing a task:
*/
DEFINE_EVENT(sched_process_template, sched_process_free,
TP_PROTO(struct task_struct *p),
TP_ARGS(p));
/*
* Tracepoint for a task exiting:
*/
DEFINE_EVENT(sched_process_template, sched_process_exit,
TP_PROTO(struct task_struct *p),
TP_ARGS(p));
/*
* Tracepoint for waiting on task to unschedule:
*/
DEFINE_EVENT(sched_process_template, sched_wait_task,
TP_PROTO(struct task_struct *p),
TP_ARGS(p));
/*
* Tracepoint for a waiting task:
*/
TRACE_EVENT(sched_process_wait,
TP_PROTO(struct pid *pid),
TP_ARGS(pid),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, prio )
),
TP_fast_assign(
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
__entry->pid = pid_nr(pid);
__entry->prio = current->prio;
),
TP_printk("comm=%s pid=%d prio=%d",
__entry->comm, __entry->pid, __entry->prio)
);
/*
* Tracepoint for do_fork:
*/
TRACE_EVENT(sched_process_fork,
TP_PROTO(struct task_struct *parent, struct task_struct *child),
TP_ARGS(parent, child),
TP_STRUCT__entry(
__array( char, parent_comm, TASK_COMM_LEN )
__field( pid_t, parent_pid )
__array( char, child_comm, TASK_COMM_LEN )
__field( pid_t, child_pid )
),
TP_fast_assign(
memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
__entry->parent_pid = parent->pid;
memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
__entry->child_pid = child->pid;
),
TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
__entry->parent_comm, __entry->parent_pid,
__entry->child_comm, __entry->child_pid)
);
/*
* Tracepoint for exec:
*/
TRACE_EVENT(sched_process_exec,
TP_PROTO(struct task_struct *p, pid_t old_pid,
struct linux_binprm *bprm),
TP_ARGS(p, old_pid, bprm),
TP_STRUCT__entry(
__string( filename, bprm->filename )
__field( pid_t, pid )
__field( pid_t, old_pid )
),
TP_fast_assign(
__assign_str(filename, bprm->filename);
__entry->pid = p->pid;
__entry->old_pid = old_pid;
),
TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
__entry->pid, __entry->old_pid)
);
/*
* XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
* adding sched_stat support to SCHED_FIFO/RR would be welcome.
*/
DECLARE_EVENT_CLASS(sched_stat_template,
TP_PROTO(struct task_struct *tsk, u64 delay),
TP_ARGS(__perf_task(tsk), __perf_count(delay)),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( u64, delay )
),
TP_fast_assign(
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
__entry->delay = delay;
),
TP_printk("comm=%s pid=%d delay=%Lu [ns]",
__entry->comm, __entry->pid,
(unsigned long long)__entry->delay)
);
/*
* Tracepoint for accounting wait time (time the task is runnable
* but not actually running due to scheduler contention).
*/
DEFINE_EVENT(sched_stat_template, sched_stat_wait,
TP_PROTO(struct task_struct *tsk, u64 delay),
TP_ARGS(tsk, delay));
/*
* Tracepoint for accounting sleep time (time the task is not runnable,
* including iowait, see below).
*/
DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
TP_PROTO(struct task_struct *tsk, u64 delay),
TP_ARGS(tsk, delay));
/*
* Tracepoint for accounting iowait time (time the task is not runnable
* due to waiting on IO to complete).
*/
DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
TP_PROTO(struct task_struct *tsk, u64 delay),
TP_ARGS(tsk, delay));
/*
* Tracepoint for accounting blocked time (time the task is in uninterruptible).
*/
DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
TP_PROTO(struct task_struct *tsk, u64 delay),
TP_ARGS(tsk, delay));
/*
* Tracepoint for recording the cause of uninterruptible sleep.
*/
TRACE_EVENT(sched_blocked_reason,
TP_PROTO(struct task_struct *tsk),
TP_ARGS(tsk),
TP_STRUCT__entry(
__field( pid_t, pid )
__field( void*, caller )
__field( bool, io_wait )
),
TP_fast_assign(
__entry->pid = tsk->pid;
__entry->caller = (void*)get_wchan(tsk);
__entry->io_wait = tsk->in_iowait;
),
TP_printk("pid=%d iowait=%d caller=%pS", __entry->pid, __entry->io_wait, __entry->caller)
);
/*
* Tracepoint for accounting runtime (time the task is executing
* on a CPU).
*/
DECLARE_EVENT_CLASS(sched_stat_runtime,
TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
TP_ARGS(tsk, __perf_count(runtime), vruntime),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( u64, runtime )
__field( u64, vruntime )
),
TP_fast_assign(
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
__entry->runtime = runtime;
__entry->vruntime = vruntime;
),
TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
__entry->comm, __entry->pid,
(unsigned long long)__entry->runtime,
(unsigned long long)__entry->vruntime)
);
DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
TP_ARGS(tsk, runtime, vruntime));
/*
* Tracepoint for showing priority inheritance modifying a tasks
* priority.
*/
TRACE_EVENT(sched_pi_setprio,
TP_PROTO(struct task_struct *tsk, int newprio),
TP_ARGS(tsk, newprio),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, oldprio )
__field( int, newprio )
),
TP_fast_assign(
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
__entry->oldprio = tsk->prio;
__entry->newprio = newprio;
),
TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
__entry->comm, __entry->pid,
__entry->oldprio, __entry->newprio)
);
#ifdef CONFIG_DETECT_HUNG_TASK
TRACE_EVENT(sched_process_hang,
TP_PROTO(struct task_struct *tsk),
TP_ARGS(tsk),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
),
TP_fast_assign(
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
),
TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
#endif /* CONFIG_DETECT_HUNG_TASK */
DECLARE_EVENT_CLASS(sched_move_task_template,
TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
TP_ARGS(tsk, src_cpu, dst_cpu),
TP_STRUCT__entry(
__field( pid_t, pid )
__field( pid_t, tgid )
__field( pid_t, ngid )
__field( int, src_cpu )
__field( int, src_nid )
__field( int, dst_cpu )
__field( int, dst_nid )
),
TP_fast_assign(
__entry->pid = task_pid_nr(tsk);
__entry->tgid = task_tgid_nr(tsk);
__entry->ngid = task_numa_group_id(tsk);
__entry->src_cpu = src_cpu;
__entry->src_nid = cpu_to_node(src_cpu);
__entry->dst_cpu = dst_cpu;
__entry->dst_nid = cpu_to_node(dst_cpu);
),
TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
__entry->pid, __entry->tgid, __entry->ngid,
__entry->src_cpu, __entry->src_nid,
__entry->dst_cpu, __entry->dst_nid)
);
/*
* Tracks migration of tasks from one runqueue to another. Can be used to
* detect if automatic NUMA balancing is bouncing between nodes
*/
DEFINE_EVENT(sched_move_task_template, sched_move_numa,
TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
TP_ARGS(tsk, src_cpu, dst_cpu)
);
DEFINE_EVENT(sched_move_task_template, sched_stick_numa,
TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
TP_ARGS(tsk, src_cpu, dst_cpu)
);
TRACE_EVENT(sched_swap_numa,
TP_PROTO(struct task_struct *src_tsk, int src_cpu,
struct task_struct *dst_tsk, int dst_cpu),
TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),
TP_STRUCT__entry(
__field( pid_t, src_pid )
__field( pid_t, src_tgid )
__field( pid_t, src_ngid )
__field( int, src_cpu )
__field( int, src_nid )
__field( pid_t, dst_pid )
__field( pid_t, dst_tgid )
__field( pid_t, dst_ngid )
__field( int, dst_cpu )
__field( int, dst_nid )
),
TP_fast_assign(
__entry->src_pid = task_pid_nr(src_tsk);
__entry->src_tgid = task_tgid_nr(src_tsk);
__entry->src_ngid = task_numa_group_id(src_tsk);
__entry->src_cpu = src_cpu;
__entry->src_nid = cpu_to_node(src_cpu);
__entry->dst_pid = task_pid_nr(dst_tsk);
__entry->dst_tgid = task_tgid_nr(dst_tsk);
__entry->dst_ngid = task_numa_group_id(dst_tsk);
__entry->dst_cpu = dst_cpu;
__entry->dst_nid = cpu_to_node(dst_cpu);
),
TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
__entry->src_pid, __entry->src_tgid, __entry->src_ngid,
__entry->src_cpu, __entry->src_nid,
__entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
__entry->dst_cpu, __entry->dst_nid)
);
/*
* Tracepoint for showing tracked load contribution.
*/
TRACE_EVENT(sched_task_load_contrib,
TP_PROTO(struct task_struct *tsk, unsigned long load_contrib),
TP_ARGS(tsk, load_contrib),
TP_STRUCT__entry(
__array(char, comm, TASK_COMM_LEN)
__field(pid_t, pid)
__field(unsigned long, load_contrib)
),
TP_fast_assign(
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
__entry->load_contrib = load_contrib;
),
TP_printk("comm=%s pid=%d load_contrib=%lu",
__entry->comm, __entry->pid,
__entry->load_contrib)
);
/*
* Tracepoint for showing tracked task runnable ratio [0..1023].
*/
TRACE_EVENT(sched_task_runnable_ratio,
TP_PROTO(struct task_struct *tsk, unsigned long ratio),
TP_ARGS(tsk, ratio),
TP_STRUCT__entry(
__array(char, comm, TASK_COMM_LEN)
__field(pid_t, pid)
__field(unsigned long, ratio)
),
TP_fast_assign(
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
__entry->ratio = ratio;
),
TP_printk("comm=%s pid=%d ratio=%lu",
__entry->comm, __entry->pid,
__entry->ratio)
);
/*
* Tracepoint for showing tracked rq runnable ratio [0..1023].
*/
TRACE_EVENT(sched_rq_runnable_ratio,
TP_PROTO(int cpu, unsigned long ratio),
TP_ARGS(cpu, ratio),
TP_STRUCT__entry(
__field(int, cpu)
__field(unsigned long, ratio)
),
TP_fast_assign(
__entry->cpu = cpu;
__entry->ratio = ratio;
),
TP_printk("cpu=%d ratio=%lu",
__entry->cpu,
__entry->ratio)
);
/*
* Tracepoint for showing tracked rq runnable load.
*/
TRACE_EVENT(sched_rq_runnable_load,
TP_PROTO(int cpu, u64 load),
TP_ARGS(cpu, load),
TP_STRUCT__entry(
__field(int, cpu)
__field(u64, load)
),
TP_fast_assign(
__entry->cpu = cpu;
__entry->load = load;
),
TP_printk("cpu=%d load=%llu",
__entry->cpu,
__entry->load)
);
TRACE_EVENT(sched_rq_nr_running,
TP_PROTO(int cpu, unsigned int nr_running, int nr_iowait),
TP_ARGS(cpu, nr_running, nr_iowait),
TP_STRUCT__entry(
__field(int, cpu)
__field(unsigned int, nr_running)
__field(int, nr_iowait)
),
TP_fast_assign(
__entry->cpu = cpu;
__entry->nr_running = nr_running;
__entry->nr_iowait = nr_iowait;
),
TP_printk("cpu=%d nr_running=%u nr_iowait=%d",
__entry->cpu,
__entry->nr_running, __entry->nr_iowait)
);
/*
* Tracepoint for showing tracked task cpu usage ratio [0..1023].
*/
TRACE_EVENT(sched_task_usage_ratio,
TP_PROTO(struct task_struct *tsk, unsigned long ratio),
TP_ARGS(tsk, ratio),
TP_STRUCT__entry(
__array(char, comm, TASK_COMM_LEN)
__field(pid_t, pid)
__field(unsigned long, ratio)
),
TP_fast_assign(
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
__entry->ratio = ratio;
),
TP_printk("comm=%s pid=%d ratio=%lu",
__entry->comm, __entry->pid,
__entry->ratio)
);
/*
* Tracepoint for HMP (CONFIG_SCHED_HMP) task migrations.
*/
#define HMP_MIGRATE_WAKEUP 0
#define HMP_MIGRATE_FORCE 1
#define HMP_MIGRATE_OFFLOAD 2
#define HMP_MIGRATE_IDLE_PULL 3
TRACE_EVENT(sched_hmp_migrate,
TP_PROTO(struct task_struct *tsk, int dest, int force),
TP_ARGS(tsk, dest, force),
TP_STRUCT__entry(
__array(char, comm, TASK_COMM_LEN)
__field(pid_t, pid)
__field(int, dest)
__field(int, force)
),
TP_fast_assign(
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
__entry->dest = dest;
__entry->force = force;
),
TP_printk("comm=%s pid=%d dest=%d force=%d",
__entry->comm, __entry->pid,
__entry->dest, __entry->force)
);
/*
 * Tracepoint emitted when an HMP offload attempt is aborted.
 * 'label' names the abort reason; 'data' carries reason-specific detail.
 */
TRACE_EVENT(sched_hmp_offload_abort,
TP_PROTO(int cpu, int data, char *label),
TP_ARGS(cpu, data, label),
TP_STRUCT__entry(
__array(char, label, 64)
__field(int, cpu)
__field(int, data)
),
TP_fast_assign(
/*
 * strncpy() does not NUL-terminate when the source fills the
 * buffer; copy at most 63 bytes and terminate explicitly so the
 * %63s in TP_printk (a minimum-width spec, not a limit) cannot
 * read past the 64-byte array.
 */
strncpy(__entry->label, label, 63);
__entry->label[63] = '\0';
__entry->cpu = cpu;
__entry->data = data;
),
TP_printk("cpu=%d data=%d label=%63s",
__entry->cpu, __entry->data,
__entry->label)
);
TRACE_EVENT(sched_hmp_offload_succeed,
TP_PROTO(int cpu, int dest_cpu),
TP_ARGS(cpu,dest_cpu),
TP_STRUCT__entry(
__field(int, cpu)
__field(int, dest_cpu)
),
TP_fast_assign(
__entry->cpu = cpu;
__entry->dest_cpu = dest_cpu;
),
TP_printk("cpu=%d dest=%d",
__entry->cpu,
__entry->dest_cpu)
);
/*
* Tracepoint for waking a polling cpu without an IPI.
*/
TRACE_EVENT(sched_wake_idle_without_ipi,
TP_PROTO(int cpu),
TP_ARGS(cpu),
TP_STRUCT__entry(
__field( int, cpu )
),
TP_fast_assign(
__entry->cpu = cpu;
),
TP_printk("cpu=%d", __entry->cpu)
);
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

365
include/trace/events/scsi.h Normal file
View file

@ -0,0 +1,365 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM scsi
#if !defined(_TRACE_SCSI_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCSI_H
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/tracepoint.h>
#include <linux/trace_seq.h>
#define scsi_opcode_name(opcode) { opcode, #opcode }
#define show_opcode_name(val) \
__print_symbolic(val, \
scsi_opcode_name(TEST_UNIT_READY), \
scsi_opcode_name(REZERO_UNIT), \
scsi_opcode_name(REQUEST_SENSE), \
scsi_opcode_name(FORMAT_UNIT), \
scsi_opcode_name(READ_BLOCK_LIMITS), \
scsi_opcode_name(REASSIGN_BLOCKS), \
scsi_opcode_name(INITIALIZE_ELEMENT_STATUS), \
scsi_opcode_name(READ_6), \
scsi_opcode_name(WRITE_6), \
scsi_opcode_name(SEEK_6), \
scsi_opcode_name(READ_REVERSE), \
scsi_opcode_name(WRITE_FILEMARKS), \
scsi_opcode_name(SPACE), \
scsi_opcode_name(INQUIRY), \
scsi_opcode_name(RECOVER_BUFFERED_DATA), \
scsi_opcode_name(MODE_SELECT), \
scsi_opcode_name(RESERVE), \
scsi_opcode_name(RELEASE), \
scsi_opcode_name(COPY), \
scsi_opcode_name(ERASE), \
scsi_opcode_name(MODE_SENSE), \
scsi_opcode_name(START_STOP), \
scsi_opcode_name(RECEIVE_DIAGNOSTIC), \
scsi_opcode_name(SEND_DIAGNOSTIC), \
scsi_opcode_name(ALLOW_MEDIUM_REMOVAL), \
scsi_opcode_name(SET_WINDOW), \
scsi_opcode_name(READ_CAPACITY), \
scsi_opcode_name(READ_10), \
scsi_opcode_name(WRITE_10), \
scsi_opcode_name(SEEK_10), \
scsi_opcode_name(POSITION_TO_ELEMENT), \
scsi_opcode_name(WRITE_VERIFY), \
scsi_opcode_name(VERIFY), \
scsi_opcode_name(SEARCH_HIGH), \
scsi_opcode_name(SEARCH_EQUAL), \
scsi_opcode_name(SEARCH_LOW), \
scsi_opcode_name(SET_LIMITS), \
scsi_opcode_name(PRE_FETCH), \
scsi_opcode_name(READ_POSITION), \
scsi_opcode_name(SYNCHRONIZE_CACHE), \
scsi_opcode_name(LOCK_UNLOCK_CACHE), \
scsi_opcode_name(READ_DEFECT_DATA), \
scsi_opcode_name(MEDIUM_SCAN), \
scsi_opcode_name(COMPARE), \
scsi_opcode_name(COPY_VERIFY), \
scsi_opcode_name(WRITE_BUFFER), \
scsi_opcode_name(READ_BUFFER), \
scsi_opcode_name(UPDATE_BLOCK), \
scsi_opcode_name(READ_LONG), \
scsi_opcode_name(WRITE_LONG), \
scsi_opcode_name(CHANGE_DEFINITION), \
scsi_opcode_name(WRITE_SAME), \
scsi_opcode_name(UNMAP), \
scsi_opcode_name(READ_TOC), \
scsi_opcode_name(LOG_SELECT), \
scsi_opcode_name(LOG_SENSE), \
scsi_opcode_name(XDWRITEREAD_10), \
scsi_opcode_name(MODE_SELECT_10), \
scsi_opcode_name(RESERVE_10), \
scsi_opcode_name(RELEASE_10), \
scsi_opcode_name(MODE_SENSE_10), \
scsi_opcode_name(PERSISTENT_RESERVE_IN), \
scsi_opcode_name(PERSISTENT_RESERVE_OUT), \
scsi_opcode_name(VARIABLE_LENGTH_CMD), \
scsi_opcode_name(REPORT_LUNS), \
scsi_opcode_name(MAINTENANCE_IN), \
scsi_opcode_name(MAINTENANCE_OUT), \
scsi_opcode_name(MOVE_MEDIUM), \
scsi_opcode_name(EXCHANGE_MEDIUM), \
scsi_opcode_name(READ_12), \
scsi_opcode_name(WRITE_12), \
scsi_opcode_name(WRITE_VERIFY_12), \
scsi_opcode_name(SEARCH_HIGH_12), \
scsi_opcode_name(SEARCH_EQUAL_12), \
scsi_opcode_name(SEARCH_LOW_12), \
scsi_opcode_name(READ_ELEMENT_STATUS), \
scsi_opcode_name(SEND_VOLUME_TAG), \
scsi_opcode_name(WRITE_LONG_2), \
scsi_opcode_name(READ_16), \
scsi_opcode_name(WRITE_16), \
scsi_opcode_name(VERIFY_16), \
scsi_opcode_name(WRITE_SAME_16), \
scsi_opcode_name(SERVICE_ACTION_IN), \
scsi_opcode_name(SAI_READ_CAPACITY_16), \
scsi_opcode_name(SAI_GET_LBA_STATUS), \
scsi_opcode_name(MI_REPORT_TARGET_PGS), \
scsi_opcode_name(MO_SET_TARGET_PGS), \
scsi_opcode_name(READ_32), \
scsi_opcode_name(WRITE_32), \
scsi_opcode_name(WRITE_SAME_32), \
scsi_opcode_name(ATA_16), \
scsi_opcode_name(ATA_12))
#define scsi_hostbyte_name(result) { result, #result }
#define show_hostbyte_name(val) \
__print_symbolic(val, \
scsi_hostbyte_name(DID_OK), \
scsi_hostbyte_name(DID_NO_CONNECT), \
scsi_hostbyte_name(DID_BUS_BUSY), \
scsi_hostbyte_name(DID_TIME_OUT), \
scsi_hostbyte_name(DID_BAD_TARGET), \
scsi_hostbyte_name(DID_ABORT), \
scsi_hostbyte_name(DID_PARITY), \
scsi_hostbyte_name(DID_ERROR), \
scsi_hostbyte_name(DID_RESET), \
scsi_hostbyte_name(DID_BAD_INTR), \
scsi_hostbyte_name(DID_PASSTHROUGH), \
scsi_hostbyte_name(DID_SOFT_ERROR), \
scsi_hostbyte_name(DID_IMM_RETRY), \
scsi_hostbyte_name(DID_REQUEUE), \
scsi_hostbyte_name(DID_TRANSPORT_DISRUPTED), \
scsi_hostbyte_name(DID_TRANSPORT_FAILFAST))
#define scsi_driverbyte_name(result) { result, #result }
#define show_driverbyte_name(val) \
__print_symbolic(val, \
scsi_driverbyte_name(DRIVER_OK), \
scsi_driverbyte_name(DRIVER_BUSY), \
scsi_driverbyte_name(DRIVER_SOFT), \
scsi_driverbyte_name(DRIVER_MEDIA), \
scsi_driverbyte_name(DRIVER_ERROR), \
scsi_driverbyte_name(DRIVER_INVALID), \
scsi_driverbyte_name(DRIVER_TIMEOUT), \
scsi_driverbyte_name(DRIVER_HARD), \
scsi_driverbyte_name(DRIVER_SENSE))
#define scsi_msgbyte_name(result) { result, #result }
#define show_msgbyte_name(val) \
__print_symbolic(val, \
scsi_msgbyte_name(COMMAND_COMPLETE), \
scsi_msgbyte_name(EXTENDED_MESSAGE), \
scsi_msgbyte_name(SAVE_POINTERS), \
scsi_msgbyte_name(RESTORE_POINTERS), \
scsi_msgbyte_name(DISCONNECT), \
scsi_msgbyte_name(INITIATOR_ERROR), \
scsi_msgbyte_name(ABORT_TASK_SET), \
scsi_msgbyte_name(MESSAGE_REJECT), \
scsi_msgbyte_name(NOP), \
scsi_msgbyte_name(MSG_PARITY_ERROR), \
scsi_msgbyte_name(LINKED_CMD_COMPLETE), \
scsi_msgbyte_name(LINKED_FLG_CMD_COMPLETE), \
scsi_msgbyte_name(TARGET_RESET), \
scsi_msgbyte_name(ABORT_TASK), \
scsi_msgbyte_name(CLEAR_TASK_SET), \
scsi_msgbyte_name(INITIATE_RECOVERY), \
scsi_msgbyte_name(RELEASE_RECOVERY), \
scsi_msgbyte_name(CLEAR_ACA), \
scsi_msgbyte_name(LOGICAL_UNIT_RESET), \
scsi_msgbyte_name(SIMPLE_QUEUE_TAG), \
scsi_msgbyte_name(HEAD_OF_QUEUE_TAG), \
scsi_msgbyte_name(ORDERED_QUEUE_TAG), \
scsi_msgbyte_name(IGNORE_WIDE_RESIDUE), \
scsi_msgbyte_name(ACA), \
scsi_msgbyte_name(QAS_REQUEST), \
scsi_msgbyte_name(BUS_DEVICE_RESET), \
scsi_msgbyte_name(ABORT))
/* SAM status byte: map each SAM_STAT_* constant to its symbolic name. */
#define scsi_statusbyte_name(result) { result, #result }
#define show_statusbyte_name(val) \
__print_symbolic(val, \
scsi_statusbyte_name(SAM_STAT_GOOD), \
scsi_statusbyte_name(SAM_STAT_CHECK_CONDITION), \
scsi_statusbyte_name(SAM_STAT_CONDITION_MET), \
scsi_statusbyte_name(SAM_STAT_BUSY), \
scsi_statusbyte_name(SAM_STAT_INTERMEDIATE), \
scsi_statusbyte_name(SAM_STAT_INTERMEDIATE_CONDITION_MET), \
scsi_statusbyte_name(SAM_STAT_RESERVATION_CONFLICT), \
scsi_statusbyte_name(SAM_STAT_COMMAND_TERMINATED), \
scsi_statusbyte_name(SAM_STAT_TASK_SET_FULL), \
scsi_statusbyte_name(SAM_STAT_ACA_ACTIVE), \
scsi_statusbyte_name(SAM_STAT_TASK_ABORTED))
/* DIF/DIX protection operation: map each SCSI_PROT_* constant to its name. */
#define scsi_prot_op_name(result) { result, #result }
#define show_prot_op_name(val) \
__print_symbolic(val, \
scsi_prot_op_name(SCSI_PROT_NORMAL), \
scsi_prot_op_name(SCSI_PROT_READ_INSERT), \
scsi_prot_op_name(SCSI_PROT_WRITE_STRIP), \
scsi_prot_op_name(SCSI_PROT_READ_STRIP), \
scsi_prot_op_name(SCSI_PROT_WRITE_INSERT), \
scsi_prot_op_name(SCSI_PROT_READ_PASS), \
scsi_prot_op_name(SCSI_PROT_WRITE_PASS))
/* Decode a raw CDB into human-readable text in a trace_seq buffer. */
const char *scsi_trace_parse_cdb(struct trace_seq*, unsigned char*, int);
/* NOTE(review): relies on 'p' (struct trace_seq *) being in scope at the
 * TP_printk() expansion site — confirm against the ftrace output macros. */
#define __parse_cdb(cdb, len) scsi_trace_parse_cdb(p, cdb, len)
/*
 * scsi_dispatch_cmd_start - a SCSI command is being dispatched.
 *
 * Records the addressing nexus (host_no/channel/id/lun), the opcode and
 * full CDB (copied into a dynamic array sized by cmd->cmd_len), the data
 * and protection scatter-gather counts, and the protection operation.
 */
TRACE_EVENT(scsi_dispatch_cmd_start,
TP_PROTO(struct scsi_cmnd *cmd),
TP_ARGS(cmd),
TP_STRUCT__entry(
__field( unsigned int, host_no )
__field( unsigned int, channel )
__field( unsigned int, id )
__field( unsigned int, lun )
__field( unsigned int, opcode )
__field( unsigned int, cmd_len )
__field( unsigned int, data_sglen )
__field( unsigned int, prot_sglen )
__field( unsigned char, prot_op )
/* dynamic: CDB bytes, length taken from the command itself */
__dynamic_array(unsigned char, cmnd, cmd->cmd_len)
),
TP_fast_assign(
__entry->host_no = cmd->device->host->host_no;
__entry->channel = cmd->device->channel;
__entry->id = cmd->device->id;
__entry->lun = cmd->device->lun;
__entry->opcode = cmd->cmnd[0]; /* first CDB byte is the opcode */
__entry->cmd_len = cmd->cmd_len;
__entry->data_sglen = scsi_sg_count(cmd);
__entry->prot_sglen = scsi_prot_sg_count(cmd);
__entry->prot_op = scsi_get_prot_op(cmd);
memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len);
),
/* cmnd=(<opcode name> <parsed CDB> raw=<hex dump>) */
TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \
" prot_op=%s cmnd=(%s %s raw=%s)",
__entry->host_no, __entry->channel, __entry->id,
__entry->lun, __entry->data_sglen, __entry->prot_sglen,
show_prot_op_name(__entry->prot_op),
show_opcode_name(__entry->opcode),
__parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
__print_hex(__get_dynamic_array(cmnd), __entry->cmd_len))
);
/*
 * scsi_dispatch_cmd_error - dispatching a SCSI command failed.
 *
 * Same payload as scsi_dispatch_cmd_start plus 'rtn', the dispatch
 * return code passed in by the caller.
 */
TRACE_EVENT(scsi_dispatch_cmd_error,
TP_PROTO(struct scsi_cmnd *cmd, int rtn),
TP_ARGS(cmd, rtn),
TP_STRUCT__entry(
__field( unsigned int, host_no )
__field( unsigned int, channel )
__field( unsigned int, id )
__field( unsigned int, lun )
__field( int, rtn )
__field( unsigned int, opcode )
__field( unsigned int, cmd_len )
__field( unsigned int, data_sglen )
__field( unsigned int, prot_sglen )
__field( unsigned char, prot_op )
__dynamic_array(unsigned char, cmnd, cmd->cmd_len)
),
TP_fast_assign(
__entry->host_no = cmd->device->host->host_no;
__entry->channel = cmd->device->channel;
__entry->id = cmd->device->id;
__entry->lun = cmd->device->lun;
__entry->rtn = rtn;
__entry->opcode = cmd->cmnd[0]; /* first CDB byte is the opcode */
__entry->cmd_len = cmd->cmd_len;
__entry->data_sglen = scsi_sg_count(cmd);
__entry->prot_sglen = scsi_prot_sg_count(cmd);
__entry->prot_op = scsi_get_prot_op(cmd);
memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len);
),
TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \
" prot_op=%s cmnd=(%s %s raw=%s) rtn=%d",
__entry->host_no, __entry->channel, __entry->id,
__entry->lun, __entry->data_sglen, __entry->prot_sglen,
show_prot_op_name(__entry->prot_op),
show_opcode_name(__entry->opcode),
__parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
__print_hex(__get_dynamic_array(cmnd), __entry->cmd_len),
__entry->rtn)
);
/*
 * Shared event class for command completion and timeout.  Captures the
 * nexus, CDB and SG counts like the dispatch events, plus cmd->result,
 * which TP_printk decodes into driver/host/message/status bytes.
 */
DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template,
TP_PROTO(struct scsi_cmnd *cmd),
TP_ARGS(cmd),
TP_STRUCT__entry(
__field( unsigned int, host_no )
__field( unsigned int, channel )
__field( unsigned int, id )
__field( unsigned int, lun )
__field( int, result )
__field( unsigned int, opcode )
__field( unsigned int, cmd_len )
__field( unsigned int, data_sglen )
__field( unsigned int, prot_sglen )
__field( unsigned char, prot_op )
__dynamic_array(unsigned char, cmnd, cmd->cmd_len)
),
TP_fast_assign(
__entry->host_no = cmd->device->host->host_no;
__entry->channel = cmd->device->channel;
__entry->id = cmd->device->id;
__entry->lun = cmd->device->lun;
__entry->result = cmd->result;
__entry->opcode = cmd->cmnd[0];
__entry->cmd_len = cmd->cmd_len;
__entry->data_sglen = scsi_sg_count(cmd);
__entry->prot_sglen = scsi_prot_sg_count(cmd);
__entry->prot_op = scsi_get_prot_op(cmd);
memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len);
),
TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u " \
"prot_sgl=%u prot_op=%s cmnd=(%s %s raw=%s) result=(driver=" \
"%s host=%s message=%s status=%s)",
__entry->host_no, __entry->channel, __entry->id,
__entry->lun, __entry->data_sglen, __entry->prot_sglen,
show_prot_op_name(__entry->prot_op),
show_opcode_name(__entry->opcode),
__parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
__print_hex(__get_dynamic_array(cmnd), __entry->cmd_len),
/* result layout: driver byte 24-31, host 16-23, message 8-15, status 0-7 */
show_driverbyte_name(((__entry->result) >> 24) & 0xff),
show_hostbyte_name(((__entry->result) >> 16) & 0xff),
show_msgbyte_name(((__entry->result) >> 8) & 0xff),
show_statusbyte_name(__entry->result & 0xff))
);
/* Command completed (same payload as the done/timeout template). */
DEFINE_EVENT(scsi_cmd_done_timeout_template, scsi_dispatch_cmd_done,
TP_PROTO(struct scsi_cmnd *cmd),
TP_ARGS(cmd));
/* Command timed out (same payload as the done/timeout template). */
DEFINE_EVENT(scsi_cmd_done_timeout_template, scsi_dispatch_cmd_timeout,
TP_PROTO(struct scsi_cmnd *cmd),
TP_ARGS(cmd));
/* The SCSI error handler thread for 'shost' is being woken. */
TRACE_EVENT(scsi_eh_wakeup,
TP_PROTO(struct Scsi_Host *shost),
TP_ARGS(shost),
TP_STRUCT__entry(
__field( unsigned int, host_no )
),
TP_fast_assign(
__entry->host_no = shost->host_no;
),
TP_printk("host_no=%u", __entry->host_no)
);
#endif /* _TRACE_SCSI_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,125 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM signal
#if !defined(_TRACE_SIGNAL_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SIGNAL_H
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/tracepoint.h>
/*
 * Fill __entry->errno / __entry->code from a siginfo pointer.  The
 * SEND_SIG_* sentinels are not real pointers and must not be
 * dereferenced: NOINFO/FORCED imply SI_USER, PRIV implies SI_KERNEL.
 */
#define TP_STORE_SIGINFO(__entry, info) \
do { \
if (info == SEND_SIG_NOINFO || \
info == SEND_SIG_FORCED) { \
__entry->errno = 0; \
__entry->code = SI_USER; \
} else if (info == SEND_SIG_PRIV) { \
__entry->errno = 0; \
__entry->code = SI_KERNEL; \
} else { \
__entry->errno = info->si_errno; \
__entry->code = info->si_code; \
} \
} while (0)
#ifndef TRACE_HEADER_MULTI_READ
/* 'result' values reported by the signal_generate tracepoint. */
enum {
TRACE_SIGNAL_DELIVERED,
TRACE_SIGNAL_IGNORED,
TRACE_SIGNAL_ALREADY_PENDING,
TRACE_SIGNAL_OVERFLOW_FAIL,
TRACE_SIGNAL_LOSE_INFO,
};
#endif
/**
 * signal_generate - called when a signal is generated
 * @sig: signal number
 * @info: pointer to struct siginfo
 * @task: pointer to struct task_struct
 * @group: shared or private
 * @result: TRACE_SIGNAL_*
 *
 * Current process sends a 'sig' signal to 'task' process with
 * 'info' siginfo. If 'info' is SEND_SIG_NOINFO or SEND_SIG_PRIV,
 * 'info' is not a pointer and you can't access its field. Instead,
 * SEND_SIG_NOINFO means that si_code is SI_USER, and SEND_SIG_PRIV
 * means that si_code is SI_KERNEL.
 */
TRACE_EVENT(signal_generate,
TP_PROTO(int sig, struct siginfo *info, struct task_struct *task,
int group, int result),
TP_ARGS(sig, info, task, group, result),
TP_STRUCT__entry(
__field( int, sig )
__field( int, errno )
__field( int, code )
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, group )
__field( int, result )
),
TP_fast_assign(
__entry->sig = sig;
/* errno/code come from the sentinel-safe helper above */
TP_STORE_SIGINFO(__entry, info);
memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
__entry->pid = task->pid;
__entry->group = group;
__entry->result = result;
),
TP_printk("sig=%d errno=%d code=%d comm=%s pid=%d grp=%d res=%d",
__entry->sig, __entry->errno, __entry->code,
__entry->comm, __entry->pid, __entry->group,
__entry->result)
);
/**
 * signal_deliver - called when a signal is delivered
 * @sig: signal number
 * @info: pointer to struct siginfo
 * @ka: pointer to struct k_sigaction
 *
 * A 'sig' signal is delivered to current process with 'info' siginfo,
 * and it will be handled by 'ka'. ka->sa.sa_handler can be SIG_IGN or
 * SIG_DFL.
 * Note that some signals reported by signal_generate tracepoint can be
 * lost, ignored or modified (by debugger) before hitting this tracepoint.
 * This means, this can show which signals are actually delivered, but
 * matching generated signals and delivered signals may not be correct.
 */
TRACE_EVENT(signal_deliver,
TP_PROTO(int sig, struct siginfo *info, struct k_sigaction *ka),
TP_ARGS(sig, info, ka),
TP_STRUCT__entry(
__field( int, sig )
__field( int, errno )
__field( int, code )
__field( unsigned long, sa_handler )
__field( unsigned long, sa_flags )
),
TP_fast_assign(
__entry->sig = sig;
TP_STORE_SIGINFO(__entry, info);
/* handler recorded as an address; printed as %lx below */
__entry->sa_handler = (unsigned long)ka->sa.sa_handler;
__entry->sa_flags = ka->sa.sa_flags;
),
TP_printk("sig=%d errno=%d code=%d sa_handler=%lx sa_flags=%lx",
__entry->sig, __entry->errno, __entry->code,
__entry->sa_handler, __entry->sa_flags)
);
#endif /* _TRACE_SIGNAL_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,75 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM skb
#if !defined(_TRACE_SKB_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SKB_H
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/tracepoint.h>
/*
* Tracepoint for free an sk_buff:
*/
/*
 * kfree_skb - an sk_buff is being freed (dropped).
 * @skb: buffer being freed
 * @location: call-site address of the free, for locating the drop point
 */
TRACE_EVENT(kfree_skb,
TP_PROTO(struct sk_buff *skb, void *location),
TP_ARGS(skb, location),
TP_STRUCT__entry(
__field( void *, skbaddr )
__field( void *, location )
__field( unsigned short, protocol )
),
TP_fast_assign(
__entry->skbaddr = skb;
__entry->location = location;
/* skb->protocol is stored big-endian on the wire; convert for display */
__entry->protocol = ntohs(skb->protocol);
),
TP_printk("skbaddr=%p protocol=%u location=%p",
__entry->skbaddr, __entry->protocol, __entry->location)
);
/* consume_skb - an sk_buff is consumed normally (not a drop). */
TRACE_EVENT(consume_skb,
TP_PROTO(struct sk_buff *skb),
TP_ARGS(skb),
TP_STRUCT__entry(
__field( void *, skbaddr )
),
TP_fast_assign(
__entry->skbaddr = skb;
),
TP_printk("skbaddr=%p", __entry->skbaddr)
);
/* skb_copy_datagram_iovec - 'len' bytes of a datagram copied to userspace iovec. */
TRACE_EVENT(skb_copy_datagram_iovec,
TP_PROTO(const struct sk_buff *skb, int len),
TP_ARGS(skb, len),
TP_STRUCT__entry(
__field( const void *, skbaddr )
__field( int, len )
),
TP_fast_assign(
__entry->skbaddr = skb;
__entry->len = len;
),
TP_printk("skbaddr=%p len=%d", __entry->skbaddr, __entry->len)
);
#endif /* _TRACE_SKB_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,68 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sock
#if !defined(_TRACE_SOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SOCK_H
#include <net/sock.h>
#include <linux/tracepoint.h>
/*
 * sock_rcvqueue_full - an skb would overflow a socket's receive queue.
 * Records current rmem_alloc, the skb's truesize, and the sk_rcvbuf limit.
 */
TRACE_EVENT(sock_rcvqueue_full,
TP_PROTO(struct sock *sk, struct sk_buff *skb),
TP_ARGS(sk, skb),
TP_STRUCT__entry(
__field(int, rmem_alloc)
__field(unsigned int, truesize)
__field(int, sk_rcvbuf)
),
TP_fast_assign(
__entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
__entry->truesize = skb->truesize;
__entry->sk_rcvbuf = sk->sk_rcvbuf;
),
TP_printk("rmem_alloc=%d truesize=%u sk_rcvbuf=%d",
__entry->rmem_alloc, __entry->truesize, __entry->sk_rcvbuf)
);
/*
 * sock_exceed_buf_limit - a socket exceeded its protocol memory limits.
 * Records the protocol name, its sysctl_mem triple, the amount currently
 * allocated, the sysctl_rmem[0] minimum, and the socket's rmem_alloc.
 */
TRACE_EVENT(sock_exceed_buf_limit,
TP_PROTO(struct sock *sk, struct proto *prot, long allocated),
TP_ARGS(sk, prot, allocated),
TP_STRUCT__entry(
__array(char, name, 32)
__field(long *, sysctl_mem)
__field(long, allocated)
__field(int, sysctl_rmem)
__field(int, rmem_alloc)
),
TP_fast_assign(
/*
 * strncpy() does not NUL-terminate when the source fills the
 * buffer; force termination so the %s in TP_printk() below can
 * never read past the 32-byte array.
 */
strncpy(__entry->name, prot->name, 32);
__entry->name[31] = '\0';
__entry->sysctl_mem = prot->sysctl_mem;
__entry->allocated = allocated;
__entry->sysctl_rmem = prot->sysctl_rmem[0];
__entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
),
TP_printk("proto:%s sysctl_mem=%ld,%ld,%ld allocated=%ld "
"sysctl_rmem=%d rmem_alloc=%d",
__entry->name,
__entry->sysctl_mem[0],
__entry->sysctl_mem[1],
__entry->sysctl_mem[2],
__entry->allocated,
__entry->sysctl_rmem,
__entry->rmem_alloc)
);
#endif /* _TRACE_SOCK_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

156
include/trace/events/spi.h Normal file
View file

@ -0,0 +1,156 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM spi
#if !defined(_TRACE_SPI_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SPI_H
#include <linux/ktime.h>
#include <linux/tracepoint.h>
/* Event class for SPI master state changes; payload is just the bus number. */
DECLARE_EVENT_CLASS(spi_master,
TP_PROTO(struct spi_master *master),
TP_ARGS(master),
TP_STRUCT__entry(
__field( int, bus_num )
),
TP_fast_assign(
__entry->bus_num = master->bus_num;
),
TP_printk("spi%d", (int)__entry->bus_num)
);
/* Master queue went idle. */
DEFINE_EVENT(spi_master, spi_master_idle,
TP_PROTO(struct spi_master *master),
TP_ARGS(master)
);
/* Master queue became busy. */
DEFINE_EVENT(spi_master, spi_master_busy,
TP_PROTO(struct spi_master *master),
TP_ARGS(master)
);
/*
 * Event class for SPI message lifecycle; identifies the message by bus
 * number, chip select and the message pointer itself.
 */
DECLARE_EVENT_CLASS(spi_message,
TP_PROTO(struct spi_message *msg),
TP_ARGS(msg),
TP_STRUCT__entry(
__field( int, bus_num )
__field( int, chip_select )
__field( struct spi_message *, msg )
),
TP_fast_assign(
__entry->bus_num = msg->spi->master->bus_num;
__entry->chip_select = msg->spi->chip_select;
__entry->msg = msg;
),
TP_printk("spi%d.%d %p", (int)__entry->bus_num,
(int)__entry->chip_select,
(struct spi_message *)__entry->msg)
);
/* Message submitted to the queue. */
DEFINE_EVENT(spi_message, spi_message_submit,
TP_PROTO(struct spi_message *msg),
TP_ARGS(msg)
);
/* Message processing started. */
DEFINE_EVENT(spi_message, spi_message_start,
TP_PROTO(struct spi_message *msg),
TP_ARGS(msg)
);
/*
 * spi_message_done - a message finished; also records actual vs total
 * frame length (printed as "len=actual/frame").
 */
TRACE_EVENT(spi_message_done,
TP_PROTO(struct spi_message *msg),
TP_ARGS(msg),
TP_STRUCT__entry(
__field( int, bus_num )
__field( int, chip_select )
__field( struct spi_message *, msg )
__field( unsigned, frame )
__field( unsigned, actual )
),
TP_fast_assign(
__entry->bus_num = msg->spi->master->bus_num;
__entry->chip_select = msg->spi->chip_select;
__entry->msg = msg;
__entry->frame = msg->frame_length;
__entry->actual = msg->actual_length;
),
TP_printk("spi%d.%d %p len=%u/%u", (int)__entry->bus_num,
(int)__entry->chip_select,
(struct spi_message *)__entry->msg,
(unsigned)__entry->actual, (unsigned)__entry->frame)
);
/*
 * Event class for individual transfers within a message; identifies the
 * transfer by bus number, chip select, the transfer pointer and length.
 */
DECLARE_EVENT_CLASS(spi_transfer,
TP_PROTO(struct spi_message *msg, struct spi_transfer *xfer),
TP_ARGS(msg, xfer),
TP_STRUCT__entry(
__field( int, bus_num )
__field( int, chip_select )
__field( struct spi_transfer *, xfer )
__field( int, len )
),
TP_fast_assign(
__entry->bus_num = msg->spi->master->bus_num;
__entry->chip_select = msg->spi->chip_select;
__entry->xfer = xfer;
__entry->len = xfer->len;
),
/* cast fixed: xfer is a struct spi_transfer *, not spi_message *
 * (copy-paste from the spi_message class; harmless for %p but wrong) */
TP_printk("spi%d.%d %p len=%d", (int)__entry->bus_num,
(int)__entry->chip_select,
(struct spi_transfer *)__entry->xfer,
(int)__entry->len)
);
/* Transfer started. */
DEFINE_EVENT(spi_transfer, spi_transfer_start,
TP_PROTO(struct spi_message *msg, struct spi_transfer *xfer),
TP_ARGS(msg, xfer)
);
/* Transfer finished. */
DEFINE_EVENT(spi_transfer, spi_transfer_stop,
TP_PROTO(struct spi_message *msg, struct spi_transfer *xfer),
TP_ARGS(msg, xfer)
);
#endif /* _TRACE_SPI_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,311 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sunrpc
#if !defined(_TRACE_SUNRPC_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SUNRPC_H
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/clnt.h>
#include <net/tcp_states.h>
#include <linux/net.h>
#include <linux/tracepoint.h>
/*
 * Event class reporting an RPC task's current tk_status, keyed by
 * task id and owning client id.
 */
DECLARE_EVENT_CLASS(rpc_task_status,
TP_PROTO(struct rpc_task *task),
TP_ARGS(task),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(int, status)
),
TP_fast_assign(
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__entry->status = task->tk_status;
),
TP_printk("task:%u@%u, status %d",
__entry->task_id, __entry->client_id,
__entry->status)
);
/* Status after the CALL phase. */
DEFINE_EVENT(rpc_task_status, rpc_call_status,
TP_PROTO(struct rpc_task *task),
TP_ARGS(task)
);
/* Status after the BIND phase. */
DEFINE_EVENT(rpc_task_status, rpc_bind_status,
TP_PROTO(struct rpc_task *task),
TP_ARGS(task)
);
/* Connect-phase result; 'status' is supplied by the caller, not tk_status. */
TRACE_EVENT(rpc_connect_status,
TP_PROTO(struct rpc_task *task, int status),
TP_ARGS(task, status),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(int, status)
),
TP_fast_assign(
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__entry->status = status;
),
TP_printk("task:%u@%u, status %d",
__entry->task_id, __entry->client_id,
__entry->status)
);
/*
 * Event class for a running RPC task; 'action' is the state-machine
 * callback about to run (printed with %pf).  clnt may be NULL, in which
 * case client_id is recorded as -1.
 */
DECLARE_EVENT_CLASS(rpc_task_running,
TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
TP_ARGS(clnt, task, action),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(const void *, action)
__field(unsigned long, runstate)
__field(int, status)
__field(unsigned short, flags)
),
TP_fast_assign(
__entry->client_id = clnt ? clnt->cl_clid : -1;
__entry->task_id = task->tk_pid;
__entry->action = action;
__entry->runstate = task->tk_runstate;
__entry->status = task->tk_status;
__entry->flags = task->tk_flags;
),
TP_printk("task:%u@%d flags=%4.4x state=%4.4lx status=%d action=%pf",
__entry->task_id, __entry->client_id,
__entry->flags,
__entry->runstate,
__entry->status,
__entry->action
)
);
/* Task is starting. */
DEFINE_EVENT(rpc_task_running, rpc_task_begin,
TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
TP_ARGS(clnt, task, action)
);
/* A state-machine action is about to run. */
DEFINE_EVENT(rpc_task_running, rpc_task_run_action,
TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
TP_ARGS(clnt, task, action)
);
/* Task has completed. */
DEFINE_EVENT(rpc_task_running, rpc_task_complete,
TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
TP_ARGS(clnt, task, action)
);
/*
 * Event class for an RPC task entering/leaving a wait queue; records the
 * queue name as a trace string plus the task's timeout and state.
 */
DECLARE_EVENT_CLASS(rpc_task_queued,
TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q),
TP_ARGS(clnt, task, q),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(unsigned long, timeout)
__field(unsigned long, runstate)
__field(int, status)
__field(unsigned short, flags)
__string(q_name, rpc_qname(q))
),
TP_fast_assign(
__entry->client_id = clnt->cl_clid;
__entry->task_id = task->tk_pid;
__entry->timeout = task->tk_timeout;
__entry->runstate = task->tk_runstate;
__entry->status = task->tk_status;
__entry->flags = task->tk_flags;
__assign_str(q_name, rpc_qname(q));
),
TP_printk("task:%u@%u flags=%4.4x state=%4.4lx status=%d timeout=%lu queue=%s",
__entry->task_id, __entry->client_id,
__entry->flags,
__entry->runstate,
__entry->status,
__entry->timeout,
__get_str(q_name)
)
);
/* Task put to sleep on queue q. */
DEFINE_EVENT(rpc_task_queued, rpc_task_sleep,
TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q),
TP_ARGS(clnt, task, q)
);
/* Task woken from queue q. */
DEFINE_EVENT(rpc_task_queued, rpc_task_wakeup,
TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q),
TP_ARGS(clnt, task, q)
);
/*
 * Map BSD socket layer states (SS_*) to names for trace output.
 * Fix: the original table carried stray trailing commas *inside* the
 * "CONNECTING," and "CONNECTED," strings, which leaked into the trace
 * text; they are removed here.
 */
#define rpc_show_socket_state(state) \
__print_symbolic(state, \
{ SS_FREE, "FREE" }, \
{ SS_UNCONNECTED, "UNCONNECTED" }, \
{ SS_CONNECTING, "CONNECTING" }, \
{ SS_CONNECTED, "CONNECTED" }, \
{ SS_DISCONNECTING, "DISCONNECTING" })
/* Map TCP connection states (TCP_*) to names for trace output. */
#define rpc_show_sock_state(state) \
__print_symbolic(state, \
{ TCP_ESTABLISHED, "ESTABLISHED" }, \
{ TCP_SYN_SENT, "SYN_SENT" }, \
{ TCP_SYN_RECV, "SYN_RECV" }, \
{ TCP_FIN_WAIT1, "FIN_WAIT1" }, \
{ TCP_FIN_WAIT2, "FIN_WAIT2" }, \
{ TCP_TIME_WAIT, "TIME_WAIT" }, \
{ TCP_CLOSE, "CLOSE" }, \
{ TCP_CLOSE_WAIT, "CLOSE_WAIT" }, \
{ TCP_LAST_ACK, "LAST_ACK" }, \
{ TCP_LISTEN, "LISTEN" }, \
{ TCP_CLOSING, "CLOSING" })
/*
 * Event class for RPC transport socket state changes: records both the
 * BSD socket state and the TCP sock state, the socket inode number, and
 * the destination address/port strings from the xprt.
 */
DECLARE_EVENT_CLASS(xs_socket_event,
TP_PROTO(
struct rpc_xprt *xprt,
struct socket *socket
),
TP_ARGS(xprt, socket),
TP_STRUCT__entry(
__field(unsigned int, socket_state)
__field(unsigned int, sock_state)
__field(unsigned long long, ino)
__string(dstaddr,
xprt->address_strings[RPC_DISPLAY_ADDR])
__string(dstport,
xprt->address_strings[RPC_DISPLAY_PORT])
),
TP_fast_assign(
struct inode *inode = SOCK_INODE(socket);
__entry->socket_state = socket->state;
__entry->sock_state = socket->sk->sk_state;
__entry->ino = (unsigned long long)inode->i_ino;
__assign_str(dstaddr,
xprt->address_strings[RPC_DISPLAY_ADDR]);
__assign_str(dstport,
xprt->address_strings[RPC_DISPLAY_PORT]);
),
TP_printk(
"socket:[%llu] dstaddr=%s/%s "
"state=%u (%s) sk_state=%u (%s)",
__entry->ino, __get_str(dstaddr), __get_str(dstport),
__entry->socket_state,
rpc_show_socket_state(__entry->socket_state),
__entry->sock_state,
rpc_show_sock_state(__entry->sock_state)
)
);
/* Convenience wrapper: define an event in the xs_socket_event class. */
#define DEFINE_RPC_SOCKET_EVENT(name) \
DEFINE_EVENT(xs_socket_event, name, \
TP_PROTO( \
struct rpc_xprt *xprt, \
struct socket *socket \
), \
TP_ARGS(xprt, socket))
/*
 * Like xs_socket_event but for operations that complete with an error
 * code; adds the 'error' field to the record.
 */
DECLARE_EVENT_CLASS(xs_socket_event_done,
TP_PROTO(
struct rpc_xprt *xprt,
struct socket *socket,
int error
),
TP_ARGS(xprt, socket, error),
TP_STRUCT__entry(
__field(int, error)
__field(unsigned int, socket_state)
__field(unsigned int, sock_state)
__field(unsigned long long, ino)
__string(dstaddr,
xprt->address_strings[RPC_DISPLAY_ADDR])
__string(dstport,
xprt->address_strings[RPC_DISPLAY_PORT])
),
TP_fast_assign(
struct inode *inode = SOCK_INODE(socket);
__entry->socket_state = socket->state;
__entry->sock_state = socket->sk->sk_state;
__entry->ino = (unsigned long long)inode->i_ino;
__entry->error = error;
__assign_str(dstaddr,
xprt->address_strings[RPC_DISPLAY_ADDR]);
__assign_str(dstport,
xprt->address_strings[RPC_DISPLAY_PORT]);
),
TP_printk(
"error=%d socket:[%llu] dstaddr=%s/%s "
"state=%u (%s) sk_state=%u (%s)",
__entry->error,
__entry->ino, __get_str(dstaddr), __get_str(dstport),
__entry->socket_state,
rpc_show_socket_state(__entry->socket_state),
__entry->sock_state,
rpc_show_sock_state(__entry->sock_state)
)
);
/* Convenience wrapper: define an event in the xs_socket_event_done class. */
#define DEFINE_RPC_SOCKET_EVENT_DONE(name) \
DEFINE_EVENT(xs_socket_event_done, name, \
TP_PROTO( \
struct rpc_xprt *xprt, \
struct socket *socket, \
int error \
), \
TP_ARGS(xprt, socket, error))
/* Concrete socket events; the _DONE variants carry an error code. */
DEFINE_RPC_SOCKET_EVENT(rpc_socket_state_change);
DEFINE_RPC_SOCKET_EVENT_DONE(rpc_socket_connect);
DEFINE_RPC_SOCKET_EVENT_DONE(rpc_socket_error);
DEFINE_RPC_SOCKET_EVENT_DONE(rpc_socket_reset_connection);
DEFINE_RPC_SOCKET_EVENT(rpc_socket_close);
DEFINE_RPC_SOCKET_EVENT(rpc_socket_shutdown);
#endif /* _TRACE_SUNRPC_H */
#include <trace/define_trace.h>

View file

@ -0,0 +1,46 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM swiotlb
#if !defined(_TRACE_SWIOTLB_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SWIOTLB_H
#include <linux/tracepoint.h>
/*
 * swiotlb_bounced - a DMA mapping was bounced through the software IOTLB.
 * Records the device name, its DMA mask (0 when unset), the device
 * address, the transfer size, and whether swiotlb_force was in effect.
 */
TRACE_EVENT(swiotlb_bounced,
TP_PROTO(struct device *dev,
dma_addr_t dev_addr,
size_t size,
int swiotlb_force),
TP_ARGS(dev, dev_addr, size, swiotlb_force),
TP_STRUCT__entry(
__string( dev_name, dev_name(dev) )
__field( u64, dma_mask )
__field( dma_addr_t, dev_addr )
__field( size_t, size )
__field( int, swiotlb_force )
),
TP_fast_assign(
__assign_str(dev_name, dev_name(dev));
/* dma_mask may be NULL for devices without DMA; record 0 then */
__entry->dma_mask = (dev->dma_mask ? *dev->dma_mask : 0);
__entry->dev_addr = dev_addr;
__entry->size = size;
__entry->swiotlb_force = swiotlb_force;
),
TP_printk("dev_name: %s dma_mask=%llx dev_addr=%llx "
"size=%zu %s",
__get_str(dev_name),
__entry->dma_mask,
(unsigned long long)__entry->dev_addr,
__entry->size,
__entry->swiotlb_force ? "swiotlb_force" : "" )
);
#endif /* _TRACE_SWIOTLB_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,72 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM raw_syscalls
#define TRACE_INCLUDE_FILE syscalls
#if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_EVENTS_SYSCALLS_H
#include <linux/tracepoint.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
/*
 * sys_enter - system call entry; records the syscall number and its six
 * raw arguments.  Uses TRACE_EVENT_FN so registration can hook
 * syscall_regfunc/syscall_unregfunc to enable per-task syscall tracing.
 */
TRACE_EVENT_FN(sys_enter,
TP_PROTO(struct pt_regs *regs, long id),
TP_ARGS(regs, id),
TP_STRUCT__entry(
__field( long, id )
__array( unsigned long, args, 6 )
),
TP_fast_assign(
__entry->id = id;
syscall_get_arguments(current, regs, 0, 6, __entry->args);
),
TP_printk("NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)",
__entry->id,
__entry->args[0], __entry->args[1], __entry->args[2],
__entry->args[3], __entry->args[4], __entry->args[5]),
syscall_regfunc, syscall_unregfunc
);
/* Allow any capability to use this event (not just CAP_SYS_ADMIN). */
TRACE_EVENT_FLAGS(sys_enter, TRACE_EVENT_FL_CAP_ANY)
/*
 * sys_exit - system call exit; records the syscall number (re-read from
 * regs) and the return value.
 */
TRACE_EVENT_FN(sys_exit,
TP_PROTO(struct pt_regs *regs, long ret),
TP_ARGS(regs, ret),
TP_STRUCT__entry(
__field( long, id )
__field( long, ret )
),
TP_fast_assign(
__entry->id = syscall_get_nr(current, regs);
__entry->ret = ret;
),
TP_printk("NR %ld = %ld",
__entry->id, __entry->ret),
syscall_regfunc, syscall_unregfunc
);
/* Allow any capability to use this event (not just CAP_SYS_ADMIN). */
TRACE_EVENT_FLAGS(sys_exit, TRACE_EVENT_FL_CAP_ANY)
#endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */
#endif /* _TRACE_EVENTS_SYSCALLS_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,214 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM target
#if !defined(_TRACE_TARGET_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_TARGET_H
#include <linux/tracepoint.h>
#include <linux/trace_seq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
/* cribbed verbatim from <trace/event/scsi.h> */
/* SCSI opcode: map each opcode constant to its symbolic name for tracing. */
#define scsi_opcode_name(opcode) { opcode, #opcode }
#define show_opcode_name(val) \
__print_symbolic(val, \
scsi_opcode_name(TEST_UNIT_READY), \
scsi_opcode_name(REZERO_UNIT), \
scsi_opcode_name(REQUEST_SENSE), \
scsi_opcode_name(FORMAT_UNIT), \
scsi_opcode_name(READ_BLOCK_LIMITS), \
scsi_opcode_name(REASSIGN_BLOCKS), \
scsi_opcode_name(INITIALIZE_ELEMENT_STATUS), \
scsi_opcode_name(READ_6), \
scsi_opcode_name(WRITE_6), \
scsi_opcode_name(SEEK_6), \
scsi_opcode_name(READ_REVERSE), \
scsi_opcode_name(WRITE_FILEMARKS), \
scsi_opcode_name(SPACE), \
scsi_opcode_name(INQUIRY), \
scsi_opcode_name(RECOVER_BUFFERED_DATA), \
scsi_opcode_name(MODE_SELECT), \
scsi_opcode_name(RESERVE), \
scsi_opcode_name(RELEASE), \
scsi_opcode_name(COPY), \
scsi_opcode_name(ERASE), \
scsi_opcode_name(MODE_SENSE), \
scsi_opcode_name(START_STOP), \
scsi_opcode_name(RECEIVE_DIAGNOSTIC), \
scsi_opcode_name(SEND_DIAGNOSTIC), \
scsi_opcode_name(ALLOW_MEDIUM_REMOVAL), \
scsi_opcode_name(SET_WINDOW), \
scsi_opcode_name(READ_CAPACITY), \
scsi_opcode_name(READ_10), \
scsi_opcode_name(WRITE_10), \
scsi_opcode_name(SEEK_10), \
scsi_opcode_name(POSITION_TO_ELEMENT), \
scsi_opcode_name(WRITE_VERIFY), \
scsi_opcode_name(VERIFY), \
scsi_opcode_name(SEARCH_HIGH), \
scsi_opcode_name(SEARCH_EQUAL), \
scsi_opcode_name(SEARCH_LOW), \
scsi_opcode_name(SET_LIMITS), \
scsi_opcode_name(PRE_FETCH), \
scsi_opcode_name(READ_POSITION), \
scsi_opcode_name(SYNCHRONIZE_CACHE), \
scsi_opcode_name(LOCK_UNLOCK_CACHE), \
scsi_opcode_name(READ_DEFECT_DATA), \
scsi_opcode_name(MEDIUM_SCAN), \
scsi_opcode_name(COMPARE), \
scsi_opcode_name(COPY_VERIFY), \
scsi_opcode_name(WRITE_BUFFER), \
scsi_opcode_name(READ_BUFFER), \
scsi_opcode_name(UPDATE_BLOCK), \
scsi_opcode_name(READ_LONG), \
scsi_opcode_name(WRITE_LONG), \
scsi_opcode_name(CHANGE_DEFINITION), \
scsi_opcode_name(WRITE_SAME), \
scsi_opcode_name(UNMAP), \
scsi_opcode_name(READ_TOC), \
scsi_opcode_name(LOG_SELECT), \
scsi_opcode_name(LOG_SENSE), \
scsi_opcode_name(XDWRITEREAD_10), \
scsi_opcode_name(MODE_SELECT_10), \
scsi_opcode_name(RESERVE_10), \
scsi_opcode_name(RELEASE_10), \
scsi_opcode_name(MODE_SENSE_10), \
scsi_opcode_name(PERSISTENT_RESERVE_IN), \
scsi_opcode_name(PERSISTENT_RESERVE_OUT), \
scsi_opcode_name(VARIABLE_LENGTH_CMD), \
scsi_opcode_name(REPORT_LUNS), \
scsi_opcode_name(MAINTENANCE_IN), \
scsi_opcode_name(MAINTENANCE_OUT), \
scsi_opcode_name(MOVE_MEDIUM), \
scsi_opcode_name(EXCHANGE_MEDIUM), \
scsi_opcode_name(READ_12), \
scsi_opcode_name(WRITE_12), \
scsi_opcode_name(WRITE_VERIFY_12), \
scsi_opcode_name(SEARCH_HIGH_12), \
scsi_opcode_name(SEARCH_EQUAL_12), \
scsi_opcode_name(SEARCH_LOW_12), \
scsi_opcode_name(READ_ELEMENT_STATUS), \
scsi_opcode_name(SEND_VOLUME_TAG), \
scsi_opcode_name(WRITE_LONG_2), \
scsi_opcode_name(READ_16), \
scsi_opcode_name(WRITE_16), \
scsi_opcode_name(VERIFY_16), \
scsi_opcode_name(WRITE_SAME_16), \
scsi_opcode_name(SERVICE_ACTION_IN), \
scsi_opcode_name(SAI_READ_CAPACITY_16), \
scsi_opcode_name(SAI_GET_LBA_STATUS), \
scsi_opcode_name(MI_REPORT_TARGET_PGS), \
scsi_opcode_name(MO_SET_TARGET_PGS), \
scsi_opcode_name(READ_32), \
scsi_opcode_name(WRITE_32), \
scsi_opcode_name(WRITE_SAME_32), \
scsi_opcode_name(ATA_16), \
scsi_opcode_name(ATA_12))
/* SAM task attribute (queue tag message) to name. */
#define show_task_attribute_name(val) \
__print_symbolic(val, \
{ MSG_SIMPLE_TAG, "SIMPLE" }, \
{ MSG_HEAD_TAG, "HEAD" }, \
{ MSG_ORDERED_TAG, "ORDERED" }, \
{ MSG_ACA_TAG, "ACA" } )
/* SAM status byte to human-readable name. */
#define show_scsi_status_name(val) \
__print_symbolic(val, \
{ SAM_STAT_GOOD, "GOOD" }, \
{ SAM_STAT_CHECK_CONDITION, "CHECK CONDITION" }, \
{ SAM_STAT_CONDITION_MET, "CONDITION MET" }, \
{ SAM_STAT_BUSY, "BUSY" }, \
{ SAM_STAT_INTERMEDIATE, "INTERMEDIATE" }, \
{ SAM_STAT_INTERMEDIATE_CONDITION_MET, "INTERMEDIATE CONDITION MET" }, \
{ SAM_STAT_RESERVATION_CONFLICT, "RESERVATION CONFLICT" }, \
{ SAM_STAT_COMMAND_TERMINATED, "COMMAND TERMINATED" }, \
{ SAM_STAT_TASK_SET_FULL, "TASK SET FULL" }, \
{ SAM_STAT_ACA_ACTIVE, "ACA ACTIVE" }, \
{ SAM_STAT_TASK_ABORTED, "TASK ABORTED" } )
/*
 * target_sequencer_start - an SCSI target command enters the sequencer.
 * Records the initiator name, LUN, opcode, data length, task attribute
 * and the raw CDB (TCM_MAX_COMMAND_SIZE bytes).
 */
TRACE_EVENT(target_sequencer_start,
TP_PROTO(struct se_cmd *cmd),
TP_ARGS(cmd),
TP_STRUCT__entry(
__field( unsigned int, unpacked_lun )
__field( unsigned int, opcode )
__field( unsigned int, data_length )
__field( unsigned int, task_attribute )
__array( unsigned char, cdb, TCM_MAX_COMMAND_SIZE )
__string( initiator, cmd->se_sess->se_node_acl->initiatorname )
),
TP_fast_assign(
__entry->unpacked_lun = cmd->orig_fe_lun;
__entry->opcode = cmd->t_task_cdb[0];
__entry->data_length = cmd->data_length;
__entry->task_attribute = cmd->sam_task_attr;
memcpy(__entry->cdb, cmd->t_task_cdb, TCM_MAX_COMMAND_SIZE);
__assign_str(initiator, cmd->se_sess->se_node_acl->initiatorname);
),
/* NOTE(review): __print_hex dumps a fixed 16 bytes even though the
 * stored CDB is TCM_MAX_COMMAND_SIZE — confirm this truncation is
 * intentional.  C: is the control byte (last CDB byte for <=16-byte
 * CDBs, cdb[1] otherwise). */
TP_printk("%s -> LUN %03u %s data_length %6u CDB %s (TA:%s C:%02x)",
__get_str(initiator), __entry->unpacked_lun,
show_opcode_name(__entry->opcode),
__entry->data_length, __print_hex(__entry->cdb, 16),
show_task_attribute_name(__entry->task_attribute),
scsi_command_size(__entry->cdb) <= 16 ?
__entry->cdb[scsi_command_size(__entry->cdb) - 1] :
__entry->cdb[1]
)
);
/*
 * target_cmd_complete - an SCSI target command completes back to the
 * initiator.  Adds SCSI status and, on CHECK CONDITION, up to 18 bytes
 * of sense data (sense length derived from the additional-sense-length
 * field plus the 8-byte fixed header).
 */
TRACE_EVENT(target_cmd_complete,
TP_PROTO(struct se_cmd *cmd),
TP_ARGS(cmd),
TP_STRUCT__entry(
__field( unsigned int, unpacked_lun )
__field( unsigned int, opcode )
__field( unsigned int, data_length )
__field( unsigned int, task_attribute )
__field( unsigned char, scsi_status )
__field( unsigned char, sense_length )
__array( unsigned char, cdb, TCM_MAX_COMMAND_SIZE )
__array( unsigned char, sense_data, 18 )
__string(initiator, cmd->se_sess->se_node_acl->initiatorname)
),
TP_fast_assign(
__entry->unpacked_lun = cmd->orig_fe_lun;
__entry->opcode = cmd->t_task_cdb[0];
__entry->data_length = cmd->data_length;
__entry->task_attribute = cmd->sam_task_attr;
__entry->scsi_status = cmd->scsi_status;
/* sense bytes only exist for CHECK CONDITION; clamp to 18 */
__entry->sense_length = cmd->scsi_status == SAM_STAT_CHECK_CONDITION ?
min(18, ((u8 *) cmd->sense_buffer)[SPC_ADD_SENSE_LEN_OFFSET] + 8) : 0;
memcpy(__entry->cdb, cmd->t_task_cdb, TCM_MAX_COMMAND_SIZE);
memcpy(__entry->sense_data, cmd->sense_buffer, __entry->sense_length);
__assign_str(initiator, cmd->se_sess->se_node_acl->initiatorname);
),
TP_printk("%s <- LUN %03u status %s (sense len %d%s%s) %s data_length %6u CDB %s (TA:%s C:%02x)",
__get_str(initiator), __entry->unpacked_lun,
show_scsi_status_name(__entry->scsi_status),
__entry->sense_length, __entry->sense_length ? " / " : "",
__print_hex(__entry->sense_data, __entry->sense_length),
show_opcode_name(__entry->opcode),
__entry->data_length, __print_hex(__entry->cdb, 16),
show_task_attribute_name(__entry->task_attribute),
scsi_command_size(__entry->cdb) <= 16 ?
__entry->cdb[scsi_command_size(__entry->cdb) - 1] :
__entry->cdb[1]
)
);
#endif /* _TRACE_TARGET_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,61 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM task
#if !defined(_TRACE_TASK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_TASK_H
#include <linux/tracepoint.h>
/*
 * task_newtask - a new task was created (fork/clone path).
 * Records pid, comm, the clone flags and the oom_score_adj inherited
 * from the signal struct.
 */
TRACE_EVENT(task_newtask,
TP_PROTO(struct task_struct *task, unsigned long clone_flags),
TP_ARGS(task, clone_flags),
TP_STRUCT__entry(
__field( pid_t, pid)
__array( char, comm, TASK_COMM_LEN)
__field( unsigned long, clone_flags)
__field( short, oom_score_adj)
),
TP_fast_assign(
__entry->pid = task->pid;
memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
__entry->clone_flags = clone_flags;
__entry->oom_score_adj = task->signal->oom_score_adj;
),
TP_printk("pid=%d comm=%s clone_flags=%lx oom_score_adj=%hd",
__entry->pid, __entry->comm,
__entry->clone_flags, __entry->oom_score_adj)
);
/*
 * task_rename - a task's comm is being changed (e.g. via prctl/exec).
 * Records pid, the old and new comm, and oom_score_adj.
 *
 * Consistency fix: TP_fast_assign used bare 'entry->' for the two
 * memcpy()s, which only works because the ftrace macros internally
 * alias __entry to entry; use the documented '__entry->' form like
 * every other event in this file.
 */
TRACE_EVENT(task_rename,
TP_PROTO(struct task_struct *task, const char *comm),
TP_ARGS(task, comm),
TP_STRUCT__entry(
__field( pid_t, pid)
__array( char, oldcomm, TASK_COMM_LEN)
__array( char, newcomm, TASK_COMM_LEN)
__field( short, oom_score_adj)
),
TP_fast_assign(
__entry->pid = task->pid;
memcpy(__entry->oldcomm, task->comm, TASK_COMM_LEN);
memcpy(__entry->newcomm, comm, TASK_COMM_LEN);
__entry->oom_score_adj = task->signal->oom_score_adj;
),
TP_printk("pid=%d oldcomm=%s newcomm=%s oom_score_adj=%hd",
__entry->pid, __entry->oldcomm,
__entry->newcomm, __entry->oom_score_adj)
);
#endif
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,83 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM thermal
#if !defined(_TRACE_THERMAL_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_THERMAL_H
#include <linux/thermal.h>
#include <linux/tracepoint.h>
/*
 * thermal_temperature - emitted when a thermal zone's temperature is updated;
 * records the zone name/id plus previous and current temperature.
 */
TRACE_EVENT(thermal_temperature,
TP_PROTO(struct thermal_zone_device *tz),
TP_ARGS(tz),
TP_STRUCT__entry(
__string(thermal_zone, tz->type)
__field(int, id)
__field(int, temp_prev)
__field(int, temp)
),
TP_fast_assign(
__assign_str(thermal_zone, tz->type);
__entry->id = tz->id;
__entry->temp_prev = tz->last_temperature;
__entry->temp = tz->temperature;
),
TP_printk("thermal_zone=%s id=%d temp_prev=%d temp=%d",
__get_str(thermal_zone), __entry->id, __entry->temp_prev,
__entry->temp)
);
/*
 * cdev_update - emitted when a cooling device is set to a new target state.
 */
TRACE_EVENT(cdev_update,
TP_PROTO(struct thermal_cooling_device *cdev, unsigned long target),
TP_ARGS(cdev, target),
TP_STRUCT__entry(
__string(type, cdev->type)
__field(unsigned long, target)
),
TP_fast_assign(
__assign_str(type, cdev->type);
__entry->target = target;
),
TP_printk("type=%s target=%lu", __get_str(type), __entry->target)
);
/*
 * thermal_zone_trip - emitted when a thermal zone crosses a trip point;
 * records zone name/id, trip index and trip type.
 */
TRACE_EVENT(thermal_zone_trip,
TP_PROTO(struct thermal_zone_device *tz, int trip,
enum thermal_trip_type trip_type),
TP_ARGS(tz, trip, trip_type),
TP_STRUCT__entry(
__string(thermal_zone, tz->type)
__field(int, id)
__field(int, trip)
__field(enum thermal_trip_type, trip_type)
),
TP_fast_assign(
__assign_str(thermal_zone, tz->type);
__entry->id = tz->id;
__entry->trip = trip;
__entry->trip_type = trip_type;
),
TP_printk("thermal_zone=%s id=%d trip=%d trip_type=%d",
__get_str(thermal_zone), __entry->id, __entry->trip,
__entry->trip_type)
);
#endif /* _TRACE_THERMAL_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,88 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM thp
#if !defined(_TRACE_THP_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_THP_H
#include <linux/types.h>
#include <linux/tracepoint.h>
/*
 * hugepage_invalidate - records the address and raw pte value when a
 * hugepage mapping is invalidated.
 */
TRACE_EVENT(hugepage_invalidate,
TP_PROTO(unsigned long addr, unsigned long pte),
TP_ARGS(addr, pte),
TP_STRUCT__entry(
__field(unsigned long, addr)
__field(unsigned long, pte)
),
TP_fast_assign(
__entry->addr = addr;
__entry->pte = pte;
),
TP_printk("hugepage invalidate at addr 0x%lx and pte = 0x%lx",
__entry->addr, __entry->pte)
);
/*
 * hugepage_set_pmd - records the address and pmd value when a hugepage
 * pmd entry is installed.
 */
TRACE_EVENT(hugepage_set_pmd,
TP_PROTO(unsigned long addr, unsigned long pmd),
TP_ARGS(addr, pmd),
TP_STRUCT__entry(
__field(unsigned long, addr)
__field(unsigned long, pmd)
),
TP_fast_assign(
__entry->addr = addr;
__entry->pmd = pmd;
),
TP_printk("Set pmd with 0x%lx with 0x%lx", __entry->addr, __entry->pmd)
);
/*
 * hugepage_update - records a hugepage pte update together with the bit
 * masks being cleared (clr) and set (set).
 */
TRACE_EVENT(hugepage_update,
TP_PROTO(unsigned long addr, unsigned long pte, unsigned long clr, unsigned long set),
TP_ARGS(addr, pte, clr, set),
TP_STRUCT__entry(
__field(unsigned long, addr)
__field(unsigned long, pte)
__field(unsigned long, clr)
__field(unsigned long, set)
),
TP_fast_assign(
__entry->addr = addr;
__entry->pte = pte;
__entry->clr = clr;
__entry->set = set;
),
TP_printk("hugepage update at addr 0x%lx and pte = 0x%lx clr = 0x%lx, set = 0x%lx", __entry->addr, __entry->pte, __entry->clr, __entry->set)
);
/*
 * hugepage_splitting - records the address and pte value when a hugepage
 * is being split.
 */
TRACE_EVENT(hugepage_splitting,
TP_PROTO(unsigned long addr, unsigned long pte),
TP_ARGS(addr, pte),
TP_STRUCT__entry(
__field(unsigned long, addr)
__field(unsigned long, pte)
),
TP_fast_assign(
__entry->addr = addr;
__entry->pte = pte;
),
TP_printk("hugepage splitting at addr 0x%lx and pte = 0x%lx",
__entry->addr, __entry->pte)
);
#endif /* _TRACE_THP_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,350 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM timer
#if !defined(_TRACE_TIMER_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_TIMER_H
#include <linux/tracepoint.h>
#include <linux/hrtimer.h>
#include <linux/timer.h>
/*
 * timer_class - shared event class for timer events that only record the
 * timer pointer (timer_init, timer_expire_exit, timer_cancel).
 */
DECLARE_EVENT_CLASS(timer_class,
TP_PROTO(struct timer_list *timer),
TP_ARGS(timer),
TP_STRUCT__entry(
__field( void *, timer )
),
TP_fast_assign(
__entry->timer = timer;
),
TP_printk("timer=%p", __entry->timer)
);
/**
 * timer_init - called when the timer is initialized
 * @timer: pointer to struct timer_list
 */
DEFINE_EVENT(timer_class, timer_init,
TP_PROTO(struct timer_list *timer),
TP_ARGS(timer)
);
/**
 * timer_start - called when the timer is started
 * @timer: pointer to struct timer_list
 * @expires: the timers expiry time
 */
TRACE_EVENT(timer_start,
TP_PROTO(struct timer_list *timer, unsigned long expires),
TP_ARGS(timer, expires),
TP_STRUCT__entry(
__field( void *, timer )
__field( void *, function )
__field( unsigned long, expires )
__field( unsigned long, now )
),
TP_fast_assign(
__entry->timer = timer;
__entry->function = timer->function;
__entry->expires = expires;
__entry->now = jiffies;
),
TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld]",
__entry->timer, __entry->function, __entry->expires,
(long)__entry->expires - __entry->now)
);
/**
 * timer_expire_entry - called immediately before the timer callback
 * @timer: pointer to struct timer_list
 *
 * Allows to determine the timer latency.
 */
TRACE_EVENT(timer_expire_entry,
TP_PROTO(struct timer_list *timer),
TP_ARGS(timer),
TP_STRUCT__entry(
__field( void *, timer )
__field( unsigned long, now )
__field( void *, function)
),
TP_fast_assign(
__entry->timer = timer;
__entry->now = jiffies;
__entry->function = timer->function;
),
TP_printk("timer=%p function=%pf now=%lu", __entry->timer, __entry->function,__entry->now)
);
/**
 * timer_expire_exit - called immediately after the timer callback returns
 * @timer: pointer to struct timer_list
 *
 * When used in combination with the timer_expire_entry tracepoint we can
 * determine the runtime of the timer callback function.
 *
 * NOTE: Do NOT dereference timer in TP_fast_assign. The pointer might
 * be invalid. We solely track the pointer.
 */
DEFINE_EVENT(timer_class, timer_expire_exit,
TP_PROTO(struct timer_list *timer),
TP_ARGS(timer)
);
/**
 * timer_cancel - called when the timer is canceled
 * @timer: pointer to struct timer_list
 */
DEFINE_EVENT(timer_class, timer_cancel,
TP_PROTO(struct timer_list *timer),
TP_ARGS(timer)
);
/**
 * hrtimer_init - called when the hrtimer is initialized
 * @hrtimer: pointer to struct hrtimer
 * @clockid: the hrtimers clock
 * @mode: the hrtimers mode
 */
TRACE_EVENT(hrtimer_init,
TP_PROTO(struct hrtimer *hrtimer, clockid_t clockid,
enum hrtimer_mode mode),
TP_ARGS(hrtimer, clockid, mode),
TP_STRUCT__entry(
__field( void *, hrtimer )
__field( clockid_t, clockid )
__field( enum hrtimer_mode, mode )
),
TP_fast_assign(
__entry->hrtimer = hrtimer;
__entry->clockid = clockid;
__entry->mode = mode;
),
TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer,
__entry->clockid == CLOCK_REALTIME ?
"CLOCK_REALTIME" : "CLOCK_MONOTONIC",
__entry->mode == HRTIMER_MODE_ABS ?
"HRTIMER_MODE_ABS" : "HRTIMER_MODE_REL")
);
/**
 * hrtimer_start - called when the hrtimer is started
 * @hrtimer: pointer to struct hrtimer
 */
TRACE_EVENT(hrtimer_start,
TP_PROTO(struct hrtimer *hrtimer),
TP_ARGS(hrtimer),
TP_STRUCT__entry(
__field( void *, hrtimer )
__field( void *, function )
__field( s64, expires )
__field( s64, softexpires )
),
TP_fast_assign(
__entry->hrtimer = hrtimer;
__entry->function = hrtimer->function;
__entry->expires = hrtimer_get_expires(hrtimer).tv64;
__entry->softexpires = hrtimer_get_softexpires(hrtimer).tv64;
),
TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu",
__entry->hrtimer, __entry->function,
(unsigned long long)ktime_to_ns((ktime_t) {
.tv64 = __entry->expires }),
(unsigned long long)ktime_to_ns((ktime_t) {
.tv64 = __entry->softexpires }))
);
/**
 * hrtimer_expire_entry - called immediately before the hrtimer callback
 * @hrtimer: pointer to struct hrtimer
 * @now: pointer to variable which contains current time of the
 * timers base.
 *
 * Allows to determine the timer latency.
 */
TRACE_EVENT(hrtimer_expire_entry,
TP_PROTO(struct hrtimer *hrtimer, ktime_t *now),
TP_ARGS(hrtimer, now),
TP_STRUCT__entry(
__field( void *, hrtimer )
__field( s64, now )
__field( void *, function)
),
TP_fast_assign(
__entry->hrtimer = hrtimer;
__entry->now = now->tv64;
__entry->function = hrtimer->function;
),
TP_printk("hrtimer=%p function=%pf now=%llu", __entry->hrtimer, __entry->function,
(unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now }))
);
/*
 * hrtimer_class - shared event class for hrtimer events that only record
 * the hrtimer pointer (hrtimer_expire_exit, hrtimer_cancel).
 */
DECLARE_EVENT_CLASS(hrtimer_class,
TP_PROTO(struct hrtimer *hrtimer),
TP_ARGS(hrtimer),
TP_STRUCT__entry(
__field( void *, hrtimer )
),
TP_fast_assign(
__entry->hrtimer = hrtimer;
),
TP_printk("hrtimer=%p", __entry->hrtimer)
);
/**
 * hrtimer_expire_exit - called immediately after the hrtimer callback returns
 * @hrtimer: pointer to struct hrtimer
 *
 * When used in combination with the hrtimer_expire_entry tracepoint we can
 * determine the runtime of the callback function.
 */
DEFINE_EVENT(hrtimer_class, hrtimer_expire_exit,
TP_PROTO(struct hrtimer *hrtimer),
TP_ARGS(hrtimer)
);
/**
 * hrtimer_cancel - called when the hrtimer is canceled
 * @hrtimer: pointer to struct hrtimer
 */
DEFINE_EVENT(hrtimer_class, hrtimer_cancel,
TP_PROTO(struct hrtimer *hrtimer),
TP_ARGS(hrtimer)
);
/**
 * itimer_state - called when itimer is started or canceled
 * @which: name of the interval timer
 * @value: the itimers value, itimer is canceled if value->it_value is
 * zero, otherwise it is started
 * @expires: the itimers expiry time
 */
TRACE_EVENT(itimer_state,
TP_PROTO(int which, const struct itimerval *const value,
cputime_t expires),
TP_ARGS(which, value, expires),
TP_STRUCT__entry(
__field( int, which )
__field( cputime_t, expires )
__field( long, value_sec )
__field( long, value_usec )
__field( long, interval_sec )
__field( long, interval_usec )
),
TP_fast_assign(
__entry->which = which;
__entry->expires = expires;
__entry->value_sec = value->it_value.tv_sec;
__entry->value_usec = value->it_value.tv_usec;
__entry->interval_sec = value->it_interval.tv_sec;
__entry->interval_usec = value->it_interval.tv_usec;
),
TP_printk("which=%d expires=%llu it_value=%ld.%ld it_interval=%ld.%ld",
__entry->which, (unsigned long long)__entry->expires,
__entry->value_sec, __entry->value_usec,
__entry->interval_sec, __entry->interval_usec)
);
/**
 * itimer_expire - called when itimer expires
 * @which: type of the interval timer
 * @pid: pid of the process which owns the timer
 * @now: current time, used to calculate the latency of itimer
 */
TRACE_EVENT(itimer_expire,
TP_PROTO(int which, struct pid *pid, cputime_t now),
TP_ARGS(which, pid, now),
TP_STRUCT__entry(
__field( int , which )
__field( pid_t, pid )
__field( cputime_t, now )
),
TP_fast_assign(
__entry->which = which;
__entry->now = now;
__entry->pid = pid_nr(pid);
),
TP_printk("which=%d pid=%d now=%llu", __entry->which,
(int) __entry->pid, (unsigned long long)__entry->now)
);
#ifdef CONFIG_NO_HZ_COMMON
/*
 * tick_stop - records whether stopping the periodic tick succeeded and,
 * on failure, the reason message.
 */
TRACE_EVENT(tick_stop,
TP_PROTO(int success, char *error_msg),
TP_ARGS(success, error_msg),
TP_STRUCT__entry(
__field( int , success )
__string( msg, error_msg )
),
TP_fast_assign(
__entry->success = success;
__assign_str(msg, error_msg);
),
TP_printk("success=%s msg=%s", __entry->success ? "yes" : "no", __get_str(msg))
);
#endif
#endif /* _TRACE_TIMER_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,42 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM tlb
#if !defined(_TRACE_TLB_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_TLB_H
#include <linux/mm_types.h>
#include <linux/tracepoint.h>
/* Symbolic names for the tlb_flush reason codes, used by __print_symbolic. */
#define TLB_FLUSH_REASON \
{ TLB_FLUSH_ON_TASK_SWITCH, "flush on task switch" }, \
{ TLB_REMOTE_SHOOTDOWN, "remote shootdown" }, \
{ TLB_LOCAL_SHOOTDOWN, "local shootdown" }, \
{ TLB_LOCAL_MM_SHOOTDOWN, "local mm shootdown" }
/*
 * tlb_flush - records a TLB flush with its reason and page count; only
 * emitted when the current CPU is online (see TP_CONDITION).
 */
TRACE_EVENT_CONDITION(tlb_flush,
TP_PROTO(int reason, unsigned long pages),
TP_ARGS(reason, pages),
TP_CONDITION(cpu_online(smp_processor_id())),
TP_STRUCT__entry(
__field( int, reason)
__field(unsigned long, pages)
),
TP_fast_assign(
__entry->reason = reason;
__entry->pages = pages;
),
TP_printk("pages:%ld reason:%s (%d)",
__entry->pages,
__print_symbolic(__entry->reason, TLB_FLUSH_REASON),
__entry->reason)
);
#endif /* _TRACE_TLB_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,32 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM udp
#if !defined(_TRACE_UDP_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_UDP_H
#include <linux/udp.h>
#include <linux/tracepoint.h>
/*
 * udp_fail_queue_rcv_skb - emitted when queueing a received skb to a UDP
 * socket fails; records the error code and the socket's local port.
 */
TRACE_EVENT(udp_fail_queue_rcv_skb,
TP_PROTO(int rc, struct sock *sk),
TP_ARGS(rc, sk),
TP_STRUCT__entry(
__field(int, rc)
__field(__u16, lport)
),
TP_fast_assign(
__entry->rc = rc;
__entry->lport = inet_sk(sk)->inet_num;
),
TP_printk("rc=%d port=%hu", __entry->rc, __entry->lport)
);
#endif /* _TRACE_UDP_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

158
include/trace/events/v4l2.h Normal file
View file

@ -0,0 +1,158 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM v4l2
#if !defined(_TRACE_V4L2_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_V4L2_H
#include <linux/tracepoint.h>
/* Pretty-printers for v4l2_buffer fields, used in TP_printk below. */
#define show_type(type) \
__print_symbolic(type, \
{ V4L2_BUF_TYPE_VIDEO_CAPTURE, "VIDEO_CAPTURE" }, \
{ V4L2_BUF_TYPE_VIDEO_OUTPUT, "VIDEO_OUTPUT" }, \
{ V4L2_BUF_TYPE_VIDEO_OVERLAY, "VIDEO_OVERLAY" }, \
{ V4L2_BUF_TYPE_VBI_CAPTURE, "VBI_CAPTURE" }, \
{ V4L2_BUF_TYPE_VBI_OUTPUT, "VBI_OUTPUT" }, \
{ V4L2_BUF_TYPE_SLICED_VBI_CAPTURE, "SLICED_VBI_CAPTURE" }, \
{ V4L2_BUF_TYPE_SLICED_VBI_OUTPUT, "SLICED_VBI_OUTPUT" }, \
{ V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY, "VIDEO_OUTPUT_OVERLAY" },\
{ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, "VIDEO_CAPTURE_MPLANE" },\
{ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, "VIDEO_OUTPUT_MPLANE" }, \
{ V4L2_BUF_TYPE_SDR_CAPTURE, "SDR_CAPTURE" }, \
{ V4L2_BUF_TYPE_PRIVATE, "PRIVATE" })
#define show_field(field) \
__print_symbolic(field, \
{ V4L2_FIELD_ANY, "ANY" }, \
{ V4L2_FIELD_NONE, "NONE" }, \
{ V4L2_FIELD_TOP, "TOP" }, \
{ V4L2_FIELD_BOTTOM, "BOTTOM" }, \
{ V4L2_FIELD_INTERLACED, "INTERLACED" }, \
{ V4L2_FIELD_SEQ_TB, "SEQ_TB" }, \
{ V4L2_FIELD_SEQ_BT, "SEQ_BT" }, \
{ V4L2_FIELD_ALTERNATE, "ALTERNATE" }, \
{ V4L2_FIELD_INTERLACED_TB, "INTERLACED_TB" }, \
{ V4L2_FIELD_INTERLACED_BT, "INTERLACED_BT" })
#define show_timecode_type(type) \
__print_symbolic(type, \
{ V4L2_TC_TYPE_24FPS, "24FPS" }, \
{ V4L2_TC_TYPE_25FPS, "25FPS" }, \
{ V4L2_TC_TYPE_30FPS, "30FPS" }, \
{ V4L2_TC_TYPE_50FPS, "50FPS" }, \
{ V4L2_TC_TYPE_60FPS, "60FPS" })
#define show_flags(flags) \
__print_flags(flags, "|", \
{ V4L2_BUF_FLAG_MAPPED, "MAPPED" }, \
{ V4L2_BUF_FLAG_QUEUED, "QUEUED" }, \
{ V4L2_BUF_FLAG_DONE, "DONE" }, \
{ V4L2_BUF_FLAG_KEYFRAME, "KEYFRAME" }, \
{ V4L2_BUF_FLAG_PFRAME, "PFRAME" }, \
{ V4L2_BUF_FLAG_BFRAME, "BFRAME" }, \
{ V4L2_BUF_FLAG_ERROR, "ERROR" }, \
{ V4L2_BUF_FLAG_TIMECODE, "TIMECODE" }, \
{ V4L2_BUF_FLAG_PREPARED, "PREPARED" }, \
{ V4L2_BUF_FLAG_NO_CACHE_INVALIDATE, "NO_CACHE_INVALIDATE" }, \
{ V4L2_BUF_FLAG_NO_CACHE_CLEAN, "NO_CACHE_CLEAN" }, \
{ V4L2_BUF_FLAG_TIMESTAMP_MASK, "TIMESTAMP_MASK" }, \
{ V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN, "TIMESTAMP_UNKNOWN" }, \
{ V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC, "TIMESTAMP_MONOTONIC" }, \
{ V4L2_BUF_FLAG_TIMESTAMP_COPY, "TIMESTAMP_COPY" })
#define show_timecode_flags(flags) \
__print_flags(flags, "|", \
{ V4L2_TC_FLAG_DROPFRAME, "DROPFRAME" }, \
{ V4L2_TC_FLAG_COLORFRAME, "COLORFRAME" }, \
{ V4L2_TC_USERBITS_USERDEFINED, "USERBITS_USERDEFINED" }, \
{ V4L2_TC_USERBITS_8BITCHARS, "USERBITS_8BITCHARS" })
/*
 * V4L2_TRACE_EVENT - generator macro: defines a trace event that captures
 * a full snapshot of a struct v4l2_buffer (index, type, flags, field,
 * timestamp, timecode and sequence) for the given device minor.
 * Used below to stamp out the v4l2_dqbuf and v4l2_qbuf events.
 */
#define V4L2_TRACE_EVENT(event_name) \
TRACE_EVENT(event_name, \
TP_PROTO(int minor, struct v4l2_buffer *buf), \
\
TP_ARGS(minor, buf), \
\
TP_STRUCT__entry( \
__field(int, minor) \
__field(u32, index) \
__field(u32, type) \
__field(u32, bytesused) \
__field(u32, flags) \
__field(u32, field) \
__field(s64, timestamp) \
__field(u32, timecode_type) \
__field(u32, timecode_flags) \
__field(u8, timecode_frames) \
__field(u8, timecode_seconds) \
__field(u8, timecode_minutes) \
__field(u8, timecode_hours) \
__field(u8, timecode_userbits0) \
__field(u8, timecode_userbits1) \
__field(u8, timecode_userbits2) \
__field(u8, timecode_userbits3) \
__field(u32, sequence) \
), \
\
TP_fast_assign( \
__entry->minor = minor; \
__entry->index = buf->index; \
__entry->type = buf->type; \
__entry->bytesused = buf->bytesused; \
__entry->flags = buf->flags; \
__entry->field = buf->field; \
__entry->timestamp = \
timeval_to_ns(&buf->timestamp); \
__entry->timecode_type = buf->timecode.type; \
__entry->timecode_flags = buf->timecode.flags; \
__entry->timecode_frames = \
buf->timecode.frames; \
__entry->timecode_seconds = \
buf->timecode.seconds; \
__entry->timecode_minutes = \
buf->timecode.minutes; \
__entry->timecode_hours = buf->timecode.hours; \
__entry->timecode_userbits0 = \
buf->timecode.userbits[0]; \
__entry->timecode_userbits1 = \
buf->timecode.userbits[1]; \
__entry->timecode_userbits2 = \
buf->timecode.userbits[2]; \
__entry->timecode_userbits3 = \
buf->timecode.userbits[3]; \
__entry->sequence = buf->sequence; \
), \
\
TP_printk("minor = %d, index = %u, type = %s, " \
"bytesused = %u, flags = %s, " \
"field = %s, timestamp = %llu, timecode = { " \
"type = %s, flags = %s, frames = %u, " \
"seconds = %u, minutes = %u, hours = %u, " \
"userbits = { %u %u %u %u } }, " \
"sequence = %u", __entry->minor, \
__entry->index, show_type(__entry->type), \
__entry->bytesused, \
show_flags(__entry->flags), \
show_field(__entry->field), \
__entry->timestamp, \
show_timecode_type(__entry->timecode_type), \
show_timecode_flags(__entry->timecode_flags), \
__entry->timecode_frames, \
__entry->timecode_seconds, \
__entry->timecode_minutes, \
__entry->timecode_hours, \
__entry->timecode_userbits0, \
__entry->timecode_userbits1, \
__entry->timecode_userbits2, \
__entry->timecode_userbits3, \
__entry->sequence \
) \
)
/* Buffer dequeue (VIDIOC_DQBUF) and queue (VIDIOC_QBUF) events. */
V4L2_TRACE_EVENT(v4l2_dqbuf);
V4L2_TRACE_EVENT(v4l2_qbuf);
#endif /* if !defined(_TRACE_V4L2_H) || defined(TRACE_HEADER_MULTI_READ) */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,390 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM vmscan
#if !defined(_TRACE_VMSCAN_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_VMSCAN_H
#include <linux/types.h>
#include <linux/tracepoint.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <trace/events/gfpflags.h>
/* Reclaim writeback flag bits recorded by the events below. */
#define RECLAIM_WB_ANON 0x0001u
#define RECLAIM_WB_FILE 0x0002u
#define RECLAIM_WB_MIXED 0x0010u
#define RECLAIM_WB_SYNC 0x0004u /* Unused, all reclaim async */
#define RECLAIM_WB_ASYNC 0x0008u
/* Decode the reclaim flag bits for TP_printk output. */
#define show_reclaim_flags(flags) \
(flags) ? __print_flags(flags, "|", \
{RECLAIM_WB_ANON, "RECLAIM_WB_ANON"}, \
{RECLAIM_WB_FILE, "RECLAIM_WB_FILE"}, \
{RECLAIM_WB_MIXED, "RECLAIM_WB_MIXED"}, \
{RECLAIM_WB_SYNC, "RECLAIM_WB_SYNC"}, \
{RECLAIM_WB_ASYNC, "RECLAIM_WB_ASYNC"} \
) : "RECLAIM_WB_NONE"
/* Build reclaim flags for a page (anon vs file cache; always async). */
#define trace_reclaim_flags(page) ( \
(page_is_file_cache(page) ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
(RECLAIM_WB_ASYNC) \
)
/* Build reclaim flags from a file/anon boolean (always async). */
#define trace_shrink_flags(file) \
( \
(file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
(RECLAIM_WB_ASYNC) \
)
/* kswapd going to sleep on the given node. */
TRACE_EVENT(mm_vmscan_kswapd_sleep,
TP_PROTO(int nid),
TP_ARGS(nid),
TP_STRUCT__entry(
__field( int, nid )
),
TP_fast_assign(
__entry->nid = nid;
),
TP_printk("nid=%d", __entry->nid)
);
/* kswapd woken up to reclaim at the given allocation order. */
TRACE_EVENT(mm_vmscan_kswapd_wake,
TP_PROTO(int nid, int order),
TP_ARGS(nid, order),
TP_STRUCT__entry(
__field( int, nid )
__field( int, order )
),
TP_fast_assign(
__entry->nid = nid;
__entry->order = order;
),
TP_printk("nid=%d order=%d", __entry->nid, __entry->order)
);
/* A wakeup request sent to kswapd for node/zone/order. */
TRACE_EVENT(mm_vmscan_wakeup_kswapd,
TP_PROTO(int nid, int zid, int order),
TP_ARGS(nid, zid, order),
TP_STRUCT__entry(
__field( int, nid )
__field( int, zid )
__field( int, order )
),
TP_fast_assign(
__entry->nid = nid;
__entry->zid = zid;
__entry->order = order;
),
TP_printk("nid=%d zid=%d order=%d",
__entry->nid,
__entry->zid,
__entry->order)
);
/*
 * Shared class for the three *_reclaim_begin events: records order,
 * may_writepage and the gfp flags of the triggering allocation.
 */
DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_begin_template,
TP_PROTO(int order, int may_writepage, gfp_t gfp_flags),
TP_ARGS(order, may_writepage, gfp_flags),
TP_STRUCT__entry(
__field( int, order )
__field( int, may_writepage )
__field( gfp_t, gfp_flags )
),
TP_fast_assign(
__entry->order = order;
__entry->may_writepage = may_writepage;
__entry->gfp_flags = gfp_flags;
),
TP_printk("order=%d may_writepage=%d gfp_flags=%s",
__entry->order,
__entry->may_writepage,
show_gfp_flags(__entry->gfp_flags))
);
DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_direct_reclaim_begin,
TP_PROTO(int order, int may_writepage, gfp_t gfp_flags),
TP_ARGS(order, may_writepage, gfp_flags)
);
DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_reclaim_begin,
TP_PROTO(int order, int may_writepage, gfp_t gfp_flags),
TP_ARGS(order, may_writepage, gfp_flags)
);
DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_softlimit_reclaim_begin,
TP_PROTO(int order, int may_writepage, gfp_t gfp_flags),
TP_ARGS(order, may_writepage, gfp_flags)
);
/*
 * Shared class for the three *_reclaim_end events: records the number of
 * pages reclaimed by the corresponding reclaim pass.
 */
DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_end_template,
TP_PROTO(unsigned long nr_reclaimed),
TP_ARGS(nr_reclaimed),
TP_STRUCT__entry(
__field( unsigned long, nr_reclaimed )
),
TP_fast_assign(
__entry->nr_reclaimed = nr_reclaimed;
),
TP_printk("nr_reclaimed=%lu", __entry->nr_reclaimed)
);
DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_direct_reclaim_end,
TP_PROTO(unsigned long nr_reclaimed),
TP_ARGS(nr_reclaimed)
);
DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_reclaim_end,
TP_PROTO(unsigned long nr_reclaimed),
TP_ARGS(nr_reclaimed)
);
DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_softlimit_reclaim_end,
TP_PROTO(unsigned long nr_reclaimed),
TP_ARGS(nr_reclaimed)
);
/*
 * mm_shrink_slab_start - emitted before a shrinker's scan_objects callback
 * runs; records the shrinker, its inputs and the computed scan totals.
 */
TRACE_EVENT(mm_shrink_slab_start,
TP_PROTO(struct shrinker *shr, struct shrink_control *sc,
long nr_objects_to_shrink, unsigned long pgs_scanned,
unsigned long lru_pgs, unsigned long cache_items,
unsigned long long delta, unsigned long total_scan),
TP_ARGS(shr, sc, nr_objects_to_shrink, pgs_scanned, lru_pgs,
cache_items, delta, total_scan),
TP_STRUCT__entry(
__field(struct shrinker *, shr)
__field(void *, shrink)
__field(int, nid)
__field(long, nr_objects_to_shrink)
__field(gfp_t, gfp_flags)
__field(unsigned long, pgs_scanned)
__field(unsigned long, lru_pgs)
__field(unsigned long, cache_items)
__field(unsigned long long, delta)
__field(unsigned long, total_scan)
),
TP_fast_assign(
__entry->shr = shr;
__entry->shrink = shr->scan_objects;
__entry->nid = sc->nid;
__entry->nr_objects_to_shrink = nr_objects_to_shrink;
__entry->gfp_flags = sc->gfp_mask;
__entry->pgs_scanned = pgs_scanned;
__entry->lru_pgs = lru_pgs;
__entry->cache_items = cache_items;
__entry->delta = delta;
__entry->total_scan = total_scan;
),
TP_printk("%pF %p: nid: %d objects to shrink %ld gfp_flags %s pgs_scanned %ld lru_pgs %ld cache items %ld delta %lld total_scan %ld",
__entry->shrink,
__entry->shr,
__entry->nid,
__entry->nr_objects_to_shrink,
show_gfp_flags(__entry->gfp_flags),
__entry->pgs_scanned,
__entry->lru_pgs,
__entry->cache_items,
__entry->delta,
__entry->total_scan)
);
/*
 * mm_shrink_slab_end - emitted after the shrinker callback returns;
 * records the callback's return value and updated scan counts.
 */
TRACE_EVENT(mm_shrink_slab_end,
TP_PROTO(struct shrinker *shr, int nid, int shrinker_retval,
long unused_scan_cnt, long new_scan_cnt, long total_scan),
TP_ARGS(shr, nid, shrinker_retval, unused_scan_cnt, new_scan_cnt,
total_scan),
TP_STRUCT__entry(
__field(struct shrinker *, shr)
__field(int, nid)
__field(void *, shrink)
__field(long, unused_scan)
__field(long, new_scan)
__field(int, retval)
__field(long, total_scan)
),
TP_fast_assign(
__entry->shr = shr;
__entry->nid = nid;
__entry->shrink = shr->scan_objects;
__entry->unused_scan = unused_scan_cnt;
__entry->new_scan = new_scan_cnt;
__entry->retval = shrinker_retval;
__entry->total_scan = total_scan;
),
TP_printk("%pF %p: nid: %d unused scan count %ld new scan count %ld total_scan %ld last shrinker return val %d",
__entry->shrink,
__entry->shr,
__entry->nid,
__entry->unused_scan,
__entry->new_scan,
__entry->total_scan,
__entry->retval)
);
/*
 * Shared class for LRU isolation events: records how many pages were
 * requested, scanned and actually taken, plus the isolation mode.
 */
DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template,
TP_PROTO(int order,
unsigned long nr_requested,
unsigned long nr_scanned,
unsigned long nr_taken,
isolate_mode_t isolate_mode,
int file),
TP_ARGS(order, nr_requested, nr_scanned, nr_taken, isolate_mode, file),
TP_STRUCT__entry(
__field(int, order)
__field(unsigned long, nr_requested)
__field(unsigned long, nr_scanned)
__field(unsigned long, nr_taken)
__field(isolate_mode_t, isolate_mode)
__field(int, file)
),
TP_fast_assign(
__entry->order = order;
__entry->nr_requested = nr_requested;
__entry->nr_scanned = nr_scanned;
__entry->nr_taken = nr_taken;
__entry->isolate_mode = isolate_mode;
__entry->file = file;
),
TP_printk("isolate_mode=%d order=%d nr_requested=%lu nr_scanned=%lu nr_taken=%lu file=%d",
__entry->isolate_mode,
__entry->order,
__entry->nr_requested,
__entry->nr_scanned,
__entry->nr_taken,
__entry->file)
);
DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_lru_isolate,
TP_PROTO(int order,
unsigned long nr_requested,
unsigned long nr_scanned,
unsigned long nr_taken,
isolate_mode_t isolate_mode,
int file),
TP_ARGS(order, nr_requested, nr_scanned, nr_taken, isolate_mode, file)
);
DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_memcg_isolate,
TP_PROTO(int order,
unsigned long nr_requested,
unsigned long nr_scanned,
unsigned long nr_taken,
isolate_mode_t isolate_mode,
int file),
TP_ARGS(order, nr_requested, nr_scanned, nr_taken, isolate_mode, file)
);
/* A page being written back during reclaim, with its reclaim flags. */
TRACE_EVENT(mm_vmscan_writepage,
TP_PROTO(struct page *page,
int reclaim_flags),
TP_ARGS(page, reclaim_flags),
TP_STRUCT__entry(
__field(struct page *, page)
__field(int, reclaim_flags)
),
TP_fast_assign(
__entry->page = page;
__entry->reclaim_flags = reclaim_flags;
),
TP_printk("page=%p pfn=%lu flags=%s",
__entry->page,
page_to_pfn(__entry->page),
show_reclaim_flags(__entry->reclaim_flags))
);
/*
 * Result of shrinking an inactive LRU list: scanned/reclaimed counts,
 * reclaim priority and flags for the given node/zone.
 */
TRACE_EVENT(mm_vmscan_lru_shrink_inactive,
TP_PROTO(int nid, int zid,
unsigned long nr_scanned, unsigned long nr_reclaimed,
int priority, int reclaim_flags),
TP_ARGS(nid, zid, nr_scanned, nr_reclaimed, priority, reclaim_flags),
TP_STRUCT__entry(
__field(int, nid)
__field(int, zid)
__field(unsigned long, nr_scanned)
__field(unsigned long, nr_reclaimed)
__field(int, priority)
__field(int, reclaim_flags)
),
TP_fast_assign(
__entry->nid = nid;
__entry->zid = zid;
__entry->nr_scanned = nr_scanned;
__entry->nr_reclaimed = nr_reclaimed;
__entry->priority = priority;
__entry->reclaim_flags = reclaim_flags;
),
TP_printk("nid=%d zid=%d nr_scanned=%ld nr_reclaimed=%ld priority=%d flags=%s",
__entry->nid, __entry->zid,
__entry->nr_scanned, __entry->nr_reclaimed,
__entry->priority,
show_reclaim_flags(__entry->reclaim_flags))
);
#endif /* _TRACE_VMSCAN_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,121 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM workqueue
#if !defined(_TRACE_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_WORKQUEUE_H
#include <linux/tracepoint.h>
#include <linux/workqueue.h>
/*
 * workqueue_work - shared class for events that only record the
 * work_struct pointer (workqueue_activate_work, workqueue_execute_end).
 */
DECLARE_EVENT_CLASS(workqueue_work,
TP_PROTO(struct work_struct *work),
TP_ARGS(work),
TP_STRUCT__entry(
__field( void *, work )
),
TP_fast_assign(
__entry->work = work;
),
TP_printk("work struct %p", __entry->work)
);
/**
 * workqueue_queue_work - called when a work gets queued
 * @req_cpu: the requested cpu
 * @pwq: pointer to struct pool_workqueue
 * @work: pointer to struct work_struct
 *
 * This event occurs when a work is queued immediately or once a
 * delayed work is actually queued on a workqueue (ie: once the delay
 * has been reached).
 */
TRACE_EVENT(workqueue_queue_work,
TP_PROTO(unsigned int req_cpu, struct pool_workqueue *pwq,
struct work_struct *work),
TP_ARGS(req_cpu, pwq, work),
TP_STRUCT__entry(
__field( void *, work )
__field( void *, function)
__field( void *, workqueue)
__field( unsigned int, req_cpu )
__field( unsigned int, cpu )
),
TP_fast_assign(
__entry->work = work;
__entry->function = work->func;
__entry->workqueue = pwq->wq;
__entry->req_cpu = req_cpu;
__entry->cpu = pwq->pool->cpu;
),
TP_printk("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u",
__entry->work, __entry->function, __entry->workqueue,
__entry->req_cpu, __entry->cpu)
);
/**
 * workqueue_activate_work - called when a work gets activated
 * @work: pointer to struct work_struct
 *
 * This event occurs when a queued work is put on the active queue,
 * which happens immediately after queueing unless @max_active limit
 * is reached.
 */
DEFINE_EVENT(workqueue_work, workqueue_activate_work,
TP_PROTO(struct work_struct *work),
TP_ARGS(work)
);
/**
 * workqueue_execute_start - called immediately before the workqueue callback
 * @work: pointer to struct work_struct
 *
 * Allows to track workqueue execution.
 */
TRACE_EVENT(workqueue_execute_start,
TP_PROTO(struct work_struct *work),
TP_ARGS(work),
TP_STRUCT__entry(
__field( void *, work )
__field( void *, function)
),
TP_fast_assign(
__entry->work = work;
__entry->function = work->func;
),
TP_printk("work struct %p: function %pf", __entry->work, __entry->function)
);
/**
 * workqueue_execute_end - called immediately after the workqueue callback
 * @work: pointer to struct work_struct
 *
 * Allows to track workqueue execution.
 */
DEFINE_EVENT(workqueue_work, workqueue_execute_end,
TP_PROTO(struct work_struct *work),
TP_ARGS(work)
);
#endif /* _TRACE_WORKQUEUE_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,604 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM writeback
#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_WRITEBACK_H
#include <linux/tracepoint.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#define show_inode_state(state) \
__print_flags(state, "|", \
{I_DIRTY_SYNC, "I_DIRTY_SYNC"}, \
{I_DIRTY_DATASYNC, "I_DIRTY_DATASYNC"}, \
{I_DIRTY_PAGES, "I_DIRTY_PAGES"}, \
{I_NEW, "I_NEW"}, \
{I_WILL_FREE, "I_WILL_FREE"}, \
{I_FREEING, "I_FREEING"}, \
{I_CLEAR, "I_CLEAR"}, \
{I_SYNC, "I_SYNC"}, \
{I_REFERENCED, "I_REFERENCED"} \
)
#define WB_WORK_REASON \
{WB_REASON_BACKGROUND, "background"}, \
{WB_REASON_TRY_TO_FREE_PAGES, "try_to_free_pages"}, \
{WB_REASON_SYNC, "sync"}, \
{WB_REASON_PERIODIC, "periodic"}, \
{WB_REASON_LAPTOP_TIMER, "laptop_timer"}, \
{WB_REASON_FREE_MORE_MEM, "free_more_memory"}, \
{WB_REASON_FS_FREE_SPACE, "fs_free_space"}, \
{WB_REASON_FORKER_THREAD, "forker_thread"}
struct wb_writeback_work;
TRACE_EVENT(writeback_dirty_page,
TP_PROTO(struct page *page, struct address_space *mapping),
TP_ARGS(page, mapping),
TP_STRUCT__entry (
__array(char, name, 32)
__field(unsigned long, ino)
__field(pgoff_t, index)
),
TP_fast_assign(
strncpy(__entry->name,
mapping ? dev_name(mapping->backing_dev_info->dev) : "(unknown)", 32);
__entry->ino = mapping ? mapping->host->i_ino : 0;
__entry->index = page->index;
),
TP_printk("bdi %s: ino=%lu index=%lu",
__entry->name,
__entry->ino,
__entry->index
)
);
DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
TP_PROTO(struct inode *inode, int flags),
TP_ARGS(inode, flags),
TP_STRUCT__entry (
__array(char, name, 32)
__field(unsigned long, ino)
__field(unsigned long, flags)
),
TP_fast_assign(
struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;
/* may be called for files on pseudo FSes w/ unregistered bdi */
strncpy(__entry->name,
bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
__entry->ino = inode->i_ino;
__entry->flags = flags;
),
TP_printk("bdi %s: ino=%lu flags=%s",
__entry->name,
__entry->ino,
show_inode_state(__entry->flags)
)
);
DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,
TP_PROTO(struct inode *inode, int flags),
TP_ARGS(inode, flags)
);
DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,
TP_PROTO(struct inode *inode, int flags),
TP_ARGS(inode, flags)
);
DECLARE_EVENT_CLASS(writeback_write_inode_template,
TP_PROTO(struct inode *inode, struct writeback_control *wbc),
TP_ARGS(inode, wbc),
TP_STRUCT__entry (
__array(char, name, 32)
__field(unsigned long, ino)
__field(int, sync_mode)
),
TP_fast_assign(
strncpy(__entry->name,
dev_name(inode->i_mapping->backing_dev_info->dev), 32);
__entry->ino = inode->i_ino;
__entry->sync_mode = wbc->sync_mode;
),
TP_printk("bdi %s: ino=%lu sync_mode=%d",
__entry->name,
__entry->ino,
__entry->sync_mode
)
);
DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,
TP_PROTO(struct inode *inode, struct writeback_control *wbc),
TP_ARGS(inode, wbc)
);
DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,
TP_PROTO(struct inode *inode, struct writeback_control *wbc),
TP_ARGS(inode, wbc)
);
/*
 * Event class: a wb_writeback_work item together with the bdi it targets.
 * Captures the work parameters (page count, sync mode, kupdate/background
 * flags, range_cyclic) and the symbolic reason the work was queued.
 */
DECLARE_EVENT_CLASS(writeback_work_class,
TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work),
TP_ARGS(bdi, work),
TP_STRUCT__entry(
__array(char, name, 32)
__field(long, nr_pages)
__field(dev_t, sb_dev)
__field(int, sync_mode)
__field(int, for_kupdate)
__field(int, range_cyclic)
__field(int, for_background)
__field(int, reason)
),
TP_fast_assign(
/* fall back to the default bdi when this one has no device yet */
struct device *dev = bdi->dev;
if (!dev)
dev = default_backing_dev_info.dev;
/* strlcpy() guarantees NUL termination; strncpy() did not */
strlcpy(__entry->name, dev_name(dev), 32);
__entry->nr_pages = work->nr_pages;
/* 0 when the work is not pinned to a specific superblock */
__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
__entry->sync_mode = work->sync_mode;
__entry->for_kupdate = work->for_kupdate;
__entry->range_cyclic = work->range_cyclic;
__entry->for_background = work->for_background;
__entry->reason = work->reason;
),
TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
"kupdate=%d range_cyclic=%d background=%d reason=%s",
__entry->name,
MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
__entry->nr_pages,
__entry->sync_mode,
__entry->for_kupdate,
__entry->range_cyclic,
__entry->for_background,
__print_symbolic(__entry->reason, WB_WORK_REASON)
)
);
/* Stamp out one writeback_work_class event per work life-cycle stage. */
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), \
TP_ARGS(bdi, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);
/* Bare count of pages written by a flusher pass. */
TRACE_EVENT(writeback_pages_written,
TP_PROTO(long pages_written),
TP_ARGS(pages_written),
TP_STRUCT__entry(
__field(long, pages)
),
TP_fast_assign(
__entry->pages = pages_written;
),
TP_printk("%ld", __entry->pages)
);
/* Event class: a bare bdi reference — records only its device name. */
DECLARE_EVENT_CLASS(writeback_class,
TP_PROTO(struct backing_dev_info *bdi),
TP_ARGS(bdi),
TP_STRUCT__entry(
__array(char, name, 32)
),
TP_fast_assign(
/* strlcpy() guarantees NUL termination; strncpy() did not */
strlcpy(__entry->name, dev_name(bdi->dev), 32);
),
TP_printk("bdi %s",
__entry->name
)
);
/* Stamp out one writeback_class event per simple bdi event. */
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
TP_PROTO(struct backing_dev_info *bdi), \
TP_ARGS(bdi))
DEFINE_WRITEBACK_EVENT(writeback_nowork);
DEFINE_WRITEBACK_EVENT(writeback_wake_background);
DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);
/*
 * Event class: snapshot of a struct writeback_control as applied against
 * a particular bdi — page budget, skip count, mode flags and byte range.
 */
DECLARE_EVENT_CLASS(wbc_class,
TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
TP_ARGS(wbc, bdi),
TP_STRUCT__entry(
__array(char, name, 32)
__field(long, nr_to_write)
__field(long, pages_skipped)
__field(int, sync_mode)
__field(int, for_kupdate)
__field(int, for_background)
__field(int, for_reclaim)
__field(int, range_cyclic)
__field(long, range_start)
__field(long, range_end)
),
TP_fast_assign(
/* strlcpy() guarantees NUL termination; strncpy() did not */
strlcpy(__entry->name, dev_name(bdi->dev), 32);
__entry->nr_to_write = wbc->nr_to_write;
__entry->pages_skipped = wbc->pages_skipped;
__entry->sync_mode = wbc->sync_mode;
__entry->for_kupdate = wbc->for_kupdate;
__entry->for_background = wbc->for_background;
__entry->for_reclaim = wbc->for_reclaim;
__entry->range_cyclic = wbc->range_cyclic;
/* range_start/range_end are truncated from loff_t to long here */
__entry->range_start = (long)wbc->range_start;
__entry->range_end = (long)wbc->range_end;
),
TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
"bgrd=%d reclm=%d cyclic=%d "
"start=0x%lx end=0x%lx",
__entry->name,
__entry->nr_to_write,
__entry->pages_skipped,
__entry->sync_mode,
__entry->for_kupdate,
__entry->for_background,
__entry->for_reclaim,
__entry->range_cyclic,
__entry->range_start,
__entry->range_end)
);	/* terminate with ';' like every other event class in this file */
/* Stamp out one wbc_class event; currently only wbc_writepage. */
#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
TP_ARGS(wbc, bdi))
DEFINE_WBC_EVENT(wbc_writepage);
/*
 * Fires when dirty inodes are moved onto the writeback queue: records the
 * work's older_than_this cutoff (in jiffies and as a relative age in ms),
 * how many inodes were moved, and the symbolic work reason.
 */
TRACE_EVENT(writeback_queue_io,
TP_PROTO(struct bdi_writeback *wb,
struct wb_writeback_work *work,
int moved),
TP_ARGS(wb, work, moved),
TP_STRUCT__entry(
__array(char, name, 32)
__field(unsigned long, older)
__field(long, age)
__field(int, moved)
__field(int, reason)
),
TP_fast_assign(
unsigned long *older_than_this = work->older_than_this;
/* strlcpy() guarantees NUL termination; strncpy() did not */
strlcpy(__entry->name, dev_name(wb->bdi->dev), 32);
/* older_than_this may be NULL: 0 / -1 are the "no cutoff" markers */
__entry->older = older_than_this ? *older_than_this : 0;
__entry->age = older_than_this ?
(jiffies - *older_than_this) * 1000 / HZ : -1;
__entry->moved = moved;
__entry->reason = work->reason;
),
TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s",
__entry->name,
__entry->older, /* older_than_this in jiffies */
__entry->age, /* older_than_this in relative milliseconds */
__entry->moved,
__print_symbolic(__entry->reason, WB_WORK_REASON)
)
);
/*
 * Snapshot of the system-wide dirty-page accounting: current dirty /
 * writeback / unstable-NFS page counts, the background and hard dirty
 * thresholds passed in by the caller, and the global dirty limit.
 */
TRACE_EVENT(global_dirty_state,
TP_PROTO(unsigned long background_thresh,
unsigned long dirty_thresh
),
TP_ARGS(background_thresh,
dirty_thresh
),
TP_STRUCT__entry(
__field(unsigned long, nr_dirty)
__field(unsigned long, nr_writeback)
__field(unsigned long, nr_unstable)
__field(unsigned long, background_thresh)
__field(unsigned long, dirty_thresh)
__field(unsigned long, dirty_limit)
__field(unsigned long, nr_dirtied)
__field(unsigned long, nr_written)
),
TP_fast_assign(
/* counters sampled from the global vmstat at trace time */
__entry->nr_dirty = global_page_state(NR_FILE_DIRTY);
__entry->nr_writeback = global_page_state(NR_WRITEBACK);
__entry->nr_unstable = global_page_state(NR_UNSTABLE_NFS);
__entry->nr_dirtied = global_page_state(NR_DIRTIED);
__entry->nr_written = global_page_state(NR_WRITTEN);
__entry->background_thresh = background_thresh;
__entry->dirty_thresh = dirty_thresh;
__entry->dirty_limit = global_dirty_limit;
),
TP_printk("dirty=%lu writeback=%lu unstable=%lu "
"bg_thresh=%lu thresh=%lu limit=%lu "
"dirtied=%lu written=%lu",
__entry->nr_dirty,
__entry->nr_writeback,
__entry->nr_unstable,
__entry->background_thresh,
__entry->dirty_thresh,
__entry->dirty_limit,
__entry->nr_dirtied,
__entry->nr_written
)
);
/* Convert a page count to KB/s units (PAGE_SHIFT - 10 == log2(PAGE_SIZE/1024)). */
#define KBps(x) ((x) << (PAGE_SHIFT - 10))
/*
 * Dirty-throttling rate update for one bdi: measured and averaged write
 * bandwidth, the observed dirty rate, and the base / balanced ratelimits,
 * all reported in KB/s via KBps().
 */
TRACE_EVENT(bdi_dirty_ratelimit,
TP_PROTO(struct backing_dev_info *bdi,
unsigned long dirty_rate,
unsigned long task_ratelimit),
TP_ARGS(bdi, dirty_rate, task_ratelimit),
TP_STRUCT__entry(
__array(char, bdi, 32)
__field(unsigned long, write_bw)
__field(unsigned long, avg_write_bw)
__field(unsigned long, dirty_rate)
__field(unsigned long, dirty_ratelimit)
__field(unsigned long, task_ratelimit)
__field(unsigned long, balanced_dirty_ratelimit)
),
TP_fast_assign(
strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
__entry->write_bw = KBps(bdi->write_bandwidth);
__entry->avg_write_bw = KBps(bdi->avg_write_bandwidth);
__entry->dirty_rate = KBps(dirty_rate);
__entry->dirty_ratelimit = KBps(bdi->dirty_ratelimit);
__entry->task_ratelimit = KBps(task_ratelimit);
__entry->balanced_dirty_ratelimit =
KBps(bdi->balanced_dirty_ratelimit);
),
TP_printk("bdi %s: "
"write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
"dirty_ratelimit=%lu task_ratelimit=%lu "
"balanced_dirty_ratelimit=%lu",
__entry->bdi,
__entry->write_bw, /* write bandwidth */
__entry->avg_write_bw, /* avg write bandwidth */
__entry->dirty_rate, /* bdi dirty rate */
__entry->dirty_ratelimit, /* base ratelimit */
__entry->task_ratelimit, /* ratelimit with position control */
__entry->balanced_dirty_ratelimit /* the balanced ratelimit */
)
);
/*
 * Per-invocation snapshot of balance_dirty_pages() throttling decisions:
 * global limit/setpoint, per-bdi setpoint estimate, ratelimits (KB/s via
 * KBps()), and pause/period/think times converted from jiffies to ms.
 */
TRACE_EVENT(balance_dirty_pages,
TP_PROTO(struct backing_dev_info *bdi,
unsigned long thresh,
unsigned long bg_thresh,
unsigned long dirty,
unsigned long bdi_thresh,
unsigned long bdi_dirty,
unsigned long dirty_ratelimit,
unsigned long task_ratelimit,
unsigned long dirtied,
unsigned long period,
long pause,
unsigned long start_time),
TP_ARGS(bdi, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
dirty_ratelimit, task_ratelimit,
dirtied, period, pause, start_time),
TP_STRUCT__entry(
__array( char, bdi, 32)
__field(unsigned long, limit)
__field(unsigned long, setpoint)
__field(unsigned long, dirty)
__field(unsigned long, bdi_setpoint)
__field(unsigned long, bdi_dirty)
__field(unsigned long, dirty_ratelimit)
__field(unsigned long, task_ratelimit)
__field(unsigned int, dirtied)
__field(unsigned int, dirtied_pause)
__field(unsigned long, paused)
__field( long, pause)
__field(unsigned long, period)
__field( long, think)
),
TP_fast_assign(
/* freerun: midpoint below which no throttling happens */
unsigned long freerun = (thresh + bg_thresh) / 2;
strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
__entry->limit = global_dirty_limit;
/* setpoint: midpoint between freerun and the hard limit */
__entry->setpoint = (global_dirty_limit + freerun) / 2;
__entry->dirty = dirty;
/* scale the global setpoint by this bdi's share of the threshold */
__entry->bdi_setpoint = __entry->setpoint *
bdi_thresh / (thresh + 1);
__entry->bdi_dirty = bdi_dirty;
__entry->dirty_ratelimit = KBps(dirty_ratelimit);
__entry->task_ratelimit = KBps(task_ratelimit);
__entry->dirtied = dirtied;
__entry->dirtied_pause = current->nr_dirtied_pause;
/* think time since the task last paused, 0 if never paused */
__entry->think = current->dirty_paused_when == 0 ? 0 :
(long)(jiffies - current->dirty_paused_when) * 1000/HZ;
__entry->period = period * 1000 / HZ;
__entry->pause = pause * 1000 / HZ;
__entry->paused = (jiffies - start_time) * 1000 / HZ;
),
TP_printk("bdi %s: "
"limit=%lu setpoint=%lu dirty=%lu "
"bdi_setpoint=%lu bdi_dirty=%lu "
"dirty_ratelimit=%lu task_ratelimit=%lu "
"dirtied=%u dirtied_pause=%u "
"paused=%lu pause=%ld period=%lu think=%ld",
__entry->bdi,
__entry->limit,
__entry->setpoint,
__entry->dirty,
__entry->bdi_setpoint,
__entry->bdi_dirty,
__entry->dirty_ratelimit,
__entry->task_ratelimit,
__entry->dirtied,
__entry->dirtied_pause,
__entry->paused, /* ms */
__entry->pause, /* ms */
__entry->period, /* ms */
__entry->think /* ms */
)
);
/*
 * An inode being requeued by writeback_sb_inodes(): logs its bdi, inode
 * number, i_state flags (symbolic) and how long ago it was dirtied.
 */
TRACE_EVENT(writeback_sb_inodes_requeue,
TP_PROTO(struct inode *inode),
TP_ARGS(inode),
TP_STRUCT__entry(
__array(char, name, 32)
__field(unsigned long, ino)
__field(unsigned long, state)
__field(unsigned long, dirtied_when)
),
TP_fast_assign(
/* strlcpy() guarantees NUL termination; strncpy() did not */
strlcpy(__entry->name,
dev_name(inode_to_bdi(inode)->dev), 32);
__entry->ino = inode->i_ino;
__entry->state = inode->i_state;
__entry->dirtied_when = inode->dirtied_when;
),
TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu",
__entry->name,
__entry->ino,
show_inode_state(__entry->state),
__entry->dirtied_when,
(jiffies - __entry->dirtied_when) / HZ
)
);
/*
 * Event class: a congestion wait — the requested timeout and the actual
 * delay experienced, both in microseconds.
 */
DECLARE_EVENT_CLASS(writeback_congest_waited_template,
TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
TP_ARGS(usec_timeout, usec_delayed),
TP_STRUCT__entry(
__field( unsigned int, usec_timeout )
__field( unsigned int, usec_delayed )
),
TP_fast_assign(
__entry->usec_timeout = usec_timeout;
__entry->usec_delayed = usec_delayed;
),
TP_printk("usec_timeout=%u usec_delayed=%u",
__entry->usec_timeout,
__entry->usec_delayed)
);
DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,
TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
TP_ARGS(usec_timeout, usec_delayed)
);
DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,
TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
TP_ARGS(usec_timeout, usec_delayed)
);
/*
 * Event class: one inode passing through writeback_single_inode(); records
 * the inode's state/age, the page budget it was given (nr_to_write) and
 * how many pages were actually written (budget minus what remains in wbc).
 */
DECLARE_EVENT_CLASS(writeback_single_inode_template,
TP_PROTO(struct inode *inode,
struct writeback_control *wbc,
unsigned long nr_to_write
),
TP_ARGS(inode, wbc, nr_to_write),
TP_STRUCT__entry(
__array(char, name, 32)
__field(unsigned long, ino)
__field(unsigned long, state)
__field(unsigned long, dirtied_when)
__field(unsigned long, writeback_index)
__field(long, nr_to_write)
__field(unsigned long, wrote)
),
TP_fast_assign(
/* strlcpy() guarantees NUL termination; strncpy() did not */
strlcpy(__entry->name,
dev_name(inode_to_bdi(inode)->dev), 32);
__entry->ino = inode->i_ino;
__entry->state = inode->i_state;
__entry->dirtied_when = inode->dirtied_when;
__entry->writeback_index = inode->i_mapping->writeback_index;
__entry->nr_to_write = nr_to_write;
/* pages written == original budget minus what is left in the wbc */
__entry->wrote = nr_to_write - wbc->nr_to_write;
),
TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
"index=%lu to_write=%ld wrote=%lu",
__entry->name,
__entry->ino,
show_inode_state(__entry->state),
__entry->dirtied_when,
(jiffies - __entry->dirtied_when) / HZ,
__entry->writeback_index,
__entry->nr_to_write,
__entry->wrote
)
);
/* Instances of writeback_single_inode_template (start / completion). */
DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
TP_PROTO(struct inode *inode,
struct writeback_control *wbc,
unsigned long nr_to_write),
TP_ARGS(inode, wbc, nr_to_write)
);
DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
TP_PROTO(struct inode *inode,
struct writeback_control *wbc,
unsigned long nr_to_write),
TP_ARGS(inode, wbc, nr_to_write)
);
#endif /* _TRACE_WRITEBACK_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

516
include/trace/events/xen.h Normal file
View file

@ -0,0 +1,516 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM xen
#if !defined(_TRACE_XEN_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_XEN_H
#include <linux/tracepoint.h>
#include <asm/paravirt_types.h>
#include <asm/xen/trace_types.h>
struct multicall_entry;
/* Multicalls */
/* Event class: begin/end of a Xen multicall batch in a given lazy mode. */
DECLARE_EVENT_CLASS(xen_mc__batch,
TP_PROTO(enum paravirt_lazy_mode mode),
TP_ARGS(mode),
TP_STRUCT__entry(
__field(enum paravirt_lazy_mode, mode)
),
TP_fast_assign(__entry->mode = mode),
TP_printk("start batch LAZY_%s",
(__entry->mode == PARAVIRT_LAZY_MMU) ? "MMU" :
(__entry->mode == PARAVIRT_LAZY_CPU) ? "CPU" : "NONE")
);
#define DEFINE_XEN_MC_BATCH(name) \
DEFINE_EVENT(xen_mc__batch, name, \
TP_PROTO(enum paravirt_lazy_mode mode), \
TP_ARGS(mode))
DEFINE_XEN_MC_BATCH(xen_mc_batch);
DEFINE_XEN_MC_BATCH(xen_mc_issue);
/*
 * One entry added to the multicall buffer: hypercall op plus up to 6
 * arguments; unused argument slots are zeroed so the record is stable.
 */
TRACE_EVENT(xen_mc_entry,
TP_PROTO(struct multicall_entry *mc, unsigned nargs),
TP_ARGS(mc, nargs),
TP_STRUCT__entry(
__field(unsigned int, op)
__field(unsigned int, nargs)
__array(unsigned long, args, 6)
),
TP_fast_assign(__entry->op = mc->op;
__entry->nargs = nargs;
memcpy(__entry->args, mc->args, sizeof(unsigned long) * nargs);
memset(__entry->args + nargs, 0, sizeof(unsigned long) * (6 - nargs));
),
TP_printk("op %u%s args [%lx, %lx, %lx, %lx, %lx, %lx]",
__entry->op, xen_hypercall_name(__entry->op),
__entry->args[0], __entry->args[1], __entry->args[2],
__entry->args[3], __entry->args[4], __entry->args[5])
);
/* Allocation of argument space in the multicall buffer, in bytes. */
TRACE_EVENT(xen_mc_entry_alloc,
TP_PROTO(size_t args),
TP_ARGS(args),
TP_STRUCT__entry(
__field(size_t, args)
),
TP_fast_assign(__entry->args = args),
TP_printk("alloc entry %zu arg bytes", __entry->args)
);
/* Registration of a post-flush multicall callback. */
TRACE_EVENT(xen_mc_callback,
TP_PROTO(xen_mc_callback_fn_t fn, void *data),
TP_ARGS(fn, data),
TP_STRUCT__entry(
__field(xen_mc_callback_fn_t, fn)
__field(void *, data)
),
TP_fast_assign(
__entry->fn = fn;
__entry->data = data;
),
TP_printk("callback %pf, data %p",
__entry->fn, __entry->data)
);
/* Why the multicall buffer was flushed (batch end, full, callback, ...). */
TRACE_EVENT(xen_mc_flush_reason,
TP_PROTO(enum xen_mc_flush_reason reason),
TP_ARGS(reason),
TP_STRUCT__entry(
__field(enum xen_mc_flush_reason, reason)
),
TP_fast_assign(__entry->reason = reason),
TP_printk("flush reason %s",
(__entry->reason == XEN_MC_FL_NONE) ? "NONE" :
(__entry->reason == XEN_MC_FL_BATCH) ? "BATCH" :
(__entry->reason == XEN_MC_FL_ARGS) ? "ARGS" :
(__entry->reason == XEN_MC_FL_CALLBACK) ? "CALLBACK" : "??")
);
/* A multicall flush: counts of queued hypercalls, arg bytes, callbacks. */
TRACE_EVENT(xen_mc_flush,
TP_PROTO(unsigned mcidx, unsigned argidx, unsigned cbidx),
TP_ARGS(mcidx, argidx, cbidx),
TP_STRUCT__entry(
__field(unsigned, mcidx)
__field(unsigned, argidx)
__field(unsigned, cbidx)
),
TP_fast_assign(__entry->mcidx = mcidx;
__entry->argidx = argidx;
__entry->cbidx = cbidx),
TP_printk("flushing %u hypercalls, %u arg bytes, %u callbacks",
__entry->mcidx, __entry->argidx, __entry->cbidx)
);
/* Attempt to extend the args of the last queued multicall, with result. */
TRACE_EVENT(xen_mc_extend_args,
TP_PROTO(unsigned long op, size_t args, enum xen_mc_extend_args res),
TP_ARGS(op, args, res),
TP_STRUCT__entry(
__field(unsigned int, op)
__field(size_t, args)
__field(enum xen_mc_extend_args, res)
),
TP_fast_assign(__entry->op = op;
__entry->args = args;
__entry->res = res),
TP_printk("extending op %u%s by %zu bytes res %s",
__entry->op, xen_hypercall_name(__entry->op),
__entry->args,
__entry->res == XEN_MC_XE_OK ? "OK" :
__entry->res == XEN_MC_XE_BAD_OP ? "BAD_OP" :
__entry->res == XEN_MC_XE_NO_SPACE ? "NO_SPACE" : "???")
);
/* mmu */
/*
 * Event class: a PTE store; records the pointer and the raw pte value,
 * printed both decoded (pte_val of a native pte) and raw.
 */
DECLARE_EVENT_CLASS(xen_mmu__set_pte,
TP_PROTO(pte_t *ptep, pte_t pteval),
TP_ARGS(ptep, pteval),
TP_STRUCT__entry(
__field(pte_t *, ptep)
__field(pteval_t, pteval)
),
TP_fast_assign(__entry->ptep = ptep;
__entry->pteval = pteval.pte),
TP_printk("ptep %p pteval %0*llx (raw %0*llx)",
__entry->ptep,
(int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
(int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
);
#define DEFINE_XEN_MMU_SET_PTE(name) \
DEFINE_EVENT(xen_mmu__set_pte, name, \
TP_PROTO(pte_t *ptep, pte_t pteval), \
TP_ARGS(ptep, pteval))
DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte);
DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte_atomic);
/* PTE store targeting a foreign domain's memory; adds the domid. */
TRACE_EVENT(xen_mmu_set_domain_pte,
TP_PROTO(pte_t *ptep, pte_t pteval, unsigned domid),
TP_ARGS(ptep, pteval, domid),
TP_STRUCT__entry(
__field(pte_t *, ptep)
__field(pteval_t, pteval)
__field(unsigned, domid)
),
TP_fast_assign(__entry->ptep = ptep;
__entry->pteval = pteval.pte;
__entry->domid = domid),
TP_printk("ptep %p pteval %0*llx (raw %0*llx) domid %u",
__entry->ptep,
(int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
(int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval,
__entry->domid)
);
/* set_pte_at(): like set_pte but with the mm and virtual address context. */
TRACE_EVENT(xen_mmu_set_pte_at,
TP_PROTO(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pteval),
TP_ARGS(mm, addr, ptep, pteval),
TP_STRUCT__entry(
__field(struct mm_struct *, mm)
__field(unsigned long, addr)
__field(pte_t *, ptep)
__field(pteval_t, pteval)
),
TP_fast_assign(__entry->mm = mm;
__entry->addr = addr;
__entry->ptep = ptep;
__entry->pteval = pteval.pte),
TP_printk("mm %p addr %lx ptep %p pteval %0*llx (raw %0*llx)",
__entry->mm, __entry->addr, __entry->ptep,
(int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
(int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
);
/* Clearing a PTE at (mm, addr). */
TRACE_EVENT(xen_mmu_pte_clear,
TP_PROTO(struct mm_struct *mm, unsigned long addr, pte_t *ptep),
TP_ARGS(mm, addr, ptep),
TP_STRUCT__entry(
__field(struct mm_struct *, mm)
__field(unsigned long, addr)
__field(pte_t *, ptep)
),
TP_fast_assign(__entry->mm = mm;
__entry->addr = addr;
__entry->ptep = ptep),
TP_printk("mm %p addr %lx ptep %p",
__entry->mm, __entry->addr, __entry->ptep)
);
/* PMD store, decoded and raw. */
TRACE_EVENT(xen_mmu_set_pmd,
TP_PROTO(pmd_t *pmdp, pmd_t pmdval),
TP_ARGS(pmdp, pmdval),
TP_STRUCT__entry(
__field(pmd_t *, pmdp)
__field(pmdval_t, pmdval)
),
TP_fast_assign(__entry->pmdp = pmdp;
__entry->pmdval = pmdval.pmd),
TP_printk("pmdp %p pmdval %0*llx (raw %0*llx)",
__entry->pmdp,
(int)sizeof(pmdval_t) * 2, (unsigned long long)pmd_val(native_make_pmd(__entry->pmdval)),
(int)sizeof(pmdval_t) * 2, (unsigned long long)__entry->pmdval)
);
/* Clearing a PMD entry. */
TRACE_EVENT(xen_mmu_pmd_clear,
TP_PROTO(pmd_t *pmdp),
TP_ARGS(pmdp),
TP_STRUCT__entry(
__field(pmd_t *, pmdp)
),
TP_fast_assign(__entry->pmdp = pmdp),
TP_printk("pmdp %p", __entry->pmdp)
);
#if PAGETABLE_LEVELS >= 4
/* 4-level paging: PUD store, decoded and raw. */
TRACE_EVENT(xen_mmu_set_pud,
TP_PROTO(pud_t *pudp, pud_t pudval),
TP_ARGS(pudp, pudval),
TP_STRUCT__entry(
__field(pud_t *, pudp)
__field(pudval_t, pudval)
),
TP_fast_assign(__entry->pudp = pudp;
__entry->pudval = native_pud_val(pudval)),
TP_printk("pudp %p pudval %0*llx (raw %0*llx)",
__entry->pudp,
(int)sizeof(pudval_t) * 2, (unsigned long long)pud_val(native_make_pud(__entry->pudval)),
(int)sizeof(pudval_t) * 2, (unsigned long long)__entry->pudval)
);
/* PGD store; user_pgdp is the shadow user pagetable's slot, if any. */
TRACE_EVENT(xen_mmu_set_pgd,
TP_PROTO(pgd_t *pgdp, pgd_t *user_pgdp, pgd_t pgdval),
TP_ARGS(pgdp, user_pgdp, pgdval),
TP_STRUCT__entry(
__field(pgd_t *, pgdp)
__field(pgd_t *, user_pgdp)
__field(pgdval_t, pgdval)
),
TP_fast_assign(__entry->pgdp = pgdp;
__entry->user_pgdp = user_pgdp;
__entry->pgdval = pgdval.pgd),
TP_printk("pgdp %p user_pgdp %p pgdval %0*llx (raw %0*llx)",
__entry->pgdp, __entry->user_pgdp,
(int)sizeof(pgdval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->pgdval)),
(int)sizeof(pgdval_t) * 2, (unsigned long long)__entry->pgdval)
);
/* Clearing a PUD entry (4-level only). */
TRACE_EVENT(xen_mmu_pud_clear,
TP_PROTO(pud_t *pudp),
TP_ARGS(pudp),
TP_STRUCT__entry(
__field(pud_t *, pudp)
),
TP_fast_assign(__entry->pudp = pudp),
TP_printk("pudp %p", __entry->pudp)
);
#else
/*
 * <= 3 pagetable levels: the PUD is folded into the PGD, hence the
 * pgd_val()/native_make_pgd() decode below. NOTE(review): presumably
 * intentional folding rather than a copy-paste slip — confirm against
 * the pgtable-nopud folding headers.
 */
TRACE_EVENT(xen_mmu_set_pud,
TP_PROTO(pud_t *pudp, pud_t pudval),
TP_ARGS(pudp, pudval),
TP_STRUCT__entry(
__field(pud_t *, pudp)
__field(pudval_t, pudval)
),
TP_fast_assign(__entry->pudp = pudp;
__entry->pudval = native_pud_val(pudval)),
TP_printk("pudp %p pudval %0*llx (raw %0*llx)",
__entry->pudp,
(int)sizeof(pudval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->pudval)),
(int)sizeof(pudval_t) * 2, (unsigned long long)__entry->pudval)
);
#endif
/* Clearing a PGD entry. */
TRACE_EVENT(xen_mmu_pgd_clear,
TP_PROTO(pgd_t *pgdp),
TP_ARGS(pgdp),
TP_STRUCT__entry(
__field(pgd_t *, pgdp)
),
TP_fast_assign(__entry->pgdp = pgdp),
TP_printk("pgdp %p", __entry->pgdp)
);
/* Event class: start/commit of a ptep_modify_prot transaction. */
DECLARE_EVENT_CLASS(xen_mmu_ptep_modify_prot,
TP_PROTO(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pteval),
TP_ARGS(mm, addr, ptep, pteval),
TP_STRUCT__entry(
__field(struct mm_struct *, mm)
__field(unsigned long, addr)
__field(pte_t *, ptep)
__field(pteval_t, pteval)
),
TP_fast_assign(__entry->mm = mm;
__entry->addr = addr;
__entry->ptep = ptep;
__entry->pteval = pteval.pte),
TP_printk("mm %p addr %lx ptep %p pteval %0*llx (raw %0*llx)",
__entry->mm, __entry->addr, __entry->ptep,
(int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
(int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
);
#define DEFINE_XEN_MMU_PTEP_MODIFY_PROT(name) \
DEFINE_EVENT(xen_mmu_ptep_modify_prot, name, \
TP_PROTO(struct mm_struct *mm, unsigned long addr, \
pte_t *ptep, pte_t pteval), \
TP_ARGS(mm, addr, ptep, pteval))
DEFINE_XEN_MMU_PTEP_MODIFY_PROT(xen_mmu_ptep_modify_prot_start);
DEFINE_XEN_MMU_PTEP_MODIFY_PROT(xen_mmu_ptep_modify_prot_commit);
/* A page allocated for use as a pagetable at the given level. */
TRACE_EVENT(xen_mmu_alloc_ptpage,
TP_PROTO(struct mm_struct *mm, unsigned long pfn, unsigned level, bool pinned),
TP_ARGS(mm, pfn, level, pinned),
TP_STRUCT__entry(
__field(struct mm_struct *, mm)
__field(unsigned long, pfn)
__field(unsigned, level)
__field(bool, pinned)
),
TP_fast_assign(__entry->mm = mm;
__entry->pfn = pfn;
__entry->level = level;
__entry->pinned = pinned),
TP_printk("mm %p pfn %lx level %d %spinned",
__entry->mm, __entry->pfn, __entry->level,
__entry->pinned ? "" : "un")
);
/* A pagetable page being released back to the allocator. */
TRACE_EVENT(xen_mmu_release_ptpage,
TP_PROTO(unsigned long pfn, unsigned level, bool pinned),
TP_ARGS(pfn, level, pinned),
TP_STRUCT__entry(
__field(unsigned long, pfn)
__field(unsigned, level)
__field(bool, pinned)
),
TP_fast_assign(__entry->pfn = pfn;
__entry->level = level;
__entry->pinned = pinned),
TP_printk("pfn %lx level %d %spinned",
__entry->pfn, __entry->level,
__entry->pinned ? "" : "un")
);
/* Event class: pin/unpin of an mm's pagetable root. */
DECLARE_EVENT_CLASS(xen_mmu_pgd,
TP_PROTO(struct mm_struct *mm, pgd_t *pgd),
TP_ARGS(mm, pgd),
TP_STRUCT__entry(
__field(struct mm_struct *, mm)
__field(pgd_t *, pgd)
),
TP_fast_assign(__entry->mm = mm;
__entry->pgd = pgd),
TP_printk("mm %p pgd %p", __entry->mm, __entry->pgd)
);
#define DEFINE_XEN_MMU_PGD_EVENT(name) \
DEFINE_EVENT(xen_mmu_pgd, name, \
TP_PROTO(struct mm_struct *mm, pgd_t *pgd), \
TP_ARGS(mm, pgd))
DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin);
DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin);
/*
 * Payload-free TLB flush markers: the dummy int argument and zero-length
 * char array exist only because TRACE_EVENT requires a proto and a struct.
 */
TRACE_EVENT(xen_mmu_flush_tlb_all,
TP_PROTO(int x),
TP_ARGS(x),
TP_STRUCT__entry(__array(char, x, 0)),
TP_fast_assign((void)x),
TP_printk("%s", "")
);
TRACE_EVENT(xen_mmu_flush_tlb,
TP_PROTO(int x),
TP_ARGS(x),
TP_STRUCT__entry(__array(char, x, 0)),
TP_fast_assign((void)x),
TP_printk("%s", "")
);
/* Single-address TLB flush. */
TRACE_EVENT(xen_mmu_flush_tlb_single,
TP_PROTO(unsigned long addr),
TP_ARGS(addr),
TP_STRUCT__entry(
__field(unsigned long, addr)
),
TP_fast_assign(__entry->addr = addr),
TP_printk("addr %lx", __entry->addr)
);
/*
 * Cross-CPU TLB flush request: number of target CPUs, the mm, and the
 * [addr, end) virtual range being flushed.
 */
TRACE_EVENT(xen_mmu_flush_tlb_others,
TP_PROTO(const struct cpumask *cpus, struct mm_struct *mm,
unsigned long addr, unsigned long end),
TP_ARGS(cpus, mm, addr, end),
TP_STRUCT__entry(
__field(unsigned, ncpus)
__field(struct mm_struct *, mm)
__field(unsigned long, addr)
__field(unsigned long, end)
),
TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
__entry->mm = mm;
/* was "addr = addr," — comma operator happened to work; use ';' like the other assignments */
__entry->addr = addr;
__entry->end = end),
TP_printk("ncpus %d mm %p addr %lx, end %lx",
__entry->ncpus, __entry->mm, __entry->addr, __entry->end)
);
/* CR3 (pagetable base) write; kernel flag distinguishes kernel vs user cr3. */
TRACE_EVENT(xen_mmu_write_cr3,
TP_PROTO(bool kernel, unsigned long cr3),
TP_ARGS(kernel, cr3),
TP_STRUCT__entry(
__field(bool, kernel)
__field(unsigned long, cr3)
),
TP_fast_assign(__entry->kernel = kernel;
__entry->cr3 = cr3),
TP_printk("%s cr3 %lx",
__entry->kernel ? "kernel" : "user", __entry->cr3)
);
/* CPU */
/* LDT descriptor write: table pointer, slot index, raw 64-bit descriptor. */
TRACE_EVENT(xen_cpu_write_ldt_entry,
TP_PROTO(struct desc_struct *dt, int entrynum, u64 desc),
TP_ARGS(dt, entrynum, desc),
TP_STRUCT__entry(
__field(struct desc_struct *, dt)
__field(int, entrynum)
__field(u64, desc)
),
TP_fast_assign(__entry->dt = dt;
__entry->entrynum = entrynum;
__entry->desc = desc;
),
TP_printk("dt %p entrynum %d entry %016llx",
__entry->dt, __entry->entrynum,
(unsigned long long)__entry->desc)
);
/* IDT gate write; only the table pointer and slot are recorded. */
TRACE_EVENT(xen_cpu_write_idt_entry,
TP_PROTO(gate_desc *dt, int entrynum, const gate_desc *ent),
TP_ARGS(dt, entrynum, ent),
TP_STRUCT__entry(
__field(gate_desc *, dt)
__field(int, entrynum)
),
TP_fast_assign(__entry->dt = dt;
__entry->entrynum = entrynum;
),
TP_printk("dt %p entrynum %d",
__entry->dt, __entry->entrynum)
);
/* IDT load; records the new table's linear address. */
TRACE_EVENT(xen_cpu_load_idt,
TP_PROTO(const struct desc_ptr *desc),
TP_ARGS(desc),
TP_STRUCT__entry(
__field(unsigned long, addr)
),
TP_fast_assign(__entry->addr = desc->address),
TP_printk("addr %lx", __entry->addr)
);
/* GDT descriptor write; desc is copied as a raw 64-bit value. */
TRACE_EVENT(xen_cpu_write_gdt_entry,
TP_PROTO(struct desc_struct *dt, int entrynum, const void *desc, int type),
TP_ARGS(dt, entrynum, desc, type),
TP_STRUCT__entry(
__field(u64, desc)
__field(struct desc_struct *, dt)
__field(int, entrynum)
__field(int, type)
),
TP_fast_assign(__entry->dt = dt;
__entry->entrynum = entrynum;
__entry->desc = *(u64 *)desc;
__entry->type = type;
),
TP_printk("dt %p entrynum %d type %d desc %016llx",
__entry->dt, __entry->entrynum, __entry->type,
(unsigned long long)__entry->desc)
);
/* LDT load: base address and entry count. */
TRACE_EVENT(xen_cpu_set_ldt,
TP_PROTO(const void *addr, unsigned entries),
TP_ARGS(addr, entries),
TP_STRUCT__entry(
__field(const void *, addr)
__field(unsigned, entries)
),
TP_fast_assign(__entry->addr = addr;
__entry->entries = entries),
TP_printk("addr %p entries %u",
__entry->addr, __entry->entries)
);
#endif /* _TRACE_XEN_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -0,0 +1,50 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM zswap
#if !defined(_TRACE_ZSWAP_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_ZSWAP_H
#include <linux/types.h>
#include <linux/tracepoint.h>
#include <trace/events/gfpflags.h>
/*
 * Event class: zswap writeback-daemon state changes, parameterized only by
 * the current size of the zswap compressed pool in pages.
 */
DECLARE_EVENT_CLASS(mm_zswap_writebackd_template,
TP_PROTO(unsigned long pool_pages),
TP_ARGS(pool_pages),
TP_STRUCT__entry(
__field(unsigned long, pool_pages)
),
TP_fast_assign(
__entry->pool_pages = pool_pages;
),
TP_printk("zswap_pool_pages=%lu", __entry->pool_pages)
);
/* Daemon going to sleep / being asked to wake / actually waking. */
DEFINE_EVENT(mm_zswap_writebackd_template, mm_zswap_writebackd_sleep,
TP_PROTO(unsigned long pool_pages),
TP_ARGS(pool_pages)
);
DEFINE_EVENT(mm_zswap_writebackd_template, mm_zswap_wakeup_writebackd,
TP_PROTO(unsigned long pool_pages),
TP_ARGS(pool_pages)
);
DEFINE_EVENT(mm_zswap_writebackd_template, mm_zswap_writebackd_wake,
TP_PROTO(unsigned long pool_pages),
TP_ARGS(pool_pages)
);
#endif /* _TRACE_ZSWAP_H */
#include <trace/define_trace.h>

820
include/trace/ftrace.h Normal file
View file

@ -0,0 +1,820 @@
/*
* Stage 1 of the trace events.
*
* Override the macros in <trace/trace_events.h> to include the following:
*
* struct ftrace_raw_<call> {
* struct trace_entry ent;
* <type> <item>;
* <type2> <item2>[<len>];
* [...]
* };
*
* The <type> <item> is created by the __field(type, item) macro or
* the __array(type2, item2, len) macro.
* We simply do "type item;", and that will create the fields
* in the structure.
*/
#include <linux/ftrace_event.h>
/*
* DECLARE_EVENT_CLASS can be used to add a generic function
* handlers for events. That is, if all events have the same
* parameters and just have distinct trace points.
* Each tracepoint can be defined with DEFINE_EVENT and that
* will map the DECLARE_EVENT_CLASS to the tracepoint.
*
* TRACE_EVENT is a one to one mapping between tracepoint and template.
*/
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
DECLARE_EVENT_CLASS(name, \
PARAMS(proto), \
PARAMS(args), \
PARAMS(tstruct), \
PARAMS(assign), \
PARAMS(print)); \
DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));
/*
 * Stage 1 field macros: each __field/__array becomes a plain struct
 * member; dynamic arrays become a u32 offset+length cookie (__data_loc).
 */
#undef __field
#define __field(type, item) type item;
#undef __field_ext
#define __field_ext(type, item, filter_type) type item;
#undef __field_struct
#define __field_struct(type, item) type item;
#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type) type item;
#undef __array
#define __array(type, item, len) type item[len];
#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;
#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)
#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(char, item, -1)
#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args
/*
 * Emit "struct ftrace_raw_<name>": trace_entry header, the declared
 * fields, then a flexible char[] for any dynamic-array payload.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
struct ftrace_raw_##name { \
struct trace_entry ent; \
tstruct \
char __data[0]; \
}; \
\
static struct ftrace_event_class event_class_##name;
/* Forward-declare the per-event call object; filled in by later stages. */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) event_##name
#undef DEFINE_EVENT_FN
#define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg) \
DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct, \
assign, print, reg, unreg) \
TRACE_EVENT(name, PARAMS(proto), PARAMS(args), \
PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \
#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(name, value) \
__TRACE_EVENT_FLAGS(name, value)
#undef TRACE_EVENT_PERF_PERM
#define TRACE_EVENT_PERF_PERM(name, expr...) \
__TRACE_EVENT_PERF_PERM(name, expr)
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
* Stage 2 of the trace events.
*
* Include the following:
*
* struct ftrace_data_offsets_<call> {
* u32 <item1>;
* u32 <item2>;
* [...]
* };
*
* The __dynamic_array() macro will create each u32 <item>, this is
* to keep the offset of each array from the beginning of the event.
* The size of an array is also encoded, in the higher 16 bits of <item>.
*/
/*
 * Stage 2: only dynamic arrays matter here — each contributes a u32 slot
 * in ftrace_data_offsets_<call>; every fixed field expands to nothing.
 */
#undef __field
#define __field(type, item)
#undef __field_ext
#define __field_ext(type, item, filter_type)
#undef __field_struct
#define __field_struct(type, item)
#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)
#undef __array
#define __array(type, item, len)
#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 item;
#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)
#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
struct ftrace_data_offsets_##call { \
tstruct; \
};
/* DEFINE_EVENT variants produce nothing in this stage. */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(event, flag)
#undef TRACE_EVENT_PERF_PERM
#define TRACE_EVENT_PERF_PERM(event, expr...)
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
* Stage 3 of the trace events.
*
* Override the macros in <trace/trace_events.h> to include the following:
*
* enum print_line_t
* ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
* {
* struct trace_seq *s = &iter->seq;
* struct ftrace_raw_<call> *field; <-- defined in stage 1
* struct trace_entry *entry;
* struct trace_seq *p = &iter->tmp_seq;
* int ret;
*
* entry = iter->ent;
*
* if (entry->type != event_<call>->event.type) {
* WARN_ON_ONCE(1);
* return TRACE_TYPE_UNHANDLED;
* }
*
* field = (typeof(field))entry;
*
* trace_seq_init(p);
* ret = trace_seq_printf(s, "%s: ", <call>);
* if (ret)
* ret = trace_seq_printf(s, <TP_printk> "\n");
* if (!ret)
* return TRACE_TYPE_PARTIAL_LINE;
*
* return TRACE_TYPE_HANDLED;
* }
*
* This is the method used to print the raw event to the trace
* output format. Note, this is not needed if the data is read
* in binary.
*/
/*
 * Stage 3 helpers: within ftrace_raw_output_<call>, __entry aliases the
 * decoded record pointer named "field", and the TP_printk() helpers below
 * expand into expressions that print into the per-iterator scratch
 * trace_seq "p" (in scope inside the generated output function).
 */
#undef __entry
#define __entry field
#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args
/* Dynamic-array accessors decode the offset/length packed in __data_loc. */
#undef __get_dynamic_array
#define __get_dynamic_array(field) \
((void *)__entry + (__entry->__data_loc_##field & 0xffff))
#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field) \
((__entry->__data_loc_##field >> 16) & 0xffff)
#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)
#undef __get_bitmask
#define __get_bitmask(field) \
({ \
void *__bitmask = __get_dynamic_array(field); \
unsigned int __bitmask_size; \
__bitmask_size = __get_dynamic_array_len(field); \
ftrace_print_bitmask_seq(p, __bitmask, __bitmask_size); \
})
/*
 * The flag/symbol tables are function-local statics, terminated by a
 * { -1, NULL } sentinel that the ftrace_print_*_seq() helpers stop on.
 */
#undef __print_flags
#define __print_flags(flag, delim, flag_array...) \
({ \
static const struct trace_print_flags __flags[] = \
{ flag_array, { -1, NULL }}; \
ftrace_print_flags_seq(p, delim, flag, __flags); \
})
#undef __print_symbolic
#define __print_symbolic(value, symbol_array...) \
({ \
static const struct trace_print_flags symbols[] = \
{ symbol_array, { -1, NULL }}; \
ftrace_print_symbols_seq(p, value, symbols); \
})
/* On 32-bit kernels a 64-bit value needs the dedicated u64 table walker. */
#undef __print_symbolic_u64
#if BITS_PER_LONG == 32
#define __print_symbolic_u64(value, symbol_array...) \
({ \
static const struct trace_print_flags_u64 symbols[] = \
{ symbol_array, { -1, NULL } }; \
ftrace_print_symbols_seq_u64(p, value, symbols); \
})
#else
#define __print_symbolic_u64(value, symbol_array...) \
__print_symbolic(value, symbol_array)
#endif
#undef __print_hex
#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)
/*
 * Generate ftrace_raw_output_<call>(): the .trace callback that renders a
 * raw ring-buffer record as text.  It preps the iterator via
 * ftrace_raw_output_prep(), then prints the event with the expanded
 * TP_printk() arguments ("print").  "field"/"p" are referenced by the
 * __entry and print-helper macros defined above.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace enum print_line_t \
ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
struct trace_event *trace_event) \
{ \
struct trace_seq *s = &iter->seq; \
struct trace_seq __maybe_unused *p = &iter->tmp_seq; \
struct ftrace_raw_##call *field; \
int ret; \
\
field = (typeof(field))iter->ent; \
\
ret = ftrace_raw_output_prep(iter, trace_event); \
if (ret) \
return ret; \
\
ret = trace_seq_printf(s, print); \
if (!ret) \
return TRACE_TYPE_PARTIAL_LINE; \
\
return TRACE_TYPE_HANDLED; \
} \
static struct trace_event_functions ftrace_event_type_funcs_##call = { \
.trace = ftrace_raw_output_##call, \
};
/*
 * DEFINE_EVENT_PRINT gets its own output function because it overrides the
 * template's TP_printk.  Unlike the class version above it validates that
 * the record's type id matches event_<call> before decoding, and delegates
 * the actual formatting to ftrace_output_call().
 */
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
static notrace enum print_line_t \
ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
struct trace_event *event) \
{ \
struct ftrace_raw_##template *field; \
struct trace_entry *entry; \
struct trace_seq *p = &iter->tmp_seq; \
\
entry = iter->ent; \
\
if (entry->type != event_##call.event.type) { \
WARN_ON_ONCE(1); \
return TRACE_TYPE_UNHANDLED; \
} \
\
field = (typeof(field))entry; \
\
trace_seq_init(p); \
return ftrace_output_call(iter, #call, print); \
} \
static struct trace_event_functions ftrace_event_type_funcs_##call = { \
.trace = ftrace_raw_output_##call, \
};
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
 * Field-registration stage: each field macro expands to a
 * trace_define_field() call inside ftrace_define_fields_<call>(), which
 * has "field" (a sample struct used only for offsetof/sizeof) and "ret"
 * in scope.  On the first failure the generated function returns early.
 */
#undef __field_ext
#define __field_ext(type, item, filter_type) \
ret = trace_define_field(event_call, #type, #item, \
offsetof(typeof(field), item), \
sizeof(field.item), \
is_signed_type(type), filter_type); \
if (ret) \
return ret;
/* Struct-valued fields register as unsigned (signedness 0). */
#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type) \
ret = trace_define_field(event_call, #type, #item, \
offsetof(typeof(field), item), \
sizeof(field.item), \
0, filter_type); \
if (ret) \
return ret;
#undef __field
#define __field(type, item) __field_ext(type, item, FILTER_OTHER)
#undef __field_struct
#define __field_struct(type, item) __field_struct_ext(type, item, FILTER_OTHER)
/*
 * Arrays register under a synthesized "type[len]" name; the BUILD_BUG_ON
 * caps len so string filters on the array cannot overflow.
 */
#undef __array
#define __array(type, item, len) \
do { \
char *type_str = #type"["__stringify(len)"]"; \
BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
ret = trace_define_field(event_call, type_str, #item, \
offsetof(typeof(field), item), \
sizeof(field.item), \
is_signed_type(type), FILTER_OTHER); \
if (ret) \
return ret; \
} while (0);
/*
 * Dynamic arrays register the u32 __data_loc_<item> slot under a
 * "__data_loc type[]" name.  NOTE(review): unlike the macros above this
 * one does not test ret after the call; a failure is only reported if it
 * happens to be the last field -- presumably tolerated upstream, verify
 * before relying on the return value.
 */
#undef __dynamic_array
#define __dynamic_array(type, item, len) \
ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
offsetof(typeof(field), __data_loc_##item), \
sizeof(field.__data_loc_##item), \
is_signed_type(type), FILTER_OTHER);
#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)
#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
/*
 * Generate ftrace_define_fields_<call>(): registers every event field
 * with the filtering core by expanding "tstruct" into the
 * trace_define_field() calls defined above.  "field" exists only so the
 * field macros can take offsetof()/sizeof() on the record layout.
 *
 * "ret" is initialized to 0: each field macro assigns it before testing
 * it, but if tstruct expands to no fields the generated function would
 * otherwise return an uninitialized value (undefined behavior).
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
static int notrace __init \
ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
{ \
struct ftrace_raw_##call field; \
int ret = 0; \
\
tstruct; \
\
return ret; \
}
/* Field layout comes from the template; plain DEFINE_EVENT adds nothing. */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
* remember the offset of each array from the beginning of the event.
*/
#undef __entry
#define __entry entry
#undef __field
#define __field(type, item)
#undef __field_ext
#define __field_ext(type, item, filter_type)
#undef __field_struct
#define __field_struct(type, item)
#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)
#undef __array
#define __array(type, item, len)
#undef __dynamic_array
#define __dynamic_array(type, item, len) \
__item_length = (len) * sizeof(type); \
__data_offsets->item = __data_size + \
offsetof(typeof(*entry), __data); \
__data_offsets->item |= __item_length << 16; \
__data_size += __item_length;
#undef __string
#define __string(item, src) __dynamic_array(char, item, \
strlen((src) ? (const char *)(src) : "(null)") + 1)
/*
 * __bitmask_size_in_bytes_raw is the number of bytes needed to hold
 * num_possible_cpus().
 */
#define __bitmask_size_in_bytes_raw(nr_bits) \
(((nr_bits) + 7) / 8)
/* Same quantity rounded up to whole longs (BITS_PER_LONG / 8 bytes each). */
#define __bitmask_size_in_longs(nr_bits) \
((__bitmask_size_in_bytes_raw(nr_bits) + \
((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8))
/*
 * __bitmask_size_in_bytes is the number of bytes needed to hold
 * num_possible_cpus() padded out to the nearest long. This is what
 * is saved in the buffer, just to be consistent.
 */
#define __bitmask_size_in_bytes(nr_bits) \
(__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8))
/* A bitmask is stored as a dynamic array of that many longs. */
#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, \
__bitmask_size_in_longs(nr_bits))
/*
 * Generate ftrace_get_offsets_<call>(): fills *__data_offsets for one
 * firing of the event and returns the total number of extra bytes the
 * dynamic payload needs beyond sizeof(struct ftrace_raw_<call>).
 * "entry" is never dereferenced -- it exists only for offsetof() in the
 * __dynamic_array expansion above.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static inline notrace int ftrace_get_offsets_##call( \
struct ftrace_data_offsets_##call *__data_offsets, proto) \
{ \
int __data_size = 0; \
int __maybe_unused __item_length; \
struct ftrace_raw_##call __maybe_unused *entry; \
\
tstruct; \
\
return __data_size; \
}
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
* Stage 4 of the trace events.
*
* Override the macros in <trace/trace_events.h> to include the following:
*
* For those macros defined with TRACE_EVENT:
*
* static struct ftrace_event_call event_<call>;
*
* static void ftrace_raw_event_<call>(void *__data, proto)
* {
* struct ftrace_event_file *ftrace_file = __data;
* struct ftrace_event_call *event_call = ftrace_file->event_call;
* struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
* unsigned long eflags = ftrace_file->flags;
* enum event_trigger_type __tt = ETT_NONE;
* struct ring_buffer_event *event;
* struct ftrace_raw_<call> *entry; <-- defined in stage 1
* struct ring_buffer *buffer;
* unsigned long irq_flags;
* int __data_size;
* int pc;
*
* if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
* if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
* event_triggers_call(ftrace_file, NULL);
* if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
* return;
* }
*
* local_save_flags(irq_flags);
* pc = preempt_count();
*
* __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
*
* event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
* event_<call>->event.type,
* sizeof(*entry) + __data_size,
* irq_flags, pc);
* if (!event)
* return;
* entry = ring_buffer_event_data(event);
*
* { <assign>; } <-- Here we assign the entries by the __field and
* __array macros.
*
* if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
* __tt = event_triggers_call(ftrace_file, entry);
*
* if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
* &ftrace_file->flags))
* ring_buffer_discard_commit(buffer, event);
* else if (!filter_check_discard(ftrace_file, entry, buffer, event))
* trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
*
* if (__tt)
* event_triggers_post_call(ftrace_file, __tt);
* }
*
* static struct trace_event ftrace_event_type_<call> = {
* .trace = ftrace_raw_output_<call>, <-- stage 2
* };
*
* static const char print_fmt_<call>[] = <TP_printk>;
*
* static struct ftrace_event_class __used event_class_<template> = {
* .system = "<system>",
* .define_fields = ftrace_define_fields_<call>,
* .fields = LIST_HEAD_INIT(event_class_##call.fields),
* .raw_init = trace_event_raw_init,
* .probe = ftrace_raw_event_##call,
* .reg = ftrace_event_reg,
* };
*
* static struct ftrace_event_call event_<call> = {
* .class = event_class_<template>,
* {
* .tp = &__tracepoint_<call>,
* },
* .event = &ftrace_event_type_<call>,
* .print_fmt = print_fmt_<call>,
* .flags = TRACE_EVENT_FL_TRACEPOINT,
* };
 * // it's only safe to use pointers when doing linker tricks to
* // create an array.
* static struct ftrace_event_call __used
* __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
*
*/
/*
 * Perf hooks: when perf is built in, each event class gets a forward
 * declaration of its perf_trace_<call> probe and a .perf_probe member in
 * its class initializer; otherwise both expand to nothing.
 */
#ifdef CONFIG_PERF_EVENTS
#define _TRACE_PERF_PROTO(call, proto) \
static notrace void \
perf_trace_##call(void *__data, proto);
#define _TRACE_PERF_INIT(call) \
.perf_probe = perf_trace_##call,
#else
#define _TRACE_PERF_PROTO(call, proto)
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */
#undef __entry
#define __entry entry
#undef __field
#define __field(type, item)
#undef __field_struct
#define __field_struct(type, item)
#undef __array
#define __array(type, item, len)
#undef __dynamic_array
#define __dynamic_array(type, item, len) \
__entry->__data_loc_##item = __data_offsets.item;
#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)
#undef __assign_str
#define __assign_str(dst, src) \
strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)
#undef __assign_bitmask
#define __assign_bitmask(dst, src, nr_bits) \
memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
#undef TP_fast_assign
#define TP_fast_assign(args...) args
#undef __perf_addr
#define __perf_addr(a) (a)
#undef __perf_count
#define __perf_count(c) (c)
#undef __perf_task
#define __perf_task(t) (t)
/*
 * Generate ftrace_raw_event_<call>(): the tracepoint probe that records
 * one event.  Flow: bail if the event is soft-disabled, size the dynamic
 * payload, reserve a ring-buffer slot for the record plus payload, run
 * the tstruct (__data_loc stores) and TP_fast_assign ("assign") bodies,
 * then commit.  A failed reserve silently drops the event.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
\
static notrace void \
ftrace_raw_event_##call(void *__data, proto) \
{ \
struct ftrace_event_file *ftrace_file = __data; \
struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
struct ftrace_event_buffer fbuffer; \
struct ftrace_raw_##call *entry; \
int __data_size; \
\
if (ftrace_trigger_soft_disabled(ftrace_file)) \
return; \
\
__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
\
entry = ftrace_event_buffer_reserve(&fbuffer, ftrace_file, \
sizeof(*entry) + __data_size); \
\
if (!entry) \
return; \
\
tstruct \
\
{ assign; } \
\
ftrace_event_buffer_commit(&fbuffer); \
}
/*
* The ftrace_test_probe is compiled out, it is only here as a build time check
* to make sure that if the tracepoint handling changes, the ftrace probe will
* fail to compile unless it too is updated.
*/
/*
 * Generate ftrace_test_probe_<call>(): never called at runtime, it only
 * forces a compile-time type check that the ftrace probe still matches
 * the tracepoint's callback signature.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
static inline void ftrace_test_probe_##call(void) \
{ \
check_trace_callback_type_##call(ftrace_raw_event_##template); \
}
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
 * Registration stage: TP_printk is re-expanded into the static
 * print_fmt_<call>[] string that userspace reads from the "format" file;
 * __entry becomes the literal token REC and the print helpers are
 * undefined so they stringify verbatim.
 */
#undef __entry
#define __entry REC
#undef __print_flags
#undef __print_symbolic
#undef __print_hex
#undef __get_dynamic_array
#undef __get_dynamic_array_len
#undef __get_str
#undef __get_bitmask
#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
/*
 * Emit the per-class ftrace_event_class wiring the define-fields, probe
 * and (optionally) perf-probe functions generated in earlier stages.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
_TRACE_PERF_PROTO(call, PARAMS(proto)); \
static const char print_fmt_##call[] = print; \
static struct ftrace_event_class __used __refdata event_class_##call = { \
.system = __stringify(TRACE_SYSTEM), \
.define_fields = ftrace_define_fields_##call, \
.fields = LIST_HEAD_INIT(event_class_##call.fields),\
.raw_init = trace_event_raw_init, \
.probe = ftrace_raw_event_##call, \
.reg = ftrace_event_reg, \
_TRACE_PERF_INIT(call) \
};
/*
 * Emit the per-event ftrace_event_call bound to its template's class,
 * output funcs and print format, plus a pointer dropped into the
 * _ftrace_events linker section so the core can enumerate all events.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
\
static struct ftrace_event_call __used event_##call = { \
.class = &event_class_##template, \
{ \
.tp = &__tracepoint_##call, \
}, \
.event.funcs = &ftrace_event_type_funcs_##template, \
.print_fmt = print_fmt_##template, \
.flags = TRACE_EVENT_FL_TRACEPOINT, \
}; \
static struct ftrace_event_call __used \
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
/*
 * Same as DEFINE_EVENT above, but with an event-specific print format and
 * output funcs (generated earlier from the overriding TP_printk) instead
 * of the template's.
 */
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
\
static const char print_fmt_##call[] = print; \
\
static struct ftrace_event_call __used event_##call = { \
.class = &event_class_##template, \
{ \
.tp = &__tracepoint_##call, \
}, \
.event.funcs = &ftrace_event_type_funcs_##call, \
.print_fmt = print_fmt_##call, \
.flags = TRACE_EVENT_FL_TRACEPOINT, \
}; \
static struct ftrace_event_call __used \
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
 * Perf stage: re-point the accessors at the perf-side record ("entry")
 * and make the __perf_* annotations capture their argument into the
 * locals (__addr, __count, __task) of the generated perf probe.
 */
#ifdef CONFIG_PERF_EVENTS
#undef __entry
#define __entry entry
#undef __get_dynamic_array
#define __get_dynamic_array(field) \
((void *)__entry + (__entry->__data_loc_##field & 0xffff))
#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field) \
((__entry->__data_loc_##field >> 16) & 0xffff)
#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)
#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)
#undef __perf_addr
#define __perf_addr(a) (__addr = (a))
#undef __perf_count
#define __perf_count(c) (__count = (c))
#undef __perf_task
#define __perf_task(t) (__task = (t))
/*
 * Generate perf_trace_<call>(): the perf-side probe.  It sizes the
 * payload, skips work when no perf event is attached on this CPU (unless
 * a specific __task was requested), rounds the record up for a trailing
 * u32 plus u64 alignment, captures caller regs, fills the entry via
 * tstruct/assign, and submits to the perf buffer.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace void \
perf_trace_##call(void *__data, proto) \
{ \
struct ftrace_event_call *event_call = __data; \
struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
struct ftrace_raw_##call *entry; \
struct pt_regs __regs; \
u64 __addr = 0, __count = 1; \
struct task_struct *__task = NULL; \
struct hlist_head *head; \
int __entry_size; \
int __data_size; \
int rctx; \
\
__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
\
head = this_cpu_ptr(event_call->perf_events); \
if (__builtin_constant_p(!__task) && !__task && \
hlist_empty(head)) \
return; \
\
__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
sizeof(u64)); \
__entry_size -= sizeof(u32); \
\
perf_fetch_caller_regs(&__regs); \
entry = perf_trace_buf_prepare(__entry_size, \
event_call->event.type, &__regs, &rctx); \
if (!entry) \
return; \
\
tstruct \
\
{ assign; } \
\
perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
__count, &__regs, head, __task); \
}
/*
* This part is compiled out, it is only here as a build time check
* to make sure that if the tracepoint handling changes, the
* perf probe will fail to compile unless it too is updated.
*/
/*
 * Generate perf_test_probe_<call>(): compile-time-only check that the
 * perf probe still matches the tracepoint's callback signature.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
static inline void perf_test_probe_##call(void) \
{ \
check_trace_callback_type_##call(perf_trace_##template); \
}
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_PERF_EVENTS */

50
include/trace/syscall.h Normal file
View file

@ -0,0 +1,50 @@
#ifndef _TRACE_SYSCALL_H
#define _TRACE_SYSCALL_H
#include <linux/tracepoint.h>
#include <linux/unistd.h>
#include <linux/ftrace_event.h>
#include <linux/thread_info.h>
#include <asm/ptrace.h>
/*
* A syscall entry in the ftrace syscalls array.
*
* @name: name of the syscall
* @syscall_nr: number of the syscall
* @nb_args: number of parameters it takes
* @types: list of types as strings
* @args: list of args as strings (args[i] matches types[i])
* @enter_fields: list of fields for syscall_enter trace event
* @enter_event: associated syscall_enter trace event
* @exit_event: associated syscall_exit trace event
*/
/* Runtime description of one syscall; field semantics are documented in
 * the kernel-doc block above. */
struct syscall_metadata {
const char *name;
int syscall_nr;
int nb_args;
const char **types; /* types[i] pairs with args[i], nb_args entries each */
const char **args;
struct list_head enter_fields;
struct ftrace_event_call *enter_event;
struct ftrace_event_call *exit_event;
};
#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS)
/*
 * Copy the current task's TIF_SYSCALL_TRACEPOINT state onto task @p,
 * setting or clearing the flag so @p matches the caller.  NOTE(review):
 * presumably invoked when a new task is created -- confirm at call site.
 */
static inline void syscall_tracepoint_update(struct task_struct *p)
{
	int traced = test_thread_flag(TIF_SYSCALL_TRACEPOINT);

	if (traced)
		set_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT);
	else
		clear_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT);
}
#else
/* Syscall tracepoints unavailable in this config: nothing to propagate. */
static inline void syscall_tracepoint_update(struct task_struct *p)
{
}
#endif
#endif /* _TRACE_SYSCALL_H */