mirror of
https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git
synced 2025-09-08 01:08:03 -04:00
Fixed MTP to work with TWRP
This commit is contained in:
commit
f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions
9
kernel/events/Makefile
Normal file
9
kernel/events/Makefile
Normal file
|
@ -0,0 +1,9 @@
|
|||
# Don't compile core.o with -pg: presumably to keep the ftrace
# function tracer from hooking the perf core itself — TODO confirm.
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_core.o = -pg
endif

obj-y := core.o ring_buffer.o callchain.o

obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_UPROBES) += uprobes.o
209
kernel/events/callchain.c
Normal file
209
kernel/events/callchain.c
Normal file
|
@ -0,0 +1,209 @@
|
|||
/*
|
||||
* Performance events callchain code, extracted from core.c:
|
||||
*
|
||||
* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
|
||||
* Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
|
||||
* Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
|
||||
* Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
|
||||
*
|
||||
* For licensing details see kernel-base/COPYING
|
||||
*/
|
||||
|
||||
#include <linux/perf_event.h>
|
||||
#include <linux/slab.h>
|
||||
#include "internal.h"
|
||||
|
||||
struct callchain_cpus_entries {
|
||||
struct rcu_head rcu_head;
|
||||
struct perf_callchain_entry *cpu_entries[0];
|
||||
};
|
||||
|
||||
static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
|
||||
static atomic_t nr_callchain_events;
|
||||
static DEFINE_MUTEX(callchain_mutex);
|
||||
static struct callchain_cpus_entries *callchain_cpus_entries;
|
||||
|
||||
|
||||
/*
 * Weak no-op fallbacks for architectures without callchain support;
 * arch code overrides these to record frames from @regs into @entry.
 */
__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
				  struct pt_regs *regs)
{
}

__weak void perf_callchain_user(struct perf_callchain_entry *entry,
				struct pt_regs *regs)
{
}
|
||||
|
||||
static void release_callchain_buffers_rcu(struct rcu_head *head)
|
||||
{
|
||||
struct callchain_cpus_entries *entries;
|
||||
int cpu;
|
||||
|
||||
entries = container_of(head, struct callchain_cpus_entries, rcu_head);
|
||||
|
||||
for_each_possible_cpu(cpu)
|
||||
kfree(entries->cpu_entries[cpu]);
|
||||
|
||||
kfree(entries);
|
||||
}
|
||||
|
||||
/*
 * Unpublish the global buffer pointer and defer the actual freeing to
 * an RCU callback, so in-flight rcu_dereference() readers (see
 * get_callchain_entry()) stay safe.  Caller holds callchain_mutex.
 */
static void release_callchain_buffers(void)
{
	struct callchain_cpus_entries *entries;

	entries = callchain_cpus_entries;
	RCU_INIT_POINTER(callchain_cpus_entries, NULL);
	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}
|
||||
|
||||
/*
 * Allocate one perf_callchain_entry per recursion context for every
 * possible CPU and publish the container via RCU.
 * Returns 0 on success, -ENOMEM on allocation failure.
 * Caller holds callchain_mutex.
 */
static int alloc_callchain_buffers(void)
{
	int cpu;
	int size;
	struct callchain_cpus_entries *entries;

	/*
	 * We can't use the percpu allocation API for data that can be
	 * accessed from NMI. Use a temporary manual per cpu allocation
	 * until that gets sorted out.
	 */
	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);

	entries = kzalloc(size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	/* One scratch entry per context: task, softirq, hardirq, NMI. */
	size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;

	for_each_possible_cpu(cpu) {
		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
							 cpu_to_node(cpu));
		if (!entries->cpu_entries[cpu])
			goto fail;
	}

	rcu_assign_pointer(callchain_cpus_entries, entries);

	return 0;

fail:
	/* Unfilled slots are NULL (kzalloc) and kfree(NULL) is a no-op. */
	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);
	kfree(entries);

	return -ENOMEM;
}
|
||||
|
||||
/*
 * Take a reference on the callchain buffers, allocating them on the
 * 0 -> 1 transition.  On failure the reference is dropped again.
 * Returns 0, -ENOMEM (allocation failed now or earlier) or -EINVAL
 * (counter underflow, should never happen).
 */
int get_callchain_buffers(void)
{
	int err = 0;
	int count;

	mutex_lock(&callchain_mutex);

	count = atomic_inc_return(&nr_callchain_events);
	if (WARN_ON_ONCE(count < 1)) {
		err = -EINVAL;
		goto exit;
	}

	if (count > 1) {
		/* If the allocation failed, give up */
		if (!callchain_cpus_entries)
			err = -ENOMEM;
		/* Otherwise the first user already allocated; just share. */
		goto exit;
	}

	err = alloc_callchain_buffers();
exit:
	if (err)
		atomic_dec(&nr_callchain_events);

	mutex_unlock(&callchain_mutex);

	return err;
}
|
||||
|
||||
/*
 * Drop a reference taken by get_callchain_buffers(); the last holder
 * (1 -> 0 transition, taken atomically together with the mutex)
 * schedules the buffers for RCU release.
 */
void put_callchain_buffers(void)
{
	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
		release_callchain_buffers();
		mutex_unlock(&callchain_mutex);
	}
}
|
||||
|
||||
/*
 * Claim this CPU's scratch entry for the current recursion context.
 * *rctx is set to the context index, or -1 if we are already inside a
 * callchain capture at this context (recursion).  Returns NULL when
 * recursing or when no buffers are allocated; pair with
 * put_callchain_entry() whenever *rctx != -1.
 */
static struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
	int cpu;
	struct callchain_cpus_entries *entries;

	*rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
	if (*rctx == -1)
		return NULL;

	entries = rcu_dereference(callchain_cpus_entries);
	if (!entries)
		return NULL;

	cpu = smp_processor_id();

	return &entries->cpu_entries[cpu][*rctx];
}
|
||||
|
||||
/* Release the recursion slot claimed by get_callchain_entry(). */
static void
put_callchain_entry(int rctx)
{
	put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
}
|
||||
|
||||
/*
 * Capture a callchain for @event at @regs, honouring the event's
 * exclude_callchain_kernel/user attributes.  Returns a per-CPU scratch
 * entry (only valid until the recursion context is released) or NULL
 * if both sides are excluded, recursion was detected, or no buffers
 * are allocated.
 */
struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs)
{
	int rctx;
	struct perf_callchain_entry *entry;

	int kernel = !event->attr.exclude_callchain_kernel;
	int user   = !event->attr.exclude_callchain_user;

	if (!kernel && !user)
		return NULL;

	entry = get_callchain_entry(&rctx);
	/* rctx == -1: recursion, nothing was claimed — don't put. */
	if (rctx == -1)
		return NULL;

	if (!entry)
		goto exit_put;

	entry->nr = 0;

	if (kernel && !user_mode(regs)) {
		perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
		perf_callchain_kernel(entry, regs);
	}

	if (user) {
		if (!user_mode(regs)) {
			/*
			 * Interrupted in the kernel: walk the user stack
			 * from the saved user regs, if there is a user
			 * context (mm) at all.
			 */
			if (current->mm)
				regs = task_pt_regs(current);
			else
				regs = NULL;
		}

		if (regs) {
			/*
			 * Disallow cross-task user callchains.
			 */
			if (event->ctx->task && event->ctx->task != current)
				goto exit_put;

			perf_callchain_store(entry, PERF_CONTEXT_USER);
			perf_callchain_user(entry, regs);
		}
	}

exit_put:
	put_callchain_entry(rctx);

	return entry;
}
|
8339
kernel/events/core.c
Normal file
8339
kernel/events/core.c
Normal file
File diff suppressed because it is too large
Load diff
655
kernel/events/hw_breakpoint.c
Normal file
655
kernel/events/hw_breakpoint.c
Normal file
|
@ -0,0 +1,655 @@
|
|||
/*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
*
|
||||
* Copyright (C) 2007 Alan Stern
|
||||
* Copyright (C) IBM Corporation, 2009
|
||||
* Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
|
||||
*
|
||||
* Thanks to Ingo Molnar for his many suggestions.
|
||||
*
|
||||
* Authors: Alan Stern <stern@rowland.harvard.edu>
|
||||
* K.Prasad <prasad@linux.vnet.ibm.com>
|
||||
* Frederic Weisbecker <fweisbec@gmail.com>
|
||||
*/
|
||||
|
||||
/*
|
||||
* HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
|
||||
* using the CPU's debug registers.
|
||||
* This file contains the arch-independent routines.
|
||||
*/
|
||||
|
||||
#include <linux/irqflags.h>
|
||||
#include <linux/kallsyms.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/kprobes.h>
|
||||
#include <linux/kdebug.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/smp.h>
|
||||
|
||||
#include <linux/hw_breakpoint.h>
|
||||
/*
|
||||
* Constraints data
|
||||
*/
|
||||
/*
 * Per-CPU, per-type (TYPE_INST/TYPE_DATA) breakpoint slot accounting.
 */
struct bp_cpuinfo {
	/* Number of pinned cpu breakpoints in a cpu */
	unsigned int	cpu_pinned;
	/* tsk_pinned[n] is the number of tasks having n+1 breakpoints */
	unsigned int	*tsk_pinned;
	/* Number of non-pinned cpu/task breakpoints in a cpu */
	unsigned int	flexible; /* XXX: placeholder, see fetch_this_slot() */
};

static DEFINE_PER_CPU(struct bp_cpuinfo, bp_cpuinfo[TYPE_MAX]);
/* Number of hardware slots per type, filled in by init_hw_breakpoint(). */
static int nr_slots[TYPE_MAX];
|
||||
static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type)
|
||||
{
|
||||
return per_cpu_ptr(bp_cpuinfo + type, cpu);
|
||||
}
|
||||
|
||||
/* Keep track of the breakpoints attached to tasks */
static LIST_HEAD(bp_task_head);

/* Set once init_hw_breakpoint() has allocated the per-CPU tables. */
static int constraints_initialized;

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);
|
||||
|
||||
/*
 * How many hardware slots one breakpoint consumes; weak default is a
 * single slot, arch code may override.
 */
__weak int hw_breakpoint_weight(struct perf_event *bp)
{
	return 1;
}
|
||||
|
||||
static inline enum bp_type_idx find_slot_idx(struct perf_event *bp)
|
||||
{
|
||||
if (bp->attr.bp_type & HW_BREAKPOINT_RW)
|
||||
return TYPE_DATA;
|
||||
|
||||
return TYPE_INST;
|
||||
}
|
||||
|
||||
/*
 * Report the maximum number of pinned breakpoints a task
 * have in this cpu
 */
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{
	unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
	int i;

	/* Scan from the top: tsk_pinned[i] > 0 means some task has i+1 bps. */
	for (i = nr_slots[type] - 1; i >= 0; i--) {
		if (tsk_pinned[i] > 0)
			return i + 1;
	}

	return 0;
}
|
||||
|
||||
/*
 * Count the number of breakpoints of the same type and same task.
 * The given event must be not on the list.
 *
 * Counts weights of breakpoints on @bp's target task that either are
 * not CPU-bound or are bound to @cpu.  Walks bp_task_head; callers
 * hold nr_bp_mutex.
 */
static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
{
	struct task_struct *tsk = bp->hw.bp_target;
	struct perf_event *iter;
	int count = 0;

	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
		if (iter->hw.bp_target == tsk &&
		    find_slot_idx(iter) == type &&
		    (iter->cpu < 0 || cpu == iter->cpu))
			count += hw_breakpoint_weight(iter);
	}

	return count;
}
|
||||
|
||||
static const struct cpumask *cpumask_of_bp(struct perf_event *bp)
|
||||
{
|
||||
if (bp->cpu >= 0)
|
||||
return cpumask_of(bp->cpu);
|
||||
return cpu_possible_mask;
|
||||
}
|
||||
|
||||
/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 *
 * Fills @slots with the worst case (maximum) over the CPUs @bp spans.
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
		    enum bp_type_idx type)
{
	const struct cpumask *cpumask = cpumask_of_bp(bp);
	int cpu;

	for_each_cpu(cpu, cpumask) {
		struct bp_cpuinfo *info = get_bp_info(cpu, type);
		int nr;

		nr = info->cpu_pinned;
		/* CPU-wide bp: worst per-task load; task bp: its own task's load. */
		if (!bp->hw.bp_target)
			nr += max_task_bp_pinned(cpu, type);
		else
			nr += task_bp_pinned(cpu, bp, type);

		if (nr > slots->pinned)
			slots->pinned = nr;

		nr = info->flexible;
		if (nr > slots->flexible)
			slots->flexible = nr;
	}
}
|
||||
|
||||
/*
|
||||
* For now, continue to consider flexible as pinned, until we can
|
||||
* ensure no flexible event can ever be scheduled before a pinned event
|
||||
* in a same cpu.
|
||||
*/
|
||||
static void
|
||||
fetch_this_slot(struct bp_busy_slots *slots, int weight)
|
||||
{
|
||||
slots->pinned += weight;
|
||||
}
|
||||
|
||||
/*
 * Add a pinned breakpoint for the given task in our constraint table
 *
 * Moves the task's histogram bucket: the task had old_idx+1 pinned
 * breakpoints on @cpu, it now has new_idx+1.  @weight is negative on
 * removal (see toggle_bp_slot()).
 */
static void toggle_bp_task_slot(struct perf_event *bp, int cpu,
				enum bp_type_idx type, int weight)
{
	unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
	int old_idx, new_idx;

	old_idx = task_bp_pinned(cpu, bp, type) - 1;
	new_idx = old_idx + weight;

	/* idx == -1 means "zero breakpoints", which has no bucket. */
	if (old_idx >= 0)
		tsk_pinned[old_idx]--;
	if (new_idx >= 0)
		tsk_pinned[new_idx]++;
}
|
||||
|
||||
/*
 * Add/remove the given breakpoint in our constraint table
 *
 * Caller holds nr_bp_mutex.  Task-bound breakpoints are also linked
 * on/off bp_task_head; note the list update must happen after the
 * per-CPU accounting, since task_bp_pinned() expects @bp off-list.
 */
static void
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
	       int weight)
{
	const struct cpumask *cpumask = cpumask_of_bp(bp);
	int cpu;

	if (!enable)
		weight = -weight;

	/* Pinned counter cpu profiling */
	if (!bp->hw.bp_target) {
		get_bp_info(bp->cpu, type)->cpu_pinned += weight;
		return;
	}

	/* Pinned counter task profiling */
	for_each_cpu(cpu, cpumask)
		toggle_bp_task_slot(bp, cpu, type, weight);

	if (enable)
		list_add_tail(&bp->hw.bp_list, &bp_task_head);
	else
		list_del(&bp->hw.bp_list);
}
|
||||
|
||||
/*
 * Function to perform processor-specific cleanup during unregistration
 */
__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * A weak stub function here for those archs that don't define
	 * it inside arch/.../kernel/hw_breakpoint.c
	 */
}
|
||||
|
||||
/*
|
||||
* Contraints to check before allowing this new breakpoint counter:
|
||||
*
|
||||
* == Non-pinned counter == (Considered as pinned for now)
|
||||
*
|
||||
* - If attached to a single cpu, check:
|
||||
*
|
||||
* (per_cpu(info->flexible, cpu) || (per_cpu(info->cpu_pinned, cpu)
|
||||
* + max(per_cpu(info->tsk_pinned, cpu)))) < HBP_NUM
|
||||
*
|
||||
* -> If there are already non-pinned counters in this cpu, it means
|
||||
* there is already a free slot for them.
|
||||
* Otherwise, we check that the maximum number of per task
|
||||
* breakpoints (for this cpu) plus the number of per cpu breakpoint
|
||||
* (for this cpu) doesn't cover every registers.
|
||||
*
|
||||
* - If attached to every cpus, check:
|
||||
*
|
||||
* (per_cpu(info->flexible, *) || (max(per_cpu(info->cpu_pinned, *))
|
||||
* + max(per_cpu(info->tsk_pinned, *)))) < HBP_NUM
|
||||
*
|
||||
* -> This is roughly the same, except we check the number of per cpu
|
||||
* bp for every cpu and we keep the max one. Same for the per tasks
|
||||
* breakpoints.
|
||||
*
|
||||
*
|
||||
* == Pinned counter ==
|
||||
*
|
||||
* - If attached to a single cpu, check:
|
||||
*
|
||||
* ((per_cpu(info->flexible, cpu) > 1) + per_cpu(info->cpu_pinned, cpu)
|
||||
* + max(per_cpu(info->tsk_pinned, cpu))) < HBP_NUM
|
||||
*
|
||||
* -> Same checks as before. But now the info->flexible, if any, must keep
|
||||
* one register at least (or they will never be fed).
|
||||
*
|
||||
* - If attached to every cpus, check:
|
||||
*
|
||||
* ((per_cpu(info->flexible, *) > 1) + max(per_cpu(info->cpu_pinned, *))
|
||||
* + max(per_cpu(info->tsk_pinned, *))) < HBP_NUM
|
||||
*/
|
||||
/*
 * Reserve a hardware slot for @bp, locked variant (caller holds
 * nr_bp_mutex or guarantees exclusion, see dbg_reserve_bp_slot()).
 * Returns 0, -ENOMEM (constraints tables missing), -EINVAL (bad type)
 * or -ENOSPC (no free slot under the constraints described above).
 */
static int __reserve_bp_slot(struct perf_event *bp)
{
	struct bp_busy_slots slots = {0};
	enum bp_type_idx type;
	int weight;

	/* We couldn't initialize breakpoint constraints on boot */
	if (!constraints_initialized)
		return -ENOMEM;

	/* Basic checks */
	if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY ||
	    bp->attr.bp_type == HW_BREAKPOINT_INVALID)
		return -EINVAL;

	type = find_slot_idx(bp);
	weight = hw_breakpoint_weight(bp);

	fetch_bp_busy_slots(&slots, bp, type);
	/*
	 * Simulate the addition of this breakpoint to the constraints
	 * and see the result.
	 */
	fetch_this_slot(&slots, weight);

	/* Flexible counters need to keep at least one slot */
	if (slots.pinned + (!!slots.flexible) > nr_slots[type])
		return -ENOSPC;

	toggle_bp_slot(bp, true, type, weight);

	return 0;
}
|
||||
|
||||
int reserve_bp_slot(struct perf_event *bp)
|
||||
{
|
||||
int ret;
|
||||
|
||||
mutex_lock(&nr_bp_mutex);
|
||||
|
||||
ret = __reserve_bp_slot(bp);
|
||||
|
||||
mutex_unlock(&nr_bp_mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __release_bp_slot(struct perf_event *bp)
|
||||
{
|
||||
enum bp_type_idx type;
|
||||
int weight;
|
||||
|
||||
type = find_slot_idx(bp);
|
||||
weight = hw_breakpoint_weight(bp);
|
||||
toggle_bp_slot(bp, false, type, weight);
|
||||
}
|
||||
|
||||
/*
 * Release @bp's reserved slot, running arch-specific cleanup first,
 * all under nr_bp_mutex.
 */
void release_bp_slot(struct perf_event *bp)
{
	mutex_lock(&nr_bp_mutex);

	arch_unregister_hw_breakpoint(bp);
	__release_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);
}
|
||||
|
||||
/*
|
||||
* Allow the kernel debugger to reserve breakpoint slots without
|
||||
* taking a lock using the dbg_* variant of for the reserve and
|
||||
* release breakpoint slots.
|
||||
*/
|
||||
/*
 * Lockless (kernel-debugger) slot reservation: bail out with -1 if
 * anyone holds nr_bp_mutex, since we cannot sleep in the debugger.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	return __reserve_bp_slot(bp);
}
|
||||
|
||||
int dbg_release_bp_slot(struct perf_event *bp)
|
||||
{
|
||||
if (mutex_is_locked(&nr_bp_mutex))
|
||||
return -1;
|
||||
|
||||
__release_bp_slot(bp);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Validate @bp's attributes: arch-level settings first, then the
 * kernel-address policy.  Returns 0, an arch error code, -EINVAL
 * (kernel bp but kernel excluded) or -EPERM (unprivileged kernel bp).
 */
static int validate_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = arch_validate_hwbkpt_settings(bp);
	if (ret)
		return ret;

	if (arch_check_bp_in_kernelspace(bp)) {
		if (bp->attr.exclude_kernel)
			return -EINVAL;
		/*
		 * Don't let unprivileged users set a breakpoint in the trap
		 * path to avoid trap recursion attacks.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return 0;
}
|
||||
|
||||
/*
 * Reserve a slot then validate the breakpoint; on validation failure
 * the slot is released again so accounting stays balanced.
 */
int register_perf_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = reserve_bp_slot(bp);
	if (ret)
		return ret;

	ret = validate_hw_breakpoint(bp);

	/* if arch_validate_hwbkpt_settings() fails then release bp slot */
	if (ret)
		release_bp_slot(bp);

	return ret;
}
|
||||
|
||||
/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data passed to the overflow handler
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context,
			    struct task_struct *tsk)
{
	return perf_event_create_kernel_counter(attr, -1, tsk, triggered,
						context);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
|
||||
|
||||
/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 *
 * Disables @bp, applies the new address/type/len, revalidates and
 * re-enables.  On validation failure the old attributes are restored
 * and the error is returned.
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
	u64 old_addr = bp->attr.bp_addr;
	u64 old_len = bp->attr.bp_len;
	int old_type = bp->attr.bp_type;
	int err = 0;

	/*
	 * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
	 * will not be possible to raise IPIs that invoke __perf_event_disable.
	 * So call the function directly after making sure we are targeting the
	 * current task.
	 */
	if (irqs_disabled() && bp->ctx && bp->ctx->task == current)
		__perf_event_disable(bp);
	else
		perf_event_disable(bp);

	bp->attr.bp_addr = attr->bp_addr;
	bp->attr.bp_type = attr->bp_type;
	bp->attr.bp_len = attr->bp_len;

	/* Caller wants it left disabled: skip validation and re-enable. */
	if (attr->disabled)
		goto end;

	err = validate_hw_breakpoint(bp);
	if (!err)
		perf_event_enable(bp);

	if (err) {
		/* Roll back to the previous, known-good attributes. */
		bp->attr.bp_addr = old_addr;
		bp->attr.bp_type = old_type;
		bp->attr.bp_len = old_len;
		if (!bp->attr.disabled)
			perf_event_enable(bp);

		return err;
	}

end:
	bp->attr.disabled = attr->disabled;

	return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
|
||||
|
||||
/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister, may be NULL (no-op)
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (bp)
		perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);
|
||||
|
||||
/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data passed to the overflow handler
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context)
{
	struct perf_event * __percpu *cpu_events, *bp;
	long err = 0;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return (void __percpu __force *)ERR_PTR(-ENOMEM);

	/* Hold off hotplug so the online set is stable while we register. */
	get_online_cpus();
	for_each_online_cpu(cpu) {
		bp = perf_event_create_kernel_counter(attr, cpu, NULL,
						      triggered, context);
		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			break;
		}

		per_cpu(*cpu_events, cpu) = bp;
	}
	put_online_cpus();

	if (likely(!err))
		return cpu_events;

	/* Partial failure: tear down whatever was registered so far. */
	unregister_wide_hw_breakpoint(cpu_events);
	return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
|
||||
|
||||
/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
	int cpu;

	/* Unset slots are NULL and unregister_hw_breakpoint(NULL) is a no-op. */
	for_each_possible_cpu(cpu)
		unregister_hw_breakpoint(per_cpu(*cpu_events, cpu));

	free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);
|
||||
|
||||
/* Die-notifier that routes debug exceptions to the breakpoint layer. */
static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};

/* perf_event ->destroy: give the hardware slot back on event teardown. */
static void bp_perf_event_destroy(struct perf_event *event)
{
	release_bp_slot(event);
}
|
||||
|
||||
/*
 * PMU ->event_init: claim events of type PERF_TYPE_BREAKPOINT,
 * reserve and validate their slot, and install the destroy hook.
 * Returns -ENOENT for other event types so the core tries other PMUs.
 */
static int hw_breakpoint_event_init(struct perf_event *bp)
{
	int err;

	if (bp->attr.type != PERF_TYPE_BREAKPOINT)
		return -ENOENT;

	/*
	 * no branch sampling for breakpoint events
	 */
	if (has_branch_stack(bp))
		return -EOPNOTSUPP;

	err = register_perf_hw_breakpoint(bp);
	if (err)
		return err;

	bp->destroy = bp_perf_event_destroy;

	return 0;
}
|
||||
|
||||
/*
 * PMU ->add: program the breakpoint into the CPU's debug registers,
 * honouring PERF_EF_START and (re)arming the sampling period.
 */
static int hw_breakpoint_add(struct perf_event *bp, int flags)
{
	if (!(flags & PERF_EF_START))
		bp->hw.state = PERF_HES_STOPPED;

	if (is_sampling_event(bp)) {
		bp->hw.last_period = bp->hw.sample_period;
		perf_swevent_set_period(bp);
	}

	return arch_install_hw_breakpoint(bp);
}
|
||||
|
||||
static void hw_breakpoint_del(struct perf_event *bp, int flags)
|
||||
{
|
||||
arch_uninstall_hw_breakpoint(bp);
|
||||
}
|
||||
|
||||
static void hw_breakpoint_start(struct perf_event *bp, int flags)
|
||||
{
|
||||
bp->hw.state = 0;
|
||||
}
|
||||
|
||||
static void hw_breakpoint_stop(struct perf_event *bp, int flags)
|
||||
{
|
||||
bp->hw.state = PERF_HES_STOPPED;
|
||||
}
|
||||
|
||||
/* The breakpoint PMU, registered under PERF_TYPE_BREAKPOINT. */
static struct pmu perf_breakpoint = {
	.task_ctx_nr	= perf_sw_context, /* could eventually get its own */

	.event_init	= hw_breakpoint_event_init,
	.add		= hw_breakpoint_add,
	.del		= hw_breakpoint_del,
	.start		= hw_breakpoint_start,
	.stop		= hw_breakpoint_stop,
	.read		= hw_breakpoint_pmu_read,
};
|
||||
|
||||
/*
 * Boot-time setup: allocate the per-CPU constraint tables, register
 * the breakpoint PMU and hook the debug-exception notifier.
 * Returns 0 or -ENOMEM (all partial allocations are freed).
 */
int __init init_hw_breakpoint(void)
{
	int cpu, err_cpu;
	int i;

	for (i = 0; i < TYPE_MAX; i++)
		nr_slots[i] = hw_breakpoint_slots(i);

	for_each_possible_cpu(cpu) {
		for (i = 0; i < TYPE_MAX; i++) {
			struct bp_cpuinfo *info = get_bp_info(cpu, i);

			info->tsk_pinned = kcalloc(nr_slots[i], sizeof(int),
						   GFP_KERNEL);
			if (!info->tsk_pinned)
				goto err_alloc;
		}
	}

	constraints_initialized = 1;

	/* NOTE(review): perf_pmu_register()'s return value is ignored here. */
	perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT);

	return register_die_notifier(&hw_breakpoint_exceptions_nb);

err_alloc:
	/* Free every table allocated so far, up to and including @cpu. */
	for_each_possible_cpu(err_cpu) {
		for (i = 0; i < TYPE_MAX; i++)
			kfree(get_bp_info(err_cpu, i)->tsk_pinned);
		if (err_cpu == cpu)
			break;
	}

	return -ENOMEM;
}
|
||||
|
||||
|
198
kernel/events/internal.h
Normal file
198
kernel/events/internal.h
Normal file
|
@ -0,0 +1,198 @@
|
|||
#ifndef _KERNEL_EVENTS_INTERNAL_H
|
||||
#define _KERNEL_EVENTS_INTERNAL_H
|
||||
|
||||
#include <linux/hardirq.h>
|
||||
#include <linux/uaccess.h>
|
||||
|
||||
/* Buffer handling */
|
||||
|
||||
#define RING_BUFFER_WRITABLE 0x01
|
||||
|
||||
struct ring_buffer {
|
||||
atomic_t refcount;
|
||||
struct rcu_head rcu_head;
|
||||
#ifdef CONFIG_PERF_USE_VMALLOC
|
||||
struct work_struct work;
|
||||
int page_order; /* allocation order */
|
||||
#endif
|
||||
int nr_pages; /* nr of data pages */
|
||||
int overwrite; /* can overwrite itself */
|
||||
|
||||
atomic_t poll; /* POLL_ for wakeups */
|
||||
|
||||
local_t head; /* write position */
|
||||
local_t nest; /* nested writers */
|
||||
local_t events; /* event limit */
|
||||
local_t wakeup; /* wakeup stamp */
|
||||
local_t lost; /* nr records lost */
|
||||
|
||||
long watermark; /* wakeup watermark */
|
||||
/* poll crap */
|
||||
spinlock_t event_lock;
|
||||
struct list_head event_list;
|
||||
|
||||
atomic_t mmap_count;
|
||||
unsigned long mmap_locked;
|
||||
struct user_struct *mmap_user;
|
||||
|
||||
struct perf_event_mmap_page *user_page;
|
||||
void *data_pages[0];
|
||||
};
|
||||
|
||||
/* Ring-buffer lifecycle (ring_buffer.c). */
extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);

/* Sample-ID header helpers (core.c). */
extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

/* Translate an mmap page offset to the backing page. */
extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);
|
||||
|
||||
#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

/* Allocation order of each "data page" (vmalloc backing: one big chunk). */
static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

/* Page-backed buffers always use order-0 pages. */
static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

/* Total data size of the ring buffer in bytes. */
static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}
|
||||
|
||||
/*
 * Generate a bounded copy-into-ring-buffer function.  @memcpy_func
 * copies up to @size bytes and returns the number NOT copied (0 on
 * full success).  The generated function advances the output handle
 * across page boundaries and returns the number of bytes left uncopied.
 */
#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size = min(handle->size, len);				\
		written = memcpy_func(handle->addr, buf, size);		\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		buf += written;						\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}
|
||||
|
||||
/* Plain memcpy adapter: always succeeds, so 0 bytes remain uncopied. */
static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

/* "Copy" that writes nothing — used to skip over @n bytes of output. */
static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
|
||||
|
||||
#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

/*
 * Copy from user space without faulting (we may be in NMI/IRQ
 * context); returns the number of bytes that could not be copied.
 */
static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
|
||||
|
||||
/* Callchain handling */
|
||||
extern struct perf_callchain_entry *
|
||||
perf_callchain(struct perf_event *event, struct pt_regs *regs);
|
||||
extern int get_callchain_buffers(void);
|
||||
extern void put_callchain_buffers(void);
|
||||
|
||||
/*
 * Claim the recursion slot for the current execution context
 * (0=task, 1=softirq, 2=hardirq, 3=NMI).  Returns the slot index, or
 * -1 if this context is already inside a capture (recursion).
 */
static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	/* Compiler barrier: mark the slot taken before the protected work. */
	barrier();

	return rctx;
}
|
||||
|
||||
/* Release a slot taken by get_recursion_context(). */
static inline void put_recursion_context(int *recursion, int rctx)
{
	/* Compiler barrier: finish the protected work before releasing. */
	barrier();
	recursion[rctx]--;
}
|
||||
|
||||
/*
 * Whether this architecture can dump the user stack for samples, and
 * how to read the user stack pointer from pt_regs.
 */
#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */
|
||||
|
||||
#endif /* _KERNEL_EVENTS_INTERNAL_H */
|
417
kernel/events/ring_buffer.c
Normal file
417
kernel/events/ring_buffer.c
Normal file
|
@ -0,0 +1,417 @@
|
|||
/*
|
||||
* Performance events ring-buffer code:
|
||||
*
|
||||
* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
|
||||
* Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
|
||||
* Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
|
||||
* Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
|
||||
*
|
||||
* For licensing details see kernel-base/COPYING
|
||||
*/
|
||||
|
||||
#include <linux/perf_event.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/circ_buf.h>
|
||||
|
||||
#include "internal.h"
|
||||
|
||||
/*
 * Signal buffer consumers: mark the ring buffer readable (POLL_IN) and
 * queue the event's irq_work so the actual wakeup is delivered from a
 * safe context -- this may be called from NMI.
 */
static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->rb->poll, POLL_IN);

	handle->event->pending_wakeup = 1;
	irq_work_queue(&handle->event->pending);
}
|
||||
|
||||
/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;

	/*
	 * Bump the nesting count under preemption-off; only the outer-most
	 * level will publish the head in perf_output_put_handle().
	 */
	preempt_disable();
	local_inc(&rb->nest);
	/* Snapshot the wakeup counter so put can detect a missed wakeup. */
	handle->wakeup = local_read(&rb->wakeup);
}
|
||||
|
||||
/*
 * Drop one nesting level; the outer-most level publishes the new
 * data_head to userspace and issues any pending wakeup.
 */
static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long head;

again:
	head = local_read(&rb->head);

	/*
	 * IRQ/NMI can happen here, which means we can miss a head update.
	 */

	if (!local_dec_and_test(&rb->nest))
		goto out;

	/*
	 * Since the mmap() consumer (userspace) can run on a different CPU:
	 *
	 *   kernel				user
	 *
	 *   if (LOAD ->data_tail) {		LOAD ->data_head
	 *			(A)		smp_rmb()	(C)
	 *	STORE $data			LOAD $data
	 *	smp_wmb()	(B)		smp_mb()	(D)
	 *	STORE ->data_head		STORE ->data_tail
	 *   }
	 *
	 * Where A pairs with D, and B pairs with C.
	 *
	 * In our case (A) is a control dependency that separates the load of
	 * the ->data_tail and the stores of $data. In case ->data_tail
	 * indicates there is no room in the buffer to store $data we do not.
	 *
	 * D needs to be a full barrier since it separates the data READ
	 * from the tail WRITE.
	 *
	 * For B a WMB is sufficient since it separates two WRITEs, and for C
	 * an RMB is sufficient since it separates two READs.
	 *
	 * See perf_output_begin().
	 */
	smp_wmb(); /* B, matches C */
	rb->user_page->data_head = head;

	/*
	 * Now check if we missed an update -- rely on previous implied
	 * compiler barriers to force a re-read.
	 */
	if (unlikely(head != local_read(&rb->head))) {
		local_inc(&rb->nest);
		goto again;
	}

	if (handle->wakeup != local_read(&rb->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}
|
||||
|
||||
/*
 * Reserve @size bytes in @event's ring buffer and prime @handle for
 * writing them.  On success returns 0 with the RCU read lock held; the
 * caller must finish with perf_output_end().  Returns -ENOSPC (lock
 * dropped) when there is no buffer or not enough room, in which case
 * rb->lost is bumped so a PERF_RECORD_LOST is emitted on a later
 * successful begin.
 */
int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size)
{
	struct ring_buffer *rb;
	unsigned long tail, offset, head;
	int have_lost, page_shift;
	struct {
		struct perf_event_header header;
		u64 id;
		u64 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	rb = rcu_dereference(event->rb);
	if (unlikely(!rb))
		goto out;

	if (unlikely(!rb->nr_pages))
		goto out;

	handle->rb = rb;
	handle->event = event;

	have_lost = local_read(&rb->lost);
	if (unlikely(have_lost)) {
		/* Reserve room for the LOST record we must emit first. */
		size += sizeof(lost_event);
		if (event->attr.sample_id_all)
			size += event->id_header_size;
	}

	perf_output_get_handle(handle);

	/* Lock-free reservation: advance rb->head by @size via cmpxchg. */
	do {
		tail = ACCESS_ONCE(rb->user_page->data_tail);
		offset = head = local_read(&rb->head);
		if (!rb->overwrite &&
		    unlikely(CIRC_SPACE(head, tail, perf_data_size(rb)) < size))
			goto fail;

		/*
		 * The above forms a control dependency barrier separating the
		 * @tail load above from the data stores below. Since the @tail
		 * load is required to compute the branch to fail below.
		 *
		 * A, matches D; the full memory barrier userspace SHOULD issue
		 * after reading the data and before storing the new tail
		 * position.
		 *
		 * See perf_output_put_handle().
		 */

		head += size;
	} while (local_cmpxchg(&rb->head, offset, head) != offset);

	/*
	 * We rely on the implied barrier() by local_cmpxchg() to ensure
	 * none of the data stores below can be lifted up by the compiler.
	 */

	if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
		local_add(rb->watermark, &rb->wakeup);

	page_shift = PAGE_SHIFT + page_order(rb);

	/* Translate the reserved offset into a data page + in-page offset. */
	handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
	offset &= (1UL << page_shift) - 1;
	handle->addr = rb->data_pages[handle->page] + offset;
	handle->size = (1UL << page_shift) - offset;

	if (unlikely(have_lost)) {
		struct perf_sample_data sample_data;

		lost_event.header.size = sizeof(lost_event);
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.id = event->id;
		lost_event.lost = local_xchg(&rb->lost, 0);

		perf_event_header__init_id(&lost_event.header,
					   &sample_data, event);
		perf_output_put(handle, lost_event);
		perf_event__output_id_sample(event, handle, &sample_data);
	}

	return 0;

fail:
	local_inc(&rb->lost);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}
|
||||
|
||||
/*
 * Copy @len bytes from @buf into the area reserved by perf_output_begin().
 * Thin wrapper around the generated __output_copy(); presumably returns
 * the number of bytes not copied -- confirm against DEFINE_OUTPUT_COPY.
 */
unsigned int perf_output_copy(struct perf_output_handle *handle,
			      const void *buf, unsigned int len)
{
	return __output_copy(handle, buf, len);
}
|
||||
|
||||
/*
 * Advance the output handle by @len bytes without writing any data
 * (leaves whatever was in the buffer in place).
 */
unsigned int perf_output_skip(struct perf_output_handle *handle,
			      unsigned int len)
{
	return __output_skip(handle, NULL, len);
}
|
||||
|
||||
/*
 * Finish an output session started by perf_output_begin(): publish the
 * new head (outer-most nesting level only) and drop the RCU read lock
 * taken in perf_output_begin().
 */
void perf_output_end(struct perf_output_handle *handle)
{
	perf_output_put_handle(handle);
	rcu_read_unlock();
}
|
||||
|
||||
static void
|
||||
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
|
||||
{
|
||||
long max_size = perf_data_size(rb);
|
||||
|
||||
if (watermark)
|
||||
rb->watermark = min(max_size, watermark);
|
||||
|
||||
if (!rb->watermark)
|
||||
rb->watermark = max_size / 2;
|
||||
|
||||
if (flags & RING_BUFFER_WRITABLE)
|
||||
rb->overwrite = 0;
|
||||
else
|
||||
rb->overwrite = 1;
|
||||
|
||||
atomic_set(&rb->refcount, 1);
|
||||
|
||||
INIT_LIST_HEAD(&rb->event_list);
|
||||
spin_lock_init(&rb->event_lock);
|
||||
}
|
||||
|
||||
#ifndef CONFIG_PERF_USE_VMALLOC
|
||||
|
||||
/*
|
||||
* Back perf_mmap() with regular GFP_KERNEL-0 pages.
|
||||
*/
|
||||
|
||||
struct page *
|
||||
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
|
||||
{
|
||||
if (pgoff > rb->nr_pages)
|
||||
return NULL;
|
||||
|
||||
if (pgoff == 0)
|
||||
return virt_to_page(rb->user_page);
|
||||
|
||||
return virt_to_page(rb->data_pages[pgoff - 1]);
|
||||
}
|
||||
|
||||
static void *perf_mmap_alloc_page(int cpu)
|
||||
{
|
||||
struct page *page;
|
||||
int node;
|
||||
|
||||
node = (cpu == -1) ? cpu : cpu_to_node(cpu);
|
||||
page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
|
||||
if (!page)
|
||||
return NULL;
|
||||
|
||||
return page_address(page);
|
||||
}
|
||||
|
||||
/*
 * Allocate a ring buffer backed by individual order-0 pages: one
 * user/control page plus @nr_pages data pages, all allocated with
 * @cpu's node preference (-1 for none).  Returns the initialised
 * buffer, or NULL on allocation failure (nothing leaked).
 */
struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	int i;

	/* Struct plus the trailing data_pages[] pointer array. */
	size = sizeof(struct ring_buffer);
	size += nr_pages * sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	rb->user_page = perf_mmap_alloc_page(cpu);
	if (!rb->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!rb->data_pages[i])
			goto fail_data_pages;
	}

	rb->nr_pages = nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_data_pages:
	/* Unwind only the data pages that were successfully allocated. */
	for (i--; i >= 0; i--)
		free_page((unsigned long)rb->data_pages[i]);

	free_page((unsigned long)rb->user_page);

fail_user_page:
	kfree(rb);

fail:
	return NULL;
}
|
||||
|
||||
/*
 * Free one buffer page, clearing page->mapping first so the page does
 * not go back to the allocator with a stale mapping back-pointer.
 */
static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page((void *)addr);

	page->mapping = NULL;
	__free_page(page);
}
|
||||
|
||||
void rb_free(struct ring_buffer *rb)
|
||||
{
|
||||
int i;
|
||||
|
||||
perf_mmap_free_page((unsigned long)rb->user_page);
|
||||
for (i = 0; i < rb->nr_pages; i++)
|
||||
perf_mmap_free_page((unsigned long)rb->data_pages[i]);
|
||||
kfree(rb);
|
||||
}
|
||||
|
||||
#else
|
||||
/* Number of PAGE_SIZE data pages, accounting for the allocation order. */
static int data_page_nr(struct ring_buffer *rb)
{
	return rb->nr_pages << page_order(rb);
}
|
||||
|
||||
/*
 * vmalloc variant: the user page and the data area live in one
 * contiguous vmalloc region, so @pgoff translates by simple offsetting
 * from the user page.  Returns NULL for out-of-range offsets.
 */
struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	/* The '>' counts in the user page. */
	if (pgoff > data_page_nr(rb))
		return NULL;

	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}
|
||||
|
||||
/* Clear the page's mapping back-pointer prior to freeing the vmalloc area. */
static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}
|
||||
|
||||
/*
 * Deferred teardown of a vmalloc-backed buffer: clear every page's
 * mapping pointer, free the vmalloc area, then the ring_buffer struct.
 * NOTE(review): run from a workqueue (see rb_free() below) -- presumably
 * because the final reference may drop in a context where vfree() is
 * not safe; confirm against the freeing call sites.
 */
static void rb_free_work(struct work_struct *work)
{
	struct ring_buffer *rb;
	void *base;
	int i, nr;

	rb = container_of(work, struct ring_buffer, work);
	nr = data_page_nr(rb);

	base = rb->user_page;
	/* The '<=' counts in the user page. */
	for (i = 0; i <= nr; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(rb);
}
|
||||
|
||||
/* Defer the actual teardown to rb_free_work() via the system workqueue. */
void rb_free(struct ring_buffer *rb)
{
	schedule_work(&rb->work);
}
|
||||
|
||||
/*
 * vmalloc-backed allocation: one vmalloc_user() area holds the
 * user/control page followed by the whole data area.  Only a single
 * data_pages[] slot is used; page_order records the size of that one
 * "page".  Returns the initialised buffer, or NULL on failure.
 */
struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	void *all_buf;

	/* Struct plus a single data_pages[] slot. */
	size = sizeof(struct ring_buffer);
	size += sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	INIT_WORK(&rb->work, rb_free_work);

	/* One extra page for the user/control page. */
	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	rb->user_page = all_buf;
	rb->data_pages[0] = all_buf + PAGE_SIZE;
	/*
	 * NOTE(review): ilog2(0) is ill-defined; presumably callers pass
	 * nr_pages == 0 (nr_pages below becomes 0) or a power of two --
	 * confirm at the call sites.
	 */
	rb->page_order = ilog2(nr_pages);
	rb->nr_pages = !!nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_all_buf:
	kfree(rb);

fail:
	return NULL;
}
|
||||
|
||||
#endif
|
1993
kernel/events/uprobes.c
Normal file
1993
kernel/events/uprobes.c
Normal file
File diff suppressed because it is too large
Load diff
Loading…
Add table
Add a link
Reference in a new issue