Mirror of https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git (synced 2025-09-08 17:18:05 -04:00)

Commit f6dfaef42e: Fixed MTP to work with TWRP
50820 changed files with 20846062 additions and 0 deletions
17	arch/powerpc/perf/Makefile	Normal file
@@ -0,0 +1,17 @@
subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror

obj-$(CONFIG_PERF_EVENTS)	+= callchain.o

obj-$(CONFIG_PPC_PERF_CTRS)	+= core-book3s.o bhrb.o
obj64-$(CONFIG_PPC_PERF_CTRS)	+= power4-pmu.o ppc970-pmu.o power5-pmu.o \
				   power5+-pmu.o power6-pmu.o power7-pmu.o \
				   power8-pmu.o
obj32-$(CONFIG_PPC_PERF_CTRS)	+= mpc7450-pmu.o

obj-$(CONFIG_FSL_EMB_PERF_EVENT) += core-fsl-emb.o
obj-$(CONFIG_FSL_EMB_PERF_EVENT_E500) += e500-pmu.o e6500-pmu.o

obj-$(CONFIG_HV_PERF_CTRS) += hv-24x7.o hv-gpci.o hv-common.o

obj-$(CONFIG_PPC64)		+= $(obj64-y)
obj-$(CONFIG_PPC32)		+= $(obj32-y)
44	arch/powerpc/perf/bhrb.S	Normal file
@@ -0,0 +1,44 @@
/*
 * Basic assembly code to read BHRB entries
 *
 * Copyright 2013 Anshuman Khandual, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/ppc_asm.h>
#include <asm/ppc-opcode.h>

	.text

	.balign 8

/* r3 = n  (where n = [0-31])
 * The maximum number of BHRB entries supported with PPC_MFBHRBE instruction
 * is 1024. We have limited number of table entries here as POWER8 implements
 * 32 BHRB entries.
 */

/* .global read_bhrb */
_GLOBAL(read_bhrb)
	cmpldi	r3,31
	bgt	1f
	ld	r4,bhrb_table@got(r2)
	sldi	r3,r3,3
	add	r3,r4,r3
	mtctr	r3
	bctr
1:	li	r3,0
	blr

#define MFBHRB_TABLE1(n) PPC_MFBHRBE(R3,n); blr
#define MFBHRB_TABLE2(n) MFBHRB_TABLE1(n); MFBHRB_TABLE1(n+1)
#define MFBHRB_TABLE4(n) MFBHRB_TABLE2(n); MFBHRB_TABLE2(n+2)
#define MFBHRB_TABLE8(n) MFBHRB_TABLE4(n); MFBHRB_TABLE4(n+4)
#define MFBHRB_TABLE16(n) MFBHRB_TABLE8(n); MFBHRB_TABLE8(n+8)
#define MFBHRB_TABLE32(n) MFBHRB_TABLE16(n); MFBHRB_TABLE16(n+16)

bhrb_table:
	MFBHRB_TABLE32(0)
492	arch/powerpc/perf/callchain.c	Normal file
@@ -0,0 +1,492 @@
/*
|
||||
* Performance counter callchain support - powerpc architecture code
|
||||
*
|
||||
* Copyright © 2009 Paul Mackerras, IBM Corporation.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/perf_event.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/mm.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/sigcontext.h>
|
||||
#include <asm/ucontext.h>
|
||||
#include <asm/vdso.h>
|
||||
#ifdef CONFIG_PPC64
|
||||
#include "../kernel/ppc32.h"
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* Is sp valid as the address of the next kernel stack frame after prev_sp?
|
||||
* The next frame may be in a different stack area but should not go
|
||||
* back down in the same stack area.
|
||||
*/
|
||||
static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
|
||||
{
|
||||
if (sp & 0xf)
|
||||
return 0; /* must be 16-byte aligned */
|
||||
if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
|
||||
return 0;
|
||||
if (sp >= prev_sp + STACK_FRAME_MIN_SIZE)
|
||||
return 1;
|
||||
/*
|
||||
* sp could decrease when we jump off an interrupt stack
|
||||
* back to the regular process stack.
|
||||
*/
|
||||
if ((sp & ~(THREAD_SIZE - 1)) != (prev_sp & ~(THREAD_SIZE - 1)))
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
|
||||
{
|
||||
unsigned long sp, next_sp;
|
||||
unsigned long next_ip;
|
||||
unsigned long lr;
|
||||
long level = 0;
|
||||
unsigned long *fp;
|
||||
|
||||
lr = regs->link;
|
||||
sp = regs->gpr[1];
|
||||
perf_callchain_store(entry, perf_instruction_pointer(regs));
|
||||
|
||||
if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
|
||||
return;
|
||||
|
||||
for (;;) {
|
||||
fp = (unsigned long *) sp;
|
||||
next_sp = fp[0];
|
||||
|
||||
if (next_sp == sp + STACK_INT_FRAME_SIZE &&
|
||||
fp[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
|
||||
/*
|
||||
* This looks like an interrupt frame for an
|
||||
* interrupt that occurred in the kernel
|
||||
*/
|
||||
regs = (struct pt_regs *)(sp + STACK_FRAME_OVERHEAD);
|
||||
next_ip = regs->nip;
|
||||
lr = regs->link;
|
||||
level = 0;
|
||||
perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
|
||||
|
||||
} else {
|
||||
if (level == 0)
|
||||
next_ip = lr;
|
||||
else
|
||||
next_ip = fp[STACK_FRAME_LR_SAVE];
|
||||
|
||||
/*
|
||||
* We can't tell which of the first two addresses
|
||||
* we get are valid, but we can filter out the
|
||||
* obviously bogus ones here. We replace them
|
||||
* with 0 rather than removing them entirely so
|
||||
* that userspace can tell which is which.
|
||||
*/
|
||||
if ((level == 1 && next_ip == lr) ||
|
||||
(level <= 1 && !kernel_text_address(next_ip)))
|
||||
next_ip = 0;
|
||||
|
||||
++level;
|
||||
}
|
||||
|
||||
perf_callchain_store(entry, next_ip);
|
||||
if (!valid_next_sp(next_sp, sp))
|
||||
return;
|
||||
sp = next_sp;
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PPC64
|
||||
/*
|
||||
* On 64-bit we don't want to invoke hash_page on user addresses from
|
||||
* interrupt context, so if the access faults, we read the page tables
|
||||
* to find which page (if any) is mapped and access it directly.
|
||||
*/
|
||||
static int read_user_stack_slow(void __user *ptr, void *ret, int nb)
|
||||
{
|
||||
pgd_t *pgdir;
|
||||
pte_t *ptep, pte;
|
||||
unsigned shift;
|
||||
unsigned long addr = (unsigned long) ptr;
|
||||
unsigned long offset;
|
||||
unsigned long pfn;
|
||||
void *kaddr;
|
||||
|
||||
pgdir = current->mm->pgd;
|
||||
if (!pgdir)
|
||||
return -EFAULT;
|
||||
|
||||
ptep = find_linux_pte_or_hugepte(pgdir, addr, &shift);
|
||||
if (!shift)
|
||||
shift = PAGE_SHIFT;
|
||||
|
||||
/* align address to page boundary */
|
||||
offset = addr & ((1UL << shift) - 1);
|
||||
addr -= offset;
|
||||
|
||||
if (ptep == NULL)
|
||||
return -EFAULT;
|
||||
pte = *ptep;
|
||||
if (!pte_present(pte) || !(pte_val(pte) & _PAGE_USER))
|
||||
return -EFAULT;
|
||||
pfn = pte_pfn(pte);
|
||||
if (!page_is_ram(pfn))
|
||||
return -EFAULT;
|
||||
|
||||
/* no highmem to worry about here */
|
||||
kaddr = pfn_to_kaddr(pfn);
|
||||
memcpy(ret, kaddr + offset, nb);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
|
||||
{
|
||||
if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned long) ||
|
||||
((unsigned long)ptr & 7))
|
||||
return -EFAULT;
|
||||
|
||||
pagefault_disable();
|
||||
if (!__get_user_inatomic(*ret, ptr)) {
|
||||
pagefault_enable();
|
||||
return 0;
|
||||
}
|
||||
pagefault_enable();
|
||||
|
||||
return read_user_stack_slow(ptr, ret, 8);
|
||||
}
|
||||
|
||||
static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
|
||||
{
|
||||
if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
|
||||
((unsigned long)ptr & 3))
|
||||
return -EFAULT;
|
||||
|
||||
pagefault_disable();
|
||||
if (!__get_user_inatomic(*ret, ptr)) {
|
||||
pagefault_enable();
|
||||
return 0;
|
||||
}
|
||||
pagefault_enable();
|
||||
|
||||
return read_user_stack_slow(ptr, ret, 4);
|
||||
}
|
||||
|
||||
static inline int valid_user_sp(unsigned long sp, int is_64)
|
||||
{
|
||||
if (!sp || (sp & 7) || sp > (is_64 ? TASK_SIZE : 0x100000000UL) - 32)
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* 64-bit user processes use the same stack frame for RT and non-RT signals.
|
||||
*/
|
||||
struct signal_frame_64 {
|
||||
char dummy[__SIGNAL_FRAMESIZE];
|
||||
struct ucontext uc;
|
||||
unsigned long unused[2];
|
||||
unsigned int tramp[6];
|
||||
struct siginfo *pinfo;
|
||||
void *puc;
|
||||
struct siginfo info;
|
||||
char abigap[288];
|
||||
};
|
||||
|
||||
static int is_sigreturn_64_address(unsigned long nip, unsigned long fp)
|
||||
{
|
||||
if (nip == fp + offsetof(struct signal_frame_64, tramp))
|
||||
return 1;
|
||||
if (vdso64_rt_sigtramp && current->mm->context.vdso_base &&
|
||||
nip == current->mm->context.vdso_base + vdso64_rt_sigtramp)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Do some sanity checking on the signal frame pointed to by sp.
|
||||
* We check the pinfo and puc pointers in the frame.
|
||||
*/
|
||||
static int sane_signal_64_frame(unsigned long sp)
|
||||
{
|
||||
struct signal_frame_64 __user *sf;
|
||||
unsigned long pinfo, puc;
|
||||
|
||||
sf = (struct signal_frame_64 __user *) sp;
|
||||
if (read_user_stack_64((unsigned long __user *) &sf->pinfo, &pinfo) ||
|
||||
read_user_stack_64((unsigned long __user *) &sf->puc, &puc))
|
||||
return 0;
|
||||
return pinfo == (unsigned long) &sf->info &&
|
||||
puc == (unsigned long) &sf->uc;
|
||||
}
|
||||
|
||||
static void perf_callchain_user_64(struct perf_callchain_entry *entry,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
unsigned long sp, next_sp;
|
||||
unsigned long next_ip;
|
||||
unsigned long lr;
|
||||
long level = 0;
|
||||
struct signal_frame_64 __user *sigframe;
|
||||
unsigned long __user *fp, *uregs;
|
||||
|
||||
next_ip = perf_instruction_pointer(regs);
|
||||
lr = regs->link;
|
||||
sp = regs->gpr[1];
|
||||
perf_callchain_store(entry, next_ip);
|
||||
|
||||
while (entry->nr < PERF_MAX_STACK_DEPTH) {
|
||||
fp = (unsigned long __user *) sp;
|
||||
if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
|
||||
return;
|
||||
if (level > 0 && read_user_stack_64(&fp[2], &next_ip))
|
||||
return;
|
||||
|
||||
/*
|
||||
* Note: the next_sp - sp >= signal frame size check
|
||||
* is true when next_sp < sp, which can happen when
|
||||
* transitioning from an alternate signal stack to the
|
||||
* normal stack.
|
||||
*/
|
||||
if (next_sp - sp >= sizeof(struct signal_frame_64) &&
|
||||
(is_sigreturn_64_address(next_ip, sp) ||
|
||||
(level <= 1 && is_sigreturn_64_address(lr, sp))) &&
|
||||
sane_signal_64_frame(sp)) {
|
||||
/*
|
||||
* This looks like a signal frame
|
||||
*/
|
||||
sigframe = (struct signal_frame_64 __user *) sp;
|
||||
uregs = sigframe->uc.uc_mcontext.gp_regs;
|
||||
if (read_user_stack_64(&uregs[PT_NIP], &next_ip) ||
|
||||
read_user_stack_64(&uregs[PT_LNK], &lr) ||
|
||||
read_user_stack_64(&uregs[PT_R1], &sp))
|
||||
return;
|
||||
level = 0;
|
||||
perf_callchain_store(entry, PERF_CONTEXT_USER);
|
||||
perf_callchain_store(entry, next_ip);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (level == 0)
|
||||
next_ip = lr;
|
||||
perf_callchain_store(entry, next_ip);
|
||||
++level;
|
||||
sp = next_sp;
|
||||
}
|
||||
}
|
||||
|
||||
static inline int current_is_64bit(void)
|
||||
{
|
||||
/*
|
||||
* We can't use test_thread_flag() here because we may be on an
|
||||
* interrupt stack, and the thread flags don't get copied over
|
||||
* from the thread_info on the main stack to the interrupt stack.
|
||||
*/
|
||||
return !test_ti_thread_flag(task_thread_info(current), TIF_32BIT);
|
||||
}
|
||||
|
||||
#else /* CONFIG_PPC64 */
|
||||
/*
|
||||
* On 32-bit we just access the address and let hash_page create a
|
||||
* HPTE if necessary, so there is no need to fall back to reading
|
||||
* the page tables. Since this is called at interrupt level,
|
||||
* do_page_fault() won't treat a DSI as a page fault.
|
||||
*/
|
||||
static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
|
||||
{
|
||||
int rc;
|
||||
|
||||
if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
|
||||
((unsigned long)ptr & 3))
|
||||
return -EFAULT;
|
||||
|
||||
pagefault_disable();
|
||||
rc = __get_user_inatomic(*ret, ptr);
|
||||
pagefault_enable();
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static inline void perf_callchain_user_64(struct perf_callchain_entry *entry,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
}
|
||||
|
||||
static inline int current_is_64bit(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int valid_user_sp(unsigned long sp, int is_64)
|
||||
{
|
||||
if (!sp || (sp & 7) || sp > TASK_SIZE - 32)
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
#define __SIGNAL_FRAMESIZE32 __SIGNAL_FRAMESIZE
|
||||
#define sigcontext32 sigcontext
|
||||
#define mcontext32 mcontext
|
||||
#define ucontext32 ucontext
|
||||
#define compat_siginfo_t struct siginfo
|
||||
|
||||
#endif /* CONFIG_PPC64 */
|
||||
|
||||
/*
|
||||
* Layout for non-RT signal frames
|
||||
*/
|
||||
struct signal_frame_32 {
|
||||
char dummy[__SIGNAL_FRAMESIZE32];
|
||||
struct sigcontext32 sctx;
|
||||
struct mcontext32 mctx;
|
||||
int abigap[56];
|
||||
};
|
||||
|
||||
/*
|
||||
* Layout for RT signal frames
|
||||
*/
|
||||
struct rt_signal_frame_32 {
|
||||
char dummy[__SIGNAL_FRAMESIZE32 + 16];
|
||||
compat_siginfo_t info;
|
||||
struct ucontext32 uc;
|
||||
int abigap[56];
|
||||
};
|
||||
|
||||
static int is_sigreturn_32_address(unsigned int nip, unsigned int fp)
|
||||
{
|
||||
if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad))
|
||||
return 1;
|
||||
if (vdso32_sigtramp && current->mm->context.vdso_base &&
|
||||
nip == current->mm->context.vdso_base + vdso32_sigtramp)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp)
|
||||
{
|
||||
if (nip == fp + offsetof(struct rt_signal_frame_32,
|
||||
uc.uc_mcontext.mc_pad))
|
||||
return 1;
|
||||
if (vdso32_rt_sigtramp && current->mm->context.vdso_base &&
|
||||
nip == current->mm->context.vdso_base + vdso32_rt_sigtramp)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sane_signal_32_frame(unsigned int sp)
|
||||
{
|
||||
struct signal_frame_32 __user *sf;
|
||||
unsigned int regs;
|
||||
|
||||
sf = (struct signal_frame_32 __user *) (unsigned long) sp;
|
||||
if (read_user_stack_32((unsigned int __user *) &sf->sctx.regs, &regs))
|
||||
return 0;
|
||||
return regs == (unsigned long) &sf->mctx;
|
||||
}
|
||||
|
||||
static int sane_rt_signal_32_frame(unsigned int sp)
|
||||
{
|
||||
struct rt_signal_frame_32 __user *sf;
|
||||
unsigned int regs;
|
||||
|
||||
sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
|
||||
if (read_user_stack_32((unsigned int __user *) &sf->uc.uc_regs, &regs))
|
||||
return 0;
|
||||
return regs == (unsigned long) &sf->uc.uc_mcontext;
|
||||
}
|
||||
|
||||
static unsigned int __user *signal_frame_32_regs(unsigned int sp,
|
||||
unsigned int next_sp, unsigned int next_ip)
|
||||
{
|
||||
struct mcontext32 __user *mctx = NULL;
|
||||
struct signal_frame_32 __user *sf;
|
||||
struct rt_signal_frame_32 __user *rt_sf;
|
||||
|
||||
/*
|
||||
* Note: the next_sp - sp >= signal frame size check
|
||||
* is true when next_sp < sp, for example, when
|
||||
* transitioning from an alternate signal stack to the
|
||||
* normal stack.
|
||||
*/
|
||||
if (next_sp - sp >= sizeof(struct signal_frame_32) &&
|
||||
is_sigreturn_32_address(next_ip, sp) &&
|
||||
sane_signal_32_frame(sp)) {
|
||||
sf = (struct signal_frame_32 __user *) (unsigned long) sp;
|
||||
mctx = &sf->mctx;
|
||||
}
|
||||
|
||||
if (!mctx && next_sp - sp >= sizeof(struct rt_signal_frame_32) &&
|
||||
is_rt_sigreturn_32_address(next_ip, sp) &&
|
||||
sane_rt_signal_32_frame(sp)) {
|
||||
rt_sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
|
||||
mctx = &rt_sf->uc.uc_mcontext;
|
||||
}
|
||||
|
||||
if (!mctx)
|
||||
return NULL;
|
||||
return mctx->mc_gregs;
|
||||
}
|
||||
|
||||
static void perf_callchain_user_32(struct perf_callchain_entry *entry,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
unsigned int sp, next_sp;
|
||||
unsigned int next_ip;
|
||||
unsigned int lr;
|
||||
long level = 0;
|
||||
unsigned int __user *fp, *uregs;
|
||||
|
||||
next_ip = perf_instruction_pointer(regs);
|
||||
lr = regs->link;
|
||||
sp = regs->gpr[1];
|
||||
perf_callchain_store(entry, next_ip);
|
||||
|
||||
while (entry->nr < PERF_MAX_STACK_DEPTH) {
|
||||
fp = (unsigned int __user *) (unsigned long) sp;
|
||||
if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp))
|
||||
return;
|
||||
if (level > 0 && read_user_stack_32(&fp[1], &next_ip))
|
||||
return;
|
||||
|
||||
uregs = signal_frame_32_regs(sp, next_sp, next_ip);
|
||||
if (!uregs && level <= 1)
|
||||
uregs = signal_frame_32_regs(sp, next_sp, lr);
|
||||
if (uregs) {
|
||||
/*
|
||||
* This looks like a signal frame, so restart
|
||||
* the stack trace with the values in it.
|
||||
*/
|
||||
if (read_user_stack_32(&uregs[PT_NIP], &next_ip) ||
|
||||
read_user_stack_32(&uregs[PT_LNK], &lr) ||
|
||||
read_user_stack_32(&uregs[PT_R1], &sp))
|
||||
return;
|
||||
level = 0;
|
||||
perf_callchain_store(entry, PERF_CONTEXT_USER);
|
||||
perf_callchain_store(entry, next_ip);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (level == 0)
|
||||
next_ip = lr;
|
||||
perf_callchain_store(entry, next_ip);
|
||||
++level;
|
||||
sp = next_sp;
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
|
||||
{
|
||||
if (current_is_64bit())
|
||||
perf_callchain_user_64(entry, regs);
|
||||
else
|
||||
perf_callchain_user_32(entry, regs);
|
||||
}
|
2171	arch/powerpc/perf/core-book3s.c	Normal file
File diff suppressed because it is too large
717	arch/powerpc/perf/core-fsl-emb.c	Normal file
@@ -0,0 +1,717 @@
/*
|
||||
* Performance event support - Freescale Embedded Performance Monitor
|
||||
*
|
||||
* Copyright 2008-2009 Paul Mackerras, IBM Corporation.
|
||||
* Copyright 2010 Freescale Semiconductor, Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/perf_event.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/hardirq.h>
|
||||
#include <asm/reg_fsl_emb.h>
|
||||
#include <asm/pmc.h>
|
||||
#include <asm/machdep.h>
|
||||
#include <asm/firmware.h>
|
||||
#include <asm/ptrace.h>
|
||||
|
||||
struct cpu_hw_events {
|
||||
int n_events;
|
||||
int disabled;
|
||||
u8 pmcs_enabled;
|
||||
struct perf_event *event[MAX_HWEVENTS];
|
||||
};
|
||||
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
|
||||
|
||||
static struct fsl_emb_pmu *ppmu;
|
||||
|
||||
/* Number of perf_events counting hardware events */
|
||||
static atomic_t num_events;
|
||||
/* Used to avoid races in calling reserve/release_pmc_hardware */
|
||||
static DEFINE_MUTEX(pmc_reserve_mutex);
|
||||
|
||||
/*
|
||||
* If interrupts were soft-disabled when a PMU interrupt occurs, treat
|
||||
* it as an NMI.
|
||||
*/
|
||||
static inline int perf_intr_is_nmi(struct pt_regs *regs)
|
||||
{
|
||||
#ifdef __powerpc64__
|
||||
return !regs->softe;
|
||||
#else
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
static void perf_event_interrupt(struct pt_regs *regs);
|
||||
|
||||
/*
|
||||
* Read one performance monitor counter (PMC).
|
||||
*/
|
||||
static unsigned long read_pmc(int idx)
|
||||
{
|
||||
unsigned long val;
|
||||
|
||||
switch (idx) {
|
||||
case 0:
|
||||
val = mfpmr(PMRN_PMC0);
|
||||
break;
|
||||
case 1:
|
||||
val = mfpmr(PMRN_PMC1);
|
||||
break;
|
||||
case 2:
|
||||
val = mfpmr(PMRN_PMC2);
|
||||
break;
|
||||
case 3:
|
||||
val = mfpmr(PMRN_PMC3);
|
||||
break;
|
||||
case 4:
|
||||
val = mfpmr(PMRN_PMC4);
|
||||
break;
|
||||
case 5:
|
||||
val = mfpmr(PMRN_PMC5);
|
||||
break;
|
||||
default:
|
||||
printk(KERN_ERR "oops trying to read PMC%d\n", idx);
|
||||
val = 0;
|
||||
}
|
||||
return val;
|
||||
}
|
||||
|
||||
/*
|
||||
* Write one PMC.
|
||||
*/
|
||||
static void write_pmc(int idx, unsigned long val)
|
||||
{
|
||||
switch (idx) {
|
||||
case 0:
|
||||
mtpmr(PMRN_PMC0, val);
|
||||
break;
|
||||
case 1:
|
||||
mtpmr(PMRN_PMC1, val);
|
||||
break;
|
||||
case 2:
|
||||
mtpmr(PMRN_PMC2, val);
|
||||
break;
|
||||
case 3:
|
||||
mtpmr(PMRN_PMC3, val);
|
||||
break;
|
||||
case 4:
|
||||
mtpmr(PMRN_PMC4, val);
|
||||
break;
|
||||
case 5:
|
||||
mtpmr(PMRN_PMC5, val);
|
||||
break;
|
||||
default:
|
||||
printk(KERN_ERR "oops trying to write PMC%d\n", idx);
|
||||
}
|
||||
|
||||
isync();
|
||||
}
|
||||
|
||||
/*
|
||||
* Write one local control A register
|
||||
*/
|
||||
static void write_pmlca(int idx, unsigned long val)
|
||||
{
|
||||
switch (idx) {
|
||||
case 0:
|
||||
mtpmr(PMRN_PMLCA0, val);
|
||||
break;
|
||||
case 1:
|
||||
mtpmr(PMRN_PMLCA1, val);
|
||||
break;
|
||||
case 2:
|
||||
mtpmr(PMRN_PMLCA2, val);
|
||||
break;
|
||||
case 3:
|
||||
mtpmr(PMRN_PMLCA3, val);
|
||||
break;
|
||||
case 4:
|
||||
mtpmr(PMRN_PMLCA4, val);
|
||||
break;
|
||||
case 5:
|
||||
mtpmr(PMRN_PMLCA5, val);
|
||||
break;
|
||||
default:
|
||||
printk(KERN_ERR "oops trying to write PMLCA%d\n", idx);
|
||||
}
|
||||
|
||||
isync();
|
||||
}
|
||||
|
||||
/*
|
||||
* Write one local control B register
|
||||
*/
|
||||
static void write_pmlcb(int idx, unsigned long val)
|
||||
{
|
||||
switch (idx) {
|
||||
case 0:
|
||||
mtpmr(PMRN_PMLCB0, val);
|
||||
break;
|
||||
case 1:
|
||||
mtpmr(PMRN_PMLCB1, val);
|
||||
break;
|
||||
case 2:
|
||||
mtpmr(PMRN_PMLCB2, val);
|
||||
break;
|
||||
case 3:
|
||||
mtpmr(PMRN_PMLCB3, val);
|
||||
break;
|
||||
case 4:
|
||||
mtpmr(PMRN_PMLCB4, val);
|
||||
break;
|
||||
case 5:
|
||||
mtpmr(PMRN_PMLCB5, val);
|
||||
break;
|
||||
default:
|
||||
printk(KERN_ERR "oops trying to write PMLCB%d\n", idx);
|
||||
}
|
||||
|
||||
isync();
|
||||
}
|
||||
|
||||
static void fsl_emb_pmu_read(struct perf_event *event)
|
||||
{
|
||||
s64 val, delta, prev;
|
||||
|
||||
if (event->hw.state & PERF_HES_STOPPED)
|
||||
return;
|
||||
|
||||
/*
|
||||
* Performance monitor interrupts come even when interrupts
|
||||
* are soft-disabled, as long as interrupts are hard-enabled.
|
||||
* Therefore we treat them like NMIs.
|
||||
*/
|
||||
do {
|
||||
prev = local64_read(&event->hw.prev_count);
|
||||
barrier();
|
||||
val = read_pmc(event->hw.idx);
|
||||
} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
|
||||
|
||||
/* The counters are only 32 bits wide */
|
||||
delta = (val - prev) & 0xfffffffful;
|
||||
local64_add(delta, &event->count);
|
||||
local64_sub(delta, &event->hw.period_left);
|
||||
}
|
||||
|
||||
/*
|
||||
* Disable all events to prevent PMU interrupts and to allow
|
||||
* events to be added or removed.
|
||||
*/
|
||||
static void fsl_emb_pmu_disable(struct pmu *pmu)
|
||||
{
|
||||
struct cpu_hw_events *cpuhw;
|
||||
unsigned long flags;
|
||||
|
||||
local_irq_save(flags);
|
||||
cpuhw = &__get_cpu_var(cpu_hw_events);
|
||||
|
||||
if (!cpuhw->disabled) {
|
||||
cpuhw->disabled = 1;
|
||||
|
||||
/*
|
||||
* Check if we ever enabled the PMU on this cpu.
|
||||
*/
|
||||
if (!cpuhw->pmcs_enabled) {
|
||||
ppc_enable_pmcs();
|
||||
cpuhw->pmcs_enabled = 1;
|
||||
}
|
||||
|
||||
if (atomic_read(&num_events)) {
|
||||
/*
|
||||
* Set the 'freeze all counters' bit, and disable
|
||||
* interrupts. The barrier is to make sure the
|
||||
* mtpmr has been executed and the PMU has frozen
|
||||
* the events before we return.
|
||||
*/
|
||||
|
||||
mtpmr(PMRN_PMGC0, PMGC0_FAC);
|
||||
isync();
|
||||
}
|
||||
}
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* Re-enable all events if disable == 0.
|
||||
* If we were previously disabled and events were added, then
|
||||
* put the new config on the PMU.
|
||||
*/
|
||||
static void fsl_emb_pmu_enable(struct pmu *pmu)
|
||||
{
|
||||
struct cpu_hw_events *cpuhw;
|
||||
unsigned long flags;
|
||||
|
||||
local_irq_save(flags);
|
||||
cpuhw = &__get_cpu_var(cpu_hw_events);
|
||||
if (!cpuhw->disabled)
|
||||
goto out;
|
||||
|
||||
cpuhw->disabled = 0;
|
||||
ppc_set_pmu_inuse(cpuhw->n_events != 0);
|
||||
|
||||
if (cpuhw->n_events > 0) {
|
||||
mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
|
||||
isync();
|
||||
}
|
||||
|
||||
out:
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
static int collect_events(struct perf_event *group, int max_count,
|
||||
struct perf_event *ctrs[])
|
||||
{
|
||||
int n = 0;
|
||||
struct perf_event *event;
|
||||
|
||||
if (!is_software_event(group)) {
|
||||
if (n >= max_count)
|
||||
return -1;
|
||||
ctrs[n] = group;
|
||||
n++;
|
||||
}
|
||||
list_for_each_entry(event, &group->sibling_list, group_entry) {
|
||||
if (!is_software_event(event) &&
|
||||
event->state != PERF_EVENT_STATE_OFF) {
|
||||
if (n >= max_count)
|
||||
return -1;
|
||||
ctrs[n] = event;
|
||||
n++;
|
||||
}
|
||||
}
|
||||
return n;
|
||||
}
|
||||
|
||||
/* context locked on entry */
|
||||
static int fsl_emb_pmu_add(struct perf_event *event, int flags)
|
||||
{
|
||||
struct cpu_hw_events *cpuhw;
|
||||
int ret = -EAGAIN;
|
||||
int num_counters = ppmu->n_counter;
|
||||
u64 val;
|
||||
int i;
|
||||
|
||||
perf_pmu_disable(event->pmu);
|
||||
cpuhw = &get_cpu_var(cpu_hw_events);
|
||||
|
||||
if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
|
||||
num_counters = ppmu->n_restricted;
|
||||
|
||||
/*
|
||||
* Allocate counters from top-down, so that restricted-capable
|
||||
* counters are kept free as long as possible.
|
||||
*/
|
||||
for (i = num_counters - 1; i >= 0; i--) {
|
||||
if (cpuhw->event[i])
|
||||
continue;
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
if (i < 0)
|
||||
goto out;
|
||||
|
||||
event->hw.idx = i;
|
||||
cpuhw->event[i] = event;
|
||||
++cpuhw->n_events;
|
||||
|
||||
val = 0;
|
||||
if (event->hw.sample_period) {
|
||||
s64 left = local64_read(&event->hw.period_left);
|
||||
if (left < 0x80000000L)
|
||||
val = 0x80000000L - left;
|
||||
}
|
||||
local64_set(&event->hw.prev_count, val);
|
||||
|
||||
if (!(flags & PERF_EF_START)) {
|
||||
event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
|
||||
val = 0;
|
||||
}
|
||||
|
||||
write_pmc(i, val);
|
||||
perf_event_update_userpage(event);
|
||||
|
||||
write_pmlcb(i, event->hw.config >> 32);
|
||||
write_pmlca(i, event->hw.config_base);
|
||||
|
||||
ret = 0;
|
||||
out:
|
||||
put_cpu_var(cpu_hw_events);
|
||||
perf_pmu_enable(event->pmu);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* context locked on entry */
|
||||
static void fsl_emb_pmu_del(struct perf_event *event, int flags)
|
||||
{
|
||||
struct cpu_hw_events *cpuhw;
|
||||
int i = event->hw.idx;
|
||||
|
||||
perf_pmu_disable(event->pmu);
|
||||
if (i < 0)
|
||||
goto out;
|
||||
|
||||
fsl_emb_pmu_read(event);
|
||||
|
||||
cpuhw = &get_cpu_var(cpu_hw_events);
|
||||
|
||||
WARN_ON(event != cpuhw->event[event->hw.idx]);
|
||||
|
||||
write_pmlca(i, 0);
|
||||
write_pmlcb(i, 0);
|
||||
write_pmc(i, 0);
|
||||
|
||||
cpuhw->event[i] = NULL;
|
||||
event->hw.idx = -1;
|
||||
|
||||
/*
|
||||
* TODO: if at least one restricted event exists, and we
|
||||
* just freed up a non-restricted-capable counter, and
|
||||
* there is a restricted-capable counter occupied by
|
||||
* a non-restricted event, migrate that event to the
|
||||
* vacated counter.
|
||||
*/
|
||||
|
||||
cpuhw->n_events--;
|
||||
|
||||
out:
|
||||
perf_pmu_enable(event->pmu);
|
||||
put_cpu_var(cpu_hw_events);
|
||||
}
|
||||
|
||||
static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags)
|
||||
{
|
||||
unsigned long flags;
|
||||
s64 left;
|
||||
|
||||
if (event->hw.idx < 0 || !event->hw.sample_period)
|
||||
return;
|
||||
|
||||
if (!(event->hw.state & PERF_HES_STOPPED))
|
||||
return;
|
||||
|
||||
if (ef_flags & PERF_EF_RELOAD)
|
||||
WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
|
||||
|
||||
local_irq_save(flags);
|
||||
perf_pmu_disable(event->pmu);
|
||||
|
||||
event->hw.state = 0;
|
||||
left = local64_read(&event->hw.period_left);
|
||||
write_pmc(event->hw.idx, left);
|
||||
|
||||
perf_event_update_userpage(event);
|
||||
perf_pmu_enable(event->pmu);
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
if (event->hw.idx < 0 || !event->hw.sample_period)
|
||||
return;
|
||||
|
||||
if (event->hw.state & PERF_HES_STOPPED)
|
||||
return;
|
||||
|
||||
local_irq_save(flags);
|
||||
perf_pmu_disable(event->pmu);
|
||||
|
||||
fsl_emb_pmu_read(event);
|
||||
event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
|
||||
write_pmc(event->hw.idx, 0);
|
||||
|
||||
perf_event_update_userpage(event);
|
||||
perf_pmu_enable(event->pmu);
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* Release the PMU if this is the last perf_event.
|
||||
*/
|
||||
static void hw_perf_event_destroy(struct perf_event *event)
|
||||
{
|
||||
if (!atomic_add_unless(&num_events, -1, 1)) {
|
||||
mutex_lock(&pmc_reserve_mutex);
|
||||
if (atomic_dec_return(&num_events) == 0)
|
||||
release_pmc_hardware();
|
||||
mutex_unlock(&pmc_reserve_mutex);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Translate a generic cache event_id config to a raw event_id code.
|
||||
*/
|
||||
static int hw_perf_cache_event(u64 config, u64 *eventp)
|
||||
{
|
||||
unsigned long type, op, result;
|
||||
int ev;
|
||||
|
||||
if (!ppmu->cache_events)
|
||||
return -EINVAL;
|
||||
|
||||
/* unpack config */
|
||||
type = config & 0xff;
|
||||
op = (config >> 8) & 0xff;
|
||||
result = (config >> 16) & 0xff;
|
||||
|
||||
if (type >= PERF_COUNT_HW_CACHE_MAX ||
|
||||
op >= PERF_COUNT_HW_CACHE_OP_MAX ||
|
||||
result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
|
||||
return -EINVAL;
|
||||
|
||||
ev = (*ppmu->cache_events)[type][op][result];
|
||||
if (ev == 0)
|
||||
return -EOPNOTSUPP;
|
||||
if (ev == -1)
|
||||
return -EINVAL;
|
||||
*eventp = ev;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int fsl_emb_pmu_event_init(struct perf_event *event)
|
||||
{
|
||||
u64 ev;
|
||||
struct perf_event *events[MAX_HWEVENTS];
|
||||
int n;
|
||||
int err;
|
||||
int num_restricted;
|
||||
int i;
|
||||
|
||||
if (ppmu->n_counter > MAX_HWEVENTS) {
|
||||
WARN(1, "No. of perf counters (%d) is higher than max array size(%d)\n",
|
||||
ppmu->n_counter, MAX_HWEVENTS);
|
||||
ppmu->n_counter = MAX_HWEVENTS;
|
||||
}
|
||||
|
||||
switch (event->attr.type) {
|
||||
case PERF_TYPE_HARDWARE:
|
||||
ev = event->attr.config;
|
||||
if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
|
||||
return -EOPNOTSUPP;
|
||||
ev = ppmu->generic_events[ev];
|
||||
break;
|
||||
|
||||
case PERF_TYPE_HW_CACHE:
|
||||
err = hw_perf_cache_event(event->attr.config, &ev);
|
||||
if (err)
|
||||
return err;
|
||||
break;
|
||||
|
||||
case PERF_TYPE_RAW:
|
||||
ev = event->attr.config;
|
||||
break;
|
||||
|
||||
default:
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
event->hw.config = ppmu->xlate_event(ev);
|
||||
if (!(event->hw.config & FSL_EMB_EVENT_VALID))
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* If this is in a group, check if it can go on with all the
|
||||
* other hardware events in the group. We assume the event
|
||||
* hasn't been linked into its leader's sibling list at this point.
|
||||
*/
|
||||
n = 0;
|
||||
if (event->group_leader != event) {
|
||||
n = collect_events(event->group_leader,
|
||||
ppmu->n_counter - 1, events);
|
||||
if (n < 0)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) {
|
||||
num_restricted = 0;
|
||||
for (i = 0; i < n; i++) {
|
||||
if (events[i]->hw.config & FSL_EMB_EVENT_RESTRICTED)
|
||||
num_restricted++;
|
||||
}
|
||||
|
||||
if (num_restricted >= ppmu->n_restricted)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
event->hw.idx = -1;
|
||||
|
||||
event->hw.config_base = PMLCA_CE | PMLCA_FCM1 |
|
||||
(u32)((ev << 16) & PMLCA_EVENT_MASK);
|
||||
|
||||
if (event->attr.exclude_user)
|
||||
event->hw.config_base |= PMLCA_FCU;
|
||||
if (event->attr.exclude_kernel)
|
||||
event->hw.config_base |= PMLCA_FCS;
|
||||
if (event->attr.exclude_idle)
|
||||
return -ENOTSUPP;
|
||||
|
||||
event->hw.last_period = event->hw.sample_period;
|
||||
local64_set(&event->hw.period_left, event->hw.last_period);
|
||||
|
||||
/*
|
||||
* See if we need to reserve the PMU.
|
||||
* If no events are currently in use, then we have to take a
|
||||
* mutex to ensure that we don't race with another task doing
|
||||
* reserve_pmc_hardware or release_pmc_hardware.
|
||||
*/
|
||||
err = 0;
|
||||
if (!atomic_inc_not_zero(&num_events)) {
|
||||
mutex_lock(&pmc_reserve_mutex);
|
||||
if (atomic_read(&num_events) == 0 &&
|
||||
reserve_pmc_hardware(perf_event_interrupt))
|
||||
err = -EBUSY;
|
||||
else
|
||||
atomic_inc(&num_events);
|
||||
mutex_unlock(&pmc_reserve_mutex);
|
||||
|
||||
mtpmr(PMRN_PMGC0, PMGC0_FAC);
|
||||
isync();
|
||||
}
|
||||
event->destroy = hw_perf_event_destroy;
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct pmu fsl_emb_pmu = {
|
||||
.pmu_enable = fsl_emb_pmu_enable,
|
||||
.pmu_disable = fsl_emb_pmu_disable,
|
||||
.event_init = fsl_emb_pmu_event_init,
|
||||
.add = fsl_emb_pmu_add,
|
||||
.del = fsl_emb_pmu_del,
|
||||
.start = fsl_emb_pmu_start,
|
||||
.stop = fsl_emb_pmu_stop,
|
||||
.read = fsl_emb_pmu_read,
|
||||
};
|
||||
|
||||
/*
|
||||
* A counter has overflowed; update its count and record
|
||||
* things if requested. Note that interrupts are hard-disabled
|
||||
* here so there is no possibility of being interrupted.
|
||||
*/
|
||||
static void record_and_restart(struct perf_event *event, unsigned long val,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
u64 period = event->hw.sample_period;
|
||||
s64 prev, delta, left;
|
||||
int record = 0;
|
||||
|
||||
if (event->hw.state & PERF_HES_STOPPED) {
|
||||
write_pmc(event->hw.idx, 0);
|
||||
return;
|
||||
}
|
||||
|
||||
/* we don't have to worry about interrupts here */
|
||||
prev = local64_read(&event->hw.prev_count);
|
||||
delta = (val - prev) & 0xfffffffful;
|
||||
local64_add(delta, &event->count);
|
||||
|
||||
/*
|
||||
* See if the total period for this event has expired,
|
||||
* and update for the next period.
|
||||
*/
|
||||
val = 0;
|
||||
left = local64_read(&event->hw.period_left) - delta;
|
||||
if (period) {
|
||||
if (left <= 0) {
|
||||
left += period;
|
||||
if (left <= 0)
|
||||
left = period;
|
||||
record = 1;
|
||||
event->hw.last_period = event->hw.sample_period;
|
||||
}
|
||||
if (left < 0x80000000LL)
|
||||
val = 0x80000000LL - left;
|
||||
}
|
||||
|
||||
write_pmc(event->hw.idx, val);
|
||||
local64_set(&event->hw.prev_count, val);
|
||||
local64_set(&event->hw.period_left, left);
|
||||
perf_event_update_userpage(event);
|
||||
|
||||
/*
|
||||
* Finally record data if requested.
|
||||
*/
|
||||
if (record) {
|
||||
struct perf_sample_data data;
|
||||
|
||||
perf_sample_data_init(&data, 0, event->hw.last_period);
|
||||
|
||||
if (perf_event_overflow(event, &data, regs))
|
||||
fsl_emb_pmu_stop(event, 0);
|
||||
}
|
||||
}
|
||||
|
||||
static void perf_event_interrupt(struct pt_regs *regs)
|
||||
{
|
||||
int i;
|
||||
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
|
||||
struct perf_event *event;
|
||||
unsigned long val;
|
||||
int found = 0;
|
||||
int nmi;
|
||||
|
||||
nmi = perf_intr_is_nmi(regs);
|
||||
if (nmi)
|
||||
nmi_enter();
|
||||
else
|
||||
irq_enter();
|
||||
|
||||
for (i = 0; i < ppmu->n_counter; ++i) {
|
||||
event = cpuhw->event[i];
|
||||
|
||||
val = read_pmc(i);
|
||||
if ((int)val < 0) {
|
||||
if (event) {
|
||||
/* event has overflowed */
|
||||
found = 1;
|
||||
record_and_restart(event, val, regs);
|
||||
} else {
|
||||
/*
|
||||
* Disabled counter is negative,
|
||||
* reset it just in case.
|
||||
*/
|
||||
write_pmc(i, 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* PMM will keep counters frozen until we return from the interrupt. */
|
||||
mtmsr(mfmsr() | MSR_PMM);
|
||||
mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
|
||||
isync();
|
||||
|
||||
if (nmi)
|
||||
nmi_exit();
|
||||
else
|
||||
irq_exit();
|
||||
}
|
||||
|
||||
void hw_perf_event_setup(int cpu)
|
||||
{
|
||||
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
|
||||
|
||||
memset(cpuhw, 0, sizeof(*cpuhw));
|
||||
}
|
||||
|
||||
int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
|
||||
{
|
||||
if (ppmu)
|
||||
return -EBUSY; /* something's already registered */
|
||||
|
||||
ppmu = pmu;
|
||||
pr_info("%s performance monitor hardware support registered\n",
|
||||
pmu->name);
|
||||
|
||||
perf_pmu_register(&fsl_emb_pmu, "cpu", PERF_TYPE_RAW);
|
||||
|
||||
return 0;
|
||||
}
|
136	arch/powerpc/perf/e500-pmu.c	Normal file
@@ -0,0 +1,136 @@
/*
 * Performance counter support for e500 family processors.
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 * Copyright 2010 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/string.h>
#include <linux/perf_event.h>
#include <asm/reg.h>
#include <asm/cputable.h>

/*
 * Map of generic hardware event types to hardware events
 * Zero if unsupported
 */
static int e500_generic_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = 1,
	[PERF_COUNT_HW_INSTRUCTIONS] = 2,
	[PERF_COUNT_HW_CACHE_MISSES] = 41, /* Data L1 cache reloads */
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 12,
	[PERF_COUNT_HW_BRANCH_MISSES] = 15,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 18,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 19,
};

#define C(x)	PERF_COUNT_HW_CACHE_##x

/*
 * Table of generalized cache-related events.
 * 0 means not supported, -1 means nonsensical, other values
 * are event codes.
 */
static int e500_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	/*
	 * D-cache misses are not split into read/write/prefetch;
	 * use raw event 41.
	 */
	[C(L1D)] = {		/*	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	27,		0	},
		[C(OP_WRITE)] = {	28,		0	},
		[C(OP_PREFETCH)] = {	29,		0	},
	},
	[C(L1I)] = {		/*	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	2,		60	},
		[C(OP_WRITE)] = {	-1,		-1	},
		[C(OP_PREFETCH)] = {	0,		0	},
	},
	/*
	 * Assuming LL means L2, it's not a good match for this model.
	 * It allocates only on L1 castout or explicit prefetch, and
	 * does not have separate read/write events (but it does have
	 * separate instruction/data events).
	 */
	[C(LL)] = {		/*	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0,		0	},
		[C(OP_WRITE)] = {	0,		0	},
		[C(OP_PREFETCH)] = {	0,		0	},
	},
	/*
	 * There are data/instruction MMU misses, but that's a miss on
	 * the chip's internal level-one TLB which is probably not
	 * what the user wants. Instead, unified level-two TLB misses
	 * are reported here.
	 */
	[C(DTLB)] = {		/*	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	26,		66	},
		[C(OP_WRITE)] = {	-1,		-1	},
		[C(OP_PREFETCH)] = {	-1,		-1	},
	},
	[C(BPU)] = {		/*	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	12,		15	},
		[C(OP_WRITE)] = {	-1,		-1	},
		[C(OP_PREFETCH)] = {	-1,		-1	},
	},
	[C(NODE)] = {		/*	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	-1,		-1	},
		[C(OP_WRITE)] = {	-1,		-1	},
		[C(OP_PREFETCH)] = {	-1,		-1	},
	},
};

static int num_events = 128;

/* Upper half of event id is PMLCb, for threshold events */
static u64 e500_xlate_event(u64 event_id)
{
	u32 event_low = (u32)event_id;
	u64 ret;

	if (event_low >= num_events)
		return 0;

	ret = FSL_EMB_EVENT_VALID;

	if (event_low >= 76 && event_low <= 81) {
		ret |= FSL_EMB_EVENT_RESTRICTED;
		ret |= event_id &
		       (FSL_EMB_EVENT_THRESHMUL | FSL_EMB_EVENT_THRESH);
	} else if (event_id &
		   (FSL_EMB_EVENT_THRESHMUL | FSL_EMB_EVENT_THRESH)) {
		/* Threshold requested on non-threshold event */
		return 0;
	}

	return ret;
}

static struct fsl_emb_pmu e500_pmu = {
	.name			= "e500 family",
	.n_counter		= 4,
	.n_restricted		= 2,
	.xlate_event		= e500_xlate_event,
	.n_generic		= ARRAY_SIZE(e500_generic_events),
	.generic_events		= e500_generic_events,
	.cache_events		= &e500_cache_events,
};

static int init_e500_pmu(void)
{
	if (!cur_cpu_spec->oprofile_cpu_type)
		return -ENODEV;

	if (!strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/e500mc"))
		num_events = 256;
	else if (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/e500"))
		return -ENODEV;

	return register_fsl_emb_pmu(&e500_pmu);
}

early_initcall(init_e500_pmu);
121	arch/powerpc/perf/e6500-pmu.c	Normal file
@@ -0,0 +1,121 @@
/*
 * Performance counter support for e6500 family processors.
 *
 * Author: Priyanka Jain, Priyanka.Jain@freescale.com
 * Based on e500-pmu.c
 * Copyright 2013 Freescale Semiconductor, Inc.
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/string.h>
#include <linux/perf_event.h>
#include <asm/reg.h>
#include <asm/cputable.h>

/*
 * Map of generic hardware event types to hardware events
 * Zero if unsupported
 */
static int e6500_generic_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = 1,
	[PERF_COUNT_HW_INSTRUCTIONS] = 2,
	[PERF_COUNT_HW_CACHE_MISSES] = 221,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 12,
	[PERF_COUNT_HW_BRANCH_MISSES] = 15,
};

#define C(x)	PERF_COUNT_HW_CACHE_##x

/*
 * Table of generalized cache-related events.
 * 0 means not supported, -1 means nonsensical, other values
 * are event codes.
 */
static int e6500_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	[C(L1D)] = {
			/*RESULT_ACCESS		RESULT_MISS */
		[C(OP_READ)] = {	27,		222	},
		[C(OP_WRITE)] = {	28,		223	},
		[C(OP_PREFETCH)] = {	29,		0	},
	},
	[C(L1I)] = {
			/*RESULT_ACCESS		RESULT_MISS */
		[C(OP_READ)] = {	2,		254	},
		[C(OP_WRITE)] = {	-1,		-1	},
		[C(OP_PREFETCH)] = {	37,		0	},
	},
	/*
	 * Assuming LL means L2, it's not a good match for this model.
	 * It does not have separate read/write events (but it does have
	 * separate instruction/data events).
	 */
	[C(LL)] = {
			/*RESULT_ACCESS		RESULT_MISS */
		[C(OP_READ)] = {	0,		0	},
		[C(OP_WRITE)] = {	0,		0	},
		[C(OP_PREFETCH)] = {	0,		0	},
	},
	/*
	 * There are data/instruction MMU misses, but that's a miss on
	 * the chip's internal level-one TLB which is probably not
	 * what the user wants. Instead, unified level-two TLB misses
	 * are reported here.
	 */
	[C(DTLB)] = {
			/*RESULT_ACCESS		RESULT_MISS */
		[C(OP_READ)] = {	26,		66	},
		[C(OP_WRITE)] = {	-1,		-1	},
		[C(OP_PREFETCH)] = {	-1,		-1	},
	},
	[C(BPU)] = {
			/*RESULT_ACCESS		RESULT_MISS */
		[C(OP_READ)] = {	12,		15	},
		[C(OP_WRITE)] = {	-1,		-1	},
		[C(OP_PREFETCH)] = {	-1,		-1	},
	},
	[C(NODE)] = {
			/* RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	-1,		-1	},
		[C(OP_WRITE)] = {	-1,		-1	},
		[C(OP_PREFETCH)] = {	-1,		-1	},
	},
};

static int num_events = 512;

/* Upper half of event id is PMLCb, for threshold events */
static u64 e6500_xlate_event(u64 event_id)
{
	u32 event_low = (u32)event_id;
	if (event_low >= num_events ||
		(event_id & (FSL_EMB_EVENT_THRESHMUL | FSL_EMB_EVENT_THRESH)))
		return 0;

	return FSL_EMB_EVENT_VALID;
}

static struct fsl_emb_pmu e6500_pmu = {
	.name			= "e6500 family",
	.n_counter		= 6,
	.n_restricted		= 0,
	.xlate_event		= e6500_xlate_event,
	.n_generic		= ARRAY_SIZE(e6500_generic_events),
	.generic_events		= e6500_generic_events,
	.cache_events		= &e6500_cache_events,
};

static int init_e6500_pmu(void)
{
	if (!cur_cpu_spec->oprofile_cpu_type ||
		strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/e6500"))
		return -ENODEV;

	return register_fsl_emb_pmu(&e6500_pmu);
}

early_initcall(init_e6500_pmu);
33	arch/powerpc/perf/hv-24x7-catalog.h	Normal file
@@ -0,0 +1,33 @@
#ifndef LINUX_POWERPC_PERF_HV_24X7_CATALOG_H_
#define LINUX_POWERPC_PERF_HV_24X7_CATALOG_H_

#include <linux/types.h>

/* From document "24x7 Event and Group Catalog Formats Proposal" v0.15 */

struct hv_24x7_catalog_page_0 {
#define HV_24X7_CATALOG_MAGIC 0x32347837 /* "24x7" in ASCII */
	__be32 magic;
	__be32 length; /* In 4096 byte pages */
	__be64 version; /* XXX: arbitrary? what's the meaning/useage/purpose? */
	__u8 build_time_stamp[16]; /* "YYYYMMDDHHMMSS\0\0" */
	__u8 reserved2[32];
	__be16 schema_data_offs; /* in 4096 byte pages */
	__be16 schema_data_len;  /* in 4096 byte pages */
	__be16 schema_entry_count;
	__u8 reserved3[2];
	__be16 event_data_offs;
	__be16 event_data_len;
	__be16 event_entry_count;
	__u8 reserved4[2];
	__be16 group_data_offs; /* in 4096 byte pages */
	__be16 group_data_len;  /* in 4096 byte pages */
	__be16 group_entry_count;
	__u8 reserved5[2];
	__be16 formula_data_offs; /* in 4096 byte pages */
	__be16 formula_data_len;  /* in 4096 byte pages */
	__be16 formula_entry_count;
	__u8 reserved6[2];
} __packed;

#endif
462	arch/powerpc/perf/hv-24x7.c	Normal file
@@ -0,0 +1,462 @@
/*
|
||||
* Hypervisor supplied "24x7" performance counter support
|
||||
*
|
||||
* Author: Cody P Schafer <cody@linux.vnet.ibm.com>
|
||||
* Copyright 2014 IBM Corporation.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "hv-24x7: " fmt
|
||||
|
||||
#include <linux/perf_event.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
#include <asm/firmware.h>
|
||||
#include <asm/hvcall.h>
|
||||
#include <asm/io.h>
|
||||
|
||||
#include "hv-24x7.h"
|
||||
#include "hv-24x7-catalog.h"
|
||||
#include "hv-common.h"
|
||||
|
||||
/*
|
||||
* TODO: Merging events:
|
||||
* - Think of the hcall as an interface to a 4d array of counters:
|
||||
* - x = domains
|
||||
* - y = indexes in the domain (core, chip, vcpu, node, etc)
|
||||
* - z = offset into the counter space
|
||||
* - w = lpars (guest vms, "logical partitions")
|
||||
* - A single request is: x,y,y_last,z,z_last,w,w_last
|
||||
* - this means we can retrieve a rectangle of counters in y,z for a single x.
|
||||
*
|
||||
* - Things to consider (ignoring w):
|
||||
* - input cost_per_request = 16
|
||||
* - output cost_per_result(ys,zs) = 8 + 8 * ys + ys * zs
|
||||
* - limited number of requests per hcall (must fit into 4K bytes)
|
||||
* - 4k = 16 [buffer header] - 16 [request size] * request_count
|
||||
* - 255 requests per hcall
|
||||
* - sometimes it will be more efficient to read extra data and discard
|
||||
*/
|
||||
|
||||
/*
|
||||
* Example usage:
|
||||
* perf stat -e 'hv_24x7/domain=2,offset=8,starting_index=0,lpar=0xffffffff/'
|
||||
*/
|
||||
|
||||
/* u3 0-6, one of HV_24X7_PERF_DOMAIN */
|
||||
EVENT_DEFINE_RANGE_FORMAT(domain, config, 0, 3);
|
||||
/* u16 */
|
||||
EVENT_DEFINE_RANGE_FORMAT(starting_index, config, 16, 31);
|
||||
/* u32, see "data_offset" */
|
||||
EVENT_DEFINE_RANGE_FORMAT(offset, config, 32, 63);
|
||||
/* u16 */
|
||||
EVENT_DEFINE_RANGE_FORMAT(lpar, config1, 0, 15);
|
||||
|
||||
EVENT_DEFINE_RANGE(reserved1, config, 4, 15);
|
||||
EVENT_DEFINE_RANGE(reserved2, config1, 16, 63);
|
||||
EVENT_DEFINE_RANGE(reserved3, config2, 0, 63);
|
||||
|
||||
static struct attribute *format_attrs[] = {
|
||||
&format_attr_domain.attr,
|
||||
&format_attr_offset.attr,
|
||||
&format_attr_starting_index.attr,
|
||||
&format_attr_lpar.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group format_group = {
|
||||
.name = "format",
|
||||
.attrs = format_attrs,
|
||||
};
|
||||
|
||||
static struct kmem_cache *hv_page_cache;
|
||||
|
||||
static unsigned long h_get_24x7_catalog_page_(unsigned long phys_4096,
|
||||
unsigned long version,
|
||||
unsigned long index)
|
||||
{
|
||||
pr_devel("h_get_24x7_catalog_page(0x%lx, %lu, %lu)",
|
||||
phys_4096,
|
||||
version,
|
||||
index);
|
||||
WARN_ON(!IS_ALIGNED(phys_4096, 4096));
|
||||
return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE,
|
||||
phys_4096,
|
||||
version,
|
||||
index);
|
||||
}
|
||||
|
||||
static unsigned long h_get_24x7_catalog_page(char page[],
|
||||
u64 version, u32 index)
|
||||
{
|
||||
return h_get_24x7_catalog_page_(virt_to_phys(page),
|
||||
version, index);
|
||||
}
|
||||
|
||||
static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
|
||||
struct bin_attribute *bin_attr, char *buf,
|
||||
loff_t offset, size_t count)
|
||||
{
|
||||
unsigned long hret;
|
||||
ssize_t ret = 0;
|
||||
size_t catalog_len = 0, catalog_page_len = 0;
|
||||
loff_t page_offset = 0;
|
||||
loff_t offset_in_page;
|
||||
size_t copy_len;
|
||||
uint64_t catalog_version_num = 0;
|
||||
void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);
|
||||
struct hv_24x7_catalog_page_0 *page_0 = page;
|
||||
if (!page)
|
||||
return -ENOMEM;
|
||||
|
||||
hret = h_get_24x7_catalog_page(page, 0, 0);
|
||||
if (hret) {
|
||||
ret = -EIO;
|
||||
goto e_free;
|
||||
}
|
||||
|
||||
catalog_version_num = be64_to_cpu(page_0->version);
|
||||
catalog_page_len = be32_to_cpu(page_0->length);
|
||||
catalog_len = catalog_page_len * 4096;
|
||||
|
||||
page_offset = offset / 4096;
|
||||
offset_in_page = offset % 4096;
|
||||
|
||||
if (page_offset >= catalog_page_len)
|
||||
goto e_free;
|
||||
|
||||
if (page_offset != 0) {
|
||||
hret = h_get_24x7_catalog_page(page, catalog_version_num,
|
||||
page_offset);
|
||||
if (hret) {
|
||||
ret = -EIO;
|
||||
goto e_free;
|
||||
}
|
||||
}
|
||||
|
||||
copy_len = 4096 - offset_in_page;
|
||||
if (copy_len > count)
|
||||
copy_len = count;
|
||||
|
||||
memcpy(buf, page+offset_in_page, copy_len);
|
||||
ret = copy_len;
|
||||
|
||||
e_free:
|
||||
if (hret)
|
||||
pr_err("h_get_24x7_catalog_page(ver=%lld, page=%lld) failed:"
|
||||
" rc=%ld\n",
|
||||
catalog_version_num, page_offset, hret);
|
||||
kmem_cache_free(hv_page_cache, page);
|
||||
|
||||
pr_devel("catalog_read: offset=%lld(%lld) count=%zu "
|
||||
"catalog_len=%zu(%zu) => %zd\n", offset, page_offset,
|
||||
count, catalog_len, catalog_page_len, ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#define PAGE_0_ATTR(_name, _fmt, _expr) \
|
||||
static ssize_t _name##_show(struct device *dev, \
|
||||
struct device_attribute *dev_attr, \
|
||||
char *buf) \
|
||||
{ \
|
||||
unsigned long hret; \
|
||||
ssize_t ret = 0; \
|
||||
void *page = kmem_cache_alloc(hv_page_cache, GFP_USER); \
|
||||
struct hv_24x7_catalog_page_0 *page_0 = page; \
|
||||
if (!page) \
|
||||
return -ENOMEM; \
|
||||
hret = h_get_24x7_catalog_page(page, 0, 0); \
|
||||
if (hret) { \
|
||||
ret = -EIO; \
|
||||
goto e_free; \
|
||||
} \
|
||||
ret = sprintf(buf, _fmt, _expr); \
|
||||
e_free: \
|
||||
kfree(page); \
|
||||
return ret; \
|
||||
} \
|
||||
static DEVICE_ATTR_RO(_name)
|
||||
|
||||
PAGE_0_ATTR(catalog_version, "%lld\n",
|
||||
(unsigned long long)be64_to_cpu(page_0->version));
|
||||
PAGE_0_ATTR(catalog_len, "%lld\n",
|
||||
(unsigned long long)be32_to_cpu(page_0->length) * 4096);
|
||||
static BIN_ATTR_RO(catalog, 0/* real length varies */);
|
||||
|
||||
static struct bin_attribute *if_bin_attrs[] = {
|
||||
&bin_attr_catalog,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute *if_attrs[] = {
|
||||
&dev_attr_catalog_len.attr,
|
||||
&dev_attr_catalog_version.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group if_group = {
|
||||
.name = "interface",
|
||||
.bin_attrs = if_bin_attrs,
|
||||
.attrs = if_attrs,
|
||||
};
|
||||
|
||||
static const struct attribute_group *attr_groups[] = {
|
||||
&format_group,
|
||||
&if_group,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static bool is_physical_domain(int domain)
|
||||
{
|
||||
return domain == HV_24X7_PERF_DOMAIN_PHYSICAL_CHIP ||
|
||||
domain == HV_24X7_PERF_DOMAIN_PHYSICAL_CORE;
|
||||
}
|
||||
|
||||
DEFINE_PER_CPU(char, hv_24x7_reqb[4096]) __aligned(4096);
|
||||
DEFINE_PER_CPU(char, hv_24x7_resb[4096]) __aligned(4096);
|
||||
|
||||
static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix,
|
||||
u16 lpar, u64 *res,
|
||||
bool success_expected)
|
||||
{
|
||||
unsigned long ret;
|
||||
|
||||
/*
|
||||
* request_buffer and result_buffer are not required to be 4k aligned,
|
||||
* but are not allowed to cross any 4k boundary. Aligning them to 4k is
|
||||
* the simplest way to ensure that.
|
||||
*/
|
||||
struct reqb {
|
||||
struct hv_24x7_request_buffer buf;
|
||||
struct hv_24x7_request req;
|
||||
} __packed *request_buffer;
|
||||
|
||||
struct {
|
||||
struct hv_24x7_data_result_buffer buf;
|
||||
struct hv_24x7_result res;
|
||||
struct hv_24x7_result_element elem;
|
||||
__be64 result;
|
||||
} __packed *result_buffer;
|
||||
|
||||
BUILD_BUG_ON(sizeof(*request_buffer) > 4096);
|
||||
BUILD_BUG_ON(sizeof(*result_buffer) > 4096);
|
||||
|
||||
request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
|
||||
result_buffer = (void *)get_cpu_var(hv_24x7_resb);
|
||||
|
||||
memset(request_buffer, 0, 4096);
|
||||
memset(result_buffer, 0, 4096);
|
||||
|
||||
*request_buffer = (struct reqb) {
|
||||
.buf = {
|
||||
.interface_version = HV_24X7_IF_VERSION_CURRENT,
|
||||
.num_requests = 1,
|
||||
},
|
||||
.req = {
|
||||
.performance_domain = domain,
|
||||
.data_size = cpu_to_be16(8),
|
||||
.data_offset = cpu_to_be32(offset),
|
||||
.starting_lpar_ix = cpu_to_be16(lpar),
|
||||
.max_num_lpars = cpu_to_be16(1),
|
||||
.starting_ix = cpu_to_be16(ix),
|
||||
.max_ix = cpu_to_be16(1),
|
||||
}
|
||||
};
|
||||
|
||||
ret = plpar_hcall_norets(H_GET_24X7_DATA,
|
||||
virt_to_phys(request_buffer), sizeof(*request_buffer),
|
||||
virt_to_phys(result_buffer), sizeof(*result_buffer));
|
||||
|
||||
if (ret) {
|
||||
if (success_expected)
|
||||
pr_err_ratelimited("hcall failed: %d %#x %#x %d => "
|
||||
"0x%lx (%ld) detail=0x%x failing ix=%x\n",
|
||||
domain, offset, ix, lpar, ret, ret,
|
||||
result_buffer->buf.detailed_rc,
|
||||
result_buffer->buf.failing_request_ix);
|
||||
goto out;
|
||||
}
|
||||
|
||||
*res = be64_to_cpu(result_buffer->result);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static unsigned long event_24x7_request(struct perf_event *event, u64 *res,
|
||||
bool success_expected)
|
||||
{
|
||||
return single_24x7_request(event_get_domain(event),
|
||||
event_get_offset(event),
|
||||
event_get_starting_index(event),
|
||||
event_get_lpar(event),
|
||||
res,
|
||||
success_expected);
|
||||
}
|
||||
|
||||
static int h_24x7_event_init(struct perf_event *event)
|
||||
{
|
||||
struct hv_perf_caps caps;
|
||||
unsigned domain;
|
||||
unsigned long hret;
|
||||
u64 ct;
|
||||
|
||||
/* Not our event */
|
||||
if (event->attr.type != event->pmu->type)
|
||||
return -ENOENT;
|
||||
|
||||
/* Unused areas must be 0 */
|
||||
if (event_get_reserved1(event) ||
|
||||
event_get_reserved2(event) ||
|
||||
event_get_reserved3(event)) {
|
||||
pr_devel("reserved set when forbidden 0x%llx(0x%llx) 0x%llx(0x%llx) 0x%llx(0x%llx)\n",
|
||||
event->attr.config,
|
||||
event_get_reserved1(event),
|
||||
event->attr.config1,
|
||||
event_get_reserved2(event),
|
||||
event->attr.config2,
|
||||
event_get_reserved3(event));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* unsupported modes and filters */
|
||||
if (event->attr.exclude_user ||
|
||||
event->attr.exclude_kernel ||
|
||||
event->attr.exclude_hv ||
|
||||
event->attr.exclude_idle ||
|
||||
event->attr.exclude_host ||
|
||||
event->attr.exclude_guest)
|
||||
return -EINVAL;
|
||||
|
||||
/* no branch sampling */
|
||||
if (has_branch_stack(event))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
/* offset must be 8 byte aligned */
|
||||
if (event_get_offset(event) % 8) {
|
||||
pr_devel("bad alignment\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Domains above 6 are invalid */
|
||||
domain = event_get_domain(event);
|
||||
if (domain > 6) {
|
||||
pr_devel("invalid domain %d\n", domain);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
hret = hv_perf_caps_get(&caps);
|
||||
if (hret) {
|
||||
pr_devel("could not get capabilities: rc=%ld\n", hret);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
/* PHYSICAL domains & other lpars require extra capabilities */
|
||||
if (!caps.collect_privileged && (is_physical_domain(domain) ||
|
||||
(event_get_lpar(event) != event_get_lpar_max()))) {
|
||||
pr_devel("hv permisions disallow: is_physical_domain:%d, lpar=0x%llx\n",
|
||||
is_physical_domain(domain),
|
||||
event_get_lpar(event));
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
/* see if the event complains */
|
||||
if (event_24x7_request(event, &ct, false)) {
|
||||
pr_devel("test hcall failed\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static u64 h_24x7_get_value(struct perf_event *event)
|
||||
{
|
||||
unsigned long ret;
|
||||
u64 ct;
|
||||
ret = event_24x7_request(event, &ct, true);
|
||||
if (ret)
|
||||
/* We checked this in event init, shouldn't fail here... */
|
||||
return 0;
|
||||
|
||||
return ct;
|
||||
}
|
||||
|
||||
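/*
* Standard perf delta accounting for a free-running hypervisor counter:
* remember the last value read in hw.prev_count and fold the difference
* into event->count on every update.
*/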
static void h_24x7_event_update(struct perf_event *event)
|
||||
{
|
||||
s64 prev;
|
||||
u64 now;
|
||||
now = h_24x7_get_value(event);
|
||||
prev = local64_xchg(&event->hw.prev_count, now);
|
||||
local64_add(now - prev, &event->count);
|
||||
}
|
||||
|
||||
static void h_24x7_event_start(struct perf_event *event, int flags)
|
||||
{
|
||||
if (flags & PERF_EF_RELOAD)
|
||||
local64_set(&event->hw.prev_count, h_24x7_get_value(event));
|
||||
}
|
||||
|
||||
static void h_24x7_event_stop(struct perf_event *event, int flags)
|
||||
{
|
||||
h_24x7_event_update(event);
|
||||
}
|
||||
|
||||
static int h_24x7_event_add(struct perf_event *event, int flags)
|
||||
{
|
||||
if (flags & PERF_EF_START)
|
||||
h_24x7_event_start(event, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct pmu h_24x7_pmu = {
|
||||
.task_ctx_nr = perf_invalid_context,
|
||||
|
||||
.name = "hv_24x7",
|
||||
.attr_groups = attr_groups,
|
||||
.event_init = h_24x7_event_init,
|
||||
.add = h_24x7_event_add,
|
||||
.del = h_24x7_event_stop,
|
||||
.start = h_24x7_event_start,
|
||||
.stop = h_24x7_event_stop,
|
||||
.read = h_24x7_event_update,
|
||||
};
|
||||
|
||||
static int hv_24x7_init(void)
|
||||
{
|
||||
int r;
|
||||
unsigned long hret;
|
||||
struct hv_perf_caps caps;
|
||||
|
||||
if (!firmware_has_feature(FW_FEATURE_LPAR)) {
|
||||
pr_debug("not a virtualized system, not enabling\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
hret = hv_perf_caps_get(&caps);
|
||||
if (hret) {
|
||||
pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
|
||||
hret);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
hv_page_cache = kmem_cache_create("hv-page-4096", 4096, 4096, 0, NULL);
|
||||
if (!hv_page_cache)
|
||||
return -ENOMEM;
|
||||
|
||||
/* sampling not supported */
|
||||
h_24x7_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
|
||||
|
||||
r = perf_pmu_register(&h_24x7_pmu, h_24x7_pmu.name, -1);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
device_initcall(hv_24x7_init);
|
109
arch/powerpc/perf/hv-24x7.h
Normal file
|
@@ -0,0 +1,109 @@
|
|||
#ifndef LINUX_POWERPC_PERF_HV_24X7_H_
|
||||
#define LINUX_POWERPC_PERF_HV_24X7_H_
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
struct hv_24x7_request {
|
||||
/* PHYSICAL domains require enabling via phyp/hmc. */
|
||||
#define HV_24X7_PERF_DOMAIN_PHYSICAL_CHIP 0x01
|
||||
#define HV_24X7_PERF_DOMAIN_PHYSICAL_CORE 0x02
|
||||
#define HV_24X7_PERF_DOMAIN_VIRTUAL_PROCESSOR_HOME_CORE 0x03
|
||||
#define HV_24X7_PERF_DOMAIN_VIRTUAL_PROCESSOR_HOME_CHIP 0x04
|
||||
#define HV_24X7_PERF_DOMAIN_VIRTUAL_PROCESSOR_HOME_NODE 0x05
|
||||
#define HV_24X7_PERF_DOMAIN_VIRTUAL_PROCESSOR_REMOTE_NODE 0x06
|
||||
__u8 performance_domain;
|
||||
__u8 reserved[0x1];
|
||||
|
||||
/* bytes to read starting at @data_offset. must be a multiple of 8 */
|
||||
__be16 data_size;
|
||||
|
||||
/*
|
||||
* byte offset within the perf domain to read from. must be 8 byte
|
||||
* aligned
|
||||
*/
|
||||
__be32 data_offset;
|
||||
|
||||
/*
|
||||
* only valid for VIRTUAL_PROCESSOR domains, ignored for others.
|
||||
* -1 means "current partition only"
|
||||
* Enabling via phyp/hmc required for non-"-1" values. 0 forbidden
|
||||
* unless requestor is 0.
|
||||
*/
|
||||
__be16 starting_lpar_ix;
|
||||
|
||||
/*
|
||||
* Ignored when @starting_lpar_ix == -1
|
||||
* Ignored when @performance_domain is not VIRTUAL_PROCESSOR_*
|
||||
* -1 means "infinite" or all
|
||||
*/
|
||||
__be16 max_num_lpars;
|
||||
|
||||
/* chip, core, or virtual processor based on @performance_domain */
|
||||
__be16 starting_ix;
|
||||
__be16 max_ix;
|
||||
} __packed;
|
||||
|
||||
struct hv_24x7_request_buffer {
|
||||
/* 0 - ? */
|
||||
/* 1 - ? */
|
||||
#define HV_24X7_IF_VERSION_CURRENT 0x01
|
||||
__u8 interface_version;
|
||||
__u8 num_requests;
|
||||
__u8 reserved[0xE];
|
||||
struct hv_24x7_request requests[];
|
||||
} __packed;
|
||||
|
||||
struct hv_24x7_result_element {
|
||||
__be16 lpar_ix;
|
||||
|
||||
/*
|
||||
* represents the core, chip, or virtual processor based on the
|
||||
* request's @performance_domain
|
||||
*/
|
||||
__be16 domain_ix;
|
||||
|
||||
/* -1 if @performance_domain does not refer to a virtual processor */
|
||||
__be32 lpar_cfg_instance_id;
|
||||
|
||||
/* size = @result_element_data_size of containing result. */
|
||||
__u8 element_data[];
|
||||
} __packed;
|
||||
|
||||
struct hv_24x7_result {
|
||||
__u8 result_ix;
|
||||
|
||||
/*
|
||||
* 0 = not all result elements fit into the buffer, additional requests
|
||||
* required
|
||||
* 1 = all result elements were returned
|
||||
*/
|
||||
__u8 results_complete;
|
||||
__be16 num_elements_returned;
|
||||
|
||||
/* This is a copy of @data_size from the corresponding hv_24x7_request */
|
||||
__be16 result_element_data_size;
|
||||
__u8 reserved[0x2];
|
||||
|
||||
/* WARNING: only valid for first result element due to variable sizes
|
||||
* of result elements */
|
||||
/* struct hv_24x7_result_element[@num_elements_returned] */
|
||||
struct hv_24x7_result_element elements[];
|
||||
} __packed;
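/*
* A sketch of how the variable-size layout is expected to be walked: each
* result element occupies sizeof(struct hv_24x7_result_element) plus
* result_element_data_size bytes, so the next element starts at
* (void *)elem + sizeof(*elem) + be16_to_cpu(res->result_element_data_size).
* This is why the elements[] array above (and results[] below) can only be
* indexed directly for the first entry.
*/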
|
||||
|
||||
struct hv_24x7_data_result_buffer {
|
||||
/* See versioning for request buffer */
|
||||
__u8 interface_version;
|
||||
|
||||
__u8 num_results;
|
||||
__u8 reserved[0x1];
|
||||
__u8 failing_request_ix;
|
||||
__be32 detailed_rc;
|
||||
__be64 cec_cfg_instance_id;
|
||||
__be64 catalog_version_num;
|
||||
__u8 reserved2[0x8];
|
||||
/* WARNING: only valid for the first result due to variable sizes of
|
||||
* results */
|
||||
struct hv_24x7_result results[]; /* [@num_results] */
|
||||
} __packed;
|
||||
|
||||
#endif
|
39
arch/powerpc/perf/hv-common.c
Normal file
|
@@ -0,0 +1,39 @@
|
|||
#include <asm/io.h>
|
||||
#include <asm/hvcall.h>
|
||||
|
||||
#include "hv-gpci.h"
|
||||
#include "hv-common.h"
|
||||
|
||||
unsigned long hv_perf_caps_get(struct hv_perf_caps *caps)
|
||||
{
|
||||
unsigned long r;
|
||||
struct p {
|
||||
struct hv_get_perf_counter_info_params params;
|
||||
struct cv_system_performance_capabilities caps;
|
||||
} __packed __aligned(sizeof(uint64_t));
|
||||
|
||||
struct p arg = {
|
||||
.params = {
|
||||
.counter_request = cpu_to_be32(
|
||||
CIR_SYSTEM_PERFORMANCE_CAPABILITIES),
|
||||
.starting_index = cpu_to_be32(-1),
|
||||
.counter_info_version_in = 0,
|
||||
}
|
||||
};
|
||||
|
||||
r = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO,
|
||||
virt_to_phys(&arg), sizeof(arg));
|
||||
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
pr_devel("capability_mask: 0x%x\n", arg.caps.capability_mask);
|
||||
|
||||
caps->version = arg.params.counter_info_version_out;
|
||||
caps->collect_privileged = !!arg.caps.perf_collect_privileged;
|
||||
caps->ga = !!(arg.caps.capability_mask & CV_CM_GA);
|
||||
caps->expanded = !!(arg.caps.capability_mask & CV_CM_EXPANDED);
|
||||
caps->lab = !!(arg.caps.capability_mask & CV_CM_LAB);
|
||||
|
||||
return r;
|
||||
}
|
36
arch/powerpc/perf/hv-common.h
Normal file
|
@@ -0,0 +1,36 @@
|
|||
#ifndef LINUX_POWERPC_PERF_HV_COMMON_H_
|
||||
#define LINUX_POWERPC_PERF_HV_COMMON_H_
|
||||
|
||||
#include <linux/perf_event.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
struct hv_perf_caps {
|
||||
u16 version;
|
||||
u16 collect_privileged:1,
|
||||
ga:1,
|
||||
expanded:1,
|
||||
lab:1,
|
||||
unused:12;
|
||||
};
|
||||
|
||||
unsigned long hv_perf_caps_get(struct hv_perf_caps *caps);
|
||||
|
||||
|
||||
#define EVENT_DEFINE_RANGE_FORMAT(name, attr_var, bit_start, bit_end) \
|
||||
PMU_FORMAT_ATTR(name, #attr_var ":" #bit_start "-" #bit_end); \
|
||||
EVENT_DEFINE_RANGE(name, attr_var, bit_start, bit_end)
|
||||
|
||||
#define EVENT_DEFINE_RANGE(name, attr_var, bit_start, bit_end) \
|
||||
static u64 event_get_##name##_max(void) \
|
||||
{ \
|
||||
BUILD_BUG_ON((bit_start > bit_end) \
|
||||
|| (bit_end >= (sizeof(1ull) * 8))); \
|
||||
return (((1ull << (bit_end - bit_start)) - 1) << 1) + 1; \
|
||||
} \
|
||||
static u64 event_get_##name(struct perf_event *event) \
|
||||
{ \
|
||||
return (event->attr.attr_var >> (bit_start)) & \
|
||||
event_get_##name##_max(); \
|
||||
}
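/*
* A minimal sketch of what these macros expand to, assuming a field were
* declared as EVENT_DEFINE_RANGE_FORMAT(domain, config, 0, 3):
*
* - a PMU format attribute named "domain" carrying the string "config:0-3"
* - static u64 event_get_domain_max(void) { return 0xf; }
* - static u64 event_get_domain(struct perf_event *event)
* { return (event->attr.config >> 0) & 0xf; }
*
* i.e. a 4-bit field taken from bits 0-3 of perf_event_attr::config.
*/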
|
||||
|
||||
#endif
|
290
arch/powerpc/perf/hv-gpci.c
Normal file
|
@@ -0,0 +1,290 @@
|
|||
/*
|
||||
* Hypervisor supplied "gpci" ("get performance counter info") performance
|
||||
* counter support
|
||||
*
|
||||
* Author: Cody P Schafer <cody@linux.vnet.ibm.com>
|
||||
* Copyright 2014 IBM Corporation.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "hv-gpci: " fmt
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/perf_event.h>
|
||||
#include <asm/firmware.h>
|
||||
#include <asm/hvcall.h>
|
||||
#include <asm/io.h>
|
||||
|
||||
#include "hv-gpci.h"
|
||||
#include "hv-common.h"
|
||||
|
||||
/*
|
||||
* Example usage:
|
||||
* perf stat -e 'hv_gpci/counter_info_version=3,offset=0,length=8,
|
||||
* secondary_index=0,starting_index=0xffffffff,request=0x10/' ...
|
||||
*/
|
||||
|
||||
/* u32 */
|
||||
EVENT_DEFINE_RANGE_FORMAT(request, config, 0, 31);
|
||||
/* u32 */
|
||||
EVENT_DEFINE_RANGE_FORMAT(starting_index, config, 32, 63);
|
||||
/* u16 */
|
||||
EVENT_DEFINE_RANGE_FORMAT(secondary_index, config1, 0, 15);
|
||||
/* u8 */
|
||||
EVENT_DEFINE_RANGE_FORMAT(counter_info_version, config1, 16, 23);
|
||||
/* u8, bytes of data (1-8) */
|
||||
EVENT_DEFINE_RANGE_FORMAT(length, config1, 24, 31);
|
||||
/* u32, byte offset */
|
||||
EVENT_DEFINE_RANGE_FORMAT(offset, config1, 32, 63);
|
||||
|
||||
static struct attribute *format_attrs[] = {
|
||||
&format_attr_request.attr,
|
||||
&format_attr_starting_index.attr,
|
||||
&format_attr_secondary_index.attr,
|
||||
&format_attr_counter_info_version.attr,
|
||||
|
||||
&format_attr_offset.attr,
|
||||
&format_attr_length.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group format_group = {
|
||||
.name = "format",
|
||||
.attrs = format_attrs,
|
||||
};
|
||||
|
||||
#define HV_CAPS_ATTR(_name, _format) \
|
||||
static ssize_t _name##_show(struct device *dev, \
|
||||
struct device_attribute *attr, \
|
||||
char *page) \
|
||||
{ \
|
||||
struct hv_perf_caps caps; \
|
||||
unsigned long hret = hv_perf_caps_get(&caps); \
|
||||
if (hret) \
|
||||
return -EIO; \
|
||||
\
|
||||
return sprintf(page, _format, caps._name); \
|
||||
} \
|
||||
static struct device_attribute hv_caps_attr_##_name = __ATTR_RO(_name)
|
||||
|
||||
static ssize_t kernel_version_show(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *page)
|
||||
{
|
||||
return sprintf(page, "0x%x\n", COUNTER_INFO_VERSION_CURRENT);
|
||||
}
|
||||
|
||||
static DEVICE_ATTR_RO(kernel_version);
|
||||
HV_CAPS_ATTR(version, "0x%x\n");
|
||||
HV_CAPS_ATTR(ga, "%d\n");
|
||||
HV_CAPS_ATTR(expanded, "%d\n");
|
||||
HV_CAPS_ATTR(lab, "%d\n");
|
||||
HV_CAPS_ATTR(collect_privileged, "%d\n");
|
||||
|
||||
static struct attribute *interface_attrs[] = {
|
||||
&dev_attr_kernel_version.attr,
|
||||
&hv_caps_attr_version.attr,
|
||||
&hv_caps_attr_ga.attr,
|
||||
&hv_caps_attr_expanded.attr,
|
||||
&hv_caps_attr_lab.attr,
|
||||
&hv_caps_attr_collect_privileged.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group interface_group = {
|
||||
.name = "interface",
|
||||
.attrs = interface_attrs,
|
||||
};
|
||||
|
||||
static const struct attribute_group *attr_groups[] = {
|
||||
&format_group,
|
||||
&interface_group,
|
||||
NULL,
|
||||
};
|
||||
|
||||
#define GPCI_MAX_DATA_BYTES \
|
||||
(1024 - sizeof(struct hv_get_perf_counter_info_params))
|
||||
|
||||
static unsigned long single_gpci_request(u32 req, u32 starting_index,
|
||||
u16 secondary_index, u8 version_in, u32 offset, u8 length,
|
||||
u64 *value)
|
||||
{
|
||||
unsigned long ret;
|
||||
size_t i;
|
||||
u64 count;
|
||||
|
||||
struct {
|
||||
struct hv_get_perf_counter_info_params params;
|
||||
uint8_t bytes[GPCI_MAX_DATA_BYTES];
|
||||
} __packed __aligned(sizeof(uint64_t)) arg = {
|
||||
.params = {
|
||||
.counter_request = cpu_to_be32(req),
|
||||
.starting_index = cpu_to_be32(starting_index),
|
||||
.secondary_index = cpu_to_be16(secondary_index),
|
||||
.counter_info_version_in = version_in,
|
||||
}
|
||||
};
|
||||
|
||||
ret = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO,
|
||||
virt_to_phys(&arg), sizeof(arg));
|
||||
if (ret) {
|
||||
pr_devel("hcall failed: 0x%lx\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* we verify offset and length are within the zeroed buffer at event
|
||||
* init.
|
||||
*/
|
||||
count = 0;
/*
* Assemble the requested bytes into a 64-bit value using whole-byte
* shifts; the counter data is assumed to be big-endian, like the other
* __be fields of this interface.
*/
for (i = offset; i < offset + length; i++)
count |= (u64)arg.bytes[i] << ((length - 1 - (i - offset)) * 8);
|
||||
|
||||
*value = count;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static u64 h_gpci_get_value(struct perf_event *event)
|
||||
{
|
||||
u64 count;
|
||||
unsigned long ret = single_gpci_request(event_get_request(event),
|
||||
event_get_starting_index(event),
|
||||
event_get_secondary_index(event),
|
||||
event_get_counter_info_version(event),
|
||||
event_get_offset(event),
|
||||
event_get_length(event),
|
||||
&count);
|
||||
if (ret)
|
||||
return 0;
|
||||
return count;
|
||||
}
|
||||
|
||||
static void h_gpci_event_update(struct perf_event *event)
|
||||
{
|
||||
s64 prev;
|
||||
u64 now = h_gpci_get_value(event);
|
||||
prev = local64_xchg(&event->hw.prev_count, now);
|
||||
local64_add(now - prev, &event->count);
|
||||
}
|
||||
|
||||
static void h_gpci_event_start(struct perf_event *event, int flags)
|
||||
{
|
||||
local64_set(&event->hw.prev_count, h_gpci_get_value(event));
|
||||
}
|
||||
|
||||
static void h_gpci_event_stop(struct perf_event *event, int flags)
|
||||
{
|
||||
h_gpci_event_update(event);
|
||||
}
|
||||
|
||||
static int h_gpci_event_add(struct perf_event *event, int flags)
|
||||
{
|
||||
if (flags & PERF_EF_START)
|
||||
h_gpci_event_start(event, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int h_gpci_event_init(struct perf_event *event)
|
||||
{
|
||||
u64 count;
|
||||
u8 length;
|
||||
|
||||
/* Not our event */
|
||||
if (event->attr.type != event->pmu->type)
|
||||
return -ENOENT;
|
||||
|
||||
/* config2 is unused */
|
||||
if (event->attr.config2) {
|
||||
pr_devel("config2 set when reserved\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* unsupported modes and filters */
|
||||
if (event->attr.exclude_user ||
|
||||
event->attr.exclude_kernel ||
|
||||
event->attr.exclude_hv ||
|
||||
event->attr.exclude_idle ||
|
||||
event->attr.exclude_host ||
|
||||
event->attr.exclude_guest)
|
||||
return -EINVAL;
|
||||
|
||||
/* no branch sampling */
|
||||
if (has_branch_stack(event))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
length = event_get_length(event);
|
||||
if (length < 1 || length > 8) {
|
||||
pr_devel("length invalid\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* last byte within the buffer? */
|
||||
if ((event_get_offset(event) + length) > GPCI_MAX_DATA_BYTES) {
|
||||
pr_devel("request outside of buffer: %zu > %zu\n",
|
||||
(size_t)event_get_offset(event) + length,
|
||||
GPCI_MAX_DATA_BYTES);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* check if the request works... */
|
||||
if (single_gpci_request(event_get_request(event),
|
||||
event_get_starting_index(event),
|
||||
event_get_secondary_index(event),
|
||||
event_get_counter_info_version(event),
|
||||
event_get_offset(event),
|
||||
length,
|
||||
&count)) {
|
||||
pr_devel("gpci hcall failed\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct pmu h_gpci_pmu = {
|
||||
.task_ctx_nr = perf_invalid_context,
|
||||
|
||||
.name = "hv_gpci",
|
||||
.attr_groups = attr_groups,
|
||||
.event_init = h_gpci_event_init,
|
||||
.add = h_gpci_event_add,
|
||||
.del = h_gpci_event_stop,
|
||||
.start = h_gpci_event_start,
|
||||
.stop = h_gpci_event_stop,
|
||||
.read = h_gpci_event_update,
|
||||
};
|
||||
|
||||
static int hv_gpci_init(void)
|
||||
{
|
||||
int r;
|
||||
unsigned long hret;
|
||||
struct hv_perf_caps caps;
|
||||
|
||||
if (!firmware_has_feature(FW_FEATURE_LPAR)) {
|
||||
pr_debug("not a virtualized system, not enabling\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
hret = hv_perf_caps_get(&caps);
|
||||
if (hret) {
|
||||
pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
|
||||
hret);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/* sampling not supported */
|
||||
h_gpci_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
|
||||
|
||||
r = perf_pmu_register(&h_gpci_pmu, h_gpci_pmu.name, -1);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
device_initcall(hv_gpci_init);
|
73
arch/powerpc/perf/hv-gpci.h
Normal file
|
@@ -0,0 +1,73 @@
|
|||
#ifndef LINUX_POWERPC_PERF_HV_GPCI_H_
|
||||
#define LINUX_POWERPC_PERF_HV_GPCI_H_
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
/* From the document "H_GetPerformanceCounterInfo Interface" v1.07 */
|
||||
|
||||
/* H_GET_PERF_COUNTER_INFO argument */
|
||||
struct hv_get_perf_counter_info_params {
|
||||
__be32 counter_request; /* I */
|
||||
__be32 starting_index; /* IO */
|
||||
__be16 secondary_index; /* IO */
|
||||
__be16 returned_values; /* O */
|
||||
__be32 detail_rc; /* O, only needed when called via *_norets() */
|
||||
|
||||
/*
|
||||
* O, size each of counter_value element in bytes, only set for version
|
||||
* >= 0x3
|
||||
*/
|
||||
__be16 cv_element_size;
|
||||
|
||||
/* I, 0 (zero) for versions < 0x3 */
|
||||
__u8 counter_info_version_in;
|
||||
|
||||
/* O, 0 (zero) if version < 0x3. Must be set to 0 when making hcall */
|
||||
__u8 counter_info_version_out;
|
||||
__u8 reserved[0xC];
|
||||
__u8 counter_value[];
|
||||
} __packed;
|
||||
|
||||
/*
|
||||
* counter info version => fw version/reference (spec version)
|
||||
*
|
||||
* 8 => power8 (1.07)
|
||||
* [7 is skipped by spec 1.07]
|
||||
* 6 => TLBIE (1.07)
|
||||
* 5 => v7r7m0.phyp (1.05)
|
||||
* [4 skipped]
|
||||
* 3 => v7r6m0.phyp (?)
|
||||
* [1,2 skipped]
|
||||
* 0 => v7r{2,3,4}m0.phyp (?)
|
||||
*/
|
||||
#define COUNTER_INFO_VERSION_CURRENT 0x8
|
||||
|
||||
/*
|
||||
* These determine the counter_value[] layout and the meaning of starting_index
|
||||
* and secondary_index.
|
||||
*
|
||||
* Unless otherwise noted, @secondary_index is unused and ignored.
|
||||
*/
|
||||
enum counter_info_requests {
|
||||
|
||||
/* GENERAL */
|
||||
|
||||
/* @starting_index: must be -1 (to refer to the current partition)
|
||||
*/
|
||||
CIR_SYSTEM_PERFORMANCE_CAPABILITIES = 0X40,
|
||||
};
|
||||
|
||||
struct cv_system_performance_capabilities {
|
||||
/* If != 0, allowed to collect data from other partitions */
|
||||
__u8 perf_collect_privileged;
|
||||
|
||||
/* These following are only valid if counter_info_version >= 0x3 */
|
||||
#define CV_CM_GA (1 << 7)
|
||||
#define CV_CM_EXPANDED (1 << 6)
|
||||
#define CV_CM_LAB (1 << 5)
|
||||
/* remaining bits are reserved */
|
||||
__u8 capability_mask;
|
||||
__u8 reserved[0xE];
|
||||
} __packed;
|
||||
|
||||
#endif
|
423
arch/powerpc/perf/mpc7450-pmu.c
Normal file
|
@@ -0,0 +1,423 @@
|
|||
/*
|
||||
* Performance counter support for MPC7450-family processors.
|
||||
*
|
||||
* Copyright 2008-2009 Paul Mackerras, IBM Corporation.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
#include <linux/string.h>
|
||||
#include <linux/perf_event.h>
|
||||
#include <asm/reg.h>
|
||||
#include <asm/cputable.h>
|
||||
|
||||
#define N_COUNTER 6 /* Number of hardware counters */
|
||||
#define MAX_ALT 3 /* Maximum number of event alternative codes */
|
||||
|
||||
/*
|
||||
* Bits in event code for MPC7450 family
|
||||
*/
|
||||
#define PM_THRMULT_MSKS 0x40000
|
||||
#define PM_THRESH_SH 12
|
||||
#define PM_THRESH_MSK 0x3f
|
||||
#define PM_PMC_SH 8
|
||||
#define PM_PMC_MSK 7
|
||||
#define PM_PMCSEL_MSK 0x7f
|
||||
|
||||
/*
|
||||
* Classify events according to how specific their PMC requirements are.
|
||||
* Result is:
|
||||
* 0: can go on any PMC
|
||||
* 1: can go on PMCs 1-4
|
||||
* 2: can go on PMCs 1,2,4
|
||||
* 3: can go on PMCs 1 or 2
|
||||
* 4: can only go on one PMC
|
||||
* -1: event code is invalid
|
||||
*/
|
||||
#define N_CLASSES 5
|
||||
|
||||
static int mpc7450_classify_event(u32 event)
|
||||
{
|
||||
int pmc;
|
||||
|
||||
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
|
||||
if (pmc) {
|
||||
if (pmc > N_COUNTER)
|
||||
return -1;
|
||||
return 4;
|
||||
}
|
||||
event &= PM_PMCSEL_MSK;
|
||||
if (event <= 1)
|
||||
return 0;
|
||||
if (event <= 7)
|
||||
return 1;
|
||||
if (event <= 13)
|
||||
return 2;
|
||||
if (event <= 22)
|
||||
return 3;
|
||||
return -1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Events using threshold and possible threshold scale:
|
||||
* code scale? name
|
||||
* 11e N PM_INSTQ_EXCEED_CYC
|
||||
* 11f N PM_ALTV_IQ_EXCEED_CYC
|
||||
* 128 Y PM_DTLB_SEARCH_EXCEED_CYC
|
||||
* 12b Y PM_LD_MISS_EXCEED_L1_CYC
|
||||
* 220 N PM_CQ_EXCEED_CYC
|
||||
* 30c N PM_GPR_RB_EXCEED_CYC
|
||||
* 30d ? PM_FPR_IQ_EXCEED_CYC ?
|
||||
* 311 Y PM_ITLB_SEARCH_EXCEED
|
||||
* 410 N PM_GPR_IQ_EXCEED_CYC
|
||||
*/
|
||||
|
||||
/*
|
||||
* Return use of threshold and threshold scale bits:
|
||||
* 0 = uses neither, 1 = uses threshold, 2 = uses both
|
||||
*/
|
||||
static int mpc7450_threshold_use(u32 event)
|
||||
{
|
||||
int pmc, sel;
|
||||
|
||||
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
|
||||
sel = event & PM_PMCSEL_MSK;
|
||||
switch (pmc) {
|
||||
case 1:
|
||||
if (sel == 0x1e || sel == 0x1f)
|
||||
return 1;
|
||||
if (sel == 0x28 || sel == 0x2b)
|
||||
return 2;
|
||||
break;
|
||||
case 2:
|
||||
if (sel == 0x20)
|
||||
return 1;
|
||||
break;
|
||||
case 3:
|
||||
if (sel == 0xc || sel == 0xd)
|
||||
return 1;
|
||||
if (sel == 0x11)
|
||||
return 2;
|
||||
break;
|
||||
case 4:
|
||||
if (sel == 0x10)
|
||||
return 1;
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Layout of constraint bits:
|
||||
* 33222222222211111111110000000000
|
||||
* 10987654321098765432109876543210
|
||||
* |< >< > < > < ><><><><><><>
|
||||
* TS TV G4 G3 G2P6P5P4P3P2P1
|
||||
*
|
||||
* P1 - P6
|
||||
* 0 - 11: Count of events needing PMC1 .. PMC6
|
||||
*
|
||||
* G2
|
||||
* 12 - 14: Count of events needing PMC1 or PMC2
|
||||
*
|
||||
* G3
|
||||
* 16 - 18: Count of events needing PMC1, PMC2 or PMC4
|
||||
*
|
||||
* G4
|
||||
* 20 - 23: Count of events needing PMC1, PMC2, PMC3 or PMC4
|
||||
*
|
||||
* TV
|
||||
* 24 - 29: Threshold value requested
|
||||
*
|
||||
* TS
|
||||
* 30: Threshold scale value requested
|
||||
*/
|
||||
|
||||
static u32 pmcbits[N_COUNTER][2] = {
|
||||
{ 0x00844002, 0x00111001 }, /* PMC1 mask, value: P1,G2,G3,G4 */
|
||||
{ 0x00844008, 0x00111004 }, /* PMC2: P2,G2,G3,G4 */
|
||||
{ 0x00800020, 0x00100010 }, /* PMC3: P3,G4 */
|
||||
{ 0x00840080, 0x00110040 }, /* PMC4: P4,G3,G4 */
|
||||
{ 0x00000200, 0x00000100 }, /* PMC5: P5 */
|
||||
{ 0x00000800, 0x00000400 } /* PMC6: P6 */
|
||||
};
|
||||
|
||||
static u32 classbits[N_CLASSES - 1][2] = {
|
||||
{ 0x00000000, 0x00000000 }, /* class 0: no constraint */
|
||||
{ 0x00800000, 0x00100000 }, /* class 1: G4 */
|
||||
{ 0x00040000, 0x00010000 }, /* class 2: G3 */
|
||||
{ 0x00004000, 0x00001000 }, /* class 3: G2 */
|
||||
};
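/*
* Worked example of the encoding above (a sketch of how the generic
* constraint checker is expected to combine these values): an event pinned
* to PMC1 contributes value 0x00111001, adding 1 to the P1, G2, G3 and G4
* counts, while mask 0x00844002 marks the top bit of each of those fields.
* Summing the values for a candidate set of events and checking the marked
* bits (together with add_fields/test_adder below) reveals when more events
* want a PMC, or a PMC group, than the hardware provides.
*/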
|
||||
|
||||
static int mpc7450_get_constraint(u64 event, unsigned long *maskp,
|
||||
unsigned long *valp)
|
||||
{
|
||||
int pmc, class;
|
||||
u32 mask, value;
|
||||
int thresh, tuse;
|
||||
|
||||
class = mpc7450_classify_event(event);
|
||||
if (class < 0)
|
||||
return -1;
|
||||
if (class == 4) {
|
||||
pmc = ((unsigned int)event >> PM_PMC_SH) & PM_PMC_MSK;
|
||||
mask = pmcbits[pmc - 1][0];
|
||||
value = pmcbits[pmc - 1][1];
|
||||
} else {
|
||||
mask = classbits[class][0];
|
||||
value = classbits[class][1];
|
||||
}
|
||||
|
||||
tuse = mpc7450_threshold_use(event);
|
||||
if (tuse) {
|
||||
thresh = ((unsigned int)event >> PM_THRESH_SH) & PM_THRESH_MSK;
|
||||
mask |= 0x3f << 24;
|
||||
value |= thresh << 24;
|
||||
if (tuse == 2) {
|
||||
mask |= 0x40000000;
|
||||
if ((unsigned int)event & PM_THRMULT_MSKS)
|
||||
value |= 0x40000000;
|
||||
}
|
||||
}
|
||||
|
||||
*maskp = mask;
|
||||
*valp = value;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const unsigned int event_alternatives[][MAX_ALT] = {
|
||||
{ 0x217, 0x317 }, /* PM_L1_DCACHE_MISS */
|
||||
{ 0x418, 0x50f, 0x60f }, /* PM_SNOOP_RETRY */
|
||||
{ 0x502, 0x602 }, /* PM_L2_HIT */
|
||||
{ 0x503, 0x603 }, /* PM_L3_HIT */
|
||||
{ 0x504, 0x604 }, /* PM_L2_ICACHE_MISS */
|
||||
{ 0x505, 0x605 }, /* PM_L3_ICACHE_MISS */
|
||||
{ 0x506, 0x606 }, /* PM_L2_DCACHE_MISS */
|
||||
{ 0x507, 0x607 }, /* PM_L3_DCACHE_MISS */
|
||||
{ 0x50a, 0x623 }, /* PM_LD_HIT_L3 */
|
||||
{ 0x50b, 0x624 }, /* PM_ST_HIT_L3 */
|
||||
{ 0x50d, 0x60d }, /* PM_L2_TOUCH_HIT */
|
||||
{ 0x50e, 0x60e }, /* PM_L3_TOUCH_HIT */
|
||||
{ 0x512, 0x612 }, /* PM_INT_LOCAL */
|
||||
{ 0x513, 0x61d }, /* PM_L2_MISS */
|
||||
{ 0x514, 0x61e }, /* PM_L3_MISS */
|
||||
};
|
||||
|
||||
/*
|
||||
* Scan the alternatives table for a match and return the
|
||||
* index into the alternatives table if found, else -1.
|
||||
*/
|
||||
static int find_alternative(u32 event)
|
||||
{
|
||||
int i, j;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
|
||||
if (event < event_alternatives[i][0])
|
||||
break;
|
||||
for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
|
||||
if (event == event_alternatives[i][j])
|
||||
return i;
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int mpc7450_get_alternatives(u64 event, unsigned int flags, u64 alt[])
|
||||
{
|
||||
int i, j, nalt = 1;
|
||||
u32 ae;
|
||||
|
||||
alt[0] = event;
|
||||
nalt = 1;
|
||||
i = find_alternative((u32)event);
|
||||
if (i >= 0) {
|
||||
for (j = 0; j < MAX_ALT; ++j) {
|
||||
ae = event_alternatives[i][j];
|
||||
if (ae && ae != (u32)event)
|
||||
alt[nalt++] = ae;
|
||||
}
|
||||
}
|
||||
return nalt;
|
||||
}
|
||||
|
||||
/*
|
||||
* Bitmaps of which PMCs each class can use for classes 0 - 3.
|
||||
* Bit i is set if PMC i+1 is usable.
|
||||
*/
|
||||
static const u8 classmap[N_CLASSES] = {
|
||||
0x3f, 0x0f, 0x0b, 0x03, 0
|
||||
};
|
||||
|
||||
/* Bit position and width of each PMCSEL field */
|
||||
static const int pmcsel_shift[N_COUNTER] = {
|
||||
6, 0, 27, 22, 17, 11
|
||||
};
|
||||
static const u32 pmcsel_mask[N_COUNTER] = {
|
||||
0x7f, 0x3f, 0x1f, 0x1f, 0x1f, 0x3f
|
||||
};
|
||||
|
||||
/*
|
||||
* Compute MMCR0/1/2 values for a set of events.
|
||||
*/
|
||||
static int mpc7450_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[],
|
||||
unsigned long mmcr[],
|
||||
struct perf_event *pevents[])
|
||||
{
|
||||
u8 event_index[N_CLASSES][N_COUNTER];
|
||||
int n_classevent[N_CLASSES];
|
||||
int i, j, class, tuse;
|
||||
u32 pmc_inuse = 0, pmc_avail;
|
||||
u32 mmcr0 = 0, mmcr1 = 0, mmcr2 = 0;
|
||||
u32 ev, pmc, thresh;
|
||||
|
||||
if (n_ev > N_COUNTER)
|
||||
return -1;
|
||||
|
||||
/* First pass: count usage in each class */
|
||||
for (i = 0; i < N_CLASSES; ++i)
|
||||
n_classevent[i] = 0;
|
||||
for (i = 0; i < n_ev; ++i) {
|
||||
class = mpc7450_classify_event(event[i]);
|
||||
if (class < 0)
|
||||
return -1;
|
||||
j = n_classevent[class]++;
|
||||
event_index[class][j] = i;
|
||||
}
|
||||
|
||||
/* Second pass: allocate PMCs from most specific event to least */
|
||||
for (class = N_CLASSES - 1; class >= 0; --class) {
|
||||
for (i = 0; i < n_classevent[class]; ++i) {
|
||||
ev = event[event_index[class][i]];
|
||||
if (class == 4) {
|
||||
pmc = (ev >> PM_PMC_SH) & PM_PMC_MSK;
|
||||
if (pmc_inuse & (1 << (pmc - 1)))
|
||||
return -1;
|
||||
} else {
|
||||
/* Find a suitable PMC */
|
||||
pmc_avail = classmap[class] & ~pmc_inuse;
|
||||
if (!pmc_avail)
|
||||
return -1;
|
||||
pmc = ffs(pmc_avail);
|
||||
}
|
||||
pmc_inuse |= 1 << (pmc - 1);
|
||||
|
||||
tuse = mpc7450_threshold_use(ev);
|
||||
if (tuse) {
|
||||
thresh = (ev >> PM_THRESH_SH) & PM_THRESH_MSK;
|
||||
mmcr0 |= thresh << 16;
|
||||
if (tuse == 2 && (ev & PM_THRMULT_MSKS))
|
||||
mmcr2 = 0x80000000;
|
||||
}
|
||||
ev &= pmcsel_mask[pmc - 1];
|
||||
ev <<= pmcsel_shift[pmc - 1];
|
||||
if (pmc <= 2)
|
||||
mmcr0 |= ev;
|
||||
else
|
||||
mmcr1 |= ev;
|
||||
hwc[event_index[class][i]] = pmc - 1;
|
||||
}
|
||||
}
|
||||
|
||||
if (pmc_inuse & 1)
|
||||
mmcr0 |= MMCR0_PMC1CE;
|
||||
if (pmc_inuse & 0x3e)
|
||||
mmcr0 |= MMCR0_PMCnCE;
|
||||
|
||||
/* Return MMCRx values */
|
||||
mmcr[0] = mmcr0;
|
||||
mmcr[1] = mmcr1;
|
||||
mmcr[2] = mmcr2;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Disable counting by a PMC.
|
||||
* Note that the pmc argument is 0-based here, not 1-based.
|
||||
*/
|
||||
static void mpc7450_disable_pmc(unsigned int pmc, unsigned long mmcr[])
|
||||
{
|
||||
if (pmc <= 1)
|
||||
mmcr[0] &= ~(pmcsel_mask[pmc] << pmcsel_shift[pmc]);
|
||||
else
|
||||
mmcr[1] &= ~(pmcsel_mask[pmc] << pmcsel_shift[pmc]);
|
||||
}
|
||||
|
||||
static int mpc7450_generic_events[] = {
|
||||
[PERF_COUNT_HW_CPU_CYCLES] = 1,
|
||||
[PERF_COUNT_HW_INSTRUCTIONS] = 2,
|
||||
[PERF_COUNT_HW_CACHE_MISSES] = 0x217, /* PM_L1_DCACHE_MISS */
|
||||
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x122, /* PM_BR_CMPL */
|
||||
[PERF_COUNT_HW_BRANCH_MISSES] = 0x41c, /* PM_BR_MPRED */
|
||||
};
|
||||
|
||||
#define C(x) PERF_COUNT_HW_CACHE_##x
|
||||
|
||||
/*
|
||||
* Table of generalized cache-related events.
|
||||
* 0 means not supported, -1 means nonsensical, other values
|
||||
* are event codes.
|
||||
*/
|
||||
static int mpc7450_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
|
||||
[C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0, 0x225 },
|
||||
[C(OP_WRITE)] = { 0, 0x227 },
|
||||
[C(OP_PREFETCH)] = { 0, 0 },
|
||||
},
|
||||
[C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0x129, 0x115 },
|
||||
[C(OP_WRITE)] = { -1, -1 },
|
||||
[C(OP_PREFETCH)] = { 0x634, 0 },
|
||||
},
|
||||
[C(LL)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0, 0 },
|
||||
[C(OP_WRITE)] = { 0, 0 },
|
||||
[C(OP_PREFETCH)] = { 0, 0 },
|
||||
},
|
||||
[C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0, 0x312 },
|
||||
[C(OP_WRITE)] = { -1, -1 },
|
||||
[C(OP_PREFETCH)] = { -1, -1 },
|
||||
},
|
||||
[C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0, 0x223 },
|
||||
[C(OP_WRITE)] = { -1, -1 },
|
||||
[C(OP_PREFETCH)] = { -1, -1 },
|
||||
},
|
||||
[C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0x122, 0x41c },
|
||||
[C(OP_WRITE)] = { -1, -1 },
|
||||
[C(OP_PREFETCH)] = { -1, -1 },
|
||||
},
|
||||
[C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { -1, -1 },
|
||||
[C(OP_WRITE)] = { -1, -1 },
|
||||
[C(OP_PREFETCH)] = { -1, -1 },
|
||||
},
|
||||
};
|
||||
|
||||
struct power_pmu mpc7450_pmu = {
|
||||
.name = "MPC7450 family",
|
||||
.n_counter = N_COUNTER,
|
||||
.max_alternatives = MAX_ALT,
|
||||
.add_fields = 0x00111555ul,
|
||||
.test_adder = 0x00301000ul,
|
||||
.compute_mmcr = mpc7450_compute_mmcr,
|
||||
.get_constraint = mpc7450_get_constraint,
|
||||
.get_alternatives = mpc7450_get_alternatives,
|
||||
.disable_pmc = mpc7450_disable_pmc,
|
||||
.n_generic = ARRAY_SIZE(mpc7450_generic_events),
|
||||
.generic_events = mpc7450_generic_events,
|
||||
.cache_events = &mpc7450_cache_events,
|
||||
};
|
||||
|
||||
static int __init init_mpc7450_pmu(void)
|
||||
{
|
||||
if (!cur_cpu_spec->oprofile_cpu_type ||
|
||||
strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/7450"))
|
||||
return -ENODEV;
|
||||
|
||||
return register_power_pmu(&mpc7450_pmu);
|
||||
}
|
||||
|
||||
early_initcall(init_mpc7450_pmu);
|
622
arch/powerpc/perf/power4-pmu.c
Normal file
|
@@ -0,0 +1,622 @@
|
|||
/*
|
||||
* Performance counter support for POWER4 (GP) and POWER4+ (GQ) processors.
|
||||
*
|
||||
* Copyright 2009 Paul Mackerras, IBM Corporation.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/perf_event.h>
|
||||
#include <linux/string.h>
|
||||
#include <asm/reg.h>
|
||||
#include <asm/cputable.h>
|
||||
|
||||
/*
|
||||
* Bits in event code for POWER4
|
||||
*/
|
||||
#define PM_PMC_SH 12 /* PMC number (1-based) for direct events */
|
||||
#define PM_PMC_MSK 0xf
|
||||
#define PM_UNIT_SH 8 /* TTMMUX number and setting - unit select */
|
||||
#define PM_UNIT_MSK 0xf
|
||||
#define PM_LOWER_SH 6
|
||||
#define PM_LOWER_MSK 1
|
||||
#define PM_LOWER_MSKS 0x40
|
||||
#define PM_BYTE_SH 4 /* Byte number of event bus to use */
|
||||
#define PM_BYTE_MSK 3
|
||||
#define PM_PMCSEL_MSK 7
|
||||
|
||||
/*
|
||||
* Unit code values
|
||||
*/
|
||||
#define PM_FPU 1
|
||||
#define PM_ISU1 2
|
||||
#define PM_IFU 3
|
||||
#define PM_IDU0 4
|
||||
#define PM_ISU1_ALT 6
|
||||
#define PM_ISU2 7
|
||||
#define PM_IFU_ALT 8
|
||||
#define PM_LSU0 9
|
||||
#define PM_LSU1 0xc
|
||||
#define PM_GPS 0xf
|
||||
|
||||
/*
|
||||
* Bits in MMCR0 for POWER4
|
||||
*/
|
||||
#define MMCR0_PMC1SEL_SH 8
|
||||
#define MMCR0_PMC2SEL_SH 1
|
||||
#define MMCR_PMCSEL_MSK 0x1f
|
||||
|
||||
/*
|
||||
* Bits in MMCR1 for POWER4
|
||||
*/
|
||||
#define MMCR1_TTM0SEL_SH 62
|
||||
#define MMCR1_TTC0SEL_SH 61
|
||||
#define MMCR1_TTM1SEL_SH 59
|
||||
#define MMCR1_TTC1SEL_SH 58
|
||||
#define MMCR1_TTM2SEL_SH 56
|
||||
#define MMCR1_TTC2SEL_SH 55
|
||||
#define MMCR1_TTM3SEL_SH 53
|
||||
#define MMCR1_TTC3SEL_SH 52
|
||||
#define MMCR1_TTMSEL_MSK 3
|
||||
#define MMCR1_TD_CP_DBG0SEL_SH 50
|
||||
#define MMCR1_TD_CP_DBG1SEL_SH 48
|
||||
#define MMCR1_TD_CP_DBG2SEL_SH 46
|
||||
#define MMCR1_TD_CP_DBG3SEL_SH 44
|
||||
#define MMCR1_DEBUG0SEL_SH 43
|
||||
#define MMCR1_DEBUG1SEL_SH 42
|
||||
#define MMCR1_DEBUG2SEL_SH 41
|
||||
#define MMCR1_DEBUG3SEL_SH 40
|
||||
#define MMCR1_PMC1_ADDER_SEL_SH 39
|
||||
#define MMCR1_PMC2_ADDER_SEL_SH 38
|
||||
#define MMCR1_PMC6_ADDER_SEL_SH 37
|
||||
#define MMCR1_PMC5_ADDER_SEL_SH 36
|
||||
#define MMCR1_PMC8_ADDER_SEL_SH 35
|
||||
#define MMCR1_PMC7_ADDER_SEL_SH 34
|
||||
#define MMCR1_PMC3_ADDER_SEL_SH 33
|
||||
#define MMCR1_PMC4_ADDER_SEL_SH 32
|
||||
#define MMCR1_PMC3SEL_SH 27
|
||||
#define MMCR1_PMC4SEL_SH 22
|
||||
#define MMCR1_PMC5SEL_SH 17
|
||||
#define MMCR1_PMC6SEL_SH 12
|
||||
#define MMCR1_PMC7SEL_SH 7
|
||||
#define MMCR1_PMC8SEL_SH 2 /* note bit 0 is in MMCRA for GP */
|
||||
|
||||
static short mmcr1_adder_bits[8] = {
|
||||
MMCR1_PMC1_ADDER_SEL_SH,
|
||||
MMCR1_PMC2_ADDER_SEL_SH,
|
||||
MMCR1_PMC3_ADDER_SEL_SH,
|
||||
MMCR1_PMC4_ADDER_SEL_SH,
|
||||
MMCR1_PMC5_ADDER_SEL_SH,
|
||||
MMCR1_PMC6_ADDER_SEL_SH,
|
||||
MMCR1_PMC7_ADDER_SEL_SH,
|
||||
MMCR1_PMC8_ADDER_SEL_SH
|
||||
};
|
||||
|
||||
/*
|
||||
* Bits in MMCRA
|
||||
*/
|
||||
#define MMCRA_PMC8SEL0_SH 17 /* PMC8SEL bit 0 for GP */
|
||||
|
||||
/*
|
||||
* Layout of constraint bits:
|
||||
* 6666555555555544444444443333333333222222222211111111110000000000
|
||||
* 3210987654321098765432109876543210987654321098765432109876543210
|
||||
* |[ >[ >[ >|||[ >[ >< >< >< >< ><><><><><><><><>
|
||||
* | UC1 UC2 UC3 ||| PS1 PS2 B0 B1 B2 B3 P1P2P3P4P5P6P7P8
|
||||
* \SMPL ||\TTC3SEL
|
||||
* |\TTC_IFU_SEL
|
||||
* \TTM2SEL0
|
||||
*
|
||||
* SMPL - SAMPLE_ENABLE constraint
|
||||
* 56: SAMPLE_ENABLE value 0x0100_0000_0000_0000
|
||||
*
|
||||
* UC1 - unit constraint 1: can't have all three of FPU/ISU1/IDU0|ISU2
|
||||
* 55: UC1 error 0x0080_0000_0000_0000
|
||||
* 54: FPU events needed 0x0040_0000_0000_0000
|
||||
* 53: ISU1 events needed 0x0020_0000_0000_0000
|
||||
* 52: IDU0|ISU2 events needed 0x0010_0000_0000_0000
|
||||
*
|
||||
* UC2 - unit constraint 2: can't have all three of FPU/IFU/LSU0
|
||||
* 51: UC2 error 0x0008_0000_0000_0000
|
||||
* 50: FPU events needed 0x0004_0000_0000_0000
|
||||
* 49: IFU events needed 0x0002_0000_0000_0000
|
||||
* 48: LSU0 events needed 0x0001_0000_0000_0000
|
||||
*
|
||||
* UC3 - unit constraint 3: can't have all four of LSU0/IFU/IDU0|ISU2/ISU1
|
||||
* 47: UC3 error 0x8000_0000_0000
|
||||
* 46: LSU0 events needed 0x4000_0000_0000
|
||||
* 45: IFU events needed 0x2000_0000_0000
|
||||
* 44: IDU0|ISU2 events needed 0x1000_0000_0000
|
||||
* 43: ISU1 events needed 0x0800_0000_0000
|
||||
*
|
||||
* TTM2SEL0
|
||||
* 42: 0 = IDU0 events needed
|
||||
* 1 = ISU2 events needed 0x0400_0000_0000
|
||||
*
|
||||
* TTC_IFU_SEL
|
||||
* 41: 0 = IFU.U events needed
|
||||
* 1 = IFU.L events needed 0x0200_0000_0000
|
||||
*
|
||||
* TTC3SEL
|
||||
* 40: 0 = LSU1.U events needed
|
||||
* 1 = LSU1.L events needed 0x0100_0000_0000
|
||||
*
|
||||
* PS1
|
||||
* 39: PS1 error 0x0080_0000_0000
|
||||
* 36-38: count of events needing PMC1/2/5/6 0x0070_0000_0000
|
||||
*
|
||||
* PS2
|
||||
* 35: PS2 error 0x0008_0000_0000
|
||||
* 32-34: count of events needing PMC3/4/7/8 0x0007_0000_0000
|
||||
*
|
||||
* B0
|
||||
* 28-31: Byte 0 event source 0xf000_0000
|
||||
* 1 = FPU
|
||||
* 2 = ISU1
|
||||
* 3 = IFU
|
||||
* 4 = IDU0
|
||||
* 7 = ISU2
|
||||
* 9 = LSU0
|
||||
* c = LSU1
|
||||
* f = GPS
|
||||
*
|
||||
* B1, B2, B3
|
||||
* 24-27, 20-23, 16-19: Byte 1, 2, 3 event sources
|
||||
*
|
||||
* P8
|
||||
* 15: P8 error 0x8000
|
||||
* 14-15: Count of events needing PMC8
|
||||
*
|
||||
* P1..P7
|
||||
* 0-13: Count of events needing PMC1..PMC7
|
||||
*
|
||||
* Note: this doesn't allow events using IFU.U to be combined with events
|
||||
* using IFU.L, though that is feasible (using TTM0 and TTM2). However
|
||||
* there are no listed events for IFU.L (they are debug events not
|
||||
* verified for performance monitoring) so this shouldn't cause a
|
||||
* problem.
|
||||
*/
|
||||
|
||||
static struct unitinfo {
|
||||
unsigned long value, mask;
|
||||
int unit;
|
||||
int lowerbit;
|
||||
} p4_unitinfo[16] = {
|
||||
[PM_FPU] = { 0x44000000000000ul, 0x88000000000000ul, PM_FPU, 0 },
|
||||
[PM_ISU1] = { 0x20080000000000ul, 0x88000000000000ul, PM_ISU1, 0 },
|
||||
[PM_ISU1_ALT] =
|
||||
{ 0x20080000000000ul, 0x88000000000000ul, PM_ISU1, 0 },
|
||||
[PM_IFU] = { 0x02200000000000ul, 0x08820000000000ul, PM_IFU, 41 },
|
||||
[PM_IFU_ALT] =
|
||||
{ 0x02200000000000ul, 0x08820000000000ul, PM_IFU, 41 },
|
||||
[PM_IDU0] = { 0x10100000000000ul, 0x80840000000000ul, PM_IDU0, 1 },
|
||||
[PM_ISU2] = { 0x10140000000000ul, 0x80840000000000ul, PM_ISU2, 0 },
|
||||
[PM_LSU0] = { 0x01400000000000ul, 0x08800000000000ul, PM_LSU0, 0 },
|
||||
[PM_LSU1] = { 0x00000000000000ul, 0x00010000000000ul, PM_LSU1, 40 },
|
||||
[PM_GPS] = { 0x00000000000000ul, 0x00000000000000ul, PM_GPS, 0 }
|
||||
};
|
||||
|
||||
static unsigned char direct_marked_event[8] = {
|
||||
(1<<2) | (1<<3), /* PMC1: PM_MRK_GRP_DISP, PM_MRK_ST_CMPL */
|
||||
(1<<3) | (1<<5), /* PMC2: PM_THRESH_TIMEO, PM_MRK_BRU_FIN */
|
||||
(1<<3), /* PMC3: PM_MRK_ST_CMPL_INT */
|
||||
(1<<4) | (1<<5), /* PMC4: PM_MRK_GRP_CMPL, PM_MRK_CRU_FIN */
|
||||
(1<<4) | (1<<5), /* PMC5: PM_MRK_GRP_TIMEO */
|
||||
(1<<3) | (1<<4) | (1<<5),
|
||||
/* PMC6: PM_MRK_ST_GPS, PM_MRK_FXU_FIN, PM_MRK_GRP_ISSUED */
|
||||
(1<<4) | (1<<5), /* PMC7: PM_MRK_FPU_FIN, PM_MRK_INST_FIN */
|
||||
(1<<4), /* PMC8: PM_MRK_LSU_FIN */
|
||||
};
|
||||
|
||||
/*
|
||||
* Returns 1 if event counts things relating to marked instructions
|
||||
* and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not.
|
||||
*/
|
||||
static int p4_marked_instr_event(u64 event)
|
||||
{
|
||||
int pmc, psel, unit, byte, bit;
|
||||
unsigned int mask;
|
||||
|
||||
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
|
||||
psel = event & PM_PMCSEL_MSK;
|
||||
if (pmc) {
|
||||
if (direct_marked_event[pmc - 1] & (1 << psel))
|
||||
return 1;
|
||||
if (psel == 0) /* add events */
|
||||
bit = (pmc <= 4)? pmc - 1: 8 - pmc;
|
||||
else if (psel == 6) /* decode events */
|
||||
bit = 4;
|
||||
else
|
||||
return 0;
|
||||
} else
|
||||
bit = psel;
|
||||
|
||||
byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
|
||||
unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
|
||||
mask = 0;
|
||||
switch (unit) {
|
||||
case PM_LSU1:
|
||||
if (event & PM_LOWER_MSKS)
|
||||
mask = 1 << 28; /* byte 7 bit 4 */
|
||||
else
|
||||
mask = 6 << 24; /* byte 3 bits 1 and 2 */
|
||||
break;
|
||||
case PM_LSU0:
|
||||
/* byte 3, bit 3; byte 2 bits 0,2,3,4,5; byte 1 */
|
||||
mask = 0x083dff00;
|
||||
}
|
||||
return (mask >> (byte * 8 + bit)) & 1;
|
||||
}
|
||||
|
||||
static int p4_get_constraint(u64 event, unsigned long *maskp,
|
||||
unsigned long *valp)
|
||||
{
|
||||
int pmc, byte, unit, lower, sh;
|
||||
unsigned long mask = 0, value = 0;
|
||||
int grp = -1;
|
||||
|
||||
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
|
||||
if (pmc) {
|
||||
if (pmc > 8)
|
||||
return -1;
|
||||
sh = (pmc - 1) * 2;
|
||||
mask |= 2 << sh;
|
||||
value |= 1 << sh;
|
||||
grp = ((pmc - 1) >> 1) & 1;
|
||||
}
|
||||
unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
|
||||
byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
|
||||
if (unit) {
|
||||
lower = (event >> PM_LOWER_SH) & PM_LOWER_MSK;
|
||||
|
||||
/*
|
||||
* Bus events on bytes 0 and 2 can be counted
|
||||
* on PMC1/2/5/6; bytes 1 and 3 on PMC3/4/7/8.
|
||||
*/
|
||||
if (!pmc)
|
||||
grp = byte & 1;
|
||||
|
||||
if (!p4_unitinfo[unit].unit)
|
||||
return -1;
|
||||
mask |= p4_unitinfo[unit].mask;
|
||||
value |= p4_unitinfo[unit].value;
|
||||
sh = p4_unitinfo[unit].lowerbit;
|
||||
if (sh > 1)
|
||||
value |= (unsigned long)lower << sh;
|
||||
else if (lower != sh)
|
||||
return -1;
|
||||
unit = p4_unitinfo[unit].unit;
|
||||
|
||||
/* Set byte lane select field */
|
||||
mask |= 0xfULL << (28 - 4 * byte);
|
||||
value |= (unsigned long)unit << (28 - 4 * byte);
|
||||
}
|
||||
if (grp == 0) {
|
||||
/* increment PMC1/2/5/6 field */
|
||||
mask |= 0x8000000000ull;
|
||||
value |= 0x1000000000ull;
|
||||
} else {
|
||||
/* increment PMC3/4/7/8 field */
|
||||
mask |= 0x800000000ull;
|
||||
value |= 0x100000000ull;
|
||||
}
|
||||
|
||||
/* Marked instruction events need sample_enable set */
|
||||
if (p4_marked_instr_event(event)) {
|
||||
mask |= 1ull << 56;
|
||||
value |= 1ull << 56;
|
||||
}
|
||||
|
||||
/* PMCSEL=6 decode events on byte 2 need sample_enable clear */
|
||||
if (pmc && (event & PM_PMCSEL_MSK) == 6 && byte == 2)
|
||||
mask |= 1ull << 56;
|
||||
|
||||
*maskp = mask;
|
||||
*valp = value;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static unsigned int ppc_inst_cmpl[] = {
|
||||
0x1001, 0x4001, 0x6001, 0x7001, 0x8001
|
||||
};
|
||||
|
||||
static int p4_get_alternatives(u64 event, unsigned int flags, u64 alt[])
|
||||
{
|
||||
int i, j, na;
|
||||
|
||||
alt[0] = event;
|
||||
na = 1;
|
||||
|
||||
/* 2 possibilities for PM_GRP_DISP_REJECT */
|
||||
if (event == 0x8003 || event == 0x0224) {
|
||||
alt[1] = event ^ (0x8003 ^ 0x0224);
|
||||
return 2;
|
||||
}
|
||||
|
||||
/* 2 possibilities for PM_ST_MISS_L1 */
|
||||
if (event == 0x0c13 || event == 0x0c23) {
|
||||
alt[1] = event ^ (0x0c13 ^ 0x0c23);
|
||||
return 2;
|
||||
}
|
||||
|
||||
/* several possibilities for PM_INST_CMPL */
|
||||
for (i = 0; i < ARRAY_SIZE(ppc_inst_cmpl); ++i) {
|
||||
if (event == ppc_inst_cmpl[i]) {
|
||||
for (j = 0; j < ARRAY_SIZE(ppc_inst_cmpl); ++j)
|
||||
if (j != i)
|
||||
alt[na++] = ppc_inst_cmpl[j];
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return na;
|
||||
}
|
||||
|
||||
static int p4_compute_mmcr(u64 event[], int n_ev,
|
||||
unsigned int hwc[], unsigned long mmcr[], struct perf_event *pevents[])
|
||||
{
|
||||
unsigned long mmcr0 = 0, mmcr1 = 0, mmcra = 0;
|
||||
unsigned int pmc, unit, byte, psel, lower;
|
||||
unsigned int ttm, grp;
|
||||
unsigned int pmc_inuse = 0;
|
||||
unsigned int pmc_grp_use[2];
|
||||
unsigned char busbyte[4];
|
||||
unsigned char unituse[16];
|
||||
unsigned int unitlower = 0;
|
||||
int i;
|
||||
|
||||
if (n_ev > 8)
|
||||
return -1;
|
||||
|
||||
/* First pass to count resource use */
|
||||
pmc_grp_use[0] = pmc_grp_use[1] = 0;
|
||||
memset(busbyte, 0, sizeof(busbyte));
|
||||
memset(unituse, 0, sizeof(unituse));
|
||||
for (i = 0; i < n_ev; ++i) {
|
||||
pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
|
||||
if (pmc) {
|
||||
if (pmc_inuse & (1 << (pmc - 1)))
|
||||
return -1;
|
||||
pmc_inuse |= 1 << (pmc - 1);
|
||||
/* count 1/2/5/6 vs 3/4/7/8 use */
|
||||
++pmc_grp_use[((pmc - 1) >> 1) & 1];
|
||||
}
|
||||
unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
|
||||
byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
|
||||
lower = (event[i] >> PM_LOWER_SH) & PM_LOWER_MSK;
|
||||
if (unit) {
|
||||
if (!pmc)
|
||||
++pmc_grp_use[byte & 1];
|
||||
if (unit == 6 || unit == 8)
|
||||
/* map alt ISU1/IFU codes: 6->2, 8->3 */
|
||||
unit = (unit >> 1) - 1;
|
||||
if (busbyte[byte] && busbyte[byte] != unit)
|
||||
return -1;
|
||||
busbyte[byte] = unit;
|
||||
lower <<= unit;
|
||||
if (unituse[unit] && lower != (unitlower & lower))
|
||||
return -1;
|
||||
unituse[unit] = 1;
|
||||
unitlower |= lower;
|
||||
}
|
||||
}
|
||||
if (pmc_grp_use[0] > 4 || pmc_grp_use[1] > 4)
|
||||
return -1;
|
||||
|
||||
/*
|
||||
* Assign resources and set multiplexer selects.
|
||||
*
|
||||
* Units 1,2,3 are on TTM0, 4,6,7 on TTM1, 8,10 on TTM2.
|
||||
* Each TTMx can only select one unit, but since
|
||||
* units 2 and 6 are both ISU1, and 3 and 8 are both IFU,
|
||||
* we have some choices.
|
||||
*/
|
||||
if (unituse[2] & (unituse[1] | (unituse[3] & unituse[9]))) {
|
||||
unituse[6] = 1; /* Move 2 to 6 */
|
||||
unituse[2] = 0;
|
||||
}
|
||||
if (unituse[3] & (unituse[1] | unituse[2])) {
|
||||
unituse[8] = 1; /* Move 3 to 8 */
|
||||
unituse[3] = 0;
|
||||
unitlower = (unitlower & ~8) | ((unitlower & 8) << 5);
|
||||
}
|
||||
/* Check only one unit per TTMx */
|
||||
if (unituse[1] + unituse[2] + unituse[3] > 1 ||
|
||||
unituse[4] + unituse[6] + unituse[7] > 1 ||
|
||||
unituse[8] + unituse[9] > 1 ||
|
||||
(unituse[5] | unituse[10] | unituse[11] |
|
||||
unituse[13] | unituse[14]))
|
||||
return -1;
|
||||
|
||||
/* Set TTMxSEL fields. Note, units 1-3 => TTM0SEL codes 0-2 */
|
||||
mmcr1 |= (unsigned long)(unituse[3] * 2 + unituse[2])
|
||||
<< MMCR1_TTM0SEL_SH;
|
||||
mmcr1 |= (unsigned long)(unituse[7] * 3 + unituse[6] * 2)
|
||||
<< MMCR1_TTM1SEL_SH;
|
||||
mmcr1 |= (unsigned long)unituse[9] << MMCR1_TTM2SEL_SH;
|
||||
|
||||
/* Set TTCxSEL fields. */
|
||||
if (unitlower & 0xe)
|
||||
mmcr1 |= 1ull << MMCR1_TTC0SEL_SH;
|
||||
if (unitlower & 0xf0)
|
||||
mmcr1 |= 1ull << MMCR1_TTC1SEL_SH;
|
||||
if (unitlower & 0xf00)
|
||||
mmcr1 |= 1ull << MMCR1_TTC2SEL_SH;
|
||||
if (unitlower & 0x7000)
|
||||
mmcr1 |= 1ull << MMCR1_TTC3SEL_SH;
|
||||
|
||||
/* Set byte lane select fields. */
|
||||
for (byte = 0; byte < 4; ++byte) {
|
||||
unit = busbyte[byte];
|
||||
if (!unit)
|
||||
continue;
|
||||
if (unit == 0xf) {
|
||||
/* special case for GPS */
|
||||
mmcr1 |= 1ull << (MMCR1_DEBUG0SEL_SH - byte);
|
||||
} else {
|
||||
if (!unituse[unit])
|
||||
ttm = unit - 1; /* 2->1, 3->2 */
|
||||
else
|
||||
ttm = unit >> 2;
|
||||
mmcr1 |= (unsigned long)ttm
|
||||
<< (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
|
||||
}
|
||||
}
|
||||
|
||||
/* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */
|
||||
for (i = 0; i < n_ev; ++i) {
|
||||
pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
|
||||
unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
|
||||
byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
|
||||
psel = event[i] & PM_PMCSEL_MSK;
|
||||
if (!pmc) {
|
||||
/* Bus event or 00xxx direct event (off or cycles) */
|
||||
if (unit)
|
||||
psel |= 0x10 | ((byte & 2) << 2);
|
||||
for (pmc = 0; pmc < 8; ++pmc) {
|
||||
if (pmc_inuse & (1 << pmc))
|
||||
continue;
|
||||
grp = (pmc >> 1) & 1;
|
||||
if (unit) {
|
||||
if (grp == (byte & 1))
|
||||
break;
|
||||
} else if (pmc_grp_use[grp] < 4) {
|
||||
++pmc_grp_use[grp];
|
||||
break;
|
||||
}
|
||||
}
|
||||
pmc_inuse |= 1 << pmc;
|
||||
} else {
|
||||
/* Direct event */
|
||||
--pmc;
|
||||
if (psel == 0 && (byte & 2))
|
||||
/* add events on higher-numbered bus */
|
||||
mmcr1 |= 1ull << mmcr1_adder_bits[pmc];
|
||||
else if (psel == 6 && byte == 3)
|
||||
/* seem to need to set sample_enable here */
|
||||
mmcra |= MMCRA_SAMPLE_ENABLE;
|
||||
psel |= 8;
|
||||
}
|
||||
if (pmc <= 1)
|
||||
mmcr0 |= psel << (MMCR0_PMC1SEL_SH - 7 * pmc);
|
||||
else
|
||||
mmcr1 |= psel << (MMCR1_PMC3SEL_SH - 5 * (pmc - 2));
|
||||
if (pmc == 7) /* PMC8 */
|
||||
mmcra |= (psel & 1) << MMCRA_PMC8SEL0_SH;
|
||||
hwc[i] = pmc;
|
||||
if (p4_marked_instr_event(event[i]))
|
||||
mmcra |= MMCRA_SAMPLE_ENABLE;
|
||||
}
|
||||
|
||||
if (pmc_inuse & 1)
|
||||
mmcr0 |= MMCR0_PMC1CE;
|
||||
if (pmc_inuse & 0xfe)
|
||||
mmcr0 |= MMCR0_PMCjCE;
|
||||
|
||||
mmcra |= 0x2000; /* mark only one IOP per PPC instruction */
|
||||
|
||||
/* Return MMCRx values */
|
||||
mmcr[0] = mmcr0;
|
||||
mmcr[1] = mmcr1;
|
||||
mmcr[2] = mmcra;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void p4_disable_pmc(unsigned int pmc, unsigned long mmcr[])
|
||||
{
|
||||
/*
|
||||
* Setting the PMCxSEL field to 0 disables PMC x.
|
||||
* (Note that pmc is 0-based here, not 1-based.)
|
||||
*/
|
||||
if (pmc <= 1) {
|
||||
mmcr[0] &= ~(0x1fUL << (MMCR0_PMC1SEL_SH - 7 * pmc));
|
||||
} else {
|
||||
mmcr[1] &= ~(0x1fUL << (MMCR1_PMC3SEL_SH - 5 * (pmc - 2)));
|
||||
if (pmc == 7)
|
||||
mmcr[2] &= ~(1UL << MMCRA_PMC8SEL0_SH);
|
||||
}
|
||||
}
|
||||
|
||||
static int p4_generic_events[] = {
|
||||
[PERF_COUNT_HW_CPU_CYCLES] = 7,
|
||||
[PERF_COUNT_HW_INSTRUCTIONS] = 0x1001,
|
||||
[PERF_COUNT_HW_CACHE_REFERENCES] = 0x8c10, /* PM_LD_REF_L1 */
|
||||
[PERF_COUNT_HW_CACHE_MISSES] = 0x3c10, /* PM_LD_MISS_L1 */
|
||||
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x330, /* PM_BR_ISSUED */
|
||||
[PERF_COUNT_HW_BRANCH_MISSES] = 0x331, /* PM_BR_MPRED_CR */
|
||||
};
|
||||
|
||||
#define C(x) PERF_COUNT_HW_CACHE_##x
|
||||
|
||||
/*
|
||||
* Table of generalized cache-related events.
|
||||
* 0 means not supported, -1 means nonsensical, other values
|
||||
* are event codes.
|
||||
*/
|
||||
static int power4_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
|
||||
[C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0x8c10, 0x3c10 },
|
||||
[C(OP_WRITE)] = { 0x7c10, 0xc13 },
|
||||
[C(OP_PREFETCH)] = { 0xc35, 0 },
|
||||
},
|
||||
[C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0, 0 },
|
||||
[C(OP_WRITE)] = { -1, -1 },
|
||||
[C(OP_PREFETCH)] = { 0, 0 },
|
||||
},
|
||||
[C(LL)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0, 0 },
|
||||
[C(OP_WRITE)] = { 0, 0 },
|
||||
[C(OP_PREFETCH)] = { 0xc34, 0 },
|
||||
},
|
||||
[C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0, 0x904 },
|
||||
[C(OP_WRITE)] = { -1, -1 },
|
||||
[C(OP_PREFETCH)] = { -1, -1 },
|
||||
},
|
||||
[C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0, 0x900 },
|
||||
[C(OP_WRITE)] = { -1, -1 },
|
||||
[C(OP_PREFETCH)] = { -1, -1 },
|
||||
},
|
||||
[C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0x330, 0x331 },
|
||||
[C(OP_WRITE)] = { -1, -1 },
|
||||
[C(OP_PREFETCH)] = { -1, -1 },
|
||||
},
|
||||
[C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { -1, -1 },
|
||||
[C(OP_WRITE)] = { -1, -1 },
|
||||
[C(OP_PREFETCH)] = { -1, -1 },
|
||||
},
|
||||
};
|
||||
|
||||
static struct power_pmu power4_pmu = {
|
||||
.name = "POWER4/4+",
|
||||
.n_counter = 8,
|
||||
.max_alternatives = 5,
|
||||
.add_fields = 0x0000001100005555ul,
|
||||
.test_adder = 0x0011083300000000ul,
|
||||
.compute_mmcr = p4_compute_mmcr,
|
||||
.get_constraint = p4_get_constraint,
|
||||
.get_alternatives = p4_get_alternatives,
|
||||
.disable_pmc = p4_disable_pmc,
|
||||
.n_generic = ARRAY_SIZE(p4_generic_events),
|
||||
.generic_events = p4_generic_events,
|
||||
.cache_events = &power4_cache_events,
|
||||
.flags = PPMU_NO_SIPR | PPMU_NO_CONT_SAMPLING,
|
||||
};
|
||||
|
||||
static int __init init_power4_pmu(void)
|
||||
{
|
||||
if (!cur_cpu_spec->oprofile_cpu_type ||
|
||||
strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power4"))
|
||||
return -ENODEV;
|
||||
|
||||
return register_power_pmu(&power4_pmu);
|
||||
}
|
||||
|
||||
early_initcall(init_power4_pmu);
|
690
arch/powerpc/perf/power5+-pmu.c
Normal file
|
@@ -0,0 +1,690 @@
|
|||
/*
 * Performance counter support for POWER5+/++ (not POWER5) processors.
 *
 * Copyright 2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/string.h>
#include <asm/reg.h>
#include <asm/cputable.h>

/*
 * Bits in event code for POWER5+ (POWER5 GS) and POWER5++ (POWER5 GS DD3)
 */
#define PM_PMC_SH	20	/* PMC number (1-based) for direct events */
#define PM_PMC_MSK	0xf
#define PM_PMC_MSKS	(PM_PMC_MSK << PM_PMC_SH)
#define PM_UNIT_SH	16	/* TTMMUX number and setting - unit select */
#define PM_UNIT_MSK	0xf
#define PM_BYTE_SH	12	/* Byte number of event bus to use */
#define PM_BYTE_MSK	7
#define PM_GRS_SH	8	/* Storage subsystem mux select */
#define PM_GRS_MSK	7
#define PM_BUSEVENT_MSK	0x80	/* Set if event uses event bus */
#define PM_PMCSEL_MSK	0x7f

/* Values in PM_UNIT field */
#define PM_FPU		0
#define PM_ISU0		1
#define PM_IFU		2
#define PM_ISU1		3
#define PM_IDU		4
#define PM_ISU0_ALT	6
#define PM_GRS		7
#define PM_LSU0		8
#define PM_LSU1		0xc
#define PM_LASTUNIT	0xc

/*
|
||||
* Bits in MMCR1 for POWER5+
|
||||
*/
|
||||
#define MMCR1_TTM0SEL_SH 62
|
||||
#define MMCR1_TTM1SEL_SH 60
|
||||
#define MMCR1_TTM2SEL_SH 58
|
||||
#define MMCR1_TTM3SEL_SH 56
|
||||
#define MMCR1_TTMSEL_MSK 3
|
||||
#define MMCR1_TD_CP_DBG0SEL_SH 54
|
||||
#define MMCR1_TD_CP_DBG1SEL_SH 52
|
||||
#define MMCR1_TD_CP_DBG2SEL_SH 50
|
||||
#define MMCR1_TD_CP_DBG3SEL_SH 48
|
||||
#define MMCR1_GRS_L2SEL_SH 46
|
||||
#define MMCR1_GRS_L2SEL_MSK 3
|
||||
#define MMCR1_GRS_L3SEL_SH 44
|
||||
#define MMCR1_GRS_L3SEL_MSK 3
|
||||
#define MMCR1_GRS_MCSEL_SH 41
|
||||
#define MMCR1_GRS_MCSEL_MSK 7
|
||||
#define MMCR1_GRS_FABSEL_SH 39
|
||||
#define MMCR1_GRS_FABSEL_MSK 3
|
||||
#define MMCR1_PMC1_ADDER_SEL_SH 35
|
||||
#define MMCR1_PMC2_ADDER_SEL_SH 34
|
||||
#define MMCR1_PMC3_ADDER_SEL_SH 33
|
||||
#define MMCR1_PMC4_ADDER_SEL_SH 32
|
||||
#define MMCR1_PMC1SEL_SH 25
|
||||
#define MMCR1_PMC2SEL_SH 17
|
||||
#define MMCR1_PMC3SEL_SH 9
|
||||
#define MMCR1_PMC4SEL_SH 1
|
||||
#define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8)
|
||||
#define MMCR1_PMCSEL_MSK 0x7f
|
||||
|
||||
/*
|
||||
* Layout of constraint bits:
|
||||
* 6666555555555544444444443333333333222222222211111111110000000000
|
||||
* 3210987654321098765432109876543210987654321098765432109876543210
|
||||
* [ ><><>< ><> <><>[ > < >< >< >< ><><><><><><>
|
||||
* NC G0G1G2 G3 T0T1 UC B0 B1 B2 B3 P6P5P4P3P2P1
|
||||
*
|
||||
* NC - number of counters
|
||||
* 51: NC error 0x0008_0000_0000_0000
|
||||
* 48-50: number of events needing PMC1-4 0x0007_0000_0000_0000
|
||||
*
|
||||
* G0..G3 - GRS mux constraints
|
||||
* 46-47: GRS_L2SEL value
|
||||
* 44-45: GRS_L3SEL value
|
||||
* 41-44: GRS_MCSEL value
|
||||
* 39-40: GRS_FABSEL value
|
||||
* Note that these match up with their bit positions in MMCR1
|
||||
*
|
||||
* T0 - TTM0 constraint
|
||||
* 36-37: TTM0SEL value (0=FPU, 2=IFU, 3=ISU1) 0x30_0000_0000
|
||||
*
|
||||
* T1 - TTM1 constraint
|
||||
* 34-35: TTM1SEL value (0=IDU, 3=GRS) 0x0c_0000_0000
|
||||
*
|
||||
* UC - unit constraint: can't have all three of FPU|IFU|ISU1, ISU0, IDU|GRS
|
||||
* 33: UC3 error 0x02_0000_0000
|
||||
* 32: FPU|IFU|ISU1 events needed 0x01_0000_0000
|
||||
* 31: ISU0 events needed 0x00_8000_0000
|
||||
* 30: IDU|GRS events needed 0x00_4000_0000
|
||||
*
|
||||
* B0
|
||||
* 24-27: Byte 0 event source 0x0f00_0000
|
||||
* Encoding as for the event code
|
||||
*
|
||||
* B1, B2, B3
|
||||
* 20-23, 16-19, 12-15: Byte 1, 2, 3 event sources
|
||||
*
|
||||
* P6
|
||||
* 11: P6 error 0x800
|
||||
* 10-11: Count of events needing PMC6
|
||||
*
|
||||
* P1..P5
|
||||
* 0-9: Count of events needing PMC1..PMC5
|
||||
*/
|
||||
|
||||
static const int grsel_shift[8] = {
|
||||
MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH,
|
||||
MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH,
|
||||
MMCR1_GRS_MCSEL_SH, MMCR1_GRS_FABSEL_SH
|
||||
};
|
||||
|
||||
/* Masks and values for using events from the various units */
|
||||
static unsigned long unit_cons[PM_LASTUNIT+1][2] = {
|
||||
[PM_FPU] = { 0x3200000000ul, 0x0100000000ul },
|
||||
[PM_ISU0] = { 0x0200000000ul, 0x0080000000ul },
|
||||
[PM_ISU1] = { 0x3200000000ul, 0x3100000000ul },
|
||||
[PM_IFU] = { 0x3200000000ul, 0x2100000000ul },
|
||||
[PM_IDU] = { 0x0e00000000ul, 0x0040000000ul },
|
||||
[PM_GRS] = { 0x0e00000000ul, 0x0c40000000ul },
|
||||
};
|
||||
|
||||
static int power5p_get_constraint(u64 event, unsigned long *maskp,
|
||||
unsigned long *valp)
|
||||
{
|
||||
int pmc, byte, unit, sh;
|
||||
int bit, fmask;
|
||||
unsigned long mask = 0, value = 0;
|
||||
|
||||
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
|
||||
if (pmc) {
|
||||
if (pmc > 6)
|
||||
return -1;
|
||||
sh = (pmc - 1) * 2;
|
||||
mask |= 2 << sh;
|
||||
value |= 1 << sh;
|
||||
if (pmc >= 5 && !(event == 0x500009 || event == 0x600005))
|
||||
return -1;
|
||||
}
|
||||
if (event & PM_BUSEVENT_MSK) {
|
||||
unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
|
||||
if (unit > PM_LASTUNIT)
|
||||
return -1;
|
||||
if (unit == PM_ISU0_ALT)
|
||||
unit = PM_ISU0;
|
||||
mask |= unit_cons[unit][0];
|
||||
value |= unit_cons[unit][1];
|
||||
byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
|
||||
if (byte >= 4) {
|
||||
if (unit != PM_LSU1)
|
||||
return -1;
|
||||
/* Map LSU1 low word (bytes 4-7) to unit LSU1+1 */
|
||||
++unit;
|
||||
byte &= 3;
|
||||
}
|
||||
if (unit == PM_GRS) {
|
||||
bit = event & 7;
|
||||
fmask = (bit == 6)? 7: 3;
|
||||
sh = grsel_shift[bit];
|
||||
mask |= (unsigned long)fmask << sh;
|
||||
value |= (unsigned long)((event >> PM_GRS_SH) & fmask)
|
||||
<< sh;
|
||||
}
|
||||
/* Set byte lane select field */
|
||||
mask |= 0xfUL << (24 - 4 * byte);
|
||||
value |= (unsigned long)unit << (24 - 4 * byte);
|
||||
}
|
||||
if (pmc < 5) {
|
||||
/* need a counter from PMC1-4 set */
|
||||
mask |= 0x8000000000000ul;
|
||||
value |= 0x1000000000000ul;
|
||||
}
|
||||
*maskp = mask;
|
||||
*valp = value;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int power5p_limited_pmc_event(u64 event)
|
||||
{
|
||||
int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
|
||||
|
||||
return pmc == 5 || pmc == 6;
|
||||
}
|
||||
|
||||
#define MAX_ALT 3 /* at most 3 alternatives for any event */
|
||||
|
||||
static const unsigned int event_alternatives[][MAX_ALT] = {
|
||||
{ 0x100c0, 0x40001f }, /* PM_GCT_FULL_CYC */
|
||||
{ 0x120e4, 0x400002 }, /* PM_GRP_DISP_REJECT */
|
||||
{ 0x230e2, 0x323087 }, /* PM_BR_PRED_CR */
|
||||
{ 0x230e3, 0x223087, 0x3230a0 }, /* PM_BR_PRED_TA */
|
||||
{ 0x410c7, 0x441084 }, /* PM_THRD_L2MISS_BOTH_CYC */
|
||||
{ 0x800c4, 0xc20e0 }, /* PM_DTLB_MISS */
|
||||
{ 0xc50c6, 0xc60e0 }, /* PM_MRK_DTLB_MISS */
|
||||
{ 0x100005, 0x600005 }, /* PM_RUN_CYC */
|
||||
{ 0x100009, 0x200009 }, /* PM_INST_CMPL */
|
||||
{ 0x200015, 0x300015 }, /* PM_LSU_LMQ_SRQ_EMPTY_CYC */
|
||||
{ 0x300009, 0x400009 }, /* PM_INST_DISP */
|
||||
};
|
||||
|
||||
/*
|
||||
* Scan the alternatives table for a match and return the
|
||||
* index into the alternatives table if found, else -1.
|
||||
*/
|
||||
static int find_alternative(unsigned int event)
|
||||
{
|
||||
int i, j;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
|
||||
if (event < event_alternatives[i][0])
|
||||
break;
|
||||
for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
|
||||
if (event == event_alternatives[i][j])
|
||||
return i;
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
static const unsigned char bytedecode_alternatives[4][4] = {
|
||||
/* PMC 1 */ { 0x21, 0x23, 0x25, 0x27 },
|
||||
/* PMC 2 */ { 0x07, 0x17, 0x0e, 0x1e },
|
||||
/* PMC 3 */ { 0x20, 0x22, 0x24, 0x26 },
|
||||
/* PMC 4 */ { 0x07, 0x17, 0x0e, 0x1e }
|
||||
};
|
||||
|
||||
/*
|
||||
* Some direct events for decodes of event bus byte 3 have alternative
|
||||
* PMCSEL values on other counters. This returns the alternative
|
||||
* event code for those that do, or -1 otherwise. This also handles
|
||||
* alternative PCMSEL values for add events.
|
||||
*/
|
||||
static s64 find_alternative_bdecode(u64 event)
|
||||
{
|
||||
int pmc, altpmc, pp, j;
|
||||
|
||||
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
|
||||
if (pmc == 0 || pmc > 4)
|
||||
return -1;
|
||||
altpmc = 5 - pmc; /* 1 <-> 4, 2 <-> 3 */
|
||||
pp = event & PM_PMCSEL_MSK;
|
||||
for (j = 0; j < 4; ++j) {
|
||||
if (bytedecode_alternatives[pmc - 1][j] == pp) {
|
||||
return (event & ~(PM_PMC_MSKS | PM_PMCSEL_MSK)) |
|
||||
(altpmc << PM_PMC_SH) |
|
||||
bytedecode_alternatives[altpmc - 1][j];
|
||||
}
|
||||
}
|
||||
|
||||
/* new decode alternatives for power5+ */
|
||||
if (pmc == 1 && (pp == 0x0d || pp == 0x0e))
|
||||
return event + (2 << PM_PMC_SH) + (0x2e - 0x0d);
|
||||
if (pmc == 3 && (pp == 0x2e || pp == 0x2f))
|
||||
return event - (2 << PM_PMC_SH) - (0x2e - 0x0d);
|
||||
|
||||
/* alternative add event encodings */
|
||||
if (pp == 0x10 || pp == 0x28)
|
||||
return ((event ^ (0x10 ^ 0x28)) & ~PM_PMC_MSKS) |
|
||||
(altpmc << PM_PMC_SH);
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int power5p_get_alternatives(u64 event, unsigned int flags, u64 alt[])
|
||||
{
|
||||
int i, j, nalt = 1;
|
||||
int nlim;
|
||||
s64 ae;
|
||||
|
||||
alt[0] = event;
|
||||
nalt = 1;
|
||||
nlim = power5p_limited_pmc_event(event);
|
||||
i = find_alternative(event);
|
||||
if (i >= 0) {
|
||||
for (j = 0; j < MAX_ALT; ++j) {
|
||||
ae = event_alternatives[i][j];
|
||||
if (ae && ae != event)
|
||||
alt[nalt++] = ae;
|
||||
nlim += power5p_limited_pmc_event(ae);
|
||||
}
|
||||
} else {
|
||||
ae = find_alternative_bdecode(event);
|
||||
if (ae > 0)
|
||||
alt[nalt++] = ae;
|
||||
}
|
||||
|
||||
if (flags & PPMU_ONLY_COUNT_RUN) {
|
||||
/*
|
||||
* We're only counting in RUN state,
|
||||
* so PM_CYC is equivalent to PM_RUN_CYC
|
||||
* and PM_INST_CMPL === PM_RUN_INST_CMPL.
|
||||
* This doesn't include alternatives that don't provide
|
||||
* any extra flexibility in assigning PMCs (e.g.
|
||||
* 0x100005 for PM_RUN_CYC vs. 0xf for PM_CYC).
|
||||
* Note that even with these additional alternatives
|
||||
* we never end up with more than 3 alternatives for any event.
|
||||
*/
|
||||
j = nalt;
|
||||
for (i = 0; i < nalt; ++i) {
|
||||
switch (alt[i]) {
|
||||
case 0xf: /* PM_CYC */
|
||||
alt[j++] = 0x600005; /* PM_RUN_CYC */
|
||||
++nlim;
|
||||
break;
|
||||
case 0x600005: /* PM_RUN_CYC */
|
||||
alt[j++] = 0xf;
|
||||
break;
|
||||
case 0x100009: /* PM_INST_CMPL */
|
||||
alt[j++] = 0x500009; /* PM_RUN_INST_CMPL */
|
||||
++nlim;
|
||||
break;
|
||||
case 0x500009: /* PM_RUN_INST_CMPL */
|
||||
alt[j++] = 0x100009; /* PM_INST_CMPL */
|
||||
alt[j++] = 0x200009;
|
||||
break;
|
||||
}
|
||||
}
|
||||
nalt = j;
|
||||
}
|
||||
|
||||
if (!(flags & PPMU_LIMITED_PMC_OK) && nlim) {
|
||||
/* remove the limited PMC events */
|
||||
j = 0;
|
||||
for (i = 0; i < nalt; ++i) {
|
||||
if (!power5p_limited_pmc_event(alt[i])) {
|
||||
alt[j] = alt[i];
|
||||
++j;
|
||||
}
|
||||
}
|
||||
nalt = j;
|
||||
} else if ((flags & PPMU_LIMITED_PMC_REQD) && nlim < nalt) {
|
||||
/* remove all but the limited PMC events */
|
||||
j = 0;
|
||||
for (i = 0; i < nalt; ++i) {
|
||||
if (power5p_limited_pmc_event(alt[i])) {
|
||||
alt[j] = alt[i];
|
||||
++j;
|
||||
}
|
||||
}
|
||||
nalt = j;
|
||||
}
|
||||
|
||||
return nalt;
|
||||
}
|
||||
|
||||
/*
|
||||
* Map of which direct events on which PMCs are marked instruction events.
|
||||
* Indexed by PMCSEL value, bit i (LE) set if PMC i is a marked event.
|
||||
* Bit 0 is set if it is marked for all PMCs.
|
||||
* The 0x80 bit indicates a byte decode PMCSEL value.
|
||||
*/
|
||||
static unsigned char direct_event_is_marked[0x28] = {
|
||||
0, /* 00 */
|
||||
0x1f, /* 01 PM_IOPS_CMPL */
|
||||
0x2, /* 02 PM_MRK_GRP_DISP */
|
||||
0xe, /* 03 PM_MRK_ST_CMPL, PM_MRK_ST_GPS, PM_MRK_ST_CMPL_INT */
|
||||
0, /* 04 */
|
||||
0x1c, /* 05 PM_MRK_BRU_FIN, PM_MRK_INST_FIN, PM_MRK_CRU_FIN */
|
||||
0x80, /* 06 */
|
||||
0x80, /* 07 */
|
||||
0, 0, 0,/* 08 - 0a */
|
||||
0x18, /* 0b PM_THRESH_TIMEO, PM_MRK_GRP_TIMEO */
|
||||
0, /* 0c */
|
||||
0x80, /* 0d */
|
||||
0x80, /* 0e */
|
||||
0, /* 0f */
|
||||
0, /* 10 */
|
||||
0x14, /* 11 PM_MRK_GRP_BR_REDIR, PM_MRK_GRP_IC_MISS */
|
||||
0, /* 12 */
|
||||
0x10, /* 13 PM_MRK_GRP_CMPL */
|
||||
0x1f, /* 14 PM_GRP_MRK, PM_MRK_{FXU,FPU,LSU}_FIN */
|
||||
0x2, /* 15 PM_MRK_GRP_ISSUED */
|
||||
0x80, /* 16 */
|
||||
0x80, /* 17 */
|
||||
0, 0, 0, 0, 0,
|
||||
0x80, /* 1d */
|
||||
0x80, /* 1e */
|
||||
0, /* 1f */
|
||||
0x80, /* 20 */
|
||||
0x80, /* 21 */
|
||||
0x80, /* 22 */
|
||||
0x80, /* 23 */
|
||||
0x80, /* 24 */
|
||||
0x80, /* 25 */
|
||||
0x80, /* 26 */
|
||||
0x80, /* 27 */
|
||||
};
|
||||
|
||||
/*
|
||||
* Returns 1 if event counts things relating to marked instructions
|
||||
* and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not.
|
||||
*/
|
||||
static int power5p_marked_instr_event(u64 event)
|
||||
{
|
||||
int pmc, psel;
|
||||
int bit, byte, unit;
|
||||
u32 mask;
|
||||
|
||||
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
|
||||
psel = event & PM_PMCSEL_MSK;
|
||||
if (pmc >= 5)
|
||||
return 0;
|
||||
|
||||
bit = -1;
|
||||
if (psel < sizeof(direct_event_is_marked)) {
|
||||
if (direct_event_is_marked[psel] & (1 << pmc))
|
||||
return 1;
|
||||
if (direct_event_is_marked[psel] & 0x80)
|
||||
bit = 4;
|
||||
else if (psel == 0x08)
|
||||
bit = pmc - 1;
|
||||
else if (psel == 0x10)
|
||||
bit = 4 - pmc;
|
||||
else if (psel == 0x1b && (pmc == 1 || pmc == 3))
|
||||
bit = 4;
|
||||
} else if ((psel & 0x48) == 0x40) {
|
||||
bit = psel & 7;
|
||||
} else if (psel == 0x28) {
|
||||
bit = pmc - 1;
|
||||
} else if (pmc == 3 && (psel == 0x2e || psel == 0x2f)) {
|
||||
bit = 4;
|
||||
}
|
||||
|
||||
if (!(event & PM_BUSEVENT_MSK) || bit == -1)
|
||||
return 0;
|
||||
|
||||
byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
|
||||
unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
|
||||
if (unit == PM_LSU0) {
|
||||
/* byte 1 bits 0-7, byte 2 bits 0,2-4,6 */
|
||||
mask = 0x5dff00;
|
||||
} else if (unit == PM_LSU1 && byte >= 4) {
|
||||
byte -= 4;
|
||||
/* byte 5 bits 6-7, byte 6 bits 0,4, byte 7 bits 0-4,6 */
|
||||
mask = 0x5f11c000;
|
||||
} else
|
||||
return 0;
|
||||
|
||||
return (mask >> (byte * 8 + bit)) & 1;
|
||||
}
|
||||
|
||||
static int power5p_compute_mmcr(u64 event[], int n_ev,
|
||||
unsigned int hwc[], unsigned long mmcr[], struct perf_event *pevents[])
|
||||
{
|
||||
unsigned long mmcr1 = 0;
|
||||
unsigned long mmcra = 0;
|
||||
unsigned int pmc, unit, byte, psel;
|
||||
unsigned int ttm;
|
||||
int i, isbus, bit, grsel;
|
||||
unsigned int pmc_inuse = 0;
|
||||
unsigned char busbyte[4];
|
||||
unsigned char unituse[16];
|
||||
int ttmuse;
|
||||
|
||||
if (n_ev > 6)
|
||||
return -1;
|
||||
|
||||
/* First pass to count resource use */
|
||||
memset(busbyte, 0, sizeof(busbyte));
|
||||
memset(unituse, 0, sizeof(unituse));
|
||||
for (i = 0; i < n_ev; ++i) {
|
||||
pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
|
||||
if (pmc) {
|
||||
if (pmc > 6)
|
||||
return -1;
|
||||
if (pmc_inuse & (1 << (pmc - 1)))
|
||||
return -1;
|
||||
pmc_inuse |= 1 << (pmc - 1);
|
||||
}
|
||||
if (event[i] & PM_BUSEVENT_MSK) {
|
||||
unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
|
||||
byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
|
||||
if (unit > PM_LASTUNIT)
|
||||
return -1;
|
||||
if (unit == PM_ISU0_ALT)
|
||||
unit = PM_ISU0;
|
||||
if (byte >= 4) {
|
||||
if (unit != PM_LSU1)
|
||||
return -1;
|
||||
++unit;
|
||||
byte &= 3;
|
||||
}
|
||||
if (busbyte[byte] && busbyte[byte] != unit)
|
||||
return -1;
|
||||
busbyte[byte] = unit;
|
||||
unituse[unit] = 1;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Assign resources and set multiplexer selects.
|
||||
*
|
||||
* PM_ISU0 can go either on TTM0 or TTM1, but that's the only
|
||||
* choice we have to deal with.
|
||||
*/
|
||||
if (unituse[PM_ISU0] &
|
||||
(unituse[PM_FPU] | unituse[PM_IFU] | unituse[PM_ISU1])) {
|
||||
unituse[PM_ISU0_ALT] = 1; /* move ISU to TTM1 */
|
||||
unituse[PM_ISU0] = 0;
|
||||
}
|
||||
/* Set TTM[01]SEL fields. */
|
||||
ttmuse = 0;
|
||||
for (i = PM_FPU; i <= PM_ISU1; ++i) {
|
||||
if (!unituse[i])
|
||||
continue;
|
||||
if (ttmuse++)
|
||||
return -1;
|
||||
mmcr1 |= (unsigned long)i << MMCR1_TTM0SEL_SH;
|
||||
}
|
||||
ttmuse = 0;
|
||||
for (; i <= PM_GRS; ++i) {
|
||||
if (!unituse[i])
|
||||
continue;
|
||||
if (ttmuse++)
|
||||
return -1;
|
||||
mmcr1 |= (unsigned long)(i & 3) << MMCR1_TTM1SEL_SH;
|
||||
}
|
||||
if (ttmuse > 1)
|
||||
return -1;
|
||||
|
||||
/* Set byte lane select fields, TTM[23]SEL and GRS_*SEL. */
|
||||
for (byte = 0; byte < 4; ++byte) {
|
||||
unit = busbyte[byte];
|
||||
if (!unit)
|
||||
continue;
|
||||
if (unit == PM_ISU0 && unituse[PM_ISU0_ALT]) {
|
||||
/* get ISU0 through TTM1 rather than TTM0 */
|
||||
unit = PM_ISU0_ALT;
|
||||
} else if (unit == PM_LSU1 + 1) {
|
||||
/* select lower word of LSU1 for this byte */
|
||||
mmcr1 |= 1ul << (MMCR1_TTM3SEL_SH + 3 - byte);
|
||||
}
|
||||
ttm = unit >> 2;
|
||||
mmcr1 |= (unsigned long)ttm
|
||||
<< (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
|
||||
}
|
||||
|
||||
/* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */
|
||||
for (i = 0; i < n_ev; ++i) {
|
||||
pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
|
||||
unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
|
||||
byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
|
||||
psel = event[i] & PM_PMCSEL_MSK;
|
||||
isbus = event[i] & PM_BUSEVENT_MSK;
|
||||
if (!pmc) {
|
||||
/* Bus event or any-PMC direct event */
|
||||
for (pmc = 0; pmc < 4; ++pmc) {
|
||||
if (!(pmc_inuse & (1 << pmc)))
|
||||
break;
|
||||
}
|
||||
if (pmc >= 4)
|
||||
return -1;
|
||||
pmc_inuse |= 1 << pmc;
|
||||
} else if (pmc <= 4) {
|
||||
/* Direct event */
|
||||
--pmc;
|
||||
if (isbus && (byte & 2) &&
|
||||
(psel == 8 || psel == 0x10 || psel == 0x28))
|
||||
/* add events on higher-numbered bus */
|
||||
mmcr1 |= 1ul << (MMCR1_PMC1_ADDER_SEL_SH - pmc);
|
||||
} else {
|
||||
/* Instructions or run cycles on PMC5/6 */
|
||||
--pmc;
|
||||
}
|
||||
if (isbus && unit == PM_GRS) {
|
||||
bit = psel & 7;
|
||||
grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK;
|
||||
mmcr1 |= (unsigned long)grsel << grsel_shift[bit];
|
||||
}
|
||||
if (power5p_marked_instr_event(event[i]))
|
||||
mmcra |= MMCRA_SAMPLE_ENABLE;
|
||||
if ((psel & 0x58) == 0x40 && (byte & 1) != ((pmc >> 1) & 1))
|
||||
/* select alternate byte lane */
|
||||
psel |= 0x10;
|
||||
if (pmc <= 3)
|
||||
mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc);
|
||||
hwc[i] = pmc;
|
||||
}
|
||||
|
||||
/* Return MMCRx values */
|
||||
mmcr[0] = 0;
|
||||
if (pmc_inuse & 1)
|
||||
mmcr[0] = MMCR0_PMC1CE;
|
||||
if (pmc_inuse & 0x3e)
|
||||
mmcr[0] |= MMCR0_PMCjCE;
|
||||
mmcr[1] = mmcr1;
|
||||
mmcr[2] = mmcra;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void power5p_disable_pmc(unsigned int pmc, unsigned long mmcr[])
|
||||
{
|
||||
if (pmc <= 3)
|
||||
mmcr[1] &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc));
|
||||
}
|
||||
|
||||
static int power5p_generic_events[] = {
|
||||
[PERF_COUNT_HW_CPU_CYCLES] = 0xf,
|
||||
[PERF_COUNT_HW_INSTRUCTIONS] = 0x100009,
|
||||
[PERF_COUNT_HW_CACHE_REFERENCES] = 0x1c10a8, /* LD_REF_L1 */
|
||||
[PERF_COUNT_HW_CACHE_MISSES] = 0x3c1088, /* LD_MISS_L1 */
|
||||
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x230e4, /* BR_ISSUED */
|
||||
[PERF_COUNT_HW_BRANCH_MISSES] = 0x230e5, /* BR_MPRED_CR */
|
||||
};
|
||||
|
||||
#define C(x) PERF_COUNT_HW_CACHE_##x
|
||||
|
||||
/*
|
||||
* Table of generalized cache-related events.
|
||||
* 0 means not supported, -1 means nonsensical, other values
|
||||
* are event codes.
|
||||
*/
|
||||
static int power5p_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
|
||||
[C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0x1c10a8, 0x3c1088 },
|
||||
[C(OP_WRITE)] = { 0x2c10a8, 0xc10c3 },
|
||||
[C(OP_PREFETCH)] = { 0xc70e7, -1 },
|
||||
},
|
||||
[C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0, 0 },
|
||||
[C(OP_WRITE)] = { -1, -1 },
|
||||
[C(OP_PREFETCH)] = { 0, 0 },
|
||||
},
|
||||
[C(LL)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0, 0 },
|
||||
[C(OP_WRITE)] = { 0, 0 },
|
||||
[C(OP_PREFETCH)] = { 0xc50c3, 0 },
|
||||
},
|
||||
[C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0xc20e4, 0x800c4 },
|
||||
[C(OP_WRITE)] = { -1, -1 },
|
||||
[C(OP_PREFETCH)] = { -1, -1 },
|
||||
},
|
||||
[C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0, 0x800c0 },
|
||||
[C(OP_WRITE)] = { -1, -1 },
|
||||
[C(OP_PREFETCH)] = { -1, -1 },
|
||||
},
|
||||
[C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0x230e4, 0x230e5 },
|
||||
[C(OP_WRITE)] = { -1, -1 },
|
||||
[C(OP_PREFETCH)] = { -1, -1 },
|
||||
},
|
||||
[C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { -1, -1 },
|
||||
[C(OP_WRITE)] = { -1, -1 },
|
||||
[C(OP_PREFETCH)] = { -1, -1 },
|
||||
},
|
||||
};
|
||||
|
||||
static struct power_pmu power5p_pmu = {
|
||||
.name = "POWER5+/++",
|
||||
.n_counter = 6,
|
||||
.max_alternatives = MAX_ALT,
|
||||
.add_fields = 0x7000000000055ul,
|
||||
.test_adder = 0x3000040000000ul,
|
||||
.compute_mmcr = power5p_compute_mmcr,
|
||||
.get_constraint = power5p_get_constraint,
|
||||
.get_alternatives = power5p_get_alternatives,
|
||||
.disable_pmc = power5p_disable_pmc,
|
||||
.limited_pmc_event = power5p_limited_pmc_event,
|
||||
.flags = PPMU_LIMITED_PMC5_6 | PPMU_HAS_SSLOT,
|
||||
.n_generic = ARRAY_SIZE(power5p_generic_events),
|
||||
.generic_events = power5p_generic_events,
|
||||
.cache_events = &power5p_cache_events,
|
||||
};
|
||||
|
||||
static int __init init_power5p_pmu(void)
|
||||
{
|
||||
if (!cur_cpu_spec->oprofile_cpu_type ||
|
||||
(strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5+")
|
||||
&& strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5++")))
|
||||
return -ENODEV;
|
||||
|
||||
return register_power_pmu(&power5p_pmu);
|
||||
}
|
||||
|
||||
early_initcall(init_power5p_pmu);
630
arch/powerpc/perf/power5-pmu.c
Normal file
@@ -0,0 +1,630 @@
/*
 * Performance counter support for POWER5 (not POWER5++) processors.
 *
 * Copyright 2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/string.h>
#include <asm/reg.h>
#include <asm/cputable.h>

/*
 * Bits in event code for POWER5 (not POWER5++)
 */
#define PM_PMC_SH	20	/* PMC number (1-based) for direct events */
#define PM_PMC_MSK	0xf
#define PM_PMC_MSKS	(PM_PMC_MSK << PM_PMC_SH)
#define PM_UNIT_SH	16	/* TTMMUX number and setting - unit select */
#define PM_UNIT_MSK	0xf
#define PM_BYTE_SH	12	/* Byte number of event bus to use */
#define PM_BYTE_MSK	7
#define PM_GRS_SH	8	/* Storage subsystem mux select */
#define PM_GRS_MSK	7
#define PM_BUSEVENT_MSK	0x80	/* Set if event uses event bus */
#define PM_PMCSEL_MSK	0x7f

/* Values in PM_UNIT field */
#define PM_FPU		0
#define PM_ISU0		1
#define PM_IFU		2
#define PM_ISU1		3
#define PM_IDU		4
#define PM_ISU0_ALT	6
#define PM_GRS		7
#define PM_LSU0		8
#define PM_LSU1		0xc
#define PM_LASTUNIT	0xc

/*
|
||||
* Bits in MMCR1 for POWER5
|
||||
*/
|
||||
#define MMCR1_TTM0SEL_SH 62
|
||||
#define MMCR1_TTM1SEL_SH 60
|
||||
#define MMCR1_TTM2SEL_SH 58
|
||||
#define MMCR1_TTM3SEL_SH 56
|
||||
#define MMCR1_TTMSEL_MSK 3
|
||||
#define MMCR1_TD_CP_DBG0SEL_SH 54
|
||||
#define MMCR1_TD_CP_DBG1SEL_SH 52
|
||||
#define MMCR1_TD_CP_DBG2SEL_SH 50
|
||||
#define MMCR1_TD_CP_DBG3SEL_SH 48
|
||||
#define MMCR1_GRS_L2SEL_SH 46
|
||||
#define MMCR1_GRS_L2SEL_MSK 3
|
||||
#define MMCR1_GRS_L3SEL_SH 44
|
||||
#define MMCR1_GRS_L3SEL_MSK 3
|
||||
#define MMCR1_GRS_MCSEL_SH 41
|
||||
#define MMCR1_GRS_MCSEL_MSK 7
|
||||
#define MMCR1_GRS_FABSEL_SH 39
|
||||
#define MMCR1_GRS_FABSEL_MSK 3
|
||||
#define MMCR1_PMC1_ADDER_SEL_SH 35
|
||||
#define MMCR1_PMC2_ADDER_SEL_SH 34
|
||||
#define MMCR1_PMC3_ADDER_SEL_SH 33
|
||||
#define MMCR1_PMC4_ADDER_SEL_SH 32
|
||||
#define MMCR1_PMC1SEL_SH 25
|
||||
#define MMCR1_PMC2SEL_SH 17
|
||||
#define MMCR1_PMC3SEL_SH 9
|
||||
#define MMCR1_PMC4SEL_SH 1
|
||||
#define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8)
|
||||
#define MMCR1_PMCSEL_MSK 0x7f
|
||||
|
||||
/*
|
||||
* Layout of constraint bits:
|
||||
* 6666555555555544444444443333333333222222222211111111110000000000
|
||||
* 3210987654321098765432109876543210987654321098765432109876543210
|
||||
* <><>[ ><><>< ><> [ >[ >[ >< >< >< >< ><><><><><><>
|
||||
* T0T1 NC G0G1G2 G3 UC PS1PS2 B0 B1 B2 B3 P6P5P4P3P2P1
|
||||
*
|
||||
* T0 - TTM0 constraint
|
||||
* 54-55: TTM0SEL value (0=FPU, 2=IFU, 3=ISU1) 0xc0_0000_0000_0000
|
||||
*
|
||||
* T1 - TTM1 constraint
|
||||
* 52-53: TTM1SEL value (0=IDU, 3=GRS) 0x30_0000_0000_0000
|
||||
*
|
||||
* NC - number of counters
|
||||
* 51: NC error 0x0008_0000_0000_0000
|
||||
* 48-50: number of events needing PMC1-4 0x0007_0000_0000_0000
|
||||
*
|
||||
* G0..G3 - GRS mux constraints
|
||||
* 46-47: GRS_L2SEL value
|
||||
* 44-45: GRS_L3SEL value
|
||||
* 41-44: GRS_MCSEL value
|
||||
* 39-40: GRS_FABSEL value
|
||||
* Note that these match up with their bit positions in MMCR1
|
||||
*
|
||||
* UC - unit constraint: can't have all three of FPU|IFU|ISU1, ISU0, IDU|GRS
|
||||
* 37: UC3 error 0x20_0000_0000
|
||||
* 36: FPU|IFU|ISU1 events needed 0x10_0000_0000
|
||||
* 35: ISU0 events needed 0x08_0000_0000
|
||||
* 34: IDU|GRS events needed 0x04_0000_0000
|
||||
*
|
||||
* PS1
|
||||
* 33: PS1 error 0x2_0000_0000
|
||||
* 31-32: count of events needing PMC1/2 0x1_8000_0000
|
||||
*
|
||||
* PS2
|
||||
* 30: PS2 error 0x4000_0000
|
||||
* 28-29: count of events needing PMC3/4 0x3000_0000
|
||||
*
|
||||
* B0
|
||||
* 24-27: Byte 0 event source 0x0f00_0000
|
||||
* Encoding as for the event code
|
||||
*
|
||||
* B1, B2, B3
|
||||
* 20-23, 16-19, 12-15: Byte 1, 2, 3 event sources
|
||||
*
|
||||
* P1..P6
|
||||
* 0-11: Count of events needing PMC1..PMC6
|
||||
*/
|
||||
|
||||
static const int grsel_shift[8] = {
|
||||
MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH,
|
||||
MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH,
|
||||
MMCR1_GRS_MCSEL_SH, MMCR1_GRS_FABSEL_SH
|
||||
};
|
||||
|
||||
/* Masks and values for using events from the various units */
|
||||
static unsigned long unit_cons[PM_LASTUNIT+1][2] = {
|
||||
[PM_FPU] = { 0xc0002000000000ul, 0x00001000000000ul },
|
||||
[PM_ISU0] = { 0x00002000000000ul, 0x00000800000000ul },
|
||||
[PM_ISU1] = { 0xc0002000000000ul, 0xc0001000000000ul },
|
||||
[PM_IFU] = { 0xc0002000000000ul, 0x80001000000000ul },
|
||||
[PM_IDU] = { 0x30002000000000ul, 0x00000400000000ul },
|
||||
[PM_GRS] = { 0x30002000000000ul, 0x30000400000000ul },
|
||||
};
|
||||
|
||||
static int power5_get_constraint(u64 event, unsigned long *maskp,
|
||||
unsigned long *valp)
|
||||
{
|
||||
int pmc, byte, unit, sh;
|
||||
int bit, fmask;
|
||||
unsigned long mask = 0, value = 0;
|
||||
int grp = -1;
|
||||
|
||||
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
|
||||
if (pmc) {
|
||||
if (pmc > 6)
|
||||
return -1;
|
||||
sh = (pmc - 1) * 2;
|
||||
mask |= 2 << sh;
|
||||
value |= 1 << sh;
|
||||
if (pmc <= 4)
|
||||
grp = (pmc - 1) >> 1;
|
||||
else if (event != 0x500009 && event != 0x600005)
|
||||
return -1;
|
||||
}
|
||||
if (event & PM_BUSEVENT_MSK) {
|
||||
unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
|
||||
if (unit > PM_LASTUNIT)
|
||||
return -1;
|
||||
if (unit == PM_ISU0_ALT)
|
||||
unit = PM_ISU0;
|
||||
mask |= unit_cons[unit][0];
|
||||
value |= unit_cons[unit][1];
|
||||
byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
|
||||
if (byte >= 4) {
|
||||
if (unit != PM_LSU1)
|
||||
return -1;
|
||||
/* Map LSU1 low word (bytes 4-7) to unit LSU1+1 */
|
||||
++unit;
|
||||
byte &= 3;
|
||||
}
|
||||
if (unit == PM_GRS) {
|
||||
bit = event & 7;
|
||||
fmask = (bit == 6)? 7: 3;
|
||||
sh = grsel_shift[bit];
|
||||
mask |= (unsigned long)fmask << sh;
|
||||
value |= (unsigned long)((event >> PM_GRS_SH) & fmask)
|
||||
<< sh;
|
||||
}
|
||||
/*
|
||||
* Bus events on bytes 0 and 2 can be counted
|
||||
* on PMC1/2; bytes 1 and 3 on PMC3/4.
|
||||
*/
|
||||
if (!pmc)
|
||||
grp = byte & 1;
|
||||
/* Set byte lane select field */
|
||||
mask |= 0xfUL << (24 - 4 * byte);
|
||||
value |= (unsigned long)unit << (24 - 4 * byte);
|
||||
}
|
||||
if (grp == 0) {
|
||||
/* increment PMC1/2 field */
|
||||
mask |= 0x200000000ul;
|
||||
value |= 0x080000000ul;
|
||||
} else if (grp == 1) {
|
||||
/* increment PMC3/4 field */
|
||||
mask |= 0x40000000ul;
|
||||
value |= 0x10000000ul;
|
||||
}
|
||||
if (pmc < 5) {
|
||||
/* need a counter from PMC1-4 set */
|
||||
mask |= 0x8000000000000ul;
|
||||
value |= 0x1000000000000ul;
|
||||
}
|
||||
*maskp = mask;
|
||||
*valp = value;
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define MAX_ALT 3 /* at most 3 alternatives for any event */
|
||||
|
||||
static const unsigned int event_alternatives[][MAX_ALT] = {
|
||||
{ 0x120e4, 0x400002 }, /* PM_GRP_DISP_REJECT */
|
||||
{ 0x410c7, 0x441084 }, /* PM_THRD_L2MISS_BOTH_CYC */
|
||||
{ 0x100005, 0x600005 }, /* PM_RUN_CYC */
|
||||
{ 0x100009, 0x200009, 0x500009 }, /* PM_INST_CMPL */
|
||||
{ 0x300009, 0x400009 }, /* PM_INST_DISP */
|
||||
};
|
||||
|
||||
/*
|
||||
* Scan the alternatives table for a match and return the
|
||||
* index into the alternatives table if found, else -1.
|
||||
*/
|
||||
static int find_alternative(u64 event)
|
||||
{
|
||||
int i, j;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
|
||||
if (event < event_alternatives[i][0])
|
||||
break;
|
||||
for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
|
||||
if (event == event_alternatives[i][j])
|
||||
return i;
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
static const unsigned char bytedecode_alternatives[4][4] = {
|
||||
/* PMC 1 */ { 0x21, 0x23, 0x25, 0x27 },
|
||||
/* PMC 2 */ { 0x07, 0x17, 0x0e, 0x1e },
|
||||
/* PMC 3 */ { 0x20, 0x22, 0x24, 0x26 },
|
||||
/* PMC 4 */ { 0x07, 0x17, 0x0e, 0x1e }
|
||||
};
|
||||
|
||||
/*
|
||||
* Some direct events for decodes of event bus byte 3 have alternative
|
||||
* PMCSEL values on other counters. This returns the alternative
|
||||
* event code for those that do, or -1 otherwise.
|
||||
*/
|
||||
static s64 find_alternative_bdecode(u64 event)
|
||||
{
|
||||
int pmc, altpmc, pp, j;
|
||||
|
||||
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
|
||||
if (pmc == 0 || pmc > 4)
|
||||
return -1;
|
||||
altpmc = 5 - pmc; /* 1 <-> 4, 2 <-> 3 */
|
||||
pp = event & PM_PMCSEL_MSK;
|
||||
for (j = 0; j < 4; ++j) {
|
||||
if (bytedecode_alternatives[pmc - 1][j] == pp) {
|
||||
return (event & ~(PM_PMC_MSKS | PM_PMCSEL_MSK)) |
|
||||
(altpmc << PM_PMC_SH) |
|
||||
bytedecode_alternatives[altpmc - 1][j];
|
||||
}
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int power5_get_alternatives(u64 event, unsigned int flags, u64 alt[])
|
||||
{
|
||||
int i, j, nalt = 1;
|
||||
s64 ae;
|
||||
|
||||
alt[0] = event;
|
||||
nalt = 1;
|
||||
i = find_alternative(event);
|
||||
if (i >= 0) {
|
||||
for (j = 0; j < MAX_ALT; ++j) {
|
||||
ae = event_alternatives[i][j];
|
||||
if (ae && ae != event)
|
||||
alt[nalt++] = ae;
|
||||
}
|
||||
} else {
|
||||
ae = find_alternative_bdecode(event);
|
||||
if (ae > 0)
|
||||
alt[nalt++] = ae;
|
||||
}
|
||||
return nalt;
|
||||
}
|
||||
|
||||
/*
|
||||
* Map of which direct events on which PMCs are marked instruction events.
|
||||
* Indexed by PMCSEL value, bit i (LE) set if PMC i is a marked event.
|
||||
* Bit 0 is set if it is marked for all PMCs.
|
||||
* The 0x80 bit indicates a byte decode PMCSEL value.
|
||||
*/
|
||||
static unsigned char direct_event_is_marked[0x28] = {
|
||||
0, /* 00 */
|
||||
0x1f, /* 01 PM_IOPS_CMPL */
|
||||
0x2, /* 02 PM_MRK_GRP_DISP */
|
||||
0xe, /* 03 PM_MRK_ST_CMPL, PM_MRK_ST_GPS, PM_MRK_ST_CMPL_INT */
|
||||
0, /* 04 */
|
||||
0x1c, /* 05 PM_MRK_BRU_FIN, PM_MRK_INST_FIN, PM_MRK_CRU_FIN */
|
||||
0x80, /* 06 */
|
||||
0x80, /* 07 */
|
||||
0, 0, 0,/* 08 - 0a */
|
||||
0x18, /* 0b PM_THRESH_TIMEO, PM_MRK_GRP_TIMEO */
|
||||
0, /* 0c */
|
||||
0x80, /* 0d */
|
||||
0x80, /* 0e */
|
||||
0, /* 0f */
|
||||
0, /* 10 */
|
||||
0x14, /* 11 PM_MRK_GRP_BR_REDIR, PM_MRK_GRP_IC_MISS */
|
||||
0, /* 12 */
|
||||
0x10, /* 13 PM_MRK_GRP_CMPL */
|
||||
0x1f, /* 14 PM_GRP_MRK, PM_MRK_{FXU,FPU,LSU}_FIN */
|
||||
0x2, /* 15 PM_MRK_GRP_ISSUED */
|
||||
0x80, /* 16 */
|
||||
0x80, /* 17 */
|
||||
0, 0, 0, 0, 0,
|
||||
0x80, /* 1d */
|
||||
0x80, /* 1e */
|
||||
0, /* 1f */
|
||||
0x80, /* 20 */
|
||||
0x80, /* 21 */
|
||||
0x80, /* 22 */
|
||||
0x80, /* 23 */
|
||||
0x80, /* 24 */
|
||||
0x80, /* 25 */
|
||||
0x80, /* 26 */
|
||||
0x80, /* 27 */
|
||||
};
|
||||
|
||||
/*
|
||||
* Returns 1 if event counts things relating to marked instructions
|
||||
* and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not.
|
||||
*/
|
||||
static int power5_marked_instr_event(u64 event)
|
||||
{
|
||||
int pmc, psel;
|
||||
int bit, byte, unit;
|
||||
u32 mask;
|
||||
|
||||
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
|
||||
psel = event & PM_PMCSEL_MSK;
|
||||
if (pmc >= 5)
|
||||
return 0;
|
||||
|
||||
bit = -1;
|
||||
if (psel < sizeof(direct_event_is_marked)) {
|
||||
if (direct_event_is_marked[psel] & (1 << pmc))
|
||||
return 1;
|
||||
if (direct_event_is_marked[psel] & 0x80)
|
||||
bit = 4;
|
||||
else if (psel == 0x08)
|
||||
bit = pmc - 1;
|
||||
else if (psel == 0x10)
|
||||
bit = 4 - pmc;
|
||||
else if (psel == 0x1b && (pmc == 1 || pmc == 3))
|
||||
bit = 4;
|
||||
} else if ((psel & 0x58) == 0x40)
|
||||
bit = psel & 7;
|
||||
|
||||
if (!(event & PM_BUSEVENT_MSK))
|
||||
return 0;
|
||||
|
||||
byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
|
||||
unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
|
||||
if (unit == PM_LSU0) {
|
||||
/* byte 1 bits 0-7, byte 2 bits 0,2-4,6 */
|
||||
mask = 0x5dff00;
|
||||
} else if (unit == PM_LSU1 && byte >= 4) {
|
||||
byte -= 4;
|
||||
/* byte 4 bits 1,3,5,7, byte 5 bits 6-7, byte 7 bits 0-4,6 */
|
||||
mask = 0x5f00c0aa;
|
||||
} else
|
||||
return 0;
|
||||
|
||||
return (mask >> (byte * 8 + bit)) & 1;
|
||||
}
|
||||
|
||||
static int power5_compute_mmcr(u64 event[], int n_ev,
|
||||
unsigned int hwc[], unsigned long mmcr[], struct perf_event *pevents[])
|
||||
{
|
||||
unsigned long mmcr1 = 0;
|
||||
unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS;
|
||||
unsigned int pmc, unit, byte, psel;
|
||||
unsigned int ttm, grp;
|
||||
int i, isbus, bit, grsel;
|
||||
unsigned int pmc_inuse = 0;
|
||||
unsigned int pmc_grp_use[2];
|
||||
unsigned char busbyte[4];
|
||||
unsigned char unituse[16];
|
||||
int ttmuse;
|
||||
|
||||
if (n_ev > 6)
|
||||
return -1;
|
||||
|
||||
/* First pass to count resource use */
|
||||
pmc_grp_use[0] = pmc_grp_use[1] = 0;
|
||||
memset(busbyte, 0, sizeof(busbyte));
|
||||
memset(unituse, 0, sizeof(unituse));
|
||||
for (i = 0; i < n_ev; ++i) {
|
||||
pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
|
||||
if (pmc) {
|
||||
if (pmc > 6)
|
||||
return -1;
|
||||
if (pmc_inuse & (1 << (pmc - 1)))
|
||||
return -1;
|
||||
pmc_inuse |= 1 << (pmc - 1);
|
||||
/* count 1/2 vs 3/4 use */
|
||||
if (pmc <= 4)
|
||||
++pmc_grp_use[(pmc - 1) >> 1];
|
||||
}
|
||||
if (event[i] & PM_BUSEVENT_MSK) {
|
||||
unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
|
||||
byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
|
||||
if (unit > PM_LASTUNIT)
|
||||
return -1;
|
||||
if (unit == PM_ISU0_ALT)
|
||||
unit = PM_ISU0;
|
||||
if (byte >= 4) {
|
||||
if (unit != PM_LSU1)
|
||||
return -1;
|
||||
++unit;
|
||||
byte &= 3;
|
||||
}
|
||||
if (!pmc)
|
||||
++pmc_grp_use[byte & 1];
|
||||
if (busbyte[byte] && busbyte[byte] != unit)
|
||||
return -1;
|
||||
busbyte[byte] = unit;
|
||||
unituse[unit] = 1;
|
||||
}
|
||||
}
|
||||
if (pmc_grp_use[0] > 2 || pmc_grp_use[1] > 2)
|
||||
return -1;
|
||||
|
||||
/*
|
||||
* Assign resources and set multiplexer selects.
|
||||
*
|
||||
* PM_ISU0 can go either on TTM0 or TTM1, but that's the only
|
||||
* choice we have to deal with.
|
||||
*/
|
||||
if (unituse[PM_ISU0] &
|
||||
(unituse[PM_FPU] | unituse[PM_IFU] | unituse[PM_ISU1])) {
|
||||
unituse[PM_ISU0_ALT] = 1; /* move ISU to TTM1 */
|
||||
unituse[PM_ISU0] = 0;
|
||||
}
|
||||
/* Set TTM[01]SEL fields. */
|
||||
ttmuse = 0;
|
||||
for (i = PM_FPU; i <= PM_ISU1; ++i) {
|
||||
if (!unituse[i])
|
||||
continue;
|
||||
if (ttmuse++)
|
||||
return -1;
|
||||
mmcr1 |= (unsigned long)i << MMCR1_TTM0SEL_SH;
|
||||
}
|
||||
ttmuse = 0;
|
||||
for (; i <= PM_GRS; ++i) {
|
||||
if (!unituse[i])
|
||||
continue;
|
||||
if (ttmuse++)
|
||||
return -1;
|
||||
mmcr1 |= (unsigned long)(i & 3) << MMCR1_TTM1SEL_SH;
|
||||
}
|
||||
if (ttmuse > 1)
|
||||
return -1;
|
||||
|
||||
/* Set byte lane select fields, TTM[23]SEL and GRS_*SEL. */
|
||||
for (byte = 0; byte < 4; ++byte) {
|
||||
unit = busbyte[byte];
|
||||
if (!unit)
|
||||
continue;
|
||||
if (unit == PM_ISU0 && unituse[PM_ISU0_ALT]) {
|
||||
/* get ISU0 through TTM1 rather than TTM0 */
|
||||
unit = PM_ISU0_ALT;
|
||||
} else if (unit == PM_LSU1 + 1) {
|
||||
/* select lower word of LSU1 for this byte */
|
||||
mmcr1 |= 1ul << (MMCR1_TTM3SEL_SH + 3 - byte);
|
||||
}
|
||||
ttm = unit >> 2;
|
||||
mmcr1 |= (unsigned long)ttm
|
||||
<< (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
|
||||
}
|
||||
|
||||
/* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */
|
||||
for (i = 0; i < n_ev; ++i) {
|
||||
pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
|
||||
unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
|
||||
byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
|
||||
psel = event[i] & PM_PMCSEL_MSK;
|
||||
isbus = event[i] & PM_BUSEVENT_MSK;
|
||||
if (!pmc) {
|
||||
/* Bus event or any-PMC direct event */
|
||||
for (pmc = 0; pmc < 4; ++pmc) {
|
||||
if (pmc_inuse & (1 << pmc))
|
||||
continue;
|
||||
grp = (pmc >> 1) & 1;
|
||||
if (isbus) {
|
||||
if (grp == (byte & 1))
|
||||
break;
|
||||
} else if (pmc_grp_use[grp] < 2) {
|
||||
++pmc_grp_use[grp];
|
||||
break;
|
||||
}
|
||||
}
|
||||
pmc_inuse |= 1 << pmc;
|
||||
} else if (pmc <= 4) {
|
||||
/* Direct event */
|
||||
--pmc;
|
||||
if ((psel == 8 || psel == 0x10) && isbus && (byte & 2))
|
||||
/* add events on higher-numbered bus */
|
||||
mmcr1 |= 1ul << (MMCR1_PMC1_ADDER_SEL_SH - pmc);
|
||||
} else {
|
||||
/* Instructions or run cycles on PMC5/6 */
|
||||
--pmc;
|
||||
}
|
||||
if (isbus && unit == PM_GRS) {
|
||||
bit = psel & 7;
|
||||
grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK;
|
||||
mmcr1 |= (unsigned long)grsel << grsel_shift[bit];
|
||||
}
|
||||
if (power5_marked_instr_event(event[i]))
|
||||
mmcra |= MMCRA_SAMPLE_ENABLE;
|
||||
if (pmc <= 3)
|
||||
mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc);
|
||||
hwc[i] = pmc;
|
||||
}
|
||||
|
||||
/* Return MMCRx values */
|
||||
mmcr[0] = 0;
|
||||
if (pmc_inuse & 1)
|
||||
mmcr[0] = MMCR0_PMC1CE;
|
||||
if (pmc_inuse & 0x3e)
|
||||
mmcr[0] |= MMCR0_PMCjCE;
|
||||
mmcr[1] = mmcr1;
|
||||
mmcr[2] = mmcra;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void power5_disable_pmc(unsigned int pmc, unsigned long mmcr[])
|
||||
{
|
||||
if (pmc <= 3)
|
||||
mmcr[1] &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc));
|
||||
}
|
||||
|
||||
static int power5_generic_events[] = {
|
||||
[PERF_COUNT_HW_CPU_CYCLES] = 0xf,
|
||||
[PERF_COUNT_HW_INSTRUCTIONS] = 0x100009,
|
||||
[PERF_COUNT_HW_CACHE_REFERENCES] = 0x4c1090, /* LD_REF_L1 */
|
||||
[PERF_COUNT_HW_CACHE_MISSES] = 0x3c1088, /* LD_MISS_L1 */
|
||||
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x230e4, /* BR_ISSUED */
|
||||
[PERF_COUNT_HW_BRANCH_MISSES] = 0x230e5, /* BR_MPRED_CR */
|
||||
};
|
||||
|
||||
#define C(x) PERF_COUNT_HW_CACHE_##x
|
||||
|
||||
/*
|
||||
* Table of generalized cache-related events.
|
||||
* 0 means not supported, -1 means nonsensical, other values
|
||||
* are event codes.
|
||||
*/
|
||||
static int power5_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
|
||||
[C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0x4c1090, 0x3c1088 },
|
||||
[C(OP_WRITE)] = { 0x3c1090, 0xc10c3 },
|
||||
[C(OP_PREFETCH)] = { 0xc70e7, 0 },
|
||||
},
|
||||
[C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0, 0 },
|
||||
[C(OP_WRITE)] = { -1, -1 },
|
||||
[C(OP_PREFETCH)] = { 0, 0 },
|
||||
},
|
||||
[C(LL)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0, 0x3c309b },
|
||||
[C(OP_WRITE)] = { 0, 0 },
|
||||
[C(OP_PREFETCH)] = { 0xc50c3, 0 },
|
||||
},
|
||||
[C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0x2c4090, 0x800c4 },
|
||||
[C(OP_WRITE)] = { -1, -1 },
|
||||
[C(OP_PREFETCH)] = { -1, -1 },
|
||||
},
|
||||
[C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0, 0x800c0 },
|
||||
[C(OP_WRITE)] = { -1, -1 },
|
||||
[C(OP_PREFETCH)] = { -1, -1 },
|
||||
},
|
||||
[C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0x230e4, 0x230e5 },
|
||||
[C(OP_WRITE)] = { -1, -1 },
|
||||
[C(OP_PREFETCH)] = { -1, -1 },
|
||||
},
|
||||
[C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { -1, -1 },
|
||||
[C(OP_WRITE)] = { -1, -1 },
|
||||
[C(OP_PREFETCH)] = { -1, -1 },
|
||||
},
|
||||
};
|
||||
|
||||
static struct power_pmu power5_pmu = {
|
||||
.name = "POWER5",
|
||||
.n_counter = 6,
|
||||
.max_alternatives = MAX_ALT,
|
||||
.add_fields = 0x7000090000555ul,
|
||||
.test_adder = 0x3000490000000ul,
|
||||
.compute_mmcr = power5_compute_mmcr,
|
||||
.get_constraint = power5_get_constraint,
|
||||
.get_alternatives = power5_get_alternatives,
|
||||
.disable_pmc = power5_disable_pmc,
|
||||
.n_generic = ARRAY_SIZE(power5_generic_events),
|
||||
.generic_events = power5_generic_events,
|
||||
.cache_events = &power5_cache_events,
|
||||
.flags = PPMU_HAS_SSLOT,
|
||||
};
|
||||
|
||||
static int __init init_power5_pmu(void)
|
||||
{
|
||||
if (!cur_cpu_spec->oprofile_cpu_type ||
|
||||
strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5"))
|
||||
return -ENODEV;
|
||||
|
||||
return register_power_pmu(&power5_pmu);
|
||||
}
|
||||
|
||||
early_initcall(init_power5_pmu);
552
arch/powerpc/perf/power6-pmu.c
Normal file
@@ -0,0 +1,552 @@
/*
 * Performance counter support for POWER6 processors.
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/string.h>
#include <asm/reg.h>
#include <asm/cputable.h>

/*
 * Bits in event code for POWER6
 */
#define PM_PMC_SH	20	/* PMC number (1-based) for direct events */
#define PM_PMC_MSK	0x7
#define PM_PMC_MSKS	(PM_PMC_MSK << PM_PMC_SH)
#define PM_UNIT_SH	16	/* Unit event comes (TTMxSEL encoding) */
#define PM_UNIT_MSK	0xf
#define PM_UNIT_MSKS	(PM_UNIT_MSK << PM_UNIT_SH)
#define PM_LLAV		0x8000	/* Load lookahead match value */
#define PM_LLA		0x4000	/* Load lookahead match enable */
#define PM_BYTE_SH	12	/* Byte of event bus to use */
#define PM_BYTE_MSK	3
#define PM_SUBUNIT_SH	8	/* Subunit event comes from (NEST_SEL enc.) */
#define PM_SUBUNIT_MSK	7
#define PM_SUBUNIT_MSKS	(PM_SUBUNIT_MSK << PM_SUBUNIT_SH)
#define PM_PMCSEL_MSK	0xff	/* PMCxSEL value */
#define PM_BUSEVENT_MSK	0xf3700

/*
|
||||
* Bits in MMCR1 for POWER6
|
||||
*/
|
||||
#define MMCR1_TTM0SEL_SH 60
|
||||
#define MMCR1_TTMSEL_SH(n) (MMCR1_TTM0SEL_SH - (n) * 4)
|
||||
#define MMCR1_TTMSEL_MSK 0xf
|
||||
#define MMCR1_TTMSEL(m, n) (((m) >> MMCR1_TTMSEL_SH(n)) & MMCR1_TTMSEL_MSK)
|
||||
#define MMCR1_NESTSEL_SH 45
|
||||
#define MMCR1_NESTSEL_MSK 0x7
|
||||
#define MMCR1_NESTSEL(m) (((m) >> MMCR1_NESTSEL_SH) & MMCR1_NESTSEL_MSK)
|
||||
#define MMCR1_PMC1_LLA (1ul << 44)
|
||||
#define MMCR1_PMC1_LLA_VALUE (1ul << 39)
|
||||
#define MMCR1_PMC1_ADDR_SEL (1ul << 35)
|
||||
#define MMCR1_PMC1SEL_SH 24
|
||||
#define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8)
|
||||
#define MMCR1_PMCSEL_MSK 0xff
|
||||
|
||||
/*
|
||||
* Map of which direct events on which PMCs are marked instruction events.
|
||||
* Indexed by PMCSEL value >> 1.
|
||||
* Bottom 4 bits are a map of which PMCs are interesting,
|
||||
* top 4 bits say what sort of event:
|
||||
* 0 = direct marked event,
|
||||
* 1 = byte decode event,
|
||||
* 4 = add/and event (PMC1 -> bits 0 & 4),
|
||||
* 5 = add/and event (PMC1 -> bits 1 & 5),
|
||||
* 6 = add/and event (PMC1 -> bits 2 & 6),
|
||||
* 7 = add/and event (PMC1 -> bits 3 & 7).
|
||||
*/
|
||||
static unsigned char direct_event_is_marked[0x60 >> 1] = {
|
||||
0, /* 00 */
|
||||
0, /* 02 */
|
||||
0, /* 04 */
|
||||
0x07, /* 06 PM_MRK_ST_CMPL, PM_MRK_ST_GPS, PM_MRK_ST_CMPL_INT */
|
||||
0x04, /* 08 PM_MRK_DFU_FIN */
|
||||
0x06, /* 0a PM_MRK_IFU_FIN, PM_MRK_INST_FIN */
|
||||
0, /* 0c */
|
||||
0, /* 0e */
|
||||
0x02, /* 10 PM_MRK_INST_DISP */
|
||||
0x08, /* 12 PM_MRK_LSU_DERAT_MISS */
|
||||
0, /* 14 */
|
||||
0, /* 16 */
|
||||
0x0c, /* 18 PM_THRESH_TIMEO, PM_MRK_INST_FIN */
|
||||
0x0f, /* 1a PM_MRK_INST_DISP, PM_MRK_{FXU,FPU,LSU}_FIN */
|
||||
0x01, /* 1c PM_MRK_INST_ISSUED */
|
||||
0, /* 1e */
|
||||
0, /* 20 */
|
||||
0, /* 22 */
|
||||
0, /* 24 */
|
||||
0, /* 26 */
|
||||
0x15, /* 28 PM_MRK_DATA_FROM_L2MISS, PM_MRK_DATA_FROM_L3MISS */
|
||||
0, /* 2a */
|
||||
0, /* 2c */
|
||||
0, /* 2e */
|
||||
0x4f, /* 30 */
|
||||
0x7f, /* 32 */
|
||||
0x4f, /* 34 */
|
||||
0x5f, /* 36 */
|
||||
0x6f, /* 38 */
|
||||
0x4f, /* 3a */
|
||||
0, /* 3c */
|
||||
0x08, /* 3e PM_MRK_INST_TIMEO */
|
||||
0x1f, /* 40 */
|
||||
0x1f, /* 42 */
|
||||
0x1f, /* 44 */
|
||||
0x1f, /* 46 */
|
||||
0x1f, /* 48 */
|
||||
0x1f, /* 4a */
|
||||
0x1f, /* 4c */
|
||||
0x1f, /* 4e */
|
||||
0, /* 50 */
|
||||
0x05, /* 52 PM_MRK_BR_TAKEN, PM_MRK_BR_MPRED */
|
||||
0x1c, /* 54 PM_MRK_PTEG_FROM_L3MISS, PM_MRK_PTEG_FROM_L2MISS */
|
||||
0x02, /* 56 PM_MRK_LD_MISS_L1 */
|
||||
0, /* 58 */
|
||||
0, /* 5a */
|
||||
0, /* 5c */
|
||||
0, /* 5e */
|
||||
};
|
||||
|
||||
/*
|
||||
* Masks showing for each unit which bits are marked events.
|
||||
* These masks are in LE order, i.e. 0x00000001 is byte 0, bit 0.
|
||||
*/
|
||||
static u32 marked_bus_events[16] = {
|
||||
0x01000000, /* direct events set 1: byte 3 bit 0 */
|
||||
0x00010000, /* direct events set 2: byte 2 bit 0 */
|
||||
0, 0, 0, 0, /* IDU, IFU, nest: nothing */
|
||||
0x00000088, /* VMX set 1: byte 0 bits 3, 7 */
|
||||
0x000000c0, /* VMX set 2: byte 0 bits 4-7 */
|
||||
0x04010000, /* LSU set 1: byte 2 bit 0, byte 3 bit 2 */
|
||||
0xff010000u, /* LSU set 2: byte 2 bit 0, all of byte 3 */
|
||||
0, /* LSU set 3 */
|
||||
0x00000010, /* VMX set 3: byte 0 bit 4 */
|
||||
0, /* BFP set 1 */
|
||||
0x00000022, /* BFP set 2: byte 0 bits 1, 5 */
|
||||
0, 0
|
||||
};
|
||||
|
||||
/*
|
||||
* Returns 1 if event counts things relating to marked instructions
|
||||
* and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not.
|
||||
*/
|
||||
static int power6_marked_instr_event(u64 event)
|
||||
{
|
||||
int pmc, psel, ptype;
|
||||
int bit, byte, unit;
|
||||
u32 mask;
|
||||
|
||||
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
|
||||
psel = (event & PM_PMCSEL_MSK) >> 1; /* drop edge/level bit */
|
||||
if (pmc >= 5)
|
||||
return 0;
|
||||
|
||||
bit = -1;
|
||||
if (psel < sizeof(direct_event_is_marked)) {
|
||||
ptype = direct_event_is_marked[psel];
|
||||
if (pmc == 0 || !(ptype & (1 << (pmc - 1))))
|
||||
return 0;
|
||||
ptype >>= 4;
|
||||
if (ptype == 0)
|
||||
return 1;
|
||||
if (ptype == 1)
|
||||
bit = 0;
|
||||
else
|
||||
bit = ptype ^ (pmc - 1);
|
||||
} else if ((psel & 0x48) == 0x40)
|
||||
bit = psel & 7;
|
||||
|
||||
if (!(event & PM_BUSEVENT_MSK) || bit == -1)
|
||||
return 0;
|
||||
|
||||
byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
|
||||
unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
|
||||
mask = marked_bus_events[unit];
|
||||
return (mask >> (byte * 8 + bit)) & 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Assign PMC numbers and compute MMCR1 value for a set of events
|
||||
*/
|
||||
static int p6_compute_mmcr(u64 event[], int n_ev,
|
||||
unsigned int hwc[], unsigned long mmcr[], struct perf_event *pevents[])
|
||||
{
|
||||
unsigned long mmcr1 = 0;
|
||||
unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS;
|
||||
int i;
|
||||
unsigned int pmc, ev, b, u, s, psel;
|
||||
unsigned int ttmset = 0;
|
||||
unsigned int pmc_inuse = 0;
|
||||
|
||||
if (n_ev > 6)
|
||||
return -1;
|
||||
for (i = 0; i < n_ev; ++i) {
|
||||
pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
|
||||
if (pmc) {
|
||||
if (pmc_inuse & (1 << (pmc - 1)))
|
||||
return -1; /* collision! */
|
||||
pmc_inuse |= 1 << (pmc - 1);
|
||||
}
|
||||
}
|
||||
for (i = 0; i < n_ev; ++i) {
|
||||
ev = event[i];
|
||||
pmc = (ev >> PM_PMC_SH) & PM_PMC_MSK;
|
||||
if (pmc) {
|
||||
--pmc;
|
||||
} else {
|
||||
/* can go on any PMC; find a free one */
|
||||
for (pmc = 0; pmc < 4; ++pmc)
|
||||
if (!(pmc_inuse & (1 << pmc)))
|
||||
break;
|
||||
if (pmc >= 4)
|
||||
return -1;
|
||||
pmc_inuse |= 1 << pmc;
|
||||
}
|
||||
hwc[i] = pmc;
|
||||
psel = ev & PM_PMCSEL_MSK;
|
||||
if (ev & PM_BUSEVENT_MSK) {
|
||||
/* this event uses the event bus */
|
||||
b = (ev >> PM_BYTE_SH) & PM_BYTE_MSK;
|
||||
u = (ev >> PM_UNIT_SH) & PM_UNIT_MSK;
|
||||
/* check for conflict on this byte of event bus */
|
||||
if ((ttmset & (1 << b)) && MMCR1_TTMSEL(mmcr1, b) != u)
|
||||
return -1;
|
||||
mmcr1 |= (unsigned long)u << MMCR1_TTMSEL_SH(b);
|
||||
ttmset |= 1 << b;
|
||||
if (u == 5) {
|
||||
/* Nest events have a further mux */
|
||||
s = (ev >> PM_SUBUNIT_SH) & PM_SUBUNIT_MSK;
|
||||
if ((ttmset & 0x10) &&
|
||||
MMCR1_NESTSEL(mmcr1) != s)
|
||||
return -1;
|
||||
ttmset |= 0x10;
|
||||
mmcr1 |= (unsigned long)s << MMCR1_NESTSEL_SH;
|
||||
}
|
||||
if (0x30 <= psel && psel <= 0x3d) {
|
||||
/* these need the PMCx_ADDR_SEL bits */
|
||||
if (b >= 2)
|
||||
mmcr1 |= MMCR1_PMC1_ADDR_SEL >> pmc;
|
||||
}
|
||||
/* bus select values are different for PMC3/4 */
|
||||
if (pmc >= 2 && (psel & 0x90) == 0x80)
|
||||
psel ^= 0x20;
|
||||
}
|
||||
if (ev & PM_LLA) {
|
||||
mmcr1 |= MMCR1_PMC1_LLA >> pmc;
|
||||
if (ev & PM_LLAV)
|
||||
mmcr1 |= MMCR1_PMC1_LLA_VALUE >> pmc;
|
||||
}
|
||||
if (power6_marked_instr_event(event[i]))
|
||||
mmcra |= MMCRA_SAMPLE_ENABLE;
|
||||
if (pmc < 4)
|
||||
mmcr1 |= (unsigned long)psel << MMCR1_PMCSEL_SH(pmc);
|
||||
}
|
||||
mmcr[0] = 0;
|
||||
if (pmc_inuse & 1)
|
||||
mmcr[0] = MMCR0_PMC1CE;
|
||||
if (pmc_inuse & 0xe)
|
||||
mmcr[0] |= MMCR0_PMCjCE;
|
||||
mmcr[1] = mmcr1;
|
||||
mmcr[2] = mmcra;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Layout of constraint bits:
|
||||
*
|
||||
* 0-1 add field: number of uses of PMC1 (max 1)
|
||||
* 2-3, 4-5, 6-7, 8-9, 10-11: ditto for PMC2, 3, 4, 5, 6
|
||||
* 12-15 add field: number of uses of PMC1-4 (max 4)
|
||||
* 16-19 select field: unit on byte 0 of event bus
|
||||
* 20-23, 24-27, 28-31 ditto for bytes 1, 2, 3
|
||||
* 32-34 select field: nest (subunit) event selector
|
||||
*/
|
||||
static int p6_get_constraint(u64 event, unsigned long *maskp,
|
||||
unsigned long *valp)
|
||||
{
|
||||
int pmc, byte, sh, subunit;
|
||||
unsigned long mask = 0, value = 0;
|
||||
|
||||
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
|
||||
if (pmc) {
|
||||
if (pmc > 4 && !(event == 0x500009 || event == 0x600005))
|
||||
return -1;
|
||||
sh = (pmc - 1) * 2;
|
||||
mask |= 2 << sh;
|
||||
value |= 1 << sh;
|
||||
}
|
||||
if (event & PM_BUSEVENT_MSK) {
|
||||
byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
|
||||
sh = byte * 4 + (16 - PM_UNIT_SH);
|
||||
mask |= PM_UNIT_MSKS << sh;
|
||||
value |= (unsigned long)(event & PM_UNIT_MSKS) << sh;
|
||||
if ((event & PM_UNIT_MSKS) == (5 << PM_UNIT_SH)) {
|
||||
subunit = (event >> PM_SUBUNIT_SH) & PM_SUBUNIT_MSK;
|
||||
mask |= (unsigned long)PM_SUBUNIT_MSK << 32;
|
||||
value |= (unsigned long)subunit << 32;
|
||||
}
|
||||
}
|
||||
if (pmc <= 4) {
|
||||
mask |= 0x8000; /* add field for count of PMC1-4 uses */
|
||||
value |= 0x1000;
|
||||
}
|
||||
*maskp = mask;
|
||||
*valp = value;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int p6_limited_pmc_event(u64 event)
|
||||
{
|
||||
int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
|
||||
|
||||
return pmc == 5 || pmc == 6;
|
||||
}
|
||||
|
||||
#define MAX_ALT 4 /* at most 4 alternatives for any event */
|
||||
|
||||
static const unsigned int event_alternatives[][MAX_ALT] = {
|
||||
{ 0x0130e8, 0x2000f6, 0x3000fc }, /* PM_PTEG_RELOAD_VALID */
|
||||
{ 0x080080, 0x10000d, 0x30000c, 0x4000f0 }, /* PM_LD_MISS_L1 */
|
||||
{ 0x080088, 0x200054, 0x3000f0 }, /* PM_ST_MISS_L1 */
|
||||
{ 0x10000a, 0x2000f4, 0x600005 }, /* PM_RUN_CYC */
|
||||
{ 0x10000b, 0x2000f5 }, /* PM_RUN_COUNT */
|
||||
{ 0x10000e, 0x400010 }, /* PM_PURR */
|
||||
{ 0x100010, 0x4000f8 }, /* PM_FLUSH */
|
||||
{ 0x10001a, 0x200010 }, /* PM_MRK_INST_DISP */
|
||||
{ 0x100026, 0x3000f8 }, /* PM_TB_BIT_TRANS */
|
||||
{ 0x100054, 0x2000f0 }, /* PM_ST_FIN */
|
||||
{ 0x100056, 0x2000fc }, /* PM_L1_ICACHE_MISS */
|
||||
{ 0x1000f0, 0x40000a }, /* PM_INST_IMC_MATCH_CMPL */
|
||||
{ 0x1000f8, 0x200008 }, /* PM_GCT_EMPTY_CYC */
|
||||
{ 0x1000fc, 0x400006 }, /* PM_LSU_DERAT_MISS_CYC */
|
||||
{ 0x20000e, 0x400007 }, /* PM_LSU_DERAT_MISS */
|
||||
{ 0x200012, 0x300012 }, /* PM_INST_DISP */
|
||||
{ 0x2000f2, 0x3000f2 }, /* PM_INST_DISP */
|
||||
{ 0x2000f8, 0x300010 }, /* PM_EXT_INT */
|
||||
{ 0x2000fe, 0x300056 }, /* PM_DATA_FROM_L2MISS */
|
||||
{ 0x2d0030, 0x30001a }, /* PM_MRK_FPU_FIN */
|
||||
{ 0x30000a, 0x400018 }, /* PM_MRK_INST_FIN */
|
||||
{ 0x3000f6, 0x40000e }, /* PM_L1_DCACHE_RELOAD_VALID */
|
||||
{ 0x3000fe, 0x400056 }, /* PM_DATA_FROM_L3MISS */
|
||||
};
|
||||
|
||||
/*
|
||||
* This could be made more efficient with a binary search on
|
||||
* a presorted list, if necessary
|
||||
*/
|
||||
static int find_alternatives_list(u64 event)
|
||||
{
|
||||
int i, j;
|
||||
unsigned int alt;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
|
||||
if (event < event_alternatives[i][0])
|
||||
return -1;
|
||||
for (j = 0; j < MAX_ALT; ++j) {
|
||||
alt = event_alternatives[i][j];
|
||||
if (!alt || event < alt)
|
||||
break;
|
||||
if (event == alt)
|
||||
return i;
|
||||
}
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int p6_get_alternatives(u64 event, unsigned int flags, u64 alt[])
|
||||
{
|
||||
int i, j, nlim;
|
||||
unsigned int psel, pmc;
|
||||
unsigned int nalt = 1;
|
||||
u64 aevent;
|
||||
|
||||
alt[0] = event;
|
||||
nlim = p6_limited_pmc_event(event);
|
||||
|
||||
/* check the alternatives table */
|
||||
i = find_alternatives_list(event);
|
||||
if (i >= 0) {
|
||||
/* copy out alternatives from list */
|
||||
for (j = 0; j < MAX_ALT; ++j) {
|
||||
aevent = event_alternatives[i][j];
|
||||
if (!aevent)
|
||||
break;
|
||||
if (aevent != event)
|
||||
alt[nalt++] = aevent;
|
||||
nlim += p6_limited_pmc_event(aevent);
|
||||
}
|
||||
|
||||
} else {
|
||||
/* Check for alternative ways of computing sum events */
|
||||
/* PMCSEL 0x32 counter N == PMCSEL 0x34 counter 5-N */
|
||||
psel = event & (PM_PMCSEL_MSK & ~1); /* ignore edge bit */
|
||||
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
|
||||
if (pmc && (psel == 0x32 || psel == 0x34))
|
||||
alt[nalt++] = ((event ^ 0x6) & ~PM_PMC_MSKS) |
|
||||
((5 - pmc) << PM_PMC_SH);
|
||||
|
||||
/* PMCSEL 0x38 counter N == PMCSEL 0x3a counter N+/-2 */
|
||||
if (pmc && (psel == 0x38 || psel == 0x3a))
|
||||
alt[nalt++] = ((event ^ 0x2) & ~PM_PMC_MSKS) |
|
||||
((pmc > 2? pmc - 2: pmc + 2) << PM_PMC_SH);
|
||||
}
|
||||
|
||||
if (flags & PPMU_ONLY_COUNT_RUN) {
|
||||
/*
|
||||
* We're only counting in RUN state,
|
||||
* so PM_CYC is equivalent to PM_RUN_CYC,
|
||||
* PM_INST_CMPL === PM_RUN_INST_CMPL, PM_PURR === PM_RUN_PURR.
|
||||
* This doesn't include alternatives that don't provide
|
||||
* any extra flexibility in assigning PMCs (e.g.
|
||||
* 0x10000a for PM_RUN_CYC vs. 0x1e for PM_CYC).
|
||||
* Note that even with these additional alternatives
|
||||
* we never end up with more than 4 alternatives for any event.
|
||||
*/
|
||||
j = nalt;
|
||||
for (i = 0; i < nalt; ++i) {
|
||||
switch (alt[i]) {
|
||||
case 0x1e: /* PM_CYC */
|
||||
alt[j++] = 0x600005; /* PM_RUN_CYC */
|
||||
++nlim;
|
||||
break;
|
||||
case 0x10000a: /* PM_RUN_CYC */
|
||||
alt[j++] = 0x1e; /* PM_CYC */
|
||||
break;
|
||||
case 2: /* PM_INST_CMPL */
|
||||
alt[j++] = 0x500009; /* PM_RUN_INST_CMPL */
|
||||
++nlim;
|
||||
break;
|
||||
case 0x500009: /* PM_RUN_INST_CMPL */
|
||||
alt[j++] = 2; /* PM_INST_CMPL */
|
||||
break;
|
||||
case 0x10000e: /* PM_PURR */
|
||||
alt[j++] = 0x4000f4; /* PM_RUN_PURR */
|
||||
break;
|
||||
case 0x4000f4: /* PM_RUN_PURR */
|
||||
alt[j++] = 0x10000e; /* PM_PURR */
|
||||
break;
|
||||
}
|
||||
}
|
||||
nalt = j;
|
||||
}
|
||||
|
||||
if (!(flags & PPMU_LIMITED_PMC_OK) && nlim) {
|
||||
/* remove the limited PMC events */
|
||||
j = 0;
|
||||
for (i = 0; i < nalt; ++i) {
|
||||
if (!p6_limited_pmc_event(alt[i])) {
|
||||
alt[j] = alt[i];
|
||||
++j;
|
||||
}
|
||||
}
|
||||
nalt = j;
|
||||
} else if ((flags & PPMU_LIMITED_PMC_REQD) && nlim < nalt) {
|
||||
/* remove all but the limited PMC events */
|
||||
j = 0;
|
||||
for (i = 0; i < nalt; ++i) {
|
||||
if (p6_limited_pmc_event(alt[i])) {
|
||||
alt[j] = alt[i];
|
||||
++j;
|
||||
}
|
||||
}
|
||||
nalt = j;
|
||||
}
|
||||
|
||||
return nalt;
|
||||
}
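/*
 * Worked example of the PMCSEL mirroring above (illustration only): an
 * event coded with pmc = 2 and psel = 0x32 gains the alternative
 * psel = 0x32 ^ 0x6 = 0x34 on counter 5 - 2 = 3, and an event with
 * pmc = 1 and psel = 0x38 gains psel = 0x38 ^ 0x2 = 0x3a on counter
 * 1 + 2 = 3, so the scheduler can move either sum onto the mirrored PMC.
 */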
|
||||
|
||||
static void p6_disable_pmc(unsigned int pmc, unsigned long mmcr[])
|
||||
{
|
||||
/* Set PMCxSEL to 0 to disable PMCx */
|
||||
if (pmc <= 3)
|
||||
mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc));
|
||||
}
|
||||
|
||||
static int power6_generic_events[] = {
|
||||
[PERF_COUNT_HW_CPU_CYCLES] = 0x1e,
|
||||
[PERF_COUNT_HW_INSTRUCTIONS] = 2,
|
||||
[PERF_COUNT_HW_CACHE_REFERENCES] = 0x280030, /* LD_REF_L1 */
|
||||
[PERF_COUNT_HW_CACHE_MISSES] = 0x30000c, /* LD_MISS_L1 */
|
||||
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x410a0, /* BR_PRED */
|
||||
[PERF_COUNT_HW_BRANCH_MISSES] = 0x400052, /* BR_MPRED */
|
||||
};
|
||||
|
||||
#define C(x) PERF_COUNT_HW_CACHE_##x
|
||||
|
||||
/*
|
||||
* Table of generalized cache-related events.
|
||||
* 0 means not supported, -1 means nonsensical, other values
|
||||
* are event codes.
|
||||
* The "DTLB" and "ITLB" events relate to the DERAT and IERAT.
|
||||
*/
|
||||
static int power6_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
|
||||
[C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0x280030, 0x80080 },
|
||||
[C(OP_WRITE)] = { 0x180032, 0x80088 },
|
||||
[C(OP_PREFETCH)] = { 0x810a4, 0 },
|
||||
},
|
||||
[C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0, 0x100056 },
|
||||
[C(OP_WRITE)] = { -1, -1 },
|
||||
[C(OP_PREFETCH)] = { 0x4008c, 0 },
|
||||
},
|
||||
[C(LL)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0x150730, 0x250532 },
|
||||
[C(OP_WRITE)] = { 0x250432, 0x150432 },
|
||||
[C(OP_PREFETCH)] = { 0x810a6, 0 },
|
||||
},
|
||||
[C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0, 0x20000e },
|
||||
[C(OP_WRITE)] = { -1, -1 },
|
||||
[C(OP_PREFETCH)] = { -1, -1 },
|
||||
},
|
||||
[C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0, 0x420ce },
|
||||
[C(OP_WRITE)] = { -1, -1 },
|
||||
[C(OP_PREFETCH)] = { -1, -1 },
|
||||
},
|
||||
[C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0x430e6, 0x400052 },
|
||||
[C(OP_WRITE)] = { -1, -1 },
|
||||
[C(OP_PREFETCH)] = { -1, -1 },
|
||||
},
|
||||
[C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { -1, -1 },
|
||||
[C(OP_WRITE)] = { -1, -1 },
|
||||
[C(OP_PREFETCH)] = { -1, -1 },
|
||||
},
|
||||
};
|
||||
|
||||
static struct power_pmu power6_pmu = {
|
||||
.name = "POWER6",
|
||||
.n_counter = 6,
|
||||
.max_alternatives = MAX_ALT,
|
||||
.add_fields = 0x1555,
|
||||
.test_adder = 0x3000,
|
||||
.compute_mmcr = p6_compute_mmcr,
|
||||
.get_constraint = p6_get_constraint,
|
||||
.get_alternatives = p6_get_alternatives,
|
||||
.disable_pmc = p6_disable_pmc,
|
||||
.limited_pmc_event = p6_limited_pmc_event,
|
||||
.flags = PPMU_LIMITED_PMC5_6 | PPMU_ALT_SIPR,
|
||||
.n_generic = ARRAY_SIZE(power6_generic_events),
|
||||
.generic_events = power6_generic_events,
|
||||
.cache_events = &power6_cache_events,
|
||||
};
|
||||
|
||||
static int __init init_power6_pmu(void)
|
||||
{
|
||||
if (!cur_cpu_spec->oprofile_cpu_type ||
|
||||
strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power6"))
|
||||
return -ENODEV;
|
||||
|
||||
return register_power_pmu(&power6_pmu);
|
||||
}
|
||||
|
||||
early_initcall(init_power6_pmu);
|
558
arch/powerpc/perf/power7-events-list.h
Normal file
@@ -0,0 +1,558 @@
/*
|
||||
* Performance counter support for POWER7 processors.
|
||||
*
|
||||
* Copyright 2013 Runzhen Wang, IBM Corporation.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
EVENT(PM_IC_DEMAND_L2_BR_ALL, 0x04898)
|
||||
EVENT(PM_GCT_UTIL_7_TO_10_SLOTS, 0x020a0)
|
||||
EVENT(PM_PMC2_SAVED, 0x10022)
|
||||
EVENT(PM_CMPLU_STALL_DFU, 0x2003c)
|
||||
EVENT(PM_VSU0_16FLOP, 0x0a0a4)
|
||||
EVENT(PM_MRK_LSU_DERAT_MISS, 0x3d05a)
|
||||
EVENT(PM_MRK_ST_CMPL, 0x10034)
|
||||
EVENT(PM_NEST_PAIR3_ADD, 0x40881)
|
||||
EVENT(PM_L2_ST_DISP, 0x46180)
|
||||
EVENT(PM_L2_CASTOUT_MOD, 0x16180)
|
||||
EVENT(PM_ISEG, 0x020a4)
|
||||
EVENT(PM_MRK_INST_TIMEO, 0x40034)
|
||||
EVENT(PM_L2_RCST_DISP_FAIL_ADDR, 0x36282)
|
||||
EVENT(PM_LSU1_DC_PREF_STREAM_CONFIRM, 0x0d0b6)
|
||||
EVENT(PM_IERAT_WR_64K, 0x040be)
|
||||
EVENT(PM_MRK_DTLB_MISS_16M, 0x4d05e)
|
||||
EVENT(PM_IERAT_MISS, 0x100f6)
|
||||
EVENT(PM_MRK_PTEG_FROM_LMEM, 0x4d052)
|
||||
EVENT(PM_FLOP, 0x100f4)
|
||||
EVENT(PM_THRD_PRIO_4_5_CYC, 0x040b4)
|
||||
EVENT(PM_BR_PRED_TA, 0x040aa)
|
||||
EVENT(PM_CMPLU_STALL_FXU, 0x20014)
|
||||
EVENT(PM_EXT_INT, 0x200f8)
|
||||
EVENT(PM_VSU_FSQRT_FDIV, 0x0a888)
|
||||
EVENT(PM_MRK_LD_MISS_EXPOSED_CYC, 0x1003e)
|
||||
EVENT(PM_LSU1_LDF, 0x0c086)
|
||||
EVENT(PM_IC_WRITE_ALL, 0x0488c)
|
||||
EVENT(PM_LSU0_SRQ_STFWD, 0x0c0a0)
|
||||
EVENT(PM_PTEG_FROM_RL2L3_MOD, 0x1c052)
|
||||
EVENT(PM_MRK_DATA_FROM_L31_SHR, 0x1d04e)
|
||||
EVENT(PM_DATA_FROM_L21_MOD, 0x3c046)
|
||||
EVENT(PM_VSU1_SCAL_DOUBLE_ISSUED, 0x0b08a)
|
||||
EVENT(PM_VSU0_8FLOP, 0x0a0a0)
|
||||
EVENT(PM_POWER_EVENT1, 0x1006e)
|
||||
EVENT(PM_DISP_CLB_HELD_BAL, 0x02092)
|
||||
EVENT(PM_VSU1_2FLOP, 0x0a09a)
|
||||
EVENT(PM_LWSYNC_HELD, 0x0209a)
|
||||
EVENT(PM_PTEG_FROM_DL2L3_SHR, 0x3c054)
|
||||
EVENT(PM_INST_FROM_L21_MOD, 0x34046)
|
||||
EVENT(PM_IERAT_XLATE_WR_16MPLUS, 0x040bc)
|
||||
EVENT(PM_IC_REQ_ALL, 0x04888)
|
||||
EVENT(PM_DSLB_MISS, 0x0d090)
|
||||
EVENT(PM_L3_MISS, 0x1f082)
|
||||
EVENT(PM_LSU0_L1_PREF, 0x0d0b8)
|
||||
EVENT(PM_VSU_SCALAR_SINGLE_ISSUED, 0x0b884)
|
||||
EVENT(PM_LSU1_DC_PREF_STREAM_CONFIRM_STRIDE, 0x0d0be)
|
||||
EVENT(PM_L2_INST, 0x36080)
|
||||
EVENT(PM_VSU0_FRSP, 0x0a0b4)
|
||||
EVENT(PM_FLUSH_DISP, 0x02082)
|
||||
EVENT(PM_PTEG_FROM_L2MISS, 0x4c058)
|
||||
EVENT(PM_VSU1_DQ_ISSUED, 0x0b09a)
|
||||
EVENT(PM_CMPLU_STALL_LSU, 0x20012)
|
||||
EVENT(PM_MRK_DATA_FROM_DMEM, 0x1d04a)
|
||||
EVENT(PM_LSU_FLUSH_ULD, 0x0c8b0)
|
||||
EVENT(PM_PTEG_FROM_LMEM, 0x4c052)
|
||||
EVENT(PM_MRK_DERAT_MISS_16M, 0x3d05c)
|
||||
EVENT(PM_THRD_ALL_RUN_CYC, 0x2000c)
|
||||
EVENT(PM_MEM0_PREFETCH_DISP, 0x20083)
|
||||
EVENT(PM_MRK_STALL_CMPLU_CYC_COUNT, 0x3003f)
|
||||
EVENT(PM_DATA_FROM_DL2L3_MOD, 0x3c04c)
|
||||
EVENT(PM_VSU_FRSP, 0x0a8b4)
|
||||
EVENT(PM_MRK_DATA_FROM_L21_MOD, 0x3d046)
|
||||
EVENT(PM_PMC1_OVERFLOW, 0x20010)
|
||||
EVENT(PM_VSU0_SINGLE, 0x0a0a8)
|
||||
EVENT(PM_MRK_PTEG_FROM_L3MISS, 0x2d058)
|
||||
EVENT(PM_MRK_PTEG_FROM_L31_SHR, 0x2d056)
|
||||
EVENT(PM_VSU0_VECTOR_SP_ISSUED, 0x0b090)
|
||||
EVENT(PM_VSU1_FEST, 0x0a0ba)
|
||||
EVENT(PM_MRK_INST_DISP, 0x20030)
|
||||
EVENT(PM_VSU0_COMPLEX_ISSUED, 0x0b096)
|
||||
EVENT(PM_LSU1_FLUSH_UST, 0x0c0b6)
|
||||
EVENT(PM_INST_CMPL, 0x00002)
|
||||
EVENT(PM_FXU_IDLE, 0x1000e)
|
||||
EVENT(PM_LSU0_FLUSH_ULD, 0x0c0b0)
|
||||
EVENT(PM_MRK_DATA_FROM_DL2L3_MOD, 0x3d04c)
|
||||
EVENT(PM_LSU_LMQ_SRQ_EMPTY_ALL_CYC, 0x3001c)
|
||||
EVENT(PM_LSU1_REJECT_LMQ_FULL, 0x0c0a6)
|
||||
EVENT(PM_INST_PTEG_FROM_L21_MOD, 0x3e056)
|
||||
EVENT(PM_INST_FROM_RL2L3_MOD, 0x14042)
|
||||
EVENT(PM_SHL_CREATED, 0x05082)
|
||||
EVENT(PM_L2_ST_HIT, 0x46182)
|
||||
EVENT(PM_DATA_FROM_DMEM, 0x1c04a)
|
||||
EVENT(PM_L3_LD_MISS, 0x2f082)
|
||||
EVENT(PM_FXU1_BUSY_FXU0_IDLE, 0x4000e)
|
||||
EVENT(PM_DISP_CLB_HELD_RES, 0x02094)
|
||||
EVENT(PM_L2_SN_SX_I_DONE, 0x36382)
|
||||
EVENT(PM_GRP_CMPL, 0x30004)
|
||||
EVENT(PM_STCX_CMPL, 0x0c098)
|
||||
EVENT(PM_VSU0_2FLOP, 0x0a098)
|
||||
EVENT(PM_L3_PREF_MISS, 0x3f082)
|
||||
EVENT(PM_LSU_SRQ_SYNC_CYC, 0x0d096)
|
||||
EVENT(PM_LSU_REJECT_ERAT_MISS, 0x20064)
|
||||
EVENT(PM_L1_ICACHE_MISS, 0x200fc)
|
||||
EVENT(PM_LSU1_FLUSH_SRQ, 0x0c0be)
|
||||
EVENT(PM_LD_REF_L1_LSU0, 0x0c080)
|
||||
EVENT(PM_VSU0_FEST, 0x0a0b8)
|
||||
EVENT(PM_VSU_VECTOR_SINGLE_ISSUED, 0x0b890)
|
||||
EVENT(PM_FREQ_UP, 0x4000c)
|
||||
EVENT(PM_DATA_FROM_LMEM, 0x3c04a)
|
||||
EVENT(PM_LSU1_LDX, 0x0c08a)
|
||||
EVENT(PM_PMC3_OVERFLOW, 0x40010)
|
||||
EVENT(PM_MRK_BR_MPRED, 0x30036)
|
||||
EVENT(PM_SHL_MATCH, 0x05086)
|
||||
EVENT(PM_MRK_BR_TAKEN, 0x10036)
|
||||
EVENT(PM_CMPLU_STALL_BRU, 0x4004e)
|
||||
EVENT(PM_ISLB_MISS, 0x0d092)
|
||||
EVENT(PM_CYC, 0x0001e)
|
||||
EVENT(PM_DISP_HELD_THERMAL, 0x30006)
|
||||
EVENT(PM_INST_PTEG_FROM_RL2L3_SHR, 0x2e054)
|
||||
EVENT(PM_LSU1_SRQ_STFWD, 0x0c0a2)
|
||||
EVENT(PM_GCT_NOSLOT_BR_MPRED, 0x4001a)
|
||||
EVENT(PM_1PLUS_PPC_CMPL, 0x100f2)
|
||||
EVENT(PM_PTEG_FROM_DMEM, 0x2c052)
|
||||
EVENT(PM_VSU_2FLOP, 0x0a898)
|
||||
EVENT(PM_GCT_FULL_CYC, 0x04086)
|
||||
EVENT(PM_MRK_DATA_FROM_L3_CYC, 0x40020)
|
||||
EVENT(PM_LSU_SRQ_S0_ALLOC, 0x0d09d)
|
||||
EVENT(PM_MRK_DERAT_MISS_4K, 0x1d05c)
|
||||
EVENT(PM_BR_MPRED_TA, 0x040ae)
|
||||
EVENT(PM_INST_PTEG_FROM_L2MISS, 0x4e058)
|
||||
EVENT(PM_DPU_HELD_POWER, 0x20006)
|
||||
EVENT(PM_RUN_INST_CMPL, 0x400fa)
|
||||
EVENT(PM_MRK_VSU_FIN, 0x30032)
|
||||
EVENT(PM_LSU_SRQ_S0_VALID, 0x0d09c)
|
||||
EVENT(PM_GCT_EMPTY_CYC, 0x20008)
|
||||
EVENT(PM_IOPS_DISP, 0x30014)
|
||||
EVENT(PM_RUN_SPURR, 0x10008)
|
||||
EVENT(PM_PTEG_FROM_L21_MOD, 0x3c056)
|
||||
EVENT(PM_VSU0_1FLOP, 0x0a080)
|
||||
EVENT(PM_SNOOP_TLBIE, 0x0d0b2)
|
||||
EVENT(PM_DATA_FROM_L3MISS, 0x2c048)
|
||||
EVENT(PM_VSU_SINGLE, 0x0a8a8)
|
||||
EVENT(PM_DTLB_MISS_16G, 0x1c05e)
|
||||
EVENT(PM_CMPLU_STALL_VECTOR, 0x2001c)
|
||||
EVENT(PM_FLUSH, 0x400f8)
|
||||
EVENT(PM_L2_LD_HIT, 0x36182)
|
||||
EVENT(PM_NEST_PAIR2_AND, 0x30883)
|
||||
EVENT(PM_VSU1_1FLOP, 0x0a082)
|
||||
EVENT(PM_IC_PREF_REQ, 0x0408a)
|
||||
EVENT(PM_L3_LD_HIT, 0x2f080)
|
||||
EVENT(PM_GCT_NOSLOT_IC_MISS, 0x2001a)
|
||||
EVENT(PM_DISP_HELD, 0x10006)
|
||||
EVENT(PM_L2_LD, 0x16080)
|
||||
EVENT(PM_LSU_FLUSH_SRQ, 0x0c8bc)
|
||||
EVENT(PM_BC_PLUS_8_CONV, 0x040b8)
|
||||
EVENT(PM_MRK_DATA_FROM_L31_MOD_CYC, 0x40026)
|
||||
EVENT(PM_CMPLU_STALL_VECTOR_LONG, 0x4004a)
|
||||
EVENT(PM_L2_RCST_BUSY_RC_FULL, 0x26282)
|
||||
EVENT(PM_TB_BIT_TRANS, 0x300f8)
|
||||
EVENT(PM_THERMAL_MAX, 0x40006)
|
||||
EVENT(PM_LSU1_FLUSH_ULD, 0x0c0b2)
|
||||
EVENT(PM_LSU1_REJECT_LHS, 0x0c0ae)
|
||||
EVENT(PM_LSU_LRQ_S0_ALLOC, 0x0d09f)
|
||||
EVENT(PM_L3_CO_L31, 0x4f080)
|
||||
EVENT(PM_POWER_EVENT4, 0x4006e)
|
||||
EVENT(PM_DATA_FROM_L31_SHR, 0x1c04e)
|
||||
EVENT(PM_BR_UNCOND, 0x0409e)
|
||||
EVENT(PM_LSU1_DC_PREF_STREAM_ALLOC, 0x0d0aa)
|
||||
EVENT(PM_PMC4_REWIND, 0x10020)
|
||||
EVENT(PM_L2_RCLD_DISP, 0x16280)
|
||||
EVENT(PM_THRD_PRIO_2_3_CYC, 0x040b2)
|
||||
EVENT(PM_MRK_PTEG_FROM_L2MISS, 0x4d058)
|
||||
EVENT(PM_IC_DEMAND_L2_BHT_REDIRECT, 0x04098)
|
||||
EVENT(PM_LSU_DERAT_MISS, 0x200f6)
|
||||
EVENT(PM_IC_PREF_CANCEL_L2, 0x04094)
|
||||
EVENT(PM_MRK_FIN_STALL_CYC_COUNT, 0x1003d)
|
||||
EVENT(PM_BR_PRED_CCACHE, 0x040a0)
|
||||
EVENT(PM_GCT_UTIL_1_TO_2_SLOTS, 0x0209c)
|
||||
EVENT(PM_MRK_ST_CMPL_INT, 0x30034)
|
||||
EVENT(PM_LSU_TWO_TABLEWALK_CYC, 0x0d0a6)
|
||||
EVENT(PM_MRK_DATA_FROM_L3MISS, 0x2d048)
|
||||
EVENT(PM_GCT_NOSLOT_CYC, 0x100f8)
|
||||
EVENT(PM_LSU_SET_MPRED, 0x0c0a8)
|
||||
EVENT(PM_FLUSH_DISP_TLBIE, 0x0208a)
|
||||
EVENT(PM_VSU1_FCONV, 0x0a0b2)
|
||||
EVENT(PM_DERAT_MISS_16G, 0x4c05c)
|
||||
EVENT(PM_INST_FROM_LMEM, 0x3404a)
|
||||
EVENT(PM_IC_DEMAND_L2_BR_REDIRECT, 0x0409a)
|
||||
EVENT(PM_CMPLU_STALL_SCALAR_LONG, 0x20018)
|
||||
EVENT(PM_INST_PTEG_FROM_L2, 0x1e050)
|
||||
EVENT(PM_PTEG_FROM_L2, 0x1c050)
|
||||
EVENT(PM_MRK_DATA_FROM_L21_SHR_CYC, 0x20024)
|
||||
EVENT(PM_MRK_DTLB_MISS_4K, 0x2d05a)
|
||||
EVENT(PM_VSU0_FPSCR, 0x0b09c)
|
||||
EVENT(PM_VSU1_VECT_DOUBLE_ISSUED, 0x0b082)
|
||||
EVENT(PM_MRK_PTEG_FROM_RL2L3_MOD, 0x1d052)
|
||||
EVENT(PM_MEM0_RQ_DISP, 0x10083)
|
||||
EVENT(PM_L2_LD_MISS, 0x26080)
|
||||
EVENT(PM_VMX_RESULT_SAT_1, 0x0b0a0)
|
||||
EVENT(PM_L1_PREF, 0x0d8b8)
|
||||
EVENT(PM_MRK_DATA_FROM_LMEM_CYC, 0x2002c)
|
||||
EVENT(PM_GRP_IC_MISS_NONSPEC, 0x1000c)
|
||||
EVENT(PM_PB_NODE_PUMP, 0x10081)
|
||||
EVENT(PM_SHL_MERGED, 0x05084)
|
||||
EVENT(PM_NEST_PAIR1_ADD, 0x20881)
|
||||
EVENT(PM_DATA_FROM_L3, 0x1c048)
|
||||
EVENT(PM_LSU_FLUSH, 0x0208e)
|
||||
EVENT(PM_LSU_SRQ_SYNC_COUNT, 0x0d097)
|
||||
EVENT(PM_PMC2_OVERFLOW, 0x30010)
|
||||
EVENT(PM_LSU_LDF, 0x0c884)
|
||||
EVENT(PM_POWER_EVENT3, 0x3006e)
|
||||
EVENT(PM_DISP_WT, 0x30008)
|
||||
EVENT(PM_CMPLU_STALL_REJECT, 0x40016)
|
||||
EVENT(PM_IC_BANK_CONFLICT, 0x04082)
|
||||
EVENT(PM_BR_MPRED_CR_TA, 0x048ae)
|
||||
EVENT(PM_L2_INST_MISS, 0x36082)
|
||||
EVENT(PM_CMPLU_STALL_ERAT_MISS, 0x40018)
|
||||
EVENT(PM_NEST_PAIR2_ADD, 0x30881)
|
||||
EVENT(PM_MRK_LSU_FLUSH, 0x0d08c)
|
||||
EVENT(PM_L2_LDST, 0x16880)
|
||||
EVENT(PM_INST_FROM_L31_SHR, 0x1404e)
|
||||
EVENT(PM_VSU0_FIN, 0x0a0bc)
|
||||
EVENT(PM_LARX_LSU, 0x0c894)
|
||||
EVENT(PM_INST_FROM_RMEM, 0x34042)
|
||||
EVENT(PM_DISP_CLB_HELD_TLBIE, 0x02096)
|
||||
EVENT(PM_MRK_DATA_FROM_DMEM_CYC, 0x2002e)
|
||||
EVENT(PM_BR_PRED_CR, 0x040a8)
|
||||
EVENT(PM_LSU_REJECT, 0x10064)
|
||||
EVENT(PM_GCT_UTIL_3_TO_6_SLOTS, 0x0209e)
|
||||
EVENT(PM_CMPLU_STALL_END_GCT_NOSLOT, 0x10028)
|
||||
EVENT(PM_LSU0_REJECT_LMQ_FULL, 0x0c0a4)
|
||||
EVENT(PM_VSU_FEST, 0x0a8b8)
|
||||
EVENT(PM_NEST_PAIR0_AND, 0x10883)
|
||||
EVENT(PM_PTEG_FROM_L3, 0x2c050)
|
||||
EVENT(PM_POWER_EVENT2, 0x2006e)
|
||||
EVENT(PM_IC_PREF_CANCEL_PAGE, 0x04090)
|
||||
EVENT(PM_VSU0_FSQRT_FDIV, 0x0a088)
|
||||
EVENT(PM_MRK_GRP_CMPL, 0x40030)
|
||||
EVENT(PM_VSU0_SCAL_DOUBLE_ISSUED, 0x0b088)
|
||||
EVENT(PM_GRP_DISP, 0x3000a)
|
||||
EVENT(PM_LSU0_LDX, 0x0c088)
|
||||
EVENT(PM_DATA_FROM_L2, 0x1c040)
|
||||
EVENT(PM_MRK_DATA_FROM_RL2L3_MOD, 0x1d042)
|
||||
EVENT(PM_LD_REF_L1, 0x0c880)
|
||||
EVENT(PM_VSU0_VECT_DOUBLE_ISSUED, 0x0b080)
|
||||
EVENT(PM_VSU1_2FLOP_DOUBLE, 0x0a08e)
|
||||
EVENT(PM_THRD_PRIO_6_7_CYC, 0x040b6)
|
||||
EVENT(PM_BC_PLUS_8_RSLV_TAKEN, 0x040ba)
|
||||
EVENT(PM_BR_MPRED_CR, 0x040ac)
|
||||
EVENT(PM_L3_CO_MEM, 0x4f082)
|
||||
EVENT(PM_LD_MISS_L1, 0x400f0)
|
||||
EVENT(PM_DATA_FROM_RL2L3_MOD, 0x1c042)
|
||||
EVENT(PM_LSU_SRQ_FULL_CYC, 0x1001a)
|
||||
EVENT(PM_TABLEWALK_CYC, 0x10026)
|
||||
EVENT(PM_MRK_PTEG_FROM_RMEM, 0x3d052)
|
||||
EVENT(PM_LSU_SRQ_STFWD, 0x0c8a0)
|
||||
EVENT(PM_INST_PTEG_FROM_RMEM, 0x3e052)
|
||||
EVENT(PM_FXU0_FIN, 0x10004)
|
||||
EVENT(PM_LSU1_L1_SW_PREF, 0x0c09e)
|
||||
EVENT(PM_PTEG_FROM_L31_MOD, 0x1c054)
|
||||
EVENT(PM_PMC5_OVERFLOW, 0x10024)
|
||||
EVENT(PM_LD_REF_L1_LSU1, 0x0c082)
|
||||
EVENT(PM_INST_PTEG_FROM_L21_SHR, 0x4e056)
|
||||
EVENT(PM_CMPLU_STALL_THRD, 0x1001c)
|
||||
EVENT(PM_DATA_FROM_RMEM, 0x3c042)
|
||||
EVENT(PM_VSU0_SCAL_SINGLE_ISSUED, 0x0b084)
|
||||
EVENT(PM_BR_MPRED_LSTACK, 0x040a6)
|
||||
EVENT(PM_MRK_DATA_FROM_RL2L3_MOD_CYC, 0x40028)
|
||||
EVENT(PM_LSU0_FLUSH_UST, 0x0c0b4)
|
||||
EVENT(PM_LSU_NCST, 0x0c090)
|
||||
EVENT(PM_BR_TAKEN, 0x20004)
|
||||
EVENT(PM_INST_PTEG_FROM_LMEM, 0x4e052)
|
||||
EVENT(PM_GCT_NOSLOT_BR_MPRED_IC_MISS, 0x4001c)
|
||||
EVENT(PM_DTLB_MISS_4K, 0x2c05a)
|
||||
EVENT(PM_PMC4_SAVED, 0x30022)
|
||||
EVENT(PM_VSU1_PERMUTE_ISSUED, 0x0b092)
|
||||
EVENT(PM_SLB_MISS, 0x0d890)
|
||||
EVENT(PM_LSU1_FLUSH_LRQ, 0x0c0ba)
|
||||
EVENT(PM_DTLB_MISS, 0x300fc)
|
||||
EVENT(PM_VSU1_FRSP, 0x0a0b6)
|
||||
EVENT(PM_VSU_VECTOR_DOUBLE_ISSUED, 0x0b880)
|
||||
EVENT(PM_L2_CASTOUT_SHR, 0x16182)
|
||||
EVENT(PM_DATA_FROM_DL2L3_SHR, 0x3c044)
|
||||
EVENT(PM_VSU1_STF, 0x0b08e)
|
||||
EVENT(PM_ST_FIN, 0x200f0)
|
||||
EVENT(PM_PTEG_FROM_L21_SHR, 0x4c056)
|
||||
EVENT(PM_L2_LOC_GUESS_WRONG, 0x26480)
|
||||
EVENT(PM_MRK_STCX_FAIL, 0x0d08e)
|
||||
EVENT(PM_LSU0_REJECT_LHS, 0x0c0ac)
|
||||
EVENT(PM_IC_PREF_CANCEL_HIT, 0x04092)
|
||||
EVENT(PM_L3_PREF_BUSY, 0x4f080)
|
||||
EVENT(PM_MRK_BRU_FIN, 0x2003a)
|
||||
EVENT(PM_LSU1_NCLD, 0x0c08e)
|
||||
EVENT(PM_INST_PTEG_FROM_L31_MOD, 0x1e054)
|
||||
EVENT(PM_LSU_NCLD, 0x0c88c)
|
||||
EVENT(PM_LSU_LDX, 0x0c888)
|
||||
EVENT(PM_L2_LOC_GUESS_CORRECT, 0x16480)
|
||||
EVENT(PM_THRESH_TIMEO, 0x10038)
|
||||
EVENT(PM_L3_PREF_ST, 0x0d0ae)
|
||||
EVENT(PM_DISP_CLB_HELD_SYNC, 0x02098)
|
||||
EVENT(PM_VSU_SIMPLE_ISSUED, 0x0b894)
|
||||
EVENT(PM_VSU1_SINGLE, 0x0a0aa)
|
||||
EVENT(PM_DATA_TABLEWALK_CYC, 0x3001a)
|
||||
EVENT(PM_L2_RC_ST_DONE, 0x36380)
|
||||
EVENT(PM_MRK_PTEG_FROM_L21_MOD, 0x3d056)
|
||||
EVENT(PM_LARX_LSU1, 0x0c096)
|
||||
EVENT(PM_MRK_DATA_FROM_RMEM, 0x3d042)
|
||||
EVENT(PM_DISP_CLB_HELD, 0x02090)
|
||||
EVENT(PM_DERAT_MISS_4K, 0x1c05c)
|
||||
EVENT(PM_L2_RCLD_DISP_FAIL_ADDR, 0x16282)
|
||||
EVENT(PM_SEG_EXCEPTION, 0x028a4)
|
||||
EVENT(PM_FLUSH_DISP_SB, 0x0208c)
|
||||
EVENT(PM_L2_DC_INV, 0x26182)
|
||||
EVENT(PM_PTEG_FROM_DL2L3_MOD, 0x4c054)
|
||||
EVENT(PM_DSEG, 0x020a6)
|
||||
EVENT(PM_BR_PRED_LSTACK, 0x040a2)
|
||||
EVENT(PM_VSU0_STF, 0x0b08c)
|
||||
EVENT(PM_LSU_FX_FIN, 0x10066)
|
||||
EVENT(PM_DERAT_MISS_16M, 0x3c05c)
|
||||
EVENT(PM_MRK_PTEG_FROM_DL2L3_MOD, 0x4d054)
|
||||
EVENT(PM_GCT_UTIL_11_PLUS_SLOTS, 0x020a2)
|
||||
EVENT(PM_INST_FROM_L3, 0x14048)
|
||||
EVENT(PM_MRK_IFU_FIN, 0x3003a)
|
||||
EVENT(PM_ITLB_MISS, 0x400fc)
|
||||
EVENT(PM_VSU_STF, 0x0b88c)
|
||||
EVENT(PM_LSU_FLUSH_UST, 0x0c8b4)
|
||||
EVENT(PM_L2_LDST_MISS, 0x26880)
|
||||
EVENT(PM_FXU1_FIN, 0x40004)
|
||||
EVENT(PM_SHL_DEALLOCATED, 0x05080)
|
||||
EVENT(PM_L2_SN_M_WR_DONE, 0x46382)
|
||||
EVENT(PM_LSU_REJECT_SET_MPRED, 0x0c8a8)
|
||||
EVENT(PM_L3_PREF_LD, 0x0d0ac)
|
||||
EVENT(PM_L2_SN_M_RD_DONE, 0x46380)
|
||||
EVENT(PM_MRK_DERAT_MISS_16G, 0x4d05c)
|
||||
EVENT(PM_VSU_FCONV, 0x0a8b0)
|
||||
EVENT(PM_ANY_THRD_RUN_CYC, 0x100fa)
|
||||
EVENT(PM_LSU_LMQ_FULL_CYC, 0x0d0a4)
|
||||
EVENT(PM_MRK_LSU_REJECT_LHS, 0x0d082)
|
||||
EVENT(PM_MRK_LD_MISS_L1_CYC, 0x4003e)
|
||||
EVENT(PM_MRK_DATA_FROM_L2_CYC, 0x20020)
|
||||
EVENT(PM_INST_IMC_MATCH_DISP, 0x30016)
|
||||
EVENT(PM_MRK_DATA_FROM_RMEM_CYC, 0x4002c)
|
||||
EVENT(PM_VSU0_SIMPLE_ISSUED, 0x0b094)
|
||||
EVENT(PM_CMPLU_STALL_DIV, 0x40014)
|
||||
EVENT(PM_MRK_PTEG_FROM_RL2L3_SHR, 0x2d054)
|
||||
EVENT(PM_VSU_FMA_DOUBLE, 0x0a890)
|
||||
EVENT(PM_VSU_4FLOP, 0x0a89c)
|
||||
EVENT(PM_VSU1_FIN, 0x0a0be)
|
||||
EVENT(PM_NEST_PAIR1_AND, 0x20883)
|
||||
EVENT(PM_INST_PTEG_FROM_RL2L3_MOD, 0x1e052)
|
||||
EVENT(PM_RUN_CYC, 0x200f4)
|
||||
EVENT(PM_PTEG_FROM_RMEM, 0x3c052)
|
||||
EVENT(PM_LSU_LRQ_S0_VALID, 0x0d09e)
|
||||
EVENT(PM_LSU0_LDF, 0x0c084)
|
||||
EVENT(PM_FLUSH_COMPLETION, 0x30012)
|
||||
EVENT(PM_ST_MISS_L1, 0x300f0)
|
||||
EVENT(PM_L2_NODE_PUMP, 0x36480)
|
||||
EVENT(PM_INST_FROM_DL2L3_SHR, 0x34044)
|
||||
EVENT(PM_MRK_STALL_CMPLU_CYC, 0x3003e)
|
||||
EVENT(PM_VSU1_DENORM, 0x0a0ae)
|
||||
EVENT(PM_MRK_DATA_FROM_L31_SHR_CYC, 0x20026)
|
||||
EVENT(PM_NEST_PAIR0_ADD, 0x10881)
|
||||
EVENT(PM_INST_FROM_L3MISS, 0x24048)
|
||||
EVENT(PM_EE_OFF_EXT_INT, 0x02080)
|
||||
EVENT(PM_INST_PTEG_FROM_DMEM, 0x2e052)
|
||||
EVENT(PM_INST_FROM_DL2L3_MOD, 0x3404c)
|
||||
EVENT(PM_PMC6_OVERFLOW, 0x30024)
|
||||
EVENT(PM_VSU_2FLOP_DOUBLE, 0x0a88c)
|
||||
EVENT(PM_TLB_MISS, 0x20066)
|
||||
EVENT(PM_FXU_BUSY, 0x2000e)
|
||||
EVENT(PM_L2_RCLD_DISP_FAIL_OTHER, 0x26280)
|
||||
EVENT(PM_LSU_REJECT_LMQ_FULL, 0x0c8a4)
|
||||
EVENT(PM_IC_RELOAD_SHR, 0x04096)
|
||||
EVENT(PM_GRP_MRK, 0x10031)
|
||||
EVENT(PM_MRK_ST_NEST, 0x20034)
|
||||
EVENT(PM_VSU1_FSQRT_FDIV, 0x0a08a)
|
||||
EVENT(PM_LSU0_FLUSH_LRQ, 0x0c0b8)
|
||||
EVENT(PM_LARX_LSU0, 0x0c094)
|
||||
EVENT(PM_IBUF_FULL_CYC, 0x04084)
|
||||
EVENT(PM_MRK_DATA_FROM_DL2L3_SHR_CYC, 0x2002a)
|
||||
EVENT(PM_LSU_DC_PREF_STREAM_ALLOC, 0x0d8a8)
|
||||
EVENT(PM_GRP_MRK_CYC, 0x10030)
|
||||
EVENT(PM_MRK_DATA_FROM_RL2L3_SHR_CYC, 0x20028)
|
||||
EVENT(PM_L2_GLOB_GUESS_CORRECT, 0x16482)
|
||||
EVENT(PM_LSU_REJECT_LHS, 0x0c8ac)
|
||||
EVENT(PM_MRK_DATA_FROM_LMEM, 0x3d04a)
|
||||
EVENT(PM_INST_PTEG_FROM_L3, 0x2e050)
|
||||
EVENT(PM_FREQ_DOWN, 0x3000c)
|
||||
EVENT(PM_PB_RETRY_NODE_PUMP, 0x30081)
|
||||
EVENT(PM_INST_FROM_RL2L3_SHR, 0x1404c)
|
||||
EVENT(PM_MRK_INST_ISSUED, 0x10032)
|
||||
EVENT(PM_PTEG_FROM_L3MISS, 0x2c058)
|
||||
EVENT(PM_RUN_PURR, 0x400f4)
|
||||
EVENT(PM_MRK_GRP_IC_MISS, 0x40038)
|
||||
EVENT(PM_MRK_DATA_FROM_L3, 0x1d048)
|
||||
EVENT(PM_CMPLU_STALL_DCACHE_MISS, 0x20016)
|
||||
EVENT(PM_PTEG_FROM_RL2L3_SHR, 0x2c054)
|
||||
EVENT(PM_LSU_FLUSH_LRQ, 0x0c8b8)
|
||||
EVENT(PM_MRK_DERAT_MISS_64K, 0x2d05c)
|
||||
EVENT(PM_INST_PTEG_FROM_DL2L3_MOD, 0x4e054)
|
||||
EVENT(PM_L2_ST_MISS, 0x26082)
|
||||
EVENT(PM_MRK_PTEG_FROM_L21_SHR, 0x4d056)
|
||||
EVENT(PM_LWSYNC, 0x0d094)
|
||||
EVENT(PM_LSU0_DC_PREF_STREAM_CONFIRM_STRIDE, 0x0d0bc)
|
||||
EVENT(PM_MRK_LSU_FLUSH_LRQ, 0x0d088)
|
||||
EVENT(PM_INST_IMC_MATCH_CMPL, 0x100f0)
|
||||
EVENT(PM_NEST_PAIR3_AND, 0x40883)
|
||||
EVENT(PM_PB_RETRY_SYS_PUMP, 0x40081)
|
||||
EVENT(PM_MRK_INST_FIN, 0x30030)
|
||||
EVENT(PM_MRK_PTEG_FROM_DL2L3_SHR, 0x3d054)
|
||||
EVENT(PM_INST_FROM_L31_MOD, 0x14044)
|
||||
EVENT(PM_MRK_DTLB_MISS_64K, 0x3d05e)
|
||||
EVENT(PM_LSU_FIN, 0x30066)
|
||||
EVENT(PM_MRK_LSU_REJECT, 0x40064)
|
||||
EVENT(PM_L2_CO_FAIL_BUSY, 0x16382)
|
||||
EVENT(PM_MEM0_WQ_DISP, 0x40083)
|
||||
EVENT(PM_DATA_FROM_L31_MOD, 0x1c044)
|
||||
EVENT(PM_THERMAL_WARN, 0x10016)
|
||||
EVENT(PM_VSU0_4FLOP, 0x0a09c)
|
||||
EVENT(PM_BR_MPRED_CCACHE, 0x040a4)
|
||||
EVENT(PM_CMPLU_STALL_IFU, 0x4004c)
|
||||
EVENT(PM_L1_DEMAND_WRITE, 0x0408c)
|
||||
EVENT(PM_FLUSH_BR_MPRED, 0x02084)
|
||||
EVENT(PM_MRK_DTLB_MISS_16G, 0x1d05e)
|
||||
EVENT(PM_MRK_PTEG_FROM_DMEM, 0x2d052)
|
||||
EVENT(PM_L2_RCST_DISP, 0x36280)
|
||||
EVENT(PM_CMPLU_STALL, 0x4000a)
|
||||
EVENT(PM_LSU_PARTIAL_CDF, 0x0c0aa)
|
||||
EVENT(PM_DISP_CLB_HELD_SB, 0x020a8)
|
||||
EVENT(PM_VSU0_FMA_DOUBLE, 0x0a090)
|
||||
EVENT(PM_FXU0_BUSY_FXU1_IDLE, 0x3000e)
|
||||
EVENT(PM_IC_DEMAND_CYC, 0x10018)
|
||||
EVENT(PM_MRK_DATA_FROM_L21_SHR, 0x3d04e)
|
||||
EVENT(PM_MRK_LSU_FLUSH_UST, 0x0d086)
|
||||
EVENT(PM_INST_PTEG_FROM_L3MISS, 0x2e058)
|
||||
EVENT(PM_VSU_DENORM, 0x0a8ac)
|
||||
EVENT(PM_MRK_LSU_PARTIAL_CDF, 0x0d080)
|
||||
EVENT(PM_INST_FROM_L21_SHR, 0x3404e)
|
||||
EVENT(PM_IC_PREF_WRITE, 0x0408e)
|
||||
EVENT(PM_BR_PRED, 0x0409c)
|
||||
EVENT(PM_INST_FROM_DMEM, 0x1404a)
|
||||
EVENT(PM_IC_PREF_CANCEL_ALL, 0x04890)
|
||||
EVENT(PM_LSU_DC_PREF_STREAM_CONFIRM, 0x0d8b4)
|
||||
EVENT(PM_MRK_LSU_FLUSH_SRQ, 0x0d08a)
|
||||
EVENT(PM_MRK_FIN_STALL_CYC, 0x1003c)
|
||||
EVENT(PM_L2_RCST_DISP_FAIL_OTHER, 0x46280)
|
||||
EVENT(PM_VSU1_DD_ISSUED, 0x0b098)
|
||||
EVENT(PM_PTEG_FROM_L31_SHR, 0x2c056)
|
||||
EVENT(PM_DATA_FROM_L21_SHR, 0x3c04e)
|
||||
EVENT(PM_LSU0_NCLD, 0x0c08c)
|
||||
EVENT(PM_VSU1_4FLOP, 0x0a09e)
|
||||
EVENT(PM_VSU1_8FLOP, 0x0a0a2)
|
||||
EVENT(PM_VSU_8FLOP, 0x0a8a0)
|
||||
EVENT(PM_LSU_LMQ_SRQ_EMPTY_CYC, 0x2003e)
|
||||
EVENT(PM_DTLB_MISS_64K, 0x3c05e)
|
||||
EVENT(PM_THRD_CONC_RUN_INST, 0x300f4)
|
||||
EVENT(PM_MRK_PTEG_FROM_L2, 0x1d050)
|
||||
EVENT(PM_PB_SYS_PUMP, 0x20081)
|
||||
EVENT(PM_VSU_FIN, 0x0a8bc)
|
||||
EVENT(PM_MRK_DATA_FROM_L31_MOD, 0x1d044)
|
||||
EVENT(PM_THRD_PRIO_0_1_CYC, 0x040b0)
|
||||
EVENT(PM_DERAT_MISS_64K, 0x2c05c)
|
||||
EVENT(PM_PMC2_REWIND, 0x30020)
|
||||
EVENT(PM_INST_FROM_L2, 0x14040)
|
||||
EVENT(PM_GRP_BR_MPRED_NONSPEC, 0x1000a)
|
||||
EVENT(PM_INST_DISP, 0x200f2)
|
||||
EVENT(PM_MEM0_RD_CANCEL_TOTAL, 0x30083)
|
||||
EVENT(PM_LSU0_DC_PREF_STREAM_CONFIRM, 0x0d0b4)
|
||||
EVENT(PM_L1_DCACHE_RELOAD_VALID, 0x300f6)
|
||||
EVENT(PM_VSU_SCALAR_DOUBLE_ISSUED, 0x0b888)
|
||||
EVENT(PM_L3_PREF_HIT, 0x3f080)
|
||||
EVENT(PM_MRK_PTEG_FROM_L31_MOD, 0x1d054)
|
||||
EVENT(PM_CMPLU_STALL_STORE, 0x2004a)
|
||||
EVENT(PM_MRK_FXU_FIN, 0x20038)
|
||||
EVENT(PM_PMC4_OVERFLOW, 0x10010)
|
||||
EVENT(PM_MRK_PTEG_FROM_L3, 0x2d050)
|
||||
EVENT(PM_LSU0_LMQ_LHR_MERGE, 0x0d098)
|
||||
EVENT(PM_BTAC_HIT, 0x0508a)
|
||||
EVENT(PM_L3_RD_BUSY, 0x4f082)
|
||||
EVENT(PM_LSU0_L1_SW_PREF, 0x0c09c)
|
||||
EVENT(PM_INST_FROM_L2MISS, 0x44048)
|
||||
EVENT(PM_LSU0_DC_PREF_STREAM_ALLOC, 0x0d0a8)
|
||||
EVENT(PM_L2_ST, 0x16082)
|
||||
EVENT(PM_VSU0_DENORM, 0x0a0ac)
|
||||
EVENT(PM_MRK_DATA_FROM_DL2L3_SHR, 0x3d044)
|
||||
EVENT(PM_BR_PRED_CR_TA, 0x048aa)
|
||||
EVENT(PM_VSU0_FCONV, 0x0a0b0)
|
||||
EVENT(PM_MRK_LSU_FLUSH_ULD, 0x0d084)
|
||||
EVENT(PM_BTAC_MISS, 0x05088)
|
||||
EVENT(PM_MRK_LD_MISS_EXPOSED_CYC_COUNT, 0x1003f)
|
||||
EVENT(PM_MRK_DATA_FROM_L2, 0x1d040)
|
||||
EVENT(PM_LSU_DCACHE_RELOAD_VALID, 0x0d0a2)
|
||||
EVENT(PM_VSU_FMA, 0x0a884)
|
||||
EVENT(PM_LSU0_FLUSH_SRQ, 0x0c0bc)
|
||||
EVENT(PM_LSU1_L1_PREF, 0x0d0ba)
|
||||
EVENT(PM_IOPS_CMPL, 0x10014)
|
||||
EVENT(PM_L2_SYS_PUMP, 0x36482)
|
||||
EVENT(PM_L2_RCLD_BUSY_RC_FULL, 0x46282)
|
||||
EVENT(PM_LSU_LMQ_S0_ALLOC, 0x0d0a1)
|
||||
EVENT(PM_FLUSH_DISP_SYNC, 0x02088)
|
||||
EVENT(PM_MRK_DATA_FROM_DL2L3_MOD_CYC, 0x4002a)
|
||||
EVENT(PM_L2_IC_INV, 0x26180)
|
||||
EVENT(PM_MRK_DATA_FROM_L21_MOD_CYC, 0x40024)
|
||||
EVENT(PM_L3_PREF_LDST, 0x0d8ac)
|
||||
EVENT(PM_LSU_SRQ_EMPTY_CYC, 0x40008)
|
||||
EVENT(PM_LSU_LMQ_S0_VALID, 0x0d0a0)
|
||||
EVENT(PM_FLUSH_PARTIAL, 0x02086)
|
||||
EVENT(PM_VSU1_FMA_DOUBLE, 0x0a092)
|
||||
EVENT(PM_1PLUS_PPC_DISP, 0x400f2)
|
||||
EVENT(PM_DATA_FROM_L2MISS, 0x200fe)
|
||||
EVENT(PM_SUSPENDED, 0x00000)
|
||||
EVENT(PM_VSU0_FMA, 0x0a084)
|
||||
EVENT(PM_CMPLU_STALL_SCALAR, 0x40012)
|
||||
EVENT(PM_STCX_FAIL, 0x0c09a)
|
||||
EVENT(PM_VSU0_FSQRT_FDIV_DOUBLE, 0x0a094)
|
||||
EVENT(PM_DC_PREF_DST, 0x0d0b0)
|
||||
EVENT(PM_VSU1_SCAL_SINGLE_ISSUED, 0x0b086)
|
||||
EVENT(PM_L3_HIT, 0x1f080)
|
||||
EVENT(PM_L2_GLOB_GUESS_WRONG, 0x26482)
|
||||
EVENT(PM_MRK_DFU_FIN, 0x20032)
|
||||
EVENT(PM_INST_FROM_L1, 0x04080)
|
||||
EVENT(PM_BRU_FIN, 0x10068)
|
||||
EVENT(PM_IC_DEMAND_REQ, 0x04088)
|
||||
EVENT(PM_VSU1_FSQRT_FDIV_DOUBLE, 0x0a096)
|
||||
EVENT(PM_VSU1_FMA, 0x0a086)
|
||||
EVENT(PM_MRK_LD_MISS_L1, 0x20036)
|
||||
EVENT(PM_VSU0_2FLOP_DOUBLE, 0x0a08c)
|
||||
EVENT(PM_LSU_DC_PREF_STRIDED_STREAM_CONFIRM, 0x0d8bc)
|
||||
EVENT(PM_INST_PTEG_FROM_L31_SHR, 0x2e056)
|
||||
EVENT(PM_MRK_LSU_REJECT_ERAT_MISS, 0x30064)
|
||||
EVENT(PM_MRK_DATA_FROM_L2MISS, 0x4d048)
|
||||
EVENT(PM_DATA_FROM_RL2L3_SHR, 0x1c04c)
|
||||
EVENT(PM_INST_FROM_PREF, 0x14046)
|
||||
EVENT(PM_VSU1_SQ, 0x0b09e)
|
||||
EVENT(PM_L2_LD_DISP, 0x36180)
|
||||
EVENT(PM_L2_DISP_ALL, 0x46080)
|
||||
EVENT(PM_THRD_GRP_CMPL_BOTH_CYC, 0x10012)
|
||||
EVENT(PM_VSU_FSQRT_FDIV_DOUBLE, 0x0a894)
|
||||
EVENT(PM_BR_MPRED, 0x400f6)
|
||||
EVENT(PM_INST_PTEG_FROM_DL2L3_SHR, 0x3e054)
|
||||
EVENT(PM_VSU_1FLOP, 0x0a880)
|
||||
EVENT(PM_HV_CYC, 0x2000a)
|
||||
EVENT(PM_MRK_LSU_FIN, 0x40032)
|
||||
EVENT(PM_MRK_DATA_FROM_RL2L3_SHR, 0x1d04c)
|
||||
EVENT(PM_DTLB_MISS_16M, 0x4c05e)
|
||||
EVENT(PM_LSU1_LMQ_LHR_MERGE, 0x0d09a)
|
||||
EVENT(PM_IFU_FIN, 0x40066)
|
||||
EVENT(PM_1THRD_CON_RUN_INSTR, 0x30062)
|
||||
EVENT(PM_CMPLU_STALL_COUNT, 0x4000B)
|
||||
EVENT(PM_MEM0_PB_RD_CL, 0x30083)
|
||||
EVENT(PM_THRD_1_RUN_CYC, 0x10060)
|
||||
EVENT(PM_THRD_2_CONC_RUN_INSTR, 0x40062)
|
||||
EVENT(PM_THRD_2_RUN_CYC, 0x20060)
|
||||
EVENT(PM_THRD_3_CONC_RUN_INST, 0x10062)
|
||||
EVENT(PM_THRD_3_RUN_CYC, 0x30060)
|
||||
EVENT(PM_THRD_4_CONC_RUN_INST, 0x20062)
|
||||
EVENT(PM_THRD_4_RUN_CYC, 0x40060)
|
459
arch/powerpc/perf/power7-pmu.c
Normal file
@@ -0,0 +1,459 @@
/*
|
||||
* Performance counter support for POWER7 processors.
|
||||
*
|
||||
* Copyright 2009 Paul Mackerras, IBM Corporation.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/perf_event.h>
|
||||
#include <linux/string.h>
|
||||
#include <asm/reg.h>
|
||||
#include <asm/cputable.h>
|
||||
|
||||
/*
|
||||
* Bits in event code for POWER7
|
||||
*/
|
||||
#define PM_PMC_SH 16 /* PMC number (1-based) for direct events */
|
||||
#define PM_PMC_MSK 0xf
|
||||
#define PM_PMC_MSKS (PM_PMC_MSK << PM_PMC_SH)
|
||||
#define PM_UNIT_SH 12 /* TTMMUX number and setting - unit select */
|
||||
#define PM_UNIT_MSK 0xf
|
||||
#define PM_COMBINE_SH 11 /* Combined event bit */
|
||||
#define PM_COMBINE_MSK 1
|
||||
#define PM_COMBINE_MSKS 0x800
|
||||
#define PM_L2SEL_SH 8 /* L2 event select */
|
||||
#define PM_L2SEL_MSK 7
|
||||
#define PM_PMCSEL_MSK 0xff
|
||||
|
||||
/*
|
||||
* Bits in MMCR1 for POWER7
|
||||
*/
|
||||
#define MMCR1_TTM0SEL_SH 60
|
||||
#define MMCR1_TTM1SEL_SH 56
|
||||
#define MMCR1_TTM2SEL_SH 52
|
||||
#define MMCR1_TTM3SEL_SH 48
|
||||
#define MMCR1_TTMSEL_MSK 0xf
|
||||
#define MMCR1_L2SEL_SH 45
|
||||
#define MMCR1_L2SEL_MSK 7
|
||||
#define MMCR1_PMC1_COMBINE_SH 35
|
||||
#define MMCR1_PMC2_COMBINE_SH 34
|
||||
#define MMCR1_PMC3_COMBINE_SH 33
|
||||
#define MMCR1_PMC4_COMBINE_SH 32
|
||||
#define MMCR1_PMC1SEL_SH 24
|
||||
#define MMCR1_PMC2SEL_SH 16
|
||||
#define MMCR1_PMC3SEL_SH 8
|
||||
#define MMCR1_PMC4SEL_SH 0
|
||||
#define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8)
|
||||
#define MMCR1_PMCSEL_MSK 0xff
|
||||
|
||||
/*
|
||||
* Power7 event codes.
|
||||
*/
|
||||
#define EVENT(_name, _code) \
|
||||
PME_##_name = _code,
|
||||
|
||||
enum {
|
||||
#include "power7-events-list.h"
|
||||
};
|
||||
#undef EVENT
|
||||
|
||||
/*
|
||||
* Layout of constraint bits:
|
||||
* 6666555555555544444444443333333333222222222211111111110000000000
|
||||
* 3210987654321098765432109876543210987654321098765432109876543210
|
||||
* < >< ><><><><><><>
|
||||
* L2 NC P6P5P4P3P2P1
|
||||
*
|
||||
* L2 - 16-18 - Required L2SEL value (select field)
|
||||
*
|
||||
* NC - number of counters
|
||||
* 15: NC error 0x8000
|
||||
* 12-14: number of events needing PMC1-4 0x7000
|
||||
*
|
||||
* P6
|
||||
* 11: P6 error 0x800
|
||||
* 10-11: Count of events needing PMC6
|
||||
*
|
||||
* P1..P5
|
||||
* 0-9: Count of events needing PMC1..PMC5
|
||||
*/
|
||||
|
||||
static int power7_get_constraint(u64 event, unsigned long *maskp,
|
||||
unsigned long *valp)
|
||||
{
|
||||
int pmc, sh, unit;
|
||||
unsigned long mask = 0, value = 0;
|
||||
|
||||
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
|
||||
if (pmc) {
|
||||
if (pmc > 6)
|
||||
return -1;
|
||||
sh = (pmc - 1) * 2;
|
||||
mask |= 2 << sh;
|
||||
value |= 1 << sh;
|
||||
if (pmc >= 5 && !(event == 0x500fa || event == 0x600f4))
|
||||
return -1;
|
||||
}
|
||||
if (pmc < 5) {
|
||||
/* need a counter from PMC1-4 set */
|
||||
mask |= 0x8000;
|
||||
value |= 0x1000;
|
||||
}
|
||||
|
||||
unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
|
||||
if (unit == 6) {
|
||||
/* L2SEL must be identical across events */
|
||||
int l2sel = (event >> PM_L2SEL_SH) & PM_L2SEL_MSK;
|
||||
mask |= 0x7 << 16;
|
||||
value |= l2sel << 16;
|
||||
}
|
||||
|
||||
*maskp = mask;
|
||||
*valp = value;
|
||||
return 0;
|
||||
}
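/*
 * Worked example of the constraint encoding above (illustration only):
 * an event that names PMC1 sets mask bit 1 (0x2) and value bit 0 (0x1)
 * in the two-bit P1 field, and adds 0x1000 to the NC field whose error
 * bit is 0x8000.  If a second event also names PMC1, the per-event
 * values sum to 2 in that field and collide with the mask bit, so the
 * pair is rejected; likewise a fifth event needing PMC1-4 pushes the NC
 * field (biased by test_adder = 0x3000) up to 0x8000 and fails.
 */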
|
||||
|
||||
#define MAX_ALT 2 /* at most 2 alternatives for any event */
|
||||
|
||||
static const unsigned int event_alternatives[][MAX_ALT] = {
|
||||
{ 0x200f2, 0x300f2 }, /* PM_INST_DISP */
|
||||
{ 0x200f4, 0x600f4 }, /* PM_RUN_CYC */
|
||||
{ 0x400fa, 0x500fa }, /* PM_RUN_INST_CMPL */
|
||||
};
|
||||
|
||||
/*
|
||||
* Scan the alternatives table for a match and return the
|
||||
* index into the alternatives table if found, else -1.
|
||||
*/
|
||||
static int find_alternative(u64 event)
|
||||
{
|
||||
int i, j;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
|
||||
if (event < event_alternatives[i][0])
|
||||
break;
|
||||
for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
|
||||
if (event == event_alternatives[i][j])
|
||||
return i;
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
static s64 find_alternative_decode(u64 event)
|
||||
{
|
||||
int pmc, psel;
|
||||
|
||||
/* this only handles the 4x decode events */
|
||||
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
|
||||
psel = event & PM_PMCSEL_MSK;
|
||||
if ((pmc == 2 || pmc == 4) && (psel & ~7) == 0x40)
|
||||
return event - (1 << PM_PMC_SH) + 8;
|
||||
if ((pmc == 1 || pmc == 3) && (psel & ~7) == 0x48)
|
||||
return event + (1 << PM_PMC_SH) - 8;
|
||||
return -1;
|
||||
}
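/*
 * Worked example of the 4x decode mapping above (illustration only):
 * an event code with pmc = 2 and psel = 0x40, i.e. 0x20040, decodes to
 * 0x20040 - (1 << 16) + 8 = 0x10048, the same event selected as
 * psel = 0x48 on PMC1; the second branch performs the inverse mapping.
 */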
|
||||
|
||||
static int power7_get_alternatives(u64 event, unsigned int flags, u64 alt[])
|
||||
{
|
||||
int i, j, nalt = 1;
|
||||
s64 ae;
|
||||
|
||||
alt[0] = event;
|
||||
nalt = 1;
|
||||
i = find_alternative(event);
|
||||
if (i >= 0) {
|
||||
for (j = 0; j < MAX_ALT; ++j) {
|
||||
ae = event_alternatives[i][j];
|
||||
if (ae && ae != event)
|
||||
alt[nalt++] = ae;
|
||||
}
|
||||
} else {
|
||||
ae = find_alternative_decode(event);
|
||||
if (ae > 0)
|
||||
alt[nalt++] = ae;
|
||||
}
|
||||
|
||||
if (flags & PPMU_ONLY_COUNT_RUN) {
|
||||
/*
|
||||
* We're only counting in RUN state,
|
||||
* so PM_CYC is equivalent to PM_RUN_CYC
|
||||
* and PM_INST_CMPL === PM_RUN_INST_CMPL.
|
||||
* This doesn't include alternatives that don't provide
|
||||
* any extra flexibility in assigning PMCs.
|
||||
*/
|
||||
j = nalt;
|
||||
for (i = 0; i < nalt; ++i) {
|
||||
switch (alt[i]) {
|
||||
case 0x1e: /* PM_CYC */
|
||||
alt[j++] = 0x600f4; /* PM_RUN_CYC */
|
||||
break;
|
||||
case 0x600f4: /* PM_RUN_CYC */
|
||||
alt[j++] = 0x1e;
|
||||
break;
|
||||
case 0x2: /* PM_PPC_CMPL */
|
||||
alt[j++] = 0x500fa; /* PM_RUN_INST_CMPL */
|
||||
break;
|
||||
case 0x500fa: /* PM_RUN_INST_CMPL */
|
||||
alt[j++] = 0x2; /* PM_PPC_CMPL */
|
||||
break;
|
||||
}
|
||||
}
|
||||
nalt = j;
|
||||
}
|
||||
|
||||
return nalt;
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns 1 if event counts things relating to marked instructions
|
||||
* and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not.
|
||||
*/
|
||||
static int power7_marked_instr_event(u64 event)
|
||||
{
|
||||
int pmc, psel;
|
||||
int unit;
|
||||
|
||||
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
|
||||
unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
|
||||
psel = event & PM_PMCSEL_MSK & ~1; /* trim off edge/level bit */
|
||||
if (pmc >= 5)
|
||||
return 0;
|
||||
|
||||
switch (psel >> 4) {
|
||||
case 2:
|
||||
return pmc == 2 || pmc == 4;
|
||||
case 3:
|
||||
if (psel == 0x3c)
|
||||
return pmc == 1;
|
||||
if (psel == 0x3e)
|
||||
return pmc != 2;
|
||||
return 1;
|
||||
case 4:
|
||||
case 5:
|
||||
return unit == 0xd;
|
||||
case 6:
|
||||
if (psel == 0x64)
|
||||
return pmc >= 3;
|
||||
case 8:
|
||||
return unit == 0xd;
|
||||
}
|
||||
return 0;
|
||||
}
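/*
 * Example of the decode above (illustration only): PM_MRK_ST_CMPL
 * (0x10034) has pmc = 1, unit = 0 and psel = 0x34, so psel >> 4 == 3
 * and neither special case 0x3c nor 0x3e applies; the function returns
 * 1 and power7_compute_mmcr() will set MMCRA_SAMPLE_ENABLE for it.
 */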
|
||||
|
||||
static int power7_compute_mmcr(u64 event[], int n_ev,
|
||||
unsigned int hwc[], unsigned long mmcr[], struct perf_event *pevents[])
|
||||
{
|
||||
unsigned long mmcr1 = 0;
|
||||
unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS;
|
||||
unsigned int pmc, unit, combine, l2sel, psel;
|
||||
unsigned int pmc_inuse = 0;
|
||||
int i;
|
||||
|
||||
/* First pass to count resource use */
|
||||
for (i = 0; i < n_ev; ++i) {
|
||||
pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
|
||||
if (pmc) {
|
||||
if (pmc > 6)
|
||||
return -1;
|
||||
if (pmc_inuse & (1 << (pmc - 1)))
|
||||
return -1;
|
||||
pmc_inuse |= 1 << (pmc - 1);
|
||||
}
|
||||
}
|
||||
|
||||
/* Second pass: assign PMCs, set all MMCR1 fields */
|
||||
for (i = 0; i < n_ev; ++i) {
|
||||
pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
|
||||
unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
|
||||
combine = (event[i] >> PM_COMBINE_SH) & PM_COMBINE_MSK;
|
||||
l2sel = (event[i] >> PM_L2SEL_SH) & PM_L2SEL_MSK;
|
||||
psel = event[i] & PM_PMCSEL_MSK;
|
||||
if (!pmc) {
|
||||
/* Bus event or any-PMC direct event */
|
||||
for (pmc = 0; pmc < 4; ++pmc) {
|
||||
if (!(pmc_inuse & (1 << pmc)))
|
||||
break;
|
||||
}
|
||||
if (pmc >= 4)
|
||||
return -1;
|
||||
pmc_inuse |= 1 << pmc;
|
||||
} else {
|
||||
/* Direct or decoded event */
|
||||
--pmc;
|
||||
}
|
||||
if (pmc <= 3) {
|
||||
mmcr1 |= (unsigned long) unit
|
||||
<< (MMCR1_TTM0SEL_SH - 4 * pmc);
|
||||
mmcr1 |= (unsigned long) combine
|
||||
<< (MMCR1_PMC1_COMBINE_SH - pmc);
|
||||
mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc);
|
||||
if (unit == 6) /* L2 events */
|
||||
mmcr1 |= (unsigned long) l2sel
|
||||
<< MMCR1_L2SEL_SH;
|
||||
}
|
||||
if (power7_marked_instr_event(event[i]))
|
||||
mmcra |= MMCRA_SAMPLE_ENABLE;
|
||||
hwc[i] = pmc;
|
||||
}
|
||||
|
||||
/* Return MMCRx values */
|
||||
mmcr[0] = 0;
|
||||
if (pmc_inuse & 1)
|
||||
mmcr[0] = MMCR0_PMC1CE;
|
||||
if (pmc_inuse & 0x3e)
|
||||
mmcr[0] |= MMCR0_PMCjCE;
|
||||
mmcr[1] = mmcr1;
|
||||
mmcr[2] = mmcra;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void power7_disable_pmc(unsigned int pmc, unsigned long mmcr[])
|
||||
{
|
||||
if (pmc <= 3)
|
||||
mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc));
|
||||
}
|
||||
|
||||
static int power7_generic_events[] = {
|
||||
[PERF_COUNT_HW_CPU_CYCLES] = PME_PM_CYC,
|
||||
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PME_PM_GCT_NOSLOT_CYC,
|
||||
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PME_PM_CMPLU_STALL,
|
||||
[PERF_COUNT_HW_INSTRUCTIONS] = PME_PM_INST_CMPL,
|
||||
[PERF_COUNT_HW_CACHE_REFERENCES] = PME_PM_LD_REF_L1,
|
||||
[PERF_COUNT_HW_CACHE_MISSES] = PME_PM_LD_MISS_L1,
|
||||
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PME_PM_BRU_FIN,
|
||||
[PERF_COUNT_HW_BRANCH_MISSES] = PME_PM_BR_MPRED,
|
||||
};
|
||||
|
||||
#define C(x) PERF_COUNT_HW_CACHE_##x
|
||||
|
||||
/*
|
||||
* Table of generalized cache-related events.
|
||||
* 0 means not supported, -1 means nonsensical, other values
|
||||
* are event codes.
|
||||
*/
|
||||
static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
|
||||
[C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0xc880, 0x400f0 },
|
||||
[C(OP_WRITE)] = { 0, 0x300f0 },
|
||||
[C(OP_PREFETCH)] = { 0xd8b8, 0 },
|
||||
},
|
||||
[C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0, 0x200fc },
|
||||
[C(OP_WRITE)] = { -1, -1 },
|
||||
[C(OP_PREFETCH)] = { 0x408a, 0 },
|
||||
},
|
||||
[C(LL)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0x16080, 0x26080 },
|
||||
[C(OP_WRITE)] = { 0x16082, 0x26082 },
|
||||
[C(OP_PREFETCH)] = { 0, 0 },
|
||||
},
|
||||
[C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0, 0x300fc },
|
||||
[C(OP_WRITE)] = { -1, -1 },
|
||||
[C(OP_PREFETCH)] = { -1, -1 },
|
||||
},
|
||||
[C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0, 0x400fc },
|
||||
[C(OP_WRITE)] = { -1, -1 },
|
||||
[C(OP_PREFETCH)] = { -1, -1 },
|
||||
},
|
||||
[C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { 0x10068, 0x400f6 },
|
||||
[C(OP_WRITE)] = { -1, -1 },
|
||||
[C(OP_PREFETCH)] = { -1, -1 },
|
||||
},
|
||||
[C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
|
||||
[C(OP_READ)] = { -1, -1 },
|
||||
[C(OP_WRITE)] = { -1, -1 },
|
||||
[C(OP_PREFETCH)] = { -1, -1 },
|
||||
},
|
||||
};
|
||||
|
||||
|
||||
GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
|
||||
GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_GCT_NOSLOT_CYC);
|
||||
GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL);
|
||||
GENERIC_EVENT_ATTR(instructions, PM_INST_CMPL);
|
||||
GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1);
|
||||
GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1);
|
||||
GENERIC_EVENT_ATTR(branch-instructions, PM_BRU_FIN);
|
||||
GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED);
|
||||
|
||||
#define EVENT(_name, _code) POWER_EVENT_ATTR(_name, _name);
|
||||
#include "power7-events-list.h"
|
||||
#undef EVENT
|
||||
|
||||
#define EVENT(_name, _code) POWER_EVENT_PTR(_name),
|
||||
|
||||
static struct attribute *power7_events_attr[] = {
|
||||
GENERIC_EVENT_PTR(PM_CYC),
|
||||
GENERIC_EVENT_PTR(PM_GCT_NOSLOT_CYC),
|
||||
GENERIC_EVENT_PTR(PM_CMPLU_STALL),
|
||||
GENERIC_EVENT_PTR(PM_INST_CMPL),
|
||||
GENERIC_EVENT_PTR(PM_LD_REF_L1),
|
||||
GENERIC_EVENT_PTR(PM_LD_MISS_L1),
|
||||
GENERIC_EVENT_PTR(PM_BRU_FIN),
|
||||
GENERIC_EVENT_PTR(PM_BR_MPRED),
|
||||
|
||||
#include "power7-events-list.h"
|
||||
#undef EVENT
|
||||
NULL
|
||||
};
|
||||
|
||||
static struct attribute_group power7_pmu_events_group = {
|
||||
.name = "events",
|
||||
.attrs = power7_events_attr,
|
||||
};
|
||||
|
||||
PMU_FORMAT_ATTR(event, "config:0-19");
|
||||
|
||||
static struct attribute *power7_pmu_format_attr[] = {
|
||||
&format_attr_event.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
struct attribute_group power7_pmu_format_group = {
|
||||
.name = "format",
|
||||
.attrs = power7_pmu_format_attr,
|
||||
};
|
||||
|
||||
static const struct attribute_group *power7_pmu_attr_groups[] = {
|
||||
&power7_pmu_format_group,
|
||||
&power7_pmu_events_group,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct power_pmu power7_pmu = {
|
||||
.name = "POWER7",
|
||||
.n_counter = 6,
|
||||
.max_alternatives = MAX_ALT + 1,
|
||||
.add_fields = 0x1555ul,
|
||||
.test_adder = 0x3000ul,
|
||||
.compute_mmcr = power7_compute_mmcr,
|
||||
.get_constraint = power7_get_constraint,
|
||||
.get_alternatives = power7_get_alternatives,
|
||||
.disable_pmc = power7_disable_pmc,
|
||||
.flags = PPMU_ALT_SIPR,
|
||||
.attr_groups = power7_pmu_attr_groups,
|
||||
.n_generic = ARRAY_SIZE(power7_generic_events),
|
||||
.generic_events = power7_generic_events,
|
||||
.cache_events = &power7_cache_events,
|
||||
};
|
||||
|
||||
static int __init init_power7_pmu(void)
|
||||
{
|
||||
if (!cur_cpu_spec->oprofile_cpu_type ||
|
||||
strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power7"))
|
||||
return -ENODEV;
|
||||
|
||||
if (pvr_version_is(PVR_POWER7p))
|
||||
power7_pmu.flags |= PPMU_SIAR_VALID;
|
||||
|
||||
return register_power_pmu(&power7_pmu);
|
||||
}
|
||||
|
||||
early_initcall(init_power7_pmu);
|
844
arch/powerpc/perf/power8-pmu.c
Normal file
@@ -0,0 +1,844 @@
/*
|
||||
* Performance counter support for POWER8 processors.
|
||||
*
|
||||
* Copyright 2009 Paul Mackerras, IBM Corporation.
|
||||
* Copyright 2013 Michael Ellerman, IBM Corporation.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "power8-pmu: " fmt
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/perf_event.h>
|
||||
#include <asm/firmware.h>
|
||||
#include <asm/cputable.h>
|
||||
|
||||
|
||||
/*
|
||||
* Some power8 event codes.
|
||||
*/
|
||||
#define PM_CYC 0x0001e
|
||||
#define PM_GCT_NOSLOT_CYC 0x100f8
|
||||
#define PM_CMPLU_STALL 0x4000a
|
||||
#define PM_INST_CMPL 0x00002
|
||||
#define PM_BRU_FIN 0x10068
|
||||
#define PM_BR_MPRED_CMPL 0x400f6
|
||||
|
||||
/* All L1 D cache load references counted at finish, gated by reject */
|
||||
#define PM_LD_REF_L1 0x100ee
|
||||
/* Load Missed L1 */
|
||||
#define PM_LD_MISS_L1 0x3e054
|
||||
/* Store Missed L1 */
|
||||
#define PM_ST_MISS_L1 0x300f0
|
||||
/* L1 cache data prefetches */
|
||||
#define PM_L1_PREF 0x0d8b8
|
||||
/* Instruction fetches from L1 */
|
||||
#define PM_INST_FROM_L1 0x04080
|
||||
/* Demand iCache Miss */
|
||||
#define PM_L1_ICACHE_MISS 0x200fd
|
||||
/* Instruction demand sectors written into IL1 */
|
||||
#define PM_L1_DEMAND_WRITE 0x0408c
|
||||
/* Instruction prefetch written into IL1 */
|
||||
#define PM_IC_PREF_WRITE 0x0408e
|
||||
/* The data cache was reloaded from local core's L3 due to a demand load */
|
||||
#define PM_DATA_FROM_L3 0x4c042
|
||||
/* Demand LD - L3 Miss (not L2 hit and not L3 hit) */
|
||||
#define PM_DATA_FROM_L3MISS 0x300fe
|
||||
/* All successful D-side store dispatches for this thread */
|
||||
#define PM_L2_ST 0x17080
|
||||
/* All successful D-side store dispatches for this thread that were L2 Miss */
|
||||
#define PM_L2_ST_MISS 0x17082
|
||||
/* Total HW L3 prefetches(Load+store) */
|
||||
#define PM_L3_PREF_ALL 0x4e052
|
||||
/* Data PTEG reload */
|
||||
#define PM_DTLB_MISS 0x300fc
|
||||
/* ITLB Reloaded */
|
||||
#define PM_ITLB_MISS 0x400fc
|
||||
|
||||
|
||||
/*
|
||||
* Raw event encoding for POWER8:
|
||||
*
|
||||
* 60 56 52 48 44 40 36 32
|
||||
* | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
|
||||
* | | [ ] [ thresh_cmp ] [ thresh_ctl ]
|
||||
* | | | |
|
||||
* | | *- IFM (Linux) thresh start/stop OR FAB match -*
|
||||
* | *- BHRB (Linux)
|
||||
* *- EBB (Linux)
|
||||
*
|
||||
* 28 24 20 16 12 8 4 0
|
||||
* | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
|
||||
* [ ] [ sample ] [cache] [ pmc ] [unit ] c m [ pmcxsel ]
|
||||
* | | | | |
|
||||
* | | | | *- mark
|
||||
* | | *- L1/L2/L3 cache_sel |
|
||||
* | | |
|
||||
* | *- sampling mode for marked events *- combine
|
||||
* |
|
||||
* *- thresh_sel
|
||||
*
|
||||
* Below uses IBM bit numbering.
|
||||
*
|
||||
* MMCR1[x:y] = unit (PMCxUNIT)
|
||||
* MMCR1[x] = combine (PMCxCOMB)
|
||||
*
|
||||
* if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
|
||||
* # PM_MRK_FAB_RSP_MATCH
|
||||
* MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
|
||||
* else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
|
||||
* # PM_MRK_FAB_RSP_MATCH_CYC
|
||||
* MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
|
||||
* else
|
||||
* MMCRA[48:55] = thresh_ctl (THRESH START/END)
|
||||
*
|
||||
* if thresh_sel:
|
||||
* MMCRA[45:47] = thresh_sel
|
||||
*
|
||||
* if thresh_cmp:
|
||||
* MMCRA[22:24] = thresh_cmp[0:2]
|
||||
* MMCRA[25:31] = thresh_cmp[3:9]
|
||||
*
|
||||
* if unit == 6 or unit == 7
|
||||
* MMCRC[53:55] = cache_sel[1:3] (L2EVENT_SEL)
|
||||
* else if unit == 8 or unit == 9:
|
||||
* if cache_sel[0] == 0: # L3 bank
|
||||
* MMCRC[47:49] = cache_sel[1:3] (L3EVENT_SEL0)
|
||||
* else if cache_sel[0] == 1:
|
||||
* MMCRC[50:51] = cache_sel[2:3] (L3EVENT_SEL1)
|
||||
* else if cache_sel[1]: # L1 event
|
||||
* MMCR1[16] = cache_sel[2]
|
||||
* MMCR1[17] = cache_sel[3]
|
||||
*
|
||||
* if mark:
|
||||
* MMCRA[63] = 1 (SAMPLE_ENABLE)
|
||||
* MMCRA[57:59] = sample[0:2] (RAND_SAMP_ELIG)
|
||||
* MMCRA[61:62] = sample[3:4] (RAND_SAMP_MODE)
|
||||
*
|
||||
* if EBB and BHRB:
|
||||
* MMCRA[32:33] = IFM
|
||||
*
|
||||
*/
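/*
 * Worked decode of one event under the layout above (illustration
 * only): PM_L2_ST (0x17080) splits into psel = 0x80, combine = 0,
 * unit = 7, pmc = 1, cache_sel = 0 and mark = 0.  Because the unit is
 * in the 6-9 range, the constraint code below only accepts it with a
 * zero cache selector, which is the case here.
 */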
|
||||
|
||||
#define EVENT_EBB_MASK 1ull
|
||||
#define EVENT_EBB_SHIFT PERF_EVENT_CONFIG_EBB_SHIFT
|
||||
#define EVENT_BHRB_MASK 1ull
|
||||
#define EVENT_BHRB_SHIFT 62
|
||||
#define EVENT_WANTS_BHRB (EVENT_BHRB_MASK << EVENT_BHRB_SHIFT)
|
||||
#define EVENT_IFM_MASK 3ull
|
||||
#define EVENT_IFM_SHIFT 60
|
||||
#define EVENT_THR_CMP_SHIFT 40 /* Threshold CMP value */
|
||||
#define EVENT_THR_CMP_MASK 0x3ff
|
||||
#define EVENT_THR_CTL_SHIFT 32 /* Threshold control value (start/stop) */
|
||||
#define EVENT_THR_CTL_MASK 0xffull
|
||||
#define EVENT_THR_SEL_SHIFT 29 /* Threshold select value */
|
||||
#define EVENT_THR_SEL_MASK 0x7
|
||||
#define EVENT_THRESH_SHIFT 29 /* All threshold bits */
|
||||
#define EVENT_THRESH_MASK 0x1fffffull
|
||||
#define EVENT_SAMPLE_SHIFT 24 /* Sampling mode & eligibility */
|
||||
#define EVENT_SAMPLE_MASK 0x1f
|
||||
#define EVENT_CACHE_SEL_SHIFT 20 /* L2/L3 cache select */
|
||||
#define EVENT_CACHE_SEL_MASK 0xf
|
||||
#define EVENT_IS_L1 (4 << EVENT_CACHE_SEL_SHIFT)
|
||||
#define EVENT_PMC_SHIFT 16 /* PMC number (1-based) */
|
||||
#define EVENT_PMC_MASK 0xf
|
||||
#define EVENT_UNIT_SHIFT 12 /* Unit */
|
||||
#define EVENT_UNIT_MASK 0xf
|
||||
#define EVENT_COMBINE_SHIFT 11 /* Combine bit */
|
||||
#define EVENT_COMBINE_MASK 0x1
|
||||
#define EVENT_MARKED_SHIFT 8 /* Marked bit */
|
||||
#define EVENT_MARKED_MASK 0x1
|
||||
#define EVENT_IS_MARKED (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT)
|
||||
#define EVENT_PSEL_MASK 0xff /* PMCxSEL value */
|
||||
|
||||
/* Bits defined by Linux */
|
||||
#define EVENT_LINUX_MASK \
|
||||
((EVENT_EBB_MASK << EVENT_EBB_SHIFT) | \
|
||||
(EVENT_BHRB_MASK << EVENT_BHRB_SHIFT) | \
|
||||
(EVENT_IFM_MASK << EVENT_IFM_SHIFT))
|
||||
|
||||
#define EVENT_VALID_MASK \
|
||||
((EVENT_THRESH_MASK << EVENT_THRESH_SHIFT) | \
|
||||
(EVENT_SAMPLE_MASK << EVENT_SAMPLE_SHIFT) | \
|
||||
(EVENT_CACHE_SEL_MASK << EVENT_CACHE_SEL_SHIFT) | \
|
||||
(EVENT_PMC_MASK << EVENT_PMC_SHIFT) | \
|
||||
(EVENT_UNIT_MASK << EVENT_UNIT_SHIFT) | \
|
||||
(EVENT_COMBINE_MASK << EVENT_COMBINE_SHIFT) | \
|
||||
(EVENT_MARKED_MASK << EVENT_MARKED_SHIFT) | \
|
||||
EVENT_LINUX_MASK | \
|
||||
EVENT_PSEL_MASK)
|
||||
|
||||
/* MMCRA IFM bits - POWER8 */
|
||||
#define POWER8_MMCRA_IFM1 0x0000000040000000UL
|
||||
#define POWER8_MMCRA_IFM2 0x0000000080000000UL
|
||||
#define POWER8_MMCRA_IFM3 0x00000000C0000000UL
|
||||
|
||||
#define ONLY_PLM \
|
||||
(PERF_SAMPLE_BRANCH_USER |\
|
||||
PERF_SAMPLE_BRANCH_KERNEL |\
|
||||
PERF_SAMPLE_BRANCH_HV)
|
||||
|
||||
/*
|
||||
* Layout of constraint bits:
|
||||
*
|
||||
* 60 56 52 48 44 40 36 32
|
||||
* | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
|
||||
* [ fab_match ] [ thresh_cmp ] [ thresh_ctl ] [ ]
|
||||
* |
|
||||
* thresh_sel -*
|
||||
*
|
||||
* 28 24 20 16 12 8 4 0
|
||||
* | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
|
||||
* [ ] | [ ] [ sample ] [ ] [6] [5] [4] [3] [2] [1]
|
||||
* | | | |
|
||||
* BHRB IFM -* | | | Count of events for each PMC.
|
||||
* EBB -* | | p1, p2, p3, p4, p5, p6.
|
||||
* L1 I/D qualifier -* |
|
||||
* nc - number of counters -*
|
||||
*
|
||||
* The PMC fields P1..P6, and NC, are adder fields. As we accumulate constraints
|
||||
* we want the low bit of each field to be added to any existing value.
|
||||
*
|
||||
* Everything else is a value field.
|
||||
*/
|
||||
|
||||
#define CNST_FAB_MATCH_VAL(v) (((v) & EVENT_THR_CTL_MASK) << 56)
|
||||
#define CNST_FAB_MATCH_MASK CNST_FAB_MATCH_VAL(EVENT_THR_CTL_MASK)
|
||||
|
||||
/* We just throw all the threshold bits into the constraint */
|
||||
#define CNST_THRESH_VAL(v) (((v) & EVENT_THRESH_MASK) << 32)
|
||||
#define CNST_THRESH_MASK CNST_THRESH_VAL(EVENT_THRESH_MASK)
|
||||
|
||||
#define CNST_EBB_VAL(v) (((v) & EVENT_EBB_MASK) << 24)
|
||||
#define CNST_EBB_MASK CNST_EBB_VAL(EVENT_EBB_MASK)
|
||||
|
||||
#define CNST_IFM_VAL(v) (((v) & EVENT_IFM_MASK) << 25)
|
||||
#define CNST_IFM_MASK CNST_IFM_VAL(EVENT_IFM_MASK)
|
||||
|
||||
#define CNST_L1_QUAL_VAL(v) (((v) & 3) << 22)
|
||||
#define CNST_L1_QUAL_MASK CNST_L1_QUAL_VAL(3)
|
||||
|
||||
#define CNST_SAMPLE_VAL(v) (((v) & EVENT_SAMPLE_MASK) << 16)
|
||||
#define CNST_SAMPLE_MASK CNST_SAMPLE_VAL(EVENT_SAMPLE_MASK)
|
||||
|
||||
/*
|
||||
* For NC we are counting up to 4 events. This requires three bits, and we need
|
||||
* the fifth event to overflow and set the 4th bit. To achieve that we bias the
|
||||
* fields by 3 in test_adder.
|
||||
*/
|
||||
#define CNST_NC_SHIFT 12
|
||||
#define CNST_NC_VAL (1 << CNST_NC_SHIFT)
|
||||
#define CNST_NC_MASK (8 << CNST_NC_SHIFT)
|
||||
#define POWER8_TEST_ADDER (3 << CNST_NC_SHIFT)
|
||||
|
||||
/*
|
||||
* For the per-PMC fields we have two bits. The low bit is added, so if two
|
||||
* events ask for the same PMC the sum will overflow, setting the high bit,
|
||||
* indicating an error. So our mask sets the high bit.
|
||||
*/
|
||||
#define CNST_PMC_SHIFT(pmc) ((pmc - 1) * 2)
|
||||
#define CNST_PMC_VAL(pmc) (1 << CNST_PMC_SHIFT(pmc))
|
||||
#define CNST_PMC_MASK(pmc) (2 << CNST_PMC_SHIFT(pmc))
|
||||
|
||||
/* Our add_fields is defined as: */
|
||||
#define POWER8_ADD_FIELDS \
|
||||
CNST_PMC_VAL(1) | CNST_PMC_VAL(2) | CNST_PMC_VAL(3) | \
|
||||
CNST_PMC_VAL(4) | CNST_PMC_VAL(5) | CNST_PMC_VAL(6) | CNST_NC_VAL
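/*
 * Worked example of the adder scheme above (illustration only): each
 * event that needs a PMC1-4 slot (including pmc == 0 events assigned
 * later) adds CNST_NC_VAL (1 << 12); four of them plus the test_adder
 * bias of 3 << 12 gives 7 << 12, still clear of the error bit in
 * CNST_NC_MASK (8 << 12), while a fifth reaches it and the group is
 * rejected.  Similarly, two events both asking for, say, PMC3 each add
 * CNST_PMC_VAL(3) = 1 << 4; their sum, 2 << 4, is exactly
 * CNST_PMC_MASK(3), flagging the double booking.
 */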
|
||||
|
||||
|
||||
/* Bits in MMCR1 for POWER8 */
|
||||
#define MMCR1_UNIT_SHIFT(pmc) (60 - (4 * ((pmc) - 1)))
|
||||
#define MMCR1_COMBINE_SHIFT(pmc) (35 - ((pmc) - 1))
|
||||
#define MMCR1_PMCSEL_SHIFT(pmc) (24 - (((pmc) - 1)) * 8)
|
||||
#define MMCR1_FAB_SHIFT 36
|
||||
#define MMCR1_DC_QUAL_SHIFT 47
|
||||
#define MMCR1_IC_QUAL_SHIFT 46
|
||||
|
||||
/* Bits in MMCRA for POWER8 */
|
||||
#define MMCRA_SAMP_MODE_SHIFT 1
|
||||
#define MMCRA_SAMP_ELIG_SHIFT 4
|
||||
#define MMCRA_THR_CTL_SHIFT 8
|
||||
#define MMCRA_THR_SEL_SHIFT 16
|
||||
#define MMCRA_THR_CMP_SHIFT 32
|
||||
#define MMCRA_SDAR_MODE_TLB (1ull << 42)
|
||||
#define MMCRA_IFM_SHIFT 30
|
||||
|
||||
/* Bits in MMCR2 for POWER8 */
|
||||
#define MMCR2_FCS(pmc) (1ull << (63 - (((pmc) - 1) * 9)))
|
||||
#define MMCR2_FCP(pmc) (1ull << (62 - (((pmc) - 1) * 9)))
|
||||
#define MMCR2_FCH(pmc) (1ull << (57 - (((pmc) - 1) * 9)))
|
||||
|
||||
|
||||
static inline bool event_is_fab_match(u64 event)
|
||||
{
|
||||
/* Only check pmc, unit and pmcxsel, ignore the edge bit (0) */
|
||||
event &= 0xff0fe;
|
||||
|
||||
/* PM_MRK_FAB_RSP_MATCH & PM_MRK_FAB_RSP_MATCH_CYC */
|
||||
return (event == 0x30056 || event == 0x4f052);
|
||||
}
|
||||
|
||||
static int power8_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
|
||||
{
|
||||
unsigned int unit, pmc, cache, ebb;
|
||||
unsigned long mask, value;
|
||||
|
||||
mask = value = 0;
|
||||
|
||||
if (event & ~EVENT_VALID_MASK)
|
||||
return -1;
|
||||
|
||||
pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
|
||||
unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
|
||||
cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
|
||||
ebb = (event >> EVENT_EBB_SHIFT) & EVENT_EBB_MASK;
|
||||
|
||||
if (pmc) {
|
||||
u64 base_event;
|
||||
|
||||
if (pmc > 6)
|
||||
return -1;
|
||||
|
||||
/* Ignore Linux defined bits when checking event below */
|
||||
base_event = event & ~EVENT_LINUX_MASK;
|
||||
|
||||
if (pmc >= 5 && base_event != 0x500fa && base_event != 0x600f4)
|
||||
return -1;
|
||||
|
||||
mask |= CNST_PMC_MASK(pmc);
|
||||
value |= CNST_PMC_VAL(pmc);
|
||||
}
|
||||
|
||||
if (pmc <= 4) {
|
||||
/*
|
||||
* Add to number of counters in use. Note this includes events with
|
||||
* a PMC of 0 - they still need a PMC, it's just assigned later.
|
||||
* Don't count events on PMC 5 & 6, there is only one valid event
|
||||
* on each of those counters, and they are handled above.
|
||||
*/
|
||||
mask |= CNST_NC_MASK;
|
||||
value |= CNST_NC_VAL;
|
||||
}
|
||||
|
||||
if (unit >= 6 && unit <= 9) {
|
||||
/*
|
||||
* L2/L3 events contain a cache selector field, which is
|
||||
* supposed to be programmed into MMCRC. However MMCRC is only
|
||||
* HV writable, and there is no API for guest kernels to modify
|
||||
* it. The solution is for the hypervisor to initialise the
|
||||
* field to zeroes, and for us to only ever allow events that
|
||||
* have a cache selector of zero. The bank selector (bit 3) is
|
||||
* irrelevant, as long as the rest of the value is 0.
|
||||
*/
|
||||
if (cache & 0x7)
|
||||
return -1;
|
||||
|
||||
} else if (event & EVENT_IS_L1) {
|
||||
mask |= CNST_L1_QUAL_MASK;
|
||||
value |= CNST_L1_QUAL_VAL(cache);
|
||||
}
|
||||
|
||||
if (event & EVENT_IS_MARKED) {
|
||||
mask |= CNST_SAMPLE_MASK;
|
||||
value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
|
||||
}
|
||||
|
||||
/*
|
||||
* Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
|
||||
* the threshold control bits are used for the match value.
|
||||
*/
|
||||
if (event_is_fab_match(event)) {
|
||||
mask |= CNST_FAB_MATCH_MASK;
|
||||
value |= CNST_FAB_MATCH_VAL(event >> EVENT_THR_CTL_SHIFT);
|
||||
} else {
|
||||
/*
|
||||
* Check the mantissa upper two bits are not zero, unless the
|
||||
* exponent is also zero. See the THRESH_CMP_MANTISSA doc.
|
||||
*/
|
||||
unsigned int cmp, exp;
|
||||
|
||||
cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
|
||||
exp = cmp >> 7;
|
||||
|
||||
if (exp && (cmp & 0x60) == 0)
|
||||
return -1;
|
||||
|
||||
mask |= CNST_THRESH_MASK;
|
||||
value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
|
||||
}
|
||||
|
||||
if (!pmc && ebb)
|
||||
/* EBB events must specify the PMC */
|
||||
return -1;
|
||||
|
||||
if (event & EVENT_WANTS_BHRB) {
|
||||
if (!ebb)
|
||||
/* Only EBB events can request BHRB */
|
||||
return -1;
|
||||
|
||||
mask |= CNST_IFM_MASK;
|
||||
value |= CNST_IFM_VAL(event >> EVENT_IFM_SHIFT);
|
||||
}
|
||||
|
||||
/*
|
||||
* All events must agree on EBB, either all request it or none.
|
||||
* EBB events are pinned & exclusive, so this should never actually
|
||||
* hit, but we leave it as a fallback in case.
|
||||
*/
|
||||
mask |= CNST_EBB_VAL(ebb);
|
||||
value |= CNST_EBB_MASK;
|
||||
|
||||
*maskp = mask;
|
||||
*valp = value;
|
||||
|
||||
return 0;
|
||||
}

static int power8_compute_mmcr(u64 event[], int n_ev,
			       unsigned int hwc[], unsigned long mmcr[],
			       struct perf_event *pevents[])
{
	unsigned long mmcra, mmcr1, mmcr2, unit, combine, psel, cache, val;
	unsigned int pmc, pmc_inuse;
	int i;

	pmc_inuse = 0;

	/* First pass to count resource use */
	for (i = 0; i < n_ev; ++i) {
		pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
		if (pmc)
			pmc_inuse |= 1 << pmc;
	}

	/* In continuous sampling mode, update SDAR on TLB miss */
	mmcra = MMCRA_SDAR_MODE_TLB;
	mmcr1 = mmcr2 = 0;

	/* Second pass: assign PMCs, set all MMCR1 fields */
	for (i = 0; i < n_ev; ++i) {
		pmc     = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
		unit    = (event[i] >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
		combine = (event[i] >> EVENT_COMBINE_SHIFT) & EVENT_COMBINE_MASK;
		psel    = event[i] & EVENT_PSEL_MASK;

		if (!pmc) {
			for (pmc = 1; pmc <= 4; ++pmc) {
				if (!(pmc_inuse & (1 << pmc)))
					break;
			}

			pmc_inuse |= 1 << pmc;
		}

		if (pmc <= 4) {
			mmcr1 |= unit << MMCR1_UNIT_SHIFT(pmc);
			mmcr1 |= combine << MMCR1_COMBINE_SHIFT(pmc);
			mmcr1 |= psel << MMCR1_PMCSEL_SHIFT(pmc);
		}

		if (event[i] & EVENT_IS_L1) {
			cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
			mmcr1 |= (cache & 1) << MMCR1_IC_QUAL_SHIFT;
			cache >>= 1;
			mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
		}

		if (event[i] & EVENT_IS_MARKED) {
			mmcra |= MMCRA_SAMPLE_ENABLE;

			val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
			if (val) {
				mmcra |= (val &  3) << MMCRA_SAMP_MODE_SHIFT;
				mmcra |= (val >> 2) << MMCRA_SAMP_ELIG_SHIFT;
			}
		}

		/*
		 * PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
		 * the threshold bits are used for the match value.
		 */
		if (event_is_fab_match(event[i])) {
			mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) &
				  EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT;
		} else {
			val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
			mmcra |= val << MMCRA_THR_CTL_SHIFT;
			val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
			mmcra |= val << MMCRA_THR_SEL_SHIFT;
			val = (event[i] >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
			mmcra |= val << MMCRA_THR_CMP_SHIFT;
		}

		if (event[i] & EVENT_WANTS_BHRB) {
			val = (event[i] >> EVENT_IFM_SHIFT) & EVENT_IFM_MASK;
			mmcra |= val << MMCRA_IFM_SHIFT;
		}

		if (pevents[i]->attr.exclude_user)
			mmcr2 |= MMCR2_FCP(pmc);

		if (pevents[i]->attr.exclude_hv)
			mmcr2 |= MMCR2_FCH(pmc);

		if (pevents[i]->attr.exclude_kernel) {
			if (cpu_has_feature(CPU_FTR_HVMODE))
				mmcr2 |= MMCR2_FCH(pmc);
			else
				mmcr2 |= MMCR2_FCS(pmc);
		}

		hwc[i] = pmc - 1;
	}

	/* Return MMCRx values */
	mmcr[0] = 0;

	/* pmc_inuse is 1-based */
	if (pmc_inuse & 2)
		mmcr[0] = MMCR0_PMC1CE;

	if (pmc_inuse & 0x7c)
		mmcr[0] |= MMCR0_PMCjCE;

	/* If we're not using PMC 5 or 6, freeze them */
	if (!(pmc_inuse & 0x60))
		mmcr[0] |= MMCR0_FC56;

	mmcr[1] = mmcr1;
	mmcr[2] = mmcra;
	mmcr[3] = mmcr2;

	return 0;
}
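/*
 * Note added for clarity: the mmcr[] array filled in above is indexed
 * mmcr[0] -> MMCR0 bits (PMC1CE/PMCjCE/FC56), mmcr[1] -> MMCR1,
 * mmcr[2] -> MMCRA and mmcr[3] -> MMCR2, matching how the generic
 * powerpc perf core applies these values to the SPRs.
 */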

#define MAX_ALT	2

/* Table of alternatives, sorted by column 0 */
static const unsigned int event_alternatives[][MAX_ALT] = {
	{ 0x10134, 0x301e2 },		/* PM_MRK_ST_CMPL */
	{ 0x10138, 0x40138 },		/* PM_BR_MRK_2PATH */
	{ 0x18082, 0x3e05e },		/* PM_L3_CO_MEPF */
	{ 0x1d14e, 0x401e8 },		/* PM_MRK_DATA_FROM_L2MISS */
	{ 0x1e054, 0x4000a },		/* PM_CMPLU_STALL */
	{ 0x20036, 0x40036 },		/* PM_BR_2PATH */
	{ 0x200f2, 0x300f2 },		/* PM_INST_DISP */
	{ 0x200f4, 0x600f4 },		/* PM_RUN_CYC */
	{ 0x2013c, 0x3012e },		/* PM_MRK_FILT_MATCH */
	{ 0x3e054, 0x400f0 },		/* PM_LD_MISS_L1 */
	{ 0x400fa, 0x500fa },		/* PM_RUN_INST_CMPL */
};

/*
 * Scan the alternatives table for a match and return the
 * index into the alternatives table if found, else -1.
 */
static int find_alternative(u64 event)
{
	int i, j;

	for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
		if (event < event_alternatives[i][0])
			break;

		for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
			if (event == event_alternatives[i][j])
				return i;
	}

	return -1;
}

static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
	int i, j, num_alt = 0;
	u64 alt_event;

	alt[num_alt++] = event;

	i = find_alternative(event);
	if (i >= 0) {
		/* Filter out the original event, it's already in alt[0] */
		for (j = 0; j < MAX_ALT; ++j) {
			alt_event = event_alternatives[i][j];
			if (alt_event && alt_event != event)
				alt[num_alt++] = alt_event;
		}
	}

	if (flags & PPMU_ONLY_COUNT_RUN) {
		/*
		 * We're only counting in RUN state, so PM_CYC is equivalent to
		 * PM_RUN_CYC and PM_INST_CMPL === PM_RUN_INST_CMPL.
		 */
		j = num_alt;
		for (i = 0; i < num_alt; ++i) {
			switch (alt[i]) {
			case 0x1e:			/* PM_CYC */
				alt[j++] = 0x600f4;	/* PM_RUN_CYC */
				break;
			case 0x600f4:			/* PM_RUN_CYC */
				alt[j++] = 0x1e;
				break;
			case 0x2:			/* PM_PPC_CMPL */
				alt[j++] = 0x500fa;	/* PM_RUN_INST_CMPL */
				break;
			case 0x500fa:			/* PM_RUN_INST_CMPL */
				alt[j++] = 0x2;		/* PM_PPC_CMPL */
				break;
			}
		}
		num_alt = j;
	}

	return num_alt;
}
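/*
 * Example (illustrative): with PPMU_ONLY_COUNT_RUN set, asking for PM_CYC
 * (0x1e) returns alt[] = { 0x1e, 0x600f4 }, so the scheduler may place the
 * count on either PM_CYC or PM_RUN_CYC. An event like 0x200f4 additionally
 * picks up 0x600f4 through the sorted event_alternatives[] table above.
 */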

static void power8_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
	if (pmc <= 3)
		mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1));
}

PMU_FORMAT_ATTR(event,		"config:0-49");
PMU_FORMAT_ATTR(pmcxsel,	"config:0-7");
PMU_FORMAT_ATTR(mark,		"config:8");
PMU_FORMAT_ATTR(combine,	"config:11");
PMU_FORMAT_ATTR(unit,		"config:12-15");
PMU_FORMAT_ATTR(pmc,		"config:16-19");
PMU_FORMAT_ATTR(cache_sel,	"config:20-23");
PMU_FORMAT_ATTR(sample_mode,	"config:24-28");
PMU_FORMAT_ATTR(thresh_sel,	"config:29-31");
PMU_FORMAT_ATTR(thresh_stop,	"config:32-35");
PMU_FORMAT_ATTR(thresh_start,	"config:36-39");
PMU_FORMAT_ATTR(thresh_cmp,	"config:40-49");

static struct attribute *power8_pmu_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_pmcxsel.attr,
	&format_attr_mark.attr,
	&format_attr_combine.attr,
	&format_attr_unit.attr,
	&format_attr_pmc.attr,
	&format_attr_cache_sel.attr,
	&format_attr_sample_mode.attr,
	&format_attr_thresh_sel.attr,
	&format_attr_thresh_stop.attr,
	&format_attr_thresh_start.attr,
	&format_attr_thresh_cmp.attr,
	NULL,
};

struct attribute_group power8_pmu_format_group = {
	.name = "format",
	.attrs = power8_pmu_format_attr,
};

static const struct attribute_group *power8_pmu_attr_groups[] = {
	&power8_pmu_format_group,
	NULL,
};

static int power8_generic_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES] =			PM_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =	PM_GCT_NOSLOT_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =	PM_CMPLU_STALL,
	[PERF_COUNT_HW_INSTRUCTIONS] =			PM_INST_CMPL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =		PM_BRU_FIN,
	[PERF_COUNT_HW_BRANCH_MISSES] =			PM_BR_MPRED_CMPL,
	[PERF_COUNT_HW_CACHE_REFERENCES] =		PM_LD_REF_L1,
	[PERF_COUNT_HW_CACHE_MISSES] =			PM_LD_MISS_L1,
};

static u64 power8_bhrb_filter_map(u64 branch_sample_type)
{
	u64 pmu_bhrb_filter = 0;

	/* BHRB and regular PMU events share the same privilege state
	 * filter configuration. BHRB is always recorded along with a
	 * regular PMU event. As the privilege state filter is handled
	 * in the basic PMC configuration of the accompanying regular
	 * PMU event, we ignore any separate BHRB specific request.
	 */

	/* No branch filter requested */
	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
		return pmu_bhrb_filter;

	/* Invalid branch filter options - HW does not support */
	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
		pmu_bhrb_filter |= POWER8_MMCRA_IFM1;
		return pmu_bhrb_filter;
	}

	/* Everything else is unsupported */
	return -1;
}
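/*
 * Example (illustrative): perf's PERF_SAMPLE_BRANCH_ANY_CALL filter maps to
 * the hardware instruction-filtering mode bits in POWER8_MMCRA_IFM1, while
 * PERF_SAMPLE_BRANCH_ANY means "no filtering" and ANY_RETURN / IND_CALL are
 * rejected because the BHRB hardware cannot filter on them here.
 */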

static void power8_config_bhrb(u64 pmu_bhrb_filter)
{
	/* Enable BHRB filter in PMU */
	mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}

#define C(x)	PERF_COUNT_HW_CACHE_##x

/*
 * Table of generalized cache-related events.
 * 0 means not supported, -1 means nonsensical, other values
 * are event codes.
 */
static int power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	[ C(L1D) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
			[ C(RESULT_MISS) ] = PM_LD_MISS_L1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_ST_MISS_L1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_L1_PREF,
			[ C(RESULT_MISS) ] = 0,
		},
	},
	[ C(L1I) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
			[ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
			[ C(RESULT_MISS) ] = 0,
		},
	},
	[ C(LL) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
			[ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = PM_L2_ST,
			[ C(RESULT_MISS) ] = PM_L2_ST_MISS,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
			[ C(RESULT_MISS) ] = 0,
		},
	},
	[ C(DTLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_DTLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
	[ C(ITLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_ITLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
	[ C(BPU) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_BRU_FIN,
			[ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
	[ C(NODE) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
};

#undef C

static struct power_pmu power8_pmu = {
	.name			= "POWER8",
	.n_counter		= 6,
	.max_alternatives	= MAX_ALT + 1,
	.add_fields		= POWER8_ADD_FIELDS,
	.test_adder		= POWER8_TEST_ADDER,
	.compute_mmcr		= power8_compute_mmcr,
	.config_bhrb		= power8_config_bhrb,
	.bhrb_filter_map	= power8_bhrb_filter_map,
	.get_constraint		= power8_get_constraint,
	.get_alternatives	= power8_get_alternatives,
	.disable_pmc		= power8_disable_pmc,
	.flags			= PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_ARCH_207S,
	.n_generic		= ARRAY_SIZE(power8_generic_events),
	.generic_events		= power8_generic_events,
	.cache_events		= &power8_cache_events,
	.attr_groups		= power8_pmu_attr_groups,
	.bhrb_nr		= 32,
};

static int __init init_power8_pmu(void)
{
	int rc;

	if (!cur_cpu_spec->oprofile_cpu_type ||
	    strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8"))
		return -ENODEV;

	rc = register_power_pmu(&power8_pmu);
	if (rc)
		return rc;

	/* Tell userspace that EBB is supported */
	cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;

	if (cpu_has_feature(CPU_FTR_PMAO_BUG))
		pr_info("PMAO restore workaround active.\n");

	return 0;
}
early_initcall(init_power8_pmu);
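/*
 * Illustrative userspace sketch (not part of either file in this commit): it
 * shows how a raw POWER8 event can be packed into perf_event_attr.config
 * using the PMU_FORMAT_ATTR() bit ranges exported above. The value 0x500fa
 * (PM_RUN_INST_CMPL) is pmcxsel 0xfa in config:0-7 plus PMC 5 in
 * config:16-19; the perf_event_open() wrapper below is a local helper, not a
 * libc API.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	/* pmcxsel = 0xfa (config:0-7), pmc = 5 (config:16-19) => 0x500fa */
	attr.config = 0xfaULL | (5ULL << 16);

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* count this thread */
	if (fd < 0)
		perror("perf_event_open");
	else
		close(fd);
	return 0;
}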
503
arch/powerpc/perf/ppc970-pmu.c
Normal file

@@ -0,0 +1,503 @@
/*
 * Performance counter support for PPC970-family processors.
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/string.h>
#include <linux/perf_event.h>
#include <asm/reg.h>
#include <asm/cputable.h>

/*
 * Bits in event code for PPC970
 */
#define PM_PMC_SH	12	/* PMC number (1-based) for direct events */
#define PM_PMC_MSK	0xf
#define PM_UNIT_SH	8	/* TTMMUX number and setting - unit select */
#define PM_UNIT_MSK	0xf
#define PM_SPCSEL_SH	6
#define PM_SPCSEL_MSK	3
#define PM_BYTE_SH	4	/* Byte number of event bus to use */
#define PM_BYTE_MSK	3
#define PM_PMCSEL_MSK	0xf

/* Values in PM_UNIT field */
#define PM_NONE		0
#define PM_FPU		1
#define PM_VPU		2
#define PM_ISU		3
#define PM_IFU		4
#define PM_IDU		5
#define PM_STS		6
#define PM_LSU0		7
#define PM_LSU1U	8
#define PM_LSU1L	9
#define PM_LASTUNIT	9

/*
 * Bits in MMCR0 for PPC970
 */
#define MMCR0_PMC1SEL_SH	8
#define MMCR0_PMC2SEL_SH	1
#define MMCR_PMCSEL_MSK		0x1f

/*
 * Bits in MMCR1 for PPC970
 */
#define MMCR1_TTM0SEL_SH	62
#define MMCR1_TTM1SEL_SH	59
#define MMCR1_TTM3SEL_SH	53
#define MMCR1_TTMSEL_MSK	3
#define MMCR1_TD_CP_DBG0SEL_SH	50
#define MMCR1_TD_CP_DBG1SEL_SH	48
#define MMCR1_TD_CP_DBG2SEL_SH	46
#define MMCR1_TD_CP_DBG3SEL_SH	44
#define MMCR1_PMC1_ADDER_SEL_SH	39
#define MMCR1_PMC2_ADDER_SEL_SH	38
#define MMCR1_PMC6_ADDER_SEL_SH	37
#define MMCR1_PMC5_ADDER_SEL_SH	36
#define MMCR1_PMC8_ADDER_SEL_SH	35
#define MMCR1_PMC7_ADDER_SEL_SH	34
#define MMCR1_PMC3_ADDER_SEL_SH	33
#define MMCR1_PMC4_ADDER_SEL_SH	32
#define MMCR1_PMC3SEL_SH	27
#define MMCR1_PMC4SEL_SH	22
#define MMCR1_PMC5SEL_SH	17
#define MMCR1_PMC6SEL_SH	12
#define MMCR1_PMC7SEL_SH	7
#define MMCR1_PMC8SEL_SH	2

static short mmcr1_adder_bits[8] = {
	MMCR1_PMC1_ADDER_SEL_SH,
	MMCR1_PMC2_ADDER_SEL_SH,
	MMCR1_PMC3_ADDER_SEL_SH,
	MMCR1_PMC4_ADDER_SEL_SH,
	MMCR1_PMC5_ADDER_SEL_SH,
	MMCR1_PMC6_ADDER_SEL_SH,
	MMCR1_PMC7_ADDER_SEL_SH,
	MMCR1_PMC8_ADDER_SEL_SH
};

/*
 * Layout of constraint bits:
 * 6666555555555544444444443333333333222222222211111111110000000000
 * 3210987654321098765432109876543210987654321098765432109876543210
 *               <><><>[  >[  >[  ><  ><  ><  ><  ><><><><><><><><>
 *               SPT0T1 UC  PS1 PS2 B0  B1  B2  B3 P1P2P3P4P5P6P7P8
 *
 * SP - SPCSEL constraint
 *	48-49: SPCSEL value 0x3_0000_0000_0000
 *
 * T0 - TTM0 constraint
 *	46-47: TTM0SEL value (0=FPU, 2=IFU, 3=VPU) 0xC000_0000_0000
 *
 * T1 - TTM1 constraint
 *	44-45: TTM1SEL value (0=IDU, 3=STS) 0x3000_0000_0000
 *
 * UC - unit constraint: can't have all three of FPU|IFU|VPU, ISU, IDU|STS
 *	43: UC3 error 0x0800_0000_0000
 *	42: FPU|IFU|VPU events needed 0x0400_0000_0000
 *	41: ISU events needed 0x0200_0000_0000
 *	40: IDU|STS events needed 0x0100_0000_0000
 *
 * PS1
 *	39: PS1 error 0x0080_0000_0000
 *	36-38: count of events needing PMC1/2/5/6 0x0070_0000_0000
 *
 * PS2
 *	35: PS2 error 0x0008_0000_0000
 *	32-34: count of events needing PMC3/4/7/8 0x0007_0000_0000
 *
 * B0
 *	28-31: Byte 0 event source 0xf000_0000
 *	       Encoding as for the event code
 *
 * B1, B2, B3
 *	24-27, 20-23, 16-19: Byte 1, 2, 3 event sources
 *
 * P1
 *	15: P1 error 0x8000
 *	14-15: Count of events needing PMC1
 *
 * P2..P8
 *	0-13: Count of events needing PMC2..PMC8
 */

static unsigned char direct_marked_event[8] = {
	(1<<2) | (1<<3),	/* PMC1: PM_MRK_GRP_DISP, PM_MRK_ST_CMPL */
	(1<<3) | (1<<5),	/* PMC2: PM_THRESH_TIMEO, PM_MRK_BRU_FIN */
	(1<<3) | (1<<5),	/* PMC3: PM_MRK_ST_CMPL_INT, PM_MRK_VMX_FIN */
	(1<<4) | (1<<5),	/* PMC4: PM_MRK_GRP_CMPL, PM_MRK_CRU_FIN */
	(1<<4) | (1<<5),	/* PMC5: PM_GRP_MRK, PM_MRK_GRP_TIMEO */
	(1<<3) | (1<<4) | (1<<5),
		/* PMC6: PM_MRK_ST_STS, PM_MRK_FXU_FIN, PM_MRK_GRP_ISSUED */
	(1<<4) | (1<<5),	/* PMC7: PM_MRK_FPU_FIN, PM_MRK_INST_FIN */
	(1<<4)			/* PMC8: PM_MRK_LSU_FIN */
};

/*
 * Returns 1 if event counts things relating to marked instructions
 * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not.
 */
static int p970_marked_instr_event(u64 event)
{
	int pmc, psel, unit, byte, bit;
	unsigned int mask;

	pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
	psel = event & PM_PMCSEL_MSK;
	if (pmc) {
		if (direct_marked_event[pmc - 1] & (1 << psel))
			return 1;
		if (psel == 0)		/* add events */
			bit = (pmc <= 4)? pmc - 1: 8 - pmc;
		else if (psel == 7 || psel == 13)	/* decode events */
			bit = 4;
		else
			return 0;
	} else
		bit = psel;

	byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
	unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
	mask = 0;
	switch (unit) {
	case PM_VPU:
		mask = 0x4c;		/* byte 0 bits 2,3,6 */
		break;
	case PM_LSU0:
		/* byte 2 bits 0,2,3,4,6; all of byte 1 */
		mask = 0x085dff00;
		break;
	case PM_LSU1L:
		mask = 0x50 << 24;	/* byte 3 bits 4,6 */
		break;
	}
	return (mask >> (byte * 8 + bit)) & 1;
}

/* Masks and values for using events from the various units */
static unsigned long unit_cons[PM_LASTUNIT+1][2] = {
	[PM_FPU] =   { 0xc80000000000ull, 0x040000000000ull },
	[PM_VPU] =   { 0xc80000000000ull, 0xc40000000000ull },
	[PM_ISU] =   { 0x080000000000ull, 0x020000000000ull },
	[PM_IFU] =   { 0xc80000000000ull, 0x840000000000ull },
	[PM_IDU] =   { 0x380000000000ull, 0x010000000000ull },
	[PM_STS] =   { 0x380000000000ull, 0x310000000000ull },
};

static int p970_get_constraint(u64 event, unsigned long *maskp,
			       unsigned long *valp)
{
	int pmc, byte, unit, sh, spcsel;
	unsigned long mask = 0, value = 0;
	int grp = -1;

	pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
	if (pmc) {
		if (pmc > 8)
			return -1;
		sh = (pmc - 1) * 2;
		mask |= 2 << sh;
		value |= 1 << sh;
		grp = ((pmc - 1) >> 1) & 1;
	}
	unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
	if (unit) {
		if (unit > PM_LASTUNIT)
			return -1;
		mask |= unit_cons[unit][0];
		value |= unit_cons[unit][1];
		byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
		/*
		 * Bus events on bytes 0 and 2 can be counted
		 * on PMC1/2/5/6; bytes 1 and 3 on PMC3/4/7/8.
		 */
		if (!pmc)
			grp = byte & 1;
		/* Set byte lane select field */
		mask |= 0xfULL << (28 - 4 * byte);
		value |= (unsigned long)unit << (28 - 4 * byte);
	}
	if (grp == 0) {
		/* increment PMC1/2/5/6 field */
		mask |= 0x8000000000ull;
		value |= 0x1000000000ull;
	} else if (grp == 1) {
		/* increment PMC3/4/7/8 field */
		mask |= 0x800000000ull;
		value |= 0x100000000ull;
	}
	spcsel = (event >> PM_SPCSEL_SH) & PM_SPCSEL_MSK;
	if (spcsel) {
		mask |= 3ull << 48;
		value |= (unsigned long)spcsel << 48;
	}
	*maskp = mask;
	*valp = value;
	return 0;
}

static int p970_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
	alt[0] = event;

	/* 2 alternatives for LSU empty */
	if (event == 0x2002 || event == 0x3002) {
		alt[1] = event ^ 0x1000;
		return 2;
	}

	return 1;
}
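/*
 * Illustrative note: the only alternative pair on PPC970 is the LSU-empty
 * event, 0x2002 <-> 0x3002. Since the PMC number lives in bits 12-15 of the
 * event code (PM_PMC_SH), event ^ 0x1000 simply flips between the PMC2 and
 * PMC3 encodings of the same count.
 */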

static int p970_compute_mmcr(u64 event[], int n_ev,
			     unsigned int hwc[], unsigned long mmcr[], struct perf_event *pevents[])
{
	unsigned long mmcr0 = 0, mmcr1 = 0, mmcra = 0;
	unsigned int pmc, unit, byte, psel;
	unsigned int ttm, grp;
	unsigned int pmc_inuse = 0;
	unsigned int pmc_grp_use[2];
	unsigned char busbyte[4];
	unsigned char unituse[16];
	unsigned char unitmap[] = { 0, 0<<3, 3<<3, 1<<3, 2<<3, 0|4, 3|4 };
	unsigned char ttmuse[2];
	unsigned char pmcsel[8];
	int i;
	int spcsel;

	if (n_ev > 8)
		return -1;

	/* First pass to count resource use */
	pmc_grp_use[0] = pmc_grp_use[1] = 0;
	memset(busbyte, 0, sizeof(busbyte));
	memset(unituse, 0, sizeof(unituse));
	for (i = 0; i < n_ev; ++i) {
		pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
		if (pmc) {
			if (pmc_inuse & (1 << (pmc - 1)))
				return -1;
			pmc_inuse |= 1 << (pmc - 1);
			/* count 1/2/5/6 vs 3/4/7/8 use */
			++pmc_grp_use[((pmc - 1) >> 1) & 1];
		}
		unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
		byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
		if (unit) {
			if (unit > PM_LASTUNIT)
				return -1;
			if (!pmc)
				++pmc_grp_use[byte & 1];
			if (busbyte[byte] && busbyte[byte] != unit)
				return -1;
			busbyte[byte] = unit;
			unituse[unit] = 1;
		}
	}
	if (pmc_grp_use[0] > 4 || pmc_grp_use[1] > 4)
		return -1;

	/*
	 * Assign resources and set multiplexer selects.
	 *
	 * PM_ISU can go either on TTM0 or TTM1, but that's the only
	 * choice we have to deal with.
	 */
	if (unituse[PM_ISU] &
	    (unituse[PM_FPU] | unituse[PM_IFU] | unituse[PM_VPU]))
		unitmap[PM_ISU] = 2 | 4;	/* move ISU to TTM1 */
	/* Set TTM[01]SEL fields. */
	ttmuse[0] = ttmuse[1] = 0;
	for (i = PM_FPU; i <= PM_STS; ++i) {
		if (!unituse[i])
			continue;
		ttm = unitmap[i];
		++ttmuse[(ttm >> 2) & 1];
		mmcr1 |= (unsigned long)(ttm & ~4) << MMCR1_TTM1SEL_SH;
	}
	/* Check only one unit per TTMx */
	if (ttmuse[0] > 1 || ttmuse[1] > 1)
		return -1;

	/* Set byte lane select fields and TTM3SEL. */
	for (byte = 0; byte < 4; ++byte) {
		unit = busbyte[byte];
		if (!unit)
			continue;
		if (unit <= PM_STS)
			ttm = (unitmap[unit] >> 2) & 1;
		else if (unit == PM_LSU0)
			ttm = 2;
		else {
			ttm = 3;
			if (unit == PM_LSU1L && byte >= 2)
				mmcr1 |= 1ull << (MMCR1_TTM3SEL_SH + 3 - byte);
		}
		mmcr1 |= (unsigned long)ttm
			<< (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
	}

	/* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */
	memset(pmcsel, 0x8, sizeof(pmcsel));	/* 8 means don't count */
	for (i = 0; i < n_ev; ++i) {
		pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
		unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
		byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
		psel = event[i] & PM_PMCSEL_MSK;
		if (!pmc) {
			/* Bus event or any-PMC direct event */
			if (unit)
				psel |= 0x10 | ((byte & 2) << 2);
			else
				psel |= 8;
			for (pmc = 0; pmc < 8; ++pmc) {
				if (pmc_inuse & (1 << pmc))
					continue;
				grp = (pmc >> 1) & 1;
				if (unit) {
					if (grp == (byte & 1))
						break;
				} else if (pmc_grp_use[grp] < 4) {
					++pmc_grp_use[grp];
					break;
				}
			}
			pmc_inuse |= 1 << pmc;
		} else {
			/* Direct event */
			--pmc;
			if (psel == 0 && (byte & 2))
				/* add events on higher-numbered bus */
				mmcr1 |= 1ull << mmcr1_adder_bits[pmc];
		}
		pmcsel[pmc] = psel;
		hwc[i] = pmc;
		spcsel = (event[i] >> PM_SPCSEL_SH) & PM_SPCSEL_MSK;
		mmcr1 |= spcsel;
		if (p970_marked_instr_event(event[i]))
			mmcra |= MMCRA_SAMPLE_ENABLE;
	}
	for (pmc = 0; pmc < 2; ++pmc)
		mmcr0 |= pmcsel[pmc] << (MMCR0_PMC1SEL_SH - 7 * pmc);
	for (; pmc < 8; ++pmc)
		mmcr1 |= (unsigned long)pmcsel[pmc]
			<< (MMCR1_PMC3SEL_SH - 5 * (pmc - 2));
	if (pmc_inuse & 1)
		mmcr0 |= MMCR0_PMC1CE;
	if (pmc_inuse & 0xfe)
		mmcr0 |= MMCR0_PMCjCE;

	mmcra |= 0x2000;	/* mark only one IOP per PPC instruction */

	/* Return MMCRx values */
	mmcr[0] = mmcr0;
	mmcr[1] = mmcr1;
	mmcr[2] = mmcra;
	return 0;
}

static void p970_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
	int shift, i;

	if (pmc <= 1) {
		shift = MMCR0_PMC1SEL_SH - 7 * pmc;
		i = 0;
	} else {
		shift = MMCR1_PMC3SEL_SH - 5 * (pmc - 2);
		i = 1;
	}
	/*
	 * Setting the PMCxSEL field to 0x08 disables PMC x.
	 */
	mmcr[i] = (mmcr[i] & ~(0x1fUL << shift)) | (0x08UL << shift);
}
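/*
 * Example (illustrative): disabling PMC3 means pmc == 2 here (0-based), so
 * the 5-bit PMC3SEL field of MMCR1 is cleared and rewritten with the
 * selector value 0x08, which the second pass above uses to mean
 * "don't count".
 */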

static int ppc970_generic_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= 7,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 1,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x8810, /* PM_LD_REF_L1 */
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x3810, /* PM_LD_MISS_L1 */
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x431,  /* PM_BR_ISSUED */
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x327,  /* PM_GRP_BR_MPRED */
};

#define C(x)	PERF_COUNT_HW_CACHE_##x

/*
 * Table of generalized cache-related events.
 * 0 means not supported, -1 means nonsensical, other values
 * are event codes.
 */
static int ppc970_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	[C(L1D)] = {		/* 	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0x8810,		0x3810	},
		[C(OP_WRITE)] = {	0x7810,		0x813	},
		[C(OP_PREFETCH)] = {	0x731,		0	},
	},
	[C(L1I)] = {		/* 	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0,		0	},
		[C(OP_WRITE)] = {	-1,		-1	},
		[C(OP_PREFETCH)] = {	0,		0	},
	},
	[C(LL)] = {		/* 	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0,		0	},
		[C(OP_WRITE)] = {	0,		0	},
		[C(OP_PREFETCH)] = {	0x733,		0	},
	},
	[C(DTLB)] = {		/* 	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0,		0x704	},
		[C(OP_WRITE)] = {	-1,		-1	},
		[C(OP_PREFETCH)] = {	-1,		-1	},
	},
	[C(ITLB)] = {		/* 	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0,		0x700	},
		[C(OP_WRITE)] = {	-1,		-1	},
		[C(OP_PREFETCH)] = {	-1,		-1	},
	},
	[C(BPU)] = {		/* 	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0x431,		0x327	},
		[C(OP_WRITE)] = {	-1,		-1	},
		[C(OP_PREFETCH)] = {	-1,		-1	},
	},
	[C(NODE)] = {		/* 	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	-1,		-1	},
		[C(OP_WRITE)] = {	-1,		-1	},
		[C(OP_PREFETCH)] = {	-1,		-1	},
	},
};

static struct power_pmu ppc970_pmu = {
	.name			= "PPC970/FX/MP",
	.n_counter		= 8,
	.max_alternatives	= 2,
	.add_fields		= 0x001100005555ull,
	.test_adder		= 0x013300000000ull,
	.compute_mmcr		= p970_compute_mmcr,
	.get_constraint		= p970_get_constraint,
	.get_alternatives	= p970_get_alternatives,
	.disable_pmc		= p970_disable_pmc,
	.n_generic		= ARRAY_SIZE(ppc970_generic_events),
	.generic_events		= ppc970_generic_events,
	.cache_events		= &ppc970_cache_events,
	.flags			= PPMU_NO_SIPR | PPMU_NO_CONT_SAMPLING,
};

static int __init init_ppc970_pmu(void)
{
	if (!cur_cpu_spec->oprofile_cpu_type ||
	    (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/970")
	     && strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/970MP")))
		return -ENODEV;

	return register_power_pmu(&ppc970_pmu);
}

early_initcall(init_ppc970_pmu);