Fixed MTP to work with TWRP

awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

drivers/trace/Kconfig (new file, 243 lines added)

@@ -0,0 +1,243 @@
#
# Trace (debugging) driver configuration
#
menuconfig TRACE
bool "TRACE driver support"
default y
help
The "TRACE driver" is a set of drivers used for debugging
if TRACE
config EXYNOS_DRAMTEST
bool "Exynos DRAMTEST driver"
help
This driver is used to test DRAM: it writes the whole 2MB region with the data value 1.
config EXYNOS_ITM
bool "Exynos IPs Traffic Monitor support - (logging / timeout)"
default y
depends on ARCH_EXYNOS
menuconfig EXYNOS_SNAPSHOT
bool "Exynos SnapShot(ESS) support for debugging"
default y
depends on ARCH_EXYNOS
config EXYNOS_SNAPSHOT_CALLSTACK
int "shown callstack level"
depends on EXYNOS_SNAPSHOT
range 1 4
default 4
config EXYNOS_SNAPSHOT_KEVENT_OFF
bool "Turn off the kevent trace of Exynos SnapShot"
depends on EXYNOS_SNAPSHOT
default n
config EXYNOS_SNAPSHOT_BASIC_KEVENT
bool "Enable debugging of schedule, workqueue, cpuidle, suspend, interrupt enter"
depends on EXYNOS_SNAPSHOT && !EXYNOS_SNAPSHOT_MINIMIZED_MODE && !EXYNOS_SNAPSHOT_KEVENT_OFF
default y
config EXYNOS_SNAPSHOT_TRACEPRINT
bool "Enable Exynos Trace Printk"
depends on EXYNOS_SNAPSHOT && !EXYNOS_SNAPSHOT_MINIMIZED_MODE && !EXYNOS_SNAPSHOT_KEVENT_OFF
default y
config EXYNOS_SNAPSHOT_IRQ_EXIT
bool "Enable debugging of interrupt exit event by kevent"
depends on EXYNOS_SNAPSHOT && !EXYNOS_SNAPSHOT_MINIMIZED_MODE && !EXYNOS_SNAPSHOT_KEVENT_OFF
default y
config EXYNOS_SNAPSHOT_IRQ_EXIT_THRESHOLD
int "threshold of detection(microsecond)"
depends on EXYNOS_SNAPSHOT_IRQ_EXIT && !EXYNOS_SNAPSHOT_MINIMIZED_MODE
range 0 1000000
default 5000
config EXYNOS_SNAPSHOT_IRQ_DISABLED
bool "Enable debugging of interrupt disable event by kevent(EXPERIMENTAL)"
depends on EXYNOS_SNAPSHOT && !EXYNOS_SNAPSHOT_MINIMIZED_MODE && !EXYNOS_SNAPSHOT_KEVENT_OFF
default n
config EXYNOS_SNAPSHOT_SPINLOCK
bool "Enable debugging of spinlock event by kevent(EXPERIMENTAL)"
depends on EXYNOS_SNAPSHOT && DEBUG_SPINLOCK && !EXYNOS_SNAPSHOT_MINIMIZED_MODE && !EXYNOS_SNAPSHOT_KEVENT_OFF
default y
config EXYNOS_SNAPSHOT_CLK
bool "Enable debugging of clock event by kevent(EXPERIMENTAL)"
depends on EXYNOS_SNAPSHOT && !EXYNOS_SNAPSHOT_MINIMIZED_MODE && !EXYNOS_SNAPSHOT_KEVENT_OFF
default y
config EXYNOS_SNAPSHOT_FREQ
bool "Enable debugging of frequency event by kevent(EXPERIMENTAL)"
depends on EXYNOS_SNAPSHOT && PM_DEVFREQ && !EXYNOS_SNAPSHOT_MINIMIZED_MODE && !EXYNOS_SNAPSHOT_KEVENT_OFF
default y
config EXYNOS_SNAPSHOT_HRTIMER
bool "Enable debugging of hrtimer by kevent"
depends on EXYNOS_SNAPSHOT && !EXYNOS_SNAPSHOT_MINIMIZED_MODE && !EXYNOS_SNAPSHOT_KEVENT_OFF
default n
config EXYNOS_SNAPSHOT_REG
bool "Enable debugging of accessing register by kevent"
depends on EXYNOS_SNAPSHOT && !EXYNOS_SNAPSHOT_MINIMIZED_MODE && !EXYNOS_SNAPSHOT_KEVENT_OFF
default n
config EXYNOS_SNAPSHOT_REGULATOR
bool "Enable debugging of regulator and pmic driver"
depends on EXYNOS_SNAPSHOT && !EXYNOS_SNAPSHOT_MINIMIZED_MODE && !EXYNOS_SNAPSHOT_KEVENT_OFF
default y
config EXYNOS_SNAPSHOT_ACPM
bool "Enable debugging of acpm framework"
depends on EXYNOS_SNAPSHOT && !EXYNOS_SNAPSHOT_MINIMIZED_MODE && !EXYNOS_SNAPSHOT_KEVENT_OFF
default y
config EXYNOS_SNAPSHOT_THERMAL
bool "Enable debugging of thermal driver"
depends on EXYNOS_SNAPSHOT && !EXYNOS_SNAPSHOT_MINIMIZED_MODE && EXYNOS_THERMAL && !EXYNOS_SNAPSHOT_KEVENT_OFF
default y
config EXYNOS_SNAPSHOT_MBOX
bool "Enable debugging of mailbox for transferring with APM"
depends on EXYNOS_SNAPSHOT && EXYNOS_MBOX && !EXYNOS_SNAPSHOT_MINIMIZED_MODE && !EXYNOS_SNAPSHOT_KEVENT_OFF
default y
config EXYNOS_SNAPSHOT_I2C
bool "Enable debugging of i2c driver"
depends on EXYNOS_SNAPSHOT && !EXYNOS_SNAPSHOT_MINIMIZED_MODE
default y
config EXYNOS_SNAPSHOT_SPI
bool "Enable debugging of spi driver"
depends on EXYNOS_SNAPSHOT && !EXYNOS_SNAPSHOT_MINIMIZED_MODE
default y
config EXYNOS_SNAPSHOT_PSTORE
bool "Enable supporting ramoops driver for pstore filesystem"
depends on EXYNOS_SNAPSHOT
select PSTORE
select PSTORE_RAM
select PSTORE_PMSG
default n
config EXYNOS_SNAPSHOT_HOOK_LOGGER
bool "Enable hooking android platform log for ramdump"
depends on EXYNOS_SNAPSHOT
default y
config EXYNOS_SNAPSHOT_PANIC_REBOOT
bool "Enable forced reboot after panic for ramdump"
depends on EXYNOS_SNAPSHOT
default y
config EXYNOS_SNAPSHOT_WATCHDOG_RESET
bool "Support watchdog reset when hardlockup detect"
depends on EXYNOS_SNAPSHOT && EXYNOS_SNAPSHOT_PANIC_REBOOT
default y
config EXYNOS_SNAPSHOT_CRASH_KEY
bool "Support Crash Key to artificial panic for debugging"
depends on EXYNOS_SNAPSHOT && KEYBOARD_GPIO
default y
config EXYNOS_SNAPSHOT_SFRDUMP
bool "Support dump of sfr region for debugging"
depends on EXYNOS_SNAPSHOT && !EXYNOS_SNAPSHOT_MINIMIZED_MODE
default y
config EXYNOS_SNAPSHOT_MINIMIZED_MODE
bool "Support minimized feature configuration"
depends on EXYNOS_SNAPSHOT
default n
config EXYNOS_SNAPSHOT_LOGGING_SMC_CALL
bool "Support logging SMC call"
depends on EXYNOS_SNAPSHOT
default y
menuconfig EXYNOS_CORESIGHT
bool "Enable Coresight of exynos"
depends on ARCH_EXYNOS
default n
config EXYNOS_CORESIGHT_PC_INFO
bool "Show Program Counter information"
depends on EXYNOS_CORESIGHT
default y
help
Show Program Counter information when the kernel panics.
config PC_ITERATION
int "Number of Program Counter iterations"
depends on EXYNOS_CORESIGHT_PC_INFO
range 1 10
default 5
help
Print out the Program Counter this many times.
config EXYNOS_CORESIGHT_MAINTAIN_DBG_REG
bool "Save/Restore Debug Registers"
depends on EXYNOS_CORESIGHT
default n
help
When core power goes down, the debug registers lose their data
and the OS lock is set. To preserve the data, the debug register
state must be saved to and restored from memory. While the OS lock
is set, debugging with Trace32 is impossible, so the OS lock must
be cleared when core power comes back up. Enable this option only
when using the iTSP program.
menuconfig EXYNOS_CORESIGHT_ETM
bool "Enable ETM driver of exynos"
depends on ARCH_EXYNOS
default n
help
The ETM driver is used for debugging. It also enables the ETR part
of the CoreSight feature.
config EXYNOS_CORESIGHT_ETB
bool "This system has ETB feature"
depends on EXYNOS_CORESIGHT_ETM && SOC_EXYNOS7580
default n
help
The ETB driver is the sink part of the CoreSight components. Some
CoreSight implementations have an ETR.
config EXYNOS_CORESIGHT_ETF
bool "This system has ETF feature"
depends on EXYNOS_CORESIGHT_ETM
default n
help
The ETF driver is the sink part of the CoreSight components. Some
CoreSight implementations have an ETR.
config EXYNOS_CORESIGHT_ETR
bool "This system has ETR feature"
depends on EXYNOS_CORESIGHT_ETF && EXYNOS_SNAPSHOT
default n
help
The ETR driver is used for debugging and is the sink part of the
CoreSight feature. CoreSight consists of source, link, and sink
parts; the ETR routes trace data to DRAM or USB.
config EXYNOS_CORESIGHT_ETM_SYSFS
bool "This system has ETM Sysfs feature"
depends on EXYNOS_CORESIGHT_ETM
default n
help
Show ETM, FUNNEL, ETB, ETF and ETR status through sysfs.
config EXYNOS_CORESIGHT_STM
bool "Enable STM driver of exynos"
depends on ARCH_EXYNOS
default n
help
The STM driver is used for H/W-event and S/W-event debugging.
endif
#

drivers/trace/Makefile (new file, 10 lines added)

@@ -0,0 +1,10 @@
#
# Makefile for trace.
#
obj-$(CONFIG_EXYNOS_ITM) += exynos7570-itm.o
obj-$(CONFIG_EXYNOS_SNAPSHOT) += exynos-ss.o
obj-$(CONFIG_EXYNOS_CORESIGHT) += exynos-coresight.o
obj-$(CONFIG_EXYNOS_CORESIGHT_ETM) += exynos-coresight-etm.o
obj-$(CONFIG_EXYNOS_CORESIGHT_STM) += of_coresight.o coresight.o coresight-stm.o
obj-$(CONFIG_EXYNOS_DRAMTEST) += dramtest.o


@@ -0,0 +1,53 @@
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _CORESIGHT_PRIV_H
#define _CORESIGHT_PRIV_H
#include <linux/bitops.h>
/*
* Coresight management registers (0xF00-0xFCC)
* 0xFA0 - 0xFA4: Management registers in PFTv1.0
* Trace registers in PFTv1.1
*/
#define CORESIGHT_ITCTRL (0xF00)
#define CORESIGHT_CLAIMSET (0xFA0)
#define CORESIGHT_CLAIMCLR (0xFA4)
#define CORESIGHT_LAR (0xFB0)
#define CORESIGHT_LSR (0xFB4)
#define CORESIGHT_AUTHSTATUS (0xFB8)
#define CORESIGHT_DEVID (0xFC8)
#define CORESIGHT_DEVTYPE (0xFCC)
#define CORESIGHT_UNLOCK (0xC5ACCE55)
#define TIMEOUT_US (100)
#define BM(lsb, msb) ((BIT(msb) - BIT(lsb)) + BIT(msb))
#define BMVAL(val, lsb, msb) ((val & BM(lsb, msb)) >> lsb)
#define BVAL(val, n) ((val & BIT(n)) >> n)
#ifdef CONFIG_CORESIGHT_SOURCE_ETM
extern unsigned int etm_readl_cp14(uint32_t off);
extern void etm_writel_cp14(uint32_t val, uint32_t off);
#else
static inline unsigned int etm_readl_cp14(uint32_t off) { return 0; }
static inline void etm_writel_cp14(uint32_t val, uint32_t off) {}
#endif
#ifdef CONFIG_EXYNOS_CORESIGHT_ETB
extern void exynos_etb_etm(void);
extern void exynos_etb_stm(void);
#endif
#endif
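/*
 * Illustrative sketch (not part of the original file): how a caller can use
 * the BM()/BMVAL() helpers above to extract a bit field from a register
 * value. Standalone userspace C; the DEVID value below is made up.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_BIT(n)               (1U << (n))
#define EX_BM(lsb, msb)         ((EX_BIT(msb) - EX_BIT(lsb)) + EX_BIT(msb))
#define EX_BMVAL(val, lsb, msb) (((val) & EX_BM(lsb, msb)) >> (lsb))

int main(void)
{
	uint32_t devid = 0x00010023;	/* hypothetical CORESIGHT_DEVID readout */

	printf("bits [7:0]   = 0x%x\n", EX_BMVAL(devid, 0, 7));   /* 0x23 */
	printf("bits [19:16] = 0x%x\n", EX_BMVAL(devid, 16, 19)); /* 0x1  */
	return 0;
}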


@@ -0,0 +1,894 @@
/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/bitmap.h>
#include <linux/coresight.h>
#include <linux/coresight-stm.h>
#include <asm/unaligned.h>
#include "of_coresight.h"
#include "coresight-priv.h"
#define stm_writel(drvdata, val, off) __raw_writel((val), drvdata->base + off)
#define stm_readl(drvdata, off) __raw_readl(drvdata->base + off)
#define txa_writel(drvdata, val, off) __raw_writel((val), drvdata->txa.base + off)
#define txa_readl(drvdata, off) __raw_readl(drvdata->txa.base + off)
#define STM_LOCK(drvdata) \
do { \
mb(); \
stm_writel(drvdata, 0x0, CORESIGHT_LAR); \
} while (0)
#define STM_UNLOCK(drvdata) \
do { \
stm_writel(drvdata, CORESIGHT_UNLOCK, CORESIGHT_LAR); \
mb(); \
} while (0)
#define STMDMASTARTR (0xC04)
#define STMDMASTOPR (0xC08)
#define STMDMASTATR (0xC0C)
#define STMDMACTLR (0xC10)
#define STMDMAIDR (0xCFC)
#define STMHEER (0xD00)
#define STMHETER (0xD20)
#define STMHEMCR (0xD64)
#define STMHEMASTR (0xDF4)
#define STMHEFEAT1R (0xDF8)
#define STMHEIDR (0xDFC)
#define STMSPER (0xE00)
#define STMSPTER (0xE20)
#define STMSPSCR (0xE60)
#define STMSPMSCR (0xE64)
#define STMSPOVERRIDER (0xE68)
#define STMSPMOVERRIDER (0xE6C)
#define STMSPTRIGCSR (0xE70)
#define STMTCSR (0xE80)
#define STMTSSTIMR (0xE84)
#define STMTSFREQR (0xE8C)
#define STMSYNCR (0xE90)
#define STMAUXCR (0xE94)
#define STMSPFEAT1R (0xEA0)
#define STMSPFEAT2R (0xEA4)
#define STMSPFEAT3R (0xEA8)
#define STMITTRIGGER (0xEE8)
#define STMITATBDATA0 (0xEEC)
#define STMITATBCTR2 (0xEF0)
#define STMITATBID (0xEF4)
#define STMITATBCTR0 (0xEF8)
#define NR_STM_CHANNEL (32)
#define BYTES_PER_CHANNEL (256)
#define STM_TRACE_BUF_SIZE (4096)
#define STM_USERSPACE_HEADER_SIZE (8)
#define STM_USERSPACE_MAGIC1_VAL (0xf0)
#define STM_USERSPACE_MAGIC2_VAL (0xf1)
#define OST_START_TOKEN (0x30)
#define OST_VERSION (0x1)
#define STM_TXOR_CNTRL (0x0)
#define STM_TXOR_TARGET_BADDR (0x4)
#define STM_TXOR_EVENT_CONF0 (0x8)
#define STM_TXOR_EVENT_CONF1 (0xC)
#define STM_TXOR_EVENT_CONF2 (0x10)
#define STM_TXOR_EVENT_CONF3 (0x14)
#define STM_TXOR_STATUS0 (0x18)
#define STM_TXOR_STATUS1 (0x1C)
#define STM_TXOR_STATUS2 (0x20)
#define STM_TXOR_STATUS3 (0x24)
#define STM_TXOR_TX_CTR (0x28)
#define STM_FIFO_LEVEL (0x2C)
#define STM_B_COUNT_ZERO (0x34)
#define STM_TXOR_INT_SEL (0x38)
#define STM_TXOR_TRACE_INT (0x3C)
#define STM_TXOR_TRACE_FINT (0x40)
enum stm_pkt_type {
STM_PKT_TYPE_DATA = 0x98,
STM_PKT_TYPE_FLAG = 0xE8,
STM_PKT_TYPE_TRIG = 0xF8,
};
enum {
STM_OPTION_MARKED = 0x10,
};
#define stm_channel_addr(drvdata, ch) (drvdata->chs.base + \
(ch * BYTES_PER_CHANNEL))
#define stm_channel_off(type, opts) (type & ~opts)
#ifdef CONFIG_EXYNOS_CORESIGHT_STM_DEFAULT_ENABLE
static int boot_enable = 1;
#else
static int boot_enable;
#endif
module_param_named(
boot_enable, boot_enable, int, S_IRUGO
);
static int boot_nr_channel;
module_param_named(
boot_nr_channel, boot_nr_channel, int, S_IRUGO
);
struct channel_space {
void __iomem *base;
unsigned long *bitmap;
};
struct txa_space {
void __iomem *base;
unsigned long *bitmap;
};
struct stm_drvdata {
void __iomem *base;
struct device *dev;
struct coresight_device *csdev;
struct miscdevice miscdev;
struct clk *clk;
spinlock_t spinlock;
struct channel_space chs;
struct txa_space txa;
uint32_t hwevt_stm_port;
uint32_t hwevt_mode;
bool enable;
DECLARE_BITMAP(entities, OST_ENTITY_MAX);
};
static struct stm_drvdata *stmdrvdata;
static void __stm_hwevent_enable(struct stm_drvdata *drvdata)
{
STM_UNLOCK(drvdata);
/* Transactor enable */
txa_writel(drvdata, drvdata->hwevt_stm_port, STM_TXOR_TARGET_BADDR);
/* initial value: interrupt group 0, all-event mode */
txa_writel(drvdata, 0x0, STM_TXOR_INT_SEL);
txa_writel(drvdata, 10, STM_TXOR_TRACE_INT);
txa_writel(drvdata, 0x1, STM_TXOR_CNTRL);
STM_LOCK(drvdata);
}
static void __stm_hwevt_mode(struct stm_drvdata *drvdata)
{
STM_UNLOCK(drvdata);
txa_writel(drvdata, ((drvdata->hwevt_mode)>>31)&0x1, STM_TXOR_INT_SEL);
txa_writel(drvdata, drvdata->hwevt_mode, STM_TXOR_TRACE_INT);
STM_LOCK(drvdata);
}
static unsigned long stm_hwevt_mode_read(struct stm_drvdata *drvdata)
{
int ret = 0;
spin_lock(&drvdata->spinlock);
STM_UNLOCK(drvdata);
if (drvdata->enable) {
ret = txa_readl(drvdata, STM_TXOR_INT_SEL) << 31;
ret |= txa_readl(drvdata, STM_TXOR_TRACE_INT);
}
STM_LOCK(drvdata);
spin_unlock(&drvdata->spinlock);
return ret;
}
static int stm_hwevt_mode(struct stm_drvdata *drvdata)
{
int ret = 0;
spin_lock(&drvdata->spinlock);
if (drvdata->enable)
__stm_hwevt_mode(drvdata);
else
ret = -EINVAL;
spin_unlock(&drvdata->spinlock);
return ret;
}
static int stm_port_isenable(struct stm_drvdata *drvdata)
{
int ret = 0;
spin_lock(&drvdata->spinlock);
STM_UNLOCK(drvdata);
if (drvdata->enable)
ret = stm_readl(drvdata, STMSPER) == 0 ? 0 : 1;
STM_LOCK(drvdata);
spin_unlock(&drvdata->spinlock);
return ret;
}
static void __stm_port_enable(struct stm_drvdata *drvdata)
{
STM_UNLOCK(drvdata);
stm_writel(drvdata, 0x10, STMSPTRIGCSR);
stm_writel(drvdata, 0xFFFFFFFF, STMSPER);
STM_LOCK(drvdata);
}
static int stm_port_enable(struct stm_drvdata *drvdata)
{
int ret = 0;
spin_lock(&drvdata->spinlock);
if (drvdata->enable)
__stm_port_enable(drvdata);
else
ret = -EINVAL;
spin_unlock(&drvdata->spinlock);
return ret;
}
static void __stm_enable(struct stm_drvdata *drvdata)
{
__stm_port_enable(drvdata);
STM_UNLOCK(drvdata);
stm_writel(drvdata, 0xFFF, STMSYNCR);
/* SYNCEN is read-only and HWTEN is not implemented */
stm_writel(drvdata, 0x100003, STMTCSR);
__stm_hwevent_enable(drvdata);
STM_LOCK(drvdata);
}
static int stm_enable(struct coresight_device *csdev)
{
struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
exynos_etb_stm();
spin_lock(&drvdata->spinlock);
__stm_enable(drvdata);
drvdata->enable = true;
spin_unlock(&drvdata->spinlock);
dev_info(drvdata->dev, "STM tracing enabled\n");
return 0;
}
static void __stm_hwevent_disable(struct stm_drvdata *drvdata)
{
STM_UNLOCK(drvdata);
stm_writel(drvdata, 0x0, STMHEMCR);
stm_writel(drvdata, 0x0, STMHEER);
stm_writel(drvdata, 0x0, STMHETER);
STM_LOCK(drvdata);
}
static void __stm_port_disable(struct stm_drvdata *drvdata)
{
STM_UNLOCK(drvdata);
stm_writel(drvdata, 0x0, STMSPER);
stm_writel(drvdata, 0x0, STMSPTRIGCSR);
STM_LOCK(drvdata);
}
static void stm_port_disable(struct stm_drvdata *drvdata)
{
spin_lock(&drvdata->spinlock);
if (drvdata->enable)
__stm_port_disable(drvdata);
spin_unlock(&drvdata->spinlock);
}
static void __stm_disable(struct stm_drvdata *drvdata)
{
STM_UNLOCK(drvdata);
stm_writel(drvdata, 0x100000, STMTCSR);
STM_LOCK(drvdata);
__stm_hwevent_disable(drvdata);
__stm_port_disable(drvdata);
exynos_etb_etm();
}
static void stm_disable(struct coresight_device *csdev)
{
struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
spin_lock(&drvdata->spinlock);
__stm_disable(drvdata);
drvdata->enable = false;
spin_unlock(&drvdata->spinlock);
/* Wait for 100ms so that pending data has been written to HW */
msleep(100);
dev_info(drvdata->dev, "STM tracing disabled\n");
}
static const struct coresight_ops_source stm_source_ops = {
.enable = stm_enable,
.disable = stm_disable,
};
static const struct coresight_ops stm_cs_ops = {
.source_ops = &stm_source_ops,
};
static uint32_t stm_channel_alloc(uint32_t off)
{
struct stm_drvdata *drvdata = stmdrvdata;
uint32_t ch;
do {
ch = find_next_zero_bit(drvdata->chs.bitmap,
NR_STM_CHANNEL, off);
} while ((ch < NR_STM_CHANNEL) &&
test_and_set_bit(ch, drvdata->chs.bitmap));
return ch;
}
static void stm_channel_free(uint32_t ch)
{
struct stm_drvdata *drvdata = stmdrvdata;
clear_bit(ch, drvdata->chs.bitmap);
}
static int stm_send(void *addr, const void *data, uint32_t size)
{
uint64_t prepad = 0;
uint64_t postpad = 0;
char *pad;
uint8_t off, endoff;
uint32_t len = size;
/*
* Only 64bit writes are supported. We rely on the compiler to
* generate STRD instruction for the casted 64bit assignments.
*/
off = (unsigned long)data & 0x7;
if (off) {
endoff = 8 - off;
pad = (char *)&prepad;
pad += off;
while (endoff && size) {
*pad++ = *(char *)data++;
endoff--;
size--;
}
*(volatile uint64_t __force *)addr = prepad;
}
/* Now we are 64bit aligned */
while (size >= 8) {
*(volatile uint64_t __force *)addr = *(uint64_t *)data;
data += 8;
size -= 8;
}
if (size) {
pad = (char *)&postpad;
while (size) {
*pad++ = *(char *)data++;
size--;
}
*(volatile uint64_t __force *)addr = postpad;
}
return roundup(len + off, 8);
}
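/*
 * Illustrative sketch (not part of the original file): the return value of
 * stm_send() above is the payload length plus the leading misalignment,
 * rounded up to whole 64-bit beats. Standalone userspace C showing that
 * arithmetic only, with no MMIO.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t example_stm_send_len(uintptr_t data, uint32_t size)
{
	uint32_t off = data & 0x7;		/* misalignment of the source buffer */

	return ((size + off + 7) / 8) * 8;	/* same as roundup(len + off, 8) */
}

int main(void)
{
	/* 5 bytes starting 3 bytes into a doubleword: a single prepad beat. */
	printf("%u\n", example_stm_send_len(3, 5));	/* prints 8  */
	/* 10 aligned bytes: one full beat plus one postpad beat. */
	printf("%u\n", example_stm_send_len(0, 10));	/* prints 16 */
	return 0;
}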
static int stm_trace_ost_header(unsigned long ch_addr, uint32_t options,
uint8_t entity_id, uint8_t proto_id,
const void *payload_data, uint32_t payload_size)
{
void *addr;
uint8_t prepad_size;
uint64_t header;
char *hdr;
hdr = (char *)&header;
hdr[0] = OST_START_TOKEN;
hdr[1] = OST_VERSION;
hdr[2] = entity_id;
hdr[3] = proto_id;
prepad_size = (unsigned long)payload_data & 0x7;
*(uint32_t *)(hdr + 4) = (prepad_size << 24) | payload_size;
/* For 64bit writes, header is expected to be of the D32M, D32M */
options |= STM_OPTION_MARKED;
options &= ~STM_OPTION_TIMESTAMPED;
addr = (void *)(ch_addr | stm_channel_off(STM_PKT_TYPE_DATA, options));
return stm_send(addr, &header, sizeof(header));
}
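/*
 * Illustrative sketch (not part of the original file): byte layout of the
 * 64-bit OST header built by stm_trace_ost_header() above, assuming a
 * made-up entity id 0x20, protocol id 0x01, a 16-byte payload and an
 * already 8-byte-aligned payload pointer (prepad_size == 0). Standalone
 * userspace C.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t header = 0;
	char *hdr = (char *)&header;
	uint32_t size_word = (0 << 24) | 16;	/* prepad_size << 24 | payload_size */

	hdr[0] = 0x30;				/* OST_START_TOKEN */
	hdr[1] = 0x1;				/* OST_VERSION */
	hdr[2] = 0x20;				/* entity_id (made up) */
	hdr[3] = 0x01;				/* proto_id  (made up) */
	memcpy(hdr + 4, &size_word, sizeof(size_word));

	/* On a little-endian machine this prints 0x0000001001200130. */
	printf("header = 0x%016llx\n", (unsigned long long)header);
	return 0;
}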
static int stm_trace_data(unsigned long ch_addr, uint32_t options,
const void *data, uint32_t size)
{
void *addr;
options &= ~STM_OPTION_TIMESTAMPED;
addr = (void *)(ch_addr | stm_channel_off(STM_PKT_TYPE_DATA, options));
return stm_send(addr, data, size);
}
static int stm_trace_ost_tail(unsigned long ch_addr, uint32_t options)
{
void *addr;
uint64_t tail = 0x0;
addr = (void *)(ch_addr | stm_channel_off(STM_PKT_TYPE_FLAG, options));
return stm_send(addr, &tail, sizeof(tail));
}
static inline int __stm_trace(uint32_t options, uint8_t entity_id,
uint8_t proto_id, const void *data, uint32_t size)
{
struct stm_drvdata *drvdata = stmdrvdata;
int len = 0;
uint32_t ch;
unsigned long ch_addr;
/* Allocate channel and get the channel address */
ch = stm_channel_alloc(0);
ch_addr = (unsigned long)stm_channel_addr(drvdata, ch);
/* Send the ost header */
len += stm_trace_ost_header(ch_addr, options, entity_id, proto_id, data,
size);
/* Send the payload data */
len += stm_trace_data(ch_addr, options, data, size);
/* Send the ost tail */
len += stm_trace_ost_tail(ch_addr, options);
/* We are done, free the channel */
stm_channel_free(ch);
return len;
}
/**
* stm_trace - trace the binary or string data through STM
* @options: tracing options - guaranteed, timestamped, etc
* @entity_id: entity representing the trace data
* @proto_id: protocol id to distinguish between different binary formats
* @data: pointer to binary or string data buffer
* @size: size of data to send
*
* Packetizes the data as the payload to an OST packet and sends it over STM
*
* CONTEXT:
* Can be called from any context.
*
* RETURNS:
* number of bytes transferred over STM
*/
int stm_trace(uint32_t options, uint8_t entity_id, uint8_t proto_id,
const void *data, uint32_t size)
{
struct stm_drvdata *drvdata = stmdrvdata;
/* We don't support sizes more than 24bits (0 to 23) */
if (!(drvdata && drvdata->enable &&
test_bit(entity_id, drvdata->entities) && size &&
(size < 0x1000000)))
return 0;
return __stm_trace(options, entity_id, proto_id, data, size);
}
EXPORT_SYMBOL_GPL(stm_trace);
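/*
 * Illustrative sketch (not part of the original file): how another kernel
 * driver might call the exported stm_trace() API documented above. The
 * entity and options constants are assumed to come from
 * <linux/coresight-stm.h>, as used elsewhere in this file; MY_PROTO_ID is
 * a made-up protocol id owned by the caller.
 */
#include <linux/coresight-stm.h>
#include <linux/string.h>

#define MY_PROTO_ID	0x42

static void my_driver_stm_log(const char *msg)
{
	/* Returns the number of bytes pushed over STM, or 0 when STM is
	 * disabled or the entity is filtered out; ignored here. */
	stm_trace(STM_OPTION_TIMESTAMPED, OST_ENTITY_DEV_NODE,
		  MY_PROTO_ID, msg, strlen(msg) + 1);
}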
static ssize_t stm_write(struct file *file, const char __user *data,
size_t size, loff_t *ppos)
{
struct stm_drvdata *drvdata = container_of(file->private_data,
struct stm_drvdata, miscdev);
char *buf;
uint8_t entity_id, proto_id;
uint32_t options;
if (!drvdata->enable || !size)
return -EINVAL;
if (size > STM_TRACE_BUF_SIZE)
size = STM_TRACE_BUF_SIZE;
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
if (copy_from_user(buf, data, size)) {
kfree(buf);
dev_dbg(drvdata->dev, "%s: copy_from_user failed\n", __func__);
return -EFAULT;
}
if (size >= STM_USERSPACE_HEADER_SIZE &&
buf[0] == STM_USERSPACE_MAGIC1_VAL &&
buf[1] == STM_USERSPACE_MAGIC2_VAL) {
entity_id = buf[2];
proto_id = buf[3];
options = *(uint32_t *)(buf + 4);
if (!test_bit(entity_id, drvdata->entities) ||
!(size - STM_USERSPACE_HEADER_SIZE)) {
kfree(buf);
return size;
}
__stm_trace(options, entity_id, proto_id,
buf + STM_USERSPACE_HEADER_SIZE,
size - STM_USERSPACE_HEADER_SIZE);
} else {
if (!test_bit(OST_ENTITY_DEV_NODE, drvdata->entities)) {
kfree(buf);
return size;
}
__stm_trace(STM_OPTION_TIMESTAMPED, OST_ENTITY_DEV_NODE, 0,
buf, size);
}
kfree(buf);
return size;
}
static const struct file_operations stm_fops = {
.owner = THIS_MODULE,
.open = nonseekable_open,
.write = stm_write,
.llseek = no_llseek,
};
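/*
 * Illustrative sketch (not part of the original file): a userspace writer
 * for the misc device served by stm_write() above, using the optional
 * 8-byte header (magic1 0xf0, magic2 0xf1, entity, proto, 32-bit options).
 * The device node name comes from the CoreSight platform data, so
 * "/dev/stm" below is only a placeholder, and entity id 0x20 is made up
 * (it must be below OST_ENTITY_MAX and set in the 'entities' bitmap).
 */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	uint8_t buf[8 + 16] = { 0xf0, 0xf1, 0x20, 0x01 };
	uint32_t options = 0;			/* no special options */
	int fd;

	memcpy(buf + 4, &options, sizeof(options));
	memcpy(buf + 8, "hello from user\n", 16);

	fd = open("/dev/stm", O_WRONLY);	/* placeholder node name */
	if (fd < 0)
		return 1;
	if (write(fd, buf, sizeof(buf)) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}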
static ssize_t stm_show_stm_en(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val = stm_port_isenable(drvdata);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t stm_store_stm_en(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
uint32_t val;
int ret = 0;
if (sscanf(buf, "%x", &val) != 1)
return -EINVAL;
if (val) {
exynos_etb_stm();
spin_lock(&drvdata->spinlock);
__stm_enable(drvdata);
drvdata->enable = true;
spin_unlock(&drvdata->spinlock);
} else {
spin_lock(&drvdata->spinlock);
__stm_disable(drvdata);
drvdata->enable = false;
spin_unlock(&drvdata->spinlock);
/* Wait for 100ms so that pending data has been written to HW */
msleep(100);
}
if (ret)
return ret;
return size;
}
static DEVICE_ATTR(stm_enable, S_IRUGO | S_IWUSR, stm_show_stm_en,
stm_store_stm_en);
static ssize_t stm_show_port_enable(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val = stm_port_isenable(drvdata);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t stm_store_port_enable(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
int ret = 0;
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
if (val)
ret = stm_port_enable(drvdata);
else
stm_port_disable(drvdata);
if (ret)
return ret;
return size;
}
static DEVICE_ATTR(port_enable, S_IRUGO | S_IWUSR, stm_show_port_enable,
stm_store_port_enable);
static ssize_t stm_show_entities(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
ssize_t len;
len = bitmap_scnprintf(buf, PAGE_SIZE, drvdata->entities,
OST_ENTITY_MAX);
if (PAGE_SIZE - len < 2)
len = -EINVAL;
else
len += scnprintf(buf + len, 2, "\n");
return len;
}
static ssize_t stm_store_entities(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val1, val2;
if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
return -EINVAL;
if (val1 >= OST_ENTITY_MAX)
return -EINVAL;
if (val2)
__set_bit(val1, drvdata->entities);
else
__clear_bit(val1, drvdata->entities);
return size;
}
static DEVICE_ATTR(entities, S_IRUGO | S_IWUSR, stm_show_entities,
stm_store_entities);
static ssize_t stm_show_hwevt_mode(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
uint32_t val = stm_hwevt_mode_read(drvdata);
return scnprintf(buf, PAGE_SIZE, "%x\n", val);
}
static ssize_t stm_store_hwevt_mode(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
uint32_t val;
int ret = 0;
sscanf(buf, "%x", &val);
drvdata->hwevt_mode = val;
if (val)
ret = stm_hwevt_mode(drvdata);
else
stm_hwevt_mode(drvdata);
if (ret)
return ret;
return size;
}
static DEVICE_ATTR(hwevt_mode, S_IRUGO | S_IWUSR, stm_show_hwevt_mode,
stm_store_hwevt_mode);
static struct attribute *stm_attrs[] = {
&dev_attr_stm_enable.attr,
&dev_attr_port_enable.attr,
&dev_attr_entities.attr,
&dev_attr_hwevt_mode.attr,
NULL,
};
static struct attribute_group stm_attr_grp = {
.attrs = stm_attrs,
};
static const struct attribute_group *stm_attr_grps[] = {
&stm_attr_grp,
NULL,
};
static int __devinit stm_probe(struct platform_device *pdev)
{
int ret;
struct device *dev = &pdev->dev;
struct coresight_platform_data *pdata;
struct stm_drvdata *drvdata;
struct resource *res;
size_t res_size, bitmap_size;
struct coresight_desc *desc;
if (pdev->dev.of_node) {
pdata = of_get_coresight_platform_data(dev, pdev->dev.of_node);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
pdev->dev.platform_data = pdata;
}
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
/* Store the driver data pointer for use in exported functions */
stmdrvdata = drvdata;
drvdata->dev = &pdev->dev;
platform_set_drvdata(pdev, drvdata);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
drvdata->base = devm_ioremap(dev, res->start, resource_size(res));
if (!drvdata->base)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res)
return -ENODEV;
if (boot_nr_channel) {
res_size = min((resource_size_t)(boot_nr_channel *
BYTES_PER_CHANNEL), resource_size(res));
bitmap_size = boot_nr_channel * sizeof(long);
} else {
res_size = min((resource_size_t)(NR_STM_CHANNEL *
BYTES_PER_CHANNEL), resource_size(res));
bitmap_size = NR_STM_CHANNEL * sizeof(long);
}
drvdata->hwevt_stm_port = res->start + 0x100;
drvdata->chs.base = devm_ioremap(dev, res->start, res_size);
if (!drvdata->chs.base)
return -ENOMEM;
drvdata->chs.bitmap = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
if (!drvdata->chs.bitmap)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
if (!res)
return -ENODEV;
drvdata->txa.base = devm_ioremap(dev, res->start, res_size);
if (!drvdata->txa.base)
return -ENOMEM;
drvdata->txa.bitmap = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
if (!drvdata->txa.bitmap)
return -ENOMEM;
spin_lock_init(&drvdata->spinlock);
bitmap_fill(drvdata->entities, OST_ENTITY_MAX);
desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
if (!desc)
return -ENOMEM;
desc->type = CORESIGHT_DEV_TYPE_SOURCE;
desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE;
desc->ops = &stm_cs_ops;
desc->pdata = pdev->dev.platform_data;
desc->dev = &pdev->dev;
desc->groups = stm_attr_grps;
desc->owner = THIS_MODULE;
drvdata->csdev = coresight_register(desc);
if (IS_ERR(drvdata->csdev))
return PTR_ERR(drvdata->csdev);
drvdata->miscdev.name = ((struct coresight_platform_data *)
(pdev->dev.platform_data))->name;
drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
drvdata->miscdev.fops = &stm_fops;
ret = misc_register(&drvdata->miscdev);
if (ret)
goto err;
dev_info(drvdata->dev, "STM initialized\n");
if (boot_enable)
stm_enable(drvdata->csdev);
return 0;
err:
coresight_unregister(drvdata->csdev);
return ret;
}
static int __devexit stm_remove(struct platform_device *pdev)
{
struct stm_drvdata *drvdata = platform_get_drvdata(pdev);
misc_deregister(&drvdata->miscdev);
coresight_unregister(drvdata->csdev);
return 0;
}
static struct of_device_id stm_match[] = {
{.compatible = "arm,coresight-stm"},
{}
};
static struct platform_driver stm_driver = {
.probe = stm_probe,
.remove = __devexit_p(stm_remove),
.driver = {
.name = "coresight-stm",
.owner = THIS_MODULE,
.of_match_table = stm_match,
},
};
static int __init stm_init(void)
{
return platform_driver_register(&stm_driver);
}
late_initcall(stm_init);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CoreSight System Trace Macrocell driver");

drivers/trace/coresight.c (new file, 681 lines added)

@@ -0,0 +1,681 @@
/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/semaphore.h>
#include <linux/clk.h>
#include <linux/coresight.h>
#include "coresight-priv.h"
#define NO_SINK (-1)
static int curr_sink = NO_SINK;
static LIST_HEAD(coresight_orph_conns);
static LIST_HEAD(coresight_devs);
static DEFINE_SEMAPHORE(coresight_mutex);
static int coresight_find_link_inport(struct coresight_device *csdev)
{
int i;
struct coresight_device *parent;
struct coresight_connection *conn;
parent = container_of(csdev->path_link.next, struct coresight_device,
path_link);
for (i = 0; i < parent->nr_conns; i++) {
conn = &parent->conns[i];
if (conn->child_dev == csdev)
return conn->child_port;
}
pr_err("coresight: couldn't find inport, parent: %d, child: %d\n",
parent->id, csdev->id);
return 0;
}
static int coresight_find_link_outport(struct coresight_device *csdev)
{
int i;
struct coresight_device *child;
struct coresight_connection *conn;
child = container_of(csdev->path_link.prev, struct coresight_device,
path_link);
for (i = 0; i < csdev->nr_conns; i++) {
conn = &csdev->conns[i];
if (conn->child_dev == child)
return conn->outport;
}
pr_err("coresight: couldn't find outport, parent: %d, child: %d\n",
csdev->id, child->id);
return 0;
}
static int coresight_enable_sink(struct coresight_device *csdev)
{
int ret;
if (csdev->refcnt.sink_refcnt == 0) {
if (csdev->ops->sink_ops->enable) {
ret = csdev->ops->sink_ops->enable(csdev);
if (ret)
goto err;
csdev->enable = true;
}
}
csdev->refcnt.sink_refcnt++;
return 0;
err:
return ret;
}
static void coresight_disable_sink(struct coresight_device *csdev)
{
if (csdev->refcnt.sink_refcnt == 1) {
if (csdev->ops->sink_ops->disable) {
csdev->ops->sink_ops->disable(csdev);
csdev->enable = false;
}
}
csdev->refcnt.sink_refcnt--;
}
static int coresight_enable_link(struct coresight_device *csdev)
{
int ret;
int link_subtype;
int refport, inport, outport;
inport = coresight_find_link_inport(csdev);
outport = coresight_find_link_outport(csdev);
link_subtype = csdev->subtype.link_subtype;
if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG)
refport = inport;
else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT)
refport = outport;
else
refport = 0;
if (csdev->refcnt.link_refcnts[refport] == 0) {
if (csdev->ops->link_ops->enable) {
ret = csdev->ops->link_ops->enable(csdev, inport,
outport);
if (ret)
goto err;
csdev->enable = true;
}
}
csdev->refcnt.link_refcnts[refport]++;
return 0;
err:
return ret;
}
static void coresight_disable_link(struct coresight_device *csdev)
{
int link_subtype;
int refport, inport, outport;
inport = coresight_find_link_inport(csdev);
outport = coresight_find_link_outport(csdev);
link_subtype = csdev->subtype.link_subtype;
if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG)
refport = inport;
else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT)
refport = outport;
else
refport = 0;
if (csdev->refcnt.link_refcnts[refport] == 1) {
if (csdev->ops->link_ops->disable) {
csdev->ops->link_ops->disable(csdev, inport, outport);
csdev->enable = false;
}
}
csdev->refcnt.link_refcnts[refport]--;
}
static int coresight_enable_source(struct coresight_device *csdev)
{
int ret;
if (csdev->refcnt.source_refcnt == 0) {
if (csdev->ops->source_ops->enable) {
ret = csdev->ops->source_ops->enable(csdev);
if (ret)
goto err;
csdev->enable = true;
}
}
csdev->refcnt.source_refcnt++;
return 0;
err:
return ret;
}
static void coresight_disable_source(struct coresight_device *csdev)
{
if (csdev->refcnt.source_refcnt == 1) {
if (csdev->ops->source_ops->disable) {
csdev->ops->source_ops->disable(csdev);
csdev->enable = false;
}
}
csdev->refcnt.source_refcnt--;
}
static struct list_head *coresight_build_path(struct coresight_device *csdev,
struct list_head *path)
{
int i;
struct list_head *p;
struct coresight_connection *conn;
if (csdev->id == curr_sink) {
list_add_tail(&csdev->path_link, path);
return path;
}
for (i = 0; i < csdev->nr_conns; i++) {
conn = &csdev->conns[i];
p = coresight_build_path(conn->child_dev, path);
if (p) {
list_add_tail(&csdev->path_link, p);
return p;
}
}
return NULL;
}
static void coresight_release_path(struct list_head *path)
{
struct coresight_device *cd, *temp;
list_for_each_entry_safe(cd, temp, path, path_link)
list_del(&cd->path_link);
}
static int coresight_enable_path(struct list_head *path, bool incl_source)
{
int ret = 0;
struct coresight_device *cd;
list_for_each_entry(cd, path, path_link) {
if (cd == list_first_entry(path, struct coresight_device,
path_link)) {
ret = coresight_enable_sink(cd);
} else if (list_is_last(&cd->path_link, path)) {
if (incl_source)
ret = coresight_enable_source(cd);
} else {
ret = coresight_enable_link(cd);
}
if (ret)
goto err;
}
return 0;
err:
list_for_each_entry_continue_reverse(cd, path, path_link) {
if (cd == list_first_entry(path, struct coresight_device,
path_link)) {
coresight_disable_sink(cd);
} else if (list_is_last(&cd->path_link, path)) {
if (incl_source)
coresight_disable_source(cd);
} else {
coresight_disable_link(cd);
}
}
return ret;
}
static void coresight_disable_path(struct list_head *path, bool incl_source)
{
struct coresight_device *cd;
list_for_each_entry(cd, path, path_link) {
if (cd == list_first_entry(path, struct coresight_device,
path_link)) {
coresight_disable_sink(cd);
} else if (list_is_last(&cd->path_link, path)) {
if (incl_source)
coresight_disable_source(cd);
} else {
coresight_disable_link(cd);
}
}
}
static int coresight_switch_sink(struct coresight_device *csdev)
{
int ret = 0;
LIST_HEAD(path);
struct coresight_device *cd;
if (IS_ERR_OR_NULL(csdev))
return -EINVAL;
down(&coresight_mutex);
if (csdev->id == curr_sink)
goto out;
list_for_each_entry(cd, &coresight_devs, dev_link) {
if (cd->type == CORESIGHT_DEV_TYPE_SOURCE && cd->enable) {
coresight_build_path(cd, &path);
coresight_disable_path(&path, false);
coresight_release_path(&path);
}
}
curr_sink = csdev->id;
list_for_each_entry(cd, &coresight_devs, dev_link) {
if (cd->type == CORESIGHT_DEV_TYPE_SOURCE && cd->enable) {
coresight_build_path(cd, &path);
ret = coresight_enable_path(&path, false);
coresight_release_path(&path);
if (ret)
goto err;
}
}
out:
up(&coresight_mutex);
return 0;
err:
list_for_each_entry(cd, &coresight_devs, dev_link) {
if (cd->type == CORESIGHT_DEV_TYPE_SOURCE && cd->enable)
coresight_disable_source(cd);
}
pr_err("coresight: sink switch failed, sources disabled; try again\n");
return ret;
}
int coresight_enable(struct coresight_device *csdev)
{
int ret = 0;
LIST_HEAD(path);
if (IS_ERR_OR_NULL(csdev))
return -EINVAL;
down(&coresight_mutex);
if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) {
ret = -EINVAL;
pr_err("coresight: wrong device type in %s\n", __func__);
goto out;
}
if (csdev->enable)
goto out;
coresight_build_path(csdev, &path);
ret = coresight_enable_path(&path, true);
coresight_release_path(&path);
if (ret)
pr_err("coresight: enable failed\n");
out:
up(&coresight_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(coresight_enable);
void coresight_disable(struct coresight_device *csdev)
{
LIST_HEAD(path);
if (IS_ERR_OR_NULL(csdev))
return;
down(&coresight_mutex);
if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) {
pr_err("coresight: wrong device type in %s\n", __func__);
goto out;
}
if (!csdev->enable)
goto out;
coresight_build_path(csdev, &path);
coresight_disable_path(&path, true);
coresight_release_path(&path);
out:
up(&coresight_mutex);
}
EXPORT_SYMBOL_GPL(coresight_disable);
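/*
 * Illustrative sketch (not part of the original file): how a source driver
 * (or a sysfs hook like the 'enable' attribute below) would use the two
 * exported calls above to bring the whole source -> link -> sink path up
 * and down. csdev is assumed to be the handle returned earlier by
 * coresight_register().
 */
#include <linux/coresight.h>
#include <linux/printk.h>

static int my_start_tracing(struct coresight_device *csdev)
{
	int ret;

	ret = coresight_enable(csdev);	/* walks the path to the current sink */
	if (ret)
		pr_err("coresight: could not enable tracing path: %d\n", ret);
	return ret;
}

static void my_stop_tracing(struct coresight_device *csdev)
{
	coresight_disable(csdev);	/* tears the same path down */
}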
void coresight_abort(void)
{
struct coresight_device *cd;
if (down_trylock(&coresight_mutex)) {
pr_err("coresight: abort could not be processed\n");
return;
}
if (curr_sink == NO_SINK)
goto out;
list_for_each_entry(cd, &coresight_devs, dev_link) {
if (cd->id == curr_sink) {
if (cd->enable && cd->ops->sink_ops->abort) {
cd->ops->sink_ops->abort(cd);
cd->enable = false;
}
}
}
out:
up(&coresight_mutex);
}
EXPORT_SYMBOL_GPL(coresight_abort);
static ssize_t coresight_show_type(struct device *dev,
struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%s\n", dev->type->name);
}
static struct device_attribute coresight_dev_attrs[] = {
__ATTR(type, S_IRUGO, coresight_show_type, NULL),
{ },
};
struct bus_type coresight_bus_type = {
.name = "coresight",
.dev_attrs = coresight_dev_attrs,
};
static ssize_t coresight_show_curr_sink(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct coresight_device *csdev = to_coresight_device(dev);
return scnprintf(buf, PAGE_SIZE, "%u\n",
csdev->id == curr_sink ? 1 : 0);
}
static ssize_t coresight_store_curr_sink(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int ret = 0;
unsigned long val;
struct coresight_device *csdev = to_coresight_device(dev);
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
if (val)
ret = coresight_switch_sink(csdev);
else
ret = -EINVAL;
if (ret)
return ret;
return size;
}
static DEVICE_ATTR(curr_sink, S_IRUGO | S_IWUSR, coresight_show_curr_sink,
coresight_store_curr_sink);
static ssize_t coresight_show_enable(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct coresight_device *csdev = to_coresight_device(dev);
return scnprintf(buf, PAGE_SIZE, "%u\n", (unsigned)csdev->enable);
}
static ssize_t coresight_store_enable(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
int ret = 0;
unsigned long val;
struct coresight_device *csdev = to_coresight_device(dev);
if (sscanf(buf, "%lx", &val) != 1)
return -EINVAL;
if (val)
ret = coresight_enable(csdev);
else
coresight_disable(csdev);
if (ret)
return ret;
return size;
}
static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, coresight_show_enable,
coresight_store_enable);
static struct attribute *coresight_attrs_sink[] = {
&dev_attr_curr_sink.attr,
NULL,
};
static struct attribute_group coresight_attr_grp_sink = {
.attrs = coresight_attrs_sink,
};
static const struct attribute_group *coresight_attr_grps_sink[] = {
&coresight_attr_grp_sink,
NULL,
};
static struct attribute *coresight_attrs_source[] = {
&dev_attr_enable.attr,
NULL,
};
static struct attribute_group coresight_attr_grp_source = {
.attrs = coresight_attrs_source,
};
static const struct attribute_group *coresight_attr_grps_source[] = {
&coresight_attr_grp_source,
NULL,
};
static struct device_type coresight_dev_type[] = {
{
.name = "none",
},
{
.name = "sink",
.groups = coresight_attr_grps_sink,
},
{
.name = "link",
},
{
.name = "linksink",
.groups = coresight_attr_grps_sink,
},
{
.name = "source",
.groups = coresight_attr_grps_source,
},
};
static void coresight_device_release(struct device *dev)
{
struct coresight_device *csdev = to_coresight_device(dev);
kfree(csdev);
}
static void coresight_fixup_orphan_conns(struct coresight_device *csdev)
{
struct coresight_connection *conn, *temp;
list_for_each_entry_safe(conn, temp, &coresight_orph_conns, link) {
if (conn->child_id == csdev->id) {
conn->child_dev = csdev;
list_del(&conn->link);
}
}
}
static void coresight_fixup_device_conns(struct coresight_device *csdev)
{
int i;
struct coresight_device *cd;
bool found;
for (i = 0; i < csdev->nr_conns; i++) {
found = false;
list_for_each_entry(cd, &coresight_devs, dev_link) {
if (csdev->conns[i].child_id == cd->id) {
csdev->conns[i].child_dev = cd;
found = true;
break;
}
}
if (!found)
list_add_tail(&csdev->conns[i].link,
&coresight_orph_conns);
}
}
struct coresight_device *coresight_register(struct coresight_desc *desc)
{
int i;
int ret;
int link_subtype;
int nr_refcnts;
int *refcnts = NULL;
struct coresight_device *csdev;
struct coresight_connection *conns;
if (IS_ERR_OR_NULL(desc))
return ERR_PTR(-EINVAL);
csdev = kzalloc(sizeof(*csdev), GFP_KERNEL);
if (!csdev) {
ret = -ENOMEM;
goto err_kzalloc_csdev;
}
csdev->id = desc->pdata->id;
if (desc->type == CORESIGHT_DEV_TYPE_LINK ||
desc->type == CORESIGHT_DEV_TYPE_LINKSINK) {
link_subtype = desc->subtype.link_subtype;
if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG)
nr_refcnts = desc->pdata->nr_inports;
else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT)
nr_refcnts = desc->pdata->nr_outports;
else
nr_refcnts = 1;
refcnts = kzalloc(sizeof(*refcnts) * nr_refcnts, GFP_KERNEL);
if (!refcnts) {
ret = -ENOMEM;
goto err_kzalloc_refcnts;
}
csdev->refcnt.link_refcnts = refcnts;
}
csdev->nr_conns = desc->pdata->nr_outports;
conns = kzalloc(sizeof(*conns) * csdev->nr_conns, GFP_KERNEL);
if (!conns) {
ret = -ENOMEM;
goto err_kzalloc_conns;
}
for (i = 0; i < csdev->nr_conns; i++) {
conns[i].outport = desc->pdata->outports[i];
conns[i].child_id = desc->pdata->child_ids[i];
conns[i].child_port = desc->pdata->child_ports[i];
}
csdev->conns = conns;
csdev->type = desc->type;
csdev->subtype = desc->subtype;
csdev->ops = desc->ops;
csdev->owner = desc->owner;
csdev->dev.type = &coresight_dev_type[desc->type];
csdev->dev.groups = desc->groups;
csdev->dev.parent = desc->dev;
csdev->dev.bus = &coresight_bus_type;
csdev->dev.release = coresight_device_release;
dev_set_name(&csdev->dev, "%s", desc->pdata->name);
down(&coresight_mutex);
if (desc->pdata->default_sink) {
if (curr_sink == NO_SINK) {
curr_sink = csdev->id;
} else {
ret = -EINVAL;
goto err_default_sink;
}
}
coresight_fixup_device_conns(csdev);
ret = device_register(&csdev->dev);
if (ret)
goto err_dev_reg;
coresight_fixup_orphan_conns(csdev);
list_add_tail(&csdev->dev_link, &coresight_devs);
up(&coresight_mutex);
return csdev;
err_dev_reg:
put_device(&csdev->dev);
err_default_sink:
up(&coresight_mutex);
kfree(conns);
err_kzalloc_conns:
kfree(refcnts);
err_kzalloc_refcnts:
kfree(csdev);
err_kzalloc_csdev:
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(coresight_register);
void coresight_unregister(struct coresight_device *csdev)
{
if (IS_ERR_OR_NULL(csdev))
return;
if (get_device(&csdev->dev)) {
device_unregister(&csdev->dev);
put_device(&csdev->dev);
}
}
EXPORT_SYMBOL_GPL(coresight_unregister);
static int __init coresight_init(void)
{
return bus_register(&coresight_bus_type);
}
subsys_initcall(coresight_init);
static void __exit coresight_exit(void)
{
bus_unregister(&coresight_bus_type);
}
module_exit(coresight_exit);
MODULE_LICENSE("GPL v2");


@@ -0,0 +1,694 @@
/*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* BUS Monitor Debugging Driver for Samsung EXYNOS SoC
* By Hosung Kim (hosung0.kim@samsung.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/exynos-busmon.h>
#define BUSMON_REG_FAULTEN (0x08)
#define BUSMON_REG_ERRVLD (0x0C)
#define BUSMON_REG_ERRCLR (0x10)
#define BUSMON_REG_ERRLOG0 (0x14)
#define BUSMON_REG_ERRLOG1 (0x18)
#define BUSMON_REG_ERRLOG2 (0x1C)
#define BUSMON_REG_ERRLOG3 (0x20)
#define BUSMON_REG_ERRLOG4 (0x24)
#define BUSMON_REG_ERRLOG5 (0x28)
#define BUSMON_EINVAL (99)
#define START (0)
#define END (1)
#define ARRAY_BITS (2)
#define ARRAY_SUBRANGE_MAX (1024)
#define NEED_TO_CHECK (0xCAFE)
/* Error Code Description */
static char *busmon_errcode[] = {
"0x0, SLV (Error Detect by the Slave)",
"0x1, DEC (Decode error)",
"0x2, UNS (Access type unsupported in target NIU)",
"0x3, DISC(Disconnected Target or NOC domain)",
"0x4, SEC (Security error)",
"0x5, HIDE(Hidden security error)",
"0x6, TMO (Timeout error)",
"Invalid errorcode",
};
/* Opcode Description */
static char *busmon_opcode[] = {
"0x0, RD (INCR Read)",
"0x1, RDW (WRAP Read)",
"0x2, RDEX(Exclusive Read)",
"0x3, RDLK(Locked Read)",
"0x4, WR (INCR Write)",
"0x5, WRW (WRAP Write)",
"0x6, WREX(Exclusive Write)",
"0x7, WRLK(Locked Write)",
"Invalid opcode",
};
#define BUSMON_INIT_DESC_STRING "init-desc"
#define BUSMON_TARGET_DESC_STRING "target-desc"
#define BUSMON_USERSIGNAL_DESC_STRING "usersignal-desc"
#define BUSMON_UNSUPPORTED_STRING "unsupported"
struct busmon_timeout {
char *name;
void __iomem *regs;
u32 enabled;
u32 enable_bit;
u32 range_bits[ARRAY_BITS];
struct list_head list;
};
struct busmon_platdata {
/* RouteID Information Bits */
u32 init_bits[ARRAY_BITS];
u32 target_bits[ARRAY_BITS];
u32 sub_bits[ARRAY_BITS];
u32 seq_bits[ARRAY_BITS];
/* Registers Bits */
u32 faulten_bits[ARRAY_BITS];
u32 errvld_bits[ARRAY_BITS];
u32 errclr_bits[ARRAY_BITS];
u32 errlog0_lock_bits[ARRAY_BITS];
u32 errlog0_opc_bits[ARRAY_BITS];
u32 errlog0_errcode_bits[ARRAY_BITS];
u32 errlog0_len1_bits[ARRAY_BITS];
u32 errlog0_format_bits[ARRAY_BITS];
u32 errlog1_bits[ARRAY_BITS];
u32 errlog2_bits[ARRAY_BITS];
u32 errlog3_bits[ARRAY_BITS];
u32 errlog4_bits[ARRAY_BITS];
u32 errlog5_bits[ARRAY_BITS];
u32 errlog5_axcache_bits[ARRAY_BITS];
u32 errlog5_axdomain_bits[ARRAY_BITS];
u32 errlog5_axuser_bits[ARRAY_BITS];
u32 errlog5_axprot_bits[ARRAY_BITS];
u32 errlog5_axqos_bits[ARRAY_BITS];
u32 errlog5_axsnoop_bits[ARRAY_BITS];
u32 init_num;
u32 target_num;
u32 sub_num;
u32 sub_array;
u32 init_flow;
u32 target_flow;
u32 subrange;
u64 target_addr;
u32 enabled;
u32 sub_index[ARRAY_SUBRANGE_MAX];
u32 sub_addr[ARRAY_SUBRANGE_MAX];
struct busmon_notifier notifier_info;
/* timeout block list */
struct list_head timeout_list;
};
struct busmon_dev {
struct device *dev;
struct busmon_platdata *pdata;
struct of_device_id *match;
int irq;
int id;
void __iomem *regs;
spinlock_t ctrl_lock;
};
struct busmon_panic_block {
struct notifier_block nb_panic_block;
struct busmon_dev *pdev;
};
/* declare notifier_list */
static ATOMIC_NOTIFIER_HEAD(busmon_notifier_list);
static const struct of_device_id busmon_dt_match[] = {
{ .compatible = "samsung,exynos-busmonitor",
.data = NULL, },
{},
};
MODULE_DEVICE_TABLE(of, busmon_dt_match);
static char* busmon_get_string(struct device_node *np,
const char* desc_str,
unsigned int desc_num)
{
const char *desc_ret;
int ret;
ret = of_property_read_string_index(np, desc_str, desc_num,
(const char **)&desc_ret);
if (ret)
desc_ret = NULL;
return (char *)desc_ret;
}
static unsigned int busmon_get_bits(u32 *bits, unsigned int val)
{
unsigned int ret = 0, i;
/* If bits[END] holds BUSMON_EINVAL, the field is not implemented; return 0 */
if (bits[END] != BUSMON_EINVAL) {
/* Make masking value by checking from start-bit to end-bit */
for (i = bits[START]; i <= bits[END]; i++)
ret = (ret | (1 << i));
}
return ret & val;
}
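/*
 * Illustrative sketch (not part of the original file): busmon_get_bits()
 * above masks a register value down to the inclusive [start, end] bit range
 * taken from the device tree; callers then shift right by bits[START].
 * Standalone userspace C with a hypothetical field at bits [19:16]
 * (the BUSMON_EINVAL guard is omitted for brevity).
 */
#include <stdio.h>

static unsigned int example_get_bits(const unsigned int *bits, unsigned int val)
{
	unsigned int ret = 0, i;

	for (i = bits[0]; i <= bits[1]; i++)	/* bits[0]=START, bits[1]=END */
		ret |= 1U << i;
	return ret & val;
}

int main(void)
{
	unsigned int field[2] = { 16, 19 };	/* hypothetical errlog field */
	unsigned int errlog = 0x00050123;	/* made-up register value    */

	printf("0x%x\n", example_get_bits(field, errlog) >> field[0]);	/* 0x5 */
	return 0;
}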
static void busmon_logging_dump_raw(struct busmon_dev *busmon)
{
struct busmon_platdata *pdata = busmon->pdata;
unsigned int errlog0, errlog1, errlog2, errlog3, errlog4, errlog5, opcode, errcode;
unsigned int axcache, axdomain, axuser, axprot, axqos, axsnoop;
char *init_desc, *target_desc, *user_desc;
errlog0 = __raw_readl(busmon->regs + BUSMON_REG_ERRLOG0);
errlog1 = __raw_readl(busmon->regs + BUSMON_REG_ERRLOG1);
errlog2 = __raw_readl(busmon->regs + BUSMON_REG_ERRLOG2);
errlog3 = __raw_readl(busmon->regs + BUSMON_REG_ERRLOG3);
errlog4 = __raw_readl(busmon->regs + BUSMON_REG_ERRLOG4);
errlog5 = __raw_readl(busmon->regs + BUSMON_REG_ERRLOG5);
init_desc = busmon_get_string(busmon->dev->of_node,
BUSMON_INIT_DESC_STRING, pdata->init_flow);
target_desc = busmon_get_string(busmon->dev->of_node,
BUSMON_TARGET_DESC_STRING, pdata->target_flow);
opcode = busmon_get_bits(pdata->errlog0_opc_bits, errlog0) >>
pdata->errlog0_opc_bits[START];
errcode = busmon_get_bits(pdata->errlog0_errcode_bits, errlog0) >>
pdata->errlog0_errcode_bits[START];
axcache = busmon_get_bits(pdata->errlog5_axcache_bits, errlog5) >>
pdata->errlog5_axcache_bits[START];
axdomain = busmon_get_bits(pdata->errlog5_axdomain_bits, errlog5) >>
pdata->errlog5_axdomain_bits[START];
axuser = busmon_get_bits(pdata->errlog5_axuser_bits, errlog5) >>
pdata->errlog5_axuser_bits[START];
user_desc = busmon_get_string(busmon->dev->of_node,
BUSMON_USERSIGNAL_DESC_STRING,
(pdata->init_flow << 4) | axuser);
axprot = busmon_get_bits(pdata->errlog5_axprot_bits, errlog5) >>
pdata->errlog5_axprot_bits[START];
axqos = busmon_get_bits(pdata->errlog5_axqos_bits, errlog5) >>
pdata->errlog5_axqos_bits[START];
axsnoop = busmon_get_bits(pdata->errlog5_axsnoop_bits, errlog5) >>
pdata->errlog5_axsnoop_bits[START];
/* Check overflow */
if (ARRAY_SIZE(busmon_opcode) <= opcode)
opcode = ARRAY_SIZE(busmon_opcode) - 1;
if (ARRAY_SIZE(busmon_errcode) <= errcode)
errcode = ARRAY_SIZE(busmon_errcode) - 1;
dev_err(busmon->dev, "Error detected by BUS Monitor\n"
"=======================================================\n");
dev_err(busmon->dev,
"\nDebugging Information (1)\n"
"\tPath : %s -> %s\n"
"\topcode : %s\n"
"\tErrorCode : %s\n"
"\tLength : 0x%x (bytes)\n"
"\tAddress : 0x%llx\n"
"\tFormat : 0x%x\n"
"\tinitflow : 0x%x\n"
"\ttargetflow : 0x%x\n"
"\tsubrange : 0x%x\n"
"=======================================================\n",
IS_ERR_OR_NULL(init_desc) ? BUSMON_UNSUPPORTED_STRING : init_desc,
IS_ERR_OR_NULL(target_desc) ? BUSMON_UNSUPPORTED_STRING : target_desc,
busmon_opcode[opcode], busmon_errcode[errcode],
(busmon_get_bits(pdata->errlog0_len1_bits, errlog0) >>
pdata->errlog0_len1_bits[START]) + 1,
pdata->target_addr,
busmon_get_bits(pdata->errlog0_format_bits, errlog0) >>
pdata->errlog0_format_bits[START],
pdata->init_flow, pdata->target_flow, pdata->subrange);
dev_err(busmon->dev,
"\nDebugging information (2)\n"
"\tAXUSER : 0x%x, Master IP: %s\n"
"\tAXCACHE : 0x%x\n"
"\tAXDOMAIN : 0x%x\n"
"\tAXPROT : 0x%x\n"
"\tAXQOS : 0x%x\n"
"\tAXSNOOP : 0x%x\n"
"=======================================================\n",
axuser, IS_ERR_OR_NULL(user_desc) ? BUSMON_UNSUPPORTED_STRING : user_desc,
axcache, axdomain, axprot, axqos, axsnoop);
dev_err(busmon->dev,
"\nErrlog Raw Registers\n"
"\tErrLog0 : 0x%x\n"
"\tErrLog1 : 0x%x\n"
"\tErrLog2 : 0x%x\n"
"\tErrLog3 : 0x%x\n"
"\tErrLog4 : 0x%x\n"
"\tErrLog5 : 0x%x\n"
"=======================================================\n",
errlog0, errlog1, errlog2, errlog3, errlog4, errlog5);
if (!pdata->target_addr)
dev_err(busmon->dev, "Address is not valid, Needs to check\n");
/* Fill in the information for the notifier call chain */
pdata->notifier_info.init_desc = init_desc;
pdata->notifier_info.target_desc = target_desc;
pdata->notifier_info.masterip_desc = user_desc;
pdata->notifier_info.masterip_idx = axuser;
pdata->notifier_info.target_addr = pdata->target_addr;
}
static void busmon_logging_parse_route(struct busmon_dev *busmon)
{
struct busmon_platdata *pdata = busmon->pdata;
unsigned int init_id, target_id, sub_id, val, bits;
unsigned int errlog3 = 0, errlog4 = 0, i;
val = __raw_readl(busmon->regs + BUSMON_REG_ERRLOG1);
bits = busmon_get_bits(pdata->errlog1_bits, val);
init_id = busmon_get_bits(pdata->init_bits, bits) >> pdata->init_bits[START];
target_id = busmon_get_bits(pdata->target_bits, bits) >> pdata->target_bits[START];
sub_id = busmon_get_bits(pdata->sub_bits, bits) >> pdata->sub_bits[START];
pdata->init_flow = init_id;
pdata->target_flow = target_id;
pdata->subrange = sub_id;
/* Calculate target address */
errlog3 = __raw_readl(busmon->regs + BUSMON_REG_ERRLOG3);
errlog4 = __raw_readl(busmon->regs + BUSMON_REG_ERRLOG4);
errlog3 = busmon_get_bits(pdata->errlog3_bits, errlog3) >> pdata->errlog3_bits[START];
errlog4 = busmon_get_bits(pdata->errlog4_bits, errlog4) >> pdata->errlog4_bits[START];
val = (init_id * (pdata->target_num * pdata->sub_num)) +
(target_id * pdata->sub_num) + sub_id;
for (i = 0; i < pdata->sub_array; i++) {
if (pdata->sub_index[i] == val) {
if (pdata->sub_addr[i] == NEED_TO_CHECK) {
pdata->target_addr = 0;
} else {
pdata->target_addr = ((u64)errlog4 << 32);
pdata->target_addr |= (errlog3 + pdata->sub_addr[i]);
}
break;
}
}
}
static void busmon_logging_dump(struct busmon_dev *busmon)
{
busmon_logging_parse_route(busmon);
busmon_logging_dump_raw(busmon);
}
static irqreturn_t busmon_logging_irq(int irq, void *data)
{
struct busmon_dev *busmon = (struct busmon_dev *)data;
struct busmon_platdata *pdata = busmon->pdata;
unsigned int bits;
unsigned int val;
/* Check error has been logged */
val = __raw_readl(busmon->regs + BUSMON_REG_ERRVLD);
bits = busmon_get_bits(pdata->errvld_bits, val);
if (bits) {
char *init_desc;
dev_info(busmon->dev, "BUS monitor information: %d interrupt occurs.\n", (irq - 32));
busmon_logging_dump(busmon);
/* error clear */
bits = busmon_get_bits(pdata->errclr_bits, 1);
__raw_writel(bits, busmon->regs + BUSMON_REG_ERRCLR);
/* This code is for finding out the source */
init_desc = busmon_get_string(busmon->dev->of_node,
BUSMON_INIT_DESC_STRING, pdata->init_flow);
/* call notifier_call_chain of busmon */
atomic_notifier_call_chain(&busmon_notifier_list, 0, &pdata->notifier_info);
if (init_desc && !strncmp(init_desc, "CPU", strlen("CPU")))
dev_err(busmon->dev, "Error detected by BUS monitor.\n");
else
panic("Error detected by BUS monitor.");
}
return IRQ_HANDLED;
}
void busmon_notifier_chain_register(struct notifier_block *block)
{
atomic_notifier_chain_register(&busmon_notifier_list, block);
}
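/*
 * Illustrative sketch (not part of the original file): a client driver
 * registering with busmon_notifier_chain_register() above. The callback
 * receives the struct busmon_notifier (from <linux/exynos-busmon.h>) that
 * busmon_logging_dump_raw() fills in before the chain is called.
 */
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/exynos-busmon.h>

static int my_busmon_notify(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	struct busmon_notifier *info = data;

	pr_err("bus fault: %s -> %s, addr 0x%llx (master %s)\n",
	       info->init_desc, info->target_desc,
	       (unsigned long long)info->target_addr, info->masterip_desc);
	return NOTIFY_OK;
}

static struct notifier_block my_busmon_nb = {
	.notifier_call = my_busmon_notify,
};

static int __init my_busmon_client_init(void)
{
	busmon_notifier_chain_register(&my_busmon_nb);
	return 0;
}
device_initcall(my_busmon_client_init);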
static int busmon_logging_panic_handler(struct notifier_block *nb,
unsigned long l, void *buf)
{
struct busmon_panic_block *busmon_panic = (struct busmon_panic_block *)nb;
struct busmon_dev *busmon = busmon_panic->pdev;
struct busmon_platdata *pdata = busmon->pdata;
unsigned int bits;
unsigned int val;
if (!IS_ERR_OR_NULL(busmon)) {
/* Check error has been logged */
val = __raw_readl(busmon->regs + BUSMON_REG_ERRVLD);
bits = busmon_get_bits(pdata->errvld_bits, val);
if (bits)
busmon_logging_dump(busmon);
else
dev_info(busmon->dev,
"BUS monitor did not detect any error.\n");
}
return 0;
}
static void busmon_timeout_init(struct busmon_dev *busmon)
{
struct busmon_timeout *timeout;
struct list_head *entry;
u32 val;
if (list_empty(&busmon->pdata->timeout_list))
return;
list_for_each(entry, &busmon->pdata->timeout_list) {
timeout = list_entry(entry, struct busmon_timeout, list);
if (timeout && timeout->enabled) {
val = __raw_readl(timeout->regs);
val |= (0x1) << timeout->enable_bit;
__raw_writel(val, timeout->regs);
dev_dbg(busmon->dev,
"Exynos Bus Monitor timeout enabled(%s, bit:%d)\n",
timeout->name, timeout->enable_bit);
}
}
}
static void busmon_logging_init(struct busmon_dev *busmon)
{
struct busmon_platdata *pdata = busmon->pdata;
unsigned int bits;
if (pdata->enabled) {
/* first of all, clear any error that was latched previously */
bits = busmon_get_bits(pdata->errclr_bits, 1);
__raw_writel(bits, busmon->regs + BUSMON_REG_ERRCLR);
/* enable logging init */
bits = busmon_get_bits(pdata->faulten_bits, 1);
__raw_writel(bits, busmon->regs + BUSMON_REG_FAULTEN);
}
dev_dbg(busmon->dev, "Exynos BUS Monitor logging %s\n",
pdata->enabled ? "enabled" : "disabled");
}
static int busmon_dt_parse(struct device_node *np,
struct busmon_dev *busmon)
{
struct busmon_platdata *pdata = busmon->pdata;
struct device_node *time_np, *time_child_np = NULL;
struct busmon_timeout *timeout;
u32 regs[2];
int ret;
if (!np || !pdata) {
ret = -EINVAL;
goto out;
}
/* Error logging enabled */
of_property_read_u32(np, "enabled", &pdata->enabled);
/* Read BUS Logging setting */
of_property_read_u32_array(np, "seq-bits", pdata->seq_bits, 2);
of_property_read_u32_array(np, "sub-bits", pdata->sub_bits, 2);
of_property_read_u32_array(np, "target-bits", pdata->target_bits, 2);
of_property_read_u32_array(np, "init-bits", pdata->init_bits, 2);
of_property_read_u32_array(np, "faulten-bits", pdata->faulten_bits, 2);
of_property_read_u32_array(np, "errvld-bits", pdata->errvld_bits, 2);
of_property_read_u32_array(np, "errclr-bits", pdata->errclr_bits, 2);
of_property_read_u32_array(np, "errlog0-lock-bits", pdata->errlog0_lock_bits, 2);
of_property_read_u32_array(np, "errlog0-opc-bits", pdata->errlog0_opc_bits, 2);
of_property_read_u32_array(np, "errlog0-errcode-bits", pdata->errlog0_errcode_bits, 2);
of_property_read_u32_array(np, "errlog0-len1-bits", pdata->errlog0_len1_bits, 2);
of_property_read_u32_array(np, "errlog0-format-bits", pdata->errlog0_format_bits, 2);
of_property_read_u32_array(np, "errlog1-bits", pdata->errlog1_bits, 2);
of_property_read_u32_array(np, "errlog2-bits", pdata->errlog2_bits, 2);
of_property_read_u32_array(np, "errlog3-bits", pdata->errlog3_bits, 2);
of_property_read_u32_array(np, "errlog4-bits", pdata->errlog4_bits, 2);
of_property_read_u32_array(np, "errlog5-bits", pdata->errlog5_bits, 2);
/* errlog5's slot bits differ per SoC, so these properties are optional */
ret = of_property_read_u32_array(np, "errlog5-axcache-bits",
pdata->errlog5_axcache_bits, 2);
if (ret) {
pdata->errlog5_axcache_bits[START] = 0;
pdata->errlog5_axcache_bits[END] = BUSMON_EINVAL;
}
ret = of_property_read_u32_array(np, "errlog5-axdomain-bits",
pdata->errlog5_axdomain_bits, 2);
if (ret) {
pdata->errlog5_axdomain_bits[START] = 0;
pdata->errlog5_axdomain_bits[END] = BUSMON_EINVAL;
}
ret = of_property_read_u32_array(np, "errlog5-axuser-bits",
pdata->errlog5_axuser_bits, 2);
if (ret) {
pdata->errlog5_axuser_bits[START] = 0;
pdata->errlog5_axuser_bits[END] = BUSMON_EINVAL;
}
ret = of_property_read_u32_array(np, "errlog5-axprot-bits",
pdata->errlog5_axprot_bits, 2);
if (ret) {
pdata->errlog5_axprot_bits[START] = 0;
pdata->errlog5_axprot_bits[END] = BUSMON_EINVAL;
}
ret = of_property_read_u32_array(np, "errlog5-axqos-bits",
pdata->errlog5_axqos_bits, 2);
if (ret) {
pdata->errlog5_axqos_bits[START] = 0;
pdata->errlog5_axqos_bits[END] = BUSMON_EINVAL;
}
ret = of_property_read_u32_array(np, "errlog5-axsnoop-bits",
pdata->errlog5_axsnoop_bits, 2);
if (ret) {
pdata->errlog5_axsnoop_bits[START] = 0;
pdata->errlog5_axsnoop_bits[END] = BUSMON_EINVAL;
}
of_property_read_u32(np, "init-num", &pdata->init_num);
of_property_read_u32(np, "target-num", &pdata->target_num);
of_property_read_u32(np, "sub-num", &pdata->sub_num);
of_property_read_u32(np, "sub-array", &pdata->sub_array);
of_property_read_u32_array(np, "sub-index", pdata->sub_index, pdata->sub_array);
of_property_read_u32_array(np, "sub-addr", pdata->sub_addr, pdata->sub_array);
/* Mandatory parsing is done */
ret = 0;
/* Check BUS Timeout setting(Option) */
INIT_LIST_HEAD(&pdata->timeout_list);
time_np = of_get_child_by_name(np, "timeout");
if (!time_np)
goto out;
/* BUS timeout setting */
while ((time_child_np = of_get_next_child(time_np, time_child_np)) != NULL) {
timeout = devm_kzalloc(busmon->dev,
sizeof(struct busmon_timeout), GFP_KERNEL);
if (!timeout) {
dev_err(busmon->dev,
"failed to allocate memory for busmon-timeout\n");
continue;
}
if (of_property_read_string(time_child_np, "nickname",
(const char **)&timeout->name)) {
dev_err(busmon->dev,
"failed to get nickname property\n");
continue;
}
of_property_read_u32_array(time_child_np, "reg", regs, 2);
timeout->regs = ioremap(regs[0], regs[1]);
if (!timeout->regs) {
dev_err(busmon->dev,
"failed to ioremap for busmon-timeout: %s\n",
timeout->name);
devm_kfree(busmon->dev, timeout);
continue;
}
of_property_read_u32(time_child_np, "enabled",
&timeout->enabled);
of_property_read_u32(time_child_np, "enable-bit",
&timeout->enable_bit);
of_property_read_u32_array(time_child_np, "range-bits",
timeout->range_bits, 2);
list_add(&timeout->list, &pdata->timeout_list);
}
of_node_put(time_np);
out:
return ret;
}
static int busmon_probe(struct platform_device *pdev)
{
struct busmon_dev *busmon;
struct busmon_platdata *pdata = NULL;
struct busmon_panic_block *busmon_panic = NULL;
const struct of_device_id *match;
struct resource *res;
int ret;
busmon = devm_kzalloc(&pdev->dev, sizeof(struct busmon_dev), GFP_KERNEL);
if (!busmon) {
dev_err(&pdev->dev, "failed to allocate memory for driver's "
"private data\n");
return -ENOMEM;
}
busmon->dev = &pdev->dev;
match = of_match_node(busmon_dt_match, pdev->dev.of_node);
busmon->match = (struct of_device_id *)match;
spin_lock_init(&busmon->ctrl_lock);
pdata = devm_kzalloc(&pdev->dev, sizeof(struct busmon_platdata), GFP_KERNEL);
if (!pdata) {
dev_err(&pdev->dev, "failed to allocate memory for driver's "
"platform data\n");
return -ENOMEM;
}
busmon->pdata = pdata;
ret = busmon_dt_parse(pdev->dev.of_node, busmon);
if (ret) {
dev_err(&pdev->dev, "failed to assign device tree parsing\n");
return -EINVAL;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENXIO;
busmon->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(busmon->regs)) {
dev_err(&pdev->dev, "failed to claim register region\n");
return PTR_ERR(busmon->regs);
}
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res)
return -ENXIO;
busmon->irq = res->start;
ret = devm_request_irq(&pdev->dev, busmon->irq, busmon_logging_irq,
0, dev_name(&pdev->dev), busmon);
if (ret) {
dev_err(&pdev->dev, "irq request failed\n");
return -ENXIO;
}
busmon_panic = devm_kzalloc(&pdev->dev,
sizeof(struct busmon_panic_block), GFP_KERNEL);
if (!busmon_panic) {
dev_err(&pdev->dev, "failed to allocate memory for driver's "
"panic handler data\n");
} else {
busmon_panic->nb_panic_block.notifier_call =
busmon_logging_panic_handler;
busmon_panic->pdev = busmon;
atomic_notifier_chain_register(&panic_notifier_list,
&busmon_panic->nb_panic_block);
}
platform_set_drvdata(pdev, busmon);
busmon_timeout_init(busmon);
busmon_logging_init(busmon);
return 0;
}
static int busmon_remove(struct platform_device *pdev)
{
platform_set_drvdata(pdev, NULL);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int busmon_suspend(struct device *dev)
{
return 0;
}
static int busmon_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct busmon_dev *busmon = platform_get_drvdata(pdev);
busmon_timeout_init(busmon);
busmon_logging_init(busmon);
return 0;
}
static SIMPLE_DEV_PM_OPS(busmon_pm_ops,
busmon_suspend,
busmon_resume);
#define BUSMON_PM (&busmon_pm_ops)
#else
#define BUSMON_PM NULL
#endif
static struct platform_driver exynos_busmon_driver = {
.probe = busmon_probe,
.remove = busmon_remove,
.driver = {
.name = "exynos-busmon",
.of_match_table = busmon_dt_match,
.pm = BUSMON_PM,
},
};
module_platform_driver(exynos_busmon_driver);
MODULE_DESCRIPTION("Samsung Exynos BUS MONITOR DRIVER");
MODULE_AUTHOR("Hosung Kim <hosung0.kim@samsung.com");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:exynos-busmon");

View file

@ -0,0 +1,816 @@
/*
* linux/arch/arm/mach-exynos/exynos-coresight.c
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/smp.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/suspend.h>
#include <linux/smpboot.h>
#include <linux/delay.h>
#include <linux/exynos-ss.h>
#include <asm/core_regs.h>
#include "coresight-priv.h"
#define CHANNEL (0)
#define PORT (1)
#define NONE (-1)
#define ARR_SZ (2)
#define etm_writel(base, val, off) __raw_writel((val), base + off)
#define etm_readl(base, off) __raw_readl(base + off)
#define SOFT_LOCK(base) \
do { mb(); isb(); etm_writel(base, 0x0, LAR); } while (0)
#define SOFT_UNLOCK(base) \
do { etm_writel(base, OSLOCK_MAGIC, LAR); mb(); isb(); } while (0)
struct cpu_etm_info {
void __iomem *base;
u32 enabled;
u32 f_port[ARR_SZ];
};
struct funnel_info {
void __iomem *base;
u32 port_status;
u32 f_port[ARR_SZ];
};
#ifdef CONFIG_EXYNOS_CORESIGHT_ETF
struct etf_info {
void __iomem *base;
u32 f_port[ARR_SZ];
};
#endif
#ifdef CONFIG_EXYNOS_CORESIGHT_ETR
struct etr_info {
void __iomem *base;
struct clk *etr_clk;
u32 enabled;
u32 buf_addr;
u32 buf_size;
u32 buf_pointer;
};
#endif
struct exynos_trace_info {
struct cpu_etm_info cpu[NR_CPUS];
spinlock_t trace_lock;
u32 enabled;
u32 procsel;
u32 config;
u32 sync_period;
u32 victlr;
u32 funnel_num;
struct funnel_info *funnel;
#ifdef CONFIG_EXYNOS_CORESIGHT_ETF
u32 etf_num;
struct etf_info *etf;
#endif
#ifdef CONFIG_EXYNOS_CORESIGHT_ETR
struct etr_info etr;
#endif
};
static struct exynos_trace_info *g_trace_info;
#ifdef CONFIG_EXYNOS_CORESIGHT_ETB
static void __iomem *g_etb_base;
#endif
#ifdef CONFIG_EXYNOS_CORESIGHT_STM
static u32 stm_funnel_port[ARR_SZ];
#endif
static DEFINE_PER_CPU(struct task_struct *, etm_task);
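/*
 * Program every funnel: unlock it, write the default control value
 * and, when a funnel feeds another funnel, open the matching port on
 * the upstream one.
 */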
static void exynos_funnel_init(void)
{
unsigned int i, port, channel;
struct funnel_info *funnel;
for (i = 0; i < g_trace_info->funnel_num; i++) {
funnel = &g_trace_info->funnel[i];
spin_lock(&g_trace_info->trace_lock);
SOFT_UNLOCK(funnel->base);
funnel->port_status = etm_readl(funnel->base, FUNCTRL);
funnel->port_status = (funnel->port_status & 0x3ff) | 0x300;
etm_writel(funnel->base, funnel->port_status, FUNCTRL);
etm_writel(funnel->base, 0x0, FUNPRIORCTRL);
SOFT_LOCK(funnel->base);
spin_unlock(&g_trace_info->trace_lock);
if (funnel->f_port[CHANNEL] != NONE) {
channel = funnel->f_port[CHANNEL];
port = funnel->f_port[PORT];
funnel = &g_trace_info->funnel[channel];
spin_lock(&g_trace_info->trace_lock);
SOFT_UNLOCK(funnel->base);
funnel->port_status = etm_readl(funnel->base, FUNCTRL);
funnel->port_status |= BIT(port);
etm_writel(funnel->base, funnel->port_status, FUNCTRL);
SOFT_LOCK(funnel->base);
spin_unlock(&g_trace_info->trace_lock);
}
}
}
static void exynos_funnel_close(void)
{
unsigned int i;
struct funnel_info *funnel;
for (i = 0; i < g_trace_info->funnel_num; i++) {
funnel = &g_trace_info->funnel[i];
spin_lock(&g_trace_info->trace_lock);
SOFT_UNLOCK(funnel->base);
etm_writel(funnel->base, 0x300, FUNCTRL);
SOFT_LOCK(funnel->base);
spin_unlock(&g_trace_info->trace_lock);
}
}
#ifdef CONFIG_EXYNOS_CORESIGHT_ETF
static void exynos_etf_enable(void)
{
unsigned int i, port, channel;
struct etf_info *etf;
struct funnel_info *funnel;
for (i = 0; i < g_trace_info->etf_num; i++) {
etf = &g_trace_info->etf[i];
SOFT_UNLOCK(etf->base);
etm_writel(etf->base, 0x0, TMCCTL);
etm_writel(etf->base, 0x800, TMCRSZ);
#ifdef CONFIG_EXYNOS_CORESIGHT_ETR
etm_writel(etf->base, 0x2, TMCMODE);
#else
etm_writel(etf->base, 0x0, TMCMODE);
#endif
etm_writel(etf->base, 0x0, TMCTGR);
etm_writel(etf->base, 0x0, TMCFFCR);
etm_writel(etf->base, 0x1, TMCCTL);
SOFT_LOCK(etf->base);
if (etf->f_port[CHANNEL] != NONE) {
channel = etf->f_port[CHANNEL];
port = etf->f_port[PORT];
funnel = &g_trace_info->funnel[channel];
spin_lock(&g_trace_info->trace_lock);
SOFT_UNLOCK(funnel->base);
funnel->port_status = etm_readl(funnel->base, FUNCTRL);
funnel->port_status |= BIT(port);
etm_writel(funnel->base, funnel->port_status, FUNCTRL);
SOFT_LOCK(funnel->base);
spin_unlock(&g_trace_info->trace_lock);
}
}
}
static void exynos_etf_disable(void)
{
unsigned int i, port, channel;
struct etf_info *etf;
struct funnel_info *funnel;
for (i = 0; i < g_trace_info->etf_num; i++) {
etf = &g_trace_info->etf[i];
SOFT_UNLOCK(etf->base);
etm_writel(etf->base, 0x0, TMCCTL);
SOFT_LOCK(etf->base);
if (etf->f_port[CHANNEL] != NONE) {
channel = etf->f_port[CHANNEL];
port = etf->f_port[PORT];
funnel = &g_trace_info->funnel[channel];
spin_lock(&g_trace_info->trace_lock);
SOFT_UNLOCK(funnel->base);
funnel->port_status = etm_readl(funnel->base, FUNCTRL);
funnel->port_status &= ~BIT(port);
etm_writel(funnel->base, funnel->port_status, FUNCTRL);
SOFT_LOCK(funnel->base);
spin_unlock(&g_trace_info->trace_lock);
}
}
}
#endif
#ifdef CONFIG_EXYNOS_CORESIGHT_ETR
static void exynos_etr_enable(void)
{
struct etr_info *etr = &g_trace_info->etr;
SOFT_UNLOCK(etr->base);
etm_writel(etr->base, 0x0, TMCCTL);
etm_writel(etr->base, etr->buf_size, TMCRSZ);
etm_writel(etr->base, 0x4, TMCTGR);
etm_writel(etr->base, 0x0, TMCAXICTL);
etm_writel(etr->base, etr->buf_addr, TMCDBALO);
etm_writel(etr->base, 0x0, TMCDBAHI);
etm_writel(etr->base, etr->buf_pointer, TMCRWP);
etm_writel(etr->base, 0x0, TMCMODE);
etm_writel(etr->base, 0x2001, TMCFFCR);
etm_writel(etr->base, 0x1, TMCCTL);
SOFT_LOCK(etr->base);
}
static void exynos_etr_disable(void)
{
struct etr_info *etr = &g_trace_info->etr;
SOFT_UNLOCK(etr->base);
etm_writel(etr->base, 0x0, TMCCTL);
etr->buf_pointer = etm_readl(etr->base, TMCRWP);
SOFT_LOCK(etr->base);
}
#endif
#ifdef CONFIG_EXYNOS_CORESIGHT_ETB
static void exynos_etb_enable(void __iomem *etb_base, int src)
{
int i;
unsigned int depth = etm_readl(etb_base, TMCRSZ);
SOFT_UNLOCK(etb_base);
etm_writel(etb_base, 0x0, TMCCTL);
etm_writel(etb_base, 0x0, TMCRWP);
/* clear entire RAM buffer */
for (i = 0; i < depth; i++)
etm_writel(etb_base, 0x0, TMCRWD);
/* reset write RAM pointer address */
etm_writel(etb_base, 0x0, TMCRWP);
/* reset read RAM pointer address */
etm_writel(etb_base, 0x0, TMCRRP);
etm_writel(etb_base, 0x1, TMCTGR);
if (src) {
etm_writel(etb_base, 0x0, TMCFFCR);
pr_info("Data formatter disabled!\n");
} else {
etm_writel(etb_base, 0x2001, TMCFFCR);
pr_info("Data formatter enabled!\n");
}
/* ETB trace capture enable */
etm_writel(etb_base, 0x1, TMCCTL);
SOFT_LOCK(etb_base);
}
static void exynos_etb_disable(void __iomem *etb_base, int src)
{
uint32_t ffcr;
SOFT_UNLOCK(etb_base);
if (src) {
etm_writel(etb_base, 0x2001, TMCFFCR);
pr_info("Data formatter enabled!\n");
} else {
etm_writel(etb_base, 0x0, TMCFFCR);
pr_info("Data formatter disabled!\n");
}
ffcr = etm_readl(etb_base, TMCFFCR);
ffcr |= BIT(6);
etm_writel(etb_base, ffcr, TMCFFCR);
udelay(1500);
etm_writel(etb_base, 0x0, TMCCTL);
udelay(1500);
SOFT_LOCK(etb_base);
}
extern void exynos_etb_etm(void)
{
struct funnel_info *funnel;
unsigned int channel, port;
exynos_etb_disable(g_etb_base, 0);
if (stm_funnel_port[CHANNEL] != NONE) {
channel = stm_funnel_port[CHANNEL];
port = stm_funnel_port[PORT];
funnel = &g_trace_info->funnel[channel];
spin_lock(&g_trace_info->trace_lock);
SOFT_UNLOCK(funnel->base);
funnel->port_status = etm_readl(funnel->base, FUNCTRL);
funnel->port_status &= ~BIT(port);
etm_writel(funnel->base, funnel->port_status, FUNCTRL);
SOFT_LOCK(funnel->base);
spin_unlock(&g_trace_info->trace_lock);
}
exynos_etb_enable(g_etb_base, 0);
}
extern void exynos_etb_stm(void)
{
struct funnel_info *funnel;
unsigned int channel, port;
exynos_etb_disable(g_etb_base, 1);
if (stm_funnel_port[CHANNEL] != NONE) {
channel = stm_funnel_port[CHANNEL];
port = stm_funnel_port[PORT];
funnel = &g_trace_info->funnel[channel];
spin_lock(&g_trace_info->trace_lock);
SOFT_UNLOCK(funnel->base);
funnel->port_status = etm_readl(funnel->base, FUNCTRL);
funnel->port_status |= BIT(port);
etm_writel(funnel->base, funnel->port_status, FUNCTRL);
SOFT_LOCK(funnel->base);
spin_unlock(&g_trace_info->trace_lock);
}
exynos_etb_enable(g_etb_base, 1);
}
#endif
static int etm_info_init(void)
{
/* Main control and Configuration */
spin_lock_init(&g_trace_info->trace_lock);
g_trace_info->funnel_num = 0;
#ifdef CONFIG_EXYNOS_CORESIGHT_ETF
g_trace_info->etf_num = 0;
#endif
g_trace_info->procsel = 0;
g_trace_info->config = 0;
g_trace_info->sync_period = 0x8;
g_trace_info->victlr = 0x0;
#ifdef CONFIG_EXYNOS_CORESIGHT_ETR
g_trace_info->etr.buf_addr = exynos_ss_get_item_paddr("log_etm");
if (!g_trace_info->etr.buf_addr)
return -ENOMEM;
g_trace_info->etr.buf_size = exynos_ss_get_item_size("log_etm") / 4;
if (!g_trace_info->etr.buf_size)
return -ENOMEM;
g_trace_info->etr.buf_pointer = 0;
#endif
return 0;
}
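/*
 * Bring up the ETM of one core: take the OS lock, program the trace
 * configuration with a per-core trace ID, start tracing, release the
 * OS lock and finally open this core's port on its funnel.
 */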
static void etm_enable(unsigned int cpu)
{
struct cpu_etm_info *cinfo = &g_trace_info->cpu[cpu];
struct funnel_info *funnel;
unsigned int channel, port;
SOFT_UNLOCK(cinfo->base);
etm_writel(cinfo->base, 0x1, ETMOSLAR);
etm_writel(cinfo->base, 0x0, ETMCTLR);
/* Main control and Configuration */
etm_writel(cinfo->base, g_trace_info->procsel, ETMPROCSELR);
etm_writel(cinfo->base, g_trace_info->config, ETMCONFIG);
etm_writel(cinfo->base, g_trace_info->sync_period, ETMSYNCPR);
etm_writel(cinfo->base, cpu+1, ETMTRACEIDR);
/* additional register setting */
etm_writel(cinfo->base, 0x1000, ETMEVENTCTL0R);
etm_writel(cinfo->base, 0x0, ETMEVENTCTL1R);
etm_writel(cinfo->base, 0xc, ETMSTALLCTLR);
etm_writel(cinfo->base, 0x801, ETMCONFIG);
etm_writel(cinfo->base, 0x0, ETMTSCTLR);
etm_writel(cinfo->base, 0x4, ETMCCCCTLR);
etm_writel(cinfo->base, 0x201, ETMVICTLR);
etm_writel(cinfo->base, 0x0, ETMVIIECTLR);
etm_writel(cinfo->base, 0x0, ETMVISSCTLR);
etm_writel(cinfo->base, 0x2, ETMAUXCTLR);
etm_writel(cinfo->base, 0x1, ETMCTLR);
etm_writel(cinfo->base, 0x0, ETMOSLAR);
SOFT_LOCK(cinfo->base);
channel = cinfo->f_port[CHANNEL];
port = cinfo->f_port[PORT];
funnel = &g_trace_info->funnel[channel];
spin_lock(&g_trace_info->trace_lock);
cinfo->enabled = 1;
SOFT_UNLOCK(funnel->base);
funnel->port_status = etm_readl(funnel->base, FUNCTRL);
funnel->port_status |= BIT(port);
etm_writel(funnel->base, funnel->port_status, FUNCTRL);
SOFT_LOCK(funnel->base);
spin_unlock(&g_trace_info->trace_lock);
}
static void etm_disable(unsigned int cpu)
{
struct cpu_etm_info *cinfo = &g_trace_info->cpu[cpu];
struct funnel_info *funnel;
unsigned int channel, port;
channel = cinfo->f_port[CHANNEL];
port = cinfo->f_port[PORT];
funnel = &g_trace_info->funnel[channel];
spin_lock(&g_trace_info->trace_lock);
cinfo->enabled = 0;
SOFT_UNLOCK(funnel->base);
funnel->port_status = etm_readl(funnel->base, FUNCTRL);
funnel->port_status &= ~BIT(port);
etm_writel(funnel->base, funnel->port_status, FUNCTRL);
SOFT_LOCK(funnel->base);
spin_unlock(&g_trace_info->trace_lock);
SOFT_UNLOCK(cinfo->base);
etm_writel(cinfo->base, 0x0, ETMCTLR);
etm_writel(cinfo->base, 0x1, ETMOSLAR);
SOFT_LOCK(cinfo->base);
}
extern void exynos_trace_start(void)
{
g_trace_info->enabled = 1;
exynos_funnel_init();
#ifdef CONFIG_EXYNOS_CORESIGHT_ETB
exynos_etb_enable(g_etb_base, 0);
#endif
#ifdef CONFIG_EXYNOS_CORESIGHT_ETF
exynos_etf_enable();
#endif
#ifdef CONFIG_EXYNOS_CORESIGHT_ETR
clk_prepare_enable(g_trace_info->etr.etr_clk);
exynos_etr_enable();
#endif
pr_info("coresight: %s.\n", __func__);
}
extern void exynos_trace_stop(void)
{
if (!g_trace_info->enabled)
return;
exynos_funnel_close();
#ifdef CONFIG_EXYNOS_CORESIGHT_ETF
exynos_etf_disable();
#endif
#ifdef CONFIG_EXYNOS_CORESIGHT_ETR
exynos_etr_disable();
clk_disable_unprepare(g_trace_info->etr.etr_clk);
#endif
etm_disable(raw_smp_processor_id());
g_trace_info->enabled = 0;
pr_info("coresight: %s.\n", __func__);
}
static void exynos_trace_ipi(void *info)
{
int *hcpu = (int *)info;
etm_disable(*hcpu);
}
static int __cpuinit core_notify(struct notifier_block *self,
unsigned long action, void *data)
{
int hcpu = (unsigned long)data;
switch (action) {
case CPU_DYING:
smp_call_function_single(hcpu, exynos_trace_ipi, &hcpu, 0);
break;
};
return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata core_nb = {
.notifier_call = core_notify,
};
static int exynos_c2_etm_pm_notifier(struct notifier_block *self,
unsigned long action, void *v)
{
int cpu = raw_smp_processor_id();
switch (action) {
case CPU_PM_ENTER:
etm_disable(cpu);
break;
case CPU_PM_ENTER_FAILED:
case CPU_PM_EXIT:
etm_enable(cpu);
break;
case CPU_CLUSTER_PM_ENTER:
break;
case CPU_CLUSTER_PM_ENTER_FAILED:
case CPU_CLUSTER_PM_EXIT:
break;
}
return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata exynos_c2_etm_pm_nb = {
.notifier_call = exynos_c2_etm_pm_notifier,
};
static int exynos_etm_pm_notifier(struct notifier_block *notifier,
unsigned long pm_event, void *v)
{
switch (pm_event) {
case PM_SUSPEND_PREPARE:
exynos_trace_stop();
break;
case PM_POST_SUSPEND:
exynos_trace_start();
break;
}
return NOTIFY_OK;
}
static struct notifier_block exynos_etm_pm_nb = {
.notifier_call = exynos_etm_pm_notifier,
};
#ifdef CONFIG_OF
static const struct of_device_id etm_dt_match[] = {
{ .compatible = "exynos,coresight",},
{},
};
#endif
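/*
 * Parse the "exynos,coresight" node: allocate the trace context, then
 * ioremap every cs/etf/funnel (and etr/etb/stm when configured)
 * sub-node at "base" + its offset and record which funnel port it
 * feeds.
 */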
static int exynos_cs_etm_init_dt(void)
{
struct device_node *cs_np, *np = NULL;
unsigned int offset, cs_reg_base;
int i = 0;
g_trace_info = kzalloc(sizeof(struct exynos_trace_info), GFP_KERNEL);
if (!g_trace_info)
return -ENOMEM;
if (etm_info_init())
return -ENOMEM;
cs_np = of_find_matching_node(NULL, etm_dt_match);
if (of_property_read_u32(cs_np, "funnel-num", &g_trace_info->funnel_num))
return -EINVAL;
g_trace_info->funnel = kzalloc(sizeof(struct funnel_info) *
g_trace_info->funnel_num, GFP_KERNEL);
#ifdef CONFIG_EXYNOS_CORESIGHT_ETF
if (of_property_read_u32(cs_np, "etf-num", &g_trace_info->etf_num))
return -EINVAL;
g_trace_info->etf = kzalloc(sizeof(struct etf_info) *
g_trace_info->etf_num, GFP_KERNEL);
#endif
if (of_property_read_u32(cs_np, "base", &cs_reg_base))
return -EINVAL;
while ((np = of_find_node_by_type(np, "cs"))) {
if (of_property_read_u32(np, "etm-offset", &offset))
return -EINVAL;
g_trace_info->cpu[i].base = ioremap(cs_reg_base + offset, SZ_4K);
if (!g_trace_info->cpu[i].base)
return -ENOMEM;
if (of_property_read_u32_array(np, "funnel-port",
g_trace_info->cpu[i].f_port, 2))
g_trace_info->cpu[i].f_port[CHANNEL] = NONE;
i++;
}
#ifdef CONFIG_EXYNOS_CORESIGHT_ETF
i = 0;
while ((np = of_find_node_by_type(np, "etf"))) {
if (of_property_read_u32(np, "offset", &offset))
return -EINVAL;
g_trace_info->etf[i].base = ioremap(cs_reg_base + offset, SZ_4K);
if (!g_trace_info->etf[i].base)
return -ENOMEM;
if (of_property_read_u32_array(np, "funnel-port",
g_trace_info->etf[i].f_port, 2))
g_trace_info->etf[i].f_port[CHANNEL] = NONE;
i++;
}
#endif
i = 0;
while ((np = of_find_node_by_type(np, "funnel"))) {
if (of_property_read_u32(np, "offset", &offset))
return -EINVAL;
g_trace_info->funnel[i].base = ioremap(cs_reg_base + offset, SZ_4K);
if (!g_trace_info->funnel[i].base)
return -ENOMEM;
if (of_property_read_u32_array(np, "funnel-port",
g_trace_info->funnel[i].f_port, 2))
g_trace_info->funnel[i].f_port[CHANNEL] = NONE;
i++;
}
#ifdef CONFIG_EXYNOS_CORESIGHT_ETR
if (!(np = of_find_node_by_type(np, "etr")))
return -EINVAL;
if (of_property_read_u32(np, "offset", &offset))
return -EINVAL;
g_trace_info->etr.base = ioremap(cs_reg_base + offset, SZ_4K);
if (!g_trace_info->etr.base)
return -ENOMEM;
#endif
#ifdef CONFIG_EXYNOS_CORESIGHT_ETB
if (!(np = of_find_node_by_type(np, "etb")))
return -EINVAL;
if (of_property_read_u32(np, "offset", &offset))
return -EINVAL;
g_etb_base = ioremap(cs_reg_base + offset, SZ_4K);
if (!g_etb_base)
return -ENOMEM;
#endif
#ifdef CONFIG_EXYNOS_CORESIGHT_STM
if (!(np = of_find_node_by_type(np, "stm")))
return -EINVAL;
if (of_property_read_u32_array(np, "funnel-port",
stm_funnel_port, 2))
stm_funnel_port[CHANNEL] = NONE;
#endif
return 0;
}
static void etm_hotplug_out(unsigned int cpu)
{
etm_disable(cpu);
}
static void etm_hotplug_in(unsigned int cpu)
{
etm_enable(cpu);
}
static int etm_should_run(unsigned int cpu) { return 0; }
static void etm_thread_fn(unsigned int cpu) { }
static struct smp_hotplug_thread etm_threads = {
.store = &etm_task,
.thread_should_run = etm_should_run,
.thread_fn = etm_thread_fn,
.thread_comm = "etm/%u",
.setup = etm_hotplug_in,
.park = etm_hotplug_out,
.unpark = etm_hotplug_in,
};
static int __init exynos_etm_init(void)
{
int ret;
ret = exynos_cs_etm_init_dt();
if (ret < 0)
goto err;
ret = smpboot_register_percpu_thread(&etm_threads);
if (ret < 0)
goto err;
register_pm_notifier(&exynos_etm_pm_nb);
register_cpu_notifier(&core_nb);
cpu_pm_register_notifier(&exynos_c2_etm_pm_nb);
g_trace_info->enabled = 1;
pr_info("coresight: ETM enable.\n");
return 0;
err:
g_trace_info->enabled = 0;
pr_err("coresight: ETM enable FAILED!!! : ret = %d\n", ret);
return ret;
}
early_initcall(exynos_etm_init);
static int __init exynos_tmc_init(void)
{
if (!g_trace_info->enabled) {
pr_err("coresight TMC init FAILED!!!\n");
return -ENODEV;
}
#ifdef CONFIG_EXYNOS_CORESIGHT_ETR
g_trace_info->etr.etr_clk = clk_get(NULL, "etr_clk");
#endif
exynos_trace_start();
return 0;
}
postcore_initcall(exynos_tmc_init);
#ifdef CONFIG_EXYNOS_CORESIGHT_ETM_SYSFS
static struct bus_type etm_subsys = {
.name = "exynos-etm",
.dev_name = "exynos-etm",
};
static ssize_t etm_show_all_status(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct cpu_etm_info *cinfo;
struct funnel_info *funnel;
struct etf_info *etf;
unsigned long tmp, port_status, read_p;
int i, channel, port, size = 0;
size += scnprintf(buf + size, 15,"ETM Status\n");
size += scnprintf(buf + size, 80,
"-------------------------------------------------------------\n");
size += scnprintf(buf + size, 80, " %-8s | %-10s | %-14s | %-4s | %-11s\n",
"Core Num", "ETM status", "Funnel_channel", "Port", "Port Status");
for (i = 0; i < NR_CPUS; i++) {
cinfo = &g_trace_info->cpu[i];
channel = cinfo->f_port[CHANNEL];
port = cinfo->f_port[PORT];
funnel = &g_trace_info->funnel[channel];
spin_lock(&g_trace_info->trace_lock);
port_status = (funnel->port_status >> port) & 0x1;
spin_unlock(&g_trace_info->trace_lock);
size += scnprintf(buf + size, 80,
" %-8d | %10s | %-14u | %-4d | %-11s\n",
i, cinfo->enabled ? "enabled" : "disabled", channel, port,
port_status ? "open" : "close");
}
size += scnprintf(buf + size, 80,
"-------------------------------------------------------------\n");
for (i = 0; i < g_trace_info->funnel_num; i++) {
funnel = &g_trace_info->funnel[i];
SOFT_UNLOCK(funnel->base);
tmp = etm_readl(funnel->base, FUNCTRL);
SOFT_LOCK(funnel->base);
size += scnprintf(buf + size, 30, "FUNNEL%d Status : 0x%lx\n", i, tmp);
}
#ifdef CONFIG_EXYNOS_CORESIGHT_ETF
for (i = 0; i < g_trace_info->etf_num; i++) {
etf = &g_trace_info->etf[i];
SOFT_UNLOCK(etf->base);
tmp = etm_readl(etf->base, TMCCTL);
read_p = etm_readl(etf->base, TMCRWP);
SOFT_LOCK(etf->base);
size += scnprintf(buf + size, 30, "ETF%d Status : %3sabled\n",
i, tmp & 0x1 ? "en" : "dis");
size += scnprintf(buf + size, 30, "ETF%d RWP Reg : 0x%lx\n", i, read_p);
}
#endif
#ifdef CONFIG_EXYNOS_CORESIGHT_ETR
SOFT_UNLOCK(g_trace_info->etr.base);
tmp = etm_readl(g_trace_info->etr.base, TMCCTL);
read_p = etm_readl(g_trace_info->etr.base, TMCRWP);
SOFT_LOCK(g_trace_info->etr.base);
size += scnprintf(buf + size, 30, "ETR Status : %3sabled\n",
tmp & 0x1 ? "en" : "dis");
size += scnprintf(buf + size, 30, "ETR RWP Reg : 0x%lx\n", read_p);
size += scnprintf(buf + size, 30, "ETR save RWP : 0x%x\n\n",
g_trace_info->etr.buf_pointer);
#endif
return size;
}
static struct kobj_attribute etm_enable_attr =
__ATTR(etm_status, 0644, etm_show_all_status, NULL);
static struct attribute *etm_sysfs_attrs[] = {
&etm_enable_attr.attr,
NULL,
};
static struct attribute_group etm_sysfs_group = {
.attrs = etm_sysfs_attrs,
};
static const struct attribute_group *etm_sysfs_groups[] = {
&etm_sysfs_group,
NULL,
};
static int __init exynos_etm_sysfs_init(void)
{
int ret = 0;
ret = subsys_system_register(&etm_subsys, etm_sysfs_groups);
if (ret)
pr_err("fail to register exynos-etm subsys\n");
return ret;
}
late_initcall(exynos_etm_sysfs_init);
#endif

View file

@ -0,0 +1,609 @@
/*
* linux/arch/arm/mach-exynos/exynos-coresight.c
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/of.h>
#include <asm/core_regs.h>
#include <asm/cputype.h>
#ifdef CONFIG_PMUCAL_MOD
#include "../soc/samsung/pwrcal/pwrcal.h"
#else
#include <soc/samsung/exynos-pmu.h>
#endif
#define CS_READ(base, offset) __raw_readl(base + offset)
#define CS_READQ(base, offset) __raw_readq(base + offset)
#define CS_WRITE(val, base, offset) __raw_writel(val, base + offset)
#define SYS_READ(reg, val) asm volatile("mrs %0, " #reg : "=r" (val))
#define SYS_WRITE(reg, val) asm volatile("msr " #reg ", %0" :: "r" (val))
#define DBG_UNLOCK(base) \
do { mb(); __raw_writel(OSLOCK_MAGIC, base + DBGLAR); }while(0)
#define DBG_LOCK(base) \
do { __raw_writel(0x1, base + DBGLAR); mb(); }while(0)
#define DBG_REG_MAX_SIZE (8)
#define DBG_BW_REG_MAX_SIZE (30)
#define OS_LOCK_FLAG (DBG_REG_MAX_SIZE - 1)
#define ITERATION CONFIG_PC_ITERATION
#define CORE_CNT CONFIG_NR_CPUS
#define MSB_PADDING (0xFFFFFFC000000000)
#define MSB_MASKING (0x0001ffc000000000)
struct cs_dbg_cpu {
void __iomem *base;
ssize_t reg[DBG_REG_MAX_SIZE];
};
struct cs_dbg {
u8 arch;
u8 nr_wp;
u8 nr_bp;
ssize_t bw_reg[DBG_BW_REG_MAX_SIZE];
struct cs_dbg_cpu cpu[CORE_CNT];
};
static struct cs_dbg dbg;
static DEFINE_SPINLOCK(debug_lock);
static unsigned int cs_arm_arch;
bool FLAG_T32_EN = true;
static inline void get_arm_arch_version(void)
{
cs_arm_arch = CS_READ(dbg.cpu[0].base, MIDR);
cs_arm_arch = (cs_arm_arch >> 4) & 0xf00;
}
static inline void dbg_os_lock(void __iomem *base)
{
switch (cs_arm_arch) {
case ARMV8_PROCESSOR:
CS_WRITE(0x1, base, DBGOSLAR);
break;
default:
break;
}
mb();
isb();
}
static inline void dbg_os_unlock(void __iomem *base)
{
isb();
mb();
switch (cs_arm_arch) {
case ARMV8_PROCESSOR:
CS_WRITE(0x0, base, DBGOSLAR);
break;
default:
break;
}
}
#ifdef CONFIG_EXYNOS_CORESIGHT_PC_INFO
static int exynos_cs_stat;
unsigned long exynos_cs_pc[CORE_CNT][ITERATION];
static inline bool have_pc_offset(void __iomem *base)
{
return !(CS_READ(base, DBGDEVID1) & 0xf);
}
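/*
 * Sample the program counter of one core through its external debug
 * registers; returns 0 when the core is offline or powered down.
 */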
unsigned long exynos_cs_get_pcval(int cpu)
{
unsigned long valLo, valHi;
void __iomem *base = dbg.cpu[cpu].base;
int cpu_is_enabled;
#ifdef CONFIG_PMUCAL_MOD
cpu_is_enabled = cal_cpu_status(cpu);
#else
cpu_is_enabled = exynos_cpu.power_state(cpu);
#endif
if(!cpu_online(cpu) || !cpu_is_enabled)
return 0;
DBG_UNLOCK(base);
dbg_os_unlock(base);
valLo = CS_READ(base, DBGPCSRlo);
valHi = CS_READ(base, DBGPCSRhi);
dbg_os_lock(base);
DBG_LOCK(base);
return ((valHi << 32UL) | valLo);
}
EXPORT_SYMBOL(exynos_cs_get_pcval);
void exynos_cs_show_pcval(void)
{
unsigned long flags;
unsigned int cpu, iter, curr_cpu;
unsigned long val = 0, valHi = 0;
void __iomem *base;
char buf[KSYM_SYMBOL_LEN];
int cpu_is_enabled;
if (exynos_cs_stat < 0)
return;
spin_lock_irqsave(&debug_lock, flags);
curr_cpu = raw_smp_processor_id();
for (iter = 0; iter < ITERATION; iter++) {
for (cpu = 0; cpu < CORE_CNT; cpu++) {
base = dbg.cpu[cpu].base;
exynos_cs_pc[cpu][iter] = 0;
if (base == NULL || cpu == curr_cpu)
continue;
#ifdef CONFIG_PMUCAL_MOD
cpu_is_enabled = cal_cpu_status(cpu);
#else
cpu_is_enabled = exynos_cpu.power_state(cpu);
#endif
if (!cpu_is_enabled)
continue;
DBG_UNLOCK(base);
dbg_os_unlock(base);
val = CS_READ(base, DBGPCSRlo);
valHi = CS_READ(base, DBGPCSRhi);
val |= (valHi << 32L);
if (have_pc_offset(base))
val -= 0x8;
dbg_os_lock(base);
DBG_LOCK(base);
if (MSB_MASKING == (MSB_MASKING & val))
exynos_cs_pc[cpu][iter] = MSB_PADDING | val;
else
exynos_cs_pc[cpu][iter] = val;
}
}
spin_unlock_irqrestore(&debug_lock, flags);
for (cpu = 0; cpu < CORE_CNT; cpu++) {
pr_err("CPU[%d] saved pc value\n", cpu);
for (iter = 0; iter < ITERATION; iter++) {
if (exynos_cs_pc[cpu][iter] == 0)
continue;
sprint_symbol(buf, exynos_cs_pc[cpu][iter]);
pr_err(" 0x%016zx : %s\n",
exynos_cs_pc[cpu][iter], buf);
}
}
}
EXPORT_SYMBOL(exynos_cs_show_pcval);
#endif
#ifdef CONFIG_EXYNOS_CORESIGHT_MAINTAIN_DBG_REG
/* save debug registers when suspending */
static void debug_save_bw_reg(int cpu)
{
int i, idx = 0;
pr_debug("%s: cpu %d\n", __func__, cpu);
for (i = 0; i < CORE_CNT; i++) {
if (!dbg.cpu[i].reg[OS_LOCK_FLAG])
return;
}
switch (dbg.arch) {
case DEBUG_ARCH_V8:
SYS_READ(DBGBVR0_EL1, dbg.bw_reg[idx++]); /* DBGBVR */
SYS_READ(DBGBVR1_EL1, dbg.bw_reg[idx++]);
SYS_READ(DBGBVR2_EL1, dbg.bw_reg[idx++]);
SYS_READ(DBGBVR3_EL1, dbg.bw_reg[idx++]);
SYS_READ(DBGBVR4_EL1, dbg.bw_reg[idx++]);
SYS_READ(DBGBVR5_EL1, dbg.bw_reg[idx++]);
SYS_READ(DBGBCR0_EL1, dbg.bw_reg[idx++]); /* DBGDCR */
SYS_READ(DBGBCR1_EL1, dbg.bw_reg[idx++]);
SYS_READ(DBGBCR2_EL1, dbg.bw_reg[idx++]);
SYS_READ(DBGBCR3_EL1, dbg.bw_reg[idx++]);
SYS_READ(DBGBCR4_EL1, dbg.bw_reg[idx++]);
SYS_READ(DBGBCR5_EL1, dbg.bw_reg[idx++]);
SYS_READ(DBGWVR0_EL1, dbg.bw_reg[idx++]); /* DBGWVR */
SYS_READ(DBGWVR1_EL1, dbg.bw_reg[idx++]);
SYS_READ(DBGWVR2_EL1, dbg.bw_reg[idx++]);
SYS_READ(DBGWVR3_EL1, dbg.bw_reg[idx++]);
SYS_READ(DBGWCR0_EL1, dbg.bw_reg[idx++]); /* DBGDCR */
SYS_READ(DBGWCR1_EL1, dbg.bw_reg[idx++]);
SYS_READ(DBGWCR2_EL1, dbg.bw_reg[idx++]);
SYS_READ(DBGWCR3_EL1, dbg.bw_reg[idx++]);
break;
default:
break;
}
}
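/*
 * Save the per-core debug state before power-down; the shared
 * break-/watchpoint registers are only saved once every core has taken
 * the OS lock.
 */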
static void debug_suspend_cpu(int cpu)
{
int idx = 0;
struct cs_dbg_cpu *cpudata = &dbg.cpu[cpu];
void __iomem *base = cpudata->base;
pr_debug("%s: cpu %d\n", __func__, cpu);
if (!FLAG_T32_EN)
return;
DBG_UNLOCK(base);
spin_lock(&debug_lock);
dbg_os_lock(base);
cpudata->reg[OS_LOCK_FLAG] = 1;
debug_save_bw_reg(cpu);
spin_unlock(&debug_lock);
switch (dbg.arch) {
case DEBUG_ARCH_V8:
SYS_READ(MDSCR_EL1, cpudata->reg[idx++]); /* DBGDSCR */
SYS_READ(OSECCR_EL1, cpudata->reg[idx++]); /* DBGECCR */
SYS_READ(DBGDTRTX_EL0, cpudata->reg[idx++]); /* DBGDTRTX */
SYS_READ(DBGDTRRX_EL0, cpudata->reg[idx++]); /* DBGDTRRX */
SYS_READ(DBGCLAIMCLR_EL1, cpudata->reg[idx++]); /* DBGCLAIMCLR */
break;
default:
break;
}
DBG_LOCK(base);
pr_debug("%s: cpu %d done\n", __func__, cpu);
}
/* restore debug registers when resuming */
static void debug_restore_bw_reg(int cpu)
{
int core = 0, idx = 0;
struct cs_dbg_cpu *cpudata = &dbg.cpu[cpu];
void __iomem *a_base = NULL;
pr_debug("%s: cpu %d\n", __func__, cpu);
/* If the debugger is not connected, do not access some registers. */
if (!(cpudata->reg[0] & (1<<14))) {
return;
}
for (core = 0; core < CORE_CNT; core++) {
if (!dbg.cpu[core].reg[OS_LOCK_FLAG]) {
a_base = dbg.cpu[core].base;
break;
}
}
switch (dbg.arch) {
case DEBUG_ARCH_V8:
if (core < CORE_CNT) {
SYS_WRITE(DBGBVR0_EL1, CS_READQ(a_base, DBGBVRn(0)));
SYS_WRITE(DBGBVR1_EL1, CS_READQ(a_base, DBGBVRn(1)));
SYS_WRITE(DBGBVR2_EL1, CS_READQ(a_base, DBGBVRn(2)));
SYS_WRITE(DBGBVR3_EL1, CS_READQ(a_base, DBGBVRn(3)));
SYS_WRITE(DBGBVR4_EL1, CS_READQ(a_base, DBGBVRn(4)));
SYS_WRITE(DBGBVR5_EL1, CS_READQ(a_base, DBGBVRn(5)));
SYS_WRITE(DBGBCR0_EL1, CS_READ(a_base, DBGBCRn(0)));
SYS_WRITE(DBGBCR1_EL1, CS_READ(a_base, DBGBCRn(1)));
SYS_WRITE(DBGBCR2_EL1, CS_READ(a_base, DBGBCRn(2)));
SYS_WRITE(DBGBCR3_EL1, CS_READ(a_base, DBGBCRn(3)));
SYS_WRITE(DBGBCR4_EL1, CS_READ(a_base, DBGBCRn(4)));
SYS_WRITE(DBGBCR5_EL1, CS_READ(a_base, DBGBCRn(5)));
SYS_WRITE(DBGWVR0_EL1, CS_READQ(a_base, DBGWVRn(0)));
SYS_WRITE(DBGWVR1_EL1, CS_READQ(a_base, DBGWVRn(1)));
SYS_WRITE(DBGWVR2_EL1, CS_READQ(a_base, DBGWVRn(2)));
SYS_WRITE(DBGWVR3_EL1, CS_READQ(a_base, DBGWVRn(3)));
SYS_WRITE(DBGWCR0_EL1, CS_READ(a_base, DBGWCRn(0)));
SYS_WRITE(DBGWCR1_EL1, CS_READ(a_base, DBGWCRn(1)));
SYS_WRITE(DBGWCR2_EL1, CS_READ(a_base, DBGWCRn(2)));
SYS_WRITE(DBGWCR3_EL1, CS_READ(a_base, DBGWCRn(3)));
} else {
SYS_WRITE(DBGBVR0_EL1, dbg.bw_reg[idx++]);
SYS_WRITE(DBGBVR1_EL1, dbg.bw_reg[idx++]);
SYS_WRITE(DBGBVR2_EL1, dbg.bw_reg[idx++]);
SYS_WRITE(DBGBVR3_EL1, dbg.bw_reg[idx++]);
SYS_WRITE(DBGBVR4_EL1, dbg.bw_reg[idx++]);
SYS_WRITE(DBGBVR5_EL1, dbg.bw_reg[idx++]);
SYS_WRITE(DBGBCR0_EL1, dbg.bw_reg[idx++]);
SYS_WRITE(DBGBCR1_EL1, dbg.bw_reg[idx++]);
SYS_WRITE(DBGBCR2_EL1, dbg.bw_reg[idx++]);
SYS_WRITE(DBGBCR3_EL1, dbg.bw_reg[idx++]);
SYS_WRITE(DBGBCR4_EL1, dbg.bw_reg[idx++]);
SYS_WRITE(DBGBCR5_EL1, dbg.bw_reg[idx++]);
SYS_WRITE(DBGWVR0_EL1, dbg.bw_reg[idx++]);
SYS_WRITE(DBGWVR1_EL1, dbg.bw_reg[idx++]);
SYS_WRITE(DBGWVR2_EL1, dbg.bw_reg[idx++]);
SYS_WRITE(DBGWVR3_EL1, dbg.bw_reg[idx++]);
SYS_WRITE(DBGWCR0_EL1, dbg.bw_reg[idx++]);
SYS_WRITE(DBGWCR1_EL1, dbg.bw_reg[idx++]);
SYS_WRITE(DBGWCR2_EL1, dbg.bw_reg[idx++]);
SYS_WRITE(DBGWCR3_EL1, dbg.bw_reg[idx++]);
}
break;
default:
break;
}
pr_debug("%s: cpu %d\n", __func__, cpu);
}
static void debug_resume_cpu(int cpu)
{
int idx = 0;
struct cs_dbg_cpu *cpudata = &dbg.cpu[cpu];
void __iomem *base = cpudata->base;
pr_debug("%s: cpu %d\n", __func__, cpu);
if (!FLAG_T32_EN && !cpudata->reg[OS_LOCK_FLAG])
return;
DBG_UNLOCK(base);
dbg_os_lock(base);
switch (dbg.arch) {
case DEBUG_ARCH_V8:
SYS_WRITE(MDSCR_EL1, cpudata->reg[idx++]); /* DBGDSCR */
SYS_WRITE(OSECCR_EL1, cpudata->reg[idx++]); /* DBGECCR */
SYS_WRITE(DBGDTRTX_EL0, cpudata->reg[idx++]); /* DBGDTRTX */
SYS_WRITE(DBGDTRRX_EL0, cpudata->reg[idx++]); /* DBGDTRRX */
SYS_WRITE(DBGCLAIMSET_EL1, cpudata->reg[idx++]); /* DBGCLAIMSET */
break;
default:
break;
}
spin_lock(&debug_lock);
debug_restore_bw_reg(cpu);
dbg_os_unlock(base);
cpudata->reg[OS_LOCK_FLAG] = 0;
spin_unlock(&debug_lock);
DBG_LOCK(base);
pr_debug("%s: %d done\n", __func__, cpu);
}
static inline bool dbg_arch_supported(u8 arch)
{
switch (arch) {
case DEBUG_ARCH_V8:
break;
default:
return false;
}
return true;
}
static inline void get_dbg_arch_info(u32 dbgdidr)
{
dbgdidr = CS_READ(dbg.cpu[0].base, ID_AA64DFR0_EL1);
dbg.arch = dbgdidr & 0xf;
dbg.nr_bp = ((dbgdidr >> 12) & 0xf) + 1;
dbg.nr_wp = ((dbgdidr >> 20) & 0xf) + 1;
}
static int exynos_cs_pm_notifier(struct notifier_block *self,
unsigned long cmd, void *v)
{
int cpu = raw_smp_processor_id();
switch (cmd) {
case CPU_PM_ENTER:
debug_suspend_cpu(cpu);
break;
case CPU_PM_ENTER_FAILED:
case CPU_PM_EXIT:
debug_resume_cpu(cpu);
break;
case CPU_CLUSTER_PM_ENTER:
break;
case CPU_CLUSTER_PM_ENTER_FAILED:
case CPU_CLUSTER_PM_EXIT:
break;
}
return NOTIFY_OK;
}
static int __cpuinit exynos_cs_cpu_notifier(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
int cpu = (unsigned long)hcpu;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_STARTING:
case CPU_DOWN_FAILED:
debug_resume_cpu(cpu);
break;
case CPU_DYING:
debug_suspend_cpu(cpu);
break;
}
return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata exynos_cs_pm_notifier_block = {
.notifier_call = exynos_cs_pm_notifier,
};
static struct notifier_block __cpuinitdata exynos_cs_cpu_notifier_block = {
.notifier_call = exynos_cs_cpu_notifier,
};
static int __init exynos_cs_debug_init(void)
{
unsigned int dbgdidr;
int ret = 0;
get_dbg_arch_info(dbgdidr);
if (!dbg_arch_supported(dbg.arch)) {
pr_err("%s: DBG archtecture is not supported.\n", __func__);
ret = -EPERM;
goto err;
}
ret = cpu_pm_register_notifier(&exynos_cs_pm_notifier_block);
if (ret < 0)
goto err;
ret = register_cpu_notifier(&exynos_cs_cpu_notifier_block);
if (ret < 0)
goto err;
pr_info("exynos-coresight debug enable: arch: %x:%d bp:%d, wp:%d\n",
cs_arm_arch, dbg.arch, dbg.nr_bp, dbg.nr_wp);
err:
return ret;
}
#endif
static const struct of_device_id of_exynos_cs_matches[] __initconst= {
{.compatible = "exynos,coresight"},
{},
};
static int exynos_cs_init_dt(void)
{
unsigned int cs_reg_base, offset, sjtag, i = 0;
struct device_node *np = NULL;
np = of_find_matching_node(NULL, of_exynos_cs_matches);
of_property_read_u32(np, "base", &cs_reg_base);
of_property_read_u32(np, "sjtag-offset", &offset);
#ifdef CONFIG_EXYNOS_CORESIGHT_PC_INFO
{
void __iomem *sjtag_base;
sjtag_base = ioremap(cs_reg_base + offset, SZ_8);
if (!sjtag_base) {
pr_err("%s: cannot ioremap sjtag base.\n", __func__);
exynos_cs_stat = -ENOMEM;
goto err_func;
}
sjtag = __raw_readl(sjtag_base + SJTAG_STATUS);
iounmap(sjtag_base);
if (sjtag & SJTAG_SOFT_LOCK) {
exynos_cs_stat = -EIO;
goto err_func;
}
}
#endif
while ((np = of_find_node_by_type(np, "cs"))) {
of_property_read_u32(np, "dbg-offset", &offset);
dbg.cpu[i].base = ioremap(cs_reg_base + offset, SZ_4K);
if (!dbg.cpu[i].base) {
pr_err("%s: cannot ioremap cs base.\n", __func__);
exynos_cs_stat = -ENOMEM;
goto err_func;
}
i++;
}
pr_info("[Coresight] Enable Show PC after kernel panic.\n");
return 0;
err_func:
pr_err("[Coresight] Fail PC print function.\n");
return exynos_cs_stat;
}
static int __init exynos_cs_init(void)
{
int ret = 0;
ret = exynos_cs_init_dt();
if (ret < 0)
goto err;
get_arm_arch_version();
#ifdef CONFIG_EXYNOS_CORESIGHT_MAINTAIN_DBG_REG
ret = exynos_cs_debug_init();
if (ret < 0)
goto err;
#endif
err:
return ret;
}
subsys_initcall(exynos_cs_init);
static struct bus_type ecs_subsys = {
.name = "exynos-cs",
.dev_name = "exynos-cs",
};
static ssize_t ecs_enable_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return scnprintf(buf, 10, "%sable\n", FLAG_T32_EN ? "en" : "dis");
}
static ssize_t ecs_enable_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
int en;
if( sscanf(buf, "%1d", &en) != 1)
return -EINVAL;
if (en)
FLAG_T32_EN = true;
else
FLAG_T32_EN = false;
return count;
}
static struct kobj_attribute ecs_enable_attr =
__ATTR(enabled, 0644, ecs_enable_show, ecs_enable_store);
static struct attribute *ecs_sysfs_attrs[] = {
&ecs_enable_attr.attr,
NULL,
};
static struct attribute_group ecs_sysfs_group = {
.attrs = ecs_sysfs_attrs,
};
static const struct attribute_group *ecs_sysfs_groups[] = {
&ecs_sysfs_group,
NULL,
};
static int __init exynos_cs_sysfs_init(void)
{
int ret = 0;
ret = subsys_system_register(&ecs_subsys, ecs_sysfs_groups);
if (ret)
pr_err("fail to register exynos-coresight subsys\n");
return ret;
}
late_initcall(exynos_cs_sysfs_init);

3001
drivers/trace/exynos-ss.c Normal file

File diff suppressed because it is too large

View file

@ -0,0 +1,800 @@
/*
* Copyright (c) 2015 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* IPs Traffic Monitor(ITM) Driver for Samsung Exynos7570 SOC
* By Hosung Kim (hosung0.kim@samsung.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define pr_fmt(fmt) "[ITM] detect: " fmt
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/exynos-itm.h>
#if defined(CONFIG_SEC_SIPC_MODEM_IF)
#include <linux/exynos-modem-ctrl.h>
#endif
/* S-NODE, M-NODE Common */
#define OFFSET_TIMEOUT_REG (0x2000)
#define OFFSET_REQ_R (0x0)
#define OFFSET_REQ_W (0x20)
#define OFFSET_RESP_R (0x40)
#define OFFSET_RESP_W (0x60)
#define OFFSET_ERR_REPT (0x20)
#define OFFSET_NUM (0x4)
#define REG_INT_MASK (0x0)
#define REG_INT_CLR (0x4)
#define REG_INT_INFO (0x8)
#define REG_EXT_INFO_0 (0x10)
#define REG_EXT_INFO_1 (0x14)
#define REG_EXT_INFO_2 (0x18)
#define REG_DBG_CTL (0x10)
#define REG_TIMEOUT_INIT_VAL (0x14)
#define REG_R_TIMEOUT_MO (0x18)
#define REG_W_TIMEOUT_MO (0x1C)
#define BIT_ERR_CODE(x) (((x) & (0xF << 28)) >> 28)
#define BIT_ERR_OCCURRED(x) (((x) & (0x1 << 27)) >> 27)
#define BIT_ERR_VALID(x) (((x) & (0x1 << 26)) >> 26)
#define BIT_AXID(x) (((x) & (0xFFFF)))
#define S_NODE (0)
#define M_NODE (1)
#define T_S_NODE (2)
#define T_M_NODE (3)
#define DATA_BACKBONE_PATH (0)
#define PERI_PATH (1)
#define PATH_NUM (2)
#define ERRCODE_SLVERR (0)
#define ERRCODE_DECERR (1)
#define ERRCODE_UNSUPORTED (2)
#define ERRCODE_POWER_DOWN (3)
#define ERRCODE_UNKNOWN_4 (4)
#define ERRCODE_UNKNOWN_5 (5)
#define ERRCODE_TIMEOUT (6)
#define TIMEOUT (0xFFFFF)
#define TIMEOUT_TEST (0x1)
#define NEED_TO_CHECK (0xCAFE)
struct itm_rpathinfo {
unsigned int id;
char *port_name;
char *dest_name;
unsigned int bits;
unsigned int shift_bits;
};
struct itm_masterinfo {
char *port_name;
unsigned int user;
char *master_name;
unsigned int bits;
};
struct itm_nodeinfo {
unsigned int type;
char *name;
unsigned int phy_regs;
void __iomem *regs;
unsigned int time_val;
bool timeout_enabled;
bool err_rpt_enabled;
bool retention;
};
/* Error Code Description */
static char *itm_errcode[] = {
"Error Detect by the Slave(SLVERR)",
"Decode error(DECERR)",
"Unsupported transaction error",
"Power Down access error",
"Unsupported transaction",
"Unsupported transaction",
"Timeout error - response timeout",
"Invalid errorcode",
};
struct itm_nodegroup {
int irq;
char *name;
unsigned int phy_regs;
void __iomem *regs;
struct itm_nodeinfo *nodeinfo;
unsigned int nodesize;
unsigned int irq_occurred;
bool panic_delayed;
};
struct itm_platdata {
struct itm_rpathinfo *rpathinfo;
struct itm_masterinfo *masterinfo;
struct itm_nodegroup *nodegroup;
bool probed;
};
static struct itm_rpathinfo rpathinfo[] = {
{0, "CPU", "DREX_CPU", GENMASK(0, 0), 1},
{1, "DBG", "DREX_CPU", GENMASK(0, 0), 1},
{0, "GNSS", "DREX_CP", GENMASK(1, 0), 2},
{1, "CP", "DREX_CP", GENMASK(1, 0), 2},
{2, "WFBT", "DREX_CP", GENMASK(1, 0), 2},
{0, "MFCMSCL", "DREX_MM", GENMASK(2, 0), 3},
{1, "G3D", "DREX_MM", GENMASK(2, 0), 3},
{2, "FSYS", "DREX_MM", GENMASK(2, 0), 3},
{3, "PDMA", "DREX_MM", GENMASK(2, 0), 3},
{4, "APM", "DREX_MM", GENMASK(2, 0), 3},
{5, "ISP", "DREX_MM", GENMASK(2, 0), 3},
{6, "DISPAUD", "DREX_MM", GENMASK(2, 0), 3},
{0, "CPU", "PERI", GENMASK(3, 0), 4},
{1, "DBG", "PERI", GENMASK(3, 0), 4},
{2, "MFCMSCL", "PERI", GENMASK(3, 0), 4},
{3, "G3D", "PERI", GENMASK(3, 0), 4},
{4, "FSYS", "PERI", GENMASK(3, 0), 4},
{5, "PDMA", "PERI", GENMASK(3, 0), 4},
{6, "APM", "PERI", GENMASK(3, 0), 4},
{7, "GNSS", "PERI", GENMASK(3, 0), 4},
{8, "CP", "PERI", GENMASK(3, 0), 4},
{9, "WFBT", "PERI", GENMASK(3, 0), 4},
{0, "CPU", "APMP", GENMASK(3, 0), 4},
{1, "DBG", "APMP", GENMASK(3, 0), 4},
{2, "MFCMSCL", "APMP", GENMASK(3, 0), 4},
{3, "G3D", "APMP", GENMASK(3, 0), 4},
{4, "FSYS", "APMP", GENMASK(3, 0), 4},
{5, "PDMA", "APMP", GENMASK(3, 0), 4},
{6, "APM", "APMP", GENMASK(3, 0), 4},
{7, "GNSS", "APMP", GENMASK(3, 0), 4},
{8, "CP", "APMP", GENMASK(3, 0), 4},
{9, "WFBT", "APMP", GENMASK(3, 0), 4},
};
/* XIU ID Information */
static struct itm_masterinfo masterinfo[] = {
{"DISPAUD", BIT(3), "IDMA0", GENMASK(3, 2)},
{"DISPAUD", 0, "IDMA1", GENMASK(3, 2)},
{"DISPAUD", BIT(2), "IDMA2", GENMASK(3, 2)},
{"ISP", 0, "CSIS0", GENMASK(3, 1)},
{"ISP", BIT(1), "FIMC_ISP", GENMASK(3, 1)},
{"ISP", BIT(2), "MC_SCALER", GENMASK(3, 1)},
{"ISP", BIT(1) | BIT(2), "FIMC_VRA", GENMASK(3, 1)},
{"FSYS", 0, "MMC55-0", GENMASK(2, 0)},
{"FSYS", BIT(0), "MMC50-2", GENMASK(2, 0)},
{"FSYS", BIT(1), "SSS0", GENMASK(2, 0)},
{"FSYS", BIT(0) | BIT(1), "RTIC", GENMASK(2, 0)},
{"FSYS", BIT(2), "USB20DRD", GENMASK(2, 0)},
{"MFCMSCL", 0, "JPEG", GENMASK(2, 1)},
{"MFCMSCL", BIT(1), "M2M_Poly", GENMASK(2, 1)},
{"MFCMSCL", BIT(2), "M2M_Bl", GENMASK(2, 1)},
{"MFCMSCL", BIT(1) | BIT(2), "MFC", GENMASK(2, 1)},
{"CP", 0, "CR4M", GENMASK(4, 3)},
{"CP", BIT(3), "TL3MtoL2", GENMASK(4, 3)},
{"CP", BIT(4), "DMA", GENMASK(4, 2)},
{"CP", BIT(2) | BIT(4), "CSXAP", GENMASK(4, 0)},
{"CP", BIT(0) | BIT(2) | BIT(4), "DMtoL2", GENMASK(4, 0)},
{"CP", BIT(1) | BIT(2) | BIT(4), "LMAC", GENMASK(4, 0)},
{"CP", BIT(0) | BIT(1) | BIT(2) | BIT(4),"HMtoL2", GENMASK(4, 0)},
{"WFBT", 0, "SXCR4", GENMASK(1, 0)},
{"WFBT", BIT(0), "SXCR4", GENMASK(5, 3) | GENMASK(1, 0)},
{"WFBT", BIT(1), "SHBWL", GENMASK(3, 0)},
/* Others */
{"CPU", 0, "", 0},
{"DBG", 0, "", 0},
{"G3D", 0, "", 0},
{"PDMA", 0, "", 0},
{"APM", 0, "", 0},
};
/* data_backbone_path is sorted by INT_VEC_DEBUG_INTERRUPT_VECTOR_TABLE bits */
static struct itm_nodeinfo data_backbone_path[] = {
{M_NODE, "APM", 0x12543000, NULL, TIMEOUT, true, true, false},
{M_NODE, "CP", 0x12483000, NULL, 0, true, true, false},
{M_NODE, "CPU", 0x12403000, NULL, 0, true, true, false},
{M_NODE, "DBG", 0x12413000, NULL, 0, false, true, false},
{M_NODE, "DISPAUD", 0x12463000, NULL, 0, false, true, false},
{M_NODE, "FSYS", 0x12443000, NULL, 0, false, true, false},
{M_NODE, "G3D", 0x12433000, NULL, 0, false, true, false},
{M_NODE, "GNSS", 0x12473000, NULL, 0, false, true, false},
{M_NODE, "ISP", 0x12453000, NULL, 0, false, true, false},
{M_NODE, "MFCMSCL", 0x12423000, NULL, 0, false, true, false},
{M_NODE, "PDMA", 0x12533000, NULL, 0, false, true, false},
{M_NODE, "WFBT", 0x12493000, NULL, 0, false, true, false},
{S_NODE, "APMP", 0x124E3000, NULL, TIMEOUT, true, true, false},
{S_NODE, "DREX_CP", 0x124B3000, NULL, TIMEOUT, true, true, false},
{S_NODE, "DREX_CPU", 0x124A3000, NULL, TIMEOUT, true, true, false},
{S_NODE, "DREX_MM", 0x124C3000, NULL, TIMEOUT, true, true, false},
{S_NODE, "PERI", 0x124D3000, NULL, TIMEOUT, true, true, false},
};
/* peri_path is sorted by INT_VEC_DEBUG_INTERRUPT_VECTOR_TABLE bits */
static struct itm_nodeinfo peri_path[] = {
{M_NODE, "MIFND", 0x12603000, NULL, 0, false, true, false},
{S_NODE, "BUS_SFR", 0x12693000, NULL, TIMEOUT, true, true, false},
{S_NODE, "CPU_SFR", 0x12623000, NULL, TIMEOUT, true, true, false},
{S_NODE, "DISPAUD_SFR", 0x12673000, NULL, TIMEOUT, true, true, false},
{S_NODE, "FSYS_SFR", 0x12653000, NULL, TIMEOUT, true, true, false},
{S_NODE, "G3D_SFR", 0x12643000, NULL, TIMEOUT, true, true, false},
{S_NODE, "GIC_SFR", 0x126B3000, NULL, TIMEOUT, true, true, false},
{S_NODE, "ISP_SFR", 0x12663000, NULL, TIMEOUT, true, true, false},
{S_NODE, "MFCMSCL_SFR", 0x12663000, NULL, TIMEOUT, true, true, false},
{S_NODE, "MIF_SFR", 0x126A3000, NULL, TIMEOUT, true, true, false},
{S_NODE, "PERI_SFR", 0x12683000, NULL, TIMEOUT, true, true, false},
};
static struct itm_nodegroup nodegroup[] = {
{378, "DATA_BACKBONE",0x125F3000, NULL, data_backbone_path, ARRAY_SIZE(data_backbone_path), 0, false},
{382, "PERI", 0x126F3000, NULL, peri_path, ARRAY_SIZE(peri_path), 0, false},
};
struct itm_dev {
struct device *dev;
struct itm_platdata *pdata;
struct of_device_id *match;
int irq;
int id;
void __iomem *regs;
spinlock_t ctrl_lock;
struct itm_notifier notifier_info;
};
struct itm_panic_block {
struct notifier_block nb_panic_block;
struct itm_dev *pdev;
};
/* declare notifier_list */
static ATOMIC_NOTIFIER_HEAD(itm_notifier_list);
static const struct of_device_id itm_dt_match[] = {
{ .compatible = "samsung,exynos-itm",
.data = NULL, },
{},
};
MODULE_DEVICE_TABLE(of, itm_dt_match);
static struct itm_rpathinfo* itm_get_rpathinfo
(struct itm_dev *itm,
unsigned int id,
char *dest_name)
{
struct itm_platdata *pdata = itm->pdata;
struct itm_rpathinfo *rpath = NULL;
int i;
for (i = 0; i < ARRAY_SIZE(rpathinfo); i++) {
if (pdata->rpathinfo[i].id == (id & pdata->rpathinfo[i].bits)) {
if (dest_name && !strncmp(pdata->rpathinfo[i].dest_name,
dest_name, strlen(pdata->rpathinfo[i].dest_name))) {
rpath = &pdata->rpathinfo[i];
break;
}
}
}
return rpath;
}
static struct itm_masterinfo* itm_get_masterinfo
(struct itm_dev *itm,
char *port_name,
unsigned int user)
{
struct itm_platdata *pdata = itm->pdata;
struct itm_masterinfo *master = NULL;
unsigned int val;
int i;
for (i = 0; i < ARRAY_SIZE(masterinfo); i++) {
if (!strncmp(pdata->masterinfo[i].port_name, port_name, strlen(port_name))) {
val = user & pdata->masterinfo[i].bits;
if (val == pdata->masterinfo[i].user) {
master = &pdata->masterinfo[i];
break;
}
}
}
return master;
}
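/*
 * Enable (or disable) the monitor: program the timeout value on every
 * S-NODE that supports it and unmask the error-report interrupts of
 * the read/write request and response channels of each node.
 */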
static void itm_init(struct itm_dev *itm, bool enabled)
{
struct itm_platdata *pdata = itm->pdata;
struct itm_nodeinfo *node;
unsigned int offset;
int i, j;
for (i = 0; i < ARRAY_SIZE(nodegroup); i++) {
node = pdata->nodegroup[i].nodeinfo;
for (j = 0; j < pdata->nodegroup[i].nodesize; j++) {
if (node[j].type == S_NODE && node[j].timeout_enabled) {
offset = OFFSET_TIMEOUT_REG;
/* Enable Timeout setting */
__raw_writel(enabled, node[j].regs + offset + REG_DBG_CTL);
/* set timeout interval value */
__raw_writel(node[j].time_val,
node[j].regs + offset + REG_TIMEOUT_INIT_VAL);
pr_debug("Exynos IPM - %s timeout enabled\n", node[j].name);
}
if (node[j].err_rpt_enabled) {
/* clear previous interrupt of req_read */
offset = OFFSET_REQ_R;
if (!pdata->probed || !node->retention)
__raw_writel(1, node[j].regs + offset + REG_INT_CLR);
/* enable interrupt */
__raw_writel(enabled, node[j].regs + offset + REG_INT_MASK);
/* clear previous interrupt of req_write */
offset = OFFSET_REQ_W;
if (!pdata->probed || !node->retention)
__raw_writel(1, node[j].regs + offset + REG_INT_CLR);
/* enable interrupt */
__raw_writel(enabled, node[j].regs + offset + REG_INT_MASK);
/* clear previous interrupt of response_read */
offset = OFFSET_RESP_R;
if (!pdata->probed || !node->retention)
__raw_writel(1, node[j].regs + offset + REG_INT_CLR);
/* enable interrupt */
__raw_writel(enabled, node[j].regs + offset + REG_INT_MASK);
/* clear previous interrupt of response_write */
offset = OFFSET_RESP_W;
if (!pdata->probed || !node->retention)
__raw_writel(1, node[j].regs + offset + REG_INT_CLR);
/* enable interrupt */
__raw_writel(enabled, node[j].regs + offset + REG_INT_MASK);
pr_debug("Exynos IPM - %s error reporting enabled\n", node[j].name);
}
}
}
}
static void itm_post_handler_by_master(struct itm_dev *itm,
struct itm_nodegroup *group,
char *port, char *master, bool read)
{
/* After treatment by port */
if (!port || strlen(port) < 1)
return;
if (!strncmp(port, "CP", strlen(port))) {
/* if master is DSP and operation is read, we don't care this */
if (master && !strncmp(master, "TL3MtoL2",strlen(master)) && read == true) {
group->panic_delayed = true;
group->irq_occurred = 0;
pr_info("ITM skips CP's DSP(TL3MtoL2) detected\n");
} else {
/* Disable busmon all interrupts */
itm_init(itm, false);
group->panic_delayed = true;
#if defined(CONFIG_SEC_SIPC_MODEM_IF)
ss310ap_force_crash_exit_ext();
#endif
}
} else if (!strncmp(port, "CPU", strlen(port))) {
pr_info("ITM is disabled for CPU exception\n");
/* Disable busmon all interrupts */
itm_init(itm, false);
group->panic_delayed = true;
}
}
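/*
 * Decode the AXI ID latched in the interrupt info register into
 * port/master/destination names using the route and master tables
 * (data backbone only; the PERI path just knows node names), then run
 * the per-master post handler.
 */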
static void itm_report_route(struct itm_dev *itm,
struct itm_nodegroup *group,
struct itm_nodeinfo *node,
unsigned int offset, bool read)
{
struct itm_masterinfo *master = NULL;
struct itm_rpathinfo *rpath = NULL;
unsigned int val, id;
char *port = NULL, *source = NULL, *dest = NULL;
val = __raw_readl(node->regs + offset + REG_INT_INFO);
id = BIT_AXID(val);
if (!strncmp(group->name, "DATA_BACKBONE", strlen(group->name))) {
/* Data Path */
if (node->type == S_NODE) {
rpath = itm_get_rpathinfo(itm, id, node->name);
if (!rpath) {
pr_info("failed to get route path - %s, id:%x\n",
node->name, id);
return;
}
id = (id >> rpath->shift_bits);
master = itm_get_masterinfo(itm, rpath->port_name, id);
if (!master) {
pr_info("failed to get master IP with "
"port:%s, id:%x\n", rpath->port_name, id);
return;
}
port = rpath->port_name;
source = master->master_name;
dest = rpath->dest_name;
} else {
master = itm_get_masterinfo(itm, node->name, id);
if (!master) {
pr_info("failed to get master IP with "
"port:%s, id:%x\n", node->name, id);
return;
}
port = node->name;
source = master->master_name;
}
} else {
/*
* PERI_PATH
* In this case, we will get the route table from DATA_BACKBONE interrupt
* of PERI port
*/
if (node->type == S_NODE) {
dest = node->name;
} else {
port = node->name;
}
}
pr_info("\n--------------------------------------------------------------------------------\n\n"
"ROUTE INFORMATION - %s\n"
"> Master : %s %s\n"
"> Slave : %s\n\n",
group->name,
port ? port : "Note other NODE Information",
source ? source : "",
dest ? dest : "Note other NODE Information");
itm_post_handler_by_master(itm, group, port, source, read);
}
static void itm_report_info(struct itm_dev *itm,
struct itm_nodegroup *group,
struct itm_nodeinfo *node,
unsigned int offset)
{
unsigned int errcode, int_info, info0, info1, info2;
bool read = false, req = false;
int_info = __raw_readl(node->regs + offset + REG_INT_INFO);
if (!BIT_ERR_VALID(int_info)) {
pr_info("no information, %s/offset:%x is stopover, "
"check other node\n", node->name, offset);
return;
}
errcode = BIT_ERR_CODE(int_info);
info0 = __raw_readl(node->regs + offset + REG_EXT_INFO_0);
info1 = __raw_readl(node->regs + offset + REG_EXT_INFO_1);
info2 = __raw_readl(node->regs + offset + REG_EXT_INFO_2);
switch(offset) {
case OFFSET_REQ_R:
read = true;
/* fall through */
case OFFSET_REQ_W:
req = true;
if (node->type == S_NODE) {
/* Only S-Node is able to make log to registers */
pr_info("invalid logged, see more following information\n");
goto out;
}
break;
case OFFSET_RESP_R:
read = true;
/* fall through */
case OFFSET_RESP_W:
req = false;
if (node->type != S_NODE) {
/* Only S-Node is able to make log to registers */
pr_info("invalid logged, see more following information\n");
goto out;
}
break;
default:
pr_info("Unknown Error - offset:%u\n", offset);
goto out;
}
/* Normally fall down to here */
itm_report_route(itm, group, node, offset, read);
pr_info("\n--------------------------------------------------------------------------------\n\n"
"TRANSACTION INFORMATION\n"
"> Path Type : %s in %s %s \n"
"> Target address : 0x%08X\n"
"> Error type : %s\n\n",
group->name,
read ? "READ" : "WRITE",
req ? "REQUEST" : "RESPONSE",
info0,
itm_errcode[errcode]);
out:
/* report the raw extension register information of this node */
pr_info("\n--------------------------------------------------------------------------------\n\n"
"NODE RAW INFORMATION\n"
"> NODE NAME : %s(%s)\n"
"> NODE ADDRESS : 0x%08X\n"
"> INTERRUPT_INFO : 0x%08X\n"
"> EXT_INFO_0 : 0x%08X\n"
"> EXT_INFO_1 : 0x%08X\n"
"> EXT_INFO_2 : 0x%08X\n\n"
"--------------------------------------------------------------------------------\n",
node->name,
node->type ? "M_NODE" : "S_NODE",
node->phy_regs + offset,
int_info,
info0,
info1,
info2);
}
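/*
 * itm_parse_info - scan nodes for latched errors and report them.
 *
 * When a group is given, only the nodes flagged in that group's interrupt
 * vector are inspected; otherwise every node of every group is walked.
 * Each error report offset with BIT_ERR_OCCURRED set is reported and,
 * if requested, cleared.  Returns non-zero when at least one error was found.
 */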
static int itm_parse_info(struct itm_dev *itm,
struct itm_nodegroup *group,
bool clear)
{
//struct itm_platdata *pdata = itm->pdata;
struct itm_nodeinfo *node = NULL;
unsigned int val, offset, vec;
unsigned long flags, bit = 0;
int i, j, ret = 0;
spin_lock_irqsave(&itm->ctrl_lock, flags);
if (group) {
/* Processing only this group and select detected node */
vec = __raw_readl(group->regs);
node = group->nodeinfo;
if (!vec)
pr_info("invalid detection\n");
for_each_set_bit(bit, (unsigned long *)&vec, group->nodesize) {
			/* check every error report offset of this node */
for (i = 0; i < OFFSET_NUM; i++) {
offset = i * OFFSET_ERR_REPT;
/* Check Request information */
val = __raw_readl(node[bit].regs + offset + REG_INT_INFO);
if (BIT_ERR_OCCURRED(val)) {
					/* This node raised the error */
itm_report_info(itm, group, &node[bit], offset);
if (clear)
__raw_writel(1, node[bit].regs + offset + REG_INT_CLR);
ret = true;
}
}
}
} else {
/* Processing all group & nodes */
for (i = 0; i < ARRAY_SIZE(nodegroup); i++) {
group = &nodegroup[i];
vec = __raw_readl(group->regs);
node = group->nodeinfo;
bit = 0;
for_each_set_bit(bit, (unsigned long *)&vec, group->nodesize) {
for (j = 0; j < OFFSET_NUM; j++) {
offset = j * OFFSET_ERR_REPT;
/* Check Request information */
val = __raw_readl(node[bit].regs + offset + REG_INT_INFO);
if (BIT_ERR_OCCURRED(val)) {
						/* This node raised the error */
itm_report_info(itm, group, &node[bit], offset);
if (clear)
							__raw_writel(1,
								node[bit].regs + offset + REG_INT_CLR);
ret = true;
}
}
}
}
}
spin_unlock_irqrestore(&itm->ctrl_lock, flags);
return ret;
}
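/*
 * itm_irq_handler - top-level interrupt handler.
 *
 * The SPI number is looked up in the nodegroup table, the matching group
 * is parsed and cleared, and a repeated interrupt without a delayed panic
 * request triggers a panic to stop infinite error output.
 */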
static irqreturn_t itm_irq_handler(int irq, void *data)
{
struct itm_dev *itm = (struct itm_dev *)data;
struct itm_platdata *pdata = itm->pdata;
struct itm_nodegroup *group = NULL;
bool ret;
int i;
/* convert from raw irq source to SPI irq number */
irq = irq - 32;
/* Search itm group */
for (i = 0; i < ARRAY_SIZE(nodegroup); i++) {
if (irq == nodegroup[i].irq) {
pr_info("%s group, %d interrupt occurrs \n",
pdata->nodegroup[i].name, irq);
group = &pdata->nodegroup[i];
break;
}
}
if (group) {
ret = itm_parse_info(itm, group, true);
if (!ret) {
pr_info("can't process %s irq:%d IRQ_VECTOR:%x\n",
group->name, irq, __raw_readl(group->regs));
} else {
if (group->irq_occurred && !group->panic_delayed)
panic("STOP infinite output by Exynos ITM");
else
group->irq_occurred++;
}
} else {
pr_info("can't the group - irq:%d\n", irq);
}
return IRQ_HANDLED;
}
void itm_notifier_chain_register(struct notifier_block *block)
{
atomic_notifier_chain_register(&itm_notifier_list, block);
}
static int itm_logging_panic_handler(struct notifier_block *nb,
unsigned long l, void *buf)
{
struct itm_panic_block *itm_panic = (struct itm_panic_block *)nb;
struct itm_dev *itm = itm_panic->pdev;
int ret;
if (!IS_ERR_OR_NULL(itm)) {
/* Check error has been logged */
ret = itm_parse_info(itm, NULL, false);
if (!ret)
pr_info("No found error in %s\n", __func__);
else
pr_info("Found errors in %s\n", __func__);
}
return 0;
}
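/*
 * itm_probe - map every node group, hook its interrupt and arm the monitor.
 *
 * Register windows of all groups and nodes are ioremapped, one IRQ per
 * group is requested, a panic notifier is registered so pending errors
 * are dumped on panic, and finally itm_init() enables the hardware.
 */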
static int itm_probe(struct platform_device *pdev)
{
struct itm_dev *itm;
struct itm_panic_block *itm_panic = NULL;
struct itm_platdata *pdata;
struct itm_nodeinfo *node;
char *dev_name;
int ret, i, j;
itm = devm_kzalloc(&pdev->dev, sizeof(struct itm_dev), GFP_KERNEL);
if (!itm) {
dev_err(&pdev->dev, "failed to allocate memory for driver's "
"private data\n");
return -ENOMEM;
}
itm->dev = &pdev->dev;
spin_lock_init(&itm->ctrl_lock);
pdata = devm_kzalloc(&pdev->dev, sizeof(struct itm_platdata), GFP_KERNEL);
if (!pdata) {
dev_err(&pdev->dev, "failed to allocate memory for driver's "
"platform data\n");
return -ENOMEM;
}
itm->pdata = pdata;
itm->pdata->masterinfo = masterinfo;
itm->pdata->rpathinfo = rpathinfo;
itm->pdata->nodegroup = nodegroup;
for (i = 0; i < ARRAY_SIZE(nodegroup); i++)
{
dev_name = nodegroup[i].name;
node = nodegroup[i].nodeinfo;
nodegroup[i].regs = devm_ioremap_nocache(&pdev->dev, nodegroup[i].phy_regs, SZ_16K);
if (nodegroup[i].regs == NULL) {
dev_err(&pdev->dev, "failed to claim register region - %s\n", dev_name);
return -ENOENT;
}
		ret = devm_request_irq(&pdev->dev, nodegroup[i].irq + 32,
					itm_irq_handler, 0, //IRQF_GIC_MULTI_TARGET,
					dev_name, itm);
		if (ret)
			return ret;
for (j = 0; j < nodegroup[i].nodesize; j++) {
node[j].regs = devm_ioremap_nocache(&pdev->dev, node[j].phy_regs, SZ_16K);
if (node[j].regs == NULL) {
dev_err(&pdev->dev, "failed to claim register region - %s\n", dev_name);
return -ENOENT;
}
}
}
itm_panic = devm_kzalloc(&pdev->dev,
sizeof(struct itm_panic_block), GFP_KERNEL);
if (!itm_panic) {
dev_err(&pdev->dev, "failed to allocate memory for driver's "
"panic handler data\n");
} else {
itm_panic->nb_panic_block.notifier_call =
itm_logging_panic_handler;
itm_panic->pdev = itm;
atomic_notifier_chain_register(&panic_notifier_list,
&itm_panic->nb_panic_block);
}
platform_set_drvdata(pdev, itm);
itm_init(itm, true);
pdata->probed = true;
dev_info(&pdev->dev, "success to probe ITM driver\n");
return 0;
}
static int itm_remove(struct platform_device *pdev)
{
platform_set_drvdata(pdev, NULL);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int itm_suspend(struct device *dev)
{
return 0;
}
static int itm_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct itm_dev *itm = platform_get_drvdata(pdev);
itm_init(itm, true);
return 0;
}
static SIMPLE_DEV_PM_OPS(itm_pm_ops,
itm_suspend,
itm_resume);
#define ITM_PM (&itm_pm_ops)
#else
#define ITM_PM NULL
#endif
static struct platform_driver exynos_itm_driver = {
.probe = itm_probe,
.remove = itm_remove,
.driver = {
.name = "exynos-itm",
.of_match_table = itm_dt_match,
		.pm = ITM_PM,
},
};
module_platform_driver(exynos_itm_driver);
MODULE_DESCRIPTION("Samsung Exynos ITM DRIVER");
MODULE_AUTHOR("Hosung Kim <hosung0.kim@samsung.com");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:exynos-itm");

View file

@ -0,0 +1,726 @@
/*
* Copyright (c) 2015 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* BUS Monitor Debugging Driver for Samsung EXYNOS8890 SoC
* By Hosung Kim (hosung0.kim@samsung.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/exynos-busmon.h>
/* S-NODE register */
#define REG_DBG_INT_MASK (0x0)
#define REG_DBG_INT_CLEAR (0x4)
#define REG_DBG_INT_SOURCE (0x8)
#define REG_DBG_CONTROL (0x10)
#define REG_DBG_TIMEOUT_INTERVAL (0x14)
#define REG_DBG_READ_TIMEOUT_MO (0x20)
#define REG_DBG_READ_TIMEOUT_USER (0x24)
#define REG_DBG_READ_TIMEOUT_ID (0x28)
#define REG_DBG_WRITE_TIMEOUT_MO (0x30)
#define REG_DBG_WRITE_TIMEOUT_USER (0x34)
#define REG_DBG_WRITE_TIMEOUT_ID (0x38)
#define REG_DBG_ERR_RPT_OFFSET (0x2000)
/* M-NODE register */
#define REG_ERR_RPT_INT_MASK (0x0)
#define REG_ERR_RPT_INT_CLEAR (0x4)
#define REG_ERR_RPT_INT_INFO (0x8)
#define REG_ERR_RPT_RID (0x28)
#define REG_ERR_RPT_BID (0x38)
#define BIT_TIMEOUT_AR (1 << 16)
#define BIT_TIMEOUT_AW (1 << 0)
#define BIT_ERR_RPT_AR (1 << 17)
#define BIT_ERR_RPT_AW (1 << 1)
#define BUSMON_TYPE_SNODE (0)
#define BUSMON_TYPE_MNODE (1)
#define VAL_TIMEOUT_DEFAULT (0xFFFFF)
#define VAL_TIMEOUT_TEST (0x1)
#define DISABLED (0)
#define ENABLED (1)
#define NEED_TO_CHECK (0xCAFE)
struct busmon_rpathinfo {
unsigned int id;
char *port_name;
char *dest_name;
unsigned int bits;
};
struct busmon_masterinfo {
char *port_name;
unsigned int user;
char *master_name;
unsigned int bits;
};
struct busmon_platdata {
unsigned int type;
char *name;
unsigned int phy_regs;
void __iomem *regs;
unsigned int irq;
unsigned int time_val;
bool timeout_enabled;
bool err_rpt_enabled;
char *need_rpath;
};
static struct busmon_rpathinfo rpathinfo_array[] = {
{0, "G3D1", "MEMS_0", 0x1F},
{5, "CAM0", "MEMS_0", 0x1F},
{6, "CAM1", "MEMS_0", 0x1F},
{1, "DISP0_0", "MEMS_0", 0x1F},
{2, "DISP0_1", "MEMS_0", 0x1F},
{3, "DISP1_0", "MEMS_0", 0x1F},
{4, "DISP1_1", "MEMS_0", 0x1F},
{7, "ISP0", "MEMS_0", 0x1F},
{12, "CAM0", "MEMS_0", 0x1F},
{13, "CAM1", "MEMS_0", 0x1F},
{8, "DISP0_0", "MEMS_0", 0x1F},
{9, "DISP0_1", "MEMS_0", 0x1F},
{10, "DISP1_0", "MEMS_0", 0x1F},
{11, "DISP1_1", "MEMS_0", 0x1F},
{14, "ISP0", "MEMS_0", 0x1F},
{15, "IMEM", "MEMS_0", 0x1F},
{16, "AUD", "MEMS_0", 0x1F},
{17, "CORESIGHT", "MEMS_0", 0x1F},
{18, "CAM1", "MEMS_0", 0x1F},
{20, "FSYS1", "MEMS_0", 0x1F},
{19, "ISP0", "MEMS_0", 0x1F},
{21, "CP", "MEMS_0", 0x1F},
{22, "FSYS0", "MEMS_0", 0x1F},
{25, "MFC0", "MEMS_0", 0x1F},
{26, "MFC1", "MEMS_0", 0x1F},
{23, "MSCL0", "MEMS_0", 0x1F},
{24, "MSCL1", "MEMS_0", 0x1F},
{0, "G3D1", "MEMS_1", 0x1F},
{5, "CAM0", "MEMS_1", 0x1F},
{6, "CAM1", "MEMS_1", 0x1F},
{1, "DISP0_0", "MEMS_1", 0x1F},
{2, "DISP0_1", "MEMS_1", 0x1F},
{3, "DISP1_0", "MEMS_1", 0x1F},
{4, "DISP1_1", "MEMS_1", 0x1F},
{7, "ISP0", "MEMS_1", 0x1F},
{12, "CAM0", "MEMS_1", 0x1F},
{13, "CAM1", "MEMS_1", 0x1F},
{8, "DISP0_0", "MEMS_1", 0x1F},
{9, "DISP0_1", "MEMS_1", 0x1F},
{10, "DISP1_0", "MEMS_1", 0x1F},
{11, "DISP1_1", "MEMS_1", 0x1F},
{14, "ISP0", "MEMS_1", 0x1F},
{15, "IMEM", "MEMS_1", 0x1F},
{16, "AUD", "MEMS_1", 0x1F},
{17, "CORESIGHT", "MEMS_1", 0x1F},
{18, "CAM1", "MEMS_1", 0x1F},
{20, "FSYS1", "MEMS_1", 0x1F},
{19, "ISP0", "MEMS_1", 0x1F},
{21, "CP", "MEMS_1", 0x1F},
{22, "FSYS0", "MEMS_1", 0x1F},
{25, "MFC0", "MEMS_1", 0x1F},
{26, "MFC1", "MEMS_1", 0x1F},
{23, "MSCL0", "MEMS_1", 0x1F},
{24, "MSCL1", "MEMS_1", 0x1F},
{0, "IMEM", "PERI", 0xF},
{1, "AUD", "PERI", 0xF},
{2, "CORESIGHT", "PERI", 0xF},
{3, "FSYS0", "PERI", 0xF},
{6, "MFC0", "PERI", 0xF},
{7, "MFC1", "PERI", 0xF},
{4, "MSCL0", "PERI", 0xF},
{5, "MSCL1", "PERI", 0xF},
{8, "CAM1", "PERI", 0xF},
{10, "FSYS1", "PERI", 0xF},
{9, "ISP0", "PERI", 0xF},
};
static struct busmon_masterinfo masterinfo_array[] = {
/* DISP0_0 */
{"DISP0_0", 1 << 0, "sysmmu", 0x1},
{"DISP0_0", 1 << 0, "S-IDMA0", 0x3},
{"DISP0_0", 1 << 1, "IDMA3", 0x3},
/* DISP0_1 */
{"DISP0_1", 1 << 0, "sysmmu", 0x1},
{"DISP0_1", 0 << 0, "IDMA0", 0x3},
{"DISP0_1", 1 << 1, "IDMA4", 0x3},
/* DISP1_0 */
{"DISP1_0", 1 << 0, "sysmmu", 0x1},
{"DISP1_0", 0 << 0, "IDMA1", 0x3},
{"DISP1_0", 1 << 1, "VGR0", 0x3},
/* DISP1_1 */
{"DISP1_1", 1 << 0, "sysmmu", 0x1},
{"DISP1_1", 0 << 0, "IDMA2", 0x7},
{"DISP1_1", 1 << 1, "VGR1", 0x7},
{"DISP1_1", 1 << 2, "WDMA", 0x7},
/* MFC0 */
{"MFC0", 1 << 0, "sysmmu", 0x1},
{"MFC0", 0 << 0, "MFC M0", 0x1},
/* MFC1 */
{"MFC1", 1 << 0, "sysmmu", 0x1},
{"MFC1", 0 << 0, "MFC M1", 0x1},
/* IMEM */
{"IMEM", 0 << 0, "SSS M0", 0xF},
{"IMEM", 1 << 2, "RTIC", 0xF},
{"IMEM", 1 << 3, "SSS M1", 0xF},
{"IMEM", 1 << 0, "MCOMP", 0x3},
{"IMEM", 1 << 1, "APM", 0x3},
/* G3D */
{"G3D0", 0 << 0, "G3D0", 0x1},
{"G3D1", 0 << 1, "G3D1", 0x1},
/* AUD */
{"AUD", 1 << 0, "sysmmu", 0x1},
{"AUD", 1 << 1, "DMAC", 0x7},
{"AUD", 1 << 2, "AUD CA5", 0x7},
/* MSCL0 */
{"MSCL0", 1 << 0, "sysmmu", 0x1},
{"MSCL0", 0 << 0, "JPEG", 0x3},
{"MSCL0", 1 << 1, "MSCL0", 0x3},
/* MSCL1 */
{"MSCL1", 1 << 0, "sysmmu", 0x1},
{"MSCL1", 0 << 0, "G2D", 0x3},
{"MSCL1", 1 << 1, "MSCL1", 0x3},
/* FSYS1 */
{"FSYS1", 0 << 0, "MMC51", 0x7},
{"FSYS1", 1 << 2, "UFS", 0x7},
{"FSYS1", 1 << 0, "PCIE_WIFI0", 0x3},
{"FSYS1", 1 << 1, "PCIE_WIFI1", 0x3},
/* FSYS0 */
{"FSYS0", 0 << 0, "ETR USB", 0x7},
{"FSYS0", 1 << 2, "USB30", 0x7},
{"FSYS0", 1 << 0, "UFS", 0x7},
{"FSYS0", 1 << 0 | 1 << 2, "MMC51", 0x7},
{"FSYS0", 1 << 1, "PDMA0", 0x7},
{"FSYS0", 1 << 1 | 1 << 2, "PDMA(secure)", 0x7},
{"FSYS0", 1 << 0 | 1 << 1, "USB20", 0x3},
/* CAM0 */
{"CAM0", 1 << 0, "sysmmu", 0x1},
{"CAM0", 0 << 0, "MIPI_CSIS0", 0x7},
{"CAM0", 1 << 1, "MIPI_CSIS1", 0x7},
{"CAM0", 1 << 1, "FIMC_3AA0", 0x7},
{"CAM0", 1 << 2, "FIMC_3AA1", 0x7},
/* CAM1 */
{"CAM1", 1 << 2, "sysmmu_IS_B", 0x7},
{"CAM1", 0 << 0, "MIPI_CSI2 or ISP2", 0xF},
{"CAM1", 1 << 3, "ISP1", 0xF},
{"CAM1", 1 << 0 | 1 << 2, "sysmmu_SCL", 0x3},
{"CAM1", 1 << 0, "MC_SCALER", 0x3},
{"CAM1", 1 << 0 | 1 << 1 | 1 << 2, "sysmmu_VRA", 0x7},
{"CAM1", 1 << 0 | 1 << 1, "FIMC_VRA", 0x7},
{"CAM1", 1 << 1 | 1 << 2, "sysmmu_CA7", 0x7},
{"CAM1", 1 << 1, "CA7", 0xF},
{"CAM1", 1 << 1 | 1 << 3, "PDMA_IS", 0xF},
/* ISP0 */
{"ISP0", 1 << 0, "sysmmu", 0x1},
{"ISP0", 0 << 0, "FIMC_ISP", 0x3},
{"ISP0", 1 << 1, "FIMC_TPU", 0x3},
/* CP */
{"CP", 0 << 0, "CR7M", 0x0},
{"CP", 1 << 3, "TL3MtoL2", 0x8},
{"CP", 1 << 4, "DMAC", 0x10},
{"CP", 1 << 2, "MEMtoL2", 0x14},
{"CP", 1 << 3 | 1 << 4, "CSXAP", 0x18},
{"CP", 1 << 0 | 1 << 3 | 1 << 4, "LMAC", 0x19},
{"CP", 1 << 1 | 1 << 3 | 1 << 4, "HMtoL2", 0x1A},
};
static struct busmon_platdata pdata_array[] = {
/* S-node, BLK_CCORE - Data Path */
{BUSMON_TYPE_SNODE, "CCORE_MEMS_0_S_NODE", 0x10703000, NULL, 320, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, "MEMS_0"},
{BUSMON_TYPE_SNODE, "CCORE_MEMS_1_S_NODE", 0x10713000, NULL, 321, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, "MEMS_1"},
{BUSMON_TYPE_SNODE, "CCORE_PERI_S_NODE", 0x10723000, NULL, 322, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, "PERI"},
/* S-node, BLK_BUS0 */
{BUSMON_TYPE_SNODE, "P_BUS0_BUS0_SFR_S_NODE", 0X11E73000, NULL, 352, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, NULL},
{BUSMON_TYPE_SNODE, "P_BUS0_CAM0_S_NODE", 0X11E63000, NULL, 353, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, NULL},
{BUSMON_TYPE_SNODE, "P_BUS0_CAM1_S_NODE", 0X11E53000, NULL, 354, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, NULL},
{BUSMON_TYPE_SNODE, "P_BUS0_DISP0_S_NODE", 0X11E33000, NULL, 355, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, NULL},
{BUSMON_TYPE_SNODE, "P_BUS0_FSYS1_S_NODE", 0X11E13000, NULL, 356, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, NULL},
{BUSMON_TYPE_SNODE, "P_BUS0_PERIC0_S_NODE", 0X11E23000, NULL, 357, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, NULL},
{BUSMON_TYPE_SNODE, "P_BUS0_PERIC1_S_NODE", 0X11EA3000, NULL, 358, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, NULL},
{BUSMON_TYPE_SNODE, "P_BUS0_PERIS_S_NODE", 0X11E03000, NULL, 359, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, NULL},
{BUSMON_TYPE_SNODE, "P_BUS0_TREX_BUS0_S_NODE", 0X11E93000, NULL, 360, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, NULL},
{BUSMON_TYPE_SNODE, "P_BUS0_TREX_BUS0_PERI_S_NODE", 0X11E83000, NULL, 361, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, NULL},
{BUSMON_TYPE_SNODE, "P_BUS0_VPP_S_NODE", 0X11E43000, NULL, 362, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, NULL},
/* S-node, BLK_BUS1 */
{BUSMON_TYPE_SNODE, "P_BUS1_BUS1_SFR_S_NODE", 0X11C43000, NULL, 373, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, NULL},
{BUSMON_TYPE_SNODE, "P_BUS1_FSYS0_S_NODE", 0X11C03000, NULL, 374, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, NULL},
{BUSMON_TYPE_SNODE, "P_BUS1_MFC_S_NODE", 0X11C23000, NULL, 375, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, NULL},
{BUSMON_TYPE_SNODE, "P_BUS1_MSCL_S_NODE", 0X11C13000, NULL, 376, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, NULL},
{BUSMON_TYPE_SNODE, "P_BUS1_TREX_BUS1_S_NODE", 0X11C63000, NULL, 377, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, NULL},
{BUSMON_TYPE_SNODE, "P_BUS1_TREX_BUS1_PERI_S_NODE", 0X11C53000, NULL, 378, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, NULL},
/* S-node, BLK_CORE */
{BUSMON_TYPE_SNODE, "P_CORE_APL_S_NODE", 0X10443000, NULL, 326, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, NULL},
{BUSMON_TYPE_SNODE, "P_CORE_AUD_S_NODE", 0X10493000, NULL, 327, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, NULL},
{BUSMON_TYPE_SNODE, "P_CORE_CCORE_SFR_S_NODE", 0X104B3000, NULL, 335, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, NULL},
{BUSMON_TYPE_SNODE, "P_CORE_CORESIGHT_S_NODE", 0X10423000, NULL, 329, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, NULL},
{BUSMON_TYPE_SNODE, "P_CORE_G3D_S_NODE", 0X104A3000, NULL, 330, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, NULL},
{BUSMON_TYPE_SNODE, "P_CORE_MIF0_S_NODE", 0X10453000, NULL, 331, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, NULL},
{BUSMON_TYPE_SNODE, "P_CORE_MIF1_S_NODE", 0X10463000, NULL, 332, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, NULL},
{BUSMON_TYPE_SNODE, "P_CORE_MIF2_S_NODE", 0X10473000, NULL, 333, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, NULL},
{BUSMON_TYPE_SNODE, "P_CORE_MIF3_S_NODE", 0X10483000, NULL, 334, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, NULL},
{BUSMON_TYPE_SNODE, "P_CORE_MNGS_S_NODE", 0X10433000, NULL, 328, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, NULL},
{BUSMON_TYPE_SNODE, "P_CORE_TREX_MIF_S_NODE", 0X104D3000, NULL, 336, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, NULL},
{BUSMON_TYPE_SNODE, "P_CORE_TREX_MIF_PERI_S_NODE", 0X104C3000, NULL, 337, VAL_TIMEOUT_DEFAULT, ENABLED, ENABLED, NULL},
/* M-node, BLK_BUS0 */
{BUSMON_TYPE_MNODE, "BUS1_CAM0_M_NODE", 0X11F73000, NULL, 344, 0, DISABLED, ENABLED, NULL},
{BUSMON_TYPE_MNODE, "BUS1_CAM1_M_NODE", 0X11F13000, NULL, 345, 0, DISABLED, ENABLED, NULL},
{BUSMON_TYPE_MNODE, "BUS1_DISP0_0_M_NODE", 0x11F33000, NULL, 346, 0, DISABLED, ENABLED, NULL},
{BUSMON_TYPE_MNODE, "BUS1_DISP0_1_M_NODE", 0X11F43000, NULL, 347, 0, DISABLED, ENABLED, NULL},
{BUSMON_TYPE_MNODE, "BUS1_DISP1_0_M_NODE", 0x11F53000, NULL, 350, 0, DISABLED, ENABLED, NULL},
{BUSMON_TYPE_MNODE, "BUS1_DISP1_1_M_NODE", 0x11F63000, NULL, 351, 0, DISABLED, ENABLED, NULL},
{BUSMON_TYPE_MNODE, "BUS1_FSYS1_M_NODE", 0x11F03000, NULL, 348, 0, DISABLED, ENABLED, NULL},
{BUSMON_TYPE_MNODE, "BUS1_ISP0_M_NODE", 0x11F23000, NULL, 349, 0, DISABLED, ENABLED, NULL},
/* M-node, BLK_BUS1 */
{BUSMON_TYPE_MNODE, "CCORE_AUD_M_NODE", 0x106C3000, NULL, 315, 0, DISABLED, ENABLED, NULL},
{BUSMON_TYPE_MNODE, "CCORE_CORESIGHT_M_NODE", 0x106D3000, NULL, 316, 0, DISABLED, ENABLED, NULL},
{BUSMON_TYPE_MNODE, "CCORE_CP_M_NODE", 0x10733000, NULL, 323, 0, DISABLED, ENABLED, NULL},
{BUSMON_TYPE_MNODE, "CCORE_G3D0_M_NODE", 0x10683000, NULL, 318, 0, DISABLED, ENABLED, NULL},
{BUSMON_TYPE_MNODE, "CCORE_G3D1_M_NODE", 0x10693000, NULL, 319, 0, DISABLED, ENABLED, NULL},
{BUSMON_TYPE_MNODE, "CCORE_IMEM_M_NODE", 0x106B3000, NULL, 317, 0, DISABLED, ENABLED, NULL},
/* M-node, BLK_CCORE */
{BUSMON_TYPE_MNODE, "P_CORE_BUS_M_NODE", 0x104F3000, NULL, 325, 0, DISABLED, ENABLED, NULL},
#if DISABLED
/* M-node, BLK_CAM0 */
{BUSMON_TYPE_MNODE, "TREX_A_AA0_M_NODE", 0x14433000, NULL, 115, 0, DISABLED, ENABLED, NULL},
{BUSMON_TYPE_MNODE, "TREX_A_AA1_M_NODE", 0x14443000, NULL, 116, 0, DISABLED, ENABLED, NULL},
{BUSMON_TYPE_MNODE, "TREX_A_BNS_A_M_NODE", 0x14423000, NULL, 117, 0, DISABLED, ENABLED, NULL},
{BUSMON_TYPE_MNODE, "TREX_A_CSIS_0_M_NODE", 0x14403000, NULL, 118, 0, DISABLED, ENABLED, NULL},
{BUSMON_TYPE_MNODE, "TREX_A_CSIS_1_M_NODE", 0x14413000, NULL, 119, 0, DISABLED, ENABLED, NULL},
/* M-node, BLK_CAM1 */
{BUSMON_TYPE_MNODE, "TREX_B_CSIS_2_M_NODE", 0x14503000, NULL, 165, 0, DISABLED, ENABLED, NULL},
{BUSMON_TYPE_MNODE, "TREX_B_CSIS_3_M_NODE", 0x14513000, NULL, 166, 0, DISABLED, ENABLED, NULL},
{BUSMON_TYPE_MNODE, "TREX_B_ISP_1_M_NODE", 0x14523000, NULL, 167, 0, DISABLED, ENABLED, NULL},
{BUSMON_TYPE_MNODE, "TREX_CAM1_ISP_1_1_M_NODE", 0x145A3000, NULL, 168, 0, DISABLED, ENABLED, NULL},
{BUSMON_TYPE_MNODE, "TREX_CAM1_MC_SCALER_M_NODE", 0x14593000, NULL, 169, 0, DISABLED, ENABLED, NULL},
{BUSMON_TYPE_MNODE, "TREX_CAM1_BLOCKB_M_NODE", 0x14583000, NULL, 170, 0, DISABLED, ENABLED, NULL},
{BUSMON_TYPE_MNODE, "TREX_CAM1_VRA_M_NODE", 0x145B3000, NULL, 171, 0, DISABLED, ENABLED, NULL},
/* M-node, BLK_ISP0 */
{BUSMON_TYPE_MNODE, "TREX_C_ISP_0_M_NODE", 0x14603000, NULL, 272, 0, DISABLED, ENABLED, NULL},
{BUSMON_TYPE_MNODE, "TREX_C_TPU_M_NODE", 0x14613000, NULL, 273, 0, DISABLED, ENABLED, NULL},
#endif
};
struct busmon_dev {
struct device *dev;
struct busmon_platdata *pdata;
struct busmon_rpathinfo *rpathinfo;
struct busmon_masterinfo *masterinfo;
struct of_device_id *match;
int irq;
int id;
void __iomem *regs;
spinlock_t ctrl_lock;
struct busmon_notifier notifier_info;
};
struct busmon_panic_block {
struct notifier_block nb_panic_block;
struct busmon_dev *pdev;
};
/* declare notifier_list */
static ATOMIC_NOTIFIER_HEAD(busmon_notifier_list);
static const struct of_device_id busmon_dt_match[] = {
{ .compatible = "samsung,exynos-busmonitor",
.data = NULL, },
{},
};
MODULE_DEVICE_TABLE(of, busmon_dt_match);
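/*
 * Route-path and master lookup helpers: the logged AXI ID is masked with
 * the per-entry bit mask and matched against the destination name, and the
 * AxUSER bits identify the master IP behind a given port.
 */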
static struct busmon_rpathinfo*
busmon_get_rpathinfo(struct busmon_dev *busmon, unsigned int id, char *dest_name)
{
struct busmon_rpathinfo *rpath = NULL;
int i;
for (i = 0; i < ARRAY_SIZE(rpathinfo_array); i++) {
if (busmon->rpathinfo[i].id == (id & busmon->rpathinfo[i].bits)) {
if (dest_name && !strncmp(busmon->rpathinfo[i].dest_name,
dest_name, strlen(busmon->rpathinfo[i].dest_name))) {
rpath = &busmon->rpathinfo[i];
break;
}
}
}
return rpath;
}
static struct busmon_masterinfo*
busmon_get_masterinfo(struct busmon_dev *busmon, char *port_name, unsigned int user)
{
struct busmon_masterinfo *master = NULL;
unsigned int val;
int i;
for (i = 0; i < ARRAY_SIZE(masterinfo_array); i++) {
if (!strncmp(busmon->masterinfo[i].port_name, port_name, strlen(port_name))) {
val = user & busmon->masterinfo[i].bits;
if (val == busmon->masterinfo[i].user) {
master = &busmon->masterinfo[i];
break;
}
}
}
return master;
}
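/*
 * busmon_err_rpt_dump - dump one node's error report interrupt.
 *
 * For S-Nodes the error report block sits behind REG_DBG_ERR_RPT_OFFSET;
 * the read or write ID is fetched, optionally cleared, resolved to a route
 * path when the node carries one, and printed.
 */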
static void busmon_err_rpt_dump(struct busmon_dev *busmon, int num, bool clear)
{
struct busmon_platdata *pdata = busmon->pdata;
struct busmon_rpathinfo *rpath = NULL;
unsigned int id = 0, val = 0, offset = 0;
if (!pdata[num].err_rpt_enabled)
return;
if (pdata[num].type == BUSMON_TYPE_SNODE)
offset = REG_DBG_ERR_RPT_OFFSET;
val = __raw_readl(pdata[num].regs + offset + REG_ERR_RPT_INT_INFO);
if (!val)
return;
if (pdata[num].type == BUSMON_TYPE_SNODE && pdata[num].need_rpath) {
if (val & BIT_ERR_RPT_AR) {
id = 0xfff & (__raw_readl(pdata[num].regs + offset + REG_ERR_RPT_RID));
if (clear)
__raw_writel(BIT_ERR_RPT_AR,
pdata[num].regs + offset + REG_ERR_RPT_INT_CLEAR);
} else if (val & BIT_ERR_RPT_AW) {
id = 0xfff & (__raw_readl(pdata[num].regs + offset + REG_ERR_RPT_BID));
if (clear)
__raw_writel(BIT_ERR_RPT_AW,
pdata[num].regs + offset + REG_ERR_RPT_INT_CLEAR);
}
rpath = busmon_get_rpathinfo(busmon, id, pdata[num].need_rpath);
if (!rpath)
pr_info("error: can't find rpathinfo with id:%x, dest_name:%s\n",
id, pdata[num].need_rpath);
}
pr_info("\n=================================================================\n"
"Debugging Information - error response detected in %s\n"
"Node : %s [address: 0x%08X]\n"
"Path : %s -> %s\n"
"ID : %x\n"
"INT_SOURCE : %x\n",
val & BIT_ERR_RPT_AR ? "reading" : "writing",
pdata[num].name,
pdata[num].phy_regs,
rpath ? rpath->port_name : (pdata[num].type ? pdata[num].name : "no information"),
rpath ? rpath->dest_name : (pdata[num].type ? "no information" : pdata[num].name),
		id, val);
}
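/*
 * busmon_timeout_dump - dump one node's timeout interrupt.
 *
 * Reads the AR/AW timeout USER/ID/MO registers, optionally clears the
 * interrupt, resolves the route path and master IP when available and
 * prints the result.
 */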
static void busmon_timeout_dump(struct busmon_dev *busmon, int num, bool clear)
{
struct busmon_platdata *pdata = busmon->pdata;
struct busmon_masterinfo *master = NULL;
struct busmon_rpathinfo *rpath = NULL;
unsigned int val = 0, user = 0, id = 0, mo = 0;
if (!pdata[num].timeout_enabled)
return;
val = __raw_readl(pdata[num].regs + REG_DBG_INT_SOURCE);
if (!val)
return;
if (!pdata[num].need_rpath) {
pr_info("info: it doesn't need rpath, Other M-node may will more "
"debugging information\n");
} else {
if (val & BIT_TIMEOUT_AR) {
user = 0xf & (__raw_readl(pdata[num].regs + REG_DBG_READ_TIMEOUT_USER));
id = 0xfff & (__raw_readl(pdata[num].regs + REG_DBG_READ_TIMEOUT_ID));
mo = 0x3f & (__raw_readl(pdata[num].regs + REG_DBG_READ_TIMEOUT_MO));
if (clear)
__raw_writel(BIT_TIMEOUT_AR,
pdata[num].regs + REG_DBG_INT_CLEAR);
} else if (val & BIT_TIMEOUT_AW) {
user = 0xf & (__raw_readl(pdata[num].regs + REG_DBG_WRITE_TIMEOUT_USER));
id = 0xfff & (__raw_readl(pdata[num].regs + REG_DBG_WRITE_TIMEOUT_ID));
mo = 0x1f & (__raw_readl(pdata[num].regs + REG_DBG_WRITE_TIMEOUT_MO));
if (clear)
__raw_writel(BIT_TIMEOUT_AW,
pdata[num].regs + REG_DBG_INT_CLEAR);
}
/* Find userinfo / master info */
rpath = busmon_get_rpathinfo(busmon, id, pdata[num].need_rpath);
if (!rpath) {
pr_info("error: can't find rpathinfo with user:"
"%x, id:%x, mo:%x dest_name:%s\n",
user, id, mo, pdata[num].need_rpath);
} else {
master = busmon_get_masterinfo(busmon, rpath->port_name, user);
if (!master) {
pr_info("error: can't find masterinfo with port:"
"%s, user:%x, id:%x, mo:%x\n",
rpath->port_name, user, id, mo);
}
}
}
pr_info("\n=================================================================\n"
"Debugging Information - Timeout occurs in %s\n"
"Node : %s [address: 0x%08X]\n"
"Path : %s (master: %s) -> %s\n"
"User : %x\n"
"ID : %x\n"
"MO : %x\n"
"INT_SOURCE : %x\n",
val & BIT_TIMEOUT_AR ? "reading" : "writing",
pdata[num].name,
pdata[num].phy_regs,
master ? master->port_name : "no information",
master ? master->master_name : "no information",
pdata[num].name,
user, id, mo, val);
}
static void busmon_dump(struct busmon_dev *busmon, bool clear)
{
int i;
for (i = 0; i < ARRAY_SIZE(pdata_array); i++) {
/* Check Timeout */
busmon_timeout_dump(busmon, i, clear);
/* Check Error Report */
busmon_err_rpt_dump(busmon, i, clear);
}
}
static irqreturn_t busmon_irq_handler(int irq, void *data)
{
struct busmon_dev *busmon = (struct busmon_dev *)data;
/* Check error has been logged */
dev_info(busmon->dev, "BUS monitor information: %d interrupt occurs.\n", (irq - 32));
busmon_dump(busmon, true);
	/* Calling the busmon notifier chain is disabled on Exynos8890 EVT0 */
#if DISABLED
atomic_notifier_call_chain(&busmon_notifier_list, 0, &busmon->notifier_info);
#endif
pr_info("\n=================================================================\n");
panic("Error detected by BUS monitor.");
return IRQ_HANDLED;
}
void busmon_notifier_chain_register(struct notifier_block *block)
{
atomic_notifier_chain_register(&busmon_notifier_list, block);
}
static int busmon_logging_panic_handler(struct notifier_block *nb,
unsigned long l, void *buf)
{
struct busmon_panic_block *busmon_panic = (struct busmon_panic_block *)nb;
struct busmon_dev *busmon = busmon_panic->pdev;
if (!IS_ERR_OR_NULL(busmon)) {
/* Check error has been logged */
busmon_dump(busmon, false);
}
return 0;
}
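/*
 * busmon_init - arm timeout detection and error reporting.
 *
 * S-Nodes get their previous timeout state cleared, the timeout interval
 * programmed, the timeout interrupts unmasked and the timeout logic
 * enabled; every node with error reporting enabled additionally gets its
 * error report interrupt cleared and unmasked.
 */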
static void busmon_init(struct busmon_dev *busmon)
{
struct busmon_platdata *pdata = busmon->pdata;
int i;
for (i = 0; i < ARRAY_SIZE(pdata_array); i++) {
if (pdata[i].type == BUSMON_TYPE_SNODE && pdata[i].timeout_enabled) {
			/* first, clear any previously latched error */
__raw_writel(BIT_TIMEOUT_AR | BIT_TIMEOUT_AW,
pdata[i].regs + REG_DBG_INT_CLEAR);
/* set timeout interval value */
__raw_writel(pdata[i].time_val, pdata[i].regs + REG_DBG_TIMEOUT_INTERVAL);
/* unmask timeout function */
__raw_writel(BIT_TIMEOUT_AR | BIT_TIMEOUT_AW, pdata[i].regs + REG_DBG_INT_MASK);
/* enable timeout function */
__raw_writel(ENABLED, pdata[i].regs + REG_DBG_CONTROL);
pr_debug("Exynos BUS Monitor irq:%u - %s timeout enabled\n",
pdata[i].irq - 32, pdata[i].name);
}
if (pdata[i].err_rpt_enabled) {
/* enable err_rpt of s-node */
if (pdata[i].type == BUSMON_TYPE_SNODE) {
				/* first, clear any previously latched error */
__raw_writel(BIT_ERR_RPT_AR | BIT_ERR_RPT_AW,
pdata[i].regs + REG_DBG_ERR_RPT_OFFSET +
REG_ERR_RPT_INT_CLEAR);
				/* unmask the error report interrupt */
__raw_writel(BIT_ERR_RPT_AR | BIT_ERR_RPT_AW, pdata[i].regs +
REG_DBG_ERR_RPT_OFFSET + REG_ERR_RPT_INT_MASK);
} else {
				/* first, clear any previously latched error */
__raw_writel(BIT_ERR_RPT_AR | BIT_ERR_RPT_AW,
pdata[i].regs + REG_ERR_RPT_INT_CLEAR);
				/* unmask the error report interrupt */
__raw_writel(BIT_ERR_RPT_AR | BIT_ERR_RPT_AW, pdata[i].regs +
REG_ERR_RPT_INT_MASK);
}
pr_debug("Exynos BUS Monitor irq:%u - %s error reporting enabled\n",
pdata[i].irq - 32, pdata[i].name);
}
}
}
static int busmon_probe(struct platform_device *pdev)
{
struct busmon_dev *busmon;
struct busmon_panic_block *busmon_panic = NULL;
int ret, i;
u32 size;
busmon = devm_kzalloc(&pdev->dev, sizeof(struct busmon_dev), GFP_KERNEL);
if (!busmon) {
dev_err(&pdev->dev, "failed to allocate memory for driver's "
"private data\n");
return -ENOMEM;
}
busmon->dev = &pdev->dev;
spin_lock_init(&busmon->ctrl_lock);
busmon->pdata = pdata_array;
busmon->masterinfo = masterinfo_array;
busmon->rpathinfo = rpathinfo_array;
for (i = 0; i < ARRAY_SIZE(pdata_array); i++) {
if (busmon->pdata[i].type == BUSMON_TYPE_SNODE)
size = SZ_16K;
else
size = SZ_256;
busmon->pdata[i].regs = devm_ioremap_nocache(&pdev->dev,
busmon->pdata[i].phy_regs, size);
if (busmon->pdata[i].regs == NULL) {
dev_err(&pdev->dev, "failed to claim register region\n");
return -ENOENT;
}
ret = devm_request_irq(&pdev->dev, busmon->pdata[i].irq + 32,
busmon_irq_handler, IRQF_GIC_MULTI_TARGET,
busmon->pdata[i].name, busmon);
if (ret) {
dev_err(&pdev->dev, "irq request failed\n");
return -ENXIO;
}
}
busmon_panic = devm_kzalloc(&pdev->dev,
sizeof(struct busmon_panic_block), GFP_KERNEL);
if (!busmon_panic) {
dev_err(&pdev->dev, "failed to allocate memory for driver's "
"panic handler data\n");
} else {
busmon_panic->nb_panic_block.notifier_call =
busmon_logging_panic_handler;
busmon_panic->pdev = busmon;
atomic_notifier_chain_register(&panic_notifier_list,
&busmon_panic->nb_panic_block);
}
platform_set_drvdata(pdev, busmon);
busmon_init(busmon);
dev_info(&pdev->dev, "success to probe bus monitor driver\n");
return 0;
}
static int busmon_remove(struct platform_device *pdev)
{
platform_set_drvdata(pdev, NULL);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int busmon_suspend(struct device *dev)
{
return 0;
}
static int busmon_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct busmon_dev *busmon = platform_get_drvdata(pdev);
busmon_init(busmon);
return 0;
}
static SIMPLE_DEV_PM_OPS(busmon_pm_ops,
busmon_suspend,
busmon_resume);
#define BUSMON_PM (&busmon_pm_ops)
#else
#define BUSMON_PM NULL
#endif
static struct platform_driver exynos_busmon_driver = {
.probe = busmon_probe,
.remove = busmon_remove,
.driver = {
.name = "exynos-busmon",
.of_match_table = busmon_dt_match,
		.pm = BUSMON_PM,
},
};
module_platform_driver(exynos_busmon_driver);
MODULE_DESCRIPTION("Samsung Exynos8890 EVT0 BUS MONITOR DRIVER");
MODULE_AUTHOR("Hosung Kim <hosung0.kim@samsung.com");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:exynos-busmon");

View file

@ -0,0 +1,946 @@
/*
* Copyright (c) 2015 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* BUS Monitor Debugging Driver for Samsung EXYNOS8890 SoC
* By Hosung Kim (hosung0.kim@samsung.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define pr_fmt(fmt) "[detect] abnormal access: " fmt
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/exynos-busmon.h>
/* S-NODE, M-NODE Common */
#define OFFSET_TIMEOUT_REG (0x2000)
#define OFFSET_REQ_R (0x0)
#define OFFSET_REQ_W (0x20)
#define OFFSET_RESP_R (0x40)
#define OFFSET_RESP_W (0x60)
#define OFFSET_ERR_REPT (0x20)
#define REG_INT_MASK (0x0)
#define REG_INT_CLR (0x4)
#define REG_INT_INFO (0x8)
#define REG_EXT_INFO_0 (0x10)
#define REG_EXT_INFO_1 (0x14)
#define REG_EXT_INFO_2 (0x18)
#define REG_DBG_CTL (0x10)
#define REG_TIMEOUT_INIT_VAL (0x14)
#define REG_R_TIMEOUT_MO (0x18)
#define REG_W_TIMEOUT_MO (0x1C)
#define BIT_ERR_CODE(x) (((x) & (0xF << 28)) >> 28)
#define BIT_ERR_OCCURRED(x) (((x) & (0x1 << 27)) >> 27)
#define BIT_ERR_VALID(x) (((x) & (0x1 << 26)) >> 26)
#define BIT_ID_VAL(x) (((x) & (0xFFFF)))
#define BIT_AXUSER(x) (((x) & (0xFFFF << 16)) >> 16)
#define S_NODE (0)
#define M_NODE (1)
#define T_S_NODE (2)
#define T_M_NODE (3)
#define PATH_DATA_CCORE (0)
#define PATH_DATA_BUS0 (1)
#define PATH_DATA_BUS1 (2)
#define PATH_SFR_CCORE (3)
#define PATH_SFR_BUS0 (4)
#define PATH_SFR_BUS1 (5)
#define PATH_NUM (6)
#define ERRCODE_SLVERR (0)
#define ERRCODE_DECERR (1)
#define ERRCODE_UNSUPORTED (2)
#define ERRCODE_POWER_DOWN (3)
#define ERRCODE_UNKNOWN_4 (4)
#define ERRCODE_UNKNOWN_5 (5)
#define ERRCODE_TIMEOUT (6)
#define TIMEOUT (0xFFFFF)
#define TIMEOUT_TEST (0x1)
#define NEED_TO_CHECK (0xCAFE)
struct busmon_rpathinfo {
unsigned int id;
char *port_name;
char *dest_name;
};
struct busmon_masterinfo {
char *port_name;
unsigned int user;
char *master_name;
unsigned int bits;
};
struct busmon_nodeinfo {
unsigned int type;
char *name;
unsigned int phy_regs;
void __iomem *regs;
unsigned int irq;
unsigned int time_val;
bool timeout_enabled;
bool err_rpt_enabled;
char *need_rpath;
char *comment;
};
/* Error Code Description */
static char *busmon_errcode[] = {
"Error Detect by the Slave(SLVERR)",
"Decode error(DECERR)",
"Unsupported transaction error",
"Power Down access error",
"Unsupported transaction",
"Unsupported transaction",
"Timeout error - response timeout",
"Invalid errorcode",
};
/* Error Code Description */
static char *busmon_sfr_errmaster[] = {
"CPU(Access from either CPU clusters)",
"TREX_CCORE(The PERI S-node from the data backbone)",
"G3D(SFR accesses from G3D)",
"CP(SFR accesses from CP)",
"CPU(IMEM access only)",
"CPU(IMEM access only)",
};
struct busmon_nodegroup {
int irq;
char *name;
struct busmon_nodeinfo *nodeinfo;
unsigned int nodesize;
};
struct busmon_platdata {
struct busmon_rpathinfo *rpathinfo;
struct busmon_masterinfo *masterinfo;
struct busmon_nodegroup nodegroup[PATH_NUM];
};
static struct busmon_rpathinfo rpathinfo[] = {
{0, "G3D1", "MEMS_0"},
{65, "CAM0", "MEMS_0"},
{81, "CAM1", "MEMS_0"},
{1, "DISP0_0", "MEMS_0"},
{17, "DISP0_1", "MEMS_0"},
{33, "DISP1_0", "MEMS_0"},
{49, "DISP1_1", "MEMS_0"},
{97, "ISP0", "MEMS_0"},
{66, "CAM0", "MEMS_0"},
{82, "CAM1", "MEMS_0"},
{2, "DISP0_0", "MEMS_0"},
{18, "DISP0_1", "MEMS_0"},
{34, "DISP1_0", "MEMS_0"},
{50, "DISP1_1", "MEMS_0"},
{98, "ISP0", "MEMS_0"},
{3, "IMEM", "MEMS_0"},
{35, "AUD", "MEMS_0"},
{67, "CORESIGHT", "MEMS_0"},
{11, "CAM1", "MEMS_0"},
{75, "FSYS1", "MEMS_0"},
{43, "ISP0", "MEMS_0"},
{19, "CP", "MEMS_0"},
{4, "FSYS0", "MEMS_0"},
{52, "MFC0", "MEMS_0"},
{68, "MFC1", "MEMS_0"},
{20, "MSCL0", "MEMS_0"},
{36, "MSCL1", "MEMS_0"},
{0, "G3D1", "MEMS_1"},
{65, "CAM0", "MEMS_1"},
{81, "CAM1", "MEMS_1"},
{1, "DISP0_0", "MEMS_1"},
{17, "DISP0_1", "MEMS_1"},
{33, "DISP1_0", "MEMS_1"},
{49, "DISP1_1", "MEMS_1"},
{97, "ISP0", "MEMS_1"},
{66, "CAM0", "MEMS_1"},
{82, "CAM1", "MEMS_1"},
{2, "DISP0_0", "MEMS_1"},
{18, "DISP0_1", "MEMS_1"},
{34, "DISP1_0", "MEMS_1"},
{50, "DISP1_1", "MEMS_1"},
{98, "ISP0", "MEMS_1"},
{3, "IMEM", "MEMS_1"},
{35, "AUD", "MEMS_1"},
{67, "CORESIGHT", "MEMS_1"},
{11, "CAM1", "MEMS_1"},
{75, "FSYS1", "MEMS_1"},
{43, "ISP0", "MEMS_1"},
{19, "CP", "MEMS_1"},
{4, "FSYS0", "MEMS_1"},
{52, "MFC0", "MEMS_1"},
{68, "MFC1", "MEMS_1"},
{20, "MSCL0", "MEMS_1"},
{36, "MSCL1", "MEMS_1"},
{0, "IMEM", "PERI"},
{8, "AUD", "PERI"},
{16, "CORESIGHT", "PERI"},
{1, "FSYS0", "PERI"},
{13, "MFC0", "PERI"},
{17, "MFC1", "PERI"},
{5, "MSCL0", "PERI"},
{9, "MSCL1", "PERI"},
{2, "CAM1", "PERI"},
{18, "FSYS1", "PERI"},
{10, "ISP0", "PERI"},
};
static struct busmon_masterinfo masterinfo[] = {
/* DISP0_0 */
{"DISP0_0", 1 << 0, "sysmmu", 0x1},
{"DISP0_0", 1 << 0, "S-IDMA0", 0x3},
{"DISP0_0", 1 << 1, "IDMA3", 0x3},
/* DISP0_1 */
{"DISP0_1", 1 << 0, "sysmmu", 0x1},
{"DISP0_1", 0 << 0, "IDMA0", 0x3},
{"DISP0_1", 1 << 1, "IDMA4", 0x3},
/* DISP1_0 */
{"DISP1_0", 1 << 0, "sysmmu", 0x1},
{"DISP1_0", 0 << 0, "IDMA1", 0x3},
{"DISP1_0", 1 << 1, "VGR0", 0x3},
/* DISP1_1 */
{"DISP1_1", 1 << 0, "sysmmu", 0x1},
{"DISP1_1", 0 << 0, "IDMA2", 0x7},
{"DISP1_1", 1 << 1, "VGR1", 0x7},
{"DISP1_1", 1 << 2, "WDMA", 0x7},
/* MFC0 */
{"MFC0", 1 << 0, "sysmmu", 0x1},
{"MFC0", 0 << 0, "MFC M0", 0x1},
/* MFC1 */
{"MFC1", 1 << 0, "sysmmu", 0x1},
{"MFC1", 0 << 0, "MFC M1", 0x1},
/* IMEM */
{"IMEM", 0 << 0, "SSS M0", 0xF},
{"IMEM", 1 << 2, "RTIC", 0xF},
{"IMEM", 1 << 3, "SSS M1", 0xF},
{"IMEM", 1 << 0, "MCOMP", 0x3},
{"IMEM", 1 << 1, "APM", 0x3},
/* G3D */
{"G3D0", 0 << 0, "G3D0", 0x1},
{"G3D1", 0 << 1, "G3D1", 0x1},
/* AUD */
{"AUD", 1 << 0, "sysmmu", 0x1},
{"AUD", 1 << 1, "DMAC", 0x7},
{"AUD", 1 << 2, "AUD CA5", 0x7},
/* MSCL0 */
{"MSCL0", 1 << 0, "sysmmu", 0x1},
{"MSCL0", 0 << 0, "JPEG", 0x3},
{"MSCL0", 1 << 1, "MSCL0", 0x3},
/* MSCL1 */
{"MSCL1", 1 << 0, "sysmmu", 0x1},
{"MSCL1", 0 << 0, "G2D", 0x3},
{"MSCL1", 1 << 1, "MSCL1", 0x3},
/* FSYS1 */
{"FSYS1", 0 << 0, "MMC51", 0x7},
{"FSYS1", 1 << 2, "UFS", 0x7},
{"FSYS1", 1 << 0, "PCIE_WIFI0", 0x3},
{"FSYS1", 1 << 1, "PCIE_WIFI1", 0x3},
/* FSYS0 */
{"FSYS0", 0 << 0, "ETR USB", 0x7},
{"FSYS0", 1 << 2, "USB30", 0x7},
{"FSYS0", 1 << 0, "UFS", 0x7},
{"FSYS0", 1 << 0 | 1 << 2, "MMC51", 0x7},
{"FSYS0", 1 << 1, "PDMA0", 0x7},
{"FSYS0", 1 << 1 | 1 << 2, "PDMA(secure)", 0x7},
{"FSYS0", 1 << 0 | 1 << 1, "USB20", 0x3},
/* CAM0 */
{"CAM0", 1 << 0, "sysmmu", 0x1},
{"CAM0", 0 << 0, "MIPI_CSIS0", 0x7},
{"CAM0", 1 << 1, "MIPI_CSIS1", 0x7},
{"CAM0", 1 << 2, "FIMC_3AA0", 0x7},
{"CAM0", 1 << 1 | 1 << 2, "FIMC_3AA1", 0x7},
/* CAM1 */
{"CAM1", 1 << 2, "sysmmu_IS_B", 0x7},
{"CAM1", 0 << 0, "MIPI_CSI2", 0x1F},
{"CAM1", 1 << 3, "ISP1", 0x1F},
{"CAM1", 1 << 4, "MIPI_CSI3", 0x1F},
{"CAM1", 1 << 0 | 1 << 2, "sysmmu_SCL", 0x7},
{"CAM1", 1 << 0, "MC_SCALER", 0x7},
{"CAM1", 1 << 0 | 1 << 1 | 1 << 2, "sysmmu_VRA", 0x7},
{"CAM1", 1 << 0 | 1 << 1, "FIMC_VRA", 0x7},
{"CAM1", 1 << 1 | 1 << 2, "sysmmu_CA7", 0x7},
{"CAM1", 1 << 1, "CA7", 0xF},
{"CAM1", 1 << 1 | 1 << 3, "PDMA_IS", 0xF},
/* ISP0 */
{"ISP0", 1 << 0, "sysmmu", 0x1},
{"ISP0", 0 << 0, "FIMC_ISP", 0x3},
{"ISP0", 1 << 1, "FIMC_TPU", 0x3},
/* CP */
{"CP", 0 << 0, "CR7M", 0x18},
{"CP", 1 << 3, "TL3MtoL2", 0x18},
{"CP", 1 << 4, "DMAC", 0x1C},
{"CP", 1 << 2 | 1 << 4, "MEMtoL2", 0x1C},
{"CP", 1 << 3 | 1 << 4, "CSXAP", 0x1F},
{"CP", 1 << 0 | 1 << 3 | 1 << 4, "LMAC", 0x1F},
{"CP", 1 << 1 | 1 << 3 | 1 << 4, "HMtoL2", 0x1F},
};
static struct busmon_nodeinfo ccore_datapath[] = {
/* Data Path - CCORE */
{M_NODE, "CCORE_G3D0_M_NODE", 0x10683000, NULL, 322, 0, false, true, NULL, "DATA Path [ Master(G3D0 Block) -> Slave(MEM or PERI) ]"},
{M_NODE, "CCORE_G3D1_M_NODE", 0x10693000, NULL, 322, 0, false, true, NULL, "DATA Path [ Master(G3D1 Block) -> Slave(MEM or PERI) ]"},
{M_NODE, "CCORE_IMEM_M_NODE", 0x106B3000, NULL, 322, 0, false, true, NULL, "DATA Path [ Master(IMEM Block) -> Slave(MEM or PERI) ]"},
{M_NODE, "CCORE_AUD_M_NODE", 0x106C3000, NULL, 322, 0, false, true, NULL, "DATA Path [ Master(AUD Block) -> Slave(MEM or PERI) ]"},
{M_NODE, "CCORE_CORESIGHT_M_NODE", 0x106D3000, NULL, 322, 0, false, true, NULL, "DATA Path [ Master(Coresight) -> Slave(MEM or PERI) ]"},
{M_NODE, "CCORE_CP_M_NODE", 0x10733000, NULL, 322, 0, false, true, NULL, "DATA Path [ Master(CP) -> Slave(MEM or PERI) ]"},
{T_M_NODE, "CCORE_BUS0_0_M_NODE", 0x10603000, NULL, 322, 0, false, true, NULL, "DATA Path [ Master -> CCORE_BUS0_0 -> Slave(MEM or PERI) ]"},
{T_M_NODE, "CCORE_BUS0_1_M_NODE", 0x10613000, NULL, 322, 0, false, true, NULL, "DATA Path [ Master -> CCORE_BUS0_1 -> Slave(MEM or PERI) ]"},
{T_M_NODE, "CCORE_BUS1_0_M_NODE", 0x10623000, NULL, 322, 0, false, true, NULL, "DATA Path [ Master -> CCORE_BUS1_0 -> Slave(MEM or PERI) ]"},
{T_M_NODE, "CCORE_BUS1_1_M_NODE", 0x10633000, NULL, 322, 0, false, true, NULL, "DATA Path [ Master -> CCORE_BUS1_1 -> Slave(MEM or PERI) ]"},
{T_M_NODE, "CCORE_BUS0_R0_T_M_NODE", 0x10643000, NULL, 322, 0, false, true, NULL, "DATA Path [ Master -> CCORE_BUS0_R0 -> Slave(MEM or PERI) ]"},
{T_M_NODE, "CCORE_BUS0_R1_T_M_NODE", 0x10653000, NULL, 322, 0, false, true, NULL, "DATA Path [ Master -> CCORE_BUS0_R1 -> Slave(MEM or PERI) ]"},
{T_M_NODE, "CCORE_BUS0_R2_T_M_NODE", 0x10663000, NULL, 322, 0, false, true, NULL, "DATA Path [ Master -> CCORE_BUS0_R2 -> Slave(MEM or PERI) ]"},
{T_M_NODE, "CCORE_BUS0_R3_T_M_NODE", 0x10673000, NULL, 322, 0, false, true, NULL, "DATA Path [ Master -> CCORE_BUS0_R3 -> Slave(MEM or PERI) ]"},
{S_NODE, "CCORE_MEMS_0_S_NODE", 0x10703000, NULL, 322, TIMEOUT, true, true, "MEMS_0", "DATA Path [ Master(Refer Route Information for DATA) -> Slave(MEM0) ]"},
{S_NODE, "CCORE_MEMS_1_S_NODE", 0x10713000, NULL, 322, TIMEOUT, true, true, "MEMS_1", "DATA Path [ Master(Refer Route Information for DATA) -> Slave(MEM1) ]"},
{S_NODE, "CCORE_PERI_S_NODE", 0x10723000, NULL, 322, TIMEOUT, true, true, "PERI", "DATA Path [ Master(Refer Route Information for DATA) -> Slave(PERI) ]"},
};
static struct busmon_nodeinfo bus0_datapath[] = {
/* Data Path BUS0 */
{M_NODE, "BUS0_FSYS1_M_NODE", 0x11F03000, NULL, 344, 0, false, true, NULL, "DATA Path [ Master(FSYS1 Block) -> Slave(MEM or PERI) ]"},
{M_NODE, "BUS0_CAM1_M_NODE", 0X11F13000, NULL, 344, 0, false, true, NULL, "DATA Path [ Master(CAM1 Block) -> Slave(MEM or PERI) ]"},
{M_NODE, "BUS0_ISP0_M_NODE", 0X11F23000, NULL, 344, 0, false, true, NULL, "DATA Path [ Master(ISP0 Block) -> Slave(MEM or PERI) ]"},
{M_NODE, "BUS0_DISP0_0_M_NODE", 0X11F33000, NULL, 344, 0, false, true, NULL, "DATA Path [ Master(DISP0_0 Block) -> Slave(MEM or PERI) ]"},
{M_NODE, "BUS0_DISP0_1_M_NODE", 0X11F43000, NULL, 344, 0, false, true, NULL, "DATA Path [ Master(DISP0_1 Block) -> Slave(MEM or PERI) ]"},
{M_NODE, "BUS0_DISP1_0_M_NODE", 0X11F53000, NULL, 344, 0, false, true, NULL, "DATA Path [ Master(DISP1_0 Block) -> Slave(MEM or PERI) ]"},
{M_NODE, "BUS0_DISP1_1_M_NODE", 0X11F63000, NULL, 344, 0, false, true, NULL, "DATA Path [ Master(DISP1_1 Block) -> Slave(MEM or PERI) ]"},
{M_NODE, "BUS0_CAM0_M_NODE", 0X11F73000, NULL, 344, 0, false, true, NULL, "DATA Path [ Master(CAM0 Block) -> Slave(MEM or PERI) ]"},
{T_S_NODE, "BUS0_0_T_S_NODE", 0X11F83000, NULL, 344, 0, false, true, NULL, "DATA Path [ Master -> BUS0 -> Slave(MEM or PERI) ]"},
{T_S_NODE, "BUS0_1_T_S_NODE", 0X11F93000, NULL, 344, 0, false, true, NULL, "DATA Path [ Master -> BUS0 -> Slave(MEM or PERI) ]"},
{T_S_NODE, "BUS0_R0_T_S_NODE", 0X11FA3000, NULL, 344, 0, false, true, NULL, "DATA Path [ Master -> BUS0 -> Slave(MEM0) ]"},
{T_S_NODE, "BUS0_R1_T_S_NODE", 0X11FB3000, NULL, 344, 0, false, true, NULL, "DATA Path [ Master -> BUS0 -> Slave(MEM1) ]"},
{T_S_NODE, "BUS0_R2_T_S_NODE", 0X11FC3000, NULL, 344, 0, false, true, NULL, "DATA Path [ Master -> BUS0 -> Slave(MEM2) ]"},
{T_S_NODE, "BUS0_R3_T_S_NODE", 0X11FD3000, NULL, 344, 0, false, true, NULL, "DATA Path [ Master -> BUS0 -> Slave(MEM3) ]"},
};
static struct busmon_nodeinfo bus1_datapath[] = {
/* Data Path - BUS1 */
{M_NODE, "BUS1_FSYS0_M_NODE", 0x11D03000, NULL, 368, 0, false, true, NULL, "DATA Path [ Master(FSYS0 Block) -> Slave(MEM or PERI) ]"},
{M_NODE, "BUS1_MSCL0_M_NODE", 0x11D13000, NULL, 368, 0, false, true, NULL, "DATA Path [ Master(MSCL0 Block) -> Slave(MEM or PERI) ]"},
{M_NODE, "BUS1_MSCL1_M_NODE", 0x11D23000, NULL, 368, 0, false, true, NULL, "DATA Path [ Master(MSCL1 Block) -> Slave(MEM or PERI) ]"},
{M_NODE, "BUS1_MFC0_M_NODE", 0x11D33000, NULL, 368, 0, false, true, NULL, "DATA Path [ Master(MFC01 Block) -> Slave(MEM or PERI) ]"},
{M_NODE, "BUS1_MFC1_M_NODE", 0x11D43000, NULL, 368, 0, false, true, NULL, "DATA Path [ Master(MFC1 Block) -> Slave(MEM or PERI) ]"},
{T_S_NODE, "BUS1_0_T_S_NODE", 0x11D53000, NULL, 368, 0, false, true, NULL, "DATA Path [ Master -> BUS1 -> Slave) ]"},
{T_S_NODE, "BUS1_1_T_S_NODE", 0x11D63000, NULL, 368, 0, false, true, NULL, "DATA Path [ Master -> BUS1 -> Slave) ]"},
};
static struct busmon_nodeinfo ccore_sfrpath[] = {
/* SFR Path CCORE */
{M_NODE, "P_CCORE_BUS_M_NODE", 0x104E3000, NULL, 323, 0, false, true, NULL, "SFR Path - Connected with CCORE"},
{S_NODE, "P_CCORE_APL_S_NODE", 0x10443000, NULL, 323, TIMEOUT, true, true, NULL, "SFR Path [ Master(Refer Route Information for SFR) -> Slave(APL(Apollo) Block) ]"},
{S_NODE, "P_CCORE_AUD_S_NODE", 0x10493000, NULL, 323, TIMEOUT, true, true, NULL, "SFR Path [ Master(Refer Route Information for SFR) -> Slave(AUD(Audio) Block) ]"},
{S_NODE, "P_CCORE_CCORE_SFR_S_NODE", 0x104B3000, NULL, 323, TIMEOUT, true, true, NULL, "SFR Path [ Master(Refer Route Information for SFR) -> Slave(CCORE Block) ]"},
{S_NODE, "P_CCORE_CORESIGHT_S_NODE", 0x10423000, NULL, 323, TIMEOUT, true, true, NULL, "SFR Path [ Master(Refer Route Information for SFR) -> Slave(Coresight Access) ]"},
{S_NODE, "P_CCORE_G3D_S_NODE", 0x104A3000, NULL, 323, TIMEOUT, true, true, NULL, "SFR Path [ Master(Refer Route Information for SFR) -> Slave(G3D block) ]"},
{S_NODE, "P_CCORE_MIF0_S_NODE", 0x10453000, NULL, 323, TIMEOUT, true, true, NULL, "SFR Path [ Master(Refer Route Information for SFR) -> Slave(MIF0 Block) ]"},
{S_NODE, "P_CCORE_MIF1_S_NODE", 0x10463000, NULL, 323, TIMEOUT, true, true, NULL, "SFR Path [ Master(Refer Route Information for SFR) -> Slave(MIF1 Block) ]"},
{S_NODE, "P_CCORE_MIF2_S_NODE", 0x10473000, NULL, 323, TIMEOUT, true, true, NULL, "SFR Path [ Master(Refer Route Information for SFR) -> Slave(MIF2 Block) ]"},
{S_NODE, "P_CCORE_MIF3_S_NODE", 0x10483000, NULL, 323, TIMEOUT, true, true, NULL, "SFR Path [ Master(Refer Route Information for SFR) -> Slave(MIF3 Block) ]"},
{S_NODE, "P_CCORE_MNGS_NODE", 0x10433000, NULL, 323, TIMEOUT, true, true, NULL, "SFR Path [ Master(Refer Route Information for SFR) -> Slave(MNG(mongoose) Block) ]"},
{S_NODE, "P_CCORE_TREX_MIF_S_NODE", 0x104D3000, NULL, 323, TIMEOUT, true, true, NULL, "SFR Path [ Master(Refer Route Information for SFR) -> Slave(TREX_MIF Block) ]"},
{S_NODE, "P_CCORE_TREX_MIF_PERI_S_NODE",0x104C3000, NULL, 323, TIMEOUT, true, true, NULL, "SFR Path [ Master(Refer Route Information for SFR) -> Slave(TREX_MIF_PERI Block) ]"},
{T_S_NODE, "P_CCORE_BUS1_T_S_NODE", 0x10403000, NULL, 323, 0, false, true, NULL, "SFR Path [ (Master(Refer Route Information for SFR) -> BUS1 Block) ]"},
{T_S_NODE, "P_CCORE_BUS0_T_S_NODE", 0x10413000, NULL, 323, 0, false, true, NULL, "SFR Path [ (Master(Refer Route Information for SFR) -> BUS0 Block) ]"},
};
static struct busmon_nodeinfo bus1_sfrpath[] = {
/* Path BUS1 */
{T_M_NODE, "P_BUS1_CCORE_BUS1_T_M_NODE",0x11CE3000, NULL, 373, 0, false, true, NULL, "SFR Path - Connected with BUS1"},
{S_NODE, "P_BUS1_BUS1_SFR_S_NODE", 0x11C43000, NULL, 373, TIMEOUT, true, true, NULL, "SFR Path [ Master(Refer Route Information for SFR) -> Slave(BUS1 Block) ]"},
{S_NODE, "P_BUS1_FSYS0_S_NODE", 0x11C03000, NULL, 373, TIMEOUT, true, true, NULL, "SFR Path [ Master(Refer Route Information for SFR) -> Slave(FSYS0 Block) ]"},
{S_NODE, "P_BUS1_MFC_S_NODE", 0x11C23000, NULL, 373, TIMEOUT, true, true, NULL, "SFR Path [ Master(Refer Route Information for SFR) -> Slave(MFC Block) ]"},
{S_NODE, "P_BUS1_MSCL_S_NODE", 0x11C13000, NULL, 373, TIMEOUT, true, true, NULL, "SFR Path [ Master(Refer Route Information for SFR) -> Slave(MSCL Block) ]"},
{S_NODE, "P_BUS1_TREX_BUS1_S_NODE", 0x11C63000, NULL, 373, TIMEOUT, true, true, NULL, "SFR Path [ Master(Refer Route Information for SFR) -> Slave(TREX_BUS1 Block) ]"},
{S_NODE, "P_BUS1_TREX_BUS1_PERI_S_NODE",0x11C53000, NULL, 373, TIMEOUT, true, true, NULL, "SFR Path [ Master(Refer Route Information for SFR) -> Slave(TREX_BUS1_PERI Block) ]"},
};
static struct busmon_nodeinfo bus0_sfrpath[] = {
/* Path BUS0 */
{T_M_NODE, "P_BUS0_CCORE_BUS0_T_M_NODE",0x11EE3000, NULL, 352, 0, false, true, NULL, "SFR Path - Connected with BUS0"},
{S_NODE, "P_BUS0_BUS0_SFR_S_NODE", 0x11E73000, NULL, 352, TIMEOUT, true, true, NULL, "SFR Path [ Master(Refer Route Information for SFR) -> Slave(BUS0 Block) ]"},
{S_NODE, "P_BUS0_CAM0_S_NODE", 0x11E63000, NULL, 352, TIMEOUT, true, true, NULL, "SFR Path [ Master(Refer Route Information for SFR) -> Slave(CAM0 Block) ]"},
{S_NODE, "P_BUS0_CAM1_S_NODE", 0x11E53000, NULL, 352, TIMEOUT, true, true, NULL, "SFR Path [ Master(Refer Route Information for SFR) -> Slave(CAM1 Block) ]"},
{S_NODE, "P_BUS0_DISP_S_NODE", 0x11E33000, NULL, 352, TIMEOUT, true, true, NULL, "SFR Path [ Master(Refer Route Information for SFR) -> Slave(DISP Block) ]"},
{S_NODE, "P_BUS0_FSYS1_S_NODE", 0x11E13000, NULL, 352, TIMEOUT, true, true, NULL, "SFR Path [ Master(Refer Route Information for SFR) -> Slave(FSYS1 Block) ]"},
{S_NODE, "P_BUS0_PERIC0_S_NODE", 0x11E23000, NULL, 352, TIMEOUT, true, true, NULL, "SFR Path [ Master(Refer Route Information for SFR) -> Slave(PERIC0 Block) ]"},
{S_NODE, "P_BUS0_PERIC1_S_NODE", 0x11EA3000, NULL, 352, TIMEOUT, true, true, NULL, "SFR Path [ Master(Refer Route Information for SFR) -> Slave(PERIC1 Block) ]"},
{S_NODE, "P_BUS0_PERIS_S_NODE", 0x11E03000, NULL, 352, TIMEOUT, true, true, NULL, "SFR Path [ Master(Refer Route Information for SFR) -> Slave(PERIS Block) ]"},
{S_NODE, "P_BUS0_TREX_BUS0_S_NODE", 0x11E93000, NULL, 352, TIMEOUT, true, true, NULL, "SFR Path [ Master(Refer Route Information for SFR) -> Slave(TREX_BUS0 Block) ]"},
{S_NODE, "P_BUS0_TREX_BUS0_PERI_S_NODE",0x11E83000, NULL, 352, TIMEOUT, true, true, NULL, "SFR Path [ Master(Refer Route Information for SFR) -> Slave(TREX BUS0 PERI) ]"},
{S_NODE, "P_BUS0_VPP_S_NODE", 0x11E43000, NULL, 352, TIMEOUT, true, true, NULL, "SFR Path [ Master(Refer Route Information for SFR) -> Slave(VPP Block) ]"},
};
struct busmon_dev {
struct device *dev;
struct busmon_platdata *pdata;
struct of_device_id *match;
int irq;
int id;
void __iomem *regs;
spinlock_t ctrl_lock;
struct busmon_notifier notifier_info;
};
struct busmon_panic_block {
struct notifier_block nb_panic_block;
struct busmon_dev *pdev;
};
/* declare notifier_list */
static ATOMIC_NOTIFIER_HEAD(busmon_notifier_list);
static const struct of_device_id busmon_dt_match[] = {
{ .compatible = "samsung,exynos-busmonitor",
.data = NULL, },
{},
};
MODULE_DEVICE_TABLE(of, busmon_dt_match);
static struct busmon_rpathinfo* busmon_get_rpathinfo
(struct busmon_dev *busmon,
unsigned int id,
char *dest_name)
{
struct busmon_platdata *pdata = busmon->pdata;
struct busmon_rpathinfo *rpath = NULL;
int i;
for (i = 0; i < ARRAY_SIZE(rpathinfo); i++) {
if (pdata->rpathinfo[i].id == id) {
if (dest_name && !strncmp(pdata->rpathinfo[i].dest_name,
dest_name, strlen(pdata->rpathinfo[i].dest_name))) {
rpath = &pdata->rpathinfo[i];
break;
}
}
}
return rpath;
}
static struct busmon_masterinfo* busmon_get_masterinfo
(struct busmon_dev *busmon,
char *port_name,
unsigned int user)
{
struct busmon_platdata *pdata = busmon->pdata;
struct busmon_masterinfo *master = NULL;
unsigned int val;
int i;
for (i = 0; i < ARRAY_SIZE(masterinfo); i++) {
if (!strncmp(pdata->masterinfo[i].port_name, port_name, strlen(port_name))) {
val = user & pdata->masterinfo[i].bits;
if (val == pdata->masterinfo[i].user) {
master = &pdata->masterinfo[i];
break;
}
}
}
return master;
}
static void busmon_report_ext_info(struct busmon_dev *busmon,
struct busmon_nodeinfo *node,
unsigned int offset)
{
unsigned int info_int, info0, info1, info2;
info_int = __raw_readl(node->regs + offset + REG_INT_INFO);
info0 = __raw_readl(node->regs + offset + REG_EXT_INFO_0);
info1 = __raw_readl(node->regs + offset + REG_EXT_INFO_1);
info2 = __raw_readl(node->regs + offset + REG_EXT_INFO_2);
pr_info("\n--------------------------------------------------------------------------------\n"
"Detail NODE information for debuggging\n\n"
"Node Name : %s [address: 0x%08X]\n"
"INTERRUPT_INFO : 0x%08X\n"
"EXT_INFO_0 : 0x%08X\n"
"EXT_INFO_1 : 0x%08X\n"
"EXT_INFO_2 : 0x%08X\n"
"--------------------------------------------------------------------------------\n",
node->name, node->phy_regs + offset,
info_int,
info0,
info1,
info2);
}
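/*
 * busmon_report_route - resolve the master IP behind a DATA transaction.
 *
 * The AXI ID from INT_INFO selects a route path entry for the node's
 * destination, and the AxUSER bits from EXT_INFO_2 select the master IP on
 * that port; both are printed together with the target address.
 */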
static void busmon_report_route(struct busmon_dev *busmon,
struct busmon_nodeinfo *node,
unsigned int offset)
{
struct busmon_masterinfo *master = NULL;
struct busmon_rpathinfo *rpath = NULL;
unsigned int val, id, user, addr;
val = __raw_readl(node->regs + offset + REG_INT_INFO);
id = BIT_ID_VAL(val);
val = __raw_readl(node->regs + offset + REG_EXT_INFO_2);
user = BIT_AXUSER(val);
rpath = busmon_get_rpathinfo(busmon, id, node->need_rpath);
if (!rpath) {
pr_info("failed to get route path - %s, id:%x\n",
node->need_rpath, id);
} else {
val = __raw_readl(node->regs + offset + REG_EXT_INFO_2);
user = BIT_AXUSER(val);
master = busmon_get_masterinfo(busmon, rpath->port_name, user);
if (!master) {
pr_info("failed to get master IP with "
"port:%s, id:%x, user:%x\n",
rpath->port_name, id, user);
} else {
addr = __raw_readl(node->regs + offset + REG_EXT_INFO_0);
pr_info("\n--------------------------------------------------------------------------------\n"
"Route Information for DATA transaction\n\n"
"Master IP:%s's %s ---> Target:%s(addr: 0x%08X)\n",
master->port_name, master->master_name, rpath->dest_name, addr);
}
}
}
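/*
 * busmon_report_info - decode one error report bank of a node.
 *
 * Handles the special P_CCORE_BUS_M_NODE case where only the SFR master
 * can be derived from the AxID, rejects logs that do not match the node
 * type, resolves the route for response logs on S-Nodes, prints the
 * transaction, appends the MO value for timeout errors and finally dumps
 * the raw registers.
 */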
static void busmon_report_info(struct busmon_dev *busmon,
struct busmon_nodegroup *group,
struct busmon_nodeinfo *node,
unsigned int offset)
{
unsigned int val, errcode, addr;
	bool read = false, req = false;
val = __raw_readl(node->regs + offset + REG_INT_INFO);
if (!BIT_ERR_VALID(val)) {
if (!strncmp(node->name, "P_CCORE_BUS_M_NODE", strlen(node->name))) {
unsigned int master;
master = BIT_ID_VAL(val) & 0x7;
pr_info("\n--------------------------------------------------------------------------------\n"
"Route Information for SFR transaction\n\n"
"Master IP : %s\n"
"AxID : 0x%X\n"
"--------------------------------------------------------------------------------\n",
busmon_sfr_errmaster[master],
BIT_ID_VAL(val));
} else {
pr_info("no information, %s/offset:%x is stopover, "
"check other node\n", node->name, offset);
}
return;
}
errcode = BIT_ERR_CODE(val);
addr = __raw_readl(node->regs + offset + REG_EXT_INFO_0);
switch(offset) {
case OFFSET_REQ_R:
read = true;
		/* fall through */
case OFFSET_REQ_W:
req = true;
if (node->type == S_NODE) {
			/* Request information is not logged on an S-Node */
			pr_info("invalid log, see the following information\n");
break;
}
break;
case OFFSET_RESP_R:
read = true;
		/* fall through */
case OFFSET_RESP_W:
req = false;
if (node->type != S_NODE) {
			/* Only an S-Node logs response information */
			pr_info("invalid log, see the following information\n");
break;
}
if (node->need_rpath) {
/* Data Path */
busmon_report_route(busmon, node, offset);
}
break;
default:
pr_info("Unknown Error - offset:%u\n", offset);
break;
}
pr_info("\n--------------------------------------------------------------------------------\n"
"Transaction information => Fail to access %s %s \n\n"
"reason : %s\n"
"Target address : 0x%08X\n"
"error type : %s\n"
"type : %s\n",
read ? "reading" : "wrting",
req ? "request" : "response",
node->comment,
addr,
busmon_errcode[errcode],
group->name);
	if (node->type == S_NODE && errcode == ERRCODE_TIMEOUT) {
unsigned int mo_offset = read ? REG_R_TIMEOUT_MO : REG_W_TIMEOUT_MO;
pr_info("\n--------------------------------------------------------------------------------\n"
"additional information for timeout error\n\n"
"Timeout MO val : 0x%08X\n", __raw_readl(node->regs + offset + mo_offset));
}
	/* report raw register extension information */
busmon_report_ext_info(busmon, node, offset);
}
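/*
 * busmon_parse_info - scan nodes for latched errors and report them.
 *
 * Either a single group or every group is walked; for each node all four
 * error report offsets are checked and reported, and the interrupt is
 * cleared when requested.  Returns non-zero when an error was found.
 */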
static int busmon_parse_info(struct busmon_dev *busmon,
struct busmon_nodegroup *group,
bool clear)
{
struct busmon_platdata *pdata = busmon->pdata;
struct busmon_nodeinfo *node = NULL;
unsigned int val, offset;
unsigned long flags;
int ret = 0;
int i, j, k;
spin_lock_irqsave(&busmon->ctrl_lock, flags);
if (group) {
/* Processing only this group */
node = group->nodeinfo;
for (i = 0; i < group->nodesize; i++) {
for (j = 0; j < 4; j++) {
offset = j * OFFSET_ERR_REPT;
/* Check Request information */
val = __raw_readl(node[i].regs + offset + REG_INT_INFO);
if (BIT_ERR_OCCURRED(val)) {
					/* This node raised the error */
busmon_report_info(busmon, group, &node[i], offset);
if (clear)
__raw_writel(1,
node[i].regs + offset + REG_INT_CLR);
ret = 1;
}
}
}
} else {
/* Processing all group & nodes */
for (i = 0; i < ARRAY_SIZE(pdata->nodegroup); i++) {
group = &pdata->nodegroup[i];
node = group->nodeinfo;
for (j = 0; j < group->nodesize; j++) {
for (k = 0; k < 4; k++) {
offset = k * OFFSET_ERR_REPT;
/* Check Request information */
val = __raw_readl(node[j].regs + offset + REG_INT_INFO);
if (BIT_ERR_OCCURRED(val)) {
						/* This node raised the error */
busmon_report_info(busmon, group, &node[j], offset);
if (clear)
__raw_writel(1,
node[j].regs + offset + REG_INT_CLR);
ret = 1;
}
}
}
}
}
spin_unlock_irqrestore(&busmon->ctrl_lock, flags);
return ret;
}
static irqreturn_t busmon_irq_handler(int irq, void *data)
{
struct busmon_dev *busmon = (struct busmon_dev *)data;
struct busmon_platdata *pdata = busmon->pdata;
struct busmon_nodegroup *group = NULL;
int i, ret;
/* Check error has been logged */
pr_info("%d interrupt detected\n", (irq - 32));
/* Search busmon group */
for (i = 0; i < ARRAY_SIZE(pdata->nodegroup); i++) {
if ((irq - 32) == pdata->nodegroup[i].irq) {
pr_info("%s group, %d interrupt occurrs \n",
pdata->nodegroup[i].name, irq - 32);
group = &pdata->nodegroup[i];
break;
}
}
if (group) {
ret = busmon_parse_info(busmon, group, true);
if (!ret) {
pr_info("%s can't find the error - interrupt %d\n", pdata->nodegroup[i].name, irq - 32);
}
}
else {
pr_info("%s can't find interrupt %d number \n", pdata->nodegroup[i].name, irq - 32);
}
#if 0
/* Disable to call notifier_call_chain of busmon in Exynos8890 EVT0 */
atomic_notifier_call_chain(&busmon_notifier_list, 0, &busmon->notifier_info);
panic("Error detected by BUS monitor.");
#endif
return IRQ_HANDLED;
}
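/* Allow other drivers to be notified when the bus monitor reports an error. */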
void busmon_notifier_chain_register(struct notifier_block *block)
{
atomic_notifier_chain_register(&busmon_notifier_list, block);
}
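/*
 * Panic notifier: dump any logged bus errors without clearing them so the
 * information is preserved in the panic log.
 */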
static int busmon_logging_panic_handler(struct notifier_block *nb,
unsigned long l, void *buf)
{
struct busmon_panic_block *busmon_panic = (struct busmon_panic_block *)nb;
struct busmon_dev *busmon = busmon_panic->pdev;
int ret;
if (!IS_ERR_OR_NULL(busmon)) {
/* Check error has been logged */
ret = busmon_parse_info(busmon, NULL, false);
if (!ret)
pr_info("No found error in %s\n", __func__);
else
pr_info("Found errors in %s\n", __func__);
}
return 0;
}
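/*
 * Program the hardware: enable the timeout detector (with its initial value)
 * on S-Nodes that request it, then clear and unmask the four error-report
 * interrupts (REQ_R/W, RESP_R/W) on nodes with error reporting enabled.
 */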
static void busmon_init(struct busmon_dev *busmon)
{
struct busmon_platdata *pdata = busmon->pdata;
struct busmon_nodeinfo *node;
unsigned int offset;
int i, j;
for (i = 0; i < ARRAY_SIZE(pdata->nodegroup); i++) {
node = pdata->nodegroup[i].nodeinfo;
for (j = 0; j < pdata->nodegroup[i].nodesize; j++) {
if (node[j].type == S_NODE && node[j].timeout_enabled) {
offset = OFFSET_TIMEOUT_REG;
/* Enable Timeout setting */
__raw_writel(1, node[j].regs + offset + REG_DBG_CTL);
/* set timeout interval value */
__raw_writel(node[j].time_val,
node[j].regs + offset + REG_TIMEOUT_INIT_VAL);
pr_debug("Exynos BUS Monitor irq:%u - %s timeout enabled\n",
node[j].irq, node[j].name);
}
if (node[j].err_rpt_enabled) {
/* clear previous interrupt of req_read */
offset = OFFSET_REQ_R;
__raw_writel(1, node[j].regs + offset + REG_INT_CLR);
/* enable interrupt */
__raw_writel(1, node[j].regs + offset + REG_INT_MASK);
/* clear previous interrupt of req_write */
offset = OFFSET_REQ_W;
__raw_writel(1, node[j].regs + offset + REG_INT_CLR);
/* enable interrupt */
__raw_writel(1, node[j].regs + offset + REG_INT_MASK);
/* clear previous interrupt of response_read */
offset = OFFSET_RESP_R;
__raw_writel(1, node[j].regs + offset + REG_INT_CLR);
/* enable interrupt */
__raw_writel(1, node[j].regs + offset + REG_INT_MASK);
/* clear previous interrupt of response_write */
offset = OFFSET_RESP_W;
__raw_writel(1, node[j].regs + offset + REG_INT_CLR);
/* enable interrupt */
__raw_writel(1, node[j].regs + offset + REG_INT_MASK);
pr_debug("Exynos BUS Monitor irq:%u - %s error reporting enabled\n",
node[j].irq, node[j].name);
}
}
}
}
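/*
 * Probe: allocate driver and platform data, fill the node groups from the
 * static DATA/SFR path tables (CCORE, BUS0, BUS1), request one interrupt per
 * group, map each node's registers, register the panic notifier and program
 * the hardware via busmon_init().
 */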
static int busmon_probe(struct platform_device *pdev)
{
struct busmon_dev *busmon;
struct busmon_panic_block *busmon_panic = NULL;
struct busmon_platdata *pdata;
struct busmon_nodeinfo *node;
char *dev_name;
int ret, i, j;
busmon = devm_kzalloc(&pdev->dev, sizeof(struct busmon_dev), GFP_KERNEL);
if (!busmon) {
dev_err(&pdev->dev, "failed to allocate memory for driver's "
"private data\n");
return -ENOMEM;
}
busmon->dev = &pdev->dev;
spin_lock_init(&busmon->ctrl_lock);
pdata = devm_kzalloc(&pdev->dev, sizeof(struct busmon_platdata), GFP_KERNEL);
if (!pdata) {
dev_err(&pdev->dev, "failed to allocate memory for driver's "
"platform data\n");
return -ENOMEM;
}
busmon->pdata = pdata;
busmon->pdata->masterinfo = masterinfo;
busmon->pdata->rpathinfo = rpathinfo;
for (i = 0; i < ARRAY_SIZE(pdata->nodegroup); i++)
{
switch (i) {
case PATH_DATA_CCORE:
node = ccore_datapath;
dev_name = "DATA_PATH_CCORE";
pdata->nodegroup[i].nodesize = ARRAY_SIZE(ccore_datapath);
break;
case PATH_DATA_BUS0:
node = bus0_datapath;
dev_name = "DATA_PATH_BUS0";
pdata->nodegroup[i].nodesize = ARRAY_SIZE(bus0_datapath);
break;
case PATH_DATA_BUS1:
node = bus1_datapath;
dev_name = "DATA_PATH_BUS1";
pdata->nodegroup[i].nodesize = ARRAY_SIZE(bus1_datapath);
break;
case PATH_SFR_CCORE:
node = ccore_sfrpath;
dev_name = "SFR_PATH_CCORE";
pdata->nodegroup[i].nodesize = ARRAY_SIZE(ccore_sfrpath);
break;
case PATH_SFR_BUS0:
node = bus0_sfrpath;
dev_name = "SFR_PATH_BUS0";
pdata->nodegroup[i].nodesize = ARRAY_SIZE(bus0_sfrpath);
break;
case PATH_SFR_BUS1:
node = bus1_sfrpath;
dev_name = "SFR_PATH_BUS1";
pdata->nodegroup[i].nodesize = ARRAY_SIZE(bus1_sfrpath);
break;
default:
break;
}
pdata->nodegroup[i].nodeinfo = node;
pdata->nodegroup[i].irq = node[0].irq;
pdata->nodegroup[i].name = dev_name;
ret = devm_request_irq(&pdev->dev, pdata->nodegroup[i].irq + 32,
busmon_irq_handler, 0, /* IRQF_GIC_MULTI_TARGET */
dev_name, busmon);
if (ret)
dev_err(&pdev->dev, "failed to request irq for %s\n", dev_name);
for (j = 0; j < pdata->nodegroup[i].nodesize; j++) {
node[j].regs = devm_ioremap_nocache(&pdev->dev, node[j].phy_regs, SZ_16K);
if (node[j].regs == NULL) {
dev_err(&pdev->dev, "failed to claim register region - %s\n", dev_name);
return -ENOENT;
}
}
}
busmon_panic = devm_kzalloc(&pdev->dev,
sizeof(struct busmon_panic_block), GFP_KERNEL);
if (!busmon_panic) {
dev_err(&pdev->dev, "failed to allocate memory for driver's "
"panic handler data\n");
} else {
busmon_panic->nb_panic_block.notifier_call =
busmon_logging_panic_handler;
busmon_panic->pdev = busmon;
atomic_notifier_chain_register(&panic_notifier_list,
&busmon_panic->nb_panic_block);
}
platform_set_drvdata(pdev, busmon);
busmon_init(busmon);
dev_info(&pdev->dev, "success to probe bus monitor driver\n");
return 0;
}
static int busmon_remove(struct platform_device *pdev)
{
platform_set_drvdata(pdev, NULL);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int busmon_suspend(struct device *dev)
{
return 0;
}
static int busmon_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct busmon_dev *busmon = platform_get_drvdata(pdev);
busmon_init(busmon);
return 0;
}
static SIMPLE_DEV_PM_OPS(busmon_pm_ops,
busmon_suspend,
busmon_resume);
#define BUSMON_PM (&busmon_pm_ops)
#else
#define BUSMON_PM NULL
#endif
static struct platform_driver exynos_busmon_driver = {
.probe = busmon_probe,
.remove = busmon_remove,
.driver = {
.name = "exynos-busmon",
.of_match_table = busmon_dt_match,
.pm = BUSMON_PM,
},
};
module_platform_driver(exynos_busmon_driver);
MODULE_DESCRIPTION("Samsung Exynos8890 BUS MONITOR DRIVER");
MODULE_AUTHOR("Hosung Kim <hosung0.kim@samsung.com");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:exynos-busmon");

View file

@ -0,0 +1,99 @@
/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/coresight.h>
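/*
 * Build a coresight_platform_data structure from the device-tree properties
 * of @node: device id and name, number of in-ports and, for each out-port,
 * the connected child device id and child port.
 *
 * A hypothetical node using the properties parsed below (values purely
 * illustrative) might look like:
 *
 *	coresight-id = <0>;
 *	coresight-name = "coresight-etm0";
 *	coresight-nr-inports = <1>;
 *	coresight-outports = <0>;
 *	coresight-child-list = <&funnel>;
 *	coresight-child-ports = <0>;
 *	coresight-default-sink;
 */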
struct coresight_platform_data *of_get_coresight_platform_data(
struct device *dev, struct device_node *node)
{
int i, ret = 0;
uint32_t outports_len = 0;
struct device_node *child_node;
struct coresight_platform_data *pdata;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return ERR_PTR(-ENOMEM);
ret = of_property_read_u32(node, "coresight-id", &pdata->id);
if (ret)
return ERR_PTR(ret);
ret = of_property_read_string(node, "coresight-name", &pdata->name);
if (ret)
return ERR_PTR(ret);
ret = of_property_read_u32(node, "coresight-nr-inports",
&pdata->nr_inports);
if (ret)
return ERR_PTR(ret);
pdata->nr_outports = 0;
if (of_get_property(node, "coresight-outports", &outports_len))
pdata->nr_outports = outports_len/sizeof(uint32_t);
if (pdata->nr_outports) {
pdata->outports = devm_kzalloc(dev, pdata->nr_outports *
sizeof(*pdata->outports),
GFP_KERNEL);
if (!pdata->outports)
return ERR_PTR(-ENOMEM);
ret = of_property_read_u32_array(node, "coresight-outports",
(u32 *)pdata->outports,
pdata->nr_outports);
if (ret)
return ERR_PTR(ret);
pdata->child_ids = devm_kzalloc(dev, pdata->nr_outports *
sizeof(*pdata->child_ids),
GFP_KERNEL);
if (!pdata->child_ids)
return ERR_PTR(-ENOMEM);
for (i = 0; i < pdata->nr_outports; i++) {
child_node = of_parse_phandle(node,
"coresight-child-list",
i);
if (!child_node)
return ERR_PTR(-EINVAL);
ret = of_property_read_u32(child_node, "coresight-id",
(u32 *)&pdata->child_ids[i]);
of_node_put(child_node);
if (ret)
return ERR_PTR(ret);
}
pdata->child_ports = devm_kzalloc(dev, pdata->nr_outports *
sizeof(*pdata->child_ports),
GFP_KERNEL);
if (!pdata->child_ports)
return ERR_PTR(-ENOMEM);
ret = of_property_read_u32_array(node, "coresight-child-ports",
(u32 *)pdata->child_ports,
pdata->nr_outports);
if (ret)
return ERR_PTR(ret);
}
pdata->default_sink = of_property_read_bool(node,
"coresight-default-sink");
return pdata;
}
EXPORT_SYMBOL_GPL(of_get_coresight_platform_data);

View file

@ -0,0 +1,19 @@
/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __OF_CORESIGHT_H
#define __OF_CORESIGHT_H
extern struct coresight_platform_data *of_get_coresight_platform_data(
struct device *dev, struct device_node *node);
#endif