Fixed MTP to work with TWRP

awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

34
arch/arc/kernel/Makefile Normal file

@@ -0,0 +1,34 @@
#
# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
# Pass UTS_MACHINE for user_regset definition
CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
obj-y := arcksyms.o setup.o irq.o time.o reset.o ptrace.o entry.o process.o
obj-y += signal.o traps.o sys.o troubleshoot.o stacktrace.o disasm.o clk.o
obj-y += devtree.o
obj-$(CONFIG_MODULES) += arcksyms.o module.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_ARC_DW2_UNWIND) += unwind.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_ARC_EMUL_UNALIGNED) += unaligned.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_ARC_METAWARE_HLINK) += arc_hostlink.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_ARC_FPU_SAVE_RESTORE) += fpu.o
CFLAGS_fpu.o += -mdpfp
ifdef CONFIG_ARC_DW2_UNWIND
CFLAGS_ctx_sw.o += -fno-omit-frame-pointer
obj-y += ctx_sw.o
else
obj-y += ctx_sw_asm.o
endif
extra-y := vmlinux.lds head.o

58
arch/arc/kernel/arc_hostlink.c Normal file

@@ -0,0 +1,58 @@
/*
* arc_hostlink.c: Pseudo-driver for Metaware provided "hostlink" facility
*
* Allows Linux userland access to host in absence of any peripherals.
*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/fs.h> /* file_operations */
#include <linux/miscdevice.h>
#include <linux/mm.h> /* VM_IO */
#include <linux/module.h>
#include <linux/uaccess.h>
static unsigned char __HOSTLINK__[4 * PAGE_SIZE] __aligned(PAGE_SIZE);
static int arc_hl_mmap(struct file *fp, struct vm_area_struct *vma)
{
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start,
vma->vm_page_prot)) {
pr_warn("Hostlink buffer mmap ERROR\n");
return -EAGAIN;
}
return 0;
}
static long arc_hl_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
/* we only support returning the physical addr to mmap in user space */
put_user((unsigned int)__HOSTLINK__, (int __user *)arg);
return 0;
}
static const struct file_operations arc_hl_fops = {
.unlocked_ioctl = arc_hl_ioctl,
.mmap = arc_hl_mmap,
};
static struct miscdevice arc_hl_dev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "hostlink",
.fops = &arc_hl_fops
};
static int __init arc_hl_init(void)
{
pr_info("ARC Hostlink driver mmap at 0x%p\n", __HOSTLINK__);
return misc_register(&arc_hl_dev);
}
module_init(arc_hl_init);
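
Taken together, the ioctl and mmap handlers define a tiny userspace protocol: ask the driver where the buffer lives, then pass that address back as the mmap offset (note the raw vm_pgoff use above). A minimal userspace sketch, assuming the misc device appears as /dev/hostlink, 4 KB pages, and the 4-page buffer declared above; the ioctl cmd value is ignored by this driver:

/* userspace sketch, not part of this commit */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	unsigned int phys;
	void *buf;
	int fd = open("/dev/hostlink", O_RDWR);

	if (fd < 0)
		return 1;

	/* cmd is ignored: the driver always returns the buffer address */
	if (ioctl(fd, 0, &phys) < 0)
		return 1;

	/* the returned address doubles as the mmap offset (becomes vm_pgoff) */
	buf = mmap(NULL, 4 * 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, phys);
	if (buf == MAP_FAILED)
		return 1;

	printf("hostlink buffer mapped at %p\n", buf);
	munmap(buf, 4 * 4096);
	close(fd);
	return 0;
}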

56
arch/arc/kernel/arcksyms.c Normal file

@@ -0,0 +1,56 @@
/*
* arcksyms.c - Exporting symbols not exportable from their own sources
*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/module.h>
/* libgcc functions, not part of kernel sources */
extern void __ashldi3(void);
extern void __ashrdi3(void);
extern void __divsi3(void);
extern void __divsf3(void);
extern void __lshrdi3(void);
extern void __modsi3(void);
extern void __muldi3(void);
extern void __ucmpdi2(void);
extern void __udivsi3(void);
extern void __umodsi3(void);
extern void __cmpdi2(void);
extern void __fixunsdfsi(void);
extern void __muldf3(void);
extern void __divdf3(void);
extern void __floatunsidf(void);
extern void __floatunsisf(void);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__divsi3);
EXPORT_SYMBOL(__divsf3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__modsi3);
EXPORT_SYMBOL(__muldi3);
EXPORT_SYMBOL(__ucmpdi2);
EXPORT_SYMBOL(__udivsi3);
EXPORT_SYMBOL(__umodsi3);
EXPORT_SYMBOL(__cmpdi2);
EXPORT_SYMBOL(__fixunsdfsi);
EXPORT_SYMBOL(__muldf3);
EXPORT_SYMBOL(__divdf3);
EXPORT_SYMBOL(__floatunsidf);
EXPORT_SYMBOL(__floatunsisf);
/* ARC optimised assembler routines */
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strcmp);
EXPORT_SYMBOL(strlen);
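
These exports exist because gcc quietly emits calls to libgcc helpers for operations ARC700 has no single instruction for (64-bit multiply/divide, software float, etc.), and a loadable module that triggers one of those lowerings needs the symbol resolved at load time. A hedged illustration, not from this tree, of a module whose math pulls in __muldi3:

/* Illustrative module (not part of this commit): the 64-bit multiply
 * below is lowered by gcc to a __muldi3 call on 32-bit ARC, so without
 * the EXPORT_SYMBOL(__muldi3) above, insmod would fail with
 * "Unknown symbol __muldi3".
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

static u64 bytes = 3;
module_param(bytes, ullong, 0444);	/* runtime value defeats constant folding */

static int __init demo_init(void)
{
	u64 scaled = bytes * 1000000000ULL;	/* -> __muldi3 */

	pr_info("scaled: %llu\n", scaled);
	return 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");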

63
arch/arc/kernel/asm-offsets.c Normal file

@@ -0,0 +1,63 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/thread_info.h>
#include <linux/kbuild.h>
#include <linux/ptrace.h>
#include <asm/hardirq.h>
#include <asm/page.h>
int main(void)
{
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
BLANK();
DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
DEFINE(THREAD_CALLEE_REG, offsetof(struct thread_struct, callee_reg));
DEFINE(THREAD_FAULT_ADDR,
offsetof(struct thread_struct, fault_address));
BLANK();
DEFINE(THREAD_INFO_FLAGS, offsetof(struct thread_info, flags));
DEFINE(THREAD_INFO_PREEMPT_COUNT,
offsetof(struct thread_info, preempt_count));
BLANK();
DEFINE(TASK_ACT_MM, offsetof(struct task_struct, active_mm));
DEFINE(TASK_TGID, offsetof(struct task_struct, tgid));
DEFINE(MM_CTXT, offsetof(struct mm_struct, context));
DEFINE(MM_PGD, offsetof(struct mm_struct, pgd));
DEFINE(MM_CTXT_ASID, offsetof(mm_context_t, asid));
BLANK();
DEFINE(PT_status32, offsetof(struct pt_regs, status32));
DEFINE(PT_event, offsetof(struct pt_regs, event));
DEFINE(PT_sp, offsetof(struct pt_regs, sp));
DEFINE(PT_r0, offsetof(struct pt_regs, r0));
DEFINE(PT_r1, offsetof(struct pt_regs, r1));
DEFINE(PT_r2, offsetof(struct pt_regs, r2));
DEFINE(PT_r3, offsetof(struct pt_regs, r3));
DEFINE(PT_r4, offsetof(struct pt_regs, r4));
DEFINE(PT_r5, offsetof(struct pt_regs, r5));
DEFINE(PT_r6, offsetof(struct pt_regs, r6));
DEFINE(PT_r7, offsetof(struct pt_regs, r7));
DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs));
DEFINE(SZ_PT_REGS, sizeof(struct pt_regs));
return 0;
}
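
This file is never linked into the kernel proper: kbuild compiles it to assembly and scrapes the DEFINE() output into include/generated/asm-offsets.h, which the assembly code below pulls in via <asm/asm-offsets.h>. A sketch of the generated header, with made-up offset values for illustration:

/* include/generated/asm-offsets.h -- generated; values illustrative only */
#define TASK_THREAD 1088		/* offsetof(struct task_struct, thread) */
#define THREAD_KSP 0			/* offsetof(struct thread_struct, ksp) */
#define THREAD_INFO_PREEMPT_COUNT 12	/* offsetof(struct thread_info, preempt_count) */
#define PT_status32 8			/* offsetof(struct pt_regs, status32) */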

21
arch/arc/kernel/clk.c Normal file

@@ -0,0 +1,21 @@
/*
* Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <asm/clk.h>
unsigned long core_freq = 80000000;
/*
* As of now we default to device-tree provided clock
* In future we can determine this in early boot
*/
int arc_set_core_freq(unsigned long freq)
{
core_freq = freq;
return 0;
}

116
arch/arc/kernel/ctx_sw.c Normal file

@@ -0,0 +1,116 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Vineetg: Aug 2009
* -"C" version of lowest level context switch asm macro called by schedular
* gcc doesn't generate the dward CFI info for hand written asm, hence can't
* backtrace out of it (e.g. tasks sleeping in kernel).
* So we cheat a bit by writing almost similar code in inline-asm.
* -This is a hacky way of doing things, but there is no other simple way.
* I don't want/intend to extend unwinding code to understand raw asm
*/
#include <asm/asm-offsets.h>
#include <linux/sched.h>
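/* word (not byte) offset of thread.ksp: ST.AS / LD.AS scale their s9 offset by 4 */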
#define KSP_WORD_OFF ((TASK_THREAD + THREAD_KSP) / 4)
struct task_struct *__sched
__switch_to(struct task_struct *prev_task, struct task_struct *next_task)
{
unsigned int tmp;
unsigned int prev = (unsigned int)prev_task;
unsigned int next = (unsigned int)next_task;
__asm__ __volatile__(
/* FP/BLINK save generated by gcc (standard function prologue) */
"st.a r13, [sp, -4] \n\t"
"st.a r14, [sp, -4] \n\t"
"st.a r15, [sp, -4] \n\t"
"st.a r16, [sp, -4] \n\t"
"st.a r17, [sp, -4] \n\t"
"st.a r18, [sp, -4] \n\t"
"st.a r19, [sp, -4] \n\t"
"st.a r20, [sp, -4] \n\t"
"st.a r21, [sp, -4] \n\t"
"st.a r22, [sp, -4] \n\t"
"st.a r23, [sp, -4] \n\t"
"st.a r24, [sp, -4] \n\t"
#ifndef CONFIG_ARC_CURR_IN_REG
"st.a r25, [sp, -4] \n\t"
#else
"sub sp, sp, 4 \n\t" /* usual r25 placeholder */
#endif
/* set ksp of outgoing task in tsk->thread.ksp */
#if KSP_WORD_OFF <= 255
"st.as sp, [%3, %1] \n\t"
#else
/*
* Workaround for NR_CPUS=4k
* %1 is bigger than 255 (S9 offset for st.as)
*/
"add2 r24, %3, %1 \n\t"
"st sp, [r24] \n\t"
#endif
"sync \n\t"
/*
* setup _current_task with incoming tsk.
* optionally, set r25 to that as well
* For SMP extra work to get to &_current_task[cpu]
* (open coded SET_CURR_TASK_ON_CPU)
*/
#ifndef CONFIG_SMP
"st %2, [@_current_task] \n\t"
#else
"lr r24, [identity] \n\t"
"lsr r24, r24, 8 \n\t"
"bmsk r24, r24, 7 \n\t"
"add2 r24, @_current_task, r24 \n\t"
"st %2, [r24] \n\t"
#endif
#ifdef CONFIG_ARC_CURR_IN_REG
"mov r25, %2 \n\t"
#endif
/* get ksp of incoming task from tsk->thread.ksp */
"ld.as sp, [%2, %1] \n\t"
/* start loading its CALLEE reg file */
#ifndef CONFIG_ARC_CURR_IN_REG
"ld.ab r25, [sp, 4] \n\t"
#else
"add sp, sp, 4 \n\t"
#endif
"ld.ab r24, [sp, 4] \n\t"
"ld.ab r23, [sp, 4] \n\t"
"ld.ab r22, [sp, 4] \n\t"
"ld.ab r21, [sp, 4] \n\t"
"ld.ab r20, [sp, 4] \n\t"
"ld.ab r19, [sp, 4] \n\t"
"ld.ab r18, [sp, 4] \n\t"
"ld.ab r17, [sp, 4] \n\t"
"ld.ab r16, [sp, 4] \n\t"
"ld.ab r15, [sp, 4] \n\t"
"ld.ab r14, [sp, 4] \n\t"
"ld.ab r13, [sp, 4] \n\t"
/* last (ret value) = prev : although for ARC it is mov r0, r0 */
"mov %0, %3 \n\t"
/* FP/BLINK restore generated by gcc (standard func epilogue) */
: "=r"(tmp)
: "n"(KSP_WORD_OFF), "r"(next), "r"(prev)
: "blink"
);
return (struct task_struct *)tmp;
}

65
arch/arc/kernel/ctx_sw_asm.S Normal file

@@ -0,0 +1,65 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Vineetg: Aug 2009
* -Moved core context switch macro out of entry.S into this file.
* -This is the more "natural" hand written assembler
*/
#include <linux/linkage.h>
#include <asm/entry.h> /* For the SAVE_* macros */
#include <asm/asm-offsets.h>
#define KSP_WORD_OFF ((TASK_THREAD + THREAD_KSP) / 4)
;################### Low Level Context Switch ##########################
.section .sched.text,"ax",@progbits
.align 4
.global __switch_to
.type __switch_to, @function
__switch_to:
/* Save regs on kernel mode stack of task */
st.a blink, [sp, -4]
st.a fp, [sp, -4]
SAVE_CALLEE_SAVED_KERNEL
/* Save the current KSP in task->thread.ksp */
#if KSP_WORD_OFF <= 255
st.as sp, [r0, KSP_WORD_OFF]
#else
/* Workaround for NR_CPUS=4k as ST.as can only take s9 offset */
add2 r24, r0, KSP_WORD_OFF
st sp, [r24]
#endif
/*
* Return last task in r0 (return reg)
* On ARC, Return reg = First Arg reg = r0.
* Since we already have last task in r0,
* don't need to do anything special to return it
*/
/* hardware memory barrier */
sync
/*
* switch to new task, contained in r1
* Temp reg r3 is required to get the ptr to store val
*/
SET_CURR_TASK_ON_CPU r1, r3
/* reload SP with kernel mode stack pointer in task->thread.ksp */
ld.as sp, [r1, (TASK_THREAD + THREAD_KSP)/4]
/* restore the registers */
RESTORE_CALLEE_SAVED_KERNEL
ld.ab fp, [sp, 4]
ld.ab blink, [sp, 4]
j [blink]
END(__switch_to)

60
arch/arc/kernel/devtree.c Normal file

@@ -0,0 +1,60 @@
/*
* Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
*
* Based on reduced version of METAG
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/reboot.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <asm/clk.h>
#include <asm/mach_desc.h>
static const void * __init arch_get_next_mach(const char *const **match)
{
static const struct machine_desc *mdesc = __arch_info_begin;
const struct machine_desc *m = mdesc;
if (m >= __arch_info_end)
return NULL;
mdesc++;
*match = m->dt_compat;
return m;
}
/**
* setup_machine_fdt - Machine setup when a dtb was passed to the kernel
* @dt: virtual address pointer to dt blob
*
* If a dtb was passed to the kernel, then use it to choose the correct
* machine_desc and to setup the system.
*/
const struct machine_desc * __init setup_machine_fdt(void *dt)
{
const struct machine_desc *mdesc;
unsigned long dt_root;
const void *clk;
int len;
if (!early_init_dt_scan(dt))
return NULL;
mdesc = of_flat_dt_match_machine(NULL, arch_get_next_mach);
if (!mdesc)
machine_halt();
dt_root = of_get_flat_dt_root();
clk = of_get_flat_dt_prop(dt_root, "clock-frequency", &len);
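/* len is returned in bytes; of_read_ulong() takes a count of 32-bit cells */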
if (clk)
arc_set_core_freq(of_read_ulong(clk, len/4));
return mdesc;
}

538
arch/arc/kernel/disasm.c Normal file

@@ -0,0 +1,538 @@
/*
* several functions that help interpret ARC instructions
* used for unaligned accesses, kprobes and kgdb
*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/disasm.h>
#if defined(CONFIG_KGDB) || defined(CONFIG_ARC_EMUL_UNALIGNED) || \
defined(CONFIG_KPROBES)
/* disasm_instr: Analyses instruction at addr, stores
* findings in *state
*/
void __kprobes disasm_instr(unsigned long addr, struct disasm_state *state,
int userspace, struct pt_regs *regs, struct callee_regs *cregs)
{
int fieldA = 0;
int fieldC = 0, fieldCisReg = 0;
uint16_t word1 = 0, word0 = 0;
int subopcode, is_linked, op_format;
uint16_t *ins_ptr;
uint16_t ins_buf[4];
int bytes_not_copied = 0;
memset(state, 0, sizeof(struct disasm_state));
/* This fetches the upper part of the 32 bit instruction
* in both Little Endian and Big Endian configurations. */
if (userspace) {
bytes_not_copied = copy_from_user(ins_buf,
(const void __user *) addr, 8);
if (bytes_not_copied > 6)
goto fault;
ins_ptr = ins_buf;
} else {
ins_ptr = (uint16_t *) addr;
}
word1 = *((uint16_t *)addr);
state->major_opcode = (word1 >> 11) & 0x1F;
/* Check if the instruction is 32 bit or 16 bit instruction */
if (state->major_opcode < 0x0B) {
if (bytes_not_copied > 4)
goto fault;
state->instr_len = 4;
word0 = *((uint16_t *)(addr+2));
state->words[0] = (word1 << 16) | word0;
} else {
state->instr_len = 2;
state->words[0] = word1;
}
/* Read the second word in case of limm */
word1 = *((uint16_t *)(addr + state->instr_len));
word0 = *((uint16_t *)(addr + state->instr_len + 2));
state->words[1] = (word1 << 16) | word0;
switch (state->major_opcode) {
case op_Bcc:
state->is_branch = 1;
/* unconditional branch s25, conditional branch s21 */
fieldA = (IS_BIT(state->words[0], 16)) ?
FIELD_s25(state->words[0]) :
FIELD_s21(state->words[0]);
state->delay_slot = IS_BIT(state->words[0], 5);
state->target = fieldA + (addr & ~0x3);
state->flow = direct_jump;
break;
case op_BLcc:
if (IS_BIT(state->words[0], 16)) {
/* Branch and Link*/
/* unconditional branch s25, conditional branch s21 */
fieldA = (IS_BIT(state->words[0], 17)) ?
(FIELD_s25(state->words[0]) & ~0x3) :
FIELD_s21(state->words[0]);
state->flow = direct_call;
} else {
/* Branch On Compare */
fieldA = FIELD_s9(state->words[0]) & ~0x3;
state->flow = direct_jump;
}
state->delay_slot = IS_BIT(state->words[0], 5);
state->target = fieldA + (addr & ~0x3);
state->is_branch = 1;
break;
case op_LD: /* LD<zz> a,[b,s9] */
state->write = 0;
state->di = BITS(state->words[0], 11, 11);
if (state->di)
break;
state->x = BITS(state->words[0], 6, 6);
state->zz = BITS(state->words[0], 7, 8);
state->aa = BITS(state->words[0], 9, 10);
state->wb_reg = FIELD_B(state->words[0]);
if (state->wb_reg == REG_LIMM) {
state->instr_len += 4;
state->aa = 0;
state->src1 = state->words[1];
} else {
state->src1 = get_reg(state->wb_reg, regs, cregs);
}
state->src2 = FIELD_s9(state->words[0]);
state->dest = FIELD_A(state->words[0]);
state->pref = (state->dest == REG_LIMM);
break;
case op_ST:
state->write = 1;
state->di = BITS(state->words[0], 5, 5);
if (state->di)
break;
state->aa = BITS(state->words[0], 3, 4);
state->zz = BITS(state->words[0], 1, 2);
state->src1 = FIELD_C(state->words[0]);
if (state->src1 == REG_LIMM) {
state->instr_len += 4;
state->src1 = state->words[1];
} else {
state->src1 = get_reg(state->src1, regs, cregs);
}
state->wb_reg = FIELD_B(state->words[0]);
if (state->wb_reg == REG_LIMM) {
state->aa = 0;
state->instr_len += 4;
state->src2 = state->words[1];
} else {
state->src2 = get_reg(state->wb_reg, regs, cregs);
}
state->src3 = FIELD_s9(state->words[0]);
break;
case op_MAJOR_4:
subopcode = MINOR_OPCODE(state->words[0]);
switch (subopcode) {
case 32: /* Jcc */
case 33: /* Jcc.D */
case 34: /* JLcc */
case 35: /* JLcc.D */
is_linked = 0;
if (subopcode == 33 || subopcode == 35)
state->delay_slot = 1;
if (subopcode == 34 || subopcode == 35)
is_linked = 1;
fieldCisReg = 0;
op_format = BITS(state->words[0], 22, 23);
if (op_format == 0 || ((op_format == 3) &&
(!IS_BIT(state->words[0], 5)))) {
fieldC = FIELD_C(state->words[0]);
if (fieldC == REG_LIMM) {
fieldC = state->words[1];
state->instr_len += 4;
} else {
fieldCisReg = 1;
}
} else if (op_format == 1 || ((op_format == 3)
&& (IS_BIT(state->words[0], 5)))) {
fieldC = FIELD_C(state->words[0]);
} else {
/* op_format == 2 */
fieldC = FIELD_s12(state->words[0]);
}
if (!fieldCisReg) {
state->target = fieldC;
state->flow = is_linked ?
direct_call : direct_jump;
} else {
state->target = get_reg(fieldC, regs, cregs);
state->flow = is_linked ?
indirect_call : indirect_jump;
}
state->is_branch = 1;
break;
case 40: /* LPcc */
if (BITS(state->words[0], 22, 23) == 3) {
/* Conditional LPcc u7 */
fieldC = FIELD_C(state->words[0]);
fieldC = fieldC << 1;
fieldC += (addr & ~0x03);
state->is_branch = 1;
state->flow = direct_jump;
state->target = fieldC;
}
/* For Unconditional lp, next pc is the fall through
* which is updated */
break;
case 48 ... 55: /* LD a,[b,c] */
state->di = BITS(state->words[0], 15, 15);
if (state->di)
break;
state->x = BITS(state->words[0], 16, 16);
state->zz = BITS(state->words[0], 17, 18);
state->aa = BITS(state->words[0], 22, 23);
state->wb_reg = FIELD_B(state->words[0]);
if (state->wb_reg == REG_LIMM) {
state->instr_len += 4;
state->src1 = state->words[1];
} else {
state->src1 = get_reg(state->wb_reg, regs,
cregs);
}
state->src2 = FIELD_C(state->words[0]);
if (state->src2 == REG_LIMM) {
state->instr_len += 4;
state->src2 = state->words[1];
} else {
state->src2 = get_reg(state->src2, regs,
cregs);
}
state->dest = FIELD_A(state->words[0]);
if (state->dest == REG_LIMM)
state->pref = 1;
break;
case 10: /* MOV */
/* still need to check for limm to extract instr len */
/* MOV is special case because it only takes 2 args */
switch (BITS(state->words[0], 22, 23)) {
case 0: /* OP a,b,c */
if (FIELD_C(state->words[0]) == REG_LIMM)
state->instr_len += 4;
break;
case 1: /* OP a,b,u6 */
break;
case 2: /* OP b,b,s12 */
break;
case 3: /* OP.cc b,b,c/u6 */
if ((!IS_BIT(state->words[0], 5)) &&
(FIELD_C(state->words[0]) == REG_LIMM))
state->instr_len += 4;
break;
}
break;
default:
/* Not a Load, Jump or Loop instruction */
/* still need to check for limm to extract instr len */
switch (BITS(state->words[0], 22, 23)) {
case 0: /* OP a,b,c */
if ((FIELD_B(state->words[0]) == REG_LIMM) ||
(FIELD_C(state->words[0]) == REG_LIMM))
state->instr_len += 4;
break;
case 1: /* OP a,b,u6 */
break;
case 2: /* OP b,b,s12 */
break;
case 3: /* OP.cc b,b,c/u6 */
if ((!IS_BIT(state->words[0], 5)) &&
((FIELD_B(state->words[0]) == REG_LIMM) ||
(FIELD_C(state->words[0]) == REG_LIMM)))
state->instr_len += 4;
break;
}
break;
}
break;
/* 16 Bit Instructions */
case op_LD_ADD: /* LD_S|LDB_S|LDW_S a,[b,c] */
state->zz = BITS(state->words[0], 3, 4);
state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
state->src2 = get_reg(FIELD_S_C(state->words[0]), regs, cregs);
state->dest = FIELD_S_A(state->words[0]);
break;
case op_ADD_MOV_CMP:
/* check for limm, ignore mov_s h,b (== mov_s 0,b) */
if ((BITS(state->words[0], 3, 4) < 3) &&
(FIELD_S_H(state->words[0]) == REG_LIMM))
state->instr_len += 4;
break;
case op_S:
subopcode = BITS(state->words[0], 5, 7);
switch (subopcode) {
case 0: /* j_s */
case 1: /* j_s.d */
case 2: /* jl_s */
case 3: /* jl_s.d */
state->target = get_reg(FIELD_S_B(state->words[0]),
regs, cregs);
state->delay_slot = subopcode & 1;
state->flow = (subopcode >= 2) ?
direct_call : indirect_jump;
break;
case 7:
switch (BITS(state->words[0], 8, 10)) {
case 4: /* jeq_s [blink] */
case 5: /* jne_s [blink] */
case 6: /* j_s [blink] */
case 7: /* j_s.d [blink] */
state->delay_slot = (subopcode == 7);
state->flow = indirect_jump;
state->target = get_reg(31, regs, cregs);
default:
break;
}
default:
break;
}
break;
case op_LD_S: /* LD_S c, [b, u7] */
state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
state->src2 = FIELD_S_u7(state->words[0]);
state->dest = FIELD_S_C(state->words[0]);
break;
case op_LDB_S:
case op_STB_S:
/* no further handling required as byte accesses should not
* cause an unaligned access exception */
state->zz = 1;
break;
case op_LDWX_S: /* LDWX_S c, [b, u6] */
state->x = 1;
/* intentional fall-through */
case op_LDW_S: /* LDW_S c, [b, u6] */
state->zz = 2;
state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
state->src2 = FIELD_S_u6(state->words[0]);
state->dest = FIELD_S_C(state->words[0]);
break;
case op_ST_S: /* ST_S c, [b, u7] */
state->write = 1;
state->src1 = get_reg(FIELD_S_C(state->words[0]), regs, cregs);
state->src2 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
state->src3 = FIELD_S_u7(state->words[0]);
break;
case op_STW_S: /* STW_S c,[b,u6] */
state->write = 1;
state->zz = 2;
state->src1 = get_reg(FIELD_S_C(state->words[0]), regs, cregs);
state->src2 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
state->src3 = FIELD_S_u6(state->words[0]);
break;
case op_SP: /* LD_S|LDB_S b,[sp,u7], ST_S|STB_S b,[sp,u7] */
/* note: we are ignoring possibility of:
* ADD_S, SUB_S, PUSH_S, POP_S as these should not
* cause unaligned exception anyway */
state->write = BITS(state->words[0], 6, 6);
state->zz = BITS(state->words[0], 5, 5);
if (state->zz)
break; /* byte accesses should not come here */
if (!state->write) {
state->src1 = get_reg(28, regs, cregs);
state->src2 = FIELD_S_u7(state->words[0]);
state->dest = FIELD_S_B(state->words[0]);
} else {
state->src1 = get_reg(FIELD_S_B(state->words[0]), regs,
cregs);
state->src2 = get_reg(28, regs, cregs);
state->src3 = FIELD_S_u7(state->words[0]);
}
break;
case op_GP: /* LD_S|LDB_S|LDW_S r0,[gp,s11/s9/s10] */
/* note: ADD_S r0, gp, s11 is ignored */
state->zz = BITS(state->words[0], 9, 10);
state->src1 = get_reg(26, regs, cregs);
state->src2 = state->zz ? FIELD_S_s10(state->words[0]) :
FIELD_S_s11(state->words[0]);
state->dest = 0;
break;
case op_Pcl: /* LD_S b,[pcl,u10] */
state->src1 = regs->ret & ~3;
state->src2 = FIELD_S_u10(state->words[0]);
state->dest = FIELD_S_B(state->words[0]);
break;
case op_BR_S:
state->target = FIELD_S_s8(state->words[0]) + (addr & ~0x03);
state->flow = direct_jump;
state->is_branch = 1;
break;
case op_B_S:
fieldA = (BITS(state->words[0], 9, 10) == 3) ?
FIELD_S_s7(state->words[0]) :
FIELD_S_s10(state->words[0]);
state->target = fieldA + (addr & ~0x03);
state->flow = direct_jump;
state->is_branch = 1;
break;
case op_BL_S:
state->target = FIELD_S_s13(state->words[0]) + (addr & ~0x03);
state->flow = direct_call;
state->is_branch = 1;
break;
default:
break;
}
if (bytes_not_copied <= (8 - state->instr_len))
return;
fault: state->fault = 1;
}
long __kprobes get_reg(int reg, struct pt_regs *regs,
struct callee_regs *cregs)
{
long *p;
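/* pt_regs lays out r0..r12 in descending member order, so r<n> sits at
 * (&regs->r0)[-n]; likewise callee_regs holds r13..r25 descending, giving
 * (&cregs->r13)[13 - n]
 */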
if (reg <= 12) {
p = &regs->r0;
return p[-reg];
}
if (cregs && (reg <= 25)) {
p = &cregs->r13;
return p[13-reg];
}
if (reg == 26)
return regs->r26;
if (reg == 27)
return regs->fp;
if (reg == 28)
return regs->sp;
if (reg == 31)
return regs->blink;
return 0;
}
void __kprobes set_reg(int reg, long val, struct pt_regs *regs,
struct callee_regs *cregs)
{
long *p;
switch (reg) {
case 0 ... 12:
p = &regs->r0;
p[-reg] = val;
break;
case 13 ... 25:
if (cregs) {
p = &cregs->r13;
p[13-reg] = val;
}
break;
case 26:
regs->r26 = val;
break;
case 27:
regs->fp = val;
break;
case 28:
regs->sp = val;
break;
case 31:
regs->blink = val;
break;
default:
break;
}
}
/*
* Disassembles the insn at @pc and sets @next_pc to next PC (which could be
* @pc +2/4/6 (ARCompact ISA allows free intermixing of 16/32 bit insns).
*
* If @pc is a branch
* -@tgt_if_br is set to branch target.
* -If branch has delay slot, @next_pc updated with actual next PC.
*/
int __kprobes disasm_next_pc(unsigned long pc, struct pt_regs *regs,
struct callee_regs *cregs,
unsigned long *next_pc, unsigned long *tgt_if_br)
{
struct disasm_state instr;
memset(&instr, 0, sizeof(struct disasm_state));
disasm_instr(pc, &instr, 0, regs, cregs);
*next_pc = pc + instr.instr_len;
/* Instruction with possible two targets branch, jump and loop */
if (instr.is_branch)
*tgt_if_br = instr.target;
/* For the instructions with delay slots, the fall through is the
* instruction following the instruction in delay slot.
*/
if (instr.delay_slot) {
struct disasm_state instr_d;
disasm_instr(*next_pc, &instr_d, 0, regs, cregs);
*next_pc += instr_d.instr_len;
}
/* Zero Overhead Loop - end of the loop */
if (!(regs->status32 & STATUS32_L) && (*next_pc == regs->lp_end)
&& (regs->lp_count > 1)) {
*next_pc = regs->lp_start;
}
return instr.is_branch;
}
#endif /* CONFIG_KGDB || CONFIG_ARC_EMUL_UNALIGNED || CONFIG_KPROBES */
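
disasm_next_pc() is the hook that lets kprobes and kgdb single-step without hardware support: plant a breakpoint at the fall-through PC and, when the insn can branch, a second one at the target. A hedged sketch of that pattern; arc_install_bp() is a hypothetical stand-in for the real breakpoint plumbing in kprobes.c/kgdb.c:

/* Sketch only: software single-step over the insn at regs->ret.
 * arc_install_bp() is hypothetical; the real plumbing differs.
 */
extern void arc_install_bp(unsigned long pc);

static void sstep_prepare(struct pt_regs *regs, struct callee_regs *cregs)
{
	unsigned long next_pc = 0, tgt = 0;
	int is_branch = disasm_next_pc(regs->ret, regs, cregs,
				       &next_pc, &tgt);

	arc_install_bp(next_pc);	/* fall-through (past any delay slot) */
	if (is_branch)
		arc_install_bp(tgt);	/* taken path */
}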

773
arch/arc/kernel/entry.S Normal file

@@ -0,0 +1,773 @@
/*
* Low Level Interrupts/Traps/Exceptions(non-TLB) Handling for ARC
*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* vineetg: May 2011
* -Userspace unaligned access emulation
*
* vineetg: Feb 2011 (ptrace low level code fixes)
* -traced syscall return code (r0) was not saved into pt_regs for restoring
* into user reg-file when traced task rets to user space.
* -syscalls needing arch-wrappers (mainly for passing sp as pt_regs)
* were not invoking post-syscall trace hook (jumping directly into
* ret_from_system_call)
*
* vineetg: Nov 2010:
* -Vector table jumps (@8 bytes) converted into branches (@4 bytes)
* -To maintain the slot size of 8 bytes/vector, added nop, which is
* not executed at runtime.
*
* vineetg: Nov 2009 (Everything needed for TIF_RESTORE_SIGMASK)
* -do_signal() invoked upon TIF_RESTORE_SIGMASK as well
* -Wrappers for sys_{,rt_}sigsuspend() no longer needed as they don't
* need ptregs anymore
*
* Vineetg: Oct 2009
* -In a rare scenario, Process gets a Priv-V exception and gets scheduled
* out. Since we don't do FAKE RTIE for Priv-V, CPU exception state remains
* active (AE bit enabled). This causes a double fault for a subseq valid
* exception. Thus FAKE RTIE needed in low level Priv-Violation handler.
* Instr Error could also cause similar scenario, so same there as well.
*
* Vineetg: March 2009 (Supporting 2 levels of Interrupts)
*
* Vineetg: Aug 28th 2008: Bug #94984
* -Zero Overhead Loop Context shd be cleared when entering IRQ/EXcp/Trap
* Normally CPU does this automatically, however when doing FAKE rtie,
* we need to explicitly do this. The problem in macros
* FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
* was being "CLEARED" rather then "SET". Since it is Loop INHIBIT Bit,
* setting it and not clearing it clears ZOL context
*
* Vineetg: May 16th, 2008
* - r25 now contains the Current Task when in kernel
*
* Vineetg: Dec 22, 2007
* Minor Surgery of Low Level ISR to make it SMP safe
* - MMU_SCRATCH0 Reg used for freeing up r9 in Level 1 ISR
* - _current_task is made an array of NR_CPUS
* - Access of _current_task wrapped inside a macro so that if hardware
* team agrees for a dedicated reg, no other code is touched
*
* Amit Bhor, Rahul Trivedi, Kanika Nema, Sameer Dhavale : Codito Tech 2004
*/
/*------------------------------------------------------------------
* Function ABI
*------------------------------------------------------------------
*
* Arguments r0 - r7
* Caller Saved Registers r0 - r12
* Callee Saved Registers r13- r25
* Global Pointer (gp) r26
* Frame Pointer (fp) r27
* Stack Pointer (sp) r28
* Interrupt link register (ilink1) r29
* Interrupt link register (ilink2) r30
* Branch link register (blink) r31
*------------------------------------------------------------------
*/
.cpu A7
;############################ Vector Table #################################
.macro VECTOR lbl
#if 1 /* Just in case build breaks */
j \lbl
#else
b \lbl
nop
#endif
.endm
.section .vector, "ax",@progbits
.align 4
/* Each entry in the vector table must occupy 2 words. Since it is a jump
* across sections (.vector to .text) we are guaranteed that 'j somewhere'
* will use the 'j limm' form of the instruction as long as somewhere is in
* a section other than .vector.
*/
; ********* Critical System Events **********************
VECTOR res_service ; 0x0, Restart Vector (0x0)
VECTOR mem_service ; 0x8, Mem exception (0x1)
VECTOR instr_service ; 0x10, Instrn Error (0x2)
; ******************** Device ISRs **********************
#ifdef CONFIG_ARC_IRQ3_LV2
VECTOR handle_interrupt_level2
#else
VECTOR handle_interrupt_level1
#endif
VECTOR handle_interrupt_level1
#ifdef CONFIG_ARC_IRQ5_LV2
VECTOR handle_interrupt_level2
#else
VECTOR handle_interrupt_level1
#endif
#ifdef CONFIG_ARC_IRQ6_LV2
VECTOR handle_interrupt_level2
#else
VECTOR handle_interrupt_level1
#endif
.rept 25
VECTOR handle_interrupt_level1 ; Other devices
.endr
/* FOR ARC600: timer = 0x3, uart = 0x8, emac = 0x10 */
; ******************** Exceptions **********************
VECTOR EV_MachineCheck ; 0x100, Fatal Machine check (0x20)
VECTOR EV_TLBMissI ; 0x108, Instruction TLB miss (0x21)
VECTOR EV_TLBMissD ; 0x110, Data TLB miss (0x22)
VECTOR EV_TLBProtV ; 0x118, Protection Violation (0x23)
; or Misaligned Access
VECTOR EV_PrivilegeV ; 0x120, Privilege Violation (0x24)
VECTOR EV_Trap ; 0x128, Trap exception (0x25)
VECTOR EV_Extension ; 0x130, Extn Instruction Excp (0x26)
.rept 24
VECTOR reserved ; Reserved Exceptions
.endr
#include <linux/linkage.h> /* {ENTRY,EXIT} */
#include <asm/entry.h> /* SAVE_ALL_{INT1,INT2,SYS...} */
#include <asm/errno.h>
#include <asm/arcregs.h>
#include <asm/irqflags.h>
;##################### Scratch Mem for IRQ stack switching #############
ARCFP_DATA int1_saved_reg
.align 32
.type int1_saved_reg, @object
.size int1_saved_reg, 4
int1_saved_reg:
.zero 4
/* Each Interrupt level needs its own scratch */
#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
ARCFP_DATA int2_saved_reg
.type int2_saved_reg, @object
.size int2_saved_reg, 4
int2_saved_reg:
.zero 4
#endif
; ---------------------------------------------
.section .text, "ax",@progbits
res_service: ; processor restart
flag 0x1 ; not implemented
nop
nop
reserved: ; processor restart
rtie ; jump to processor initializations
;##################### Interrupt Handling ##############################
#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
; ---------------------------------------------
; Level 2 ISR: Can interrupt a Level 1 ISR
; ---------------------------------------------
ENTRY(handle_interrupt_level2)
; TODO-vineetg for SMP this won't work
; free up r9 as scratchpad
st r9, [@int2_saved_reg]
;Which mode (user/kernel) was the system in when intr occurred
lr r9, [status32_l2]
SWITCH_TO_KERNEL_STK
SAVE_ALL_INT2
;------------------------------------------------------
; if L2 IRQ interrupted a L1 ISR, disable preemption
;------------------------------------------------------
ld r9, [sp, PT_status32] ; get status32_l2 (saved in pt_regs)
bbit0 r9, STATUS_A1_BIT, 1f ; L1 not active when L2 IRQ, so normal
; A1 is set in status32_l2
; bump thread_info->preempt_count (Disable preemption)
GET_CURR_THR_INFO_FROM_SP r10
ld r9, [r10, THREAD_INFO_PREEMPT_COUNT]
add r9, r9, 1
st r9, [r10, THREAD_INFO_PREEMPT_COUNT]
1:
;------------------------------------------------------
; setup params for Linux common ISR and invoke it
;------------------------------------------------------
lr r0, [icause2]
and r0, r0, 0x1f
bl.d @arch_do_IRQ
mov r1, sp
mov r8,0x2
sr r8, [AUX_IRQ_LV12] ; clear bit in Sticky Status Reg
b ret_from_exception
END(handle_interrupt_level2)
#endif
; ---------------------------------------------
; Level 1 ISR
; ---------------------------------------------
ENTRY(handle_interrupt_level1)
/* free up r9 as scratchpad */
#ifdef CONFIG_SMP
sr r9, [ARC_REG_SCRATCH_DATA0]
#else
st r9, [@int1_saved_reg]
#endif
;Which mode (user/kernel) was the system in when intr occurred
lr r9, [status32_l1]
SWITCH_TO_KERNEL_STK
SAVE_ALL_INT1
lr r0, [icause1]
and r0, r0, 0x1f
#ifdef CONFIG_TRACE_IRQFLAGS
; icause1 needs to be read early, before calling tracing, which
; can clobber scratch regs, hence use of stack to stash it
push r0
TRACE_ASM_IRQ_DISABLE
pop r0
#endif
bl.d @arch_do_IRQ
mov r1, sp
mov r8,0x1
sr r8, [AUX_IRQ_LV12] ; clear bit in Sticky Status Reg
b ret_from_exception
END(handle_interrupt_level1)
;################### Non TLB Exception Handling #############################
; ---------------------------------------------
; Instruction Error Exception Handler
; ---------------------------------------------
ENTRY(instr_service)
EXCEPTION_PROLOGUE
lr r0, [efa]
mov r1, sp
FAKE_RET_FROM_EXCPN r9
bl do_insterror_or_kprobe
b ret_from_exception
END(instr_service)
; ---------------------------------------------
; Memory Error Exception Handler
; ---------------------------------------------
ENTRY(mem_service)
EXCEPTION_PROLOGUE
lr r0, [efa]
mov r1, sp
FAKE_RET_FROM_EXCPN r9
bl do_memory_error
b ret_from_exception
END(mem_service)
; ---------------------------------------------
; Machine Check Exception Handler
; ---------------------------------------------
ENTRY(EV_MachineCheck)
EXCEPTION_PROLOGUE
lr r2, [ecr]
lr r0, [efa]
mov r1, sp
lsr r3, r2, 8
bmsk r3, r3, 7
brne r3, ECR_C_MCHK_DUP_TLB, 1f
bl do_tlb_overlap_fault
b ret_from_exception
1:
; DEAD END: can't do much, display Regs and HALT
SAVE_CALLEE_SAVED_USER
GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10
st sp, [r10, THREAD_CALLEE_REG]
j do_machine_check_fault
END(EV_MachineCheck)
; ---------------------------------------------
; Protection Violation Exception Handler
; ---------------------------------------------
ENTRY(EV_TLBProtV)
EXCEPTION_PROLOGUE
;---------(3) Save some more regs-----------------
; vineetg: Mar 6th: Random Seg Fault issue #1
; ecr and efa were not saved in case an Intr sneaks in
; after fake rtie
lr r2, [ecr]
lr r0, [efa] ; Faulting Data address
; --------(4) Return from CPU Exception Mode ---------
; Fake a rtie, but rtie to next label
; That way, subsequently, do_page_fault ( ) executes in pure kernel
; mode with further Exceptions enabled
FAKE_RET_FROM_EXCPN r9
mov r1, sp
;------ (5) Type of Protection Violation? ----------
;
; ProtV Hardware Exception is triggered for Access Faults of 2 types
* -Access Violation : 00_23_(00|01|02|03)_00
; x r w r+w
; -Unaligned Access : 00_23_04_00
;
bbit1 r2, ECR_C_BIT_PROTV_MISALIG_DATA, 4f
;========= (6a) Access Violation Processing ========
bl do_page_fault
b ret_from_exception
;========== (6b) Non aligned access ============
4:
SAVE_CALLEE_SAVED_USER
mov r2, sp ; callee_regs
bl do_misaligned_access
; TBD: optimize - do this only if a callee reg was involved
; either a dst of emulated LD/ST or src with address-writeback
RESTORE_CALLEE_SAVED_USER
b ret_from_exception
END(EV_TLBProtV)
; ---------------------------------------------
; Privilege Violation Exception Handler
; ---------------------------------------------
ENTRY(EV_PrivilegeV)
EXCEPTION_PROLOGUE
lr r0, [efa]
mov r1, sp
FAKE_RET_FROM_EXCPN r9
bl do_privilege_fault
b ret_from_exception
END(EV_PrivilegeV)
; ---------------------------------------------
; Extension Instruction Exception Handler
; ---------------------------------------------
ENTRY(EV_Extension)
EXCEPTION_PROLOGUE
lr r0, [efa]
mov r1, sp
FAKE_RET_FROM_EXCPN r9
bl do_extension_fault
b ret_from_exception
END(EV_Extension)
;######################### System Call Tracing #########################
tracesys:
; save EFA in case tracer wants the PC of traced task
; using ERET won't work since next-PC has already committed
lr r12, [efa]
GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11
st r12, [r11, THREAD_FAULT_ADDR] ; thread.fault_address
; PRE Sys Call Ptrace hook
mov r0, sp ; pt_regs needed
bl @syscall_trace_entry
; Tracing code now returns the syscall num (orig or modif)
mov r8, r0
; Do the Sys Call as we normally would.
; Validate the Sys Call number
cmp r8, NR_syscalls
mov.hi r0, -ENOSYS
bhi tracesys_exit
; Restore the sys-call args. Mere invocation of the hook abv could have
; clobbered them (since they are in scratch regs). The tracer could also
; have deliberately changed the syscall args: r0-r7
ld r0, [sp, PT_r0]
ld r1, [sp, PT_r1]
ld r2, [sp, PT_r2]
ld r3, [sp, PT_r3]
ld r4, [sp, PT_r4]
ld r5, [sp, PT_r5]
ld r6, [sp, PT_r6]
ld r7, [sp, PT_r7]
ld.as r9, [sys_call_table, r8]
jl [r9] ; Entry into Sys Call Handler
tracesys_exit:
st r0, [sp, PT_r0] ; sys call return value in pt_regs
;POST Sys Call Ptrace Hook
bl @syscall_trace_exit
b ret_from_exception ; NOT ret_from_system_call as it saves r0 which
; we'd done before calling post hook above
;################### Break Point TRAP ##########################
; ======= (5b) Trap is due to Break-Point =========
trap_with_param:
; stop_pc info by gdb needs this info
lr r0, [efa]
mov r1, sp
; Now that we have read EFA, it is safe to do "fake" rtie
; and get out of CPU exception mode
FAKE_RET_FROM_EXCPN r11
; Save callee regs in case gdb wants to have a look
; SP will grow up by size of CALLEE Reg-File
; NOTE: clobbers r12
SAVE_CALLEE_SAVED_USER
; save location of saved Callee Regs @ thread_struct->pc
GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10
st sp, [r10, THREAD_CALLEE_REG]
; Call the trap handler
bl do_non_swi_trap
; unwind stack to discard Callee saved Regs
DISCARD_CALLEE_SAVED_USER
b ret_from_exception
;##################### Trap Handling ##############################
;
; EV_Trap caused by TRAP_S and TRAP0 instructions.
;------------------------------------------------------------------
; (1) System Calls
; :parameters in r0-r7.
; :r8 has the system call number
; (2) Break Points
;------------------------------------------------------------------
ENTRY(EV_Trap)
EXCEPTION_PROLOGUE
;------- (4) What caused the Trap --------------
lr r12, [ecr]
bmsk.f 0, r12, 7
bnz trap_with_param
; ======= (5a) Trap is due to System Call ========
; Before doing anything, return from CPU Exception Mode
FAKE_RET_FROM_EXCPN r11
; If syscall tracing ongoing, invoke pre-pos-hooks
GET_CURR_THR_INFO_FLAGS r10
btst r10, TIF_SYSCALL_TRACE
bnz tracesys ; this never comes back
;============ This is normal System Call case ==========
; Sys-call num shd not exceed the total system calls avail
cmp r8, NR_syscalls
mov.hi r0, -ENOSYS
bhi ret_from_system_call
; Offset into the syscall_table and call handler
ld.as r9,[sys_call_table, r8]
jl [r9] ; Entry into Sys Call Handler
; fall through to ret_from_system_call
END(EV_Trap)
ENTRY(ret_from_system_call)
st r0, [sp, PT_r0] ; sys call return value in pt_regs
; fall through yet again to ret_from_exception
;############# Return from Intr/Excp/Trap (Linux Specifics) ##############
;
; If ret to user mode do we need to handle signals, schedule() et al.
ENTRY(ret_from_exception)
; Pre-{IRQ,Trap,Exception} K/U mode from pt_regs->status32
ld r8, [sp, PT_status32] ; returning to User/Kernel Mode
bbit0 r8, STATUS_U_BIT, resume_kernel_mode
; Before returning to User mode check-for-and-complete any pending work
; such as rescheduling/signal-delivery etc.
resume_user_mode_begin:
; Disable IRQs to ensure that chk for pending work itself is atomic
; (and we don't end up missing a NEED_RESCHED/SIGPENDING due to an
; interim IRQ).
IRQ_DISABLE r10
; Fast Path return to user mode if no pending work
GET_CURR_THR_INFO_FLAGS r9
and.f 0, r9, _TIF_WORK_MASK
bz restore_regs
; --- (Slow Path #1) task preemption ---
bbit0 r9, TIF_NEED_RESCHED, .Lchk_pend_signals
mov blink, resume_user_mode_begin ; tail-call to U mode ret chks
b @schedule ; BTST+Bnz causes relo error in link
.Lchk_pend_signals:
IRQ_ENABLE r10
; --- (Slow Path #2) pending signal ---
mov r0, sp ; pt_regs for arg to do_signal()/do_notify_resume()
GET_CURR_THR_INFO_FLAGS r9
bbit0 r9, TIF_SIGPENDING, .Lchk_notify_resume
; Normal Trap/IRQ entry only saves Scratch (caller-saved) regs
; in pt_reg since the "C" ABI (kernel code) will automatically
; save/restore callee-saved regs.
;
; However, here we need to explicitly save callee regs because
; (i) If this signal causes coredump - full regfile needed
; (ii) If signal is SIGTRAP/SIGSTOP, task is being traced thus
; tracer might call PEEKUSR(CALLEE reg)
;
; NOTE: SP will grow up by size of CALLEE Reg-File
SAVE_CALLEE_SAVED_USER ; clobbers r12
; save location of saved Callee Regs @ thread_struct->callee
GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10
st sp, [r10, THREAD_CALLEE_REG]
bl @do_signal
; Ideally we want to discard the Callee reg above, however if this was
; a tracing signal, tracer could have done a POKEUSR(CALLEE reg)
RESTORE_CALLEE_SAVED_USER
b resume_user_mode_begin ; loop back to start of U mode ret
; --- (Slow Path #3) notify_resume ---
.Lchk_notify_resume:
btst r9, TIF_NOTIFY_RESUME
blnz @do_notify_resume
b resume_user_mode_begin ; unconditionally back to U mode ret chks
; for single exit point from this block
resume_kernel_mode:
; Disable Interrupts from this point on
; CONFIG_PREEMPT: This is a must for preempt_schedule_irq()
; !CONFIG_PREEMPT: To ensure restore_regs is intr safe
IRQ_DISABLE r9
#ifdef CONFIG_PREEMPT
; Can't preempt if preemption disabled
GET_CURR_THR_INFO_FROM_SP r10
ld r8, [r10, THREAD_INFO_PREEMPT_COUNT]
brne r8, 0, restore_regs
; check if this task's NEED_RESCHED flag set
ld r9, [r10, THREAD_INFO_FLAGS]
bbit0 r9, TIF_NEED_RESCHED, restore_regs
; Invoke PREEMPTION
bl preempt_schedule_irq
; preempt_schedule_irq() always returns with IRQ disabled
#endif
; fall through
;############# Return from Intr/Excp/Trap (ARC Specifics) ##############
;
; Restore the saved sys context (common exit-path for EXCPN/IRQ/Trap)
; IRQ shd definitely not happen between now and rtie
; Both entry points to here already disable interrupts
restore_regs:
TRACE_ASM_IRQ_ENABLE
lr r10, [status32]
; Restore REG File. In case multiple Events outstanding,
; use the same priority as rtie: EXCPN, L2 IRQ, L1 IRQ, None
; Note that we use realtime STATUS32 (not pt_regs->status32) to
; decide that.
; if Returning from Exception
bbit0 r10, STATUS_AE_BIT, not_exception
RESTORE_ALL_SYS
rtie
; Not Exception so maybe Interrupts (Level 1 or 2)
not_exception:
#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
; Level 2 interrupt return Path - from hardware standpoint
bbit0 r10, STATUS_A2_BIT, not_level2_interrupt
;------------------------------------------------------------------
; However the context returning might not have taken L2 intr itself
; e.g. Task'A' user-code -> L2 intr -> schedule -> 'B' user-code ret
; Special considerations needed for the context which took L2 intr
ld r9, [sp, PT_event] ; Ensure this is L2 intr context
brne r9, event_IRQ2, 149f
;------------------------------------------------------------------
; if L2 IRQ interrupted an L1 ISR, we'd disabled preemption earlier
; so that sched doesn't move to new task, causing L1 to be delayed
; non-deterministically. Now that we've achieved that, let's reset
; things to what they were, before returning from L2 context
;----------------------------------------------------------------
ld r9, [sp, PT_status32] ; get status32_l2 (saved in pt_regs)
bbit0 r9, STATUS_A1_BIT, 149f ; L1 not active when L2 IRQ, so normal
; decrement thread_info->preempt_count (re-enable preemption)
GET_CURR_THR_INFO_FROM_SP r10
ld r9, [r10, THREAD_INFO_PREEMPT_COUNT]
; paranoid check, given A1 was active when A2 happened, preempt count
; must not be 0 because we would have incremented it.
; If this does happen we simply HALT as it means a BUG !!!
cmp r9, 0
bnz 2f
flag 1
2:
sub r9, r9, 1
st r9, [r10, THREAD_INFO_PREEMPT_COUNT]
149:
;return from level 2
RESTORE_ALL_INT2
debug_marker_l2:
rtie
not_level2_interrupt:
#endif
bbit0 r10, STATUS_A1_BIT, not_level1_interrupt
;return from level 1
RESTORE_ALL_INT1
debug_marker_l1:
rtie
not_level1_interrupt:
;this case is for syscalls or Exceptions (with fake rtie)
RESTORE_ALL_SYS
debug_marker_syscall:
rtie
END(ret_from_exception)
ENTRY(ret_from_fork)
; when the forked child comes here from the __switch_to function
; r0 has the last task pointer.
; put last task in scheduler queue
bl @schedule_tail
; If kernel thread, jump to its entry-point
ld r9, [sp, PT_status32]
brne r9, 0, 1f
jl.d [r14]
mov r0, r13 ; arg to payload
1:
; special case of kernel_thread entry point returning back due to
; kernel_execve() - pretend return from syscall to ret to userland
b ret_from_exception
END(ret_from_fork)
;################### Special Sys Call Wrappers ##########################
ENTRY(sys_clone_wrapper)
SAVE_CALLEE_SAVED_USER
bl @sys_clone
DISCARD_CALLEE_SAVED_USER
GET_CURR_THR_INFO_FLAGS r10
btst r10, TIF_SYSCALL_TRACE
bnz tracesys_exit
b ret_from_system_call
END(sys_clone_wrapper)
#ifdef CONFIG_ARC_DW2_UNWIND
; Workaround for bug 94179 (STAR ):
; Despite -fasynchronous-unwind-tables, linker is not making dwarf2 unwinder
; section (.debug_frame) as loadable. So we force it here.
; This also fixes STAR 9000487933 where the prev-workaround (objcopy --setflag)
; would not work after a clean build due to kernel build system dependencies.
.section .debug_frame, "wa",@progbits
#endif

55
arch/arc/kernel/fpu.c Normal file

@@ -0,0 +1,55 @@
/*
* fpu.c - save/restore of Floating Point Unit Registers on task switch
*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/sched.h>
#include <asm/switch_to.h>
/*
* To save/restore FPU regs, simplest scheme would use LR/SR insns.
* However since SR serializes the pipeline, an alternate "hack" can be used
* which uses the FPU Exchange insn (DEXCL) to r/w FPU regs.
*
* Store to 64bit dpfp1 reg from a pair of core regs:
* dexcl1 0, r1, r0 ; where r1:r0 is the 64 bit val
*
* Read from dpfp1 into pair of core regs (w/o clobbering dpfp1)
* mov_s r3, 0
* daddh11 r1, r3, r3 ; get "hi" into r1 (dpfp1 unchanged)
* dexcl1 r0, r1, r3 ; get "low" into r0 (dpfp1 low clobbered)
* dexcl1 0, r1, r0 ; restore dpfp1 to orig value
*
* However we can tweak the read, so that read-out of outgoing task's FPU regs
* and write of incoming task's regs happen in one shot. So all the work is
* done before context switch
*/
void fpu_save_restore(struct task_struct *prev, struct task_struct *next)
{
unsigned int *saveto = &prev->thread.fpu.aux_dpfp[0].l;
unsigned int *readfrom = &next->thread.fpu.aux_dpfp[0].l;
const unsigned int zero = 0;
__asm__ __volatile__(
"daddh11 %0, %2, %2\n"
"dexcl1 %1, %3, %4\n"
: "=&r" (*(saveto + 1)), /* early clobber must here */
"=&r" (*(saveto))
: "r" (zero), "r" (*(readfrom + 1)), "r" (*(readfrom))
);
__asm__ __volatile__(
"daddh22 %0, %2, %2\n"
"dexcl2 %1, %3, %4\n"
: "=&r"(*(saveto + 3)), /* early clobber must here */
"=&r"(*(saveto + 2))
: "r" (zero), "r" (*(readfrom + 3)), "r" (*(readfrom + 2))
);
}

130
arch/arc/kernel/head.S Normal file

@@ -0,0 +1,130 @@
/*
* ARC CPU startup Code
*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Vineetg: Dec 2007
* -Check if we are running on Simulator or on real hardware
* to skip certain things during boot on simulator
*/
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>
#include <asm/arcregs.h>
#include <asm/cache.h>
.macro CPU_EARLY_SETUP
; Setting up Vector Table (in case an exception happens in early boot)
sr @_int_vec_base_lds, [AUX_INTR_VEC_BASE]
; Disable I-cache/D-cache if kernel so configured
lr r5, [ARC_REG_IC_BCR]
breq r5, 0, 1f ; I$ doesn't exist
lr r5, [ARC_REG_IC_CTRL]
#ifdef CONFIG_ARC_HAS_ICACHE
bclr r5, r5, 0 ; 0 - Enable, 1 is Disable
#else
bset r5, r5, 0 ; I$ exists, but is not used
#endif
sr r5, [ARC_REG_IC_CTRL]
1:
lr r5, [ARC_REG_DC_BCR]
breq r5, 0, 1f ; D$ doesn't exist
lr r5, [ARC_REG_DC_CTRL]
bclr r5, r5, 6 ; Invalidate (discard w/o wback)
#ifdef CONFIG_ARC_HAS_DCACHE
bclr r5, r5, 0 ; Enable (+Inv)
#else
bset r5, r5, 0 ; Disable (+Inv)
#endif
sr r5, [ARC_REG_DC_CTRL]
1:
.endm
.cpu A7
.section .init.text, "ax",@progbits
.type stext, @function
.globl stext
stext:
;-------------------------------------------------------------------
; Don't clobber r0-r2 yet. They might have bootloader provided info
;-------------------------------------------------------------------
CPU_EARLY_SETUP
#ifdef CONFIG_SMP
; Ensure Boot (Master) proceeds. Others wait in platform dependent way
; IDENTITY Reg [ 3 2 1 0 ]
; (cpu-id) ^^^ => Zero for UP ARC700
; => #Core-ID if SMP (Master 0)
; Note that non-boot CPUs might not land here if halt-on-reset and
; instead breathe life from @first_lines_of_secondary, but we still
; need to make sure only boot cpu takes this path.
GET_CPU_ID r5
cmp r5, 0
mov.ne r0, r5
jne arc_platform_smp_wait_to_boot
#endif
; Clear BSS before updating any globals
; XXX: use ZOL here
mov r5, __bss_start
sub r6, __bss_stop, r5
lsr.f lp_count, r6, 2
lpnz 1f
st.ab 0, [r5, 4]
1:
; Uboot - kernel ABI
; r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2
; r1 = magic number (board identity, unused as of now)
; r2 = pointer to uboot provided cmdline or external DTB in mem
; These are handled later in setup_arch()
st r0, [@uboot_tag]
st r2, [@uboot_arg]
; setup "current" tsk and optionally cache it in dedicated r25
mov r9, @init_task
SET_CURR_TASK_ON_CPU r9, r0 ; r9 = tsk, r0 = scratch
; setup stack (fp, sp)
mov fp, 0
; tsk->thread_info is really a PAGE, whose bottom hosts the stack
GET_TSK_STACK_BASE r9, sp ; r9 = tsk, sp = stack base(output)
j start_kernel ; "C" entry point
#ifdef CONFIG_SMP
;----------------------------------------------------------------
; First lines of code run by secondary before jumping to 'C'
;----------------------------------------------------------------
.section .text, "ax",@progbits
.type first_lines_of_secondary, @function
.globl first_lines_of_secondary
first_lines_of_secondary:
CPU_EARLY_SETUP
; setup per-cpu idle task as "current" on this CPU
ld r0, [@secondary_idle_tsk]
SET_CURR_TASK_ON_CPU r0, r1
; setup stack (fp, sp)
mov fp, 0
; set its stack base to tsk->thread_info bottom
GET_TSK_STACK_BASE r0, sp
j start_kernel_secondary
#endif

284
arch/arc/kernel/irq.c Normal file

@@ -0,0 +1,284 @@
/*
* Copyright (C) 2011-12 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/irqdomain.h>
#include <linux/irqchip.h>
#include "../../drivers/irqchip/irqchip.h"
#include <asm/sections.h>
#include <asm/irq.h>
#include <asm/mach_desc.h>
/*
* Early Hardware specific Interrupt setup
* -Platform independent, needed for each CPU (not foldable into init_IRQ)
* -Called very early (start_kernel -> setup_arch -> setup_processor)
*
* What it does:
* -Optionally, setup the High priority Interrupts as Level 2 IRQs
*/
void arc_init_IRQ(void)
{
int level_mask = 0;
/* setup any high priority Interrupts (Level2 in ARCompact jargon) */
level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3;
level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5;
level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6;
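/* e.g. IRQ3 + IRQ6 as level-2 gives level_mask = (1 << 3) | (1 << 6) = 0x48 */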
/*
* Write to register, even if no LV2 IRQs configured to reset it
* in case bootloader had mucked with it
*/
write_aux_reg(AUX_IRQ_LEV, level_mask);
if (level_mask)
pr_info("Level-2 interrupts bitset %x\n", level_mask);
}
/*
* ARC700 core includes a simple on-chip intc supporting
* -per IRQ enable/disable
* -2 levels of interrupts (high/low)
* -all interrupts being level triggered
*
* To reduce platform code, we assume all IRQs directly hooked-up into intc.
* Platforms with external intc, hence cascaded IRQs, are free to over-ride
* below, per IRQ.
*/
static void arc_irq_mask(struct irq_data *data)
{
unsigned int ienb;
ienb = read_aux_reg(AUX_IENABLE);
ienb &= ~(1 << data->irq);
write_aux_reg(AUX_IENABLE, ienb);
}
static void arc_irq_unmask(struct irq_data *data)
{
unsigned int ienb;
ienb = read_aux_reg(AUX_IENABLE);
ienb |= (1 << data->irq);
write_aux_reg(AUX_IENABLE, ienb);
}
static struct irq_chip onchip_intc = {
.name = "ARC In-core Intc",
.irq_mask = arc_irq_mask,
.irq_unmask = arc_irq_unmask,
};
static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
if (irq == TIMER0_IRQ)
irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq);
else
irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq);
return 0;
}
static const struct irq_domain_ops arc_intc_domain_ops = {
.xlate = irq_domain_xlate_onecell,
.map = arc_intc_domain_map,
};
static struct irq_domain *root_domain;
static int __init
init_onchip_IRQ(struct device_node *intc, struct device_node *parent)
{
if (parent)
panic("DeviceTree incore intc not a root irq controller\n");
root_domain = irq_domain_add_legacy(intc, NR_CPU_IRQS, 0, 0,
&arc_intc_domain_ops, NULL);
if (!root_domain)
panic("root irq domain not avail\n");
/* with this we don't need to export root_domain */
irq_set_default_host(root_domain);
return 0;
}
IRQCHIP_DECLARE(arc_intc, "snps,arc700-intc", init_onchip_IRQ);
/*
* Late Interrupt system init called from start_kernel for Boot CPU only
*
* Since slab must already be initialized, platforms can start doing any
* needed request_irq( )s
*/
void __init init_IRQ(void)
{
/* Any external intc can be setup here */
if (machine_desc->init_irq)
machine_desc->init_irq();
/* process the entire interrupt tree in one go */
irqchip_init();
#ifdef CONFIG_SMP
/* Master CPU can initialize its side of IPI */
if (machine_desc->init_smp)
machine_desc->init_smp(smp_processor_id());
#endif
}
/*
* "C" Entry point for any ARC ISR, called from low level vector handler
* @irq is the vector number read from ICAUSE reg of on-chip intc
*/
void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
irq_enter();
generic_handle_irq(irq);
irq_exit();
set_irq_regs(old_regs);
}
void arc_request_percpu_irq(int irq, int cpu,
irqreturn_t (*isr)(int irq, void *dev),
const char *irq_nm,
void *percpu_dev)
{
/* Boot cpu calls request, all call enable */
if (!cpu) {
int rc;
/*
* These 2 calls are essential to making percpu IRQ APIs work
* Ideally these details could be hidden in irq chip map function
* but the issue is IPIs IRQs being static (non-DT) and platform
* specific, so we can't identify them there.
*/
irq_set_percpu_devid(irq);
irq_modify_status(irq, IRQ_NOAUTOEN, 0); /* @irq, @clr, @set */
rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev);
if (rc)
panic("Percpu IRQ request failed for %d\n", irq);
}
enable_percpu_irq(irq, 0);
}
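
The request-once/enable-everywhere split above lets every CPU run the same setup path: only cpu 0 performs the request_percpu_irq(), the rest just enable their local copy. A hedged sketch of a caller, loosely modeled on a per-cpu timer tick; the handler and per-cpu cookie are illustrative stand-ins:

/* Sketch: per-cpu timer hookup; timer_isr and timer_cookie are illustrative */
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <asm/irq.h>

static DEFINE_PER_CPU(int, timer_cookie);

static irqreturn_t timer_isr(int irq, void *dev)
{
	/* ack the hardware, kick the clockevent handler, ... */
	return IRQ_HANDLED;
}

void cpu_timer_setup(int cpu)
{
	/* cpu 0 requests + enables; other CPUs only enable (see above) */
	arc_request_percpu_irq(TIMER0_IRQ, cpu, timer_isr,
			       "Timer0 (per-cpu-tick)", &timer_cookie);
}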
/*
* arch_local_irq_enable - Enable interrupts.
*
* 1. Explicitly called to re-enable interrupts
* 2. Implicitly called from spin_unlock_irq, write_unlock_irq etc
* which may be in hard ISR itself
*
* Semantics of this function change depending on where it is called from:
*
* -If called from hard-ISR, it must not invert interrupt priorities
* e.g. suppose TIMER is high priority (Level 2) IRQ
* Timer hard-ISR, timer_interrupt( ) calls spin_unlock_irq several times.
* Here local_irq_enable( ) shd not re-enable lower priority interrupts
* -If called from soft-ISR, it must re-enable all interrupts
* soft ISRs are low priority jobs which can be very slow, thus all IRQs
* must be enabled while they run.
* Now hardware context wise we may still be in L2 ISR (not done rtie)
* still we must re-enable both L1 and L2 IRQs
* Another twist is prev scenario with flow being
* L1 ISR ==> interrupted by L2 ISR ==> L2 soft ISR
* here we must not re-enable L1 as prev L1 Interrupt's h/w context will get
* over-written (this is deficiency in ARC700 Interrupt mechanism)
*/
#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS /* Complex version for 2 IRQ levels */
void arch_local_irq_enable(void)
{
unsigned long flags;
flags = arch_local_save_flags();
/* Allow both L1 and L2 at the onset */
flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
/* Called from hard ISR (between irq_enter and irq_exit) */
if (in_irq()) {
/* If in L2 ISR, don't re-enable any further IRQs as this can
* cause IRQ priorities to get upside down. e.g. it could allow
* L1 to be taken while in L2 hard ISR which is wrong not only in
* theory, it can also cause the dreaded L1-L2-L1 scenario
*/
if (flags & STATUS_A2_MASK)
flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK);
/* Even if in L1 ISR, allow higher prio L2 IRQs */
else if (flags & STATUS_A1_MASK)
flags &= ~(STATUS_E1_MASK);
}
/* called from soft IRQ, ideally we want to re-enable all levels */
else if (in_softirq()) {
/* However if this is case of L1 interrupted by L2,
* re-enabling both may cause the whacko L1-L2-L1 scenario
* because ARC700 allows level 1 to interrupt an active L2 ISR
* Thus we disable both
* However some code, executing in soft ISR wants some IRQs
* to be enabled so we re-enable L2 only
*
* How do we determine if L1 was interrupted by L2:
* -A2 is set (means in L2 ISR)
* -E1 is set in this ISR's pt_regs->status32 which is
* saved copy of status32_l2 when l2 ISR happened
*/
struct pt_regs *pt = get_irq_regs();
if ((flags & STATUS_A2_MASK) && pt &&
(pt->status32 & STATUS_A1_MASK)) {
/*flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK); */
flags &= ~(STATUS_E1_MASK);
}
}
arch_local_irq_restore(flags);
}
#else /* ! CONFIG_ARC_COMPACT_IRQ_LEVELS */
/*
* Simpler version for only 1 level of interrupt
* Here we only worry about Level 1 bits
*/
void arch_local_irq_enable(void)
{
unsigned long flags;
/*
* The ARC IDE driver tries to re-enable interrupts from hard-ISR
* context, which is simply wrong
*/
if (in_irq()) {
WARN_ONCE(1, "IRQ enabled from hard-isr");
return;
}
flags = arch_local_save_flags();
flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
arch_local_irq_restore(flags);
}
#endif
EXPORT_SYMBOL(arch_local_irq_enable);

213
arch/arc/kernel/kgdb.c Normal file
View file

@ -0,0 +1,213 @@
/*
* kgdb support for ARC
*
* Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kgdb.h>
#include <linux/sched.h>
#include <asm/disasm.h>
#include <asm/cacheflush.h>
static void to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs,
struct callee_regs *cregs)
{
int regno;
for (regno = 0; regno <= 26; regno++)
gdb_regs[_R0 + regno] = get_reg(regno, kernel_regs, cregs);
for (regno = 27; regno < GDB_MAX_REGS; regno++)
gdb_regs[regno] = 0;
gdb_regs[_FP] = kernel_regs->fp;
gdb_regs[__SP] = kernel_regs->sp;
gdb_regs[_BLINK] = kernel_regs->blink;
gdb_regs[_RET] = kernel_regs->ret;
gdb_regs[_STATUS32] = kernel_regs->status32;
gdb_regs[_LP_COUNT] = kernel_regs->lp_count;
gdb_regs[_LP_END] = kernel_regs->lp_end;
gdb_regs[_LP_START] = kernel_regs->lp_start;
gdb_regs[_BTA] = kernel_regs->bta;
gdb_regs[_STOP_PC] = kernel_regs->ret;
}
static void from_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs,
struct callee_regs *cregs)
{
int regno;
for (regno = 0; regno <= 26; regno++)
set_reg(regno, gdb_regs[regno + _R0], kernel_regs, cregs);
kernel_regs->fp = gdb_regs[_FP];
kernel_regs->sp = gdb_regs[__SP];
kernel_regs->blink = gdb_regs[_BLINK];
kernel_regs->ret = gdb_regs[_RET];
kernel_regs->status32 = gdb_regs[_STATUS32];
kernel_regs->lp_count = gdb_regs[_LP_COUNT];
kernel_regs->lp_end = gdb_regs[_LP_END];
kernel_regs->lp_start = gdb_regs[_LP_START];
kernel_regs->bta = gdb_regs[_BTA];
}
void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs)
{
to_gdb_regs(gdb_regs, kernel_regs, (struct callee_regs *)
current->thread.callee_reg);
}
void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs)
{
from_gdb_regs(gdb_regs, kernel_regs, (struct callee_regs *)
current->thread.callee_reg);
}
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs,
struct task_struct *task)
{
if (task)
to_gdb_regs(gdb_regs, task_pt_regs(task),
(struct callee_regs *) task->thread.callee_reg);
}
struct single_step_data_t {
uint16_t opcode[2];
unsigned long address[2];
int is_branch;
int armed;
} single_step_data;
static void undo_single_step(struct pt_regs *regs)
{
if (single_step_data.armed) {
int i;
for (i = 0; i < (single_step_data.is_branch ? 2 : 1); i++) {
memcpy((void *) single_step_data.address[i],
&single_step_data.opcode[i],
BREAK_INSTR_SIZE);
flush_icache_range(single_step_data.address[i],
single_step_data.address[i] +
BREAK_INSTR_SIZE);
}
single_step_data.armed = 0;
}
}
static void place_trap(unsigned long address, void *save)
{
memcpy(save, (void *) address, BREAK_INSTR_SIZE);
memcpy((void *) address, &arch_kgdb_ops.gdb_bpt_instr,
BREAK_INSTR_SIZE);
flush_icache_range(address, address + BREAK_INSTR_SIZE);
}
static void do_single_step(struct pt_regs *regs)
{
single_step_data.is_branch = disasm_next_pc((unsigned long)
regs->ret, regs, (struct callee_regs *)
current->thread.callee_reg,
&single_step_data.address[0],
&single_step_data.address[1]);
place_trap(single_step_data.address[0], &single_step_data.opcode[0]);
if (single_step_data.is_branch) {
place_trap(single_step_data.address[1],
&single_step_data.opcode[1]);
}
single_step_data.armed++;
}
int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
char *remcomInBuffer, char *remcomOutBuffer,
struct pt_regs *regs)
{
unsigned long addr;
char *ptr;
undo_single_step(regs);
switch (remcomInBuffer[0]) {
case 's':
case 'c':
ptr = &remcomInBuffer[1];
if (kgdb_hex2long(&ptr, &addr))
regs->ret = addr;
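/* fall through: 's' also needs the single-step setup below */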
case 'D':
case 'k':
atomic_set(&kgdb_cpu_doing_single_step, -1);
if (remcomInBuffer[0] == 's') {
do_single_step(regs);
atomic_set(&kgdb_cpu_doing_single_step,
smp_processor_id());
}
return 0;
}
return -1;
}
int kgdb_arch_init(void)
{
single_step_data.armed = 0;
return 0;
}
void kgdb_trap(struct pt_regs *regs)
{
/* trap_s 3 is used for breakpoints that overwrite existing
* instructions, while trap_s 4 is used for compiled breakpoints.
*
* with trap_s 3 breakpoints the original instruction needs to be
* restored and continuation needs to start at the location of the
* breakpoint.
*
* with trap_s 4 (compiled) breakpoints, continuation needs to
* start after the breakpoint.
*/
if (regs->ecr_param == 3)
instruction_pointer(regs) -= BREAK_INSTR_SIZE;
kgdb_handle_exception(1, SIGTRAP, 0, regs);
}
void kgdb_arch_exit(void)
{
}
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
instruction_pointer(regs) = ip;
}
static void kgdb_call_nmi_hook(void *ignored)
{
kgdb_nmicallback(raw_smp_processor_id(), NULL);
}
void kgdb_roundup_cpus(unsigned long flags)
{
local_irq_enable();
smp_call_function(kgdb_call_nmi_hook, NULL, 0);
local_irq_disable();
}
struct kgdb_arch arch_kgdb_ops = {
/* breakpoint instruction: TRAP_S 0x3 */
#ifdef CONFIG_CPU_BIG_ENDIAN
.gdb_bpt_instr = {0x78, 0x7e},
#else
.gdb_bpt_instr = {0x7e, 0x78},
#endif
};
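/*
 * Usage sketch (assumptions, not part of this file): with CONFIG_KGDB
 * and a serial console one would typically boot with
 * "kgdboc=ttyS0,115200 kgdbwait" (or trigger later via sysrq-g), then
 * attach from the host:
 * arc-linux-gdb vmlinux
 * (gdb) target remote /dev/ttyUSB0
 * The tty names and the gdb binary name are placeholders.
 */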

523
arch/arc/kernel/kprobes.c Normal file
View file

@ -0,0 +1,523 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/current.h>
#include <asm/disasm.h>
#define MIN_STACK_SIZE(addr) min((unsigned long)MAX_STACK_SIZE, \
(unsigned long)current_thread_info() + THREAD_SIZE - (addr))
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
/* Attempt to probe at unaligned address */
if ((unsigned long)p->addr & 0x01)
return -EINVAL;
/* Address should not be in exception handling code */
p->ainsn.is_short = is_short_instr((unsigned long)p->addr);
p->opcode = *p->addr;
return 0;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
*p->addr = UNIMP_S_INSTRUCTION;
flush_icache_range((unsigned long)p->addr,
(unsigned long)p->addr + sizeof(kprobe_opcode_t));
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
*p->addr = p->opcode;
flush_icache_range((unsigned long)p->addr,
(unsigned long)p->addr + sizeof(kprobe_opcode_t));
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
arch_disarm_kprobe(p);
/* Can we remove the kprobe in the middle of kprobe handling? */
if (p->ainsn.t1_addr) {
*(p->ainsn.t1_addr) = p->ainsn.t1_opcode;
flush_icache_range((unsigned long)p->ainsn.t1_addr,
(unsigned long)p->ainsn.t1_addr +
sizeof(kprobe_opcode_t));
p->ainsn.t1_addr = NULL;
}
if (p->ainsn.t2_addr) {
*(p->ainsn.t2_addr) = p->ainsn.t2_opcode;
flush_icache_range((unsigned long)p->ainsn.t2_addr,
(unsigned long)p->ainsn.t2_addr +
sizeof(kprobe_opcode_t));
p->ainsn.t2_addr = NULL;
}
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
kcb->prev_kprobe.kp = kprobe_running();
kcb->prev_kprobe.status = kcb->kprobe_status;
}
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
}
static inline void __kprobes set_current_kprobe(struct kprobe *p)
{
__this_cpu_write(current_kprobe, p);
}
static void __kprobes resume_execution(struct kprobe *p, unsigned long addr,
struct pt_regs *regs)
{
/* Remove the trap instructions inserted for single step and
* restore the original instructions
*/
if (p->ainsn.t1_addr) {
*(p->ainsn.t1_addr) = p->ainsn.t1_opcode;
flush_icache_range((unsigned long)p->ainsn.t1_addr,
(unsigned long)p->ainsn.t1_addr +
sizeof(kprobe_opcode_t));
p->ainsn.t1_addr = NULL;
}
if (p->ainsn.t2_addr) {
*(p->ainsn.t2_addr) = p->ainsn.t2_opcode;
flush_icache_range((unsigned long)p->ainsn.t2_addr,
(unsigned long)p->ainsn.t2_addr +
sizeof(kprobe_opcode_t));
p->ainsn.t2_addr = NULL;
}
return;
}
static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs)
{
unsigned long next_pc;
unsigned long tgt_if_br = 0;
int is_branch;
unsigned long bta;
/* Copy the opcode back to the kprobe location and execute the
* instruction. Because of this we will not be able to get into the
* same kprobe until this kprobe is done
*/
*(p->addr) = p->opcode;
flush_icache_range((unsigned long)p->addr,
(unsigned long)p->addr + sizeof(kprobe_opcode_t));
/* Now we insert the trap at the next location after this instruction to
* single step. If it is a branch we insert the trap at possible branch
* targets
*/
bta = regs->bta;
if (regs->status32 & 0x40) {
/* We are in a delay slot with the branch taken */
next_pc = bta & ~0x01;
if (!p->ainsn.is_short) {
if (bta & 0x01)
regs->blink += 2;
else {
/* Branch not taken */
next_pc += 2;
/* next pc is taken from bta after executing the
* delay slot instruction
*/
regs->bta += 2;
}
}
is_branch = 0;
} else
is_branch =
disasm_next_pc((unsigned long)p->addr, regs,
(struct callee_regs *) current->thread.callee_reg,
&next_pc, &tgt_if_br);
p->ainsn.t1_addr = (kprobe_opcode_t *) next_pc;
p->ainsn.t1_opcode = *(p->ainsn.t1_addr);
*(p->ainsn.t1_addr) = TRAP_S_2_INSTRUCTION;
flush_icache_range((unsigned long)p->ainsn.t1_addr,
(unsigned long)p->ainsn.t1_addr +
sizeof(kprobe_opcode_t));
if (is_branch) {
p->ainsn.t2_addr = (kprobe_opcode_t *) tgt_if_br;
p->ainsn.t2_opcode = *(p->ainsn.t2_addr);
*(p->ainsn.t2_addr) = TRAP_S_2_INSTRUCTION;
flush_icache_range((unsigned long)p->ainsn.t2_addr,
(unsigned long)p->ainsn.t2_addr +
sizeof(kprobe_opcode_t));
}
}
int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
{
struct kprobe *p;
struct kprobe_ctlblk *kcb;
preempt_disable();
kcb = get_kprobe_ctlblk();
p = get_kprobe((unsigned long *)addr);
if (p) {
/*
* We have reentered the kprobe_handler, since another kprobe
* was hit while within the handler, we save the original
* kprobes and single step on the instruction of the new probe
* without calling any user handlers to avoid recursive
* kprobes.
*/
if (kprobe_running()) {
save_previous_kprobe(kcb);
set_current_kprobe(p);
kprobes_inc_nmissed_count(p);
setup_singlestep(p, regs);
kcb->kprobe_status = KPROBE_REENTER;
return 1;
}
set_current_kprobe(p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
/* If we have no pre-handler or it returned 0, we continue with
* normal processing. If we have a pre-handler and it returned
* non-zero - which is expected from setjmp_pre_handler for
* jprobe, we return without single stepping and leave that to
* the break-handler which is invoked by a kprobe from
* jprobe_return
*/
if (!p->pre_handler || !p->pre_handler(p, regs)) {
setup_singlestep(p, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
}
return 1;
} else if (kprobe_running()) {
p = __this_cpu_read(current_kprobe);
if (p->break_handler && p->break_handler(p, regs)) {
setup_singlestep(p, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
return 1;
}
}
/* no_kprobe: */
preempt_enable_no_resched();
return 0;
}
static int __kprobes arc_post_kprobe_handler(unsigned long addr,
struct pt_regs *regs)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
if (!cur)
return 0;
resume_execution(cur, addr, regs);
/* Rearm the kprobe */
arch_arm_kprobe(cur);
/*
* When we return from trap instruction we go to the next instruction
* We restored the actual instruction in resume_execution() and we need
* to return to the same address and execute it
*/
regs->ret = addr;
if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
cur->post_handler(cur, regs, 0);
}
if (kcb->kprobe_status == KPROBE_REENTER) {
restore_previous_kprobe(kcb);
goto out;
}
reset_current_kprobe();
out:
preempt_enable_no_resched();
return 1;
}
/*
* Fault can be for the instruction being single stepped or for the
* pre/post handlers in the module.
* This is applicable for applications like user probes, where we have the
* probe in user space and the handlers in the kernel
*/
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned long trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
switch (kcb->kprobe_status) {
case KPROBE_HIT_SS:
case KPROBE_REENTER:
/*
* We are here because the instruction being single stepped
* caused the fault. We reset the current kprobe and allow the
* exception handler as if it is regular exception. In our
* case it doesn't matter because the system will be halted
*/
resume_execution(cur, (unsigned long)cur->addr, regs);
if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
else
reset_current_kprobe();
preempt_enable_no_resched();
break;
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
* We are here because the instructions in the pre/post handler
* caused the fault.
*/
/* We increment the nmissed count for accounting,
* we can also use npre/npostfault count for accounting
* these specific fault cases.
*/
kprobes_inc_nmissed_count(cur);
/*
* We come here because instructions in the pre/post
* handler caused the page_fault, this could happen
* if handler tries to access user space by
* copy_from_user(), get_user() etc. Let the
* user-specified handler try to fix it first.
*/
if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
return 1;
/*
* In case the user-specified fault handler returned zero,
* try to fix up.
*/
if (fixup_exception(regs))
return 1;
/*
* fixup_exception() could not handle it,
* Let do_page_fault() fix it.
*/
break;
default:
break;
}
return 0;
}
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
{
struct die_args *args = data;
unsigned long addr = args->err;
int ret = NOTIFY_DONE;
switch (val) {
case DIE_IERR:
if (arc_kprobe_handler(addr, args->regs))
return NOTIFY_STOP;
break;
case DIE_TRAP:
if (arc_post_kprobe_handler(addr, args->regs))
return NOTIFY_STOP;
break;
default:
break;
}
return ret;
}
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct jprobe *jp = container_of(p, struct jprobe, kp);
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
unsigned long sp_addr = regs->sp;
kcb->jprobe_saved_regs = *regs;
memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr));
regs->ret = (unsigned long)(jp->entry);
return 1;
}
void __kprobes jprobe_return(void)
{
__asm__ __volatile__("unimp_s");
return;
}
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
unsigned long sp_addr;
*regs = kcb->jprobe_saved_regs;
sp_addr = regs->sp;
memcpy((void *)sp_addr, kcb->jprobes_stack, MIN_STACK_SIZE(sp_addr));
preempt_enable_no_resched();
return 1;
}
static void __used kretprobe_trampoline_holder(void)
{
__asm__ __volatile__(".global kretprobe_trampoline\n"
"kretprobe_trampoline:\n" "nop\n");
}
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *) regs->blink;
/* Replace the return addr with trampoline addr */
regs->blink = (unsigned long)&kretprobe_trampoline;
}
static int __kprobes trampoline_probe_handler(struct kprobe *p,
struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head, empty_rp;
struct hlist_node *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
INIT_HLIST_HEAD(&empty_rp);
kretprobe_hash_lock(current, &head, &flags);
/*
* It is possible to have multiple instances associated with a given
* task either because multiple functions in the call path
* have a return probe installed on them, and/or more than one
* return probe was registered for a target function.
*
* We can handle this because:
* - instances are always inserted at the head of the list
* - when multiple return probes are registered for the same
* function, the first instance's ret_addr will point to the
* real return address, and all the rest will point to
* kretprobe_trampoline
*/
hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
if (ri->rp && ri->rp->handler)
ri->rp->handler(ri, regs);
orig_ret_address = (unsigned long)ri->ret_addr;
recycle_rp_inst(ri, &empty_rp);
if (orig_ret_address != trampoline_address) {
/*
* This is the real return address. Any other
* instances associated with this task are for
* other calls deeper on the call stack
*/
break;
}
}
kretprobe_assert(ri, orig_ret_address, trampoline_address);
regs->ret = orig_ret_address;
reset_current_kprobe();
kretprobe_hash_unlock(current, &flags);
preempt_enable_no_resched();
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
}
/* By returning a non zero value, we are telling the kprobe handler
* that we don't want the post_handler to run
*/
return 1;
}
static struct kprobe trampoline_p = {
.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
.pre_handler = trampoline_probe_handler
};
int __init arch_init_kprobes(void)
{
/* Registering the trampoline code for the kret probe */
return register_kprobe(&trampoline_p);
}
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
if (p->addr == (kprobe_opcode_t *) &kretprobe_trampoline)
return 1;
return 0;
}
void trap_is_kprobe(unsigned long address, struct pt_regs *regs)
{
notify_die(DIE_TRAP, "kprobe_trap", regs, address, 0, SIGTRAP);
}
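/*
 * Illustrative sketch only: probes are registered via the generic
 * kprobes API; nothing ARC specific is needed beyond this file.
 * The probed symbol and handler below are assumptions.
 */
#if 0
static int my_pre(struct kprobe *p, struct pt_regs *regs)
{
pr_info("probe hit @ %p, ret = %lx\n", p->addr, regs->ret);
return 0; /* 0 => proceed to single step the original insn */
}
static struct kprobe my_kp = {
.symbol_name = "do_fork",
.pre_handler = my_pre,
};
/* in some module init: register_kprobe(&my_kp); */
#endif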

145
arch/arc/kernel/module.c Normal file
View file

@ -0,0 +1,145 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kernel.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <asm/unwind.h>
static inline void arc_write_me(unsigned short *addr, unsigned long value)
{
*addr = (value & 0xffff0000) >> 16;
*(addr + 1) = (value & 0xffff);
}
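/*
 * Worked example: arc_write_me(addr, 0x12345678) stores 0x1234 at
 * @addr and 0x5678 at @addr + 1, i.e. more significant halfword
 * first - the "middle-endian" layout ARC uses for 32-bit immediates
 * embedded in the instruction stream.
 */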
/* ARC specific section quirks - before relocation loop in generic loader
*
* For dwarf unwinding out of modules, this needs to
* 1. Ensure the .debug_frame is allocatable (ARC Linker bug: despite
* -fasynchronous-unwind-tables it doesn't).
* 2. Since we are iterating thru sec hdr tbl anyways, make a note of
* the exact section index, for later use.
*/
int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
char *secstr, struct module *mod)
{
#ifdef CONFIG_ARC_DW2_UNWIND
int i;
mod->arch.unw_sec_idx = 0;
mod->arch.unw_info = NULL;
for (i = 1; i < hdr->e_shnum; i++) {
if (strcmp(secstr+sechdrs[i].sh_name, ".debug_frame") == 0) {
sechdrs[i].sh_flags |= SHF_ALLOC;
mod->arch.unw_sec_idx = i;
break;
}
}
#endif
return 0;
}
void module_arch_cleanup(struct module *mod)
{
#ifdef CONFIG_ARC_DW2_UNWIND
if (mod->arch.unw_info)
unwind_remove_table(mod->arch.unw_info, 0);
#endif
}
int apply_relocate_add(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex, /* sec index for sym tbl */
unsigned int relsec, /* sec index for relo sec */
struct module *module)
{
int i, n;
Elf32_Rela *rel_entry = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym_entry, *sym_sec;
Elf32_Addr relocation;
Elf32_Addr location;
Elf32_Addr sec_to_patch;
int relo_type;
sec_to_patch = sechdrs[sechdrs[relsec].sh_info].sh_addr;
sym_sec = (Elf32_Sym *) sechdrs[symindex].sh_addr;
n = sechdrs[relsec].sh_size / sizeof(*rel_entry);
pr_debug("\n========== Module Sym reloc ===========================\n");
pr_debug("Section to fixup %x\n", sec_to_patch);
pr_debug("=========================================================\n");
pr_debug("rela->r_off | rela->addend | sym->st_value | ADDR | VALUE\n");
pr_debug("=========================================================\n");
/* Loop thru entries in relocation section */
for (i = 0; i < n; i++) {
/* This is where to make the change */
location = sec_to_patch + rel_entry[i].r_offset;
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
sym_entry = sym_sec + ELF32_R_SYM(rel_entry[i].r_info);
relocation = sym_entry->st_value + rel_entry[i].r_addend;
pr_debug("\t%x\t\t%x\t\t%x %x %x [%s]\n",
rel_entry[i].r_offset, rel_entry[i].r_addend,
sym_entry->st_value, location, relocation,
strtab + sym_entry->st_name);
/* This assumes modules are built with -mlong-calls
* so any branches/jumps are absolute 32 bit jmps
* global data access again is abs 32 bit.
* Both of these are handled by same relocation type
*/
relo_type = ELF32_R_TYPE(rel_entry[i].r_info);
if (likely(R_ARC_32_ME == relo_type))
arc_write_me((unsigned short *)location, relocation);
else if (R_ARC_32 == relo_type)
*((Elf32_Addr *) location) = relocation;
else
goto relo_err;
}
return 0;
relo_err:
pr_err("%s: unknown relocation: %u\n",
module->name, ELF32_R_TYPE(rel_entry[i].r_info));
return -ENOEXEC;
}
/* Just before lift off: After sections have been relocated, we add the
* dwarf section to unwinder table pool
* This couldn't be done in module_frob_arch_sections() because
* relocations had not been applied by then
*/
int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
struct module *mod)
{
#ifdef CONFIG_ARC_DW2_UNWIND
void *unw;
int unwsec = mod->arch.unw_sec_idx;
if (unwsec) {
unw = unwind_add_table(mod, (void *)sechdrs[unwsec].sh_addr,
sechdrs[unwsec].sh_size);
mod->arch.unw_info = unw;
}
#endif
return 0;
}

323
arch/arc/kernel/perf_event.c Normal file
View file

@ -0,0 +1,323 @@
/*
* Linux performance counter support for ARC700 series
*
* Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
*
* This code is inspired by the perf support of various other architectures.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <asm/arcregs.h>
struct arc_pmu {
struct pmu pmu;
int counter_size; /* in bits */
int n_counters;
unsigned long used_mask[BITS_TO_LONGS(ARC_PMU_MAX_HWEVENTS)];
int ev_hw_idx[PERF_COUNT_ARC_HW_MAX];
};
/* read counter #idx; note that counter# != event# on ARC! */
static uint64_t arc_pmu_read_counter(int idx)
{
uint32_t tmp;
uint64_t result;
/*
* ARC supports making 'snapshots' of the counters, so we don't
* need to care about counters wrapping to 0 underneath our feet
*/
write_aux_reg(ARC_REG_PCT_INDEX, idx);
tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN);
result = (uint64_t) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32;
result |= read_aux_reg(ARC_REG_PCT_SNAPL);
return result;
}
static void arc_perf_event_update(struct perf_event *event,
struct hw_perf_event *hwc, int idx)
{
struct arc_pmu *arc_pmu = container_of(event->pmu, struct arc_pmu, pmu);
uint64_t prev_raw_count, new_raw_count;
int64_t delta;
do {
prev_raw_count = local64_read(&hwc->prev_count);
new_raw_count = arc_pmu_read_counter(idx);
} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count);
delta = (new_raw_count - prev_raw_count) &
((1ULL << arc_pmu->counter_size) - 1ULL);
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
}
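/*
 * Worked example of the wrap-safe delta above, for a 32-bit counter:
 * prev = 0xFFFFFFFE, new = 0x00000001
 * delta = (0x1 - 0xFFFFFFFE) & 0xFFFFFFFF = 3
 * i.e. 3 events elapsed even though the raw count wrapped past 0.
 */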
static void arc_pmu_read(struct perf_event *event)
{
arc_perf_event_update(event, &event->hw, event->hw.idx);
}
static int arc_pmu_cache_event(u64 config)
{
unsigned int cache_type, cache_op, cache_result;
int ret;
cache_type = (config >> 0) & 0xff;
cache_op = (config >> 8) & 0xff;
cache_result = (config >> 16) & 0xff;
if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
return -EINVAL;
if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
return -EINVAL;
if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return -EINVAL;
ret = arc_pmu_cache_map[cache_type][cache_op][cache_result];
if (ret == CACHE_OP_UNSUPPORTED)
return -ENOENT;
return ret;
}
/* initializes hw_perf_event structure if event is supported */
static int arc_pmu_event_init(struct perf_event *event)
{
struct arc_pmu *arc_pmu = container_of(event->pmu, struct arc_pmu, pmu);
struct hw_perf_event *hwc = &event->hw;
int ret;
switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
if (event->attr.config >= PERF_COUNT_HW_MAX)
return -ENOENT;
if (arc_pmu->ev_hw_idx[event->attr.config] < 0)
return -ENOENT;
hwc->config = arc_pmu->ev_hw_idx[event->attr.config];
pr_debug("initializing event %d with cfg %d\n",
(int) event->attr.config, (int) hwc->config);
return 0;
case PERF_TYPE_HW_CACHE:
ret = arc_pmu_cache_event(event->attr.config);
if (ret < 0)
return ret;
hwc->config = arc_pmu->ev_hw_idx[ret];
return 0;
default:
return -ENOENT;
}
}
/* starts all counters */
static void arc_pmu_enable(struct pmu *pmu)
{
uint32_t tmp;
tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x1);
}
/* stops all counters */
static void arc_pmu_disable(struct pmu *pmu)
{
uint32_t tmp;
tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x0);
}
/*
* Assigns hardware counter to hardware condition.
* Note that there is no separate start/stop mechanism;
* stopping is achieved by assigning the 'never' condition
*/
static void arc_pmu_start(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
if (WARN_ON_ONCE(idx == -1))
return;
if (flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
event->hw.state = 0;
/* enable ARC pmu here */
write_aux_reg(ARC_REG_PCT_INDEX, idx);
write_aux_reg(ARC_REG_PCT_CONFIG, hwc->config);
}
static void arc_pmu_stop(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
if (!(event->hw.state & PERF_HES_STOPPED)) {
/* stop ARC pmu here */
write_aux_reg(ARC_REG_PCT_INDEX, idx);
/* condition code #0 is always "never" */
write_aux_reg(ARC_REG_PCT_CONFIG, 0);
event->hw.state |= PERF_HES_STOPPED;
}
if ((flags & PERF_EF_UPDATE) &&
!(event->hw.state & PERF_HES_UPTODATE)) {
arc_perf_event_update(event, &event->hw, idx);
event->hw.state |= PERF_HES_UPTODATE;
}
}
static void arc_pmu_del(struct perf_event *event, int flags)
{
struct arc_pmu *arc_pmu = container_of(event->pmu, struct arc_pmu, pmu);
arc_pmu_stop(event, PERF_EF_UPDATE);
__clear_bit(event->hw.idx, arc_pmu->used_mask);
perf_event_update_userpage(event);
}
/* allocate hardware counter and optionally start counting */
static int arc_pmu_add(struct perf_event *event, int flags)
{
struct arc_pmu *arc_pmu = container_of(event->pmu, struct arc_pmu, pmu);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
if (__test_and_set_bit(idx, arc_pmu->used_mask)) {
idx = find_first_zero_bit(arc_pmu->used_mask,
arc_pmu->n_counters);
if (idx == arc_pmu->n_counters)
return -EAGAIN;
__set_bit(idx, arc_pmu->used_mask);
hwc->idx = idx;
}
write_aux_reg(ARC_REG_PCT_INDEX, idx);
write_aux_reg(ARC_REG_PCT_CONFIG, 0);
write_aux_reg(ARC_REG_PCT_COUNTL, 0);
write_aux_reg(ARC_REG_PCT_COUNTH, 0);
local64_set(&hwc->prev_count, 0);
hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
if (flags & PERF_EF_START)
arc_pmu_start(event, PERF_EF_RELOAD);
perf_event_update_userpage(event);
return 0;
}
static int arc_pmu_device_probe(struct platform_device *pdev)
{
struct arc_pmu *arc_pmu;
struct arc_reg_pct_build pct_bcr;
struct arc_reg_cc_build cc_bcr;
int i, j, ret;
union cc_name {
struct {
uint32_t word0, word1;
char sentinel;
} indiv;
char str[9];
} cc_name;
READ_BCR(ARC_REG_PCT_BUILD, pct_bcr);
if (!pct_bcr.v) {
pr_err("This core does not have performance counters!\n");
return -ENODEV;
}
BUG_ON(pct_bcr.c > ARC_PMU_MAX_HWEVENTS);
READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
if (!cc_bcr.v) {
pr_err("Performance counters exist, but no countable conditions?\n");
return -ENODEV;
}
arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL);
if (!arc_pmu)
return -ENOMEM;
arc_pmu->n_counters = pct_bcr.c;
arc_pmu->counter_size = 32 + (pct_bcr.s << 4);
pr_info("ARC perf\t: %d counters (%d bits), %d countable conditions\n",
arc_pmu->n_counters, arc_pmu->counter_size, cc_bcr.c);
cc_name.str[8] = 0;
for (i = 0; i < PERF_COUNT_HW_MAX; i++)
arc_pmu->ev_hw_idx[i] = -1;
for (j = 0; j < cc_bcr.c; j++) {
write_aux_reg(ARC_REG_CC_INDEX, j);
cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0);
cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1);
for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
if (arc_pmu_ev_hw_map[i] &&
!strcmp(arc_pmu_ev_hw_map[i], cc_name.str) &&
strlen(arc_pmu_ev_hw_map[i])) {
pr_debug("mapping %d to idx %d with name %s\n",
i, j, cc_name.str);
arc_pmu->ev_hw_idx[i] = j;
}
}
}
arc_pmu->pmu = (struct pmu) {
.pmu_enable = arc_pmu_enable,
.pmu_disable = arc_pmu_disable,
.event_init = arc_pmu_event_init,
.add = arc_pmu_add,
.del = arc_pmu_del,
.start = arc_pmu_start,
.stop = arc_pmu_stop,
.read = arc_pmu_read,
};
/* ARC 700 PMU does not support sampling events */
arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
ret = perf_pmu_register(&arc_pmu->pmu, pdev->name, PERF_TYPE_RAW);
return ret;
}
#ifdef CONFIG_OF
static const struct of_device_id arc_pmu_match[] = {
{ .compatible = "snps,arc700-pmu" },
{},
};
MODULE_DEVICE_TABLE(of, arc_pmu_match);
#endif
static struct platform_driver arc_pmu_driver = {
.driver = {
.name = "arc700-pmu",
.of_match_table = of_match_ptr(arc_pmu_match),
},
.probe = arc_pmu_device_probe,
};
module_platform_driver(arc_pmu_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mischa Jonker <mjonker@synopsys.com>");
MODULE_DESCRIPTION("ARC PMU driver");
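/*
 * With this driver bound, the generic perf tooling applies as usual,
 * e.g. (illustrative): perf stat -e cycles,instructions ./a.out
 * Sampling modes stay unavailable due to PERF_PMU_CAP_NO_INTERRUPT.
 */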

234
arch/arc/kernel/process.c Normal file
View file

@ -0,0 +1,234 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Amit Bhor, Kanika Nema: Codito Technologies 2004
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/elf.h>
#include <linux/tick.h>
SYSCALL_DEFINE1(arc_settls, void *, user_tls_data_ptr)
{
task_thread_info(current)->thr_ptr = (unsigned int)user_tls_data_ptr;
return 0;
}
/*
* We return the user space TLS data ptr as sys-call return code
* Ideally it should be copied to user.
* However we can cheat by the fact that some sys-calls do return
* absurdly high values
* Since the tls data ptr is not going to be in the range 0xFFFF_xxxx
* it won't be considered a sys-call error
* and it will be loads better than copy-to-user, which is a definite
* D-TLB Miss
*/
SYSCALL_DEFINE0(arc_gettls)
{
return task_thread_info(current)->thr_ptr;
}
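/*
 * Userland view, as a sketch (assumes the exported syscall number):
 * void *tls = (void *)syscall(__NR_arc_gettls);
 */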
void arch_cpu_idle(void)
{
/* sleep, but enable all interrupts before committing */
__asm__("sleep 0x3");
}
asmlinkage void ret_from_fork(void);
/* Layout of Child kernel mode stack as setup at the end of this function is
*
* | ... |
* | ... |
* | unused |
* | |
* ------------------
* | r25 | <==== top of Stack (thread.ksp)
* ~ ~
* | --to-- | (CALLEE Regs of user mode)
* | r13 |
* ------------------
* | fp |
* | blink | @ret_from_fork
* ------------------
* | |
* ~ ~
* ~ ~
* | |
* ------------------
* | r12 |
* ~ ~
* | --to-- | (scratch Regs of user mode)
* | r0 |
* ------------------
* | SP |
* | orig_r0 |
* | event/ECR |
* | user_r25 |
* ------------------ <===== END of PAGE
*/
int copy_thread(unsigned long clone_flags,
unsigned long usp, unsigned long arg,
struct task_struct *p)
{
struct pt_regs *c_regs; /* child's pt_regs */
unsigned long *childksp; /* to unwind out of __switch_to() */
struct callee_regs *c_callee; /* child's callee regs */
struct callee_regs *parent_callee; /* parent's callee regs */
struct pt_regs *regs = current_pt_regs();
/* Mark the specific anchors to begin with (see pic above) */
c_regs = task_pt_regs(p);
childksp = (unsigned long *)c_regs - 2; /* 2 words for FP/BLINK */
c_callee = ((struct callee_regs *)childksp) - 1;
/*
* __switch_to() uses thread.ksp to start unwinding stack
* For kernel threads we don't need to create callee regs, the
* stack layout nevertheless needs to remain the same.
* Also, since __switch_to anyways unwinds callee regs, we use
* this to populate kernel thread entry-pt/args into callee regs,
* so that ret_from_kernel_thread() becomes simpler.
*/
p->thread.ksp = (unsigned long)c_callee; /* THREAD_KSP */
/* __switch_to expects FP(0), BLINK(return addr) at top */
childksp[0] = 0; /* fp */
childksp[1] = (unsigned long)ret_from_fork; /* blink */
if (unlikely(p->flags & PF_KTHREAD)) {
memset(c_regs, 0, sizeof(struct pt_regs));
c_callee->r13 = arg; /* argument to kernel thread */
c_callee->r14 = usp; /* function */
return 0;
}
/*--------- User Task Only --------------*/
/* __switch_to expects FP(0), BLINK(return addr) at top of stack */
childksp[0] = 0; /* for POP fp */
childksp[1] = (unsigned long)ret_from_fork; /* for POP blink */
/* Copy parents pt regs on child's kernel mode stack */
*c_regs = *regs;
if (usp)
c_regs->sp = usp;
c_regs->r0 = 0; /* fork returns 0 in child */
parent_callee = ((struct callee_regs *)regs) - 1;
*c_callee = *parent_callee;
if (unlikely(clone_flags & CLONE_SETTLS)) {
/*
* set task's userland tls data ptr from 4th arg
* the clone C-lib call differs from the clone sys-call
*/
task_thread_info(p)->thr_ptr = regs->r3;
} else {
/* Normal fork case: set parent's TLS ptr in child */
task_thread_info(p)->thr_ptr =
task_thread_info(current)->thr_ptr;
}
return 0;
}
/*
* Do necessary setup to start up a new user task
*/
void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long usp)
{
set_fs(USER_DS); /* user space */
regs->sp = usp;
regs->ret = pc;
/*
* [U]ser Mode bit set
* [L] ZOL loop inhibited to begin with - cleared by a LP insn
* Interrupts enabled
*/
regs->status32 = STATUS_U_MASK | STATUS_L_MASK |
STATUS_E1_MASK | STATUS_E2_MASK;
/* bogus seed values for debugging */
regs->lp_start = 0x10;
regs->lp_end = 0x80;
}
/*
* Some archs flush debug and FPU info here
*/
void flush_thread(void)
{
}
/*
* Free any architecture-specific thread data structures, etc.
*/
void exit_thread(void)
{
}
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
return 0;
}
/*
* API expected by scheduler code: if a thread is sleeping, where is it?
* What is this good for? It will always be the scheduler or ret_from_fork,
* so we hard code that anyway.
*/
unsigned long thread_saved_pc(struct task_struct *t)
{
struct pt_regs *regs = task_pt_regs(t);
unsigned long blink = 0;
/*
* If the thread being queried for is not itself calling this, then it
* implies it is not executing, which in turn implies it is sleeping,
* which in turn implies it got switched OUT by the schedular.
* In that case, its kernel mode blink can be reliably retrieved as per
* the picture above (right above pt_regs).
*/
if (t != current && t->state != TASK_RUNNING)
blink = *((unsigned int *)regs - 1);
return blink;
}
int elf_check_arch(const struct elf32_hdr *x)
{
unsigned int eflags;
if (x->e_machine != EM_ARCOMPACT)
return 0;
eflags = x->e_flags;
if ((eflags & EF_ARC_OSABI_MSK) < EF_ARC_OSABI_CURRENT) {
pr_err("ABI mismatch - you need newer toolchain\n");
force_sigsegv(SIGSEGV, current);
return 0;
}
return 1;
}
EXPORT_SYMBOL(elf_check_arch);

172
arch/arc/kernel/ptrace.c Normal file
View file

@ -0,0 +1,172 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/regset.h>
#include <linux/unistd.h>
#include <linux/elf.h>
static struct callee_regs *task_callee_regs(struct task_struct *tsk)
{
struct callee_regs *tmp = (struct callee_regs *)tsk->thread.callee_reg;
return tmp;
}
static int genregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
const struct pt_regs *ptregs = task_pt_regs(target);
const struct callee_regs *cregs = task_callee_regs(target);
int ret = 0;
unsigned int stop_pc_val;
#define REG_O_CHUNK(START, END, PTR) \
if (!ret) \
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, PTR, \
offsetof(struct user_regs_struct, START), \
offsetof(struct user_regs_struct, END));
#define REG_O_ONE(LOC, PTR) \
if (!ret) \
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, PTR, \
offsetof(struct user_regs_struct, LOC), \
offsetof(struct user_regs_struct, LOC) + 4);
#define REG_O_ZERO(LOC) \
if (!ret) \
ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, \
offsetof(struct user_regs_struct, LOC), \
offsetof(struct user_regs_struct, LOC) + 4);
REG_O_ZERO(pad);
REG_O_CHUNK(scratch, callee, ptregs);
REG_O_ZERO(pad2);
REG_O_CHUNK(callee, efa, cregs);
REG_O_CHUNK(efa, stop_pc, &target->thread.fault_address);
if (!ret) {
if (in_brkpt_trap(ptregs)) {
stop_pc_val = target->thread.fault_address;
pr_debug("\t\tstop_pc (brk-pt)\n");
} else {
stop_pc_val = ptregs->ret;
pr_debug("\t\tstop_pc (others)\n");
}
REG_O_ONE(stop_pc, &stop_pc_val);
}
return ret;
}
static int genregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
const struct pt_regs *ptregs = task_pt_regs(target);
const struct callee_regs *cregs = task_callee_regs(target);
int ret = 0;
#define REG_IN_CHUNK(FIRST, NEXT, PTR) \
if (!ret) \
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
(void *)(PTR), \
offsetof(struct user_regs_struct, FIRST), \
offsetof(struct user_regs_struct, NEXT));
#define REG_IN_ONE(LOC, PTR) \
if (!ret) \
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
(void *)(PTR), \
offsetof(struct user_regs_struct, LOC), \
offsetof(struct user_regs_struct, LOC) + 4);
#define REG_IGNORE_ONE(LOC) \
if (!ret) \
ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, \
offsetof(struct user_regs_struct, LOC), \
offsetof(struct user_regs_struct, LOC) + 4);
REG_IGNORE_ONE(pad);
/* TBD: disallow updates to STATUS32 etc*/
REG_IN_CHUNK(scratch, pad2, ptregs); /* pt_regs[bta..sp] */
REG_IGNORE_ONE(pad2);
REG_IN_CHUNK(callee, efa, cregs); /* callee_regs[r25..r13] */
REG_IGNORE_ONE(efa); /* efa update invalid */
REG_IGNORE_ONE(stop_pc); /* PC updated via @ret */
return ret;
}
enum arc_getset {
REGSET_GENERAL,
};
static const struct user_regset arc_regsets[] = {
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(unsigned long),
.align = sizeof(unsigned long),
.get = genregs_get,
.set = genregs_set,
}
};
static const struct user_regset_view user_arc_view = {
.name = UTS_MACHINE,
.e_machine = EM_ARCOMPACT,
.regsets = arc_regsets,
.n = ARRAY_SIZE(arc_regsets)
};
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
return &user_arc_view;
}
void ptrace_disable(struct task_struct *child)
{
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
int ret = -EIO;
pr_debug("REQ=%ld: ADDR =0x%lx, DATA=0x%lx)\n", request, addr, data);
switch (request) {
case PTRACE_GET_THREAD_AREA:
ret = put_user(task_thread_info(child)->thr_ptr,
(unsigned long __user *)data);
break;
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
asmlinkage int syscall_trace_entry(struct pt_regs *regs)
{
if (tracehook_report_syscall_entry(regs))
return ULONG_MAX;
return regs->r8;
}
asmlinkage void syscall_trace_exit(struct pt_regs *regs)
{
tracehook_report_syscall_exit(regs, 0);
}
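/*
 * Tracer-side sketch for the ARC specific request handled above
 * (illustrative; standard ptrace(2) calling convention):
 * unsigned long tls;
 * ptrace(PTRACE_GET_THREAD_AREA, pid, 0, &tls);
 * On success @tls holds the tracee's TLS pointer (thr_ptr).
 */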

34
arch/arc/kernel/reset.c Normal file
View file

@ -0,0 +1,34 @@
/*
* Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/reboot.h>
#include <linux/pm.h>
void machine_halt(void)
{
/* Halt the processor */
__asm__ __volatile__("flag 1\n");
}
void machine_restart(char *__unused)
{
/* Soft reset : jump to reset vector */
pr_info("Put your restart handler here\n");
machine_halt();
}
void machine_power_off(void)
{
/* FIXME :: power off ??? */
machine_halt();
}
void (*pm_power_off) (void) = NULL;
EXPORT_SYMBOL(pm_power_off);

477
arch/arc/kernel/setup.c Normal file
View file

@ -0,0 +1,477 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/delay.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/clk-provider.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/cache.h>
#include <asm/sections.h>
#include <asm/arcregs.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/irq.h>
#include <asm/unwind.h>
#include <asm/clk.h>
#include <asm/mach_desc.h>
#include <asm/smp.h>
#define FIX_PTR(x) __asm__ __volatile__(";" : "+r"(x))
/* Part of U-boot ABI: see head.S */
int __initdata uboot_tag;
char __initdata *uboot_arg;
const struct machine_desc *machine_desc;
struct task_struct *_current_task[NR_CPUS]; /* For stack switching */
struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
static void read_arc_build_cfg_regs(void)
{
struct bcr_perip uncached_space;
struct bcr_generic bcr;
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
FIX_PTR(cpu);
READ_BCR(AUX_IDENTITY, cpu->core);
READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa);
READ_BCR(ARC_REG_TIMERS_BCR, cpu->timers);
cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
cpu->uncached_base = uncached_space.start << 24;
READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy);
cpu->extn.norm = read_aux_reg(ARC_REG_NORM_BCR) > 1 ? 1 : 0; /* 2,3 */
cpu->extn.barrel = read_aux_reg(ARC_REG_BARREL_BCR) > 1 ? 1 : 0; /* 2,3 */
cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0; /* 1,3 */
cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR) ? 1 : 0;
cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR) > 1 ? 1 : 0; /* 2 */
/* Note that we read the CCM BCRs independent of kernel config
* This is to catch the cases where user doesn't know that
* CCMs are present in hardware build
*/
{
struct bcr_iccm iccm;
struct bcr_dccm dccm;
struct bcr_dccm_base dccm_base;
unsigned int bcr_32bit_val;
bcr_32bit_val = read_aux_reg(ARC_REG_ICCM_BCR);
if (bcr_32bit_val) {
iccm = *((struct bcr_iccm *)&bcr_32bit_val);
cpu->iccm.base_addr = iccm.base << 16;
cpu->iccm.sz = 0x2000 << (iccm.sz - 1);
}
bcr_32bit_val = read_aux_reg(ARC_REG_DCCM_BCR);
if (bcr_32bit_val) {
dccm = *((struct bcr_dccm *)&bcr_32bit_val);
cpu->dccm.sz = 0x800 << (dccm.sz);
READ_BCR(ARC_REG_DCCMBASE_BCR, dccm_base);
cpu->dccm.base_addr = dccm_base.addr << 8;
}
}
READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem);
read_decode_mmu_bcr();
read_decode_cache_bcr();
{
struct bcr_fp_arcompact sp, dp;
struct bcr_bpu_arcompact bpu;
READ_BCR(ARC_REG_FP_BCR, sp);
READ_BCR(ARC_REG_DPFP_BCR, dp);
cpu->extn.fpu_sp = sp.ver ? 1 : 0;
cpu->extn.fpu_dp = dp.ver ? 1 : 0;
READ_BCR(ARC_REG_BPU_BCR, bpu);
cpu->bpu.ver = bpu.ver;
cpu->bpu.full = bpu.fam ? 1 : 0;
if (bpu.ent) {
cpu->bpu.num_cache = 256 << (bpu.ent - 1);
cpu->bpu.num_pred = 256 << (bpu.ent - 1);
}
}
READ_BCR(ARC_REG_AP_BCR, bcr);
cpu->extn.ap = bcr.ver ? 1 : 0;
READ_BCR(ARC_REG_SMART_BCR, bcr);
cpu->extn.smart = bcr.ver ? 1 : 0;
cpu->extn.debug = cpu->extn.ap | cpu->extn.smart;
}
static const struct cpuinfo_data arc_cpu_tbl[] = {
{ {0x20, "ARC 600" }, 0x2F},
{ {0x30, "ARC 700" }, 0x33},
{ {0x34, "ARC 700 R4.10"}, 0x34},
{ {0x35, "ARC 700 R4.11"}, 0x35},
{ {0x00, NULL } }
};
#define IS_AVAIL1(v, str) ((v) ? str : "")
#define IS_USED(cfg) (IS_ENABLED(cfg) ? "" : "(not used) ")
#define IS_AVAIL2(v, str, cfg) IS_AVAIL1(v, str), IS_AVAIL1(v, IS_USED(cfg))
static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
{
struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
struct bcr_identity *core = &cpu->core;
const struct cpuinfo_data *tbl;
char *isa_nm;
int i, be, atomic;
int n = 0;
FIX_PTR(cpu);
{
isa_nm = "ARCompact";
be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
atomic = cpu->isa.atomic1;
if (!cpu->isa.ver) /* ISA BCR absent, use Kconfig info */
atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
}
n += scnprintf(buf + n, len - n,
"\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n",
core->family, core->cpu_id, core->chip_id);
for (tbl = &arc_cpu_tbl[0]; tbl->info.id != 0; tbl++) {
if ((core->family >= tbl->info.id) &&
(core->family <= tbl->up_range)) {
n += scnprintf(buf + n, len - n,
"processor [%d]\t: %s (%s ISA) %s\n",
cpu_id, tbl->info.str, isa_nm,
IS_AVAIL1(be, "[Big-Endian]"));
break;
}
}
if (tbl->info.id == 0)
n += scnprintf(buf + n, len - n, "UNKNOWN ARC Processor\n");
n += scnprintf(buf + n, len - n, "CPU speed\t: %u.%02u Mhz\n",
(unsigned int)(arc_get_core_freq() / 1000000),
(unsigned int)(arc_get_core_freq() / 10000) % 100);
n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ",
IS_AVAIL1(cpu->timers.t0, "Timer0 "),
IS_AVAIL1(cpu->timers.t1, "Timer1 "),
IS_AVAIL2(cpu->timers.rtsc, "64-bit RTSC ", CONFIG_ARC_HAS_RTSC));
n += i = scnprintf(buf + n, len - n, "%s%s",
IS_AVAIL2(atomic, "atomic ", CONFIG_ARC_HAS_LLSC));
if (i)
n += scnprintf(buf + n, len - n, "\n\t\t: ");
n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n",
IS_AVAIL1(cpu->extn_mpy.ver, "mpy "),
IS_AVAIL1(cpu->extn.norm, "norm "),
IS_AVAIL1(cpu->extn.barrel, "barrel-shift "),
IS_AVAIL1(cpu->extn.swap, "swap "),
IS_AVAIL1(cpu->extn.minmax, "minmax "),
IS_AVAIL1(cpu->extn.crc, "crc "),
IS_AVAIL2(1, "swape", CONFIG_ARC_HAS_SWAPE));
if (cpu->bpu.ver)
n += scnprintf(buf + n, len - n,
"BPU\t\t: %s%s match, cache:%d, Predict Table:%d\n",
IS_AVAIL1(cpu->bpu.full, "full"),
IS_AVAIL1(!cpu->bpu.full, "partial"),
cpu->bpu.num_cache, cpu->bpu.num_pred);
return buf;
}
static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
{
int n = 0;
struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
FIX_PTR(cpu);
n += scnprintf(buf + n, len - n,
"Vector Table\t: %#x\nUncached Base\t: %#x\n",
cpu->vec_base, cpu->uncached_base);
if (cpu->extn.fpu_sp || cpu->extn.fpu_dp)
n += scnprintf(buf + n, len - n, "FPU\t\t: %s%s\n",
IS_AVAIL1(cpu->extn.fpu_sp, "SP "),
IS_AVAIL1(cpu->extn.fpu_dp, "DP "));
if (cpu->extn.debug)
n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s%s\n",
IS_AVAIL1(cpu->extn.ap, "ActionPoint "),
IS_AVAIL1(cpu->extn.smart, "smaRT "),
IS_AVAIL1(cpu->extn.rtt, "RTT "));
if (cpu->dccm.sz || cpu->iccm.sz)
n += scnprintf(buf + n, len - n, "Extn [CCM]\t: DCCM @ %x, %d KB / ICCM: @ %x, %d KB\n",
cpu->dccm.base_addr, TO_KB(cpu->dccm.sz),
cpu->iccm.base_addr, TO_KB(cpu->iccm.sz));
n += scnprintf(buf + n, len - n,
"OS ABI [v3]\t: no-legacy-syscalls\n");
return buf;
}
static void arc_chk_core_config(void)
{
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
int fpu_enabled;
if (!cpu->timers.t0)
panic("Timer0 is not present!\n");
if (!cpu->timers.t1)
panic("Timer1 is not present!\n");
if (IS_ENABLED(CONFIG_ARC_HAS_RTSC) && !cpu->timers.rtsc)
panic("RTSC is not present\n");
#ifdef CONFIG_ARC_HAS_DCCM
/*
* DCCM can be arbitrarily placed in hardware.
* Make sure its placement/sz matches what Linux is built with
*/
if ((unsigned int)__arc_dccm_base != cpu->dccm.base_addr)
panic("Linux built with incorrect DCCM Base address\n");
if (CONFIG_ARC_DCCM_SZ != cpu->dccm.sz)
panic("Linux built with incorrect DCCM Size\n");
#endif
#ifdef CONFIG_ARC_HAS_ICCM
if (CONFIG_ARC_ICCM_SZ != cpu->iccm.sz)
panic("Linux built with incorrect ICCM Size\n");
#endif
/*
* FP hardware/software config sanity
* -If hardware contains DPFP, kernel needs to save/restore FPU state
* -If not, it will crash trying to save/restore the non-existent regs
*
* (only DPFP checked since SP has no arch visible regs)
*/
fpu_enabled = IS_ENABLED(CONFIG_ARC_FPU_SAVE_RESTORE);
if (cpu->extn.fpu_dp && !fpu_enabled)
pr_warn("CONFIG_ARC_FPU_SAVE_RESTORE needed for working apps\n");
else if (!cpu->extn.fpu_dp && fpu_enabled)
panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n");
}
/*
* Initialize and setup the processor core
* This is called by all the CPUs thus should not do special case stuff
* such as only for boot CPU etc
*/
void setup_processor(void)
{
char str[512];
int cpu_id = smp_processor_id();
read_arc_build_cfg_regs();
arc_init_IRQ();
printk(arc_cpu_mumbojumbo(cpu_id, str, sizeof(str)));
arc_mmu_init();
arc_cache_init();
printk(arc_extn_mumbojumbo(cpu_id, str, sizeof(str)));
printk(arc_platform_smp_cpuinfo());
arc_chk_core_config();
}
static inline int is_kernel(unsigned long addr)
{
if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
return 1;
return 0;
}
void __init setup_arch(char **cmdline_p)
{
/* make sure that the cmdline/dtb pointer passed by u-boot is valid */
if (uboot_tag && is_kernel((unsigned long)uboot_arg))
panic("Invalid uboot arg\n");
/* See if u-boot passed an external Device Tree blob */
machine_desc = setup_machine_fdt(uboot_arg); /* uboot_tag == 2 */
if (!machine_desc) {
/* No, so try the embedded one */
machine_desc = setup_machine_fdt(__dtb_start);
if (!machine_desc)
panic("Embedded DT invalid\n");
/*
* If we are here, it is established that @uboot_arg didn't
* point to a DT blob. Instead, if u-boot says it is a cmdline,
* append it to the embedded DT cmdline.
* setup_machine_fdt() would have populated @boot_command_line
*/
if (uboot_tag == 1) {
/* Ensure a whitespace between the 2 cmdlines */
strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
strlcat(boot_command_line, uboot_arg,
COMMAND_LINE_SIZE);
}
}
/* Save unparsed command line copy for /proc/cmdline */
*cmdline_p = boot_command_line;
/* To force early parsing of things like mem=xxx */
parse_early_param();
/* Platform/board specific: e.g. early console registration */
if (machine_desc->init_early)
machine_desc->init_early();
setup_processor();
smp_init_cpus();
setup_arch_memory();
/* copy flat DT out of .init and then unflatten it */
unflatten_and_copy_device_tree();
/* Can be an issue if someone passes cmd line arg "ro"
* But that is unlikely so keeping it as it is
*/
root_mountflags &= ~MS_RDONLY;
#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
conswitchp = &dummy_con;
#endif
arc_unwind_init();
arc_unwind_setup();
}
static int __init customize_machine(void)
{
of_clk_init(NULL);
/*
* Traverses flattened DeviceTree - registering platform devices
* (if any) complete with their resources
*/
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
if (machine_desc->init_machine)
machine_desc->init_machine();
return 0;
}
arch_initcall(customize_machine);
static int __init init_late_machine(void)
{
if (machine_desc->init_late)
machine_desc->init_late();
return 0;
}
late_initcall(init_late_machine);
/*
* Get CPU information for use by the procfs.
*/
#define cpu_to_ptr(c) ((void *)(0xFFFF0000 | (unsigned int)(c)))
#define ptr_to_cpu(p) (~0xFFFF0000UL & (unsigned int)(p))
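/*
 * Worked example of the encoding, for cpu-id 2:
 * cpu_to_ptr(2) == (void *)0xFFFF0002, a non-NULL iterator token
 * ptr_to_cpu((void *)0xFFFF0002) == 2
 */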
static int show_cpuinfo(struct seq_file *m, void *v)
{
char *str;
int cpu_id = ptr_to_cpu(v);
str = (char *)__get_free_page(GFP_TEMPORARY);
if (!str)
goto done;
seq_printf(m, arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE));
seq_printf(m, "Bogo MIPS\t: %lu.%02lu\n",
loops_per_jiffy / (500000 / HZ),
(loops_per_jiffy / (5000 / HZ)) % 100);
seq_printf(m, arc_mmu_mumbojumbo(cpu_id, str, PAGE_SIZE));
seq_printf(m, arc_cache_mumbojumbo(cpu_id, str, PAGE_SIZE));
seq_printf(m, arc_extn_mumbojumbo(cpu_id, str, PAGE_SIZE));
seq_printf(m, arc_platform_smp_cpuinfo());
free_page((unsigned long)str);
done:
seq_printf(m, "\n\n");
return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
/*
* Callback returns cpu-id to iterator for show routine, NULL to stop.
* However since NULL is also a valid cpu-id (0), we use a round-about
* way to pass it w/o having to kmalloc/free a 2 byte string.
* Encode cpu-id as 0xFFFFcccc, which is decoded by the show routine.
*/
return *pos < num_possible_cpus() ? cpu_to_ptr(*pos) : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
.start = c_start,
.next = c_next,
.stop = c_stop,
.show = show_cpuinfo
};
static DEFINE_PER_CPU(struct cpu, cpu_topology);
static int __init topology_init(void)
{
int cpu;
for_each_present_cpu(cpu)
register_cpu(&per_cpu(cpu_topology, cpu), cpu);
return 0;
}
subsys_initcall(topology_init);

360
arch/arc/kernel/signal.c Normal file
View file

@ -0,0 +1,360 @@
/*
* Signal Handling for ARC
*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* vineetg: Jan 2010 (Restarting of timer related syscalls)
*
* vineetg: Nov 2009 (Everything needed for TIF_RESTORE_SIGMASK)
* -do_signal() supports TIF_RESTORE_SIGMASK
* -do_signal() no longer needs oldset, required by OLD sys_sigsuspend
* -sys_rt_sigsuspend() now comes from generic code, so discard arch implementation
* -sys_sigsuspend() no longer needs to fudge ptregs, hence that arg removed
* -sys_sigsuspend() no longer loops for do_signal(), sets TIF_xxx and leaves
* the job to do_signal()
*
* vineetg: July 2009
* -Modified Code to support the uClibc provided userland sigreturn stub
* to avoid the kernel synthesizing it on the user stack at runtime, costing TLB
* probes and Cache line flushes.
*
* vineetg: July 2009
* -In stash_usr_regs( ) and restore_usr_regs( ), save/restore of user regs
* is done as a block copy rather than one word at a time.
* This saves around 2K of code and improves LMBench lat_sig <catch>
*
* rajeshwarr: Feb 2009
* - Support for Realtime Signals
*
* vineetg: Aug 11th 2008: Bug #94183
* -ViXS were still seeing crashes when using insmod to load drivers.
* It turned out that the code to change Execute permssions for TLB entries
* of user was not guarded for interrupts (mod_tlb_permission)
* This was causing TLB entries to be overwritten on unrelated indexes
*
* Vineetg: July 15th 2008: Bug #94183
* -Exception happens in Delay slot of a JMP, and before user space resumes,
* Signal is delivered (Ctrl + C) => SIGINT.
* setup_frame( ) sets up PC,SP,BLINK to enable user space signal handler
* to run, but doesn't clear the Delay slot bit from status32. As a result,
* on resuming user mode, signal handler branches off to BTA of orig JMP
* -FIX: clear the DE bit from status32 in setup_frame( )
*
* Rahul Trivedi, Kanika Nema: Codito Technologies 2004
*/
#include <linux/signal.h>
#include <linux/ptrace.h>
#include <linux/personality.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/tracehook.h>
#include <asm/ucontext.h>
struct rt_sigframe {
struct siginfo info;
struct ucontext uc;
#define MAGIC_SIGALTSTK 0x07302004
unsigned int sigret_magic;
};
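/*
* A sketch of the resulting user-stack layout (carved by setup_rt_frame()
* below; higher addresses at top, sp grows down, 8-byte aligned per
* get_sigframe()):
*
*	old regs->sp ->	| original user stack	|
*			| sigret_magic		| 0 or MAGIC_SIGALTSTK
*			| struct ucontext uc	| uc_mcontext/uc_sigmask always;
*			|			| fully populated for SA_SIGINFO
*	new regs->sp ->	| struct siginfo info	| filled only for SA_SIGINFO
*	(== sf)
*/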
static int
stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
sigset_t *set)
{
int err;
err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), regs,
sizeof(sf->uc.uc_mcontext.regs.scratch));
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
return err;
}
static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
{
sigset_t set;
int err;
err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
if (!err)
set_current_blocked(&set);
err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch),
sizeof(sf->uc.uc_mcontext.regs.scratch));
return err;
}
static inline int is_do_ss_needed(unsigned int magic)
{
if (MAGIC_SIGALTSTK == magic)
return 1;
else
return 0;
}
SYSCALL_DEFINE0(rt_sigreturn)
{
struct rt_sigframe __user *sf;
unsigned int magic;
struct pt_regs *regs = current_pt_regs();
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
/* Since we stacked the signal on a word boundary,
* then 'sp' should be word aligned here. If it's
* not, then the user is trying to mess with us.
*/
if (regs->sp & 3)
goto badframe;
sf = (struct rt_sigframe __force __user *)(regs->sp);
if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
goto badframe;
if (__get_user(magic, &sf->sigret_magic))
goto badframe;
if (unlikely(is_do_ss_needed(magic)))
if (restore_altstack(&sf->uc.uc_stack))
goto badframe;
if (restore_usr_regs(regs, sf))
goto badframe;
/* Don't restart from sigreturn */
syscall_wont_restart(regs);
/*
* Ensure that sigreturn always returns to user mode (in case the
* regs saved on user stack got fudged between save and sigreturn)
* Otherwise it is easy to panic the kernel with a custom
* signal handler and/or restorer which clobberes the status32/ret
* to return to a bogus location in kernel mode.
*/
regs->status32 |= STATUS_U_MASK;
return regs->r0;
badframe:
force_sig(SIGSEGV, current);
return 0;
}
/*
* Determine which stack to use..
*/
static inline void __user *get_sigframe(struct ksignal *ksig,
struct pt_regs *regs,
unsigned long framesize)
{
unsigned long sp = sigsp(regs->sp, ksig);
void __user *frame;
/* No matter what happens, 'sp' must be word
* aligned otherwise nasty things could happen
*/
/* ATPCS B01 mandates 8-byte alignment */
frame = (void __user *)((sp - framesize) & ~7);
/* Check that we can actually write to the signal frame */
if (!access_ok(VERIFY_WRITE, frame, framesize))
frame = NULL;
return frame;
}
/*
* translate the signal
*/
static inline int map_sig(int sig)
{
struct thread_info *thread = current_thread_info();
if (thread->exec_domain && thread->exec_domain->signal_invmap
&& sig < 32)
sig = thread->exec_domain->signal_invmap[sig];
return sig;
}
static int
setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
struct rt_sigframe __user *sf;
unsigned int magic = 0;
int err = 0;
sf = get_sigframe(ksig, regs, sizeof(struct rt_sigframe));
if (!sf)
return 1;
/*
* w/o SA_SIGINFO, struct ucontext is partially populated (only
* uc_mcontext/uc_sigmask) for kernel's normal user state preservation
* during signal handler execution. This works for SA_SIGINFO as well
* although the semantics are now overloaded (the same reg state can be
* inspected by userland: but are they allowed to fiddle with it ?
*/
err |= stash_usr_regs(sf, regs, set);
/*
* SA_SIGINFO requires 3 args to signal handler:
* #1: sig-no (common to any handler)
* #2: struct siginfo
* #3: struct ucontext (completely populated)
*/
if (unlikely(ksig->ka.sa.sa_flags & SA_SIGINFO)) {
err |= copy_siginfo_to_user(&sf->info, &ksig->info);
err |= __put_user(0, &sf->uc.uc_flags);
err |= __put_user(NULL, &sf->uc.uc_link);
err |= __save_altstack(&sf->uc.uc_stack, regs->sp);
/* setup args 2 and 3 for user mode handler */
regs->r1 = (unsigned long)&sf->info;
regs->r2 = (unsigned long)&sf->uc;
/*
* small optim to avoid unconditionally calling do_sigaltstack
* in sigreturn path, now that we only have rt_sigreturn
*/
magic = MAGIC_SIGALTSTK;
}
err |= __put_user(magic, &sf->sigret_magic);
if (err)
return err;
/* #1 arg to the user Signal handler */
regs->r0 = map_sig(ksig->sig);
/* setup PC of user space signal handler */
regs->ret = (unsigned long)ksig->ka.sa.sa_handler;
/*
* handler returns using sigreturn stub provided already by userspace
* If not, nuke the process right away
*/
if (!(ksig->ka.sa.sa_flags & SA_RESTORER))
return 1;
regs->blink = (unsigned long)ksig->ka.sa.sa_restorer;
/* User Stack for signal handler will be above the frame just carved */
regs->sp = (unsigned long)sf;
/*
* Bug 94183, Clear the DE bit, so that when signal handler
* starts to run, it doesn't use BTA
*/
regs->status32 &= ~STATUS_DE_MASK;
regs->status32 |= STATUS_L_MASK;
return err;
}
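/*
* Summary of the user-mode state handed off above:
*	r0    = translated signal number (arg #1 of the handler)
*	r1/r2 = &sf->info / &sf->uc (only for SA_SIGINFO)
*	ret   = sa_handler, i.e. PC when user mode resumes
*	blink = sa_restorer, so the handler "returns" into the sigreturn stub
*	sp    = sf, the frame just carved on the user stack
*/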
static void arc_restart_syscall(struct k_sigaction *ka, struct pt_regs *regs)
{
switch (regs->r0) {
case -ERESTART_RESTARTBLOCK:
case -ERESTARTNOHAND:
/*
* ERESTARTNOHAND means that the syscall should
* only be restarted if there was no handler for
* the signal, and since we only get here if there
* is a handler, we don't restart
*/
regs->r0 = -EINTR; /* ERESTART_xxx is internal */
break;
case -ERESTARTSYS:
/*
* ERESTARTSYS means to restart the syscall if
* there is no handler or the handler was
* registered with SA_RESTART
*/
if (!(ka->sa.sa_flags & SA_RESTART)) {
regs->r0 = -EINTR;
break;
}
/* fallthrough */
case -ERESTARTNOINTR:
/*
* ERESTARTNOINTR means that the syscall should
* be called again after the signal handler returns.
* Setup reg state just as it was before doing the trap
* r0 has been clobbered with sys call ret code thus it
* needs to be reloaded with orig first arg to syscall
* in orig_r0. Rest of relevant reg-file:
* r8 (syscall num) and (r1 - r7) will be reset to
* their orig user space value when we ret from kernel
*/
regs->r0 = regs->orig_r0;
regs->ret -= 4;
break;
}
}
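/*
* Why "regs->ret -= 4" restarts a syscall: the syscall trap insn that
* entered the kernel is 4 bytes (hence the fixed -4), so rewinding the
* saved user PC by 4 makes it re-execute on return to user mode. E.g.
* (address hypothetical): trap at 0x20001000, saved ret = 0x20001004;
* after ret -= 4, user mode resumes at the trap itself.
*/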
/*
* OK, we're invoking a handler
*/
static void
handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
sigset_t *oldset = sigmask_to_save();
int failed;
/* Set up the stack frame */
failed = setup_rt_frame(ksig, oldset, regs);
signal_setup_done(failed, ksig, 0);
}
void do_signal(struct pt_regs *regs)
{
struct ksignal ksig;
int restart_scall;
restart_scall = in_syscall(regs) && syscall_restartable(regs);
if (get_signal(&ksig)) {
if (restart_scall) {
arc_restart_syscall(&ksig.ka, regs);
syscall_wont_restart(regs); /* No more restarts */
}
handle_signal(&ksig, regs);
return;
}
if (restart_scall) {
/* No handler for syscall: restart it */
if (regs->r0 == -ERESTARTNOHAND ||
regs->r0 == -ERESTARTSYS || regs->r0 == -ERESTARTNOINTR) {
regs->r0 = regs->orig_r0;
regs->ret -= 4;
} else if (regs->r0 == -ERESTART_RESTARTBLOCK) {
regs->r8 = __NR_restart_syscall;
regs->ret -= 4;
}
syscall_wont_restart(regs); /* No more restarts */
}
/* If there's no signal to deliver, restore the saved sigmask back */
restore_saved_sigmask();
}
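/*
* Note the asymmetry: when a handler runs, arc_restart_syscall() maps
* ERESTART_RESTARTBLOCK to -EINTR, but with no handler (above) it is
* restarted by rewriting r8 (the syscall number register) to
* __NR_restart_syscall, so the kernel's restart block - not a blind
* re-issue of the original syscall - performs the restart.
*/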
void do_notify_resume(struct pt_regs *regs)
{
/*
* ASM glue guarantees that this is only called when returning to
* user mode
*/
if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
tracehook_notify_resume(regs);
}

340
arch/arc/kernel/smp.c Normal file
View file

@ -0,0 +1,340 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* RajeshwarR: Dec 11, 2007
* -- Added support for Inter Processor Interrupts
*
* Vineetg: Nov 1st, 2007
* -- Initial Write (Borrowed heavily from ARM)
*/
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/reboot.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mach_desc.h>
arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct plat_smp_ops plat_smp_ops;
/* XXX: per cpu ? Only needed once in early secondary boot */
struct task_struct *secondary_idle_tsk;
/* Called from start_kernel */
void __init smp_prepare_boot_cpu(void)
{
}
/*
* Initialise the CPU possible map early - this describes the CPUs
* which may be present or become present in the system.
*/
void __init smp_init_cpus(void)
{
unsigned int i;
for (i = 0; i < NR_CPUS; i++)
set_cpu_possible(i, true);
}
/* called from init ( ) => process 1 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
int i;
/*
* Initialise the present map, which describes the set of CPUs
* actually populated at the present time.
*/
for (i = 0; i < max_cpus; i++)
set_cpu_present(i, true);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}
/*
* After power-up, a non Master CPU needs to wait for Master to kick start it
*
* The default implementation halts
*
* This relies on platform specific support allowing Master to directly set
* this CPU's PC (to be @first_lines_of_secondary()) and kick start it.
*
* In lack of such h/w assist, platforms can override this function
* - make this function busy-spin on a token, eventually set by Master
* (from arc_platform_smp_wakeup_cpu())
* - Once token is available, jump to @first_lines_of_secondary
* (using inline asm).
*
* Alert: can NOT use stack here as it has not been determined/setup for CPU.
* If it turns out to be elaborate, it's better to code it in assembly
*
*/
void __weak arc_platform_smp_wait_to_boot(int cpu)
{
/*
* As a hack for debugging - since debugger will single-step over the
* FLAG insn - wrap the halt itself in a self loop
*/
__asm__ __volatile__(
"1: \n"
" flag 1 \n"
" b 1b \n");
}
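/*
* A minimal sketch of the busy-spin override described above, assuming
* a hypothetical @wake_flag token set by the Master from the platform's
* arc_platform_smp_wakeup_cpu(). Per the Alert (no stack yet), nothing
* here may spill to stack, and the final jump would really be inline asm:
*
*	void arc_platform_smp_wait_to_boot(int cpu)
*	{
*		while (!wake_flag)
*			;
*		jump_to(first_lines_of_secondary);	// inline asm in practice
*	}
*/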
const char *arc_platform_smp_cpuinfo(void)
{
return plat_smp_ops.info ? : "";
}
/*
* The very first "C" code executed by secondary
* Called from asm stub in head.S
* "current"/R25 already setup by low level boot code
*/
void start_kernel_secondary(void)
{
struct mm_struct *mm = &init_mm;
unsigned int cpu = smp_processor_id();
/* MMU, Caches, Vector Table, Interrupts etc */
setup_processor();
atomic_inc(&mm->mm_users);
atomic_inc(&mm->mm_count);
current->active_mm = mm;
cpumask_set_cpu(cpu, mm_cpumask(mm));
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);
pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
if (machine_desc->init_smp)
machine_desc->init_smp(cpu);
arc_local_timer_setup();
local_irq_enable();
preempt_disable();
cpu_startup_entry(CPUHP_ONLINE);
}
/*
* Called from kernel_init( ) -> smp_init( ) - for each CPU
*
* At this point, Secondary Processor is "HALT"ed:
* -It booted, but was halted in head.S
* -It was configured to halt-on-reset
* So we need to wake it up.
*
* Essential requirements being where to run from (PC) and stack (SP)
*/
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
unsigned long wait_till;
secondary_idle_tsk = idle;
pr_info("Idle Task [%d] %p", cpu, idle);
pr_info("Trying to bring up CPU%u ...\n", cpu);
if (plat_smp_ops.cpu_kick)
plat_smp_ops.cpu_kick(cpu,
(unsigned long)first_lines_of_secondary);
/* wait for 1 sec after kicking the secondary */
wait_till = jiffies + HZ;
while (time_before(jiffies, wait_till)) {
if (cpu_online(cpu))
break;
}
if (!cpu_online(cpu)) {
pr_info("Timeout: CPU%u FAILED to comeup !!!\n", cpu);
return -1;
}
secondary_idle_tsk = NULL;
return 0;
}
/*
* not supported here
*/
int __init setup_profiling_timer(unsigned int multiplier)
{
return -EINVAL;
}
/*****************************************************************************/
/* Inter Processor Interrupt Handling */
/*****************************************************************************/
enum ipi_msg_type {
IPI_EMPTY = 0,
IPI_RESCHEDULE = 1,
IPI_CALL_FUNC,
IPI_CPU_STOP,
};
/*
* In arches with IRQ for each msg type (above), receiver can use IRQ-id to
* figure out what msg was sent. For those which don't (ARC has dedicated IPI
* IRQ), the msg-type needs to be conveyed via per-cpu data
*/
static DEFINE_PER_CPU(unsigned long, ipi_data);
static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
{
unsigned long __percpu *ipi_data_ptr = per_cpu_ptr(&ipi_data, cpu);
unsigned long old, new;
unsigned long flags;
pr_debug("%d Sending msg [%d] to %d\n", smp_processor_id(), msg, cpu);
local_irq_save(flags);
/*
* Atomically write new msg bit (in case others are writing too),
* and read back old value
*/
do {
new = old = *ipi_data_ptr;
new |= 1U << msg;
} while (cmpxchg(ipi_data_ptr, old, new) != old);
/*
* Call the platform specific IPI kick function, but avoid if possible:
* Only do so if there's no pending msg from other concurrent sender(s).
* Otherwise, receiver will see this msg as well when it takes the
* IPI corresponding to that msg. This is true, even if it is already in
* IPI handler, because !@old means it has not yet dequeued the msg(s)
* so @new msg can be a free-loader
*/
if (plat_smp_ops.ipi_send && !old)
plat_smp_ops.ipi_send(cpu);
local_irq_restore(flags);
}
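/*
* Worked example of the elision above: sender A posts IPI_RESCHEDULE:
* old = 0b000, new = 0b010 -> @old was 0, so A kicks the IPI line.
* Before the receiver runs, sender B posts IPI_CALL_FUNC: old = 0b010,
* new = 0b110 -> @old non-zero, no second kick; the receiver's single
* xchg() in do_IPI() then dequeues and handles both bits at once.
*/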
static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg)
{
unsigned int cpu;
for_each_cpu(cpu, callmap)
ipi_send_msg_one(cpu, msg);
}
void smp_send_reschedule(int cpu)
{
ipi_send_msg_one(cpu, IPI_RESCHEDULE);
}
void smp_send_stop(void)
{
struct cpumask targets;
cpumask_copy(&targets, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &targets);
ipi_send_msg(&targets, IPI_CPU_STOP);
}
void arch_send_call_function_single_ipi(int cpu)
{
ipi_send_msg_one(cpu, IPI_CALL_FUNC);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
ipi_send_msg(mask, IPI_CALL_FUNC);
}
/*
* ipi_cpu_stop - handle IPI from smp_send_stop()
*/
static void ipi_cpu_stop(void)
{
machine_halt();
}
static inline void __do_IPI(unsigned long msg)
{
switch (msg) {
case IPI_RESCHEDULE:
scheduler_ipi();
break;
case IPI_CALL_FUNC:
generic_smp_call_function_interrupt();
break;
case IPI_CPU_STOP:
ipi_cpu_stop();
break;
default:
pr_warn("IPI with unexpected msg %ld\n", msg);
}
}
/*
* arch-common ISR to handle for inter-processor interrupts
* Has hooks for platform specific IPI
*/
irqreturn_t do_IPI(int irq, void *dev_id)
{
unsigned long pending;
pr_debug("IPI [%ld] received on cpu %d\n",
*this_cpu_ptr(&ipi_data), smp_processor_id());
if (plat_smp_ops.ipi_clear)
plat_smp_ops.ipi_clear(irq);
/*
* "dequeue" the msg corresponding to this IPI (and possibly other
* piggybacked msg from elided IPIs: see ipi_send_msg_one() above)
*/
pending = xchg(this_cpu_ptr(&ipi_data), 0);
do {
unsigned long msg = __ffs(pending);
__do_IPI(msg);
pending &= ~(1U << msg);
} while (pending);
return IRQ_HANDLED;
}
/*
* API called by platform code to hookup arch-common ISR to their IPI IRQ
*/
static DEFINE_PER_CPU(int, ipi_dev);
int smp_ipi_irq_setup(int cpu, int irq)
{
int *dev = per_cpu_ptr(&ipi_dev, cpu);
arc_request_percpu_irq(irq, cpu, do_IPI, "IPI Interrupt", dev);
return 0;
}

250
arch/arc/kernel/stacktrace.c Normal file
View file

@ -0,0 +1,250 @@
/*
* stacktrace.c : stacktracing APIs needed by rest of kernel
* (wrappers over ARC dwarf based unwinder)
*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* vineetg: aug 2009
* -Implemented CONFIG_STACKTRACE APIs, primarily save_stack_trace_tsk( )
* for displaying task's kernel mode call stack in /proc/<pid>/stack
* -Iterator based approach to have single copy of unwinding core and APIs
* needing unwinding, implement the logic in iterator regarding:
* = which frame onwards to start capture
* = which frame to stop capturing (wchan)
* = specifics of data structs where trace is saved(CONFIG_STACKTRACE etc)
*
* vineetg: March 2009
* -Implemented correct versions of thread_saved_pc() and get_wchan()
*
* rajeshwarr: 2008
* -Initial implementation
*/
#include <linux/ptrace.h>
#include <linux/export.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <asm/arcregs.h>
#include <asm/unwind.h>
#include <asm/switch_to.h>
/*-------------------------------------------------------------------------
* Unwinder Iterator
*-------------------------------------------------------------------------
*/
#ifdef CONFIG_ARC_DW2_UNWIND
static void seed_unwind_frame_info(struct task_struct *tsk,
struct pt_regs *regs,
struct unwind_frame_info *frame_info)
{
if (tsk == NULL && regs == NULL) {
unsigned long fp, sp, blink, ret;
frame_info->task = current;
__asm__ __volatile__(
"mov %0,r27\n\t"
"mov %1,r28\n\t"
"mov %2,r31\n\t"
"mov %3,r63\n\t"
: "=r"(fp), "=r"(sp), "=r"(blink), "=r"(ret)
);
frame_info->regs.r27 = fp;
frame_info->regs.r28 = sp;
frame_info->regs.r31 = blink;
frame_info->regs.r63 = ret;
frame_info->call_frame = 0;
} else if (regs == NULL) {
frame_info->task = tsk;
frame_info->regs.r27 = TSK_K_FP(tsk);
frame_info->regs.r28 = TSK_K_ESP(tsk);
frame_info->regs.r31 = TSK_K_BLINK(tsk);
frame_info->regs.r63 = (unsigned int)__switch_to;
/* In the prologue of __switch_to, first FP is saved on stack
* and then SP is copied to FP. Dwarf assumes cfa as FP based
* but we didn't save FP. The value retrieved above is FP's
* state in previous frame.
* As a workaround for this, we unwind from the start of __switch_to
* and adjust SP accordingly. The other limitation is that dwarf
* rules are not generated for the inline assembly code in the
* __switch_to macro.
*/
frame_info->regs.r27 = 0;
frame_info->regs.r28 += 60;
frame_info->call_frame = 0;
} else {
frame_info->task = tsk;
frame_info->regs.r27 = regs->fp;
frame_info->regs.r28 = regs->sp;
frame_info->regs.r31 = regs->blink;
frame_info->regs.r63 = regs->ret;
frame_info->call_frame = 0;
}
}
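/*
* Summary of the three seeding modes above:
*	tsk == NULL, regs == NULL: unwind the current call chain, seeded
*	    from the live fp/sp/blink/pc snapshot
*	regs == NULL: unwind a sleeping @tsk from its switched-out kernel
*	    state (TSK_K_*), starting at __switch_to with SP adjusted
*	otherwise: unwind from the exception-time snapshot in @regs
*/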
#endif
static noinline unsigned int
arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
int (*consumer_fn) (unsigned int, void *), void *arg)
{
#ifdef CONFIG_ARC_DW2_UNWIND
int ret = 0;
unsigned int address;
struct unwind_frame_info frame_info;
seed_unwind_frame_info(tsk, regs, &frame_info);
while (1) {
address = UNW_PC(&frame_info);
if (address && __kernel_text_address(address)) {
if (consumer_fn(address, arg) == -1)
break;
}
ret = arc_unwind(&frame_info);
if (ret == 0) {
frame_info.regs.r63 = frame_info.regs.r31;
continue;
} else {
break;
}
}
return address; /* return the last address it saw */
#else
/* On ARC, only the dwarf based unwinder works. fp based backtracing is
* not possible (even with -fno-omit-frame-pointer) because of the way the
* function prologue is set up (callee regs saved first and then fp set,
* not the other way around)
*/
pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
return 0;
#endif
}
/*-------------------------------------------------------------------------
* callbacks called by unwinder iterator to implement kernel APIs
*
* The callback can return -1 to force the iterator to stop, which by default
* keeps going till the bottom-most frame.
*-------------------------------------------------------------------------
*/
/* Call-back which plugs into unwinding core to dump the stack in
* case of panic/OOPs/BUG etc
*/
static int __print_sym(unsigned int address, void *unused)
{
__print_symbol(" %s\n", address);
return 0;
}
#ifdef CONFIG_STACKTRACE
/* Call-back which plugs into unwinding core to capture the
* traces needed by kernel on /proc/<pid>/stack
*/
static int __collect_all(unsigned int address, void *arg)
{
struct stack_trace *trace = arg;
if (trace->skip > 0)
trace->skip--;
else
trace->entries[trace->nr_entries++] = address;
if (trace->nr_entries >= trace->max_entries)
return -1;
return 0;
}
static int __collect_all_but_sched(unsigned int address, void *arg)
{
struct stack_trace *trace = arg;
if (in_sched_functions(address))
return 0;
if (trace->skip > 0)
trace->skip--;
else
trace->entries[trace->nr_entries++] = address;
if (trace->nr_entries >= trace->max_entries)
return -1;
return 0;
}
#endif
static int __get_first_nonsched(unsigned int address, void *unused)
{
if (in_sched_functions(address))
return 0;
return -1;
}
/*-------------------------------------------------------------------------
* APIs expected by various kernel sub-systems
*-------------------------------------------------------------------------
*/
noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs)
{
pr_info("\nStack Trace:\n");
arc_unwind_core(tsk, regs, __print_sym, NULL);
}
EXPORT_SYMBOL(show_stacktrace);
/* Expected by sched Code */
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
show_stacktrace(tsk, NULL);
}
/* Another API expected by the scheduler, shows up in "ps" as Wait Channel
* Of course just returning schedule( ) would be pointless so unwind until
* the function is not in scheduler code
*/
unsigned int get_wchan(struct task_struct *tsk)
{
return arc_unwind_core(tsk, NULL, __get_first_nonsched, NULL);
}
#ifdef CONFIG_STACKTRACE
/*
* API required by CONFIG_STACKTRACE, CONFIG_LATENCYTOP.
* A typical use is when /proc/<pid>/stack is queried by userland
*/
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
/* Assumes @tsk is sleeping so unwinds from __switch_to */
arc_unwind_core(tsk, NULL, __collect_all_but_sched, trace);
}
void save_stack_trace(struct stack_trace *trace)
{
/* Pass NULL for task so it unwinds the current call frame */
arc_unwind_core(NULL, NULL, __collect_all, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif

16
arch/arc/kernel/sys.c Normal file
View file

@ -0,0 +1,16 @@
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/unistd.h>
#include <asm/syscalls.h>
#define sys_clone sys_clone_wrapper
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),
void *sys_call_table[NR_syscalls] = {
[0 ... NR_syscalls-1] = sys_ni_syscall,
#include <asm/unistd.h>
};
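/*
* How the table above works: the range initializer first points every
* slot at sys_ni_syscall; the __SYSCALL() entries generated by
* <asm/unistd.h> then override the implemented slots (in C, a later
* designated initializer wins), so any unimplemented hole safely falls
* through to sys_ni_syscall.
*/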

261
arch/arc/kernel/time.c Normal file
View file

@ -0,0 +1,261 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* vineetg: Jan 2011
* -sched_clock( ) no longer jiffies based. Uses the same clocksource
* as gtod
*
* Rajeshwarr/Vineetg: Mar 2008
* -Implemented CONFIG_GENERIC_TIME (rather deleted arch specific code)
* for arch independent gettimeofday()
* -Implemented CONFIG_GENERIC_CLOCKEVENTS as base for hrtimers
*
* Vineetg: Mar 2008: Forked off from time.c which now is time-jiff.c
*/
/* ARC700 has two 32bit independent prog Timers: TIMER0 and TIMER1
* Each can be programmed to go from @count to @limit and optionally
* interrupt when that happens.
* A write to Control Register clears the Interrupt
*
* We've designated TIMER0 for events (clockevents)
* while TIMER1 for free running (clocksource)
*
* Newer ARC700 cores have 64bit clk fetching RTSC insn, preferred over TIMER1
*/
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/timex.h>
#include <linux/profile.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <asm/irq.h>
#include <asm/arcregs.h>
#include <asm/clk.h>
#include <asm/mach_desc.h>
/* Timer related Aux registers */
#define ARC_REG_TIMER0_LIMIT 0x23 /* timer 0 limit */
#define ARC_REG_TIMER0_CTRL 0x22 /* timer 0 control */
#define ARC_REG_TIMER0_CNT 0x21 /* timer 0 count */
#define ARC_REG_TIMER1_LIMIT 0x102 /* timer 1 limit */
#define ARC_REG_TIMER1_CTRL 0x101 /* timer 1 control */
#define ARC_REG_TIMER1_CNT 0x100 /* timer 1 count */
#define TIMER_CTRL_IE (1 << 0) /* Interrupt when Count reaches limit */
#define TIMER_CTRL_NH (1 << 1) /* Count only when CPU NOT halted */
#define ARC_TIMER_MAX 0xFFFFFFFF
/********** Clock Source Device *********/
#ifdef CONFIG_ARC_HAS_RTSC
int arc_counter_setup(void)
{
/*
* For SMP this needs to be 0. However Kconfig glue doesn't
* enable this option for SMP configs
*/
return 1;
}
static cycle_t arc_counter_read(struct clocksource *cs)
{
unsigned long flags;
union {
#ifdef CONFIG_CPU_BIG_ENDIAN
struct { u32 high, low; };
#else
struct { u32 low, high; };
#endif
cycle_t full;
} stamp;
flags = arch_local_irq_save();
__asm__ __volatile(
" .extCoreRegister tsch, 58, r, cannot_shortcut \n"
" rtsc %0, 0 \n"
" mov %1, 0 \n"
: "=r" (stamp.low), "=r" (stamp.high));
arch_local_irq_restore(flags);
return stamp.full;
}
static struct clocksource arc_counter = {
.name = "ARC RTSC",
.rating = 300,
.read = arc_counter_read,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
#else /* !CONFIG_ARC_HAS_RTSC */
static bool is_usable_as_clocksource(void)
{
#ifdef CONFIG_SMP
return 0;
#else
return 1;
#endif
}
/*
* set 32bit TIMER1 to keep counting monotonically and wraparound
*/
int arc_counter_setup(void)
{
write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX);
write_aux_reg(ARC_REG_TIMER1_CNT, 0);
write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
return is_usable_as_clocksource();
}
static cycle_t arc_counter_read(struct clocksource *cs)
{
return (cycle_t) read_aux_reg(ARC_REG_TIMER1_CNT);
}
static struct clocksource arc_counter = {
.name = "ARC Timer1",
.rating = 300,
.read = arc_counter_read,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
#endif
/********** Clock Event Device *********/
/*
* Arm the timer to interrupt after @cycles
* The distinction for oneshot/periodic is done in arc_event_timer_ack() below
*/
static void arc_timer_event_setup(unsigned int cycles)
{
write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles);
write_aux_reg(ARC_REG_TIMER0_CNT, 0); /* start from 0 */
write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
}
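/*
* e.g. (core clock assumed for illustration): at 80 MHz with HZ == 100,
* the periodic case below programs LIMIT = 80000000 / 100 = 800000
* cycles, i.e. one timer interrupt every 10 ms.
*/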
static int arc_clkevent_set_next_event(unsigned long delta,
struct clock_event_device *dev)
{
arc_timer_event_setup(delta);
return 0;
}
static void arc_clkevent_set_mode(enum clock_event_mode mode,
struct clock_event_device *dev)
{
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
/*
* At a core clock of X Hz, 1 sec (1000 ms) takes X cycles;
* so one HZ-period tick (10 ms for HZ=100) takes X / HZ cycles
*/
arc_timer_event_setup(arc_get_core_freq() / HZ);
break;
case CLOCK_EVT_MODE_ONESHOT:
break;
default:
break;
}
return;
}
static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
.name = "ARC Timer0",
.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
.mode = CLOCK_EVT_MODE_UNUSED,
.rating = 300,
.irq = TIMER0_IRQ, /* hardwired, no need for resources */
.set_next_event = arc_clkevent_set_next_event,
.set_mode = arc_clkevent_set_mode,
};
static irqreturn_t timer_irq_handler(int irq, void *dev_id)
{
/*
* Note that generic IRQ core could have passed @evt for @dev_id if
* irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
*/
struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
int irq_reenable = evt->mode == CLOCK_EVT_MODE_PERIODIC;
/*
* Any write to the CTRL reg ACKs the interrupt; we rewrite the
* count-when-[N]ot-[H]alted bit, and re-arm it for periodic mode
* by also setting the [I]nterrupt [E]nable bit
*/
write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH);
evt->event_handler(evt);
return IRQ_HANDLED;
}
/*
* Setup the local event timer for @cpu
*/
void arc_local_timer_setup(void)
{
struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
int cpu = smp_processor_id();
evt->cpumask = cpumask_of(cpu);
clockevents_config_and_register(evt, arc_get_core_freq(),
0, ARC_TIMER_MAX);
/* setup the per-cpu timer IRQ handler - for all cpus */
arc_request_percpu_irq(TIMER0_IRQ, cpu, timer_irq_handler,
"Timer0 (per-cpu-tick)", evt);
}
/*
* Called from start_kernel() - boot CPU only
*
* -Sets up h/w timers as applicable on boot cpu
* -Also sets up any global state needed for timer subsystem:
* - for "counting" timer, registers a clocksource, usable across CPUs
* (provided that underlying counter h/w is synchronized across cores)
* - for "event" timer, sets up TIMER0 IRQ (as that is platform agnostic)
*/
void __init time_init(void)
{
/*
* sets up the timekeeping free-flowing counter which also returns
* whether the counter is usable as clocksource
*/
if (arc_counter_setup())
/*
* CLK up to 4.29 GHz can be safely represented in 32 bits
* because Max 32 bit number is 4,294,967,295
*/
clocksource_register_hz(&arc_counter, arc_get_core_freq());
/* sets up the periodic event timer */
arc_local_timer_setup();
if (machine_desc->init_time)
machine_desc->init_time();
}

157
arch/arc/kernel/traps.c Normal file
View file

@ -0,0 +1,157 @@
/*
* Traps/Non-MMU Exception handling for ARC
*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* vineetg: May 2011
* -user-space unaligned access emulation
*
* Rahul Trivedi: Codito Technologies 2004
*/
#include <linux/sched.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <linux/kprobes.h>
#include <linux/kgdb.h>
#include <asm/setup.h>
#include <asm/unaligned.h>
#include <asm/kprobes.h>
void __init trap_init(void)
{
return;
}
void die(const char *str, struct pt_regs *regs, unsigned long address)
{
show_kernel_fault_diag(str, regs, address);
/* DEAD END */
__asm__("flag 1");
}
/*
* Helper called for bulk of exceptions NOT needing specific handling
* -for user faults enqueues requested signal
* -for kernel, chk if due to copy_(to|from)_user, otherwise die()
*/
static noinline int
handle_exception(const char *str, struct pt_regs *regs, siginfo_t *info)
{
if (user_mode(regs)) {
struct task_struct *tsk = current;
tsk->thread.fault_address = (__force unsigned int)info->si_addr;
force_sig_info(info->si_signo, info, tsk);
} else {
/* If not due to copy_(to|from)_user, we are doomed */
if (fixup_exception(regs))
return 0;
die(str, regs, (unsigned long)info->si_addr);
}
return 1;
}
#define DO_ERROR_INFO(signr, str, name, sicode) \
int name(unsigned long address, struct pt_regs *regs) \
{ \
siginfo_t info = { \
.si_signo = signr, \
.si_errno = 0, \
.si_code = sicode, \
.si_addr = (void __user *)address, \
}; \
return handle_exception(str, regs, &info);\
}
/*
* Entry points for exceptions NOT needing specific handling
*/
DO_ERROR_INFO(SIGILL, "Priv Op/Disabled Extn", do_privilege_fault, ILL_PRVOPC)
DO_ERROR_INFO(SIGILL, "Invalid Extn Insn", do_extension_fault, ILL_ILLOPC)
DO_ERROR_INFO(SIGILL, "Illegal Insn (or Seq)", insterror_is_error, ILL_ILLOPC)
DO_ERROR_INFO(SIGBUS, "Invalid Mem Access", do_memory_error, BUS_ADRERR)
DO_ERROR_INFO(SIGTRAP, "Breakpoint Set", trap_is_brkpt, TRAP_BRKPT)
DO_ERROR_INFO(SIGBUS, "Misaligned Access", do_misaligned_error, BUS_ADRALN)
/*
* Entry Point for Misaligned Data access Exception, for emulating in software
*/
int do_misaligned_access(unsigned long address, struct pt_regs *regs,
struct callee_regs *cregs)
{
/* If emulation not enabled, or failed, kill the task */
if (misaligned_fixup(address, regs, cregs) != 0)
return do_misaligned_error(address, regs);
return 0;
}
/*
* Entry point for miscll errors such as Nested Exceptions
* -Duplicate TLB entry is handled separately though
*/
void do_machine_check_fault(unsigned long address, struct pt_regs *regs)
{
die("Machine Check Exception", regs, address);
}
/*
* Entry point for traps induced by ARCompact TRAP_S <n> insn
* This is same family as TRAP0/SWI insn (use the same vector).
* The only difference being SWI insn take no operand, while TRAP_S does
* which reflects in ECR Reg as 8 bit param.
* Thus TRAP_S <n> can be used for specific purpose
* -1 used for software breakpointing (gdb)
* -2 used by kprobes
*/
void do_non_swi_trap(unsigned long address, struct pt_regs *regs)
{
unsigned int param = regs->ecr_param;
switch (param) {
case 1:
trap_is_brkpt(address, regs);
break;
case 2:
trap_is_kprobe(address, regs);
break;
case 3:
case 4:
kgdb_trap(regs);
break;
default:
break;
}
}
/*
* Entry point for Instruction Error Exception
* -For a corner case, ARC kprobes implementation resorts to using
* this exception, hence the check
*/
void do_insterror_or_kprobe(unsigned long address, struct pt_regs *regs)
{
int rc;
/* Check if this exception is caused by kprobes */
rc = notify_die(DIE_IERR, "kprobe_ierr", regs, address, 0, SIGILL);
if (rc == NOTIFY_STOP)
return;
insterror_is_error(address, regs);
}

334
arch/arc/kernel/troubleshoot.c Normal file
View file

@ -0,0 +1,334 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/ptrace.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/kdev_t.h>
#include <linux/fs_struct.h>
#include <linux/proc_fs.h>
#include <linux/file.h>
#include <asm/arcregs.h>
/*
* Common routine to print scratch regs (r0-r12) or callee regs (r13-r25)
* -Prints 3 regs per line and a CR.
* -To continue, callee regs right after scratch, special handling of CR
*/
static noinline void print_reg_file(long *reg_rev, int start_num)
{
unsigned int i;
char buf[512];
int n = 0, len = sizeof(buf);
for (i = start_num; i < start_num + 13; i++) {
n += scnprintf(buf + n, len - n, "r%02u: 0x%08lx\t",
i, (unsigned long)*reg_rev);
if (((i + 1) % 3) == 0)
n += scnprintf(buf + n, len - n, "\n");
/* because pt_regs has regs reversed: r12..r0, r25..r13 */
reg_rev--;
}
if (start_num != 0)
n += scnprintf(buf + n, len - n, "\n\n");
/* To continue printing callee regs on same line as scratch regs */
if (start_num == 0)
pr_info("%s", buf);
else
pr_cont("%s\n", buf);
}
static void show_callee_regs(struct callee_regs *cregs)
{
print_reg_file(&(cregs->r13), 13);
}
void print_task_path_n_nm(struct task_struct *tsk, char *buf)
{
struct path path;
char *path_nm = NULL;
struct mm_struct *mm;
struct file *exe_file;
mm = get_task_mm(tsk);
if (!mm)
goto done;
exe_file = get_mm_exe_file(mm);
mmput(mm);
if (exe_file) {
path = exe_file->f_path;
path_get(&exe_file->f_path);
fput(exe_file);
path_nm = d_path(&path, buf, 255);
path_put(&path);
}
done:
pr_info("Path: %s\n", path_nm);
}
EXPORT_SYMBOL(print_task_path_n_nm);
static void show_faulting_vma(unsigned long address, char *buf)
{
struct vm_area_struct *vma;
struct inode *inode;
unsigned long ino = 0;
dev_t dev = 0;
char *nm = buf;
struct mm_struct *active_mm = current->active_mm;
/* can't use print_vma_addr() yet as it doesn't check for
* non-inclusive vma
*/
down_read(&active_mm->mmap_sem);
vma = find_vma(active_mm, address);
/* check against the find_vma( ) behaviour which returns the next VMA
* if the container VMA is not found
*/
if (vma && (vma->vm_start <= address)) {
struct file *file = vma->vm_file;
if (file) {
struct path *path = &file->f_path;
nm = d_path(path, buf, PAGE_SIZE - 1);
inode = file_inode(vma->vm_file);
dev = inode->i_sb->s_dev;
ino = inode->i_ino;
}
pr_info(" @off 0x%lx in [%s]\n"
" VMA: 0x%08lx to 0x%08lx\n",
vma->vm_start < TASK_UNMAPPED_BASE ?
address : address - vma->vm_start,
nm, vma->vm_start, vma->vm_end);
} else
pr_info(" @No matching VMA found\n");
up_read(&active_mm->mmap_sem);
}
static void show_ecr_verbose(struct pt_regs *regs)
{
unsigned int vec, cause_code;
unsigned long address;
pr_info("\n[ECR ]: 0x%08lx => ", regs->event);
/* For Data fault, this is data address not instruction addr */
address = current->thread.fault_address;
vec = regs->ecr_vec;
cause_code = regs->ecr_cause;
/* For DTLB Miss or ProtV, display the memory involved too */
if (vec == ECR_V_DTLB_MISS) {
pr_cont("Invalid %s @ 0x%08lx by insn @ 0x%08lx\n",
(cause_code == 0x01) ? "Read" :
((cause_code == 0x02) ? "Write" : "EX"),
address, regs->ret);
} else if (vec == ECR_V_ITLB_MISS) {
pr_cont("Insn could not be fetched\n");
} else if (vec == ECR_V_MACH_CHK) {
pr_cont("%s\n", (cause_code == 0x0) ?
"Double Fault" : "Other Fatal Err");
} else if (vec == ECR_V_PROTV) {
if (cause_code == ECR_C_PROTV_INST_FETCH)
pr_cont("Execute from Non-exec Page\n");
else if (cause_code == ECR_C_PROTV_MISALIG_DATA)
pr_cont("Misaligned r/w from 0x%08lx\n", address);
else
pr_cont("%s access not allowed on page\n",
(cause_code == 0x01) ? "Read" :
((cause_code == 0x02) ? "Write" : "EX"));
} else if (vec == ECR_V_INSN_ERR) {
pr_cont("Illegal Insn\n");
} else {
pr_cont("Check Programmer's Manual\n");
}
}
/************************************************************************
* API called by rest of kernel
***********************************************************************/
void show_regs(struct pt_regs *regs)
{
struct task_struct *tsk = current;
struct callee_regs *cregs;
char *buf;
buf = (char *)__get_free_page(GFP_TEMPORARY);
if (!buf)
return;
print_task_path_n_nm(tsk, buf);
show_regs_print_info(KERN_INFO);
show_ecr_verbose(regs);
pr_info("[EFA ]: 0x%08lx\n[BLINK ]: %pS\n[ERET ]: %pS\n",
current->thread.fault_address,
(void *)regs->blink, (void *)regs->ret);
if (user_mode(regs))
show_faulting_vma(regs->ret, buf); /* faulting code, not data */
pr_info("[STAT32]: 0x%08lx", regs->status32);
#define STS_BIT(r, bit) r->status32 & STATUS_##bit##_MASK ? #bit : ""
if (!user_mode(regs))
pr_cont(" : %2s %2s %2s %2s %2s\n",
STS_BIT(regs, AE), STS_BIT(regs, A2), STS_BIT(regs, A1),
STS_BIT(regs, E2), STS_BIT(regs, E1));
pr_info("BTA: 0x%08lx\t SP: 0x%08lx\t FP: 0x%08lx\n",
regs->bta, regs->sp, regs->fp);
pr_info("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n",
regs->lp_start, regs->lp_end, regs->lp_count);
/* print regs->r0 thru regs->r12
* Sequential printing was generating horrible code
*/
print_reg_file(&(regs->r0), 0);
/* If Callee regs were saved, display them too */
cregs = (struct callee_regs *)current->thread.callee_reg;
if (cregs)
show_callee_regs(cregs);
free_page((unsigned long)buf);
}
void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
unsigned long address)
{
current->thread.fault_address = address;
/* Caller and Callee regs */
show_regs(regs);
/* Show stack trace if this Fatality happened in kernel mode */
if (!user_mode(regs))
show_stacktrace(current, regs);
}
#ifdef CONFIG_DEBUG_FS
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/namei.h>
#include <linux/debugfs.h>
static struct dentry *test_dentry;
static struct dentry *test_dir;
static struct dentry *test_u32_dentry;
static u32 clr_on_read = 1;
#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
u32 numitlb, numdtlb, num_pte_not_present;
static int fill_display_data(char *kbuf)
{
size_t num = 0;
num += sprintf(kbuf + num, "I-TLB Miss %x\n", numitlb);
num += sprintf(kbuf + num, "D-TLB Miss %x\n", numdtlb);
num += sprintf(kbuf + num, "PTE not present %x\n", num_pte_not_present);
if (clr_on_read)
numitlb = numdtlb = num_pte_not_present = 0;
return num;
}
static int tlb_stats_open(struct inode *inode, struct file *file)
{
file->private_data = (void *)__get_free_page(GFP_KERNEL);
return 0;
}
/* called on user read(): display the counters */
static ssize_t tlb_stats_output(struct file *file, /* file descriptor */
char __user *user_buf, /* user buffer */
size_t len, /* length of buffer */
loff_t *offset) /* offset in the file */
{
size_t num;
char *kbuf = (char *)file->private_data;
/* All of the data can be shoved in one iteration */
if (*offset != 0)
return 0;
num = fill_display_data(kbuf);
/* simple_read_from_buffer() is a helper for copying to user space:
* it copies up to @2 (num) bytes from the kernel buffer @4 (kbuf), at
* offset @3 (offset), into the user space address starting at @1
* (user_buf); @5 (len) is the max size of the user buffer
*/
return simple_read_from_buffer(user_buf, num, offset, kbuf, len);
}
/* called on user write : clears the counters */
static ssize_t tlb_stats_clear(struct file *file, const char __user *user_buf,
size_t length, loff_t *offset)
{
numitlb = numdtlb = num_pte_not_present = 0;
return length;
}
static int tlb_stats_close(struct inode *inode, struct file *file)
{
free_page((unsigned long)(file->private_data));
return 0;
}
static const struct file_operations tlb_stats_file_ops = {
.read = tlb_stats_output,
.write = tlb_stats_clear,
.open = tlb_stats_open,
.release = tlb_stats_close
};
#endif
static int __init arc_debugfs_init(void)
{
test_dir = debugfs_create_dir("arc", NULL);
#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
test_dentry = debugfs_create_file("tlb_stats", 0444, test_dir, NULL,
&tlb_stats_file_ops);
#endif
test_u32_dentry =
debugfs_create_u32("clr_on_read", 0444, test_dir, &clr_on_read);
return 0;
}
module_init(arc_debugfs_init);
static void __exit arc_debugfs_exit(void)
{
debugfs_remove(test_u32_dentry);
debugfs_remove(test_dentry);
debugfs_remove(test_dir);
}
module_exit(arc_debugfs_exit);
#endif

263
arch/arc/kernel/unaligned.c Normal file
View file

@ -0,0 +1,263 @@
/*
* Copyright (C) 2011-2012 Synopsys (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* vineetg : May 2011
* -Adapted (from .26 to .35)
* -original contribution by Tim.yao@amlogic.com
*
*/
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <asm/disasm.h>
#ifdef CONFIG_CPU_BIG_ENDIAN
#define BE 1
#define FIRST_BYTE_16 "swap %1, %1\n swape %1, %1\n"
#define FIRST_BYTE_32 "swape %1, %1\n"
#else
#define BE 0
#define FIRST_BYTE_16
#define FIRST_BYTE_32
#endif
#define __get8_unaligned_check(val, addr, err) \
__asm__( \
"1: ldb.ab %1, [%2, 1]\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 4\n" \
"3: mov %0, 1\n" \
" b 2b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 1b, 3b\n" \
" .previous\n" \
: "=r" (err), "=&r" (val), "=r" (addr) \
: "0" (err), "2" (addr))
#define get16_unaligned_check(val, addr) \
do { \
unsigned int err = 0, v, a = addr; \
__get8_unaligned_check(v, a, err); \
val = v << ((BE) ? 8 : 0); \
__get8_unaligned_check(v, a, err); \
val |= v << ((BE) ? 0 : 8); \
if (err) \
goto fault; \
} while (0)
#define get32_unaligned_check(val, addr) \
do { \
unsigned int err = 0, v, a = addr; \
__get8_unaligned_check(v, a, err); \
val = v << ((BE) ? 24 : 0); \
__get8_unaligned_check(v, a, err); \
val |= v << ((BE) ? 16 : 8); \
__get8_unaligned_check(v, a, err); \
val |= v << ((BE) ? 8 : 16); \
__get8_unaligned_check(v, a, err); \
val |= v << ((BE) ? 0 : 24); \
if (err) \
goto fault; \
} while (0)
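/*
* Worked example, little-endian (BE == 0): a 32-bit load from unaligned
* address A over bytes {0x11, 0x22, 0x33, 0x44} gathers
* val = 0x11 | 0x22<<8 | 0x33<<16 | 0x44<<24 = 0x44332211, matching an
* aligned ld; the .fixup/__ex_table glue turns a fault on any byte load
* into "err = 1" (and thus "goto fault") instead of a kernel oops.
*/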
#define put16_unaligned_check(val, addr) \
do { \
unsigned int err = 0, v = val, a = addr;\
\
__asm__( \
FIRST_BYTE_16 \
"1: stb.ab %1, [%2, 1]\n" \
" lsr %1, %1, 8\n" \
"2: stb %1, [%2]\n" \
"3:\n" \
" .section .fixup,\"ax\"\n" \
" .align 4\n" \
"4: mov %0, 1\n" \
" b 3b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 1b, 4b\n" \
" .long 2b, 4b\n" \
" .previous\n" \
: "=r" (err), "=&r" (v), "=&r" (a) \
: "0" (err), "1" (v), "2" (a)); \
\
if (err) \
goto fault; \
} while (0)
#define put32_unaligned_check(val, addr) \
do { \
unsigned int err = 0, v = val, a = addr;\
\
__asm__( \
FIRST_BYTE_32 \
"1: stb.ab %1, [%2, 1]\n" \
" lsr %1, %1, 8\n" \
"2: stb.ab %1, [%2, 1]\n" \
" lsr %1, %1, 8\n" \
"3: stb.ab %1, [%2, 1]\n" \
" lsr %1, %1, 8\n" \
"4: stb %1, [%2]\n" \
"5:\n" \
" .section .fixup,\"ax\"\n" \
" .align 4\n" \
"6: mov %0, 1\n" \
" b 5b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 1b, 6b\n" \
" .long 2b, 6b\n" \
" .long 3b, 6b\n" \
" .long 4b, 6b\n" \
" .previous\n" \
: "=r" (err), "=&r" (v), "=&r" (a) \
: "0" (err), "1" (v), "2" (a)); \
\
if (err) \
goto fault; \
} while (0)
/* sysctl hooks */
int unaligned_enabled __read_mostly = 1; /* Enabled by default */
int no_unaligned_warning __read_mostly = 1; /* Only 1 warning by default */
static void fixup_load(struct disasm_state *state, struct pt_regs *regs,
struct callee_regs *cregs)
{
int val;
/* register write back */
if ((state->aa == 1) || (state->aa == 2)) {
set_reg(state->wb_reg, state->src1 + state->src2, regs, cregs);
if (state->aa == 2)
state->src2 = 0;
}
if (state->zz == 0) {
get32_unaligned_check(val, state->src1 + state->src2);
} else {
get16_unaligned_check(val, state->src1 + state->src2);
if (state->x)
val = (val << 16) >> 16;
}
if (state->pref == 0)
set_reg(state->dest, val, regs, cregs);
return;
fault: state->fault = 1;
}
static void fixup_store(struct disasm_state *state, struct pt_regs *regs,
struct callee_regs *cregs)
{
/* register write back */
if ((state->aa == 1) || (state->aa == 2)) {
set_reg(state->wb_reg, state->src2 + state->src3, regs, cregs);
if (state->aa == 3)
state->src3 = 0;
} else if (state->aa == 3) {
if (state->zz == 2) {
set_reg(state->wb_reg, state->src2 + (state->src3 << 1),
regs, cregs);
} else if (!state->zz) {
set_reg(state->wb_reg, state->src2 + (state->src3 << 2),
regs, cregs);
} else {
goto fault;
}
}
/* write fix-up */
if (!state->zz)
put32_unaligned_check(state->src1, state->src2 + state->src3);
else
put16_unaligned_check(state->src1, state->src2 + state->src3);
return;
fault: state->fault = 1;
}
/*
* Handle an unaligned access
* Returns 0 if successfully handled, 1 if some error happened
*/
int misaligned_fixup(unsigned long address, struct pt_regs *regs,
struct callee_regs *cregs)
{
struct disasm_state state;
char buf[TASK_COMM_LEN];
/* handle user mode only and only if enabled by sysadmin */
if (!user_mode(regs) || !unaligned_enabled)
return 1;
if (no_unaligned_warning) {
pr_warn_once("%s(%d) made unaligned access which was emulated"
" by kernel assist. This can degrade application"
" performance significantly. To enable further"
" logging of such instances, please\n"
" echo 0 > /proc/sys/kernel/ignore-unaligned-usertrap\n",
get_task_comm(buf, current), task_pid_nr(current));
} else {
/* Add rate limiting if it gets down to it */
pr_warn("%s(%d): unaligned access to/from 0x%lx by PC: 0x%lx\n",
get_task_comm(buf, current), task_pid_nr(current),
address, regs->ret);
}
disasm_instr(regs->ret, &state, 1, regs, cregs);
if (state.fault)
goto fault;
/* ldb/stb should not have unaligned exception */
if ((state.zz == 1) || (state.di))
goto fault;
if (!state.write)
fixup_load(&state, regs, cregs);
else
fixup_store(&state, regs, cregs);
if (state.fault)
goto fault;
if (delay_mode(regs)) {
regs->ret = regs->bta;
regs->status32 &= ~STATUS_DE_MASK;
} else {
regs->ret += state.instr_len;
/* handle zero-overhead-loop */
if ((regs->ret == regs->lp_end) && (regs->lp_count)) {
regs->ret = regs->lp_start;
regs->lp_count--;
}
}
return 0;
fault:
pr_err("Alignment trap: fault in fix-up %08lx at [<%08lx>]\n",
state.words[0], address);
return 1;
}
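/*
* The PC fix-up above mirrors what hardware would have done had the
* access not faulted: from a delay slot, resume at the branch target
* (bta) with the DE bit cleared; otherwise step past the emulated insn,
* and if that lands exactly on lp_end with lp_count != 0, wrap to
* lp_start and decrement - i.e. zero-overhead-loop semantics.
*/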

1331
arch/arc/kernel/unwind.c Normal file

File diff suppressed because it is too large

171
arch/arc/kernel/vmlinux.lds.S Normal file
View file

@ -0,0 +1,171 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/thread_info.h>
OUTPUT_ARCH(arc)
ENTRY(_stext)
#ifdef CONFIG_CPU_BIG_ENDIAN
jiffies = jiffies_64 + 4;
#else
jiffies = jiffies_64;
#endif
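/*
* jiffies aliases the low 32 bits of jiffies_64: on big-endian the low
* word sits 4 bytes into the 64-bit value, hence the "+ 4"; on
* little-endian it is at offset 0.
*/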
SECTIONS
{
/*
* ICCM starts at 0x8000_0000. So if kernel is relocated to some other
* address, make sure no peripheral mapped at 0x8zzz_zzzz clashes with
* ICCM. Essentially the vector table is also in ICCM.
*/
. = CONFIG_LINUX_LINK_BASE;
_int_vec_base_lds = .;
.vector : {
*(.vector)
. = ALIGN(PAGE_SIZE);
}
#ifdef CONFIG_ARC_HAS_ICCM
.text.arcfp : {
*(.text.arcfp)
. = ALIGN(CONFIG_ARC_ICCM_SZ * 1024);
}
#endif
/*
* The reason for having a separate subsection .init.ramfs is to
* prevent objdump from including it in kernel dumps
*
* Reason for having .init.ramfs above .init is to make sure that the
* binary blob is tucked away to one side, reducing the displacement
* between .init.text and .text, avoiding any possible relocation
* errors because of calls from .init.text to .text
* Yes such calls do exist. e.g.
* decompress_inflate.c:gunzip( ) -> zlib_inflate_workspace( )
*/
__init_begin = .;
.init.ramfs : { INIT_RAM_FS }
. = ALIGN(PAGE_SIZE);
_stext = .;
HEAD_TEXT_SECTION
INIT_TEXT_SECTION(L1_CACHE_BYTES)
/* INIT_DATA_SECTION open-coded: special INIT_RAM_FS handling */
.init.data : {
INIT_DATA
INIT_SETUP(L1_CACHE_BYTES)
INIT_CALLS
CON_INITCALL
SECURITY_INITCALL
}
.init.arch.info : {
__arch_info_begin = .;
*(.arch.info.init)
__arch_info_end = .;
}
PERCPU_SECTION(L1_CACHE_BYTES)
/*
* .exit.text is discarded at runtime, not link time, to deal with
* references from .debug_frame
* It will be init freed, being inside [__init_start : __init_end]
*/
.exit.text : { EXIT_TEXT }
.exit.data : { EXIT_DATA }
. = ALIGN(PAGE_SIZE);
__init_end = .;
.text : {
_text = .;
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
*(.fixup)
*(.gnu.warning)
}
EXCEPTION_TABLE(L1_CACHE_BYTES)
_etext = .;
_sdata = .;
RO_DATA_SECTION(PAGE_SIZE)
/*
* 1. this is .data essentially
* 2. THREAD_SIZE for init.task, must be kernel-stk sz aligned
*/
RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
_edata = .;
BSS_SECTION(4, 4, 4)
#ifdef CONFIG_ARC_DW2_UNWIND
. = ALIGN(PAGE_SIZE);
.debug_frame : {
__start_unwind = .;
*(.debug_frame)
__end_unwind = .;
}
/*
* gcc 4.8 generates this for -fasynchronous-unwind-tables,
* while we still use the .debug_frame based unwinder
*/
/DISCARD/ : { *(.eh_frame) }
#else
/DISCARD/ : { *(.debug_frame) }
#endif
NOTES
. = ALIGN(PAGE_SIZE);
_end = . ;
STABS_DEBUG
DISCARDS
.arcextmap 0 : {
*(.gnu.linkonce.arcextmap.*)
*(.arcextmap.*)
}
#ifndef CONFIG_DEBUG_INFO
/* open-coded because we need .debug_frame separately for unwinding */
/DISCARD/ : { *(.debug_aranges) }
/DISCARD/ : { *(.debug_pubnames) }
/DISCARD/ : { *(.debug_info) }
/DISCARD/ : { *(.debug_abbrev) }
/DISCARD/ : { *(.debug_line) }
/DISCARD/ : { *(.debug_str) }
/DISCARD/ : { *(.debug_loc) }
/DISCARD/ : { *(.debug_macinfo) }
/DISCARD/ : { *(.debug_ranges) }
#endif
#ifdef CONFIG_ARC_HAS_DCCM
. = CONFIG_ARC_DCCM_BASE;
__arc_dccm_base = .;
.data.arcfp : {
*(.data.arcfp)
}
. = ALIGN(CONFIG_ARC_DCCM_SZ * 1024);
#endif
}