Fixed MTP to work with TWRP

awab228 committed 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50,820 changed files with 20,846,062 additions and 0 deletions


@@ -0,0 +1,46 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
*/
#include <linux/init.h>
#include <linux/serial_8250.h>
#define PORT(base, int) \
{ \
.iobase = base, \
.irq = int, \
.uartclk = 1843200, \
.iotype = UPIO_PORT, \
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, \
.regshift = 0, \
}
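/*
* The table below describes the four legacy PC COM ports (COM1-COM4) at
* their standard I/O bases and IRQs; the empty trailing entry terminates
* the array for the serial8250 platform driver.
*/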
static struct plat_serial8250_port uart8250_data[] = {
PORT(0x3F8, 4),
PORT(0x2F8, 3),
PORT(0x3E8, 4),
PORT(0x2E8, 3),
{ },
};
static struct platform_device uart8250_device = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM,
.dev = {
.platform_data = uart8250_data,
},
};
static int __init uart8250_init(void)
{
return platform_device_register(&uart8250_device);
}
module_init(uart8250_init);
MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic 8250 UART probe driver");

arch/mips/kernel/Makefile (Normal file, 129 lines)

@@ -0,0 +1,129 @@
#
# Makefile for the Linux/MIPS kernel.
#
extra-y := head.o vmlinux.lds
obj-y += cpu-probe.o branch.o entry.o genex.o idle.o irq.o process.o \
prom.o ptrace.o reset.o setup.o signal.o syscall.o \
time.o topology.o traps.o unaligned.o watch.o vdso.o
ifdef CONFIG_FUNCTION_TRACER
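# Build these objects without -pg so the function tracer does not end up
# tracing itself, and so the early printk and perf event paths stay out of
# the mcount call graph.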
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_early_printk.o = -pg
CFLAGS_REMOVE_perf_event.o = -pg
CFLAGS_REMOVE_perf_event_mipsxx.o = -pg
endif
obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o
obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o
obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o
obj-$(CONFIG_CEVT_GIC) += cevt-gic.o
obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o
obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o
obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o
obj-$(CONFIG_CSRC_BCM1480) += csrc-bcm1480.o
obj-$(CONFIG_CSRC_GIC) += csrc-gic.o
obj-$(CONFIG_CSRC_IOASIC) += csrc-ioasic.o
obj-$(CONFIG_CSRC_R4K) += csrc-r4k.o
obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o
obj-$(CONFIG_SYNC_R4K) += sync-r4k.o
obj-$(CONFIG_DEBUG_FS) += segment.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_MODULES) += mips_ksyms.o module.o
obj-$(CONFIG_MODULES_USE_ELF_RELA) += module-rela.o
obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
obj-$(CONFIG_CPU_R4K_FPU) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_R3000) += r2300_fpu.o r2300_switch.o
obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o
obj-$(CONFIG_CPU_CAVIUM_OCTEON) += r4k_fpu.o octeon_switch.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SMP_UP) += smp-up.o
obj-$(CONFIG_CPU_BMIPS) += smp-bmips.o bmips_vec.o
obj-$(CONFIG_MIPS_MT) += mips-mt.o
obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o
obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o
obj-$(CONFIG_MIPS_CMP) += smp-cmp.o
obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o
obj-$(CONFIG_MIPS_GIC_IPI) += smp-gic.o
obj-$(CONFIG_CPU_MIPSR2) += spram.o
obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o
obj-$(CONFIG_MIPS_VPE_LOADER_CMP) += vpe-cmp.o
obj-$(CONFIG_MIPS_VPE_LOADER_MT) += vpe-mt.o
obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o
obj-$(CONFIG_MIPS_VPE_APSP_API_CMP) += rtlx-cmp.o
obj-$(CONFIG_MIPS_VPE_APSP_API_MT) += rtlx-mt.o
obj-$(CONFIG_I8259) += i8259.o
obj-$(CONFIG_IRQ_CPU) += irq_cpu.o
obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o
obj-$(CONFIG_MIPS_MSC) += irq-msc01.o
obj-$(CONFIG_IRQ_TXX9) += irq_txx9.o
obj-$(CONFIG_IRQ_GT641XX) += irq-gt641xx.o
obj-$(CONFIG_IRQ_GIC) += irq-gic.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_32BIT) += scall32-o32.o
obj-$(CONFIG_64BIT) += scall64-64.o
obj-$(CONFIG_MIPS32_COMPAT) += linux32.o ptrace32.o signal32.o
obj-$(CONFIG_MIPS32_N32) += binfmt_elfn32.o scall64-n32.o signal_n32.o
obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o scall64-o32.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_64BIT) += cpu-bugs64.o
obj-$(CONFIG_I8253) += i8253.o
obj-$(CONFIG_GPIO_TXX9) += gpio_txx9.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_EARLY_PRINTK_8250) += early_printk_8250.o
obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o
obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o
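# Probe whether the assembler accepts -mdaddi; if it does, define
# HAVE_AS_SET_DADDI so cpu-bugs64.c can use that switch in its DADDI
# errata checks.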
CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_MIPS_CM) += mips-cm.o
obj-$(CONFIG_MIPS_CPC) += mips-cpc.o
obj-$(CONFIG_CPU_PM) += pm.o
obj-$(CONFIG_MIPS_CPS_PM) += pm-cps.o
#
# The DSP ASE is supported on MIPS32 and MIPS64 Release 2 cores only. It is
# not safe to unconditionally use the assembler -mdsp / -mdspr2 switches
# here because the compiler may use DSP ASE instructions (such as lwx) in
# code paths where we cannot check that the CPU we are running on supports it.
# Proper abstraction using HAVE_AS_DSP and macros is done in
# arch/mips/include/asm/mipsregs.h.
#
ifeq ($(CONFIG_CPU_MIPSR2), y)
CFLAGS_DSP = -DHAVE_AS_DSP
CFLAGS_signal.o = $(CFLAGS_DSP)
CFLAGS_signal32.o = $(CFLAGS_DSP)
CFLAGS_process.o = $(CFLAGS_DSP)
CFLAGS_branch.o = $(CFLAGS_DSP)
CFLAGS_ptrace.o = $(CFLAGS_DSP)
endif
CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)


@@ -0,0 +1,495 @@
/*
* offset.c: Calculate pt_regs and task_struct offsets.
*
* Copyright (C) 1996 David S. Miller
* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*
* Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000 MIPS Technologies, Inc.
*/
#include <linux/compat.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/kbuild.h>
#include <linux/suspend.h>
#include <asm/pm.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/smp-cps.h>
#include <linux/kvm_host.h>
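/*
* The OFFSET()/DEFINE() helpers from <linux/kbuild.h> emit marker strings
* into the compiler output; the build post-processes them into the
* generated asm-offsets.h header so assembly code can use these constants.
*/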
void output_ptreg_defines(void)
{
COMMENT("MIPS pt_regs offsets.");
OFFSET(PT_R0, pt_regs, regs[0]);
OFFSET(PT_R1, pt_regs, regs[1]);
OFFSET(PT_R2, pt_regs, regs[2]);
OFFSET(PT_R3, pt_regs, regs[3]);
OFFSET(PT_R4, pt_regs, regs[4]);
OFFSET(PT_R5, pt_regs, regs[5]);
OFFSET(PT_R6, pt_regs, regs[6]);
OFFSET(PT_R7, pt_regs, regs[7]);
OFFSET(PT_R8, pt_regs, regs[8]);
OFFSET(PT_R9, pt_regs, regs[9]);
OFFSET(PT_R10, pt_regs, regs[10]);
OFFSET(PT_R11, pt_regs, regs[11]);
OFFSET(PT_R12, pt_regs, regs[12]);
OFFSET(PT_R13, pt_regs, regs[13]);
OFFSET(PT_R14, pt_regs, regs[14]);
OFFSET(PT_R15, pt_regs, regs[15]);
OFFSET(PT_R16, pt_regs, regs[16]);
OFFSET(PT_R17, pt_regs, regs[17]);
OFFSET(PT_R18, pt_regs, regs[18]);
OFFSET(PT_R19, pt_regs, regs[19]);
OFFSET(PT_R20, pt_regs, regs[20]);
OFFSET(PT_R21, pt_regs, regs[21]);
OFFSET(PT_R22, pt_regs, regs[22]);
OFFSET(PT_R23, pt_regs, regs[23]);
OFFSET(PT_R24, pt_regs, regs[24]);
OFFSET(PT_R25, pt_regs, regs[25]);
OFFSET(PT_R26, pt_regs, regs[26]);
OFFSET(PT_R27, pt_regs, regs[27]);
OFFSET(PT_R28, pt_regs, regs[28]);
OFFSET(PT_R29, pt_regs, regs[29]);
OFFSET(PT_R30, pt_regs, regs[30]);
OFFSET(PT_R31, pt_regs, regs[31]);
OFFSET(PT_LO, pt_regs, lo);
OFFSET(PT_HI, pt_regs, hi);
#ifdef CONFIG_CPU_HAS_SMARTMIPS
OFFSET(PT_ACX, pt_regs, acx);
#endif
OFFSET(PT_EPC, pt_regs, cp0_epc);
OFFSET(PT_BVADDR, pt_regs, cp0_badvaddr);
OFFSET(PT_STATUS, pt_regs, cp0_status);
OFFSET(PT_CAUSE, pt_regs, cp0_cause);
#ifdef CONFIG_CPU_CAVIUM_OCTEON
OFFSET(PT_MPL, pt_regs, mpl);
OFFSET(PT_MTP, pt_regs, mtp);
#endif /* CONFIG_CPU_CAVIUM_OCTEON */
DEFINE(PT_SIZE, sizeof(struct pt_regs));
BLANK();
}
void output_task_defines(void)
{
COMMENT("MIPS task_struct offsets.");
OFFSET(TASK_STATE, task_struct, state);
OFFSET(TASK_THREAD_INFO, task_struct, stack);
OFFSET(TASK_FLAGS, task_struct, flags);
OFFSET(TASK_MM, task_struct, mm);
OFFSET(TASK_PID, task_struct, pid);
#if defined(CONFIG_CC_STACKPROTECTOR)
OFFSET(TASK_STACK_CANARY, task_struct, stack_canary);
#endif
DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
BLANK();
}
void output_thread_info_defines(void)
{
COMMENT("MIPS thread_info offsets.");
OFFSET(TI_TASK, thread_info, task);
OFFSET(TI_EXEC_DOMAIN, thread_info, exec_domain);
OFFSET(TI_FLAGS, thread_info, flags);
OFFSET(TI_TP_VALUE, thread_info, tp_value);
OFFSET(TI_CPU, thread_info, cpu);
OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
OFFSET(TI_RESTART_BLOCK, thread_info, restart_block);
OFFSET(TI_REGS, thread_info, regs);
DEFINE(_THREAD_SIZE, THREAD_SIZE);
DEFINE(_THREAD_MASK, THREAD_MASK);
BLANK();
}
void output_thread_defines(void)
{
COMMENT("MIPS specific thread_struct offsets.");
OFFSET(THREAD_REG16, task_struct, thread.reg16);
OFFSET(THREAD_REG17, task_struct, thread.reg17);
OFFSET(THREAD_REG18, task_struct, thread.reg18);
OFFSET(THREAD_REG19, task_struct, thread.reg19);
OFFSET(THREAD_REG20, task_struct, thread.reg20);
OFFSET(THREAD_REG21, task_struct, thread.reg21);
OFFSET(THREAD_REG22, task_struct, thread.reg22);
OFFSET(THREAD_REG23, task_struct, thread.reg23);
OFFSET(THREAD_REG29, task_struct, thread.reg29);
OFFSET(THREAD_REG30, task_struct, thread.reg30);
OFFSET(THREAD_REG31, task_struct, thread.reg31);
OFFSET(THREAD_STATUS, task_struct,
thread.cp0_status);
OFFSET(THREAD_FPU, task_struct, thread.fpu);
OFFSET(THREAD_BVADDR, task_struct, \
thread.cp0_badvaddr);
OFFSET(THREAD_BUADDR, task_struct, \
thread.cp0_baduaddr);
OFFSET(THREAD_ECODE, task_struct, \
thread.error_code);
BLANK();
}
void output_thread_fpu_defines(void)
{
OFFSET(THREAD_FPR0, task_struct, thread.fpu.fpr[0]);
OFFSET(THREAD_FPR1, task_struct, thread.fpu.fpr[1]);
OFFSET(THREAD_FPR2, task_struct, thread.fpu.fpr[2]);
OFFSET(THREAD_FPR3, task_struct, thread.fpu.fpr[3]);
OFFSET(THREAD_FPR4, task_struct, thread.fpu.fpr[4]);
OFFSET(THREAD_FPR5, task_struct, thread.fpu.fpr[5]);
OFFSET(THREAD_FPR6, task_struct, thread.fpu.fpr[6]);
OFFSET(THREAD_FPR7, task_struct, thread.fpu.fpr[7]);
OFFSET(THREAD_FPR8, task_struct, thread.fpu.fpr[8]);
OFFSET(THREAD_FPR9, task_struct, thread.fpu.fpr[9]);
OFFSET(THREAD_FPR10, task_struct, thread.fpu.fpr[10]);
OFFSET(THREAD_FPR11, task_struct, thread.fpu.fpr[11]);
OFFSET(THREAD_FPR12, task_struct, thread.fpu.fpr[12]);
OFFSET(THREAD_FPR13, task_struct, thread.fpu.fpr[13]);
OFFSET(THREAD_FPR14, task_struct, thread.fpu.fpr[14]);
OFFSET(THREAD_FPR15, task_struct, thread.fpu.fpr[15]);
OFFSET(THREAD_FPR16, task_struct, thread.fpu.fpr[16]);
OFFSET(THREAD_FPR17, task_struct, thread.fpu.fpr[17]);
OFFSET(THREAD_FPR18, task_struct, thread.fpu.fpr[18]);
OFFSET(THREAD_FPR19, task_struct, thread.fpu.fpr[19]);
OFFSET(THREAD_FPR20, task_struct, thread.fpu.fpr[20]);
OFFSET(THREAD_FPR21, task_struct, thread.fpu.fpr[21]);
OFFSET(THREAD_FPR22, task_struct, thread.fpu.fpr[22]);
OFFSET(THREAD_FPR23, task_struct, thread.fpu.fpr[23]);
OFFSET(THREAD_FPR24, task_struct, thread.fpu.fpr[24]);
OFFSET(THREAD_FPR25, task_struct, thread.fpu.fpr[25]);
OFFSET(THREAD_FPR26, task_struct, thread.fpu.fpr[26]);
OFFSET(THREAD_FPR27, task_struct, thread.fpu.fpr[27]);
OFFSET(THREAD_FPR28, task_struct, thread.fpu.fpr[28]);
OFFSET(THREAD_FPR29, task_struct, thread.fpu.fpr[29]);
OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]);
OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]);
/* the least significant 64 bits of each FP register */
OFFSET(THREAD_FPR0_LS64, task_struct,
thread.fpu.fpr[0].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR1_LS64, task_struct,
thread.fpu.fpr[1].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR2_LS64, task_struct,
thread.fpu.fpr[2].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR3_LS64, task_struct,
thread.fpu.fpr[3].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR4_LS64, task_struct,
thread.fpu.fpr[4].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR5_LS64, task_struct,
thread.fpu.fpr[5].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR6_LS64, task_struct,
thread.fpu.fpr[6].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR7_LS64, task_struct,
thread.fpu.fpr[7].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR8_LS64, task_struct,
thread.fpu.fpr[8].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR9_LS64, task_struct,
thread.fpu.fpr[9].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR10_LS64, task_struct,
thread.fpu.fpr[10].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR11_LS64, task_struct,
thread.fpu.fpr[11].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR12_LS64, task_struct,
thread.fpu.fpr[12].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR13_LS64, task_struct,
thread.fpu.fpr[13].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR14_LS64, task_struct,
thread.fpu.fpr[14].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR15_LS64, task_struct,
thread.fpu.fpr[15].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR16_LS64, task_struct,
thread.fpu.fpr[16].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR17_LS64, task_struct,
thread.fpu.fpr[17].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR18_LS64, task_struct,
thread.fpu.fpr[18].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR19_LS64, task_struct,
thread.fpu.fpr[19].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR20_LS64, task_struct,
thread.fpu.fpr[20].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR21_LS64, task_struct,
thread.fpu.fpr[21].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR22_LS64, task_struct,
thread.fpu.fpr[22].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR23_LS64, task_struct,
thread.fpu.fpr[23].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR24_LS64, task_struct,
thread.fpu.fpr[24].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR25_LS64, task_struct,
thread.fpu.fpr[25].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR26_LS64, task_struct,
thread.fpu.fpr[26].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR27_LS64, task_struct,
thread.fpu.fpr[27].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR28_LS64, task_struct,
thread.fpu.fpr[28].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR29_LS64, task_struct,
thread.fpu.fpr[29].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR30_LS64, task_struct,
thread.fpu.fpr[30].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR31_LS64, task_struct,
thread.fpu.fpr[31].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31);
OFFSET(THREAD_MSA_CSR, task_struct, thread.fpu.msacsr);
BLANK();
}
void output_mm_defines(void)
{
COMMENT("Size of struct page");
DEFINE(STRUCT_PAGE_SIZE, sizeof(struct page));
BLANK();
COMMENT("Linux mm_struct offsets.");
OFFSET(MM_USERS, mm_struct, mm_users);
OFFSET(MM_PGD, mm_struct, pgd);
OFFSET(MM_CONTEXT, mm_struct, context);
BLANK();
DEFINE(_PGD_T_SIZE, sizeof(pgd_t));
DEFINE(_PMD_T_SIZE, sizeof(pmd_t));
DEFINE(_PTE_T_SIZE, sizeof(pte_t));
BLANK();
DEFINE(_PGD_T_LOG2, PGD_T_LOG2);
#ifndef __PAGETABLE_PMD_FOLDED
DEFINE(_PMD_T_LOG2, PMD_T_LOG2);
#endif
DEFINE(_PTE_T_LOG2, PTE_T_LOG2);
BLANK();
DEFINE(_PGD_ORDER, PGD_ORDER);
#ifndef __PAGETABLE_PMD_FOLDED
DEFINE(_PMD_ORDER, PMD_ORDER);
#endif
DEFINE(_PTE_ORDER, PTE_ORDER);
BLANK();
DEFINE(_PMD_SHIFT, PMD_SHIFT);
DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT);
BLANK();
DEFINE(_PTRS_PER_PGD, PTRS_PER_PGD);
DEFINE(_PTRS_PER_PMD, PTRS_PER_PMD);
DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE);
BLANK();
DEFINE(_PAGE_SHIFT, PAGE_SHIFT);
DEFINE(_PAGE_SIZE, PAGE_SIZE);
BLANK();
}
#ifdef CONFIG_32BIT
void output_sc_defines(void)
{
COMMENT("Linux sigcontext offsets.");
OFFSET(SC_REGS, sigcontext, sc_regs);
OFFSET(SC_FPREGS, sigcontext, sc_fpregs);
OFFSET(SC_ACX, sigcontext, sc_acx);
OFFSET(SC_MDHI, sigcontext, sc_mdhi);
OFFSET(SC_MDLO, sigcontext, sc_mdlo);
OFFSET(SC_PC, sigcontext, sc_pc);
OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr);
OFFSET(SC_FPC_EIR, sigcontext, sc_fpc_eir);
OFFSET(SC_HI1, sigcontext, sc_hi1);
OFFSET(SC_LO1, sigcontext, sc_lo1);
OFFSET(SC_HI2, sigcontext, sc_hi2);
OFFSET(SC_LO2, sigcontext, sc_lo2);
OFFSET(SC_HI3, sigcontext, sc_hi3);
OFFSET(SC_LO3, sigcontext, sc_lo3);
BLANK();
}
#endif
#ifdef CONFIG_64BIT
void output_sc_defines(void)
{
COMMENT("Linux sigcontext offsets.");
OFFSET(SC_REGS, sigcontext, sc_regs);
OFFSET(SC_FPREGS, sigcontext, sc_fpregs);
OFFSET(SC_MDHI, sigcontext, sc_mdhi);
OFFSET(SC_MDLO, sigcontext, sc_mdlo);
OFFSET(SC_PC, sigcontext, sc_pc);
OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr);
BLANK();
}
#endif
#ifdef CONFIG_MIPS32_COMPAT
void output_sc32_defines(void)
{
COMMENT("Linux 32-bit sigcontext offsets.");
OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs);
OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr);
OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir);
BLANK();
}
#endif
void output_signal_defined(void)
{
COMMENT("Linux signal numbers.");
DEFINE(_SIGHUP, SIGHUP);
DEFINE(_SIGINT, SIGINT);
DEFINE(_SIGQUIT, SIGQUIT);
DEFINE(_SIGILL, SIGILL);
DEFINE(_SIGTRAP, SIGTRAP);
DEFINE(_SIGIOT, SIGIOT);
DEFINE(_SIGABRT, SIGABRT);
DEFINE(_SIGEMT, SIGEMT);
DEFINE(_SIGFPE, SIGFPE);
DEFINE(_SIGKILL, SIGKILL);
DEFINE(_SIGBUS, SIGBUS);
DEFINE(_SIGSEGV, SIGSEGV);
DEFINE(_SIGSYS, SIGSYS);
DEFINE(_SIGPIPE, SIGPIPE);
DEFINE(_SIGALRM, SIGALRM);
DEFINE(_SIGTERM, SIGTERM);
DEFINE(_SIGUSR1, SIGUSR1);
DEFINE(_SIGUSR2, SIGUSR2);
DEFINE(_SIGCHLD, SIGCHLD);
DEFINE(_SIGPWR, SIGPWR);
DEFINE(_SIGWINCH, SIGWINCH);
DEFINE(_SIGURG, SIGURG);
DEFINE(_SIGIO, SIGIO);
DEFINE(_SIGSTOP, SIGSTOP);
DEFINE(_SIGTSTP, SIGTSTP);
DEFINE(_SIGCONT, SIGCONT);
DEFINE(_SIGTTIN, SIGTTIN);
DEFINE(_SIGTTOU, SIGTTOU);
DEFINE(_SIGVTALRM, SIGVTALRM);
DEFINE(_SIGPROF, SIGPROF);
DEFINE(_SIGXCPU, SIGXCPU);
DEFINE(_SIGXFSZ, SIGXFSZ);
BLANK();
}
#ifdef CONFIG_CPU_CAVIUM_OCTEON
void output_octeon_cop2_state_defines(void)
{
COMMENT("Octeon specific octeon_cop2_state offsets.");
OFFSET(OCTEON_CP2_CRC_IV, octeon_cop2_state, cop2_crc_iv);
OFFSET(OCTEON_CP2_CRC_LENGTH, octeon_cop2_state, cop2_crc_length);
OFFSET(OCTEON_CP2_CRC_POLY, octeon_cop2_state, cop2_crc_poly);
OFFSET(OCTEON_CP2_LLM_DAT, octeon_cop2_state, cop2_llm_dat);
OFFSET(OCTEON_CP2_3DES_IV, octeon_cop2_state, cop2_3des_iv);
OFFSET(OCTEON_CP2_3DES_KEY, octeon_cop2_state, cop2_3des_key);
OFFSET(OCTEON_CP2_3DES_RESULT, octeon_cop2_state, cop2_3des_result);
OFFSET(OCTEON_CP2_AES_INP0, octeon_cop2_state, cop2_aes_inp0);
OFFSET(OCTEON_CP2_AES_IV, octeon_cop2_state, cop2_aes_iv);
OFFSET(OCTEON_CP2_AES_KEY, octeon_cop2_state, cop2_aes_key);
OFFSET(OCTEON_CP2_AES_KEYLEN, octeon_cop2_state, cop2_aes_keylen);
OFFSET(OCTEON_CP2_AES_RESULT, octeon_cop2_state, cop2_aes_result);
OFFSET(OCTEON_CP2_GFM_MULT, octeon_cop2_state, cop2_gfm_mult);
OFFSET(OCTEON_CP2_GFM_POLY, octeon_cop2_state, cop2_gfm_poly);
OFFSET(OCTEON_CP2_GFM_RESULT, octeon_cop2_state, cop2_gfm_result);
OFFSET(OCTEON_CP2_HSH_DATW, octeon_cop2_state, cop2_hsh_datw);
OFFSET(OCTEON_CP2_HSH_IVW, octeon_cop2_state, cop2_hsh_ivw);
OFFSET(THREAD_CP2, task_struct, thread.cp2);
OFFSET(THREAD_CVMSEG, task_struct, thread.cvmseg.cvmseg);
BLANK();
}
#endif
#ifdef CONFIG_HIBERNATION
void output_pbe_defines(void)
{
COMMENT(" Linux struct pbe offsets. ");
OFFSET(PBE_ADDRESS, pbe, address);
OFFSET(PBE_ORIG_ADDRESS, pbe, orig_address);
OFFSET(PBE_NEXT, pbe, next);
DEFINE(PBE_SIZE, sizeof(struct pbe));
BLANK();
}
#endif
#ifdef CONFIG_CPU_PM
void output_pm_defines(void)
{
COMMENT(" PM offsets. ");
#ifdef CONFIG_EVA
OFFSET(SSS_SEGCTL0, mips_static_suspend_state, segctl[0]);
OFFSET(SSS_SEGCTL1, mips_static_suspend_state, segctl[1]);
OFFSET(SSS_SEGCTL2, mips_static_suspend_state, segctl[2]);
#endif
OFFSET(SSS_SP, mips_static_suspend_state, sp);
BLANK();
}
#endif
void output_kvm_defines(void)
{
COMMENT(" KVM/MIPS Specfic offsets. ");
DEFINE(VCPU_ARCH_SIZE, sizeof(struct kvm_vcpu_arch));
OFFSET(VCPU_RUN, kvm_vcpu, run);
OFFSET(VCPU_HOST_ARCH, kvm_vcpu, arch);
OFFSET(VCPU_HOST_EBASE, kvm_vcpu_arch, host_ebase);
OFFSET(VCPU_GUEST_EBASE, kvm_vcpu_arch, guest_ebase);
OFFSET(VCPU_HOST_STACK, kvm_vcpu_arch, host_stack);
OFFSET(VCPU_HOST_GP, kvm_vcpu_arch, host_gp);
OFFSET(VCPU_HOST_CP0_BADVADDR, kvm_vcpu_arch, host_cp0_badvaddr);
OFFSET(VCPU_HOST_CP0_CAUSE, kvm_vcpu_arch, host_cp0_cause);
OFFSET(VCPU_HOST_EPC, kvm_vcpu_arch, host_cp0_epc);
OFFSET(VCPU_HOST_ENTRYHI, kvm_vcpu_arch, host_cp0_entryhi);
OFFSET(VCPU_GUEST_INST, kvm_vcpu_arch, guest_inst);
OFFSET(VCPU_R0, kvm_vcpu_arch, gprs[0]);
OFFSET(VCPU_R1, kvm_vcpu_arch, gprs[1]);
OFFSET(VCPU_R2, kvm_vcpu_arch, gprs[2]);
OFFSET(VCPU_R3, kvm_vcpu_arch, gprs[3]);
OFFSET(VCPU_R4, kvm_vcpu_arch, gprs[4]);
OFFSET(VCPU_R5, kvm_vcpu_arch, gprs[5]);
OFFSET(VCPU_R6, kvm_vcpu_arch, gprs[6]);
OFFSET(VCPU_R7, kvm_vcpu_arch, gprs[7]);
OFFSET(VCPU_R8, kvm_vcpu_arch, gprs[8]);
OFFSET(VCPU_R9, kvm_vcpu_arch, gprs[9]);
OFFSET(VCPU_R10, kvm_vcpu_arch, gprs[10]);
OFFSET(VCPU_R11, kvm_vcpu_arch, gprs[11]);
OFFSET(VCPU_R12, kvm_vcpu_arch, gprs[12]);
OFFSET(VCPU_R13, kvm_vcpu_arch, gprs[13]);
OFFSET(VCPU_R14, kvm_vcpu_arch, gprs[14]);
OFFSET(VCPU_R15, kvm_vcpu_arch, gprs[15]);
OFFSET(VCPU_R16, kvm_vcpu_arch, gprs[16]);
OFFSET(VCPU_R17, kvm_vcpu_arch, gprs[17]);
OFFSET(VCPU_R18, kvm_vcpu_arch, gprs[18]);
OFFSET(VCPU_R19, kvm_vcpu_arch, gprs[19]);
OFFSET(VCPU_R20, kvm_vcpu_arch, gprs[20]);
OFFSET(VCPU_R21, kvm_vcpu_arch, gprs[21]);
OFFSET(VCPU_R22, kvm_vcpu_arch, gprs[22]);
OFFSET(VCPU_R23, kvm_vcpu_arch, gprs[23]);
OFFSET(VCPU_R24, kvm_vcpu_arch, gprs[24]);
OFFSET(VCPU_R25, kvm_vcpu_arch, gprs[25]);
OFFSET(VCPU_R26, kvm_vcpu_arch, gprs[26]);
OFFSET(VCPU_R27, kvm_vcpu_arch, gprs[27]);
OFFSET(VCPU_R28, kvm_vcpu_arch, gprs[28]);
OFFSET(VCPU_R29, kvm_vcpu_arch, gprs[29]);
OFFSET(VCPU_R30, kvm_vcpu_arch, gprs[30]);
OFFSET(VCPU_R31, kvm_vcpu_arch, gprs[31]);
OFFSET(VCPU_LO, kvm_vcpu_arch, lo);
OFFSET(VCPU_HI, kvm_vcpu_arch, hi);
OFFSET(VCPU_PC, kvm_vcpu_arch, pc);
OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0);
OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid);
OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid);
OFFSET(COP0_TLB_HI, mips_coproc, reg[MIPS_CP0_TLB_HI][0]);
OFFSET(COP0_STATUS, mips_coproc, reg[MIPS_CP0_STATUS][0]);
BLANK();
}
#ifdef CONFIG_MIPS_CPS
void output_cps_defines(void)
{
COMMENT(" MIPS CPS offsets. ");
OFFSET(COREBOOTCFG_VPEMASK, core_boot_config, vpe_mask);
OFFSET(COREBOOTCFG_VPECONFIG, core_boot_config, vpe_config);
DEFINE(COREBOOTCFG_SIZE, sizeof(struct core_boot_config));
OFFSET(VPEBOOTCFG_PC, vpe_boot_config, pc);
OFFSET(VPEBOOTCFG_SP, vpe_boot_config, sp);
OFFSET(VPEBOOTCFG_GP, vpe_boot_config, gp);
DEFINE(VPEBOOTCFG_SIZE, sizeof(struct vpe_boot_config));
}
#endif


@@ -0,0 +1,133 @@
/*
* Support for n32 Linux/MIPS ELF binaries.
*
* Copyright (C) 1999, 2001 Ralf Baechle
* Copyright (C) 1999, 2001 Silicon Graphics, Inc.
*
* Heavily inspired by the 32-bit Sparc compat code which is
* Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
* Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
*/
#define ELF_ARCH EM_MIPS
#define ELF_CLASS ELFCLASS32
#ifdef __MIPSEB__
#define ELF_DATA ELFDATA2MSB;
#else /* __MIPSEL__ */
#define ELF_DATA ELFDATA2LSB;
#endif
/* ELF register definitions */
#define ELF_NGREG 45
#define ELF_NFPREG 33
typedef unsigned long elf_greg_t;
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(hdr) \
({ \
int __res = 1; \
struct elfhdr *__h = (hdr); \
\
if (__h->e_machine != EM_MIPS) \
__res = 0; \
if (__h->e_ident[EI_CLASS] != ELFCLASS32) \
__res = 0; \
if (((__h->e_flags & EF_MIPS_ABI2) == 0) || \
((__h->e_flags & EF_MIPS_ABI) != 0)) \
__res = 0; \
\
__res; \
})
#define TASK32_SIZE 0x7fff8000UL
#undef ELF_ET_DYN_BASE
#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
#include <asm/processor.h>
#include <linux/module.h>
#include <linux/elfcore.h>
#include <linux/compat.h>
#include <linux/math64.h>
#define elf_prstatus elf_prstatus32
struct elf_prstatus32
{
struct elf_siginfo pr_info; /* Info associated with signal */
short pr_cursig; /* Current signal */
unsigned int pr_sigpend; /* Set of pending signals */
unsigned int pr_sighold; /* Set of held signals */
pid_t pr_pid;
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
struct compat_timeval pr_utime; /* User time */
struct compat_timeval pr_stime; /* System time */
struct compat_timeval pr_cutime;/* Cumulative user time */
struct compat_timeval pr_cstime;/* Cumulative system time */
elf_gregset_t pr_reg; /* GP registers */
int pr_fpvalid; /* True if math co-processor being used. */
};
#define elf_prpsinfo elf_prpsinfo32
struct elf_prpsinfo32
{
char pr_state; /* numeric process state */
char pr_sname; /* char for pr_state */
char pr_zomb; /* zombie */
char pr_nice; /* nice val */
unsigned int pr_flag; /* flags */
__kernel_uid_t pr_uid;
__kernel_gid_t pr_gid;
pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
/* Lots missing */
char pr_fname[16]; /* filename of executable */
char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
#define elf_caddr_t u32
#define init_elf_binfmt init_elfn32_binfmt
#define jiffies_to_timeval jiffies_to_compat_timeval
static __inline__ void
jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
{
/*
* Convert jiffies to nanoseconds and separate with
* one divide.
*/
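/*
* Worked example (hypothetical HZ of 100, i.e. TICK_NSEC = 10,000,000):
* 250 jiffies -> 2,500,000,000 ns -> tv_sec = 2, tv_usec = 500000.
*/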
u64 nsec = (u64)jiffies * TICK_NSEC;
u32 rem;
value->tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
value->tv_usec = rem / NSEC_PER_USEC;
}
#define ELF_CORE_EFLAGS EF_MIPS_ABI2
MODULE_DESCRIPTION("Binary format loader for compatibility with n32 Linux/MIPS binaries");
MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
#undef MODULE_DESCRIPTION
#undef MODULE_AUTHOR
#undef TASK_SIZE
#define TASK_SIZE TASK_SIZE32
#undef cputime_to_timeval
#define cputime_to_timeval cputime_to_compat_timeval
static __inline__ void
cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
{
unsigned long jiffies = cputime_to_jiffies(cputime);
value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
value->tv_sec = jiffies / HZ;
}
#include "../../../fs/binfmt_elf.c"


@@ -0,0 +1,152 @@
/*
* Support for o32 Linux/MIPS ELF binaries.
*
* Copyright (C) 1999, 2001 Ralf Baechle
* Copyright (C) 1999, 2001 Silicon Graphics, Inc.
*
* Heavily inspired by the 32-bit Sparc compat code which is
* Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
* Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
*/
#define ELF_ARCH EM_MIPS
#define ELF_CLASS ELFCLASS32
#ifdef __MIPSEB__
#define ELF_DATA ELFDATA2MSB;
#else /* __MIPSEL__ */
#define ELF_DATA ELFDATA2LSB;
#endif
/* ELF register definitions */
#define ELF_NGREG 45
#define ELF_NFPREG 33
typedef unsigned int elf_greg_t;
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
/*
* In order to be sure that we don't attempt to execute an O32 binary which
* requires 64 bit FP (FR=1) on a system which does not support it we refuse
* to execute any binary which has bits specified by the following macro set
* in its ELF header flags.
*/
#ifdef CONFIG_MIPS_O32_FP64_SUPPORT
# define __MIPS_O32_FP64_MUST_BE_ZERO 0
#else
# define __MIPS_O32_FP64_MUST_BE_ZERO EF_MIPS_FP64
#endif
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(hdr) \
({ \
int __res = 1; \
struct elfhdr *__h = (hdr); \
\
if (__h->e_machine != EM_MIPS) \
__res = 0; \
if (__h->e_ident[EI_CLASS] != ELFCLASS32) \
__res = 0; \
if ((__h->e_flags & EF_MIPS_ABI2) != 0) \
__res = 0; \
if (((__h->e_flags & EF_MIPS_ABI) != 0) && \
((__h->e_flags & EF_MIPS_ABI) != EF_MIPS_ABI_O32)) \
__res = 0; \
if (__h->e_flags & __MIPS_O32_FP64_MUST_BE_ZERO) \
__res = 0; \
\
__res; \
})
#ifdef CONFIG_KVM_GUEST
#define TASK32_SIZE 0x3fff8000UL
#else
#define TASK32_SIZE 0x7fff8000UL
#endif
#undef ELF_ET_DYN_BASE
#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
#include <asm/processor.h>
#include <linux/module.h>
#include <linux/elfcore.h>
#include <linux/compat.h>
#include <linux/math64.h>
#define elf_prstatus elf_prstatus32
struct elf_prstatus32
{
struct elf_siginfo pr_info; /* Info associated with signal */
short pr_cursig; /* Current signal */
unsigned int pr_sigpend; /* Set of pending signals */
unsigned int pr_sighold; /* Set of held signals */
pid_t pr_pid;
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
struct compat_timeval pr_utime; /* User time */
struct compat_timeval pr_stime; /* System time */
struct compat_timeval pr_cutime;/* Cumulative user time */
struct compat_timeval pr_cstime;/* Cumulative system time */
elf_gregset_t pr_reg; /* GP registers */
int pr_fpvalid; /* True if math co-processor being used. */
};
#define elf_prpsinfo elf_prpsinfo32
struct elf_prpsinfo32
{
char pr_state; /* numeric process state */
char pr_sname; /* char for pr_state */
char pr_zomb; /* zombie */
char pr_nice; /* nice val */
unsigned int pr_flag; /* flags */
__kernel_uid_t pr_uid;
__kernel_gid_t pr_gid;
pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
/* Lots missing */
char pr_fname[16]; /* filename of executable */
char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
#define elf_caddr_t u32
#define init_elf_binfmt init_elf32_binfmt
#define jiffies_to_timeval jiffies_to_compat_timeval
static inline void
jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
{
/*
* Convert jiffies to nanoseconds and separate with
* one divide.
*/
u64 nsec = (u64)jiffies * TICK_NSEC;
u32 rem;
value->tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
value->tv_usec = rem / NSEC_PER_USEC;
}
MODULE_DESCRIPTION("Binary format loader for compatibility with o32 Linux/MIPS binaries");
MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
#undef MODULE_DESCRIPTION
#undef MODULE_AUTHOR
#undef TASK_SIZE
#define TASK_SIZE TASK_SIZE32
#undef cputime_to_timeval
#define cputime_to_timeval cputime_to_compat_timeval
static __inline__ void
cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
{
unsigned long jiffies = cputime_to_jiffies(cputime);
value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
value->tv_sec = jiffies / HZ;
}
#include "../../../fs/binfmt_elf.c"


@@ -0,0 +1,282 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2011 by Kevin Cernekee (cernekee@gmail.com)
*
* Reset/NMI/re-entry vectors for BMIPS processors
*/
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/addrspace.h>
#include <asm/hazards.h>
#include <asm/bmips.h>
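/* Three SSNOPs serve as a hazard barrier after CP0 register updates. */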
.macro BARRIER
.set mips32
_ssnop
_ssnop
_ssnop
.set mips0
.endm
/***********************************************************************
* Alternate CPU1 startup vector for BMIPS4350
*
* On some systems the bootloader has already started CPU1 and configured
* it to resume execution at 0x8000_0200 (!BEV IV vector) when it is
* triggered by the SW1 interrupt. If that is the case we try to move
* it to a more convenient place: BMIPS_WARM_RESTART_VEC @ 0x8000_0380.
***********************************************************************/
LEAF(bmips_smp_movevec)
la k0, 1f
li k1, CKSEG1
or k0, k1
jr k0
1:
/* clear IV, pending IPIs */
mtc0 zero, CP0_CAUSE
/* re-enable IRQs to wait for SW1 */
li k0, ST0_IE | ST0_BEV | STATUSF_IP1
mtc0 k0, CP0_STATUS
/* set up CPU1 CBR; move BASE to 0xa000_0000 */
li k0, 0xff400000
mtc0 k0, $22, 6
/* set up relocation vector address based on thread ID */
mfc0 k1, $22, 3
srl k1, 16
andi k1, 0x8000
or k1, CKSEG1 | BMIPS_RELO_VECTOR_CONTROL_0
or k0, k1
li k1, 0xa0080000
sw k1, 0(k0)
/* wait here for SW1 interrupt from bmips_boot_secondary() */
wait
la k0, bmips_reset_nmi_vec
li k1, CKSEG1
or k0, k1
jr k0
END(bmips_smp_movevec)
/***********************************************************************
* Reset/NMI vector
* For BMIPS processors that can relocate their exception vectors, this
* entire function gets copied to 0x8000_0000.
***********************************************************************/
NESTED(bmips_reset_nmi_vec, PT_SIZE, sp)
.set push
.set noat
.align 4
#ifdef CONFIG_SMP
/* if the NMI bit is clear, assume this is a CPU1 reset instead */
li k1, (1 << 19)
mfc0 k0, CP0_STATUS
and k0, k1
beqz k0, bmips_smp_entry
#if defined(CONFIG_CPU_BMIPS5000)
mfc0 k0, CP0_PRID
li k1, PRID_IMP_BMIPS5000
andi k0, 0xff00
bne k0, k1, 1f
/* if we're not on core 0, this must be the SMP boot signal */
li k1, (3 << 25)
mfc0 k0, $22
and k0, k1
bnez k0, bmips_smp_entry
1:
#endif /* CONFIG_CPU_BMIPS5000 */
#endif /* CONFIG_SMP */
/* nope, it's just a regular NMI */
SAVE_ALL
move a0, sp
/* clear EXL, ERL, BEV so that TLB refills still work */
mfc0 k0, CP0_STATUS
li k1, ST0_ERL | ST0_EXL | ST0_BEV | ST0_IE
or k0, k1
xor k0, k1
mtc0 k0, CP0_STATUS
BARRIER
/* jump to the NMI handler function */
la k0, nmi_handler
jr k0
RESTORE_ALL
.set arch=r4000
eret
/***********************************************************************
* CPU1 reset vector (used for the initial boot only)
* This is still part of bmips_reset_nmi_vec().
***********************************************************************/
#ifdef CONFIG_SMP
bmips_smp_entry:
/* set up CP0 STATUS; enable FPU */
li k0, 0x30000000
mtc0 k0, CP0_STATUS
BARRIER
/* set local CP0 CONFIG to make kseg0 cacheable, write-back */
mfc0 k0, CP0_CONFIG
ori k0, 0x07
xori k0, 0x04
mtc0 k0, CP0_CONFIG
mfc0 k0, CP0_PRID
andi k0, 0xff00
#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
li k1, PRID_IMP_BMIPS43XX
bne k0, k1, 2f
/* initialize CPU1's local I-cache */
li k0, 0x80000000
li k1, 0x80010000
mtc0 zero, $28
mtc0 zero, $28, 1
BARRIER
1: cache Index_Store_Tag_I, 0(k0)
addiu k0, 16
bne k0, k1, 1b
b 3f
2:
#endif /* CONFIG_CPU_BMIPS4350 || CONFIG_CPU_BMIPS4380 */
#if defined(CONFIG_CPU_BMIPS5000)
/* set exception vector base */
li k1, PRID_IMP_BMIPS5000
bne k0, k1, 3f
la k0, ebase
lw k0, 0(k0)
mtc0 k0, $15, 1
BARRIER
#endif /* CONFIG_CPU_BMIPS5000 */
3:
/* jump back to kseg0 in case we need to remap the kseg1 area */
la k0, 1f
jr k0
1:
la k0, bmips_enable_xks01
jalr k0
/* use temporary stack to set up upper memory TLB */
li sp, BMIPS_WARM_RESTART_VEC
la k0, plat_wired_tlb_setup
jalr k0
/* switch to permanent stack and continue booting */
.global bmips_secondary_reentry
bmips_secondary_reentry:
la k0, bmips_smp_boot_sp
lw sp, 0(k0)
la k0, bmips_smp_boot_gp
lw gp, 0(k0)
la k0, start_secondary
jr k0
#endif /* CONFIG_SMP */
.align 4
.global bmips_reset_nmi_vec_end
bmips_reset_nmi_vec_end:
END(bmips_reset_nmi_vec)
.set pop
/***********************************************************************
* CPU1 warm restart vector (used for second and subsequent boots).
* Also used for S2 standby recovery (PM).
* This entire function gets copied to (BMIPS_WARM_RESTART_VEC)
***********************************************************************/
LEAF(bmips_smp_int_vec)
.align 4
mfc0 k0, CP0_STATUS
ori k0, 0x01
xori k0, 0x01
mtc0 k0, CP0_STATUS
eret
.align 4
.global bmips_smp_int_vec_end
bmips_smp_int_vec_end:
END(bmips_smp_int_vec)
/***********************************************************************
* XKS01 support
* Certain CPUs support extending kseg0 to 1024MB.
***********************************************************************/
LEAF(bmips_enable_xks01)
#if defined(CONFIG_XKS01)
mfc0 t0, CP0_PRID
andi t2, t0, 0xff00
#if defined(CONFIG_CPU_BMIPS4380)
li t1, PRID_IMP_BMIPS43XX
bne t2, t1, 1f
andi t0, 0xff
addiu t1, t0, -PRID_REV_BMIPS4380_HI
bgtz t1, 2f
addiu t0, -PRID_REV_BMIPS4380_LO
bltz t0, 2f
mfc0 t0, $22, 3
li t1, 0x1ff0
li t2, (1 << 12) | (1 << 9)
or t0, t1
xor t0, t1
or t0, t2
mtc0 t0, $22, 3
BARRIER
b 2f
1:
#endif /* CONFIG_CPU_BMIPS4380 */
#if defined(CONFIG_CPU_BMIPS5000)
li t1, PRID_IMP_BMIPS5000
bne t2, t1, 2f
mfc0 t0, $22, 5
li t1, 0x01ff
li t2, (1 << 8) | (1 << 5)
or t0, t1
xor t0, t1
or t0, t2
mtc0 t0, $22, 5
BARRIER
#endif /* CONFIG_CPU_BMIPS5000 */
2:
#endif /* defined(CONFIG_XKS01) */
jr ra
END(bmips_enable_xks01)

arch/mips/kernel/branch.c (Normal file, 666 lines)

@@ -0,0 +1,666 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1996, 97, 2000, 2001 by Ralf Baechle
* Copyright (C) 2001 MIPS Technologies, Inc.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/module.h>
#include <asm/branch.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/inst.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>
/*
* Calculate and return exception PC in case of branch delay slot
* for microMIPS and MIPS16e. It does not clear the ISA mode bit.
*/
int __isa_exception_epc(struct pt_regs *regs)
{
unsigned short inst;
long epc = regs->cp0_epc;
/* Calculate exception PC in branch delay slot. */
if (__get_user(inst, (u16 __user *) msk_isa16_mode(epc))) {
/* This should never happen because delay slot was checked. */
force_sig(SIGSEGV, current);
return epc;
}
if (cpu_has_mips16) {
if (((union mips16e_instruction)inst).ri.opcode
== MIPS16e_jal_op)
epc += 4;
else
epc += 2;
} else if (mm_insn_16bit(inst))
epc += 2;
else
epc += 4;
return epc;
}
/* (microMIPS) Convert 16-bit register encoding to 32-bit register encoding. */
static const unsigned int reg16to32map[8] = {16, 17, 2, 3, 4, 5, 6, 7};
int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
unsigned long *contpc)
{
union mips_instruction insn = (union mips_instruction)dec_insn.insn;
int bc_false = 0;
unsigned int fcr31;
unsigned int bit;
if (!cpu_has_mmips)
return 0;
switch (insn.mm_i_format.opcode) {
case mm_pool32a_op:
if ((insn.mm_i_format.simmediate & MM_POOL32A_MINOR_MASK) ==
mm_pool32axf_op) {
switch (insn.mm_i_format.simmediate >>
MM_POOL32A_MINOR_SHIFT) {
case mm_jalr_op:
case mm_jalrhb_op:
case mm_jalrs_op:
case mm_jalrshb_op:
if (insn.mm_i_format.rt != 0) /* Not mm_jr */
regs->regs[insn.mm_i_format.rt] =
regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
*contpc = regs->regs[insn.mm_i_format.rs];
return 1;
}
}
break;
case mm_pool32i_op:
switch (insn.mm_i_format.rt) {
case mm_bltzals_op:
case mm_bltzal_op:
regs->regs[31] = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
/* Fall through */
case mm_bltz_op:
if ((long)regs->regs[insn.mm_i_format.rs] < 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_i_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
case mm_bgezals_op:
case mm_bgezal_op:
regs->regs[31] = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
/* Fall through */
case mm_bgez_op:
if ((long)regs->regs[insn.mm_i_format.rs] >= 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_i_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
case mm_blez_op:
if ((long)regs->regs[insn.mm_i_format.rs] <= 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_i_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
case mm_bgtz_op:
if ((long)regs->regs[insn.mm_i_format.rs] > 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_i_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
case mm_bc2f_op:
case mm_bc1f_op:
bc_false = 1;
/* Fall through */
case mm_bc2t_op:
case mm_bc1t_op:
preempt_disable();
if (is_fpu_owner())
fcr31 = read_32bit_cp1_register(CP1_STATUS);
else
fcr31 = current->thread.fpu.fcr31;
preempt_enable();
if (bc_false)
fcr31 = ~fcr31;
bit = (insn.mm_i_format.rs >> 2);
bit += (bit != 0);
bit += 23;
if (fcr31 & (1 << bit))
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_i_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc + dec_insn.next_pc_inc;
return 1;
}
break;
case mm_pool16c_op:
switch (insn.mm_i_format.rt) {
case mm_jalr16_op:
case mm_jalrs16_op:
regs->regs[31] = regs->cp0_epc +
dec_insn.pc_inc + dec_insn.next_pc_inc;
/* Fall through */
case mm_jr16_op:
*contpc = regs->regs[insn.mm_i_format.rs];
return 1;
}
break;
case mm_beqz16_op:
if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] == 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_b1_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc + dec_insn.next_pc_inc;
return 1;
case mm_bnez16_op:
if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] != 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_b1_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc + dec_insn.next_pc_inc;
return 1;
case mm_b16_op:
*contpc = regs->cp0_epc + dec_insn.pc_inc +
(insn.mm_b0_format.simmediate << 1);
return 1;
case mm_beq32_op:
if (regs->regs[insn.mm_i_format.rs] ==
regs->regs[insn.mm_i_format.rt])
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_i_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
case mm_bne32_op:
if (regs->regs[insn.mm_i_format.rs] !=
regs->regs[insn.mm_i_format.rt])
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
(insn.mm_i_format.simmediate << 1);
else
*contpc = regs->cp0_epc +
dec_insn.pc_inc + dec_insn.next_pc_inc;
return 1;
case mm_jalx32_op:
regs->regs[31] = regs->cp0_epc +
dec_insn.pc_inc + dec_insn.next_pc_inc;
*contpc = regs->cp0_epc + dec_insn.pc_inc;
*contpc >>= 28;
*contpc <<= 28;
*contpc |= (insn.j_format.target << 2);
return 1;
case mm_jals32_op:
case mm_jal32_op:
regs->regs[31] = regs->cp0_epc +
dec_insn.pc_inc + dec_insn.next_pc_inc;
/* Fall through */
case mm_j32_op:
*contpc = regs->cp0_epc + dec_insn.pc_inc;
*contpc >>= 27;
*contpc <<= 27;
*contpc |= (insn.j_format.target << 1);
set_isa16_mode(*contpc);
return 1;
}
return 0;
}
/*
* Compute return address and emulate branch in microMIPS mode after an
* exception only. It does not handle compact branches/jumps and cannot
* be used in interrupt context. (Compact branches/jumps do not cause
* exceptions.)
*/
int __microMIPS_compute_return_epc(struct pt_regs *regs)
{
u16 __user *pc16;
u16 halfword;
unsigned int word;
unsigned long contpc;
struct mm_decoded_insn mminsn = { 0 };
mminsn.micro_mips_mode = 1;
/* This load never faults. */
pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
__get_user(halfword, pc16);
pc16++;
contpc = regs->cp0_epc + 2;
word = ((unsigned int)halfword << 16);
mminsn.pc_inc = 2;
if (!mm_insn_16bit(halfword)) {
__get_user(halfword, pc16);
pc16++;
contpc = regs->cp0_epc + 4;
mminsn.pc_inc = 4;
word |= halfword;
}
mminsn.insn = word;
if (get_user(halfword, pc16))
goto sigsegv;
mminsn.next_pc_inc = 2;
word = ((unsigned int)halfword << 16);
if (!mm_insn_16bit(halfword)) {
pc16++;
if (get_user(halfword, pc16))
goto sigsegv;
mminsn.next_pc_inc = 4;
word |= halfword;
}
mminsn.next_insn = word;
mm_isBranchInstr(regs, mminsn, &contpc);
regs->cp0_epc = contpc;
return 0;
sigsegv:
force_sig(SIGSEGV, current);
return -EFAULT;
}
/*
* Compute return address and emulate branch in MIPS16e mode after an
* exception only. It does not handle compact branches/jumps and cannot
* be used in interrupt context. (Compact branches/jumps do not cause
* exceptions.)
*/
int __MIPS16e_compute_return_epc(struct pt_regs *regs)
{
u16 __user *addr;
union mips16e_instruction inst;
u16 inst2;
u32 fullinst;
long epc;
epc = regs->cp0_epc;
/* Read the instruction. */
addr = (u16 __user *)msk_isa16_mode(epc);
if (__get_user(inst.full, addr)) {
force_sig(SIGSEGV, current);
return -EFAULT;
}
switch (inst.ri.opcode) {
case MIPS16e_extend_op:
regs->cp0_epc += 4;
return 0;
/*
* JAL and JALX in MIPS16e mode
*/
case MIPS16e_jal_op:
addr += 1;
if (__get_user(inst2, addr)) {
force_sig(SIGSEGV, current);
return -EFAULT;
}
fullinst = ((unsigned)inst.full << 16) | inst2;
regs->regs[31] = epc + 6;
epc += 4;
epc >>= 28;
epc <<= 28;
/*
* JAL:5 X:1 TARGET[20-16]:5 TARGET[25:21]:5 TARGET[15:0]:16
*
* ......TARGET[15:0].................TARGET[20:16]...........
* ......TARGET[25:21]
*/
epc |=
((fullinst & 0xffff) << 2) | ((fullinst & 0x3e00000) >> 3) |
((fullinst & 0x1f0000) << 7);
if (!inst.jal.x)
set_isa16_mode(epc); /* Set ISA mode bit. */
regs->cp0_epc = epc;
return 0;
/*
* J(AL)R(C)
*/
case MIPS16e_rr_op:
if (inst.rr.func == MIPS16e_jr_func) {
if (inst.rr.ra)
regs->cp0_epc = regs->regs[31];
else
regs->cp0_epc =
regs->regs[reg16to32[inst.rr.rx]];
if (inst.rr.l) {
if (inst.rr.nd)
regs->regs[31] = epc + 2;
else
regs->regs[31] = epc + 4;
}
return 0;
}
break;
}
/*
* All other cases have no branch delay slot and are 16-bits.
* Branches do not cause an exception.
*/
regs->cp0_epc += 2;
return 0;
}
/**
* __compute_return_epc_for_insn - Computes the return address and do emulate
* branch simulation, if required.
*
* @regs: Pointer to pt_regs
* @insn: branch instruction to decode
* @returns: -EFAULT on error and forces SIGBUS, and on success
* returns 0 or BRANCH_LIKELY_TAKEN as appropriate after
* evaluating the branch.
*/
int __compute_return_epc_for_insn(struct pt_regs *regs,
union mips_instruction insn)
{
unsigned int bit, fcr31, dspcontrol;
long epc = regs->cp0_epc;
int ret = 0;
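/*
* For the conditional branches below: a taken branch continues at
* epc + 4 + (offset << 2), a not-taken branch at epc + 8, i.e. the
* instruction following the delay slot.
*/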
switch (insn.i_format.opcode) {
/*
* jr and jalr are in r_format format.
*/
case spec_op:
switch (insn.r_format.func) {
case jalr_op:
regs->regs[insn.r_format.rd] = epc + 8;
/* Fall through */
case jr_op:
regs->cp0_epc = regs->regs[insn.r_format.rs];
break;
}
break;
/*
* This group contains:
* bltz_op, bgez_op, bltzl_op, bgezl_op,
* bltzal_op, bgezal_op, bltzall_op, bgezall_op.
*/
case bcond_op:
switch (insn.i_format.rt) {
case bltz_op:
case bltzl_op:
if ((long)regs->regs[insn.i_format.rs] < 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
if (insn.i_format.rt == bltzl_op)
ret = BRANCH_LIKELY_TAKEN;
} else
epc += 8;
regs->cp0_epc = epc;
break;
case bgez_op:
case bgezl_op:
if ((long)regs->regs[insn.i_format.rs] >= 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
if (insn.i_format.rt == bgezl_op)
ret = BRANCH_LIKELY_TAKEN;
} else
epc += 8;
regs->cp0_epc = epc;
break;
case bltzal_op:
case bltzall_op:
regs->regs[31] = epc + 8;
if ((long)regs->regs[insn.i_format.rs] < 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
if (insn.i_format.rt == bltzall_op)
ret = BRANCH_LIKELY_TAKEN;
} else
epc += 8;
regs->cp0_epc = epc;
break;
case bgezal_op:
case bgezall_op:
regs->regs[31] = epc + 8;
if ((long)regs->regs[insn.i_format.rs] >= 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
if (insn.i_format.rt == bgezall_op)
ret = BRANCH_LIKELY_TAKEN;
} else
epc += 8;
regs->cp0_epc = epc;
break;
case bposge32_op:
if (!cpu_has_dsp)
goto sigill;
dspcontrol = rddsp(0x01);
if (dspcontrol >= 32) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
} else
epc += 8;
regs->cp0_epc = epc;
break;
}
break;
/*
* These are unconditional and in j_format.
*/
case jalx_op:
case jal_op:
regs->regs[31] = regs->cp0_epc + 8;
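/* Fall through */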
case j_op:
epc += 4;
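/*
* J-format targets are region-relative: keep the upper 4 bits of the
* delay-slot PC and splice in the 26-bit instruction index shifted
* left by 2.
*/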
epc >>= 28;
epc <<= 28;
epc |= (insn.j_format.target << 2);
regs->cp0_epc = epc;
if (insn.i_format.opcode == jalx_op)
set_isa16_mode(regs->cp0_epc);
break;
/*
* These are conditional and in i_format.
*/
case beq_op:
case beql_op:
if (regs->regs[insn.i_format.rs] ==
regs->regs[insn.i_format.rt]) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
if (insn.i_format.opcode == beql_op)
ret = BRANCH_LIKELY_TAKEN;
} else
epc += 8;
regs->cp0_epc = epc;
break;
case bne_op:
case bnel_op:
if (regs->regs[insn.i_format.rs] !=
regs->regs[insn.i_format.rt]) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
if (insn.i_format.opcode == bnel_op)
ret = BRANCH_LIKELY_TAKEN;
} else
epc += 8;
regs->cp0_epc = epc;
break;
case blez_op: /* not really i_format */
case blezl_op:
/* rt field assumed to be zero */
if ((long)regs->regs[insn.i_format.rs] <= 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
if (insn.i_format.opcode == blezl_op)
ret = BRANCH_LIKELY_TAKEN;
} else
epc += 8;
regs->cp0_epc = epc;
break;
case bgtz_op:
case bgtzl_op:
/* rt field assumed to be zero */
if ((long)regs->regs[insn.i_format.rs] > 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
if (insn.i_format.opcode == bgtzl_op)
ret = BRANCH_LIKELY_TAKEN;
} else
epc += 8;
regs->cp0_epc = epc;
break;
/*
* And now the FPA/cp1 branch instructions.
*/
case cop1_op:
preempt_disable();
if (is_fpu_owner())
fcr31 = read_32bit_cp1_register(CP1_STATUS);
else
fcr31 = current->thread.fpu.fcr31;
preempt_enable();
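/*
* Locate the FP condition code in FCSR: cc0 is bit 23, cc1-cc7 are
* bits 25-31, hence the extra increment for non-zero cc numbers.
*/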
bit = (insn.i_format.rt >> 2);
bit += (bit != 0);
bit += 23;
switch (insn.i_format.rt & 3) {
case 0: /* bc1f */
case 2: /* bc1fl */
if (~fcr31 & (1 << bit)) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
if (insn.i_format.rt == 2)
ret = BRANCH_LIKELY_TAKEN;
} else
epc += 8;
regs->cp0_epc = epc;
break;
case 1: /* bc1t */
case 3: /* bc1tl */
if (fcr31 & (1 << bit)) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
if (insn.i_format.rt == 3)
ret = BRANCH_LIKELY_TAKEN;
} else
epc += 8;
regs->cp0_epc = epc;
break;
}
break;
#ifdef CONFIG_CPU_CAVIUM_OCTEON
case lwc2_op: /* This is bbit0 on Octeon */
if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
== 0)
epc = epc + 4 + (insn.i_format.simmediate << 2);
else
epc += 8;
regs->cp0_epc = epc;
break;
case ldc2_op: /* This is bbit032 on Octeon */
if ((regs->regs[insn.i_format.rs] &
(1ull<<(insn.i_format.rt+32))) == 0)
epc = epc + 4 + (insn.i_format.simmediate << 2);
else
epc += 8;
regs->cp0_epc = epc;
break;
case swc2_op: /* This is bbit1 on Octeon */
if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
epc = epc + 4 + (insn.i_format.simmediate << 2);
else
epc += 8;
regs->cp0_epc = epc;
break;
case sdc2_op: /* This is bbit132 on Octeon */
if (regs->regs[insn.i_format.rs] &
(1ull<<(insn.i_format.rt+32)))
epc = epc + 4 + (insn.i_format.simmediate << 2);
else
epc += 8;
regs->cp0_epc = epc;
break;
#endif
}
return ret;
sigill:
printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
force_sig(SIGBUS, current);
return -EFAULT;
}
EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn);
int __compute_return_epc(struct pt_regs *regs)
{
unsigned int __user *addr;
long epc;
union mips_instruction insn;
epc = regs->cp0_epc;
if (epc & 3)
goto unaligned;
/*
* Read the instruction
*/
addr = (unsigned int __user *) epc;
if (__get_user(insn.word, addr)) {
force_sig(SIGSEGV, current);
return -EFAULT;
}
return __compute_return_epc_for_insn(regs, insn);
unaligned:
printk("%s: unaligned epc - sending SIGBUS.\n", current->comm);
force_sig(SIGBUS, current);
return -EFAULT;
}


@@ -0,0 +1,154 @@
/*
* Copyright (C) 2000,2001,2004 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/irq.h>
#include <asm/addrspace.h>
#include <asm/io.h>
#include <asm/time.h>
#include <asm/sibyte/bcm1480_regs.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/bcm1480_int.h>
#include <asm/sibyte/bcm1480_scd.h>
#include <asm/sibyte/sb1250.h>
#define IMR_IP2_VAL K_BCM1480_INT_MAP_I0
#define IMR_IP3_VAL K_BCM1480_INT_MAP_I1
#define IMR_IP4_VAL K_BCM1480_INT_MAP_I2
/*
* The general purpose timer ticks at 1 MHz, independent of
* the rest of the system
*/
static void sibyte_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
unsigned int cpu = smp_processor_id();
void __iomem *cfg, *init;
cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
__raw_writeq(0, cfg);
__raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
__raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
cfg);
break;
case CLOCK_EVT_MODE_ONESHOT:
/* Stop the timer until we actually program a shot */
case CLOCK_EVT_MODE_SHUTDOWN:
__raw_writeq(0, cfg);
break;
case CLOCK_EVT_MODE_UNUSED: /* shuddup gcc */
case CLOCK_EVT_MODE_RESUME:
;
}
}
static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd)
{
unsigned int cpu = smp_processor_id();
void __iomem *cfg, *init;
cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
__raw_writeq(0, cfg);
__raw_writeq(delta - 1, init);
__raw_writeq(M_SCD_TIMER_ENABLE, cfg);
return 0;
}
static irqreturn_t sibyte_counter_handler(int irq, void *dev_id)
{
unsigned int cpu = smp_processor_id();
struct clock_event_device *cd = dev_id;
void __iomem *cfg;
unsigned long tmode;
if (cd->mode == CLOCK_EVT_MODE_PERIODIC)
tmode = M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS;
else
tmode = 0;
/* ACK interrupt */
cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
____raw_writeq(tmode, cfg);
cd->event_handler(cd);
return IRQ_HANDLED;
}
static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent);
static DEFINE_PER_CPU(struct irqaction, sibyte_hpt_irqaction);
static DEFINE_PER_CPU(char [18], sibyte_hpt_name);
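/*
* Each CPU owns one of the four SCD general-purpose timers and registers
* it as a per-cpu clock event device, with the timer interrupt routed to
* that CPU only.
*/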
void sb1480_clockevent_init(void)
{
unsigned int cpu = smp_processor_id();
unsigned int irq = K_BCM1480_INT_TIMER_0 + cpu;
struct irqaction *action = &per_cpu(sibyte_hpt_irqaction, cpu);
struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu);
unsigned char *name = per_cpu(sibyte_hpt_name, cpu);
BUG_ON(cpu > 3); /* Only have 4 general purpose timers */
sprintf(name, "bcm1480-counter-%d", cpu);
cd->name = name;
cd->features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT;
clockevent_set_clock(cd, V_SCD_TIMER_FREQ);
cd->max_delta_ns = clockevent_delta2ns(0x7fffff, cd);
cd->min_delta_ns = clockevent_delta2ns(2, cd);
cd->rating = 200;
cd->irq = irq;
cd->cpumask = cpumask_of(cpu);
cd->set_next_event = sibyte_next_event;
cd->set_mode = sibyte_set_mode;
clockevents_register_device(cd);
bcm1480_mask_irq(cpu, irq);
/*
* Map the timer interrupt to IP[4] of this cpu
*/
__raw_writeq(IMR_IP4_VAL,
IOADDR(A_BCM1480_IMR_REGISTER(cpu,
R_BCM1480_IMR_INTERRUPT_MAP_BASE_H) + (irq << 3)));
bcm1480_unmask_irq(cpu, irq);
action->handler = sibyte_counter_handler;
action->flags = IRQF_PERCPU | IRQF_TIMER;
action->name = name;
action->dev_id = cd;
irq_set_affinity(irq, cpumask_of(cpu));
setup_irq(irq, action);
}


@@ -0,0 +1,130 @@
/*
* DS1287 clockevent driver
*
* Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mc146818rtc.h>
#include <linux/irq.h>
#include <asm/time.h>
int ds1287_timer_state(void)
{
return (CMOS_READ(RTC_REG_C) & RTC_PF) != 0;
}
int ds1287_set_base_clock(unsigned int hz)
{
u8 rate;
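/* Rate Select (RS3..RS0) values for RTC register A: 0x9 = 128 Hz, 0x8 = 256 Hz, 0x6 = 1024 Hz. */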
switch (hz) {
case 128:
rate = 0x9;
break;
case 256:
rate = 0x8;
break;
case 1024:
rate = 0x6;
break;
default:
return -EINVAL;
}
CMOS_WRITE(RTC_REF_CLCK_32KHZ | rate, RTC_REG_A);
return 0;
}
static int ds1287_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
return -EINVAL;
}
static void ds1287_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
u8 val;
spin_lock(&rtc_lock);
val = CMOS_READ(RTC_REG_B);
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
val |= RTC_PIE;
break;
default:
val &= ~RTC_PIE;
break;
}
CMOS_WRITE(val, RTC_REG_B);
spin_unlock(&rtc_lock);
}
static void ds1287_event_handler(struct clock_event_device *dev)
{
}
static struct clock_event_device ds1287_clockevent = {
.name = "ds1287",
.features = CLOCK_EVT_FEAT_PERIODIC,
.set_next_event = ds1287_set_next_event,
.set_mode = ds1287_set_mode,
.event_handler = ds1287_event_handler,
};
static irqreturn_t ds1287_interrupt(int irq, void *dev_id)
{
struct clock_event_device *cd = &ds1287_clockevent;
/* Ack the RTC interrupt. */
CMOS_READ(RTC_REG_C);
cd->event_handler(cd);
return IRQ_HANDLED;
}
static struct irqaction ds1287_irqaction = {
.handler = ds1287_interrupt,
.flags = IRQF_PERCPU | IRQF_TIMER,
.name = "ds1287",
};
int __init ds1287_clockevent_init(int irq)
{
struct clock_event_device *cd;
cd = &ds1287_clockevent;
cd->rating = 100;
cd->irq = irq;
clockevent_set_clock(cd, 32768);
cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
cd->cpumask = cpumask_of(0);
clockevents_register_device(&ds1287_clockevent);
return setup_irq(irq, &ds1287_irqaction);
}
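
A minimal user-space sketch of the rate-select arithmetic behind ds1287_set_base_clock() above: the DS1287 periodic interrupt runs at 32768 >> (rate - 1) Hz, so the three supported frequencies map onto the rate nibbles hard-coded in the switch. The helper name rate_for_hz() is invented for illustration only.

#include <stdio.h>

/* Hypothetical helper mirroring the switch in ds1287_set_base_clock():
 * find the DS1287 rate-select nibble for a periodic frequency, or -1 if
 * the frequency cannot be generated. */
static int rate_for_hz(unsigned int hz)
{
        int rate;

        for (rate = 1; rate <= 15; rate++)
                if ((32768u >> (rate - 1)) == hz)
                        return rate;
        return -1;
}

int main(void)
{
        /* Prints "9 8 6", the same values the driver hard-codes. */
        printf("%d %d %d\n", rate_for_hz(128), rate_for_hz(256), rate_for_hz(1024));
        return 0;
}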

105
arch/mips/kernel/cevt-gic.c Normal file
View file

@ -0,0 +1,105 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2013 Imagination Technologies Ltd.
*/
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/irq.h>
#include <asm/time.h>
#include <asm/gic.h>
#include <asm/mips-boards/maltaint.h>
DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device);
int gic_timer_irq_installed;
static int gic_next_event(unsigned long delta, struct clock_event_device *evt)
{
u64 cnt;
int res;
cnt = gic_read_count();
cnt += (u64)delta;
gic_write_cpu_compare(cnt, cpumask_first(evt->cpumask));
res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0;
return res;
}
void gic_set_clock_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
/* Nothing to do ... */
}
irqreturn_t gic_compare_interrupt(int irq, void *dev_id)
{
struct clock_event_device *cd;
int cpu = smp_processor_id();
gic_write_compare(gic_read_compare());
cd = &per_cpu(gic_clockevent_device, cpu);
cd->event_handler(cd);
return IRQ_HANDLED;
}
struct irqaction gic_compare_irqaction = {
.handler = gic_compare_interrupt,
.flags = IRQF_PERCPU | IRQF_TIMER,
.name = "timer",
};
void gic_event_handler(struct clock_event_device *dev)
{
}
int gic_clockevent_init(void)
{
unsigned int cpu = smp_processor_id();
struct clock_event_device *cd;
unsigned int irq;
if (!cpu_has_counter || !gic_frequency)
return -ENXIO;
irq = MIPS_GIC_IRQ_BASE;
cd = &per_cpu(gic_clockevent_device, cpu);
cd->name = "MIPS GIC";
cd->features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_C3STOP;
clockevent_set_clock(cd, gic_frequency);
/* Calculate the min / max delta */
cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
cd->rating = 300;
cd->irq = irq;
cd->cpumask = cpumask_of(cpu);
cd->set_next_event = gic_next_event;
cd->set_mode = gic_set_clock_mode;
cd->event_handler = gic_event_handler;
clockevents_register_device(cd);
GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_MAP), 0x80000002);
GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), GIC_VPE_SMASK_CMP_MSK);
if (gic_timer_irq_installed)
return 0;
gic_timer_irq_installed = 1;
setup_irq(irq, &gic_compare_irqaction);
irq_set_handler(irq, handle_percpu_irq);
return 0;
}
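
The -ETIME test in gic_next_event() above is the standard clockevent idiom: write compare = now + delta, then re-read the counter; if the counter has already reached the compare value the event was missed and the core will retry with a larger delta. A self-contained sketch of that idiom against a simulated counter (all names below are illustrative, not kernel API):

#include <stdio.h>
#include <stdint.h>

static uint64_t fake_count;                     /* stands in for gic_read_count() */

static int program_next_event(uint64_t delta, uint64_t programming_latency)
{
        uint64_t cnt = fake_count + delta;      /* compare value to write */

        fake_count += programming_latency;      /* time passes while we program */
        /* Same check as gic_next_event(): did the counter already pass it? */
        return ((int64_t)(fake_count - cnt) >= 0) ? -1 /* -ETIME */ : 0;
}

int main(void)
{
        printf("%d\n", program_next_event(1000, 10));   /* 0: event armed in time */
        printf("%d\n", program_next_event(5, 10));      /* -1: deadline already passed */
        return 0;
}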

View file

@ -0,0 +1,141 @@
/*
* GT641xx clockevent routines.
*
* Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <asm/gt64120.h>
#include <asm/time.h>
static DEFINE_RAW_SPINLOCK(gt641xx_timer_lock);
static unsigned int gt641xx_base_clock;
void gt641xx_set_base_clock(unsigned int clock)
{
gt641xx_base_clock = clock;
}
int gt641xx_timer0_state(void)
{
if (GT_READ(GT_TC0_OFS))
return 0;
GT_WRITE(GT_TC0_OFS, gt641xx_base_clock / HZ);
GT_WRITE(GT_TC_CONTROL_OFS, GT_TC_CONTROL_ENTC0_MSK);
return 1;
}
static int gt641xx_timer0_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
u32 ctrl;
raw_spin_lock(&gt641xx_timer_lock);
ctrl = GT_READ(GT_TC_CONTROL_OFS);
ctrl &= ~(GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK);
ctrl |= GT_TC_CONTROL_ENTC0_MSK;
GT_WRITE(GT_TC0_OFS, delta);
GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
raw_spin_unlock(&gt641xx_timer_lock);
return 0;
}
static void gt641xx_timer0_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
u32 ctrl;
raw_spin_lock(&gt641xx_timer_lock);
ctrl = GT_READ(GT_TC_CONTROL_OFS);
ctrl &= ~(GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK);
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
ctrl |= GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK;
break;
case CLOCK_EVT_MODE_ONESHOT:
ctrl |= GT_TC_CONTROL_ENTC0_MSK;
break;
default:
break;
}
GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
raw_spin_unlock(&gt641xx_timer_lock);
}
static void gt641xx_timer0_event_handler(struct clock_event_device *dev)
{
}
static struct clock_event_device gt641xx_timer0_clockevent = {
.name = "gt641xx-timer0",
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
.irq = GT641XX_TIMER0_IRQ,
.set_next_event = gt641xx_timer0_set_next_event,
.set_mode = gt641xx_timer0_set_mode,
.event_handler = gt641xx_timer0_event_handler,
};
static irqreturn_t gt641xx_timer0_interrupt(int irq, void *dev_id)
{
struct clock_event_device *cd = &gt641xx_timer0_clockevent;
cd->event_handler(cd);
return IRQ_HANDLED;
}
static struct irqaction gt641xx_timer0_irqaction = {
.handler = gt641xx_timer0_interrupt,
.flags = IRQF_PERCPU | IRQF_TIMER,
.name = "gt641xx_timer0",
};
static int __init gt641xx_timer0_clockevent_init(void)
{
struct clock_event_device *cd;
if (!gt641xx_base_clock)
return 0;
GT_WRITE(GT_TC0_OFS, gt641xx_base_clock / HZ);
cd = &gt641xx_timer0_clockevent;
cd->rating = 200 + gt641xx_base_clock / 10000000;
clockevent_set_clock(cd, gt641xx_base_clock);
cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
cd->cpumask = cpumask_of(0);
clockevents_register_device(&gt641xx_timer0_clockevent);
return setup_irq(GT641XX_TIMER0_IRQ, &gt641xx_timer0_irqaction);
}
arch_initcall(gt641xx_timer0_clockevent_init);

210
arch/mips/kernel/cevt-r4k.c Normal file
View file

@ -0,0 +1,210 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2007 MIPS Technologies, Inc.
* Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
*/
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/irq.h>
#include <asm/time.h>
#include <asm/cevt-r4k.h>
#include <asm/gic.h>
static int mips_next_event(unsigned long delta,
struct clock_event_device *evt)
{
unsigned int cnt;
int res;
cnt = read_c0_count();
cnt += delta;
write_c0_compare(cnt);
res = ((int)(read_c0_count() - cnt) >= 0) ? -ETIME : 0;
return res;
}
void mips_set_clock_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
/* Nothing to do ... */
}
DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
int cp0_timer_irq_installed;
irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
const int r2 = cpu_has_mips_r2;
struct clock_event_device *cd;
int cpu = smp_processor_id();
/*
* Suckage alert:
* Before R2 of the architecture there was no way to see if a
* performance counter interrupt was pending, so we have to run
* the performance counter interrupt handler anyway.
*/
if (handle_perf_irq(r2))
goto out;
/*
* The same lack of a pending indication applies to the timer interrupt
* itself. But with the above we now know that the reason we got here
* must be a timer interrupt. Being the paranoiacs we are we check anyway.
*/
if (!r2 || (read_c0_cause() & (1 << 30))) {
/* Clear Count/Compare Interrupt */
write_c0_compare(read_c0_compare());
cd = &per_cpu(mips_clockevent_device, cpu);
cd->event_handler(cd);
}
out:
return IRQ_HANDLED;
}
struct irqaction c0_compare_irqaction = {
.handler = c0_compare_interrupt,
.flags = IRQF_PERCPU | IRQF_TIMER,
.name = "timer",
};
void mips_event_handler(struct clock_event_device *dev)
{
}
/*
* FIXME: This doesn't hold for the relocated E9000 compare interrupt.
*/
static int c0_compare_int_pending(void)
{
#ifdef CONFIG_IRQ_GIC
if (cpu_has_veic)
return gic_get_timer_pending();
#endif
return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP);
}
/*
* Compare interrupt can be routed and latched outside the core,
* so wait up to a worst-case number of cycle counter ticks for timer interrupt
* changes to propagate to the cause register.
*/
#define COMPARE_INT_SEEN_TICKS 50
int c0_compare_int_usable(void)
{
unsigned int delta;
unsigned int cnt;
#ifdef CONFIG_KVM_GUEST
return 1;
#endif
/*
* IP7 already pending? Try to clear it by acking the timer.
*/
if (c0_compare_int_pending()) {
cnt = read_c0_count();
write_c0_compare(cnt);
back_to_back_c0_hazard();
while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
if (!c0_compare_int_pending())
break;
if (c0_compare_int_pending())
return 0;
}
for (delta = 0x10; delta <= 0x400000; delta <<= 1) {
cnt = read_c0_count();
cnt += delta;
write_c0_compare(cnt);
back_to_back_c0_hazard();
if ((int)(read_c0_count() - cnt) < 0)
break;
/* increase delta if the timer was already expired */
}
while ((int)(read_c0_count() - cnt) <= 0)
; /* Wait for expiry */
while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
if (c0_compare_int_pending())
break;
if (!c0_compare_int_pending())
return 0;
cnt = read_c0_count();
write_c0_compare(cnt);
back_to_back_c0_hazard();
while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
if (!c0_compare_int_pending())
break;
if (c0_compare_int_pending())
return 0;
/*
* Feels like a real count / compare timer.
*/
return 1;
}
int r4k_clockevent_init(void)
{
unsigned int cpu = smp_processor_id();
struct clock_event_device *cd;
unsigned int irq;
if (!cpu_has_counter || !mips_hpt_frequency)
return -ENXIO;
if (!c0_compare_int_usable())
return -ENXIO;
/*
* With vectored interrupts things are getting platform specific.
* get_c0_compare_int is a hook to allow a platform to return the
* interrupt number of its liking.
*/
irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
if (get_c0_compare_int)
irq = get_c0_compare_int();
cd = &per_cpu(mips_clockevent_device, cpu);
cd->name = "MIPS";
cd->features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_C3STOP |
CLOCK_EVT_FEAT_PERCPU;
clockevent_set_clock(cd, mips_hpt_frequency);
/* Calculate the min / max delta */
cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
cd->rating = 300;
cd->irq = irq;
cd->cpumask = cpumask_of(cpu);
cd->set_next_event = mips_next_event;
cd->set_mode = mips_set_clock_mode;
cd->event_handler = mips_event_handler;
clockevents_register_device(cd);
if (cp0_timer_irq_installed)
return 0;
cp0_timer_irq_installed = 1;
setup_irq(irq, &c0_compare_irqaction);
return 0;
}
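
The probe loop in c0_compare_int_usable() above starts with a delta of 0x10 cycles and doubles it until the write to Compare lands in the future, so the check still works on cores where programming the timer itself costs many Count ticks. A stand-alone illustration of that retry loop against a simulated counter (the 0x1000-cycle programming overhead is an invented figure):

#include <stdio.h>

static unsigned int count;                       /* models read_c0_count() */
static const unsigned int overhead = 0x1000;     /* cycles burnt per attempt */

int main(void)
{
        unsigned int delta, cnt;

        for (delta = 0x10; delta <= 0x400000; delta <<= 1) {
                cnt = count + delta;     /* value we would write to Compare */
                count += overhead;       /* time passes while programming it */
                if ((int)(count - cnt) < 0)
                        break;           /* Compare is still in the future */
                /* otherwise the deadline already expired; double delta */
        }
        printf("first usable delta: 0x%x\n", delta);    /* 0x2000 with these numbers */
        return 0;
}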

View file

@ -0,0 +1,153 @@
/*
* Copyright (C) 2000, 2001 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <asm/addrspace.h>
#include <asm/io.h>
#include <asm/time.h>
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_int.h>
#include <asm/sibyte/sb1250_scd.h>
#define IMR_IP2_VAL K_INT_MAP_I0
#define IMR_IP3_VAL K_INT_MAP_I1
#define IMR_IP4_VAL K_INT_MAP_I2
/*
* The general purpose timer ticks at 1 MHz independently of
* the rest of the system.
*/
static void sibyte_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
unsigned int cpu = smp_processor_id();
void __iomem *cfg, *init;
cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
__raw_writeq(0, cfg);
__raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
__raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
cfg);
break;
case CLOCK_EVT_MODE_ONESHOT:
/* Stop the timer until we actually program a shot */
case CLOCK_EVT_MODE_SHUTDOWN:
__raw_writeq(0, cfg);
break;
case CLOCK_EVT_MODE_UNUSED: /* shuddup gcc */
case CLOCK_EVT_MODE_RESUME:
;
}
}
static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd)
{
unsigned int cpu = smp_processor_id();
void __iomem *cfg, *init;
cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
__raw_writeq(0, cfg);
__raw_writeq(delta - 1, init);
__raw_writeq(M_SCD_TIMER_ENABLE, cfg);
return 0;
}
static irqreturn_t sibyte_counter_handler(int irq, void *dev_id)
{
unsigned int cpu = smp_processor_id();
struct clock_event_device *cd = dev_id;
void __iomem *cfg;
unsigned long tmode;
if (cd->mode == CLOCK_EVT_MODE_PERIODIC)
tmode = M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS;
else
tmode = 0;
/* ACK interrupt */
cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
____raw_writeq(tmode, cfg);
cd->event_handler(cd);
return IRQ_HANDLED;
}
static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent);
static DEFINE_PER_CPU(struct irqaction, sibyte_hpt_irqaction);
static DEFINE_PER_CPU(char [18], sibyte_hpt_name);
void sb1250_clockevent_init(void)
{
unsigned int cpu = smp_processor_id();
unsigned int irq = K_INT_TIMER_0 + cpu;
struct irqaction *action = &per_cpu(sibyte_hpt_irqaction, cpu);
struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu);
unsigned char *name = per_cpu(sibyte_hpt_name, cpu);
/* Only have 4 general purpose timers, and we use the last one as the hpt */
BUG_ON(cpu > 2);
sprintf(name, "sb1250-counter-%d", cpu);
cd->name = name;
cd->features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT;
clockevent_set_clock(cd, V_SCD_TIMER_FREQ);
cd->max_delta_ns = clockevent_delta2ns(0x7fffff, cd);
cd->min_delta_ns = clockevent_delta2ns(2, cd);
cd->rating = 200;
cd->irq = irq;
cd->cpumask = cpumask_of(cpu);
cd->set_next_event = sibyte_next_event;
cd->set_mode = sibyte_set_mode;
clockevents_register_device(cd);
sb1250_mask_irq(cpu, irq);
/*
* Map the timer interrupt to IP[4] of this cpu
*/
__raw_writeq(IMR_IP4_VAL,
IOADDR(A_IMR_REGISTER(cpu, R_IMR_INTERRUPT_MAP_BASE) +
(irq << 3)));
sb1250_unmask_irq(cpu, irq);
action->handler = sibyte_counter_handler;
action->flags = IRQF_PERCPU | IRQF_TIMER;
action->name = name;
action->dev_id = cd;
irq_set_affinity(irq, cpumask_of(cpu));
setup_irq(irq, action);
}
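
A small worked example of the periodic reload programmed in sibyte_set_mode() above, assuming the nominal 1 MHz SCD timer clock and an illustrative HZ of 100 (both values are stand-ins, not taken from any particular configuration):

#include <stdio.h>

#define V_SCD_TIMER_FREQ 1000000u       /* the SCD general purpose timers tick at 1 MHz */
#define HZ 100u                         /* example kernel tick rate */

int main(void)
{
        /* Same expression as the CLOCK_EVT_MODE_PERIODIC case above; the
         * timer counts init + 1 ticks per period, hence the "- 1". */
        unsigned int init = (V_SCD_TIMER_FREQ / HZ) - 1;

        printf("reload = %u ticks -> %u interrupts/s\n",
               init, V_SCD_TIMER_FREQ / (init + 1));
        return 0;
}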

View file

@ -0,0 +1,193 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Based on linux/arch/mips/kernel/cevt-r4k.c,
* linux/arch/mips/jmr3927/rbhma3100/setup.c
*
* Copyright 2001 MontaVista Software Inc.
* Copyright (C) 2000-2001 Toshiba Corporation
* Copyright (C) 2007 MIPS Technologies, Inc.
* Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/time.h>
#include <asm/txx9tmr.h>
#define TCR_BASE (TXx9_TMTCR_CCDE | TXx9_TMTCR_CRE | TXx9_TMTCR_TMODE_ITVL)
#define TIMER_CCD 0 /* 1/2 */
#define TIMER_CLK(imclk) ((imclk) / (2 << TIMER_CCD))
struct txx9_clocksource {
struct clocksource cs;
struct txx9_tmr_reg __iomem *tmrptr;
};
static cycle_t txx9_cs_read(struct clocksource *cs)
{
struct txx9_clocksource *txx9_cs =
container_of(cs, struct txx9_clocksource, cs);
return __raw_readl(&txx9_cs->tmrptr->trr);
}
/* Use a width one bit smaller so the full range of that width can be used */
#define TXX9_CLOCKSOURCE_BITS (TXX9_TIMER_BITS - 1)
static struct txx9_clocksource txx9_clocksource = {
.cs = {
.name = "TXx9",
.rating = 200,
.read = txx9_cs_read,
.mask = CLOCKSOURCE_MASK(TXX9_CLOCKSOURCE_BITS),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
},
};
void __init txx9_clocksource_init(unsigned long baseaddr,
unsigned int imbusclk)
{
struct txx9_tmr_reg __iomem *tmrptr;
clocksource_register_hz(&txx9_clocksource.cs, TIMER_CLK(imbusclk));
tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg));
__raw_writel(TCR_BASE, &tmrptr->tcr);
__raw_writel(0, &tmrptr->tisr);
__raw_writel(TIMER_CCD, &tmrptr->ccdr);
__raw_writel(TXx9_TMITMR_TZCE, &tmrptr->itmr);
__raw_writel(1 << TXX9_CLOCKSOURCE_BITS, &tmrptr->cpra);
__raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr);
txx9_clocksource.tmrptr = tmrptr;
}
struct txx9_clock_event_device {
struct clock_event_device cd;
struct txx9_tmr_reg __iomem *tmrptr;
};
static void txx9tmr_stop_and_clear(struct txx9_tmr_reg __iomem *tmrptr)
{
/* stop and reset counter */
__raw_writel(TCR_BASE, &tmrptr->tcr);
/* clear pending interrupt */
__raw_writel(0, &tmrptr->tisr);
}
static void txx9tmr_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
struct txx9_clock_event_device *txx9_cd =
container_of(evt, struct txx9_clock_event_device, cd);
struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
txx9tmr_stop_and_clear(tmrptr);
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
__raw_writel(TXx9_TMITMR_TIIE | TXx9_TMITMR_TZCE,
&tmrptr->itmr);
/* start timer */
__raw_writel(((u64)(NSEC_PER_SEC / HZ) * evt->mult) >>
evt->shift,
&tmrptr->cpra);
__raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr);
break;
case CLOCK_EVT_MODE_SHUTDOWN:
case CLOCK_EVT_MODE_UNUSED:
__raw_writel(0, &tmrptr->itmr);
break;
case CLOCK_EVT_MODE_ONESHOT:
__raw_writel(TXx9_TMITMR_TIIE, &tmrptr->itmr);
break;
case CLOCK_EVT_MODE_RESUME:
__raw_writel(TIMER_CCD, &tmrptr->ccdr);
__raw_writel(0, &tmrptr->itmr);
break;
}
}
static int txx9tmr_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
struct txx9_clock_event_device *txx9_cd =
container_of(evt, struct txx9_clock_event_device, cd);
struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
txx9tmr_stop_and_clear(tmrptr);
/* start timer */
__raw_writel(delta, &tmrptr->cpra);
__raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr);
return 0;
}
static struct txx9_clock_event_device txx9_clock_event_device = {
.cd = {
.name = "TXx9",
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
.rating = 200,
.set_mode = txx9tmr_set_mode,
.set_next_event = txx9tmr_set_next_event,
},
};
static irqreturn_t txx9tmr_interrupt(int irq, void *dev_id)
{
struct txx9_clock_event_device *txx9_cd = dev_id;
struct clock_event_device *cd = &txx9_cd->cd;
struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
__raw_writel(0, &tmrptr->tisr); /* ack interrupt */
cd->event_handler(cd);
return IRQ_HANDLED;
}
static struct irqaction txx9tmr_irq = {
.handler = txx9tmr_interrupt,
.flags = IRQF_PERCPU | IRQF_TIMER,
.name = "txx9tmr",
.dev_id = &txx9_clock_event_device,
};
void __init txx9_clockevent_init(unsigned long baseaddr, int irq,
unsigned int imbusclk)
{
struct clock_event_device *cd = &txx9_clock_event_device.cd;
struct txx9_tmr_reg __iomem *tmrptr;
tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg));
txx9tmr_stop_and_clear(tmrptr);
__raw_writel(TIMER_CCD, &tmrptr->ccdr);
__raw_writel(0, &tmrptr->itmr);
txx9_clock_event_device.tmrptr = tmrptr;
clockevent_set_clock(cd, TIMER_CLK(imbusclk));
cd->max_delta_ns =
clockevent_delta2ns(0xffffffff >> (32 - TXX9_TIMER_BITS), cd);
cd->min_delta_ns = clockevent_delta2ns(0xf, cd);
cd->irq = irq;
cd->cpumask = cpumask_of(0),
clockevents_register_device(cd);
setup_irq(irq, &txx9tmr_irq);
printk(KERN_INFO "TXx9: clockevent device at 0x%lx, irq %d\n",
baseaddr, irq);
}
void __init txx9_tmr_init(unsigned long baseaddr)
{
struct txx9_tmr_reg __iomem *tmrptr;
tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg));
/* Start once to make CounterResetEnable effective */
__raw_writel(TXx9_TMTCR_CRE | TXx9_TMTCR_TCE, &tmrptr->tcr);
/* Stop and reset the counter */
__raw_writel(TXx9_TMTCR_CRE, &tmrptr->tcr);
__raw_writel(0, &tmrptr->tisr);
__raw_writel(0xffffffff, &tmrptr->cpra);
__raw_writel(0, &tmrptr->itmr);
__raw_writel(0, &tmrptr->ccdr);
__raw_writel(0, &tmrptr->pgmr);
iounmap(tmrptr);
}
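
The periodic reload in txx9tmr_set_mode() converts one tick period (NSEC_PER_SEC / HZ nanoseconds) into timer cycles with the clockevent mult/shift pair that clockevent_set_clock() set up. A self-contained sketch of that conversion, using an illustrative 20 MHz timer clock and HZ = 100; the mult/shift derivation here is a simplified stand-in for the kernel's helper:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ull

int main(void)
{
        uint64_t clk = 20000000;        /* example TIMER_CLK(imbusclk): 20 MHz */
        unsigned int hz = 100;          /* example kernel tick rate */
        unsigned int shift = 32;

        /* Pick mult so that cycles = (ns * mult) >> shift,
         * i.e. mult ~= clk * 2^shift / NSEC_PER_SEC. */
        uint64_t mult = (clk << shift) / NSEC_PER_SEC;
        uint64_t cycles = ((NSEC_PER_SEC / hz) * mult) >> shift;

        /* Prints roughly 200000: 10 ms worth of 20 MHz cycles. */
        printf("CPRA ~= %llu cycles\n", (unsigned long long)cycles);
        return 0;
}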

493
arch/mips/kernel/cps-vec.S Normal file
View file

@ -0,0 +1,493 @@
/*
* Copyright (C) 2013 Imagination Technologies
* Author: Paul Burton <paul.burton@imgtec.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <asm/addrspace.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/eva.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/pm.h>
#define GCR_CL_COHERENCE_OFS 0x2008
#define GCR_CL_ID_OFS 0x2028
.extern mips_cm_base
.set noreorder
/*
* Set dest to non-zero if the core supports the MT ASE, else zero. If
* MT is not supported then branch to nomt.
*/
.macro has_mt dest, nomt
mfc0 \dest, CP0_CONFIG
bgez \dest, \nomt
mfc0 \dest, CP0_CONFIG, 1
bgez \dest, \nomt
mfc0 \dest, CP0_CONFIG, 2
bgez \dest, \nomt
mfc0 \dest, CP0_CONFIG, 3
andi \dest, \dest, MIPS_CONF3_MT
beqz \dest, \nomt
.endm
.section .text.cps-vec
.balign 0x1000
LEAF(mips_cps_core_entry)
/*
* These first 12 bytes will be patched by cps_smp_setup to load the
* base address of the CM GCRs into register v1 and the CCA to use into
* register s0.
*/
.quad 0
.word 0
/* Check whether we're here due to an NMI */
mfc0 k0, CP0_STATUS
and k0, k0, ST0_NMI
beqz k0, not_nmi
nop
/* This is an NMI */
la k0, nmi_handler
jr k0
nop
not_nmi:
/* Setup Cause */
li t0, CAUSEF_IV
mtc0 t0, CP0_CAUSE
/* Setup Status */
li t0, ST0_CU1 | ST0_CU0
mtc0 t0, CP0_STATUS
/*
* Clear the bits used to index the caches. Note that the architecture
* dictates that writing to any of TagLo or TagHi selects 0 or 2 should
* be valid for all MIPS32 CPUs, even those for which said writes are
* unnecessary.
*/
mtc0 zero, CP0_TAGLO, 0
mtc0 zero, CP0_TAGHI, 0
mtc0 zero, CP0_TAGLO, 2
mtc0 zero, CP0_TAGHI, 2
ehb
/* Primary cache configuration is indicated by Config1 */
mfc0 v0, CP0_CONFIG, 1
/* Detect I-cache line size */
_EXT t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
beqz t0, icache_done
li t1, 2
sllv t0, t1, t0
/* Detect I-cache size */
_EXT t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
xori t2, t1, 0x7
beqz t2, 1f
li t3, 32
addiu t1, t1, 1
sllv t1, t3, t1
1: /* At this point t1 == I-cache sets per way */
_EXT t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
addiu t2, t2, 1
mul t1, t1, t0
mul t1, t1, t2
li a0, KSEG0
add a1, a0, t1
1: cache Index_Store_Tag_I, 0(a0)
add a0, a0, t0
bne a0, a1, 1b
nop
icache_done:
/* Detect D-cache line size */
_EXT t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
beqz t0, dcache_done
li t1, 2
sllv t0, t1, t0
/* Detect D-cache size */
_EXT t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
xori t2, t1, 0x7
beqz t2, 1f
li t3, 32
addiu t1, t1, 1
sllv t1, t3, t1
1: /* At this point t1 == D-cache sets per way */
_EXT t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
addiu t2, t2, 1
mul t1, t1, t0
mul t1, t1, t2
li a0, KSEG0
addu a1, a0, t1
subu a1, a1, t0
1: cache Index_Store_Tag_D, 0(a0)
bne a0, a1, 1b
add a0, a0, t0
dcache_done:
/* Set Kseg0 CCA to that in s0 */
mfc0 t0, CP0_CONFIG
ori t0, 0x7
xori t0, 0x7
or t0, t0, s0
mtc0 t0, CP0_CONFIG
ehb
/* Enter the coherent domain */
li t0, 0xff
sw t0, GCR_CL_COHERENCE_OFS(v1)
ehb
/* Jump to kseg0 */
la t0, 1f
jr t0
nop
/*
* We're up, cached & coherent. Perform any further required core-level
* initialisation.
*/
1: jal mips_cps_core_init
nop
/* Do any EVA initialization if necessary */
eva_init
/*
* Boot any other VPEs within this core that should be online, and
* deactivate this VPE if it should be offline.
*/
jal mips_cps_boot_vpes
nop
/* Off we go! */
lw t1, VPEBOOTCFG_PC(v0)
lw gp, VPEBOOTCFG_GP(v0)
lw sp, VPEBOOTCFG_SP(v0)
jr t1
nop
END(mips_cps_core_entry)
.org 0x200
LEAF(excep_tlbfill)
b .
nop
END(excep_tlbfill)
.org 0x280
LEAF(excep_xtlbfill)
b .
nop
END(excep_xtlbfill)
.org 0x300
LEAF(excep_cache)
b .
nop
END(excep_cache)
.org 0x380
LEAF(excep_genex)
b .
nop
END(excep_genex)
.org 0x400
LEAF(excep_intex)
b .
nop
END(excep_intex)
.org 0x480
LEAF(excep_ejtag)
la k0, ejtag_debug_handler
jr k0
nop
END(excep_ejtag)
LEAF(mips_cps_core_init)
#ifdef CONFIG_MIPS_MT
/* Check that the core implements the MT ASE */
has_mt t0, 3f
nop
.set push
.set mips32r2
.set mt
/* Only allow 1 TC per VPE to execute... */
dmt
/* ...and for the moment only 1 VPE */
dvpe
la t1, 1f
jr.hb t1
nop
/* Enter VPE configuration state */
1: mfc0 t0, CP0_MVPCONTROL
ori t0, t0, MVPCONTROL_VPC
mtc0 t0, CP0_MVPCONTROL
/* Retrieve the number of VPEs within the core */
mfc0 t0, CP0_MVPCONF0
srl t0, t0, MVPCONF0_PVPE_SHIFT
andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
addiu t7, t0, 1
/* If there's only 1, we're done */
beqz t0, 2f
nop
/* Loop through each VPE within this core */
li t5, 1
1: /* Operate on the appropriate TC */
mtc0 t5, CP0_VPECONTROL
ehb
/* Bind TC to VPE (1:1 TC:VPE mapping) */
mttc0 t5, CP0_TCBIND
/* Set exclusive TC, non-active, master */
li t0, VPECONF0_MVP
sll t1, t5, VPECONF0_XTC_SHIFT
or t0, t0, t1
mttc0 t0, CP0_VPECONF0
/* Set TC non-active, non-allocatable */
mttc0 zero, CP0_TCSTATUS
/* Set TC halted */
li t0, TCHALT_H
mttc0 t0, CP0_TCHALT
/* Next VPE */
addiu t5, t5, 1
slt t0, t5, t7
bnez t0, 1b
nop
/* Leave VPE configuration state */
2: mfc0 t0, CP0_MVPCONTROL
xori t0, t0, MVPCONTROL_VPC
mtc0 t0, CP0_MVPCONTROL
3: .set pop
#endif
jr ra
nop
END(mips_cps_core_init)
LEAF(mips_cps_boot_vpes)
/* Retrieve CM base address */
la t0, mips_cm_base
lw t0, 0(t0)
/* Calculate a pointer to this core's struct core_boot_config */
lw t0, GCR_CL_ID_OFS(t0)
li t1, COREBOOTCFG_SIZE
mul t0, t0, t1
la t1, mips_cps_core_bootcfg
lw t1, 0(t1)
addu t0, t0, t1
/* Calculate this VPE's ID. If the core doesn't support MT use 0 */
has_mt t6, 1f
li t9, 0
/* Find the number of VPEs present in the core */
mfc0 t1, CP0_MVPCONF0
srl t1, t1, MVPCONF0_PVPE_SHIFT
andi t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
addiu t1, t1, 1
/* Calculate a mask for the VPE ID from EBase.CPUNum */
clz t1, t1
li t2, 31
subu t1, t2, t1
li t2, 1
sll t1, t2, t1
addiu t1, t1, -1
/* Retrieve the VPE ID from EBase.CPUNum */
mfc0 t9, $15, 1
and t9, t9, t1
1: /* Calculate a pointer to this VPE's struct vpe_boot_config */
li t1, VPEBOOTCFG_SIZE
mul v0, t9, t1
lw t7, COREBOOTCFG_VPECONFIG(t0)
addu v0, v0, t7
#ifdef CONFIG_MIPS_MT
/* If the core doesn't support MT then return */
bnez t6, 1f
nop
jr ra
nop
.set push
.set mips32r2
.set mt
1: /* Enter VPE configuration state */
dvpe
la t1, 1f
jr.hb t1
nop
1: mfc0 t1, CP0_MVPCONTROL
ori t1, t1, MVPCONTROL_VPC
mtc0 t1, CP0_MVPCONTROL
ehb
/* Loop through each VPE */
lw t6, COREBOOTCFG_VPEMASK(t0)
move t8, t6
li t5, 0
/* Check whether the VPE should be running. If not, skip it */
1: andi t0, t6, 1
beqz t0, 2f
nop
/* Operate on the appropriate TC */
mfc0 t0, CP0_VPECONTROL
ori t0, t0, VPECONTROL_TARGTC
xori t0, t0, VPECONTROL_TARGTC
or t0, t0, t5
mtc0 t0, CP0_VPECONTROL
ehb
/* Skip the VPE if its TC is not halted */
mftc0 t0, CP0_TCHALT
beqz t0, 2f
nop
/* Calculate a pointer to the VPE's struct vpe_boot_config */
li t0, VPEBOOTCFG_SIZE
mul t0, t0, t5
addu t0, t0, t7
/* Set the TC restart PC */
lw t1, VPEBOOTCFG_PC(t0)
mttc0 t1, CP0_TCRESTART
/* Set the TC stack pointer */
lw t1, VPEBOOTCFG_SP(t0)
mttgpr t1, sp
/* Set the TC global pointer */
lw t1, VPEBOOTCFG_GP(t0)
mttgpr t1, gp
/* Copy config from this VPE */
mfc0 t0, CP0_CONFIG
mttc0 t0, CP0_CONFIG
/* Ensure no software interrupts are pending */
mttc0 zero, CP0_CAUSE
mttc0 zero, CP0_STATUS
/* Set TC active, not interrupt exempt */
mftc0 t0, CP0_TCSTATUS
li t1, ~TCSTATUS_IXMT
and t0, t0, t1
ori t0, t0, TCSTATUS_A
mttc0 t0, CP0_TCSTATUS
/* Clear the TC halt bit */
mttc0 zero, CP0_TCHALT
/* Set VPE active */
mftc0 t0, CP0_VPECONF0
ori t0, t0, VPECONF0_VPA
mttc0 t0, CP0_VPECONF0
/* Next VPE */
2: srl t6, t6, 1
addiu t5, t5, 1
bnez t6, 1b
nop
/* Leave VPE configuration state */
mfc0 t1, CP0_MVPCONTROL
xori t1, t1, MVPCONTROL_VPC
mtc0 t1, CP0_MVPCONTROL
ehb
evpe
/* Check whether this VPE is meant to be running */
li t0, 1
sll t0, t0, t9
and t0, t0, t8
bnez t0, 2f
nop
/* This VPE should be offline, halt the TC */
li t0, TCHALT_H
mtc0 t0, CP0_TCHALT
la t0, 1f
1: jr.hb t0
nop
2: .set pop
#endif /* CONFIG_MIPS_MT */
/* Return */
jr ra
nop
END(mips_cps_boot_vpes)
#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)
/* Calculate a pointer to this CPU's struct mips_static_suspend_state */
.macro psstate dest
.set push
.set noat
lw $1, TI_CPU(gp)
sll $1, $1, LONGLOG
la \dest, __per_cpu_offset
addu $1, $1, \dest
lw $1, 0($1)
la \dest, cps_cpu_state
addu \dest, \dest, $1
.set pop
.endm
LEAF(mips_cps_pm_save)
/* Save CPU state */
SUSPEND_SAVE_REGS
psstate t1
SUSPEND_SAVE_STATIC
jr v0
nop
END(mips_cps_pm_save)
LEAF(mips_cps_pm_restore)
/* Restore CPU state */
psstate t1
RESUME_RESTORE_STATIC
RESUME_RESTORE_REGS_RETURN
END(mips_cps_pm_restore)
#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */
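
The I-cache and D-cache loops in mips_cps_core_entry above size the caches from Config1: line size is 2 << IL bytes (IL = 0 means the cache is absent), sets per way follow the architectural encoding 64 << IS (with IS = 7 reserved for 32 sets), and the number of ways is the associativity field plus one. A small host-side sketch of that decode, fed with an illustrative field triple rather than a real register read:

#include <stdio.h>

/* Decode a cache described by Config1-style IL/IS/IA fields and return
 * its total size in bytes, or 0 if the cache is absent. */
static unsigned int cache_bytes(unsigned int il, unsigned int is, unsigned int ia)
{
        unsigned int line, sets, ways;

        if (!il)
                return 0;                        /* IL == 0: no cache */
        line = 2u << il;                         /* line size in bytes */
        sets = (is == 7) ? 32 : (64u << is);     /* sets per way */
        ways = ia + 1;                           /* associativity */
        return line * sets * ways;
}

int main(void)
{
        /* Example: IL=4 (32-byte lines), IS=2 (256 sets/way), IA=3 (4-way)
         * describes a 32 KiB cache; prints 32768. */
        printf("%u bytes\n", cache_bytes(4, 2, 3));
        return 0;
}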

View file

@ -0,0 +1,324 @@
/*
* Copyright (C) 2003, 2004, 2007 Maciej W. Rozycki
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/stddef.h>
#include <asm/bugs.h>
#include <asm/compiler.h>
#include <asm/cpu.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/setup.h>
static char bug64hit[] __initdata =
"reliable operation impossible!\n%s";
static char nowar[] __initdata =
"Please report to <linux-mips@linux-mips.org>.";
static char r4kwar[] __initdata =
"Enable CPU_R4000_WORKAROUNDS to rectify.";
static char daddiwar[] __initdata =
"Enable CPU_DADDI_WORKAROUNDS to rectify.";
static inline void align_mod(const int align, const int mod)
{
asm volatile(
".set push\n\t"
".set noreorder\n\t"
".balign %0\n\t"
".rept %1\n\t"
"nop\n\t"
".endr\n\t"
".set pop"
:
: GCC_IMM_ASM() (align), GCC_IMM_ASM() (mod));
}
static inline void mult_sh_align_mod(long *v1, long *v2, long *w,
const int align, const int mod)
{
unsigned long flags;
int m1, m2;
long p, s, lv1, lv2, lw;
/*
* We want the multiply and the shift to be isolated from the
* rest of the code to disable gcc optimizations. Hence the
* asm statements that execute nothing, but make gcc not know
* what the values of m1, m2 and s are and what lv2 and p are
* used for.
*/
local_irq_save(flags);
/*
* The following code leads to a wrong result of the first
* dsll32 when executed on R4000 rev. 2.2 or 3.0 (PRId
* 00000422 or 00000430, respectively).
*
* See "MIPS R4000PC/SC Errata, Processor Revision 2.2 and
* 3.0" by MIPS Technologies, Inc., errata #16 and #28 for
* details. I got no permission to duplicate them here,
* sigh... --macro
*/
asm volatile(
""
: "=r" (m1), "=r" (m2), "=r" (s)
: "0" (5), "1" (8), "2" (5));
align_mod(align, mod);
/*
* The trailing nop is needed to fulfill the two-instruction
* requirement between reading hi/lo and starting a mult/div.
* Leaving it out may cause gas to insert a nop itself, breaking
* the desired alignment of the next chunk.
*/
asm volatile(
".set push\n\t"
".set noat\n\t"
".set noreorder\n\t"
".set nomacro\n\t"
"mult %2, %3\n\t"
"dsll32 %0, %4, %5\n\t"
"mflo $0\n\t"
"dsll32 %1, %4, %5\n\t"
"nop\n\t"
".set pop"
: "=&r" (lv1), "=r" (lw)
: "r" (m1), "r" (m2), "r" (s), "I" (0)
: "hi", "lo", GCC_REG_ACCUM);
/*
* We have to use single integers for m1 and m2 and a double
* one for p to be sure gcc's mulsidi3 RTL multiplication
* pattern has the workaround applied. Older versions of
* gcc have correct umulsi3 and mulsi3, but other
* multiplication variants lack the workaround.
*/
asm volatile(
""
: "=r" (m1), "=r" (m2), "=r" (s)
: "0" (m1), "1" (m2), "2" (s));
align_mod(align, mod);
p = m1 * m2;
lv2 = s << 32;
asm volatile(
""
: "=r" (lv2)
: "0" (lv2), "r" (p));
local_irq_restore(flags);
*v1 = lv1;
*v2 = lv2;
*w = lw;
}
static inline void check_mult_sh(void)
{
long v1[8], v2[8], w[8];
int bug, fix, i;
printk("Checking for the multiply/shift bug... ");
/*
* Testing discovered false negatives for certain code offsets
* into cache lines. Hence we test all possible offsets for
* the worst assumption of an R4000 I-cache line width of 32
* bytes.
*
* We can't use a loop as alignment directives need to be
* immediates.
*/
mult_sh_align_mod(&v1[0], &v2[0], &w[0], 32, 0);
mult_sh_align_mod(&v1[1], &v2[1], &w[1], 32, 1);
mult_sh_align_mod(&v1[2], &v2[2], &w[2], 32, 2);
mult_sh_align_mod(&v1[3], &v2[3], &w[3], 32, 3);
mult_sh_align_mod(&v1[4], &v2[4], &w[4], 32, 4);
mult_sh_align_mod(&v1[5], &v2[5], &w[5], 32, 5);
mult_sh_align_mod(&v1[6], &v2[6], &w[6], 32, 6);
mult_sh_align_mod(&v1[7], &v2[7], &w[7], 32, 7);
bug = 0;
for (i = 0; i < 8; i++)
if (v1[i] != w[i])
bug = 1;
if (bug == 0) {
printk("no.\n");
return;
}
printk("yes, workaround... ");
fix = 1;
for (i = 0; i < 8; i++)
if (v2[i] != w[i])
fix = 0;
if (fix == 1) {
printk("yes.\n");
return;
}
printk("no.\n");
panic(bug64hit, !R4000_WAR ? r4kwar : nowar);
}
static volatile int daddi_ov;
asmlinkage void __init do_daddi_ov(struct pt_regs *regs)
{
enum ctx_state prev_state;
prev_state = exception_enter();
daddi_ov = 1;
regs->cp0_epc += 4;
exception_exit(prev_state);
}
static inline void check_daddi(void)
{
extern asmlinkage void handle_daddi_ov(void);
unsigned long flags;
void *handler;
long v, tmp;
printk("Checking for the daddi bug... ");
local_irq_save(flags);
handler = set_except_vector(12, handle_daddi_ov);
/*
* The following code fails to trigger an overflow exception
* when executed on R4000 rev. 2.2 or 3.0 (PRId 00000422 or
* 00000430, respectively).
*
* See "MIPS R4000PC/SC Errata, Processor Revision 2.2 and
* 3.0" by MIPS Technologies, Inc., erratum #23 for details.
* I got no permission to duplicate it here, sigh... --macro
*/
asm volatile(
".set push\n\t"
".set noat\n\t"
".set noreorder\n\t"
".set nomacro\n\t"
"addiu %1, $0, %2\n\t"
"dsrl %1, %1, 1\n\t"
#ifdef HAVE_AS_SET_DADDI
".set daddi\n\t"
#endif
"daddi %0, %1, %3\n\t"
".set pop"
: "=r" (v), "=&r" (tmp)
: "I" (0xffffffffffffdb9aUL), "I" (0x1234));
set_except_vector(12, handler);
local_irq_restore(flags);
if (daddi_ov) {
printk("no.\n");
return;
}
printk("yes, workaround... ");
local_irq_save(flags);
handler = set_except_vector(12, handle_daddi_ov);
asm volatile(
"addiu %1, $0, %2\n\t"
"dsrl %1, %1, 1\n\t"
"daddi %0, %1, %3"
: "=r" (v), "=&r" (tmp)
: "I" (0xffffffffffffdb9aUL), "I" (0x1234));
set_except_vector(12, handler);
local_irq_restore(flags);
if (daddi_ov) {
printk("yes.\n");
return;
}
printk("no.\n");
panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
}
int daddiu_bug = -1;
static inline void check_daddiu(void)
{
long v, w, tmp;
printk("Checking for the daddiu bug... ");
/*
* The following code leads to a wrong result of daddiu when
* executed on R4400 rev. 1.0 (PRId 00000440).
*
* See "MIPS R4400PC/SC Errata, Processor Revision 1.0" by
* MIPS Technologies, Inc., erratum #7 for details.
*
* According to "MIPS R4000PC/SC Errata, Processor Revision
* 2.2 and 3.0" by MIPS Technologies, Inc., erratum #41 this
* problem affects R4000 rev. 2.2 and 3.0 (PRId 00000422 and
* 00000430, respectively), too. Testing failed to trigger it
* so far.
*
* I got no permission to duplicate the errata here, sigh...
* --macro
*/
asm volatile(
".set push\n\t"
".set noat\n\t"
".set noreorder\n\t"
".set nomacro\n\t"
"addiu %2, $0, %3\n\t"
"dsrl %2, %2, 1\n\t"
#ifdef HAVE_AS_SET_DADDI
".set daddi\n\t"
#endif
"daddiu %0, %2, %4\n\t"
"addiu %1, $0, %4\n\t"
"daddu %1, %2\n\t"
".set pop"
: "=&r" (v), "=&r" (w), "=&r" (tmp)
: "I" (0xffffffffffffdb9aUL), "I" (0x1234));
daddiu_bug = v != w;
if (!daddiu_bug) {
printk("no.\n");
return;
}
printk("yes, workaround... ");
asm volatile(
"addiu %2, $0, %3\n\t"
"dsrl %2, %2, 1\n\t"
"daddiu %0, %2, %4\n\t"
"addiu %1, $0, %4\n\t"
"daddu %1, %2"
: "=&r" (v), "=&r" (w), "=&r" (tmp)
: "I" (0xffffffffffffdb9aUL), "I" (0x1234));
if (v == w) {
printk("yes.\n");
return;
}
printk("no.\n");
panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
}
void __init check_bugs64_early(void)
{
check_mult_sh();
check_daddiu();
}
void __init check_bugs64(void)
{
check_daddi();
}
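
A quick host-side check of the operand arithmetic that check_daddi() and check_daddiu() above rely on: the immediate 0xffffffffffffdb9a is logically shifted right by one to 0x7fffffffffffedcd, and adding 0x1234 steps just past the largest positive signed 64-bit value. That is why a correct CPU must raise an overflow exception for daddi, while daddiu simply wraps.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t tmp = 0xffffffffffffdb9aull >> 1;  /* what the dsrl produces */
        uint64_t sum = tmp + 0x1234;                /* what daddiu/daddu produce */

        /* tmp = 0x7fffffffffffedcd and sum = 0x8000000000000001: the add
         * overflows the signed 64-bit range, so daddi has to trap here. */
        printf("tmp = %#llx, sum = %#llx, signed overflow: %s\n",
               (unsigned long long)tmp, (unsigned long long)sum,
               ((int64_t)tmp > 0 && (int64_t)sum < 0) ? "yes" : "no");
        return 0;
}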

1328
arch/mips/kernel/cpu-probe.c Normal file

File diff suppressed because it is too large

70
arch/mips/kernel/crash.c Normal file
View file

@ -0,0 +1,70 @@
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/bootmem.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/types.h>
#include <linux/sched.h>
/* This keeps track of which CPU is the crashing one. */
static int crashing_cpu = -1;
static cpumask_t cpus_in_crash = CPU_MASK_NONE;
#ifdef CONFIG_SMP
static void crash_shutdown_secondary(void *ignore)
{
struct pt_regs *regs;
int cpu = smp_processor_id();
regs = task_pt_regs(current);
if (!cpu_online(cpu))
return;
local_irq_disable();
if (!cpu_isset(cpu, cpus_in_crash))
crash_save_cpu(regs, cpu);
cpu_set(cpu, cpus_in_crash);
while (!atomic_read(&kexec_ready_to_reboot))
cpu_relax();
relocated_kexec_smp_wait(NULL);
/* NOTREACHED */
}
static void crash_kexec_prepare_cpus(void)
{
unsigned int msecs;
unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
dump_send_ipi(crash_shutdown_secondary);
smp_wmb();
/*
* The crash CPU sends an IPI and waits for the other CPUs to
* respond, for up to 10 seconds.
*/
pr_emerg("Sending IPI to other cpus...\n");
msecs = 10000;
while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
cpu_relax();
mdelay(1);
}
}
#else /* !defined(CONFIG_SMP) */
static void crash_kexec_prepare_cpus(void) {}
#endif /* !defined(CONFIG_SMP) */
void default_machine_crash_shutdown(struct pt_regs *regs)
{
local_irq_disable();
crashing_cpu = smp_processor_id();
crash_save_cpu(regs, crashing_cpu);
crash_kexec_prepare_cpus();
cpu_set(crashing_cpu, cpus_in_crash);
}

View file

@ -0,0 +1,66 @@
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/crash_dump.h>
#include <asm/uaccess.h>
#include <linux/slab.h>
static void *kdump_buf_page;
/**
* copy_oldmem_page - copy one page from "oldmem"
* @pfn: page frame number to be copied
* @buf: target memory address for the copy; this can be in kernel address
* space or user address space (see @userbuf)
* @csize: number of bytes to copy
* @offset: offset in bytes into the page (based on pfn) to begin the copy
* @userbuf: if set, @buf is in user address space, use copy_to_user(),
* otherwise @buf is in kernel address space, use memcpy().
*
* Copy a page from "oldmem". For this page, there is no pte mapped
* in the current kernel.
*
* Calling copy_to_user() in atomic context is not desirable, so the data is
* first copied to a pre-allocated kernel page and then copied to user space
* in non-atomic context.
*/
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
size_t csize, unsigned long offset, int userbuf)
{
void *vaddr;
if (!csize)
return 0;
vaddr = kmap_atomic_pfn(pfn);
if (!userbuf) {
memcpy(buf, (vaddr + offset), csize);
kunmap_atomic(vaddr);
} else {
if (!kdump_buf_page) {
pr_warning("Kdump: Kdump buffer page not allocated\n");
return -EFAULT;
}
copy_page(kdump_buf_page, vaddr);
kunmap_atomic(vaddr);
if (copy_to_user(buf, (kdump_buf_page + offset), csize))
return -EFAULT;
}
return csize;
}
static int __init kdump_buf_page_init(void)
{
int ret = 0;
kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!kdump_buf_page) {
pr_warning("Kdump: Failed to allocate kdump buffer page\n");
ret = -ENOMEM;
}
return ret;
}
arch_initcall(kdump_buf_page_init);

View file

@ -0,0 +1,53 @@
/*
* Copyright (C) 2000,2001,2004 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/clocksource.h>
#include <asm/addrspace.h>
#include <asm/io.h>
#include <asm/time.h>
#include <asm/sibyte/bcm1480_regs.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/bcm1480_int.h>
#include <asm/sibyte/bcm1480_scd.h>
#include <asm/sibyte/sb1250.h>
static cycle_t bcm1480_hpt_read(struct clocksource *cs)
{
return (cycle_t) __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT));
}
struct clocksource bcm1480_clocksource = {
.name = "zbbus-cycles",
.rating = 200,
.read = bcm1480_hpt_read,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
void __init sb1480_clocksource_init(void)
{
struct clocksource *cs = &bcm1480_clocksource;
unsigned int plldiv;
unsigned long zbbus;
plldiv = G_BCM1480_SYS_PLL_DIV(__raw_readq(IOADDR(A_SCD_SYSTEM_CFG)));
zbbus = ((plldiv >> 1) * 50000000) + ((plldiv & 1) * 25000000);
clocksource_register_hz(cs, zbbus);
}
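
The ZBbus rate computed in sb1480_clocksource_init() above is simply the system PLL divider times 25 MHz, written as two halves to stay in integer arithmetic. A tiny check of that identity with an illustrative divider value:

#include <stdio.h>

int main(void)
{
        unsigned int plldiv = 12;       /* example G_BCM1480_SYS_PLL_DIV() value */
        unsigned long zbbus = ((plldiv >> 1) * 50000000ul) +
                              ((plldiv & 1) * 25000000ul);

        /* Both expressions print 300000000 (300 MHz) for plldiv = 12. */
        printf("%lu %lu\n", zbbus, plldiv * 25000000ul);
        return 0;
}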

View file

@ -0,0 +1,40 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
*/
#include <linux/init.h>
#include <linux/time.h>
#include <asm/gic.h>
static cycle_t gic_hpt_read(struct clocksource *cs)
{
return gic_read_count();
}
static struct clocksource gic_clocksource = {
.name = "GIC",
.read = gic_hpt_read,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
void __init gic_clocksource_init(unsigned int frequency)
{
unsigned int config, bits;
/* Calculate the clocksource mask. */
GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), config);
bits = 32 + ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
(GIC_SH_CONFIG_COUNTBITS_SHF - 2));
/* Set clocksource mask. */
gic_clocksource.mask = CLOCKSOURCE_MASK(bits);
/* Calculate a somewhat reasonable rating value. */
gic_clocksource.rating = 200 + frequency / 10000000;
clocksource_register_hz(&gic_clocksource, frequency);
}

View file

@ -0,0 +1,69 @@
/*
* DEC I/O ASIC's counter clocksource
*
* Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/clocksource.h>
#include <linux/init.h>
#include <asm/ds1287.h>
#include <asm/time.h>
#include <asm/dec/ioasic.h>
#include <asm/dec/ioasic_addrs.h>
static cycle_t dec_ioasic_hpt_read(struct clocksource *cs)
{
return ioasic_read(IO_REG_FCTR);
}
static struct clocksource clocksource_dec = {
.name = "dec-ioasic",
.read = dec_ioasic_hpt_read,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
int __init dec_ioasic_clocksource_init(void)
{
unsigned int freq;
u32 start, end;
int i = HZ / 8;
ds1287_timer_state();
while (!ds1287_timer_state())
;
start = dec_ioasic_hpt_read(&clocksource_dec);
while (i--)
while (!ds1287_timer_state())
;
end = dec_ioasic_hpt_read(&clocksource_dec);
freq = (end - start) * 8;
/* An early revision of the I/O ASIC didn't have the counter. */
if (!freq)
return -ENXIO;
printk(KERN_INFO "I/O ASIC clock frequency %dHz\n", freq);
clocksource_dec.rating = 200 + freq / 10000000;
clocksource_register_hz(&clocksource_dec, freq);
return 0;
}
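
dec_ioasic_clocksource_init() above calibrates the free-running counter against the DS1287 periodic interrupt: it measures how far the counter advances across HZ/8 RTC ticks, which is one eighth of a second if the RTC fires at HZ, and multiplies the delta by 8 to get Hz. A stand-alone version of that arithmetic with made-up sample readings:

#include <stdio.h>

int main(void)
{
        unsigned int hz = 128;                  /* example RTC periodic rate (== HZ) */
        unsigned int counter_hz = 25000000;     /* pretend the FCTR ticks at 25 MHz */

        /* Counter delta observed across hz / 8 RTC ticks, i.e. 1/8 second. */
        unsigned int start = 1000000;
        unsigned int end = start + counter_hz / 8;
        unsigned int freq = (end - start) * 8;

        printf("measured %u Hz over %u RTC ticks\n", freq, hz / 8);
        return 0;
}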

View file

@ -0,0 +1,36 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2007 by Ralf Baechle
*/
#include <linux/clocksource.h>
#include <linux/init.h>
#include <asm/time.h>
static cycle_t c0_hpt_read(struct clocksource *cs)
{
return read_c0_count();
}
static struct clocksource clocksource_mips = {
.name = "MIPS",
.read = c0_hpt_read,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
int __init init_r4k_clocksource(void)
{
if (!cpu_has_counter || !mips_hpt_frequency)
return -ENXIO;
/* Calculate a somewhat reasonable rating value */
clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;
clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);
return 0;
}

View file

@ -0,0 +1,69 @@
/*
* Copyright (C) 2000, 2001 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/clocksource.h>
#include <asm/addrspace.h>
#include <asm/io.h>
#include <asm/time.h>
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_int.h>
#include <asm/sibyte/sb1250_scd.h>
#define SB1250_HPT_NUM 3
#define SB1250_HPT_VALUE M_SCD_TIMER_CNT /* max value */
/*
* The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over
* again.
*/
static cycle_t sb1250_hpt_read(struct clocksource *cs)
{
unsigned int count;
count = G_SCD_TIMER_CNT(__raw_readq(IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_CNT))));
return SB1250_HPT_VALUE - count;
}
struct clocksource bcm1250_clocksource = {
.name = "bcm1250-counter-3",
.rating = 200,
.read = sb1250_hpt_read,
.mask = CLOCKSOURCE_MASK(23),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
void __init sb1250_clocksource_init(void)
{
struct clocksource *cs = &bcm1250_clocksource;
/* Setup hpt using timer #3 but do not enable irq for it */
__raw_writeq(0,
IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM,
R_SCD_TIMER_CFG)));
__raw_writeq(SB1250_HPT_VALUE,
IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM,
R_SCD_TIMER_INIT)));
__raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM,
R_SCD_TIMER_CFG)));
clocksource_register_hz(cs, V_SCD_TIMER_FREQ);
}

View file

@ -0,0 +1,43 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2002, 2003, 06, 07 Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 2007 MIPS Technologies, Inc.
* written by Ralf Baechle (ralf@linux-mips.org)
*/
#include <linux/kernel.h>
#include <linux/console.h>
#include <linux/printk.h>
#include <linux/init.h>
#include <asm/setup.h>
extern void prom_putchar(char);
static void early_console_write(struct console *con, const char *s, unsigned n)
{
while (n-- && *s) {
if (*s == '\n')
prom_putchar('\r');
prom_putchar(*s);
s++;
}
}
static struct console early_console_prom = {
.name = "early",
.write = early_console_write,
.flags = CON_PRINTBUFFER | CON_BOOT,
.index = -1
};
void __init setup_early_printk(void)
{
if (early_console)
return;
early_console = &early_console_prom;
register_console(&early_console_prom);
}

View file

@ -0,0 +1,66 @@
/*
* 8250/16550-type serial ports prom_putchar()
*
* Copyright (C) 2010 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/io.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
static void __iomem *serial8250_base;
static unsigned int serial8250_reg_shift;
static unsigned int serial8250_tx_timeout;
void setup_8250_early_printk_port(unsigned long base, unsigned int reg_shift,
unsigned int timeout)
{
serial8250_base = (void __iomem *)base;
serial8250_reg_shift = reg_shift;
serial8250_tx_timeout = timeout;
}
static inline u8 serial_in(int offset)
{
return readb(serial8250_base + (offset << serial8250_reg_shift));
}
static inline void serial_out(int offset, char value)
{
writeb(value, serial8250_base + (offset << serial8250_reg_shift));
}
void prom_putchar(char c)
{
unsigned int timeout;
int status, bits;
if (!serial8250_base)
return;
timeout = serial8250_tx_timeout;
bits = UART_LSR_TEMT | UART_LSR_THRE;
do {
status = serial_in(UART_LSR);
if (--timeout == 0)
break;
} while ((status & bits) != bits);
if (timeout)
serial_out(UART_TX, c);
}
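
A platform would normally hand setup_8250_early_printk_port() an uncached KSEG1 mapping of its UART before the first early printk. A minimal, hypothetical sketch of that wiring; the base address, register shift and timeout below are invented for illustration and do not describe any real board:

#include <linux/init.h>
#include <asm/setup.h>

/* Hypothetical board address: an uncached KSEG1 mapping of a 16550. */
#define EXAMPLE_UART_BASE 0xbf000900UL

void __init example_register_early_uart(void)
{
        /* reg_shift 2 means 32-bit spaced registers; prom_putchar() gives
         * up after roughly 10000 polls of a wedged transmitter. */
        setup_8250_early_printk_port(EXAMPLE_UART_BASE, 2, 10000);
}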

175
arch/mips/kernel/entry.S Normal file
View file

@ -0,0 +1,175 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2001 MIPS Technologies, Inc.
*/
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/compiler.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/isadep.h>
#include <asm/thread_info.h>
#include <asm/war.h>
#ifndef CONFIG_PREEMPT
#define resume_kernel restore_all
#else
#define __ret_from_irq ret_from_exception
#endif
.text
.align 5
#ifndef CONFIG_PREEMPT
FEXPORT(ret_from_exception)
local_irq_disable # preempt stop
b __ret_from_irq
#endif
FEXPORT(ret_from_irq)
LONG_S s0, TI_REGS($28)
FEXPORT(__ret_from_irq)
/*
* We can be coming here from a syscall done in the kernel space,
* e.g. a failed kernel_execve().
*/
resume_userspace_check:
LONG_L t0, PT_STATUS(sp) # returning to kernel mode?
andi t0, t0, KU_USER
beqz t0, resume_kernel
resume_userspace:
local_irq_disable # make sure we don't miss an
# interrupt setting need_resched
# between sampling and return
LONG_L a2, TI_FLAGS($28) # current->work
andi t0, a2, _TIF_WORK_MASK # (ignoring syscall_trace)
bnez t0, work_pending
j restore_all
#ifdef CONFIG_PREEMPT
resume_kernel:
local_irq_disable
lw t0, TI_PRE_COUNT($28)
bnez t0, restore_all
need_resched:
LONG_L t0, TI_FLAGS($28)
andi t1, t0, _TIF_NEED_RESCHED
beqz t1, restore_all
LONG_L t0, PT_STATUS(sp) # Interrupts off?
andi t0, 1
beqz t0, restore_all
jal preempt_schedule_irq
b need_resched
#endif
FEXPORT(ret_from_kernel_thread)
jal schedule_tail # a0 = struct task_struct *prev
move a0, s1
jal s0
j syscall_exit
FEXPORT(ret_from_fork)
jal schedule_tail # a0 = struct task_struct *prev
FEXPORT(syscall_exit)
local_irq_disable # make sure need_resched and
# signals don't change between
# sampling and return
LONG_L a2, TI_FLAGS($28) # current->work
li t0, _TIF_ALLWORK_MASK
and t0, a2, t0
bnez t0, syscall_exit_work
restore_all: # restore full frame
.set noat
RESTORE_TEMP
RESTORE_AT
RESTORE_STATIC
restore_partial: # restore partial frame
#ifdef CONFIG_TRACE_IRQFLAGS
SAVE_STATIC
SAVE_AT
SAVE_TEMP
LONG_L v0, PT_STATUS(sp)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
and v0, ST0_IEP
#else
and v0, ST0_IE
#endif
beqz v0, 1f
jal trace_hardirqs_on
b 2f
1: jal trace_hardirqs_off
2:
RESTORE_TEMP
RESTORE_AT
RESTORE_STATIC
#endif
RESTORE_SOME
RESTORE_SP_AND_RET
.set at
work_pending:
andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
beqz t0, work_notifysig
work_resched:
jal schedule
local_irq_disable # make sure need_resched and
# signals don't change between
# sampling and return
LONG_L a2, TI_FLAGS($28)
andi t0, a2, _TIF_WORK_MASK # is there any work to be done
# other than syscall tracing?
beqz t0, restore_all
andi t0, a2, _TIF_NEED_RESCHED
bnez t0, work_resched
work_notifysig: # deal with pending signals and
# notify-resume requests
move a0, sp
li a1, 0
jal do_notify_resume # a2 already loaded
j resume_userspace_check
FEXPORT(syscall_exit_partial)
local_irq_disable # make sure need_resched doesn't
# change between sampling and return
LONG_L a2, TI_FLAGS($28) # current->work
li t0, _TIF_ALLWORK_MASK
and t0, a2
beqz t0, restore_partial
SAVE_STATIC
syscall_exit_work:
LONG_L t0, PT_STATUS(sp) # returning to kernel mode?
andi t0, t0, KU_USER
beqz t0, resume_kernel
li t0, _TIF_WORK_SYSCALL_EXIT
and t0, a2 # a2 is preloaded with TI_FLAGS
beqz t0, work_pending # trace bit set?
local_irq_enable # could let syscall_trace_leave()
# call schedule() instead
move a0, sp
jal syscall_trace_leave
b resume_userspace
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT)
/*
* MIPS32R2 Instruction Hazard Barrier - must be called
*
* For C code use the inline version named instruction_hazard().
*/
LEAF(mips_ihb)
.set MIPS_ISA_LEVEL_RAW
jr.hb ra
nop
END(mips_ihb)
#endif /* CONFIG_CPU_MIPSR2 or CONFIG_MIPS_MT */

440
arch/mips/kernel/ftrace.c Normal file
View file

@ -0,0 +1,440 @@
/*
* Code for replacing ftrace calls with jumps.
*
* Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
* Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
* Author: Wu Zhangjin <wuzhangjin@gmail.com>
*
* Thanks goes to Steven Rostedt for writing the original x86 version.
*/
#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>
#include <asm/uasm.h>
#include <asm/unistd.h>
#include <asm-generic/sections.h>
#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
#define MCOUNT_OFFSET_INSNS 5
#else
#define MCOUNT_OFFSET_INSNS 4
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
/* Arch override because MIPS doesn't need to run this from stop_machine() */
void arch_ftrace_update_code(int command)
{
ftrace_modify_all_code(command);
}
#endif
/*
* Check if the address is in kernel space
*
* A clone of core_kernel_text() from kernel/extable.c, minus the
* init_kernel_text() check, since ftrace does not trace functions in
* init sections.
*/
static inline int in_kernel_space(unsigned long ip)
{
if (ip >= (unsigned long)_stext &&
ip <= (unsigned long)_etext)
return 1;
return 0;
}
#ifdef CONFIG_DYNAMIC_FTRACE
#define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */
#define JUMP_RANGE_MASK ((1UL << 28) - 1)
#define INSN_NOP 0x00000000 /* nop */
#define INSN_JAL(addr) \
((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_la_mcount[2] __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;
static inline void ftrace_dyn_arch_init_insns(void)
{
u32 *buf;
unsigned int v1;
/* la v1, _mcount */
v1 = 3;
buf = (u32 *)&insn_la_mcount[0];
UASM_i_LA(&buf, v1, MCOUNT_ADDR);
/* jal (ftrace_caller + 8), jump over the first two instructions */
buf = (u32 *)&insn_jal_ftrace_caller;
uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* j ftrace_graph_caller */
buf = (u32 *)&insn_j_ftrace_graph_caller;
uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
#endif
}
static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
int faulted;
mm_segment_t old_fs;
/* *(unsigned int *)ip = new_code; */
safe_store_code(new_code, ip, faulted);
if (unlikely(faulted))
return -EFAULT;
old_fs = get_fs();
set_fs(get_ds());
flush_icache_range(ip, ip + 8);
set_fs(old_fs);
return 0;
}
#ifndef CONFIG_64BIT
static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
unsigned int new_code2)
{
int faulted;
mm_segment_t old_fs;
safe_store_code(new_code1, ip, faulted);
if (unlikely(faulted))
return -EFAULT;
ip += 4;
safe_store_code(new_code2, ip, faulted);
if (unlikely(faulted))
return -EFAULT;
ip -= 4;
old_fs = get_fs();
set_fs(get_ds());
flush_icache_range(ip, ip + 8);
set_fs(old_fs);
return 0;
}
static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
unsigned int new_code2)
{
int faulted;
mm_segment_t old_fs;
ip += 4;
safe_store_code(new_code2, ip, faulted);
if (unlikely(faulted))
return -EFAULT;
ip -= 4;
safe_store_code(new_code1, ip, faulted);
if (unlikely(faulted))
return -EFAULT;
old_fs = get_fs();
set_fs(get_ds());
flush_icache_range(ip, ip + 8);
set_fs(old_fs);
return 0;
}
#endif
/*
* The details about the calling site of mcount on MIPS
*
* 1. For kernel:
*
* move at, ra
* jal _mcount --> nop
* sub sp, sp, 8 --> nop (CONFIG_32BIT)
*
* 2. For modules:
*
* 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
*
* lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
* addiu v1, v1, low_16bit_of_mcount --> nop (CONFIG_32BIT)
* move at, ra
* move $12, ra_address
* jalr v1
* sub sp, sp, 8
* 1: offset = 5 instructions
* 2.2 For the other situations
*
* lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
* addiu v1, v1, low_16bit_of_mcount --> nop (CONFIG_32BIT)
* move at, ra
* jalr v1
* nop | move $12, ra_address | sub sp, sp, 8
* 1: offset = 4 instructions
*/
#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
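/*
 * Illustrative decoding of INSN_B_1F (just the arithmetic, no new state):
 * with MCOUNT_OFFSET_INSNS == 4 this is 0x10000004, a "beq zero, zero, +4",
 * i.e. an unconditional branch whose 16-bit offset skips the remaining four
 * instructions of the module call site shown above; with
 * KBUILD_MCOUNT_RA_ADDRESS on 32-bit it is 0x10000005 and skips five.
 */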
int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
unsigned int new;
unsigned long ip = rec->ip;
/*
* If ip is in kernel space, no long call is needed; otherwise a long
* call is required.
*/
new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
#ifdef CONFIG_64BIT
return ftrace_modify_code(ip, new);
#else
/*
* On 32 bit MIPS platforms, gcc adds a stack adjust
* instruction in the delay slot after the branch to
* mcount and expects mcount to restore the sp on return.
* This is based on a legacy API and does nothing but
* waste instructions so it's being removed at runtime.
*/
return ftrace_modify_code_2(ip, new, INSN_NOP);
#endif
}
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned int new;
unsigned long ip = rec->ip;
new = in_kernel_space(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
#ifdef CONFIG_64BIT
return ftrace_modify_code(ip, new);
#else
return ftrace_modify_code_2r(ip, new, in_kernel_space(ip) ?
INSN_NOP : insn_la_mcount[1]);
#endif
}
#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))
int ftrace_update_ftrace_func(ftrace_func_t func)
{
unsigned int new;
new = INSN_JAL((unsigned long)func);
return ftrace_modify_code(FTRACE_CALL_IP, new);
}
int __init ftrace_dyn_arch_init(void)
{
/* Encode the instructions when booting */
ftrace_dyn_arch_init_insns();
/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);
return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))
int ftrace_enable_ftrace_graph_caller(void)
{
return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
insn_j_ftrace_graph_caller);
}
int ftrace_disable_ftrace_graph_caller(void)
{
return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifndef KBUILD_MCOUNT_RA_ADDRESS
#define S_RA_SP (0xafbf << 16) /* s{d,w} ra, offset(sp) */
#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */
#define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */
unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
{
unsigned long sp, ip, tmp;
unsigned int code;
int faulted;
/*
* For module, move the ip from the return address after the
* instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for
* kernel, move after the instruction "move ra, at"(offset is 16)
*/
ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);
/*
* search the text until finding the non-store instruction or "s{d,w}
* ra, offset(sp)" instruction
*/
do {
/* get the code at "ip": code = *(unsigned int *)ip; */
safe_load_code(code, ip, faulted);
if (unlikely(faulted))
return 0;
/*
* If we hit the non-store instruction before finding where the
* ra is stored, then this is a leaf function and it does not
* store the ra on the stack
*/
if ((code & S_R_SP) != S_R_SP)
return parent_ra_addr;
/* Move to the next instruction */
ip -= 4;
} while ((code & S_RA_SP) != S_RA_SP);
sp = fp + (code & OFFSET_MASK);
/* tmp = *(unsigned long *)sp; */
safe_load_stack(tmp, sp, faulted);
if (unlikely(faulted))
return 0;
if (tmp == old_parent_ra)
return sp;
return 0;
}
#endif /* !KBUILD_MCOUNT_RA_ADDRESS */
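/*
 * Illustrative encodings for the scan in ftrace_get_parent_ra_addr()
 * (the stack offsets are made up):
 *
 *	0xafbf0018  "sw ra, 24(sp)"      matches S_RA_SP; ra is at fp + 0x18
 *	0xafb00020  "sw s0, 32(sp)"      matches S_R_SP only; keep scanning
 *	0x27bdffe0  "addiu sp, sp, -32"  not a store to sp; treat as a leaf
 */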
/*
* Hook the return address and push it in the stack of return addrs
* in current thread info.
*/
void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
unsigned long fp)
{
unsigned long old_parent_ra;
struct ftrace_graph_ent trace;
unsigned long return_hooker = (unsigned long)
&return_to_handler;
int faulted, insns;
if (unlikely(ftrace_graph_is_dead()))
return;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
/*
* "parent_ra_addr" is the stack address saved the return address of
* the caller of _mcount.
*
* if the gcc < 4.5, a leaf function does not save the return address
* in the stack address, so, we "emulate" one in _mcount's stack space,
* and hijack it directly, but for a non-leaf function, it save the
* return address to the its own stack space, we can not hijack it
* directly, but need to find the real stack address,
* ftrace_get_parent_addr() does it!
*
* if gcc>= 4.5, with the new -mmcount-ra-address option, for a
* non-leaf function, the location of the return address will be saved
* to $12 for us, and for a leaf function, only put a zero into $12. we
* do it in ftrace_graph_caller of mcount.S.
*/
/* old_parent_ra = *parent_ra_addr; */
safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
if (unlikely(faulted))
goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
old_parent_ra, (unsigned long)parent_ra_addr, fp);
/*
* If we fail to get the stack address of the non-leaf function's
* ra, stop the function graph tracer and return
*/
if (parent_ra_addr == 0)
goto out;
#endif
/* *parent_ra_addr = return_hooker; */
safe_store_stack(return_hooker, parent_ra_addr, faulted);
if (unlikely(faulted))
goto out;
if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
== -EBUSY) {
*parent_ra_addr = old_parent_ra;
return;
}
/*
* Get the recorded ip of the current mcount calling site in the
* __mcount_loc section, which will be used to filter the function
* entries configured through the tracing/set_graph_function interface.
*/
insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
/* Only trace if the calling function expects to */
if (!ftrace_graph_entry(&trace)) {
current->curr_ret_stack--;
*parent_ra_addr = old_parent_ra;
}
return;
out:
ftrace_graph_stop();
WARN_ON(1);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_FTRACE_SYSCALLS
#ifdef CONFIG_32BIT
unsigned long __init arch_syscall_addr(int nr)
{
return (unsigned long)sys_call_table[nr - __NR_O32_Linux];
}
#endif
#ifdef CONFIG_64BIT
unsigned long __init arch_syscall_addr(int nr)
{
#ifdef CONFIG_MIPS32_N32
if (nr >= __NR_N32_Linux && nr <= __NR_N32_Linux + __NR_N32_Linux_syscalls)
return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux];
#endif
if (nr >= __NR_64_Linux && nr <= __NR_64_Linux + __NR_64_Linux_syscalls)
return (unsigned long)sys_call_table[nr - __NR_64_Linux];
#ifdef CONFIG_MIPS32_O32
if (nr >= __NR_O32_Linux && nr <= __NR_O32_Linux + __NR_O32_Linux_syscalls)
return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
#endif
return (unsigned long) &sys_ni_syscall;
}
#endif
#endif /* CONFIG_FTRACE_SYSCALLS */

540
arch/mips/kernel/genex.S Normal file
View file

@ -0,0 +1,540 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2002, 2007 Maciej W. Rozycki
* Copyright (C) 2001, 2012 MIPS Technologies, Inc. All rights reserved.
*/
#include <linux/init.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/war.h>
#include <asm/thread_info.h>
__INIT
/*
* General exception vector for all other CPUs.
*
* Be careful when changing this, it has to be at most 128 bytes
* to fit into space reserved for the exception handler.
*/
NESTED(except_vec3_generic, 0, sp)
.set push
.set noat
#if R5432_CP0_INTERRUPT_WAR
mfc0 k0, CP0_INDEX
#endif
mfc0 k1, CP0_CAUSE
andi k1, k1, 0x7c
#ifdef CONFIG_64BIT
dsll k1, k1, 1
#endif
PTR_L k0, exception_handlers(k1)
jr k0
.set pop
END(except_vec3_generic)
/*
* General exception handler for CPUs with virtual coherency exception.
*
* Be careful when changing this, it has to be at most 256 (as a special
* exception) bytes to fit into space reserved for the exception handler.
*/
NESTED(except_vec3_r4000, 0, sp)
.set push
.set arch=r4000
.set noat
mfc0 k1, CP0_CAUSE
li k0, 31<<2
andi k1, k1, 0x7c
.set push
.set noreorder
.set nomacro
beq k1, k0, handle_vced
li k0, 14<<2
beq k1, k0, handle_vcei
#ifdef CONFIG_64BIT
dsll k1, k1, 1
#endif
.set pop
PTR_L k0, exception_handlers(k1)
jr k0
/*
* Big shit, we now may have two dirty primary cache lines for the same
* physical address. We can safely invalidate the line pointed to by
* c0_badvaddr because after return from this exception handler the
* load / store will be re-executed.
*/
handle_vced:
MFC0 k0, CP0_BADVADDR
li k1, -4 # Is this ...
and k0, k1 # ... really needed?
mtc0 zero, CP0_TAGLO
cache Index_Store_Tag_D, (k0)
cache Hit_Writeback_Inv_SD, (k0)
#ifdef CONFIG_PROC_FS
PTR_LA k0, vced_count
lw k1, (k0)
addiu k1, 1
sw k1, (k0)
#endif
eret
handle_vcei:
MFC0 k0, CP0_BADVADDR
cache Hit_Writeback_Inv_SD, (k0) # also cleans pi
#ifdef CONFIG_PROC_FS
PTR_LA k0, vcei_count
lw k1, (k0)
addiu k1, 1
sw k1, (k0)
#endif
eret
.set pop
END(except_vec3_r4000)
__FINIT
.align 5 /* 32 byte rollback region */
LEAF(__r4k_wait)
.set push
.set noreorder
/* start of rollback region */
LONG_L t0, TI_FLAGS($28)
nop
andi t0, _TIF_NEED_RESCHED
bnez t0, 1f
nop
nop
nop
#ifdef CONFIG_CPU_MICROMIPS
nop
nop
nop
nop
#endif
.set arch=r4000
wait
/* end of rollback region (the region size must be power of two) */
1:
jr ra
nop
.set pop
END(__r4k_wait)
.macro BUILD_ROLLBACK_PROLOGUE handler
FEXPORT(rollback_\handler)
.set push
.set noat
MFC0 k0, CP0_EPC
PTR_LA k1, __r4k_wait
ori k0, 0x1f /* 32 byte rollback region */
xori k0, 0x1f
bne k0, k1, 9f
MTC0 k0, CP0_EPC
9:
.set pop
.endm
.align 5
BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
#ifdef CONFIG_TRACE_IRQFLAGS
/*
* Check to see if the interrupted code has just disabled
* interrupts and ignore this interrupt for now if so.
*
* local_irq_disable() disables interrupts and then calls
* trace_hardirqs_off() to track the state. If an interrupt is taken
* after interrupts are disabled but before the state is updated
* it will appear to restore_all that it is incorrectly returning with
* interrupts disabled
*/
.set push
.set noat
mfc0 k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
and k0, ST0_IEP
bnez k0, 1f
mfc0 k0, CP0_EPC
.set noreorder
j k0
rfe
#else
and k0, ST0_IE
bnez k0, 1f
eret
#endif
1:
.set pop
#endif
SAVE_ALL
CLI
TRACE_IRQS_OFF
LONG_L s0, TI_REGS($28)
LONG_S sp, TI_REGS($28)
PTR_LA ra, ret_from_irq
PTR_LA v0, plat_irq_dispatch
jr v0
#ifdef CONFIG_CPU_MICROMIPS
nop
#endif
END(handle_int)
__INIT
/*
* Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
* This is a dedicated interrupt exception vector which reduces the
* interrupt processing overhead. The jump instruction will be replaced
* at the initialization time.
*
* Be careful when changing this, it has to be at most 128 bytes
* to fit into space reserved for the exception handler.
*/
NESTED(except_vec4, 0, sp)
1: j 1b /* Dummy, will be replaced */
END(except_vec4)
/*
* EJTAG debug exception handler.
* The EJTAG debug exception entry point is 0xbfc00480, which
* normally is in the boot PROM, so the boot PROM must do an
* unconditional jump to this vector.
*/
NESTED(except_vec_ejtag_debug, 0, sp)
j ejtag_debug_handler
#ifdef CONFIG_CPU_MICROMIPS
nop
#endif
END(except_vec_ejtag_debug)
__FINIT
/*
* Vectored interrupt handler.
* This prototype is copied to ebase + n*IntCtl.VS and patched
* to invoke the handler
*/
BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
SAVE_SOME
SAVE_AT
.set push
.set noreorder
PTR_LA v1, except_vec_vi_handler
FEXPORT(except_vec_vi_lui)
lui v0, 0 /* Patched */
jr v1
FEXPORT(except_vec_vi_ori)
ori v0, 0 /* Patched */
.set pop
END(except_vec_vi)
EXPORT(except_vec_vi_end)
/*
* Common Vectored Interrupt code
* Complete the register saves and invoke the handler which is passed in $v0
*/
NESTED(except_vec_vi_handler, 0, sp)
SAVE_TEMP
SAVE_STATIC
CLI
#ifdef CONFIG_TRACE_IRQFLAGS
move s0, v0
TRACE_IRQS_OFF
move v0, s0
#endif
LONG_L s0, TI_REGS($28)
LONG_S sp, TI_REGS($28)
PTR_LA ra, ret_from_irq
jr v0
END(except_vec_vi_handler)
/*
* EJTAG debug exception handler.
*/
NESTED(ejtag_debug_handler, PT_SIZE, sp)
.set push
.set noat
MTC0 k0, CP0_DESAVE
mfc0 k0, CP0_DEBUG
sll k0, k0, 30 # Check for SDBBP.
bgez k0, ejtag_return
PTR_LA k0, ejtag_debug_buffer
LONG_S k1, 0(k0)
SAVE_ALL
move a0, sp
jal ejtag_exception_handler
RESTORE_ALL
PTR_LA k0, ejtag_debug_buffer
LONG_L k1, 0(k0)
ejtag_return:
MFC0 k0, CP0_DESAVE
.set mips32
deret
.set pop
END(ejtag_debug_handler)
/*
* This buffer is reserved for the use of the EJTAG debug
* handler.
*/
.data
EXPORT(ejtag_debug_buffer)
.fill LONGSIZE
.previous
__INIT
/*
* NMI debug exception handler for MIPS reference boards.
* The NMI debug exception entry point is 0xbfc00000, which
* normally is in the boot PROM, so the boot PROM must do an
* unconditional jump to this vector.
*/
NESTED(except_vec_nmi, 0, sp)
j nmi_handler
#ifdef CONFIG_CPU_MICROMIPS
nop
#endif
END(except_vec_nmi)
__FINIT
NESTED(nmi_handler, PT_SIZE, sp)
.set push
.set noat
/*
* Clear ERL - restore segment mapping
* Clear BEV - required for page fault exception handler to work
*/
mfc0 k0, CP0_STATUS
ori k0, k0, ST0_EXL
li k1, ~(ST0_BEV | ST0_ERL)
and k0, k0, k1
mtc0 k0, CP0_STATUS
_ehb
SAVE_ALL
move a0, sp
jal nmi_exception_handler
/* nmi_exception_handler never returns */
.set pop
END(nmi_handler)
.macro __build_clear_none
.endm
.macro __build_clear_sti
TRACE_IRQS_ON
STI
.endm
.macro __build_clear_cli
CLI
TRACE_IRQS_OFF
.endm
.macro __build_clear_fpe
.set push
/* gas fails to assemble cfc1 for some archs (octeon).*/ \
.set mips1
SET_HARDFLOAT
cfc1 a1, fcr31
li a2, ~(0x3f << 12)
and a2, a1
ctc1 a2, fcr31
.set pop
TRACE_IRQS_ON
STI
.endm
.macro __build_clear_ade
MFC0 t0, CP0_BADVADDR
PTR_S t0, PT_BVADDR(sp)
KMODE
.endm
.macro __BUILD_silent exception
.endm
/* Gas tries to parse the PRINT argument as a string containing
string escapes and emits bogus warnings if it believes it
recognizes an unknown escape code. So make the arguments
start with an n and gas will believe \n is ok ... */
.macro __BUILD_verbose nexception
LONG_L a1, PT_EPC(sp)
#ifdef CONFIG_32BIT
PRINT("Got \nexception at %08lx\012")
#endif
#ifdef CONFIG_64BIT
PRINT("Got \nexception at %016lx\012")
#endif
.endm
.macro __BUILD_count exception
LONG_L t0,exception_count_\exception
LONG_ADDIU t0, 1
LONG_S t0,exception_count_\exception
.comm exception_count_\exception, 8, 8
.endm
.macro __BUILD_HANDLER exception handler clear verbose ext
.align 5
NESTED(handle_\exception, PT_SIZE, sp)
.set noat
SAVE_ALL
FEXPORT(handle_\exception\ext)
__BUILD_clear_\clear
.set at
__BUILD_\verbose \exception
move a0, sp
PTR_LA ra, ret_from_exception
j do_\handler
END(handle_\exception)
.endm
.macro BUILD_HANDLER exception handler clear verbose
__BUILD_HANDLER \exception \handler \clear \verbose _int
.endm
BUILD_HANDLER adel ade ade silent /* #4 */
BUILD_HANDLER ades ade ade silent /* #5 */
BUILD_HANDLER ibe be cli silent /* #6 */
BUILD_HANDLER dbe be cli silent /* #7 */
BUILD_HANDLER bp bp sti silent /* #9 */
BUILD_HANDLER ri ri sti silent /* #10 */
BUILD_HANDLER cpu cpu sti silent /* #11 */
BUILD_HANDLER ov ov sti silent /* #12 */
BUILD_HANDLER tr tr sti silent /* #13 */
BUILD_HANDLER msa_fpe msa_fpe sti silent /* #14 */
BUILD_HANDLER fpe fpe fpe silent /* #15 */
BUILD_HANDLER ftlb ftlb none silent /* #16 */
BUILD_HANDLER msa msa sti silent /* #21 */
BUILD_HANDLER mdmx mdmx sti silent /* #22 */
#ifdef CONFIG_HARDWARE_WATCHPOINTS
/*
* For watch, interrupts will be enabled after the watch
* registers are read.
*/
BUILD_HANDLER watch watch cli silent /* #23 */
#else
BUILD_HANDLER watch watch sti verbose /* #23 */
#endif
BUILD_HANDLER mcheck mcheck cli verbose /* #24 */
BUILD_HANDLER mt mt sti silent /* #25 */
BUILD_HANDLER dsp dsp sti silent /* #26 */
BUILD_HANDLER reserved reserved sti verbose /* others */
.align 5
LEAF(handle_ri_rdhwr_vivt)
.set push
.set noat
.set noreorder
/* check if TLB contains an entry for EPC */
MFC0 k1, CP0_ENTRYHI
andi k1, 0xff /* ASID_MASK */
MFC0 k0, CP0_EPC
PTR_SRL k0, _PAGE_SHIFT + 1
PTR_SLL k0, _PAGE_SHIFT + 1
or k1, k0
MTC0 k1, CP0_ENTRYHI
mtc0_tlbw_hazard
tlbp
tlb_probe_hazard
mfc0 k1, CP0_INDEX
.set pop
bltz k1, handle_ri /* slow path */
/* fall thru */
END(handle_ri_rdhwr_vivt)
LEAF(handle_ri_rdhwr)
.set push
.set noat
.set noreorder
/* MIPS32: 0x7c03e83b: rdhwr v1,$29 */
/* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
MFC0 k1, CP0_EPC
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
and k0, k1, 1
beqz k0, 1f
xor k1, k0
lhu k0, (k1)
lhu k1, 2(k1)
ins k1, k0, 16, 16
lui k0, 0x007d
b docheck
ori k0, 0x6b3c
1:
lui k0, 0x7c03
lw k1, (k1)
ori k0, 0xe83b
#else
andi k0, k1, 1
bnez k0, handle_ri
lui k0, 0x7c03
lw k1, (k1)
ori k0, 0xe83b
#endif
.set reorder
docheck:
bne k0, k1, handle_ri /* if not ours */
isrdhwr:
/* The insn is rdhwr. No need to check CAUSE.BD here. */
get_saved_sp /* k1 := current_thread_info */
.set noreorder
MFC0 k0, CP0_EPC
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
ori k1, _THREAD_MASK
xori k1, _THREAD_MASK
LONG_L v1, TI_TP_VALUE(k1)
LONG_ADDIU k0, 4
jr k0
rfe
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
LONG_ADDIU k0, 4 /* stall on $k0 */
#else
.set at=v1
LONG_ADDIU k0, 4
.set noat
#endif
MTC0 k0, CP0_EPC
/* I hope three instructions between MTC0 and ERET are enough... */
ori k1, _THREAD_MASK
xori k1, _THREAD_MASK
LONG_L v1, TI_TP_VALUE(k1)
.set arch=r4000
eret
.set mips0
#endif
.set pop
END(handle_ri_rdhwr)
#ifdef CONFIG_64BIT
/* A temporary overflow handler used by check_daddi(). */
__INIT
BUILD_HANDLER daddi_ov daddi_ov none silent /* #12 */
#endif

89
arch/mips/kernel/gpio_txx9.c Normal file
View file

@ -0,0 +1,89 @@
/*
* A gpio chip driver for TXx9 SoCs
*
* Copyright (C) 2008 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/gpio.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <asm/txx9pio.h>
static DEFINE_SPINLOCK(txx9_gpio_lock);
static struct txx9_pio_reg __iomem *txx9_pioptr;
static int txx9_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
return __raw_readl(&txx9_pioptr->din) & (1 << offset);
}
static void txx9_gpio_set_raw(unsigned int offset, int value)
{
u32 val;
val = __raw_readl(&txx9_pioptr->dout);
if (value)
val |= 1 << offset;
else
val &= ~(1 << offset);
__raw_writel(val, &txx9_pioptr->dout);
}
static void txx9_gpio_set(struct gpio_chip *chip, unsigned int offset,
int value)
{
unsigned long flags;
spin_lock_irqsave(&txx9_gpio_lock, flags);
txx9_gpio_set_raw(offset, value);
mmiowb();
spin_unlock_irqrestore(&txx9_gpio_lock, flags);
}
static int txx9_gpio_dir_in(struct gpio_chip *chip, unsigned int offset)
{
unsigned long flags;
spin_lock_irqsave(&txx9_gpio_lock, flags);
__raw_writel(__raw_readl(&txx9_pioptr->dir) & ~(1 << offset),
&txx9_pioptr->dir);
mmiowb();
spin_unlock_irqrestore(&txx9_gpio_lock, flags);
return 0;
}
static int txx9_gpio_dir_out(struct gpio_chip *chip, unsigned int offset,
int value)
{
unsigned long flags;
spin_lock_irqsave(&txx9_gpio_lock, flags);
txx9_gpio_set_raw(offset, value);
__raw_writel(__raw_readl(&txx9_pioptr->dir) | (1 << offset),
&txx9_pioptr->dir);
mmiowb();
spin_unlock_irqrestore(&txx9_gpio_lock, flags);
return 0;
}
static struct gpio_chip txx9_gpio_chip = {
.get = txx9_gpio_get,
.set = txx9_gpio_set,
.direction_input = txx9_gpio_dir_in,
.direction_output = txx9_gpio_dir_out,
.label = "TXx9",
};
int __init txx9_gpio_init(unsigned long baseaddr,
unsigned int base, unsigned int num)
{
txx9_pioptr = ioremap(baseaddr, sizeof(struct txx9_pio_reg));
if (!txx9_pioptr)
return -ENODEV;
txx9_gpio_chip.base = base;
txx9_gpio_chip.ngpio = num;
return gpiochip_add(&txx9_gpio_chip);
}
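/*
 * Usage sketch (hypothetical values, not taken from this file): a TXx9
 * board setup routine could register 16 PIO lines starting at GPIO 0 with
 *
 *	err = txx9_gpio_init(pio_base_phys_addr, 0, 16);
 *	if (err)
 *		pr_err("TXx9 GPIO registration failed: %d\n", err);
 */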

132
arch/mips/kernel/head.S Normal file
View file

@ -0,0 +1,132 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994, 1995 Waldorf Electronics
* Written by Ralf Baechle and Andreas Busse
* Copyright (C) 1994 - 99, 2003, 06 Ralf Baechle
* Copyright (C) 1996 Paul M. Antoine
* Modified for DECStation and hence R3000 support by Paul M. Antoine
* Further modifications by David S. Miller and Harald Koerfgen
* Copyright (C) 1999 Silicon Graphics, Inc.
* Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
*/
#include <linux/init.h>
#include <linux/threads.h>
#include <asm/addrspace.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/pgtable-bits.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <kernel-entry-init.h>
/*
* For the moment disable interrupts, mark the kernel mode and
* set ST0_KX so that the CPU does not spit fire when using
* 64-bit addresses. A full initialization of the CPU's status
* register is done later in per_cpu_trap_init().
*/
.macro setup_c0_status set clr
.set push
mfc0 t0, CP0_STATUS
or t0, ST0_CU0|\set|0x1f|\clr
xor t0, 0x1f|\clr
mtc0 t0, CP0_STATUS
.set noreorder
sll zero,3 # ehb
.set pop
.endm
.macro setup_c0_status_pri
#ifdef CONFIG_64BIT
setup_c0_status ST0_KX 0
#else
setup_c0_status 0 0
#endif
.endm
.macro setup_c0_status_sec
#ifdef CONFIG_64BIT
setup_c0_status ST0_KX ST0_BEV
#else
setup_c0_status 0 ST0_BEV
#endif
.endm
#ifndef CONFIG_NO_EXCEPT_FILL
/*
* Reserved space for exception handlers.
* Necessary for machines which link their kernels at KSEG0.
*/
.fill 0x400
#endif
EXPORT(_stext)
#ifdef CONFIG_BOOT_RAW
/*
* Give us a fighting chance of running if execution begins at the
* kernel load address. This is needed because this platform does
* not have an ELF loader yet.
*/
FEXPORT(__kernel_entry)
j kernel_entry
#endif
__REF
NESTED(kernel_entry, 16, sp) # kernel entry point
kernel_entry_setup # cpu specific setup
setup_c0_status_pri
/* We might not get launched at the address the kernel is linked to,
so we jump there. */
PTR_LA t0, 0f
jr t0
0:
PTR_LA t0, __bss_start # clear .bss
LONG_S zero, (t0)
PTR_LA t1, __bss_stop - LONGSIZE
1:
PTR_ADDIU t0, LONGSIZE
LONG_S zero, (t0)
bne t0, t1, 1b
LONG_S a0, fw_arg0 # firmware arguments
LONG_S a1, fw_arg1
LONG_S a2, fw_arg2
LONG_S a3, fw_arg3
MTC0 zero, CP0_CONTEXT # clear context register
PTR_LA $28, init_thread_union
/* Set the SP after an empty pt_regs. */
PTR_LI sp, _THREAD_SIZE - 32 - PT_SIZE
PTR_ADDU sp, $28
back_to_back_c0_hazard
set_saved_sp sp, t0, t1
PTR_SUBU sp, 4 * SZREG # init stack pointer
j start_kernel
END(kernel_entry)
#ifdef CONFIG_SMP
/*
* SMP slave cpus entry point. Board specific code for bootstrap calls this
* function after setting up the stack and gp registers.
*/
NESTED(smp_bootstrap, 16, sp)
smp_slave_setup
setup_c0_status_sec
j start_secondary
END(smp_bootstrap)
#endif /* CONFIG_SMP */

39
arch/mips/kernel/i8253.c Normal file
View file

@ -0,0 +1,39 @@
/*
* i8253.c 8253/PIT functions
*
*/
#include <linux/clockchips.h>
#include <linux/i8253.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/irq.h>
#include <asm/time.h>
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
i8253_clockevent.event_handler(&i8253_clockevent);
return IRQ_HANDLED;
}
static struct irqaction irq0 = {
.handler = timer_interrupt,
.flags = IRQF_NOBALANCING | IRQF_TIMER,
.name = "timer"
};
void __init setup_pit_timer(void)
{
clockevent_i8253_init(true);
setup_irq(0, &irq0);
}
static int __init init_pit_clocksource(void)
{
if (num_possible_cpus() > 1) /* PIT does not scale! */
return 0;
return clocksource_i8253_init();
}
arch_initcall(init_pit_clocksource);

331
arch/mips/kernel/i8259.c Normal file
View file

@ -0,0 +1,331 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Code to handle x86 style IRQs plus some generic interrupt stuff.
*
* Copyright (C) 1992 Linus Torvalds
* Copyright (C) 1994 - 2000 Ralf Baechle
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <linux/irq.h>
#include <asm/i8259.h>
#include <asm/io.h>
/*
* This is the 'legacy' 8259A Programmable Interrupt Controller,
* present in the majority of PC/AT boxes.
* plus some generic x86-specific things, if generic specifics make
* any sense at all.
* This file should become arch/i386/kernel/irq.c when the old irq.c
* moves to arch-independent land.
*/
static int i8259A_auto_eoi = -1;
DEFINE_RAW_SPINLOCK(i8259A_lock);
static void disable_8259A_irq(struct irq_data *d);
static void enable_8259A_irq(struct irq_data *d);
static void mask_and_ack_8259A(struct irq_data *d);
static void init_8259A(int auto_eoi);
static struct irq_chip i8259A_chip = {
.name = "XT-PIC",
.irq_mask = disable_8259A_irq,
.irq_disable = disable_8259A_irq,
.irq_unmask = enable_8259A_irq,
.irq_mask_ack = mask_and_ack_8259A,
};
/*
* 8259A PIC functions to handle ISA devices:
*/
/*
* This contains the irq mask for both 8259A irq controllers,
*/
static unsigned int cached_irq_mask = 0xffff;
#define cached_master_mask (cached_irq_mask)
#define cached_slave_mask (cached_irq_mask >> 8)
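/*
 * Worked example (illustrative): masking IRQ 10 sets bit 10, so
 * cached_irq_mask |= 0x0400. Since 10 & 8 is non-zero the slave path is
 * taken and cached_slave_mask (= cached_irq_mask >> 8) is written to
 * PIC_SLAVE_IMR with bit 2 set, i.e. slave input 2, which carries IRQ 10.
 */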
static void disable_8259A_irq(struct irq_data *d)
{
unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
unsigned long flags;
mask = 1 << irq;
raw_spin_lock_irqsave(&i8259A_lock, flags);
cached_irq_mask |= mask;
if (irq & 8)
outb(cached_slave_mask, PIC_SLAVE_IMR);
else
outb(cached_master_mask, PIC_MASTER_IMR);
raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
static void enable_8259A_irq(struct irq_data *d)
{
unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
unsigned long flags;
mask = ~(1 << irq);
raw_spin_lock_irqsave(&i8259A_lock, flags);
cached_irq_mask &= mask;
if (irq & 8)
outb(cached_slave_mask, PIC_SLAVE_IMR);
else
outb(cached_master_mask, PIC_MASTER_IMR);
raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
int i8259A_irq_pending(unsigned int irq)
{
unsigned int mask;
unsigned long flags;
int ret;
irq -= I8259A_IRQ_BASE;
mask = 1 << irq;
raw_spin_lock_irqsave(&i8259A_lock, flags);
if (irq < 8)
ret = inb(PIC_MASTER_CMD) & mask;
else
ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
raw_spin_unlock_irqrestore(&i8259A_lock, flags);
return ret;
}
void make_8259A_irq(unsigned int irq)
{
disable_irq_nosync(irq);
irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
enable_irq(irq);
}
/*
* This function is assumed to be called rarely. Switching between
* 8259A registers is slow.
* This has to be protected by the irq controller spinlock
* before being called.
*/
static inline int i8259A_irq_real(unsigned int irq)
{
int value;
int irqmask = 1 << irq;
if (irq < 8) {
outb(0x0B, PIC_MASTER_CMD); /* ISR register */
value = inb(PIC_MASTER_CMD) & irqmask;
outb(0x0A, PIC_MASTER_CMD); /* back to the IRR register */
return value;
}
outb(0x0B, PIC_SLAVE_CMD); /* ISR register */
value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
outb(0x0A, PIC_SLAVE_CMD); /* back to the IRR register */
return value;
}
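/*
 * Example of the OCW3 dance above (values illustrative): writing 0x0B to
 * PIC_MASTER_CMD selects the In-Service Register, so a subsequent inb()
 * returning 0x80 means IRQ7 is really being serviced; writing 0x0A then
 * switches the command port back to returning the IRR on reads.
 */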
/*
* Careful! The 8259A is a fragile beast, it pretty
* much _has_ to be done exactly like this (mask it
* first, _then_ send the EOI), and the order of EOI
* to the two 8259s is important!
*/
static void mask_and_ack_8259A(struct irq_data *d)
{
unsigned int irqmask, irq = d->irq - I8259A_IRQ_BASE;
unsigned long flags;
irqmask = 1 << irq;
raw_spin_lock_irqsave(&i8259A_lock, flags);
/*
* Lightweight spurious IRQ detection. We do not want
* to overdo spurious IRQ handling - it's usually a sign
* of hardware problems, so we only do the checks we can
* do without slowing down good hardware unnecessarily.
*
* Note that IRQ7 and IRQ15 (the two spurious IRQs
* usually resulting from the 8259A-1|2 PICs) occur
* even if the IRQ is masked in the 8259A. Thus we
* can check spurious 8259A IRQs without doing the
* quite slow i8259A_irq_real() call for every IRQ.
* This does not cover 100% of spurious interrupts,
* but should be enough to warn the user that there
* is something bad going on ...
*/
if (cached_irq_mask & irqmask)
goto spurious_8259A_irq;
cached_irq_mask |= irqmask;
handle_real_irq:
if (irq & 8) {
inb(PIC_SLAVE_IMR); /* DUMMY - (do we need this?) */
outb(cached_slave_mask, PIC_SLAVE_IMR);
outb(0x60+(irq&7), PIC_SLAVE_CMD);/* 'Specific EOI' to slave */
outb(0x60+PIC_CASCADE_IR, PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */
} else {
inb(PIC_MASTER_IMR); /* DUMMY - (do we need this?) */
outb(cached_master_mask, PIC_MASTER_IMR);
outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI' to master */
}
raw_spin_unlock_irqrestore(&i8259A_lock, flags);
return;
spurious_8259A_irq:
/*
* this is the slow path - should happen rarely.
*/
if (i8259A_irq_real(irq))
/*
* oops, the IRQ _is_ in service according to the
* 8259A - not spurious, go handle it.
*/
goto handle_real_irq;
{
static int spurious_irq_mask;
/*
* At this point we can be sure the IRQ is spurious,
* let's ACK and report it. [once per IRQ]
*/
if (!(spurious_irq_mask & irqmask)) {
printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
spurious_irq_mask |= irqmask;
}
atomic_inc(&irq_err_count);
/*
* Theoretically we do not have to handle this IRQ,
* but in Linux this does not cause problems and is
* simpler for us.
*/
goto handle_real_irq;
}
}
static void i8259A_resume(void)
{
if (i8259A_auto_eoi >= 0)
init_8259A(i8259A_auto_eoi);
}
static void i8259A_shutdown(void)
{
/* Put the i8259A into a quiescent state that
* the kernel initialization code can get it
* out of.
*/
if (i8259A_auto_eoi >= 0) {
outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
}
}
static struct syscore_ops i8259_syscore_ops = {
.resume = i8259A_resume,
.shutdown = i8259A_shutdown,
};
static int __init i8259A_init_sysfs(void)
{
register_syscore_ops(&i8259_syscore_ops);
return 0;
}
device_initcall(i8259A_init_sysfs);
static void init_8259A(int auto_eoi)
{
unsigned long flags;
i8259A_auto_eoi = auto_eoi;
raw_spin_lock_irqsave(&i8259A_lock, flags);
outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
/*
* outb_p - this has to work on a wide range of PC hardware.
*/
outb_p(0x11, PIC_MASTER_CMD); /* ICW1: select 8259A-1 init */
outb_p(I8259A_IRQ_BASE + 0, PIC_MASTER_IMR); /* ICW2: 8259A-1 IR0 mapped to I8259A_IRQ_BASE + 0x00 */
outb_p(1U << PIC_CASCADE_IR, PIC_MASTER_IMR); /* 8259A-1 (the master) has a slave on IR2 */
if (auto_eoi) /* master does Auto EOI */
outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
else /* master expects normal EOI */
outb_p(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);
outb_p(0x11, PIC_SLAVE_CMD); /* ICW1: select 8259A-2 init */
outb_p(I8259A_IRQ_BASE + 8, PIC_SLAVE_IMR); /* ICW2: 8259A-2 IR0 mapped to I8259A_IRQ_BASE + 0x08 */
outb_p(PIC_CASCADE_IR, PIC_SLAVE_IMR); /* 8259A-2 is a slave on master's IR2 */
outb_p(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */
if (auto_eoi)
/*
* In AEOI mode we just have to mask the interrupt
* when acking.
*/
i8259A_chip.irq_mask_ack = disable_8259A_irq;
else
i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
udelay(100); /* wait for 8259A to initialize */
outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */
raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
/*
* IRQ2 is cascade interrupt to second interrupt controller
*/
static struct irqaction irq2 = {
.handler = no_action,
.name = "cascade",
.flags = IRQF_NO_THREAD,
};
static struct resource pic1_io_resource = {
.name = "pic1",
.start = PIC_MASTER_CMD,
.end = PIC_MASTER_IMR,
.flags = IORESOURCE_BUSY
};
static struct resource pic2_io_resource = {
.name = "pic2",
.start = PIC_SLAVE_CMD,
.end = PIC_SLAVE_IMR,
.flags = IORESOURCE_BUSY
};
/*
* On systems with i8259-style interrupt controllers we assume, for
* driver compatibility reasons, that interrupts 0 - 15 are the i8259
* interrupts even if the hardware uses a different interrupt numbering.
*/
void __init init_i8259_irqs(void)
{
int i;
insert_resource(&ioport_resource, &pic1_io_resource);
insert_resource(&ioport_resource, &pic2_io_resource);
init_8259A(0);
for (i = I8259A_IRQ_BASE; i < I8259A_IRQ_BASE + 16; i++) {
irq_set_chip_and_handler(i, &i8259A_chip, handle_level_irq);
irq_set_probe(i);
}
setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
}

246
arch/mips/kernel/idle.c Normal file
View file

@ -0,0 +1,246 @@
/*
* MIPS idle loop and WAIT instruction support.
*
* Copyright (C) xxxx the Anonymous
* Copyright (C) 1994 - 2006 Ralf Baechle
* Copyright (C) 2003, 2004 Maciej W. Rozycki
* Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/export.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/cpu-type.h>
#include <asm/idle.h>
#include <asm/mipsregs.h>
/*
* Not all of the MIPS CPUs have the "wait" instruction available. Moreover,
* the implementation of the "wait" feature differs between CPU families. This
* pointer holds the function that implements the CPU-specific wait.
* The wait instruction stops the pipeline and reduces the power consumption of
* the CPU considerably.
*/
void (*cpu_wait)(void);
EXPORT_SYMBOL(cpu_wait);
static void r3081_wait(void)
{
unsigned long cfg = read_c0_conf();
write_c0_conf(cfg | R30XX_CONF_HALT);
local_irq_enable();
}
static void r39xx_wait(void)
{
if (!need_resched())
write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
local_irq_enable();
}
void r4k_wait(void)
{
local_irq_enable();
__r4k_wait();
}
/*
* This variant is preferable as it allows testing need_resched and going to
* sleep depending on the outcome atomically. Unfortunately the "It is
* implementation-dependent whether the pipeline restarts when a non-enabled
* interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
* using this version a gamble.
*/
void r4k_wait_irqoff(void)
{
if (!need_resched())
__asm__(
" .set push \n"
" .set arch=r4000 \n"
" wait \n"
" .set pop \n");
local_irq_enable();
}
/*
* The RM7000 variant has to handle erratum 38. The workaround is to not
* have any pending stores when the WAIT instruction is executed.
*/
static void rm7k_wait_irqoff(void)
{
if (!need_resched())
__asm__(
" .set push \n"
" .set arch=r4000 \n"
" .set noat \n"
" mfc0 $1, $12 \n"
" sync \n"
" mtc0 $1, $12 # stalls until W stage \n"
" wait \n"
" mtc0 $1, $12 # stalls until W stage \n"
" .set pop \n");
local_irq_enable();
}
/*
* Au1 'wait' is only useful when the 32kHz counter is used as timer,
* since coreclock (and the cp0 counter) stops upon executing it. Only an
* interrupt can wake it, so they must be enabled before entering idle modes.
*/
static void au1k_wait(void)
{
unsigned long c0status = read_c0_status() | 1; /* irqs on */
__asm__(
" .set arch=r4000 \n"
" cache 0x14, 0(%0) \n"
" cache 0x14, 32(%0) \n"
" sync \n"
" mtc0 %1, $12 \n" /* wr c0status */
" wait \n"
" nop \n"
" nop \n"
" nop \n"
" nop \n"
" .set mips0 \n"
: : "r" (au1k_wait), "r" (c0status));
}
static int __initdata nowait;
static int __init wait_disable(char *s)
{
nowait = 1;
return 1;
}
__setup("nowait", wait_disable);
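/*
 * Usage note: booting with "nowait" on the kernel command line sets
 * nowait = 1; check_wait() then leaves cpu_wait NULL and arch_cpu_idle()
 * falls back to plain local_irq_enable().
 */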
void __init check_wait(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
if (nowait) {
printk("Wait instruction disabled.\n");
return;
}
switch (current_cpu_type()) {
case CPU_R3081:
case CPU_R3081E:
cpu_wait = r3081_wait;
break;
case CPU_TX3927:
cpu_wait = r39xx_wait;
break;
case CPU_R4200:
/* case CPU_R4300: */
case CPU_R4600:
case CPU_R4640:
case CPU_R4650:
case CPU_R4700:
case CPU_R5000:
case CPU_R5500:
case CPU_NEVADA:
case CPU_4KC:
case CPU_4KEC:
case CPU_4KSC:
case CPU_5KC:
case CPU_25KF:
case CPU_PR4450:
case CPU_BMIPS3300:
case CPU_BMIPS4350:
case CPU_BMIPS4380:
case CPU_BMIPS5000:
case CPU_CAVIUM_OCTEON:
case CPU_CAVIUM_OCTEON_PLUS:
case CPU_CAVIUM_OCTEON2:
case CPU_CAVIUM_OCTEON3:
case CPU_JZRISC:
case CPU_LOONGSON1:
case CPU_XLR:
case CPU_XLP:
cpu_wait = r4k_wait;
break;
case CPU_RM7000:
cpu_wait = rm7k_wait_irqoff;
break;
case CPU_M14KC:
case CPU_M14KEC:
case CPU_24K:
case CPU_34K:
case CPU_1004K:
case CPU_1074K:
case CPU_INTERAPTIV:
case CPU_PROAPTIV:
case CPU_P5600:
case CPU_M5150:
cpu_wait = r4k_wait;
if (read_c0_config7() & MIPS_CONF7_WII)
cpu_wait = r4k_wait_irqoff;
break;
case CPU_74K:
cpu_wait = r4k_wait;
if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
cpu_wait = r4k_wait_irqoff;
break;
case CPU_TX49XX:
cpu_wait = r4k_wait_irqoff;
break;
case CPU_ALCHEMY:
cpu_wait = au1k_wait;
break;
case CPU_20KC:
/*
* WAIT on Rev1.0 has E1, E2, E3 and E16.
* WAIT on Rev2.0 and Rev3.0 has E16.
* Rev3.1 WAIT is nop, why bother
*/
if ((c->processor_id & 0xff) <= 0x64)
break;
/*
* Another rev is incrementing c0_count at a reduced clock
* rate while in WAIT mode. So we basically have the choice
* between using the cp0 timer as clocksource or avoiding
* the WAIT instruction. Until more details are known,
* disable the use of WAIT for 20Kc entirely.
cpu_wait = r4k_wait;
*/
break;
default:
break;
}
}
void arch_cpu_idle(void)
{
if (cpu_wait)
cpu_wait();
else
local_irq_enable();
}
#ifdef CONFIG_CPU_IDLE
int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
arch_cpu_idle();
return index;
}
#endif

402
arch/mips/kernel/irq-gic.c Normal file
View file

@ -0,0 +1,402 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
*/
#include <linux/bitmap.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/clocksource.h>
#include <asm/io.h>
#include <asm/gic.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <linux/hardirq.h>
#include <asm-generic/bitops/find.h>
unsigned int gic_frequency;
unsigned int gic_present;
unsigned long _gic_base;
unsigned int gic_irq_base;
unsigned int gic_irq_flags[GIC_NUM_INTRS];
/* The index into this array is the vector # of the interrupt. */
struct gic_shared_intr_map gic_shared_intr_map[GIC_NUM_INTRS];
struct gic_pcpu_mask {
DECLARE_BITMAP(pcpu_mask, GIC_NUM_INTRS);
};
struct gic_pending_regs {
DECLARE_BITMAP(pending, GIC_NUM_INTRS);
};
struct gic_intrmask_regs {
DECLARE_BITMAP(intrmask, GIC_NUM_INTRS);
};
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static struct gic_pending_regs pending_regs[NR_CPUS];
static struct gic_intrmask_regs intrmask_regs[NR_CPUS];
#if defined(CONFIG_CSRC_GIC) || defined(CONFIG_CEVT_GIC)
cycle_t gic_read_count(void)
{
unsigned int hi, hi2, lo;
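/*
 * The 64-bit counter is read as two 32-bit halves; re-read the high word
 * and retry if it changed, so a carry out of the low word between the two
 * reads cannot produce a torn value.
 */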
do {
GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi);
GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo);
GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2);
} while (hi2 != hi);
return (((cycle_t) hi) << 32) + lo;
}
void gic_write_compare(cycle_t cnt)
{
GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
(int)(cnt >> 32));
GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
(int)(cnt & 0xffffffff));
}
void gic_write_cpu_compare(cycle_t cnt, int cpu)
{
unsigned long flags;
local_irq_save(flags);
GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
(int)(cnt >> 32));
GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
(int)(cnt & 0xffffffff));
local_irq_restore(flags);
}
cycle_t gic_read_compare(void)
{
unsigned int hi, lo;
GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI), hi);
GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO), lo);
return (((cycle_t) hi) << 32) + lo;
}
#endif
unsigned int gic_get_timer_pending(void)
{
unsigned int vpe_pending;
GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), 0);
GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_PEND), vpe_pending);
return (vpe_pending & GIC_VPE_PEND_TIMER_MSK);
}
void gic_bind_eic_interrupt(int irq, int set)
{
/* Convert irq vector # to hw int # */
irq -= GIC_PIN_TO_VEC_OFFSET;
/* Set irq to use shadow set */
GICWRITE(GIC_REG_ADDR(VPE_LOCAL, GIC_VPE_EIC_SS(irq)), set);
}
void gic_send_ipi(unsigned int intr)
{
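/*
 * GIC_SH_WEDGE semantics (as understood here): writing with bit 31 set
 * asserts the edge interrupt named in the low bits, writing with bit 31
 * clear deasserts it, so 0x80000000 | intr raises interrupt "intr" as the IPI.
 */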
GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), 0x80000000 | intr);
}
static void gic_eic_irq_dispatch(void)
{
unsigned int cause = read_c0_cause();
int irq;
irq = (cause & ST0_IM) >> STATUSB_IP2;
if (irq == 0)
irq = -1;
if (irq >= 0)
do_IRQ(gic_irq_base + irq);
else
spurious_interrupt();
}
static void __init vpe_local_setup(unsigned int numvpes)
{
unsigned long timer_intr = GIC_INT_TMR;
unsigned long perf_intr = GIC_INT_PERFCTR;
unsigned int vpe_ctl;
int i;
if (cpu_has_veic) {
/*
* GIC timer interrupt -> CPU HW Int X (vector X+2) ->
* map to pin X+2-1 (since GIC adds 1)
*/
timer_intr += (GIC_CPU_TO_VEC_OFFSET - GIC_PIN_TO_VEC_OFFSET);
/*
* GIC perfcnt interrupt -> CPU HW Int X (vector X+2) ->
* map to pin X+2-1 (since GIC adds 1)
*/
perf_intr += (GIC_CPU_TO_VEC_OFFSET - GIC_PIN_TO_VEC_OFFSET);
}
/*
* Set up the default performance counter and timer interrupts
* for all VPEs
*/
for (i = 0; i < numvpes; i++) {
GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
/* Are Interrupts locally routable? */
GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_CTL), vpe_ctl);
if (vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK)
GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
GIC_MAP_TO_PIN_MSK | timer_intr);
if (cpu_has_veic) {
set_vi_handler(timer_intr + GIC_PIN_TO_VEC_OFFSET,
gic_eic_irq_dispatch);
gic_shared_intr_map[timer_intr + GIC_PIN_TO_VEC_OFFSET].local_intr_mask |= GIC_VPE_RMASK_TIMER_MSK;
}
if (vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK)
GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
GIC_MAP_TO_PIN_MSK | perf_intr);
if (cpu_has_veic) {
set_vi_handler(perf_intr + GIC_PIN_TO_VEC_OFFSET, gic_eic_irq_dispatch);
gic_shared_intr_map[perf_intr + GIC_PIN_TO_VEC_OFFSET].local_intr_mask |= GIC_VPE_RMASK_PERFCNT_MSK;
}
}
}
unsigned int gic_compare_int(void)
{
unsigned int pending;
GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_PEND), pending);
if (pending & GIC_VPE_PEND_CMP_MSK)
return 1;
else
return 0;
}
void gic_get_int_mask(unsigned long *dst, const unsigned long *src)
{
unsigned int i;
unsigned long *pending, *intrmask, *pcpu_mask;
unsigned long *pending_abs, *intrmask_abs;
/* Get per-cpu bitmaps */
pending = pending_regs[smp_processor_id()].pending;
intrmask = intrmask_regs[smp_processor_id()].intrmask;
pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;
pending_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
GIC_SH_PEND_31_0_OFS);
intrmask_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
GIC_SH_MASK_31_0_OFS);
for (i = 0; i < BITS_TO_LONGS(GIC_NUM_INTRS); i++) {
GICREAD(*pending_abs, pending[i]);
GICREAD(*intrmask_abs, intrmask[i]);
pending_abs++;
intrmask_abs++;
}
bitmap_and(pending, pending, intrmask, GIC_NUM_INTRS);
bitmap_and(pending, pending, pcpu_mask, GIC_NUM_INTRS);
bitmap_and(dst, src, pending, GIC_NUM_INTRS);
}
unsigned int gic_get_int(void)
{
DECLARE_BITMAP(interrupts, GIC_NUM_INTRS);
bitmap_fill(interrupts, GIC_NUM_INTRS);
gic_get_int_mask(interrupts, interrupts);
return find_first_bit(interrupts, GIC_NUM_INTRS);
}
static void gic_mask_irq(struct irq_data *d)
{
GIC_CLR_INTR_MASK(d->irq - gic_irq_base);
}
static void gic_unmask_irq(struct irq_data *d)
{
GIC_SET_INTR_MASK(d->irq - gic_irq_base);
}
#ifdef CONFIG_SMP
static DEFINE_SPINLOCK(gic_lock);
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
bool force)
{
unsigned int irq = (d->irq - gic_irq_base);
cpumask_t tmp = CPU_MASK_NONE;
unsigned long flags;
int i;
cpumask_and(&tmp, cpumask, cpu_online_mask);
if (cpus_empty(tmp))
return -1;
/* Assumption : cpumask refers to a single CPU */
spin_lock_irqsave(&gic_lock, flags);
/* Re-route this IRQ */
GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));
/* Update the pcpu_masks */
for (i = 0; i < NR_CPUS; i++)
clear_bit(irq, pcpu_masks[i].pcpu_mask);
set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
cpumask_copy(d->affinity, cpumask);
spin_unlock_irqrestore(&gic_lock, flags);
return IRQ_SET_MASK_OK_NOCOPY;
}
#endif
static struct irq_chip gic_irq_controller = {
.name = "MIPS GIC",
.irq_ack = gic_irq_ack,
.irq_mask = gic_mask_irq,
.irq_mask_ack = gic_mask_irq,
.irq_unmask = gic_unmask_irq,
.irq_eoi = gic_finish_irq,
#ifdef CONFIG_SMP
.irq_set_affinity = gic_set_affinity,
#endif
};
static void __init gic_setup_intr(unsigned int intr, unsigned int cpu,
unsigned int pin, unsigned int polarity, unsigned int trigtype,
unsigned int flags)
{
struct gic_shared_intr_map *map_ptr;
/* Setup Intr to Pin mapping */
if (pin & GIC_MAP_TO_NMI_MSK) {
int i;
GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin);
/* FIXME: hack to route NMI to all cpu's */
for (i = 0; i < NR_CPUS; i += 32) {
GICWRITE(GIC_REG_ADDR(SHARED,
GIC_SH_MAP_TO_VPE_REG_OFF(intr, i)),
0xffffffff);
}
} else {
GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)),
GIC_MAP_TO_PIN_MSK | pin);
/* Setup Intr to CPU mapping */
GIC_SH_MAP_TO_VPE_SMASK(intr, cpu);
if (cpu_has_veic) {
set_vi_handler(pin + GIC_PIN_TO_VEC_OFFSET,
gic_eic_irq_dispatch);
map_ptr = &gic_shared_intr_map[pin + GIC_PIN_TO_VEC_OFFSET];
if (map_ptr->num_shared_intr >= GIC_MAX_SHARED_INTR)
BUG();
map_ptr->intr_list[map_ptr->num_shared_intr++] = intr;
}
}
/* Setup Intr Polarity */
GIC_SET_POLARITY(intr, polarity);
/* Setup Intr Trigger Type */
GIC_SET_TRIGGER(intr, trigtype);
/* Init Intr Masks */
GIC_CLR_INTR_MASK(intr);
/* Initialise per-cpu Interrupt software masks */
set_bit(intr, pcpu_masks[cpu].pcpu_mask);
if ((flags & GIC_FLAG_TRANSPARENT) && (cpu_has_veic == 0))
GIC_SET_INTR_MASK(intr);
if (trigtype == GIC_TRIG_EDGE)
gic_irq_flags[intr] |= GIC_TRIG_EDGE;
}
static void __init gic_basic_init(int numintrs, int numvpes,
struct gic_intr_map *intrmap, int mapsize)
{
unsigned int i, cpu;
unsigned int pin_offset = 0;
board_bind_eic_interrupt = &gic_bind_eic_interrupt;
/* Setup defaults */
for (i = 0; i < numintrs; i++) {
GIC_SET_POLARITY(i, GIC_POL_POS);
GIC_SET_TRIGGER(i, GIC_TRIG_LEVEL);
GIC_CLR_INTR_MASK(i);
if (i < GIC_NUM_INTRS) {
gic_irq_flags[i] = 0;
gic_shared_intr_map[i].num_shared_intr = 0;
gic_shared_intr_map[i].local_intr_mask = 0;
}
}
/*
* In EIC mode, the HW_INT# is offset by (2-1). Need to subtract
* one because the GIC will add one (since 0=no intr).
*/
if (cpu_has_veic)
pin_offset = (GIC_CPU_TO_VEC_OFFSET - GIC_PIN_TO_VEC_OFFSET);
/* Setup specifics */
for (i = 0; i < mapsize; i++) {
cpu = intrmap[i].cpunum;
if (cpu == GIC_UNUSED)
continue;
gic_setup_intr(i,
intrmap[i].cpunum,
intrmap[i].pin + pin_offset,
intrmap[i].polarity,
intrmap[i].trigtype,
intrmap[i].flags);
}
vpe_local_setup(numvpes);
}
void __init gic_init(unsigned long gic_base_addr,
unsigned long gic_addrspace_size,
struct gic_intr_map *intr_map, unsigned int intr_map_size,
unsigned int irqbase)
{
unsigned int gicconfig;
int numvpes, numintrs;
_gic_base = (unsigned long) ioremap_nocache(gic_base_addr,
gic_addrspace_size);
gic_irq_base = irqbase;
GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
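/*
 * The NUMINTRS field encodes the interrupt count in units of 8, minus 1;
 * e.g. a field value of 7 (illustrative) means (7 + 1) * 8 = 64 sources.
 * NUMVPES is similarly "number of VPEs - 1".
 */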
numintrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
GIC_SH_CONFIG_NUMINTRS_SHF;
numintrs = ((numintrs + 1) * 8);
numvpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
GIC_SH_CONFIG_NUMVPES_SHF;
numvpes = numvpes + 1;
gic_basic_init(numintrs, numvpes, intr_map, intr_map_size);
gic_platform_init(numintrs, &gic_irq_controller);
}

131
arch/mips/kernel/irq-gt641xx.c Normal file
View file

@ -0,0 +1,131 @@
/*
* GT641xx IRQ routines.
*
* Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/gt64120.h>
#define GT641XX_IRQ_TO_BIT(irq) (1U << (irq - GT641XX_IRQ_BASE))
static DEFINE_RAW_SPINLOCK(gt641xx_irq_lock);
static void ack_gt641xx_irq(struct irq_data *d)
{
unsigned long flags;
u32 cause;
raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
cause = GT_READ(GT_INTRCAUSE_OFS);
cause &= ~GT641XX_IRQ_TO_BIT(d->irq);
GT_WRITE(GT_INTRCAUSE_OFS, cause);
raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
}
static void mask_gt641xx_irq(struct irq_data *d)
{
unsigned long flags;
u32 mask;
raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
mask = GT_READ(GT_INTRMASK_OFS);
mask &= ~GT641XX_IRQ_TO_BIT(d->irq);
GT_WRITE(GT_INTRMASK_OFS, mask);
raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
}
static void mask_ack_gt641xx_irq(struct irq_data *d)
{
unsigned long flags;
u32 cause, mask;
raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
mask = GT_READ(GT_INTRMASK_OFS);
mask &= ~GT641XX_IRQ_TO_BIT(d->irq);
GT_WRITE(GT_INTRMASK_OFS, mask);
cause = GT_READ(GT_INTRCAUSE_OFS);
cause &= ~GT641XX_IRQ_TO_BIT(d->irq);
GT_WRITE(GT_INTRCAUSE_OFS, cause);
raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
}
static void unmask_gt641xx_irq(struct irq_data *d)
{
unsigned long flags;
u32 mask;
raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
mask = GT_READ(GT_INTRMASK_OFS);
mask |= GT641XX_IRQ_TO_BIT(d->irq);
GT_WRITE(GT_INTRMASK_OFS, mask);
raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
}
static struct irq_chip gt641xx_irq_chip = {
.name = "GT641xx",
.irq_ack = ack_gt641xx_irq,
.irq_mask = mask_gt641xx_irq,
.irq_mask_ack = mask_ack_gt641xx_irq,
.irq_unmask = unmask_gt641xx_irq,
};
void gt641xx_irq_dispatch(void)
{
u32 cause, mask;
int i;
cause = GT_READ(GT_INTRCAUSE_OFS);
mask = GT_READ(GT_INTRMASK_OFS);
cause &= mask;
/*
* bit0 : logical or of all the interrupt bits.
* bit30: logical or of bits[29:26,20:1].
* bit31: logical or of bits[25:1].
*/
for (i = 1; i < 30; i++) {
if (cause & (1U << i)) {
do_IRQ(GT641XX_IRQ_BASE + i);
return;
}
}
atomic_inc(&irq_err_count);
}
void __init gt641xx_irq_init(void)
{
int i;
GT_WRITE(GT_INTRMASK_OFS, 0);
GT_WRITE(GT_INTRCAUSE_OFS, 0);
/*
* bit0 : logical or of all the interrupt bits.
* bit30: logical or of bits[29:26,20:1].
* bit31: logical or of bits[25:1].
*/
for (i = 1; i < 30; i++)
irq_set_chip_and_handler(GT641XX_IRQ_BASE + i,
&gt641xx_irq_chip, handle_level_irq);
}

159
arch/mips/kernel/irq-msc01.c Normal file
View file

@ -0,0 +1,159 @@
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* Copyright (c) 2004 MIPS Inc
* Author: chris@mips.com
*
* Copyright (C) 2004, 06 Ralf Baechle <ralf@linux-mips.org>
*/
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/msc01_ic.h>
#include <asm/traps.h>
static unsigned long _icctrl_msc;
#define MSC01_IC_REG_BASE _icctrl_msc
#define MSCIC_WRITE(reg, data) do { *(volatile u32 *)(reg) = data; } while (0)
#define MSCIC_READ(reg, data) do { data = *(volatile u32 *)(reg); } while (0)
static unsigned int irq_base;
/* mask off an interrupt */
static inline void mask_msc_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
if (irq < (irq_base + 32))
MSCIC_WRITE(MSC01_IC_DISL, 1<<(irq - irq_base));
else
MSCIC_WRITE(MSC01_IC_DISH, 1<<(irq - irq_base - 32));
}
/* unmask an interrupt */
static inline void unmask_msc_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
if (irq < (irq_base + 32))
MSCIC_WRITE(MSC01_IC_ENAL, 1<<(irq - irq_base));
else
MSCIC_WRITE(MSC01_IC_ENAH, 1<<(irq - irq_base - 32));
}
/*
* Masks and ACKs an IRQ
*/
static void level_mask_and_ack_msc_irq(struct irq_data *d)
{
mask_msc_irq(d);
if (!cpu_has_veic)
MSCIC_WRITE(MSC01_IC_EOI, 0);
}
/*
* Masks and ACKs an IRQ
*/
static void edge_mask_and_ack_msc_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
mask_msc_irq(d);
if (!cpu_has_veic)
MSCIC_WRITE(MSC01_IC_EOI, 0);
else {
u32 r;
MSCIC_READ(MSC01_IC_SUP+irq*8, r);
MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT);
MSCIC_WRITE(MSC01_IC_SUP+irq*8, r);
}
}
/*
* Interrupt handler for interrupts coming from SOC-it.
*/
void ll_msc_irq(void)
{
unsigned int irq;
/* read the interrupt vector register */
MSCIC_READ(MSC01_IC_VEC, irq);
if (irq < 64)
do_IRQ(irq + irq_base);
else {
/* Ignore spurious interrupt */
}
}
static void msc_bind_eic_interrupt(int irq, int set)
{
MSCIC_WRITE(MSC01_IC_RAMW,
(irq<<MSC01_IC_RAMW_ADDR_SHF) | (set<<MSC01_IC_RAMW_DATA_SHF));
}
static struct irq_chip msc_levelirq_type = {
.name = "SOC-it-Level",
.irq_ack = level_mask_and_ack_msc_irq,
.irq_mask = mask_msc_irq,
.irq_mask_ack = level_mask_and_ack_msc_irq,
.irq_unmask = unmask_msc_irq,
.irq_eoi = unmask_msc_irq,
};
static struct irq_chip msc_edgeirq_type = {
.name = "SOC-it-Edge",
.irq_ack = edge_mask_and_ack_msc_irq,
.irq_mask = mask_msc_irq,
.irq_mask_ack = edge_mask_and_ack_msc_irq,
.irq_unmask = unmask_msc_irq,
.irq_eoi = unmask_msc_irq,
};
void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqmap_t *imp, int nirq)
{
_icctrl_msc = (unsigned long) ioremap(icubase, 0x40000);
/* Reset interrupt controller - initialises all registers to 0 */
MSCIC_WRITE(MSC01_IC_RST, MSC01_IC_RST_RST_BIT);
board_bind_eic_interrupt = &msc_bind_eic_interrupt;
for (; nirq > 0; nirq--, imp++) {
int n = imp->im_irq;
switch (imp->im_type) {
case MSC01_IRQ_EDGE:
irq_set_chip_and_handler_name(irqbase + n,
&msc_edgeirq_type,
handle_edge_irq,
"edge");
if (cpu_has_veic)
MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT);
else
MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl);
break;
case MSC01_IRQ_LEVEL:
irq_set_chip_and_handler_name(irqbase + n,
&msc_levelirq_type,
handle_level_irq,
"level");
if (cpu_has_veic)
MSCIC_WRITE(MSC01_IC_SUP+n*8, 0);
else
MSCIC_WRITE(MSC01_IC_SUP+n*8, imp->im_lvl);
}
}
irq_base = irqbase;
MSCIC_WRITE(MSC01_IC_GENA, MSC01_IC_GENA_GENA_BIT); /* Enable interrupt generation */
}


@ -0,0 +1,49 @@
/*
* Copyright (C) 2003 Ralf Baechle
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* Handler for RM7000 extended interrupts. These are a non-standard
* feature so we handle them separately from standard interrupts.
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
static inline void unmask_rm7k_irq(struct irq_data *d)
{
set_c0_intcontrol(0x100 << (d->irq - RM7K_CPU_IRQ_BASE));
}
static inline void mask_rm7k_irq(struct irq_data *d)
{
clear_c0_intcontrol(0x100 << (d->irq - RM7K_CPU_IRQ_BASE));
}
static struct irq_chip rm7k_irq_controller = {
.name = "RM7000",
.irq_ack = mask_rm7k_irq,
.irq_mask = mask_rm7k_irq,
.irq_mask_ack = mask_rm7k_irq,
.irq_unmask = unmask_rm7k_irq,
.irq_eoi = unmask_rm7k_irq
};
void __init rm7k_cpu_irq_init(void)
{
int base = RM7K_CPU_IRQ_BASE;
int i;
clear_c0_intcontrol(0x00000f00); /* Mask all */
for (i = base; i < base + 4; i++)
irq_set_chip_and_handler(i, &rm7k_irq_controller,
handle_percpu_irq);
}

147
arch/mips/kernel/irq.c Normal file

@ -0,0 +1,147 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Code to handle x86 style IRQs plus some generic interrupt stuff.
*
* Copyright (C) 1992 Linus Torvalds
* Copyright (C) 1994 - 2000 Ralf Baechle
*/
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/atomic.h>
#include <asm/uaccess.h>
#ifdef CONFIG_KGDB
int kgdb_early_setup;
#endif
static unsigned long irq_map[NR_IRQS / BITS_PER_LONG];
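/*
 * Simple bitmap allocator for IRQ numbers: find a clear bit and claim
 * it atomically, retrying if another CPU raced us to the same bit.
 */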
int allocate_irqno(void)
{
int irq;
again:
irq = find_first_zero_bit(irq_map, NR_IRQS);
if (irq >= NR_IRQS)
return -ENOSPC;
if (test_and_set_bit(irq, irq_map))
goto again;
return irq;
}
/*
* Allocate the 16 legacy interrupts for i8259 devices. This happens early
* in the kernel initialization so treating allocation failure as BUG() is
* ok.
*/
void __init alloc_legacy_irqno(void)
{
int i;
for (i = 0; i <= 16; i++)
BUG_ON(test_and_set_bit(i, irq_map));
}
void free_irqno(unsigned int irq)
{
smp_mb__before_atomic();
clear_bit(irq, irq_map);
smp_mb__after_atomic();
}
/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself.
*/
void ack_bad_irq(unsigned int irq)
{
printk("unexpected IRQ # %d\n", irq);
}
atomic_t irq_err_count;
int arch_show_interrupts(struct seq_file *p, int prec)
{
seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
return 0;
}
asmlinkage void spurious_interrupt(void)
{
atomic_inc(&irq_err_count);
}
void __init init_IRQ(void)
{
int i;
#ifdef CONFIG_KGDB
if (kgdb_early_setup)
return;
#endif
for (i = 0; i < NR_IRQS; i++)
irq_set_noprobe(i);
arch_init_irq();
#ifdef CONFIG_KGDB
if (!kgdb_early_setup)
kgdb_early_setup = 1;
#endif
}
#ifdef DEBUG_STACKOVERFLOW
static inline void check_stack_overflow(void)
{
unsigned long sp;
__asm__ __volatile__("move %0, $sp" : "=r" (sp));
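	/* Reduce sp to its offset within the THREAD_SIZE-aligned kernel stack. */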
sp &= THREAD_MASK;
/*
* Check for stack overflow: is there less than STACK_WARN free?
* STACK_WARN is defined as 1/8 of THREAD_SIZE by default.
*/
if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
printk("do_IRQ: stack overflow: %ld\n",
sp - sizeof(struct thread_info));
dump_stack();
}
}
#else
static inline void check_stack_overflow(void) {}
#endif
/*
 * do_IRQ handles all normal device IRQs (the special
* SMP cross-CPU interrupts have their own specific
* handlers).
*/
void __irq_entry do_IRQ(unsigned int irq)
{
irq_enter();
check_stack_overflow();
generic_handle_irq(irq);
irq_exit();
}

161
arch/mips/kernel/irq_cpu.c Normal file

@ -0,0 +1,161 @@
/*
* Copyright 2001 MontaVista Software Inc.
* Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
*
* Copyright (C) 2001 Ralf Baechle
* Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
* Author: Maciej W. Rozycki <macro@mips.com>
*
 * This file defines the irq handler for MIPS CPU interrupts.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
/*
* Almost all MIPS CPUs define 8 interrupt sources. They are typically
* level triggered (i.e., cannot be cleared from CPU; must be cleared from
* device). The first two are software interrupts which we don't really
 * use or support. The last one is usually the CPU timer interrupt, if the
 * counter register is present, or, for CPUs with an external FPU, by
 * convention the FPU exception interrupt.
*
* Don't even think about using this on SMP. You have been warned.
*
* This file exports one global function:
* void mips_cpu_irq_init(void);
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
static inline void unmask_mips_irq(struct irq_data *d)
{
set_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
irq_enable_hazard();
}
static inline void mask_mips_irq(struct irq_data *d)
{
clear_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
irq_disable_hazard();
}
static struct irq_chip mips_cpu_irq_controller = {
.name = "MIPS",
.irq_ack = mask_mips_irq,
.irq_mask = mask_mips_irq,
.irq_mask_ack = mask_mips_irq,
.irq_unmask = unmask_mips_irq,
.irq_eoi = unmask_mips_irq,
.irq_disable = mask_mips_irq,
.irq_enable = unmask_mips_irq,
};
/*
* Basically the same as above but taking care of all the MT stuff
*/
static unsigned int mips_mt_cpu_irq_startup(struct irq_data *d)
{
unsigned int vpflags = dvpe();
clear_c0_cause(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
evpe(vpflags);
unmask_mips_irq(d);
return 0;
}
/*
 * While we ack the interrupt, interrupts are disabled and thus we don't need
 * to deal with concurrency issues. The same applies to mips_cpu_irq_end.
*/
static void mips_mt_cpu_irq_ack(struct irq_data *d)
{
unsigned int vpflags = dvpe();
clear_c0_cause(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
evpe(vpflags);
mask_mips_irq(d);
}
static struct irq_chip mips_mt_cpu_irq_controller = {
.name = "MIPS",
.irq_startup = mips_mt_cpu_irq_startup,
.irq_ack = mips_mt_cpu_irq_ack,
.irq_mask = mask_mips_irq,
.irq_mask_ack = mips_mt_cpu_irq_ack,
.irq_unmask = unmask_mips_irq,
.irq_eoi = unmask_mips_irq,
.irq_disable = mask_mips_irq,
.irq_enable = unmask_mips_irq,
};
void __init mips_cpu_irq_init(void)
{
int irq_base = MIPS_CPU_IRQ_BASE;
int i;
/* Mask interrupts. */
clear_c0_status(ST0_IM);
clear_c0_cause(CAUSEF_IP);
/* Software interrupts are used for MT/CMT IPI */
for (i = irq_base; i < irq_base + 2; i++)
irq_set_chip_and_handler(i, cpu_has_mipsmt ?
&mips_mt_cpu_irq_controller :
&mips_cpu_irq_controller,
handle_percpu_irq);
for (i = irq_base + 2; i < irq_base + 8; i++)
irq_set_chip_and_handler(i, &mips_cpu_irq_controller,
handle_percpu_irq);
}
#ifdef CONFIG_IRQ_DOMAIN
static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
static struct irq_chip *chip;
if (hw < 2 && cpu_has_mipsmt) {
/* Software interrupts are used for MT/CMT IPI */
chip = &mips_mt_cpu_irq_controller;
} else {
chip = &mips_cpu_irq_controller;
}
irq_set_chip_and_handler(irq, chip, handle_percpu_irq);
return 0;
}
static const struct irq_domain_ops mips_cpu_intc_irq_domain_ops = {
.map = mips_cpu_intc_map,
.xlate = irq_domain_xlate_onecell,
};
int __init mips_cpu_intc_init(struct device_node *of_node,
struct device_node *parent)
{
struct irq_domain *domain;
/* Mask interrupts. */
clear_c0_status(ST0_IM);
clear_c0_cause(CAUSEF_IP);
domain = irq_domain_add_legacy(of_node, 8, MIPS_CPU_IRQ_BASE, 0,
&mips_cpu_intc_irq_domain_ops, NULL);
if (!domain)
panic("Failed to add irqdomain for MIPS CPU");
return 0;
}
#endif /* CONFIG_IRQ_DOMAIN */

191
arch/mips/kernel/irq_txx9.c Normal file

@ -0,0 +1,191 @@
/*
* Based on linux/arch/mips/jmr3927/rbhma3100/irq.c,
* linux/arch/mips/tx4927/common/tx4927_irq.c,
* linux/arch/mips/tx4938/common/irq.c
*
* Copyright 2001, 2003-2005 MontaVista Software Inc.
* Author: MontaVista Software, Inc.
* ahennessy@mvista.com
* source@mvista.com
* Copyright (C) 2000-2001 Toshiba Corporation
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/irq.h>
#include <asm/txx9irq.h>
struct txx9_irc_reg {
u32 cer;
u32 cr[2];
u32 unused0;
u32 ilr[8];
u32 unused1[4];
u32 imr;
u32 unused2[7];
u32 scr;
u32 unused3[7];
u32 ssr;
u32 unused4[7];
u32 csr;
};
/* IRCER : Int. Control Enable */
#define TXx9_IRCER_ICE 0x00000001
/* IRCR : Int. Control */
#define TXx9_IRCR_LOW 0x00000000
#define TXx9_IRCR_HIGH 0x00000001
#define TXx9_IRCR_DOWN 0x00000002
#define TXx9_IRCR_UP 0x00000003
#define TXx9_IRCR_EDGE(cr) ((cr) & 0x00000002)
/* IRSCR : Int. Status Control */
#define TXx9_IRSCR_EIClrE 0x00000100
#define TXx9_IRSCR_EIClr_MASK 0x0000000f
/* IRCSR : Int. Current Status */
#define TXx9_IRCSR_IF 0x00010000
#define TXx9_IRCSR_ILV_MASK 0x00000700
#define TXx9_IRCSR_IVL_MASK 0x0000001f
#define irc_dlevel 0
#define irc_elevel 1
static struct txx9_irc_reg __iomem *txx9_ircptr __read_mostly;
static struct {
unsigned char level;
unsigned char mode;
} txx9irq[TXx9_MAX_IR] __read_mostly;
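/*
 * ILR register n holds the 8-bit level fields of four interrupt
 * sources: 2n and 2n+1 in its low half-word and 2n+16 and 2n+17 in
 * its high half-word, which is what the index/offset arithmetic in
 * the mask/unmask helpers below computes.
 */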
static void txx9_irq_unmask(struct irq_data *d)
{
unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
	u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16) / 2];
int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8;
__raw_writel((__raw_readl(ilrp) & ~(0xff << ofs))
| (txx9irq[irq_nr].level << ofs),
ilrp);
#ifdef CONFIG_CPU_TX39XX
/* update IRCSR */
__raw_writel(0, &txx9_ircptr->imr);
__raw_writel(irc_elevel, &txx9_ircptr->imr);
#endif
}
static inline void txx9_irq_mask(struct irq_data *d)
{
unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16) / 2];
int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8;
__raw_writel((__raw_readl(ilrp) & ~(0xff << ofs))
| (irc_dlevel << ofs),
ilrp);
#ifdef CONFIG_CPU_TX39XX
/* update IRCSR */
__raw_writel(0, &txx9_ircptr->imr);
__raw_writel(irc_elevel, &txx9_ircptr->imr);
/* flush write buffer */
__raw_readl(&txx9_ircptr->ssr);
#else
mmiowb();
#endif
}
static void txx9_irq_mask_ack(struct irq_data *d)
{
unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
txx9_irq_mask(d);
/* clear edge detection */
if (unlikely(TXx9_IRCR_EDGE(txx9irq[irq_nr].mode)))
__raw_writel(TXx9_IRSCR_EIClrE | irq_nr, &txx9_ircptr->scr);
}
static int txx9_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
u32 cr;
u32 __iomem *crp;
int ofs;
int mode;
if (flow_type & IRQF_TRIGGER_PROBE)
return 0;
switch (flow_type & IRQF_TRIGGER_MASK) {
case IRQF_TRIGGER_RISING: mode = TXx9_IRCR_UP; break;
case IRQF_TRIGGER_FALLING: mode = TXx9_IRCR_DOWN; break;
case IRQF_TRIGGER_HIGH: mode = TXx9_IRCR_HIGH; break;
case IRQF_TRIGGER_LOW: mode = TXx9_IRCR_LOW; break;
default:
return -EINVAL;
}
crp = &txx9_ircptr->cr[(unsigned int)irq_nr / 8];
cr = __raw_readl(crp);
ofs = (irq_nr & (8 - 1)) * 2;
cr &= ~(0x3 << ofs);
cr |= (mode & 0x3) << ofs;
__raw_writel(cr, crp);
txx9irq[irq_nr].mode = mode;
return 0;
}
static struct irq_chip txx9_irq_chip = {
.name = "TXX9",
.irq_ack = txx9_irq_mask_ack,
.irq_mask = txx9_irq_mask,
.irq_mask_ack = txx9_irq_mask_ack,
.irq_unmask = txx9_irq_unmask,
.irq_set_type = txx9_irq_set_type,
};
void __init txx9_irq_init(unsigned long baseaddr)
{
int i;
txx9_ircptr = ioremap(baseaddr, sizeof(struct txx9_irc_reg));
for (i = 0; i < TXx9_MAX_IR; i++) {
txx9irq[i].level = 4; /* middle level */
txx9irq[i].mode = TXx9_IRCR_LOW;
irq_set_chip_and_handler(TXX9_IRQ_BASE + i, &txx9_irq_chip,
handle_level_irq);
}
/* mask all IRC interrupts */
__raw_writel(0, &txx9_ircptr->imr);
for (i = 0; i < 8; i++)
__raw_writel(0, &txx9_ircptr->ilr[i]);
/* setup IRC interrupt mode (Low Active) */
for (i = 0; i < 2; i++)
__raw_writel(0, &txx9_ircptr->cr[i]);
/* enable interrupt control */
__raw_writel(TXx9_IRCER_ICE, &txx9_ircptr->cer);
__raw_writel(irc_elevel, &txx9_ircptr->imr);
}
int __init txx9_irq_set_pri(int irc_irq, int new_pri)
{
int old_pri;
if ((unsigned int)irc_irq >= TXx9_MAX_IR)
return 0;
old_pri = txx9irq[irc_irq].level;
txx9irq[irc_irq].level = new_pri;
return old_pri;
}
int txx9_irq(void)
{
u32 csr = __raw_readl(&txx9_ircptr->csr);
if (likely(!(csr & TXx9_IRCSR_IF)))
return TXX9_IRQ_BASE + (csr & (TXx9_MAX_IR - 1));
return -1;
}


@ -0,0 +1,76 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2010 Cavium Networks, Inc.
*/
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/memory.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <asm/cacheflush.h>
#include <asm/inst.h>
#ifdef HAVE_JUMP_LABEL
/*
* Define parameters for the standard MIPS and the microMIPS jump
* instruction encoding respectively:
*
* - the ISA bit of the target, either 0 or 1 respectively,
*
* - the amount the jump target address is shifted right to fit in the
* immediate field of the machine instruction, either 2 or 1,
*
* - the mask determining the size of the jump region relative to the
* delay-slot instruction, either 256MB or 128MB,
*
* - the jump target alignment, either 4 or 2 bytes.
*/
#define J_ISA_BIT IS_ENABLED(CONFIG_CPU_MICROMIPS)
#define J_RANGE_SHIFT (2 - J_ISA_BIT)
#define J_RANGE_MASK ((1ul << (26 + J_RANGE_SHIFT)) - 1)
#define J_ALIGN_MASK ((1ul << J_RANGE_SHIFT) - 1)
void arch_jump_label_transform(struct jump_entry *e,
enum jump_label_type type)
{
union mips_instruction *insn_p;
union mips_instruction insn;
insn_p = (union mips_instruction *)msk_isa16_mode(e->code);
	/* A jump only works within the aligned region its delay slot is in. */
BUG_ON((e->target & ~J_RANGE_MASK) != ((e->code + 4) & ~J_RANGE_MASK));
/* Target must have the right alignment and ISA must be preserved. */
BUG_ON((e->target & J_ALIGN_MASK) != J_ISA_BIT);
if (type == JUMP_LABEL_ENABLE) {
insn.j_format.opcode = J_ISA_BIT ? mm_j32_op : j_op;
insn.j_format.target = e->target >> J_RANGE_SHIFT;
} else {
insn.word = 0; /* nop */
}
get_online_cpus();
mutex_lock(&text_mutex);
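	/*
	 * microMIPS stores 32-bit instructions as two halfwords with the
	 * most significant halfword first, so patch the instruction as two
	 * 16-bit pieces rather than with a single word store.
	 */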
if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) {
insn_p->halfword[0] = insn.word >> 16;
insn_p->halfword[1] = insn.word;
} else
*insn_p = insn;
flush_icache_range((unsigned long)insn_p,
(unsigned long)insn_p + sizeof(*insn_p));
mutex_unlock(&text_mutex);
put_online_cpus();
}
#endif /* HAVE_JUMP_LABEL */

409
arch/mips/kernel/kgdb.c Normal file

@ -0,0 +1,409 @@
/*
* Originally written by Glenn Engel, Lake Stevens Instrument Division
*
* Contributed by HP Systems
*
* Modified for Linux/MIPS (and MIPS in general) by Andreas Busse
* Send complaints, suggestions etc. to <andy@waldorf-gmbh.de>
*
* Copyright (C) 1995 Andreas Busse
*
* Copyright (C) 2003 MontaVista Software Inc.
* Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
*
* Copyright (C) 2004-2005 MontaVista Software Inc.
* Author: Manish Lachwani, mlachwani@mvista.com or manish@koffee-break.com
*
* Copyright (C) 2007-2008 Wind River Systems, Inc.
* Author/Maintainer: Jason Wessel, jason.wessel@windriver.com
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/ptrace.h> /* for linux pt_regs struct */
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/inst.h>
#include <asm/fpu.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/uaccess.h>
static struct hard_trap_info {
unsigned char tt; /* Trap type code for MIPS R3xxx and R4xxx */
unsigned char signo; /* Signal that we map this trap into */
} hard_trap_info[] = {
{ 6, SIGBUS }, /* instruction bus error */
{ 7, SIGBUS }, /* data bus error */
{ 9, SIGTRAP }, /* break */
/* { 11, SIGILL }, */ /* CPU unusable */
{ 12, SIGFPE }, /* overflow */
{ 13, SIGTRAP }, /* trap */
{ 14, SIGSEGV }, /* virtual instruction cache coherency */
{ 15, SIGFPE }, /* floating point exception */
{ 23, SIGSEGV }, /* watch */
{ 31, SIGSEGV }, /* virtual data cache coherency */
{ 0, 0} /* Must be last */
};
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
{
{ "zero", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) },
{ "at", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) },
{ "v0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) },
{ "v1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) },
{ "a0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) },
{ "a1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) },
{ "a2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) },
{ "a3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) },
{ "t0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) },
{ "t1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) },
{ "t2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) },
{ "t3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) },
{ "t4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) },
{ "t5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) },
{ "t6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) },
{ "t7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) },
{ "s0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[16]) },
{ "s1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[17]) },
{ "s2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[18]) },
{ "s3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[19]) },
{ "s4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[20]) },
{ "s5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[21]) },
{ "s6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[22]) },
{ "s7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[23]) },
{ "t8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[24]) },
{ "t9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[25]) },
{ "k0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[26]) },
{ "k1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[27]) },
{ "gp", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[28]) },
{ "sp", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[29]) },
{ "s8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[30]) },
{ "ra", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[31]) },
{ "sr", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_status) },
{ "lo", GDB_SIZEOF_REG, offsetof(struct pt_regs, lo) },
{ "hi", GDB_SIZEOF_REG, offsetof(struct pt_regs, hi) },
{ "bad", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_badvaddr) },
{ "cause", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_cause) },
{ "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_epc) },
{ "f0", GDB_SIZEOF_REG, 0 },
{ "f1", GDB_SIZEOF_REG, 1 },
{ "f2", GDB_SIZEOF_REG, 2 },
{ "f3", GDB_SIZEOF_REG, 3 },
{ "f4", GDB_SIZEOF_REG, 4 },
{ "f5", GDB_SIZEOF_REG, 5 },
{ "f6", GDB_SIZEOF_REG, 6 },
{ "f7", GDB_SIZEOF_REG, 7 },
{ "f8", GDB_SIZEOF_REG, 8 },
{ "f9", GDB_SIZEOF_REG, 9 },
{ "f10", GDB_SIZEOF_REG, 10 },
{ "f11", GDB_SIZEOF_REG, 11 },
{ "f12", GDB_SIZEOF_REG, 12 },
{ "f13", GDB_SIZEOF_REG, 13 },
{ "f14", GDB_SIZEOF_REG, 14 },
{ "f15", GDB_SIZEOF_REG, 15 },
{ "f16", GDB_SIZEOF_REG, 16 },
{ "f17", GDB_SIZEOF_REG, 17 },
{ "f18", GDB_SIZEOF_REG, 18 },
{ "f19", GDB_SIZEOF_REG, 19 },
{ "f20", GDB_SIZEOF_REG, 20 },
{ "f21", GDB_SIZEOF_REG, 21 },
{ "f22", GDB_SIZEOF_REG, 22 },
{ "f23", GDB_SIZEOF_REG, 23 },
{ "f24", GDB_SIZEOF_REG, 24 },
{ "f25", GDB_SIZEOF_REG, 25 },
{ "f26", GDB_SIZEOF_REG, 26 },
{ "f27", GDB_SIZEOF_REG, 27 },
{ "f28", GDB_SIZEOF_REG, 28 },
{ "f29", GDB_SIZEOF_REG, 29 },
{ "f30", GDB_SIZEOF_REG, 30 },
{ "f31", GDB_SIZEOF_REG, 31 },
{ "fsr", GDB_SIZEOF_REG, 0 },
{ "fir", GDB_SIZEOF_REG, 0 },
};
int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
int fp_reg;
if (regno < 0 || regno >= DBG_MAX_REG_NUM)
return -EINVAL;
if (dbg_reg_def[regno].offset != -1 && regno < 38) {
memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
dbg_reg_def[regno].size);
} else if (current && dbg_reg_def[regno].offset != -1 && regno < 72) {
/* FP registers 38 -> 69 */
if (!(regs->cp0_status & ST0_CU1))
return 0;
if (regno == 70) {
/* Process the fcr31/fsr (register 70) */
memcpy((void *)&current->thread.fpu.fcr31, mem,
dbg_reg_def[regno].size);
goto out_save;
} else if (regno == 71) {
/* Ignore the fir (register 71) */
goto out_save;
}
fp_reg = dbg_reg_def[regno].offset;
memcpy((void *)&current->thread.fpu.fpr[fp_reg], mem,
dbg_reg_def[regno].size);
out_save:
restore_fp(current);
}
return 0;
}
char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
int fp_reg;
if (regno >= DBG_MAX_REG_NUM || regno < 0)
return NULL;
if (dbg_reg_def[regno].offset != -1 && regno < 38) {
/* First 38 registers */
memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
dbg_reg_def[regno].size);
} else if (current && dbg_reg_def[regno].offset != -1 && regno < 72) {
/* FP registers 38 -> 69 */
if (!(regs->cp0_status & ST0_CU1))
goto out;
save_fp(current);
if (regno == 70) {
/* Process the fcr31/fsr (register 70) */
memcpy(mem, (void *)&current->thread.fpu.fcr31,
dbg_reg_def[regno].size);
goto out;
} else if (regno == 71) {
/* Ignore the fir (register 71) */
memset(mem, 0, dbg_reg_def[regno].size);
goto out;
}
fp_reg = dbg_reg_def[regno].offset;
memcpy(mem, (void *)&current->thread.fpu.fpr[fp_reg],
dbg_reg_def[regno].size);
}
out:
return dbg_reg_def[regno].name;
}
void arch_kgdb_breakpoint(void)
{
__asm__ __volatile__(
".globl breakinst\n\t"
".set\tnoreorder\n\t"
"nop\n"
"breakinst:\tbreak\n\t"
"nop\n\t"
".set\treorder");
}
static void kgdb_call_nmi_hook(void *ignored)
{
mm_segment_t old_fs;
old_fs = get_fs();
set_fs(get_ds());
kgdb_nmicallback(raw_smp_processor_id(), NULL);
set_fs(old_fs);
}
void kgdb_roundup_cpus(unsigned long flags)
{
local_irq_enable();
smp_call_function(kgdb_call_nmi_hook, NULL, 0);
local_irq_disable();
}
static int compute_signal(int tt)
{
struct hard_trap_info *ht;
for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
if (ht->tt == tt)
return ht->signo;
return SIGHUP; /* default for things we don't know about */
}
/*
 * Similar to regs_to_gdb_regs() except that the process is sleeping and so
* we may not be able to get all the info.
*/
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
int reg;
struct thread_info *ti = task_thread_info(p);
unsigned long ksp = (unsigned long)ti + THREAD_SIZE - 32;
struct pt_regs *regs = (struct pt_regs *)ksp - 1;
#if (KGDB_GDB_REG_SIZE == 32)
u32 *ptr = (u32 *)gdb_regs;
#else
u64 *ptr = (u64 *)gdb_regs;
#endif
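	/* zero, at, v0, v1, a0-a3 and t0-t7 */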
for (reg = 0; reg < 16; reg++)
*(ptr++) = regs->regs[reg];
/* S0 - S7 */
for (reg = 16; reg < 24; reg++)
*(ptr++) = regs->regs[reg];
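	/* t8, t9, k0 and k1 are not saved for a sleeping task */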
for (reg = 24; reg < 28; reg++)
*(ptr++) = 0;
/* GP, SP, FP, RA */
for (reg = 28; reg < 32; reg++)
*(ptr++) = regs->regs[reg];
*(ptr++) = regs->cp0_status;
*(ptr++) = regs->lo;
*(ptr++) = regs->hi;
*(ptr++) = regs->cp0_badvaddr;
*(ptr++) = regs->cp0_cause;
*(ptr++) = regs->cp0_epc;
}
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
regs->cp0_epc = pc;
}
/*
* Calls linux_debug_hook before the kernel dies. If KGDB is enabled,
* then try to fall into the debugger
*/
static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
void *ptr)
{
struct die_args *args = (struct die_args *)ptr;
struct pt_regs *regs = args->regs;
int trap = (regs->cp0_cause & 0x7c) >> 2;
mm_segment_t old_fs;
#ifdef CONFIG_KPROBES
/*
* Return immediately if the kprobes fault notifier has set
* DIE_PAGE_FAULT.
*/
if (cmd == DIE_PAGE_FAULT)
return NOTIFY_DONE;
#endif /* CONFIG_KPROBES */
/* Userspace events, ignore. */
if (user_mode(regs))
return NOTIFY_DONE;
/* Kernel mode. Set correct address limit */
old_fs = get_fs();
set_fs(get_ds());
if (atomic_read(&kgdb_active) != -1)
kgdb_nmicallback(smp_processor_id(), regs);
if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs)) {
set_fs(old_fs);
return NOTIFY_DONE;
}
if (atomic_read(&kgdb_setting_breakpoint))
if ((trap == 9) && (regs->cp0_epc == (unsigned long)breakinst))
regs->cp0_epc += 4;
/* In SMP mode, __flush_cache_all does IPI */
local_irq_enable();
__flush_cache_all();
set_fs(old_fs);
return NOTIFY_STOP;
}
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
int kgdb_ll_trap(int cmd, const char *str,
struct pt_regs *regs, long err, int trap, int sig)
{
struct die_args args = {
.regs = regs,
.str = str,
.err = err,
.trapnr = trap,
.signr = sig,
};
if (!kgdb_io_module_registered)
return NOTIFY_DONE;
return kgdb_mips_notify(NULL, cmd, &args);
}
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
static struct notifier_block kgdb_notifier = {
.notifier_call = kgdb_mips_notify,
};
/*
* Handle the 'c' command
*/
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
char *remcom_in_buffer, char *remcom_out_buffer,
struct pt_regs *regs)
{
char *ptr;
unsigned long address;
switch (remcom_in_buffer[0]) {
case 'c':
/* handle the optional parameter */
ptr = &remcom_in_buffer[1];
if (kgdb_hex2long(&ptr, &address))
regs->cp0_epc = address;
return 0;
}
return -1;
}
struct kgdb_arch arch_kgdb_ops;
/*
* We use kgdb_early_setup so that functions we need to call now don't
* cause trouble when called again later.
*/
int kgdb_arch_init(void)
{
union mips_instruction insn = {
.r_format = {
.opcode = spec_op,
.func = break_op,
}
};
memcpy(arch_kgdb_ops.gdb_bpt_instr, insn.byte, BREAK_INSTR_SIZE);
register_die_notifier(&kgdb_notifier);
return 0;
}
/*
 * kgdb_arch_exit - Perform any architecture-specific uninitialization.
 *
 * This function will handle the uninitialization of any architecture-specific
 * callbacks, for dynamic registration and unregistration.
*/
void kgdb_arch_exit(void)
{
unregister_die_notifier(&kgdb_notifier);
}

679
arch/mips/kernel/kprobes.c Normal file

@ -0,0 +1,679 @@
/*
* Kernel Probes (KProbes)
* arch/mips/kernel/kprobes.c
*
* Copyright 2006 Sony Corp.
* Copyright 2010 Cavium Networks
*
* Some portions copied from the powerpc version.
*
* Copyright (C) IBM Corporation, 2002, 2004
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kprobes.h>
#include <linux/preempt.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <asm/ptrace.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/inst.h>
static const union mips_instruction breakpoint_insn = {
.b_format = {
.opcode = spec_op,
.code = BRK_KPROBE_BP,
.func = break_op
}
};
static const union mips_instruction breakpoint2_insn = {
.b_format = {
.opcode = spec_op,
.code = BRK_KPROBE_SSTEPBP,
.func = break_op
}
};
DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
static int __kprobes insn_has_delayslot(union mips_instruction insn)
{
switch (insn.i_format.opcode) {
/*
* This group contains:
* jr and jalr are in r_format format.
*/
case spec_op:
switch (insn.r_format.func) {
case jr_op:
case jalr_op:
break;
default:
goto insn_ok;
}
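		/*
		 * jr and jalr fall through to the branch cases below and
		 * are reported as having a delay slot; any other spec_op
		 * instruction has none.
		 */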
/*
* This group contains:
* bltz_op, bgez_op, bltzl_op, bgezl_op,
* bltzal_op, bgezal_op, bltzall_op, bgezall_op.
*/
case bcond_op:
/*
* These are unconditional and in j_format.
*/
case jal_op:
case j_op:
/*
* These are conditional and in i_format.
*/
case beq_op:
case beql_op:
case bne_op:
case bnel_op:
case blez_op:
case blezl_op:
case bgtz_op:
case bgtzl_op:
/*
* These are the FPA/cp1 branch instructions.
*/
case cop1_op:
#ifdef CONFIG_CPU_CAVIUM_OCTEON
case lwc2_op: /* This is bbit0 on Octeon */
case ldc2_op: /* This is bbit032 on Octeon */
case swc2_op: /* This is bbit1 on Octeon */
case sdc2_op: /* This is bbit132 on Octeon */
#endif
return 1;
default:
break;
}
insn_ok:
return 0;
}
/*
 * insn_has_ll_or_sc() checks whether an instruction is an ll or sc one;
 * putting a breakpoint on top of an atomic ll/sc pair is a bad idea, so we
 * refuse kprobe insertion for such instructions. We cannot do much about a
 * breakpoint in the middle of an ll/sc pair; it is up to the user to avoid
 * those places.
*/
static int __kprobes insn_has_ll_or_sc(union mips_instruction insn)
{
int ret = 0;
switch (insn.i_format.opcode) {
case ll_op:
case lld_op:
case sc_op:
case scd_op:
ret = 1;
break;
default:
break;
}
return ret;
}
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
union mips_instruction insn;
union mips_instruction prev_insn;
int ret = 0;
insn = p->addr[0];
if (insn_has_ll_or_sc(insn)) {
		pr_notice("Kprobes for ll and sc instructions are not supported\n");
ret = -EINVAL;
goto out;
}
if ((probe_kernel_read(&prev_insn, p->addr - 1,
sizeof(mips_instruction)) == 0) &&
insn_has_delayslot(prev_insn)) {
pr_notice("Kprobes for branch delayslot are not supported\n");
ret = -EINVAL;
goto out;
}
/* insn: must be on special executable page on mips. */
p->ainsn.insn = get_insn_slot();
if (!p->ainsn.insn) {
ret = -ENOMEM;
goto out;
}
/*
* In the kprobe->ainsn.insn[] array we store the original
* instruction at index zero and a break trap instruction at
* index one.
*
* On MIPS arch if the instruction at probed address is a
* branch instruction, we need to execute the instruction at
* Branch Delayslot (BD) at the time of probe hit. As MIPS also
* doesn't have single stepping support, the BD instruction can
* not be executed in-line and it would be executed on SSOL slot
* using a normal breakpoint instruction in the next slot.
* So, read the instruction and save it for later execution.
*/
if (insn_has_delayslot(insn))
memcpy(&p->ainsn.insn[0], p->addr + 1, sizeof(kprobe_opcode_t));
else
memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));
p->ainsn.insn[1] = breakpoint2_insn;
p->opcode = *p->addr;
out:
return ret;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
*p->addr = breakpoint_insn;
flush_insn_slot(p);
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
*p->addr = p->opcode;
flush_insn_slot(p);
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
if (p->ainsn.insn) {
free_insn_slot(p->ainsn.insn, 0);
p->ainsn.insn = NULL;
}
}
static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
kcb->prev_kprobe.kp = kprobe_running();
kcb->prev_kprobe.status = kcb->kprobe_status;
kcb->prev_kprobe.old_SR = kcb->kprobe_old_SR;
kcb->prev_kprobe.saved_SR = kcb->kprobe_saved_SR;
kcb->prev_kprobe.saved_epc = kcb->kprobe_saved_epc;
}
static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
kcb->kprobe_old_SR = kcb->prev_kprobe.old_SR;
kcb->kprobe_saved_SR = kcb->prev_kprobe.saved_SR;
kcb->kprobe_saved_epc = kcb->prev_kprobe.saved_epc;
}
static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
__this_cpu_write(current_kprobe, p);
kcb->kprobe_saved_SR = kcb->kprobe_old_SR = (regs->cp0_status & ST0_IE);
kcb->kprobe_saved_epc = regs->cp0_epc;
}
/**
 * evaluate_branch_instruction -
 *
 * Evaluate the branch instruction at the probed address during probe hit. The
 * result of evaluation is the updated epc. The instruction in the delay slot
 * is actually single stepped (using a normal breakpoint) on the SSOL slot.
 *
 * The result is also saved in the kprobe control block for later use, in
 * case we need to execute the delay-slot instruction. The latter will be
 * false for a NOP instruction in the delay slot and for branch-likely
 * instructions when the branch is taken, and for those cases we set the
 * SKIP_DELAYSLOT flag in the kprobe control block.
*/
static int evaluate_branch_instruction(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
union mips_instruction insn = p->opcode;
long epc;
int ret = 0;
epc = regs->cp0_epc;
if (epc & 3)
goto unaligned;
if (p->ainsn.insn->word == 0)
kcb->flags |= SKIP_DELAYSLOT;
else
kcb->flags &= ~SKIP_DELAYSLOT;
ret = __compute_return_epc_for_insn(regs, insn);
if (ret < 0)
return ret;
if (ret == BRANCH_LIKELY_TAKEN)
kcb->flags |= SKIP_DELAYSLOT;
kcb->target_epc = regs->cp0_epc;
return 0;
unaligned:
pr_notice("%s: unaligned epc - sending SIGBUS.\n", current->comm);
force_sig(SIGBUS, current);
return -EFAULT;
}
static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
int ret = 0;
regs->cp0_status &= ~ST0_IE;
/* single step inline if the instruction is a break */
if (p->opcode.word == breakpoint_insn.word ||
p->opcode.word == breakpoint2_insn.word)
regs->cp0_epc = (unsigned long)p->addr;
else if (insn_has_delayslot(p->opcode)) {
ret = evaluate_branch_instruction(p, regs, kcb);
if (ret < 0) {
pr_notice("Kprobes: Error in evaluating branch\n");
return;
}
}
regs->cp0_epc = (unsigned long)&p->ainsn.insn[0];
}
/*
* Called after single-stepping. p->addr is the address of the
* instruction whose first byte has been replaced by the "break 0"
* instruction. To avoid the SMP problems that can occur when we
* temporarily put back the original opcode to single-step, we
* single-stepped a copy of the instruction. The address of this
* copy is p->ainsn.insn.
*
* This function prepares to return from the post-single-step
 * breakpoint trap. In the case of branch instructions, the target
 * epc is restored.
*/
static void __kprobes resume_execution(struct kprobe *p,
struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
if (insn_has_delayslot(p->opcode))
regs->cp0_epc = kcb->target_epc;
else {
unsigned long orig_epc = kcb->kprobe_saved_epc;
regs->cp0_epc = orig_epc + 4;
}
}
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p;
int ret = 0;
kprobe_opcode_t *addr;
struct kprobe_ctlblk *kcb;
addr = (kprobe_opcode_t *) regs->cp0_epc;
/*
* We don't want to be preempted for the entire
* duration of kprobe processing
*/
preempt_disable();
kcb = get_kprobe_ctlblk();
/* Check we're not actually recursing */
if (kprobe_running()) {
p = get_kprobe(addr);
if (p) {
if (kcb->kprobe_status == KPROBE_HIT_SS &&
p->ainsn.insn->word == breakpoint_insn.word) {
regs->cp0_status &= ~ST0_IE;
regs->cp0_status |= kcb->kprobe_saved_SR;
goto no_kprobe;
}
/*
 * We have re-entered kprobe_handler() because another
 * probe was hit while within the handler. Here we save
 * the original kprobe variables and just single-step on
 * the instruction of the new probe without calling any
 * user handlers.
*/
save_previous_kprobe(kcb);
set_current_kprobe(p, regs, kcb);
kprobes_inc_nmissed_count(p);
prepare_singlestep(p, regs, kcb);
kcb->kprobe_status = KPROBE_REENTER;
if (kcb->flags & SKIP_DELAYSLOT) {
resume_execution(p, regs, kcb);
restore_previous_kprobe(kcb);
preempt_enable_no_resched();
}
return 1;
} else {
if (addr->word != breakpoint_insn.word) {
/*
 * The breakpoint instruction was removed by
 * another cpu right after we hit it; no further
 * handling of this interrupt is appropriate.
*/
ret = 1;
goto no_kprobe;
}
p = __this_cpu_read(current_kprobe);
if (p->break_handler && p->break_handler(p, regs))
goto ss_probe;
}
goto no_kprobe;
}
p = get_kprobe(addr);
if (!p) {
if (addr->word != breakpoint_insn.word) {
/*
* The breakpoint instruction was removed right
* after we hit it. Another cpu has removed
* either a probepoint or a debugger breakpoint
* at this address. In either case, no further
* handling of this interrupt is appropriate.
*/
ret = 1;
}
/* Not one of ours: let kernel handle it */
goto no_kprobe;
}
set_current_kprobe(p, regs, kcb);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
if (p->pre_handler && p->pre_handler(p, regs)) {
/* handler has already set things up, so skip ss setup */
return 1;
}
ss_probe:
prepare_singlestep(p, regs, kcb);
if (kcb->flags & SKIP_DELAYSLOT) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
if (p->post_handler)
p->post_handler(p, regs, 0);
resume_execution(p, regs, kcb);
preempt_enable_no_resched();
} else
kcb->kprobe_status = KPROBE_HIT_SS;
return 1;
no_kprobe:
preempt_enable_no_resched();
return ret;
}
static inline int post_kprobe_handler(struct pt_regs *regs)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
if (!cur)
return 0;
if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
cur->post_handler(cur, regs, 0);
}
resume_execution(cur, regs, kcb);
regs->cp0_status |= kcb->kprobe_saved_SR;
/* Restore back the original saved kprobes variables and continue. */
if (kcb->kprobe_status == KPROBE_REENTER) {
restore_previous_kprobe(kcb);
goto out;
}
reset_current_kprobe();
out:
preempt_enable_no_resched();
return 1;
}
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
return 1;
if (kcb->kprobe_status & KPROBE_HIT_SS) {
resume_execution(cur, regs, kcb);
regs->cp0_status |= kcb->kprobe_old_SR;
reset_current_kprobe();
preempt_enable_no_resched();
}
return 0;
}
/*
* Wrapper routine for handling exceptions.
*/
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
{
struct die_args *args = (struct die_args *)data;
int ret = NOTIFY_DONE;
switch (val) {
case DIE_BREAK:
if (kprobe_handler(args->regs))
ret = NOTIFY_STOP;
break;
case DIE_SSTEPBP:
if (post_kprobe_handler(args->regs))
ret = NOTIFY_STOP;
break;
case DIE_PAGE_FAULT:
/* kprobe_running() needs smp_processor_id() */
preempt_disable();
if (kprobe_running()
&& kprobe_fault_handler(args->regs, args->trapnr))
ret = NOTIFY_STOP;
preempt_enable();
break;
default:
break;
}
return ret;
}
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct jprobe *jp = container_of(p, struct jprobe, kp);
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
kcb->jprobe_saved_regs = *regs;
kcb->jprobe_saved_sp = regs->regs[29];
memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp,
MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
regs->cp0_epc = (unsigned long)(jp->entry);
return 1;
}
/* Defined in the inline asm below. */
void jprobe_return_end(void);
void __kprobes jprobe_return(void)
{
/* Assembler quirk necessitates this '0,code' business. */
asm volatile(
"break 0,%0\n\t"
".globl jprobe_return_end\n"
"jprobe_return_end:\n"
: : "n" (BRK_KPROBE_BP) : "memory");
}
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
if (regs->cp0_epc >= (unsigned long)jprobe_return &&
regs->cp0_epc <= (unsigned long)jprobe_return_end) {
*regs = kcb->jprobe_saved_regs;
memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack,
MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
preempt_enable_no_resched();
return 1;
}
return 0;
}
/*
* Function return probe trampoline:
* - init_kprobes() establishes a probepoint here
* - When the probed function returns, this probe causes the
* handlers to fire
*/
static void __used kretprobe_trampoline_holder(void)
{
asm volatile(
".set push\n\t"
/* Keep the assembler from reordering and placing JR here. */
".set noreorder\n\t"
"nop\n\t"
".global kretprobe_trampoline\n"
"kretprobe_trampoline:\n\t"
"nop\n\t"
".set pop"
: : : "memory");
}
void kretprobe_trampoline(void);
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *) regs->regs[31];
/* Replace the return addr with trampoline addr */
regs->regs[31] = (unsigned long)kretprobe_trampoline;
}
/*
* Called when the probe at kretprobe trampoline is hit
*/
static int __kprobes trampoline_probe_handler(struct kprobe *p,
struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head, empty_rp;
struct hlist_node *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;
INIT_HLIST_HEAD(&empty_rp);
kretprobe_hash_lock(current, &head, &flags);
/*
* It is possible to have multiple instances associated with a given
 * task either because multiple functions in the call path
 * have a return probe installed on them, and/or more than one
 * return probe was registered for a target function.
*
* We can handle this because:
* - instances are always inserted at the head of the list
* - when multiple return probes are registered for the same
* function, the first instance's ret_addr will point to the
* real return address, and all the rest will point to
* kretprobe_trampoline
*/
hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
if (ri->rp && ri->rp->handler)
ri->rp->handler(ri, regs);
orig_ret_address = (unsigned long)ri->ret_addr;
recycle_rp_inst(ri, &empty_rp);
if (orig_ret_address != trampoline_address)
/*
* This is the real return address. Any other
* instances associated with this task are for
* other calls deeper on the call stack
*/
break;
}
kretprobe_assert(ri, orig_ret_address, trampoline_address);
instruction_pointer(regs) = orig_ret_address;
reset_current_kprobe();
kretprobe_hash_unlock(current, &flags);
preempt_enable_no_resched();
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
}
/*
* By returning a non-zero value, we are telling
* kprobe_handler() that we don't want the post_handler
* to run (and have re-enabled preemption)
*/
return 1;
}
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
return 1;
return 0;
}
static struct kprobe trampoline_p = {
.addr = (kprobe_opcode_t *)kretprobe_trampoline,
.pre_handler = trampoline_probe_handler
};
int __init arch_init_kprobes(void)
{
return register_kprobe(&trampoline_p);
}

167
arch/mips/kernel/linux32.c Normal file

@ -0,0 +1,167 @@
/*
* Conversion between 32-bit and 64-bit native system calls.
*
* Copyright (C) 2000 Silicon Graphics, Inc.
* Written by Ulf Carlsson (ulfc@engr.sgi.com)
*/
#include <linux/compiler.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/highuid.h>
#include <linux/resource.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/times.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/shm.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/icmpv6.h>
#include <linux/syscalls.h>
#include <linux/sysctl.h>
#include <linux/utime.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/dnotify.h>
#include <linux/module.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/compat.h>
#include <linux/vfs.h>
#include <linux/ipc.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/scm.h>
#include <asm/compat-signal.h>
#include <asm/sim.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/mman.h>
/* Use this to get at 32-bit user passed pointers. */
/* The A() macro should be used in places where you e.g.
   have some internal u32 variable and just want to get
   rid of a compiler warning. AA() has to be used in
   places where you want to convert a function argument
   to a 32-bit pointer or when you e.g. access the pt_regs
   structure and want to consider 32-bit registers only.
*/
#define A(__x) ((unsigned long)(__x))
#define AA(__x) ((unsigned long)((int)__x))
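/*
 * O32 userland passes 64-bit syscall arguments in two consecutive 32-bit
 * registers; merge_64() reassembles them into one 64-bit value in the
 * order dictated by the CPU's endianness.
 */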
#ifdef __MIPSEB__
#define merge_64(r1, r2) ((((r1) & 0xffffffffUL) << 32) + ((r2) & 0xffffffffUL))
#endif
#ifdef __MIPSEL__
#define merge_64(r1, r2) ((((r2) & 0xffffffffUL) << 32) + ((r1) & 0xffffffffUL))
#endif
SYSCALL_DEFINE6(32_mmap2, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags, unsigned long, fd,
unsigned long, pgoff)
{
unsigned long error;
error = -EINVAL;
if (pgoff & (~PAGE_MASK >> 12))
goto out;
error = sys_mmap_pgoff(addr, len, prot, flags, fd,
pgoff >> (PAGE_SHIFT-12));
out:
return error;
}
#define RLIM_INFINITY32 0x7fffffff
#define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x)
struct rlimit32 {
int rlim_cur;
int rlim_max;
};
SYSCALL_DEFINE4(32_truncate64, const char __user *, path,
unsigned long, __dummy, unsigned long, a2, unsigned long, a3)
{
return sys_truncate(path, merge_64(a2, a3));
}
SYSCALL_DEFINE4(32_ftruncate64, unsigned long, fd, unsigned long, __dummy,
unsigned long, a2, unsigned long, a3)
{
return sys_ftruncate(fd, merge_64(a2, a3));
}
SYSCALL_DEFINE5(32_llseek, unsigned int, fd, unsigned int, offset_high,
unsigned int, offset_low, loff_t __user *, result,
unsigned int, origin)
{
return sys_llseek(fd, offset_high, offset_low, result, origin);
}
/* From the Single Unix Spec: pread & pwrite act like lseek to pos + op +
lseek back to original location. They fail just like lseek does on
non-seekable files. */
SYSCALL_DEFINE6(32_pread, unsigned long, fd, char __user *, buf, size_t, count,
unsigned long, unused, unsigned long, a4, unsigned long, a5)
{
return sys_pread64(fd, buf, count, merge_64(a4, a5));
}
SYSCALL_DEFINE6(32_pwrite, unsigned int, fd, const char __user *, buf,
size_t, count, u32, unused, u64, a4, u64, a5)
{
return sys_pwrite64(fd, buf, count, merge_64(a4, a5));
}
SYSCALL_DEFINE1(32_personality, unsigned long, personality)
{
unsigned int p = personality & 0xffffffff;
int ret;
if (personality(current->personality) == PER_LINUX32 &&
personality(p) == PER_LINUX)
p = (p & ~PER_MASK) | PER_LINUX32;
ret = sys_personality(p);
if (ret != -1 && personality(ret) == PER_LINUX32)
ret = (ret & ~PER_MASK) | PER_LINUX;
return ret;
}
asmlinkage ssize_t sys32_readahead(int fd, u32 pad0, u64 a2, u64 a3,
size_t count)
{
return sys_readahead(fd, merge_64(a2, a3), count);
}
asmlinkage long sys32_sync_file_range(int fd, int __pad,
unsigned long a2, unsigned long a3,
unsigned long a4, unsigned long a5,
int flags)
{
return sys_sync_file_range(fd,
merge_64(a2, a3), merge_64(a4, a5),
flags);
}
asmlinkage long sys32_fadvise64_64(int fd, int __pad,
unsigned long a2, unsigned long a3,
unsigned long a4, unsigned long a5,
int flags)
{
return sys_fadvise64_64(fd,
merge_64(a2, a3), merge_64(a4, a5),
flags);
}
asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_a2,
unsigned offset_a3, unsigned len_a4, unsigned len_a5)
{
return sys_fallocate(fd, mode, merge_64(offset_a2, offset_a3),
merge_64(len_a4, len_a5));
}


@ -0,0 +1,114 @@
/*
* machine_kexec.c for kexec
* Created by <nschichan@corp.free.fr> on Thu Oct 12 15:15:06 2006
*
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
#include <linux/compiler.h>
#include <linux/kexec.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <asm/cacheflush.h>
#include <asm/page.h>
extern const unsigned char relocate_new_kernel[];
extern const size_t relocate_new_kernel_size;
extern unsigned long kexec_start_address;
extern unsigned long kexec_indirection_page;
int (*_machine_kexec_prepare)(struct kimage *) = NULL;
void (*_machine_kexec_shutdown)(void) = NULL;
void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;
#ifdef CONFIG_SMP
void (*relocated_kexec_smp_wait) (void *);
atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
#endif
int
machine_kexec_prepare(struct kimage *kimage)
{
if (_machine_kexec_prepare)
return _machine_kexec_prepare(kimage);
return 0;
}
void
machine_kexec_cleanup(struct kimage *kimage)
{
}
void
machine_shutdown(void)
{
if (_machine_kexec_shutdown)
_machine_kexec_shutdown();
}
void
machine_crash_shutdown(struct pt_regs *regs)
{
if (_machine_crash_shutdown)
_machine_crash_shutdown(regs);
else
default_machine_crash_shutdown(regs);
}
typedef void (*noretfun_t)(void) __noreturn;
void
machine_kexec(struct kimage *image)
{
unsigned long reboot_code_buffer;
unsigned long entry;
unsigned long *ptr;
reboot_code_buffer =
(unsigned long)page_address(image->control_code_page);
kexec_start_address =
(unsigned long) phys_to_virt(image->start);
if (image->type == KEXEC_TYPE_DEFAULT) {
kexec_indirection_page =
(unsigned long) phys_to_virt(image->head & PAGE_MASK);
} else {
kexec_indirection_page = (unsigned long)&image->head;
}
memcpy((void*)reboot_code_buffer, relocate_new_kernel,
relocate_new_kernel_size);
/*
* The generic kexec code builds a page list with physical
 * addresses. They are directly accessible through KSEG0 (or
 * CKSEG0 or XKPHYS on a 64-bit system), hence the
* phys_to_virt() call.
*/
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
ptr = (entry & IND_INDIRECTION) ?
phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
*ptr & IND_DESTINATION)
*ptr = (unsigned long) phys_to_virt(*ptr);
}
/*
 * We do not want to be bothered by interrupts from here on.
*/
local_irq_disable();
printk("Will call new kernel at %08lx\n", image->start);
printk("Bye ...\n");
__flush_cache_all();
#ifdef CONFIG_SMP
/* All secondary cpus now may jump to kexec_wait cycle */
relocated_kexec_smp_wait = reboot_code_buffer +
(void *)(kexec_smp_wait - relocate_new_kernel);
smp_wmb();
atomic_set(&kexec_ready_to_reboot, 1);
#endif
((noretfun_t) reboot_code_buffer)();
}

220
arch/mips/kernel/mcount.S Normal file

@ -0,0 +1,220 @@
/*
* MIPS specific _mcount support
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive for
* more details.
*
* Copyright (C) 2009 Lemote Inc. & DSLab, Lanzhou University, China
* Copyright (C) 2010 DSLab, Lanzhou University, China
* Author: Wu Zhangjin <wuzhangjin@gmail.com>
*/
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/ftrace.h>
.text
.set noreorder
.set noat
.macro MCOUNT_SAVE_REGS
PTR_SUBU sp, PT_SIZE
PTR_S ra, PT_R31(sp)
PTR_S AT, PT_R1(sp)
PTR_S a0, PT_R4(sp)
PTR_S a1, PT_R5(sp)
PTR_S a2, PT_R6(sp)
PTR_S a3, PT_R7(sp)
#ifdef CONFIG_64BIT
PTR_S a4, PT_R8(sp)
PTR_S a5, PT_R9(sp)
PTR_S a6, PT_R10(sp)
PTR_S a7, PT_R11(sp)
#endif
.endm
.macro MCOUNT_RESTORE_REGS
PTR_L ra, PT_R31(sp)
PTR_L AT, PT_R1(sp)
PTR_L a0, PT_R4(sp)
PTR_L a1, PT_R5(sp)
PTR_L a2, PT_R6(sp)
PTR_L a3, PT_R7(sp)
#ifdef CONFIG_64BIT
PTR_L a4, PT_R8(sp)
PTR_L a5, PT_R9(sp)
PTR_L a6, PT_R10(sp)
PTR_L a7, PT_R11(sp)
#endif
PTR_ADDIU sp, PT_SIZE
.endm
.macro RETURN_BACK
jr ra
move ra, AT
.endm
/*
* The -mmcount-ra-address option of gcc 4.5 uses register $12 to pass
* the location of the parent's return address.
*/
#define MCOUNT_RA_ADDRESS_REG $12
#ifdef CONFIG_DYNAMIC_FTRACE
NESTED(ftrace_caller, PT_SIZE, ra)
.globl _mcount
_mcount:
b ftrace_stub
#ifdef CONFIG_32BIT
addiu sp,sp,8
#else
nop
#endif
/* When tracing is activated, it calls ftrace_caller+8 (aka here) */
MCOUNT_SAVE_REGS
#ifdef KBUILD_MCOUNT_RA_ADDRESS
PTR_S MCOUNT_RA_ADDRESS_REG, PT_R12(sp)
#endif
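	/*
	 * Call sites inside the core kernel text (_stext.._etext) branch
	 * straight to ftrace_call below; anything outside that range came
	 * from a module and gets the larger callsite adjustment first.
	 */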
PTR_SUBU a0, ra, 8 /* arg1: self address */
PTR_LA t1, _stext
sltu t2, a0, t1 /* t2 = (a0 < _stext) */
PTR_LA t1, _etext
sltu t3, t1, a0 /* t3 = (a0 > _etext) */
or t1, t2, t3
beqz t1, ftrace_call
nop
#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
PTR_SUBU a0, a0, 16 /* arg1: adjust to module's recorded callsite */
#else
PTR_SUBU a0, a0, 12
#endif
.globl ftrace_call
ftrace_call:
nop /* a placeholder for the call to a real tracing function */
move a1, AT /* arg2: parent's return address */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
nop
nop
#endif
MCOUNT_RESTORE_REGS
.globl ftrace_stub
ftrace_stub:
RETURN_BACK
END(ftrace_caller)
#else /* ! CONFIG_DYNAMIC_FTRACE */
NESTED(_mcount, PT_SIZE, ra)
PTR_LA t1, ftrace_stub
PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */
bne t1, t2, static_trace
nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
PTR_L t3, ftrace_graph_return
bne t1, t3, ftrace_graph_caller
nop
PTR_LA t1, ftrace_graph_entry_stub
PTR_L t3, ftrace_graph_entry
bne t1, t3, ftrace_graph_caller
nop
#endif
b ftrace_stub
#ifdef CONFIG_32BIT
addiu sp, sp, 8
#else
nop
#endif
static_trace:
MCOUNT_SAVE_REGS
move a0, ra /* arg1: self return address */
jalr t2 /* (1) call *ftrace_trace_function */
move a1, AT /* arg2: parent's return address */
MCOUNT_RESTORE_REGS
#ifdef CONFIG_32BIT
addiu sp, sp, 8
#endif
.globl ftrace_stub
ftrace_stub:
RETURN_BACK
END(_mcount)
#endif /* ! CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
NESTED(ftrace_graph_caller, PT_SIZE, ra)
#ifndef CONFIG_DYNAMIC_FTRACE
MCOUNT_SAVE_REGS
#endif
/* arg1: Get the location of the parent's return address */
#ifdef KBUILD_MCOUNT_RA_ADDRESS
#ifdef CONFIG_DYNAMIC_FTRACE
PTR_L a0, PT_R12(sp)
#else
move a0, MCOUNT_RA_ADDRESS_REG
#endif
bnez a0, 1f /* non-leaf func: stored in MCOUNT_RA_ADDRESS_REG */
nop
#endif
PTR_LA a0, PT_R1(sp) /* leaf func: the location in current stack */
1:
/* arg2: Get self return address */
#ifdef CONFIG_DYNAMIC_FTRACE
PTR_L a1, PT_R31(sp)
#else
move a1, ra
#endif
/* arg3: Get frame pointer of current stack */
#ifdef CONFIG_64BIT
PTR_LA a2, PT_SIZE(sp)
#else
PTR_LA a2, (PT_SIZE+8)(sp)
#endif
jal prepare_ftrace_return
nop
MCOUNT_RESTORE_REGS
#ifndef CONFIG_DYNAMIC_FTRACE
#ifdef CONFIG_32BIT
addiu sp, sp, 8
#endif
#endif
RETURN_BACK
END(ftrace_graph_caller)
.align 2
.globl return_to_handler
return_to_handler:
PTR_SUBU sp, PT_SIZE
PTR_S v0, PT_R2(sp)
jal ftrace_return_to_handler
PTR_S v1, PT_R3(sp)
/* restore the real parent address: v0 -> ra */
move ra, v0
PTR_L v0, PT_R2(sp)
PTR_L v1, PT_R3(sp)
jr ra
PTR_ADDIU sp, PT_SIZE
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
.set at
.set reorder

121
arch/mips/kernel/mips-cm.c Normal file

@ -0,0 +1,121 @@
/*
* Copyright (C) 2013 Imagination Technologies
* Author: Paul Burton <paul.burton@imgtec.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/errno.h>
#include <asm/mips-cm.h>
#include <asm/mipsregs.h>
void __iomem *mips_cm_base;
void __iomem *mips_cm_l2sync_base;
phys_t __mips_cm_phys_base(void)
{
u32 config3 = read_c0_config3();
u32 cmgcr;
/* Check the CMGCRBase register is implemented */
if (!(config3 & MIPS_CONF3_CMGCR))
return 0;
/* Read the address from CMGCRBase */
cmgcr = read_c0_cmgcrbase();
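	/*
	 * CMGCRBase holds physical address bits 35:15 of the GCR block in
	 * its upper register bits, so shifting the masked value left by 4
	 * yields the 36-bit physical base address.
	 */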
return (cmgcr & MIPS_CMGCRF_BASE) << (36 - 32);
}
phys_t mips_cm_phys_base(void)
__attribute__((weak, alias("__mips_cm_phys_base")));
phys_t __mips_cm_l2sync_phys_base(void)
{
u32 base_reg;
/*
 * If the L2-only sync region is already enabled then leave it at its
 * current location.
*/
base_reg = read_gcr_l2_only_sync_base();
if (base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_MSK)
return base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_MSK;
/* Default to following the CM */
return mips_cm_phys_base() + MIPS_CM_GCR_SIZE;
}
phys_t mips_cm_l2sync_phys_base(void)
__attribute__((weak, alias("__mips_cm_l2sync_phys_base")));
static void mips_cm_probe_l2sync(void)
{
unsigned major_rev;
phys_t addr;
/* L2-only sync was introduced with CM major revision 6 */
major_rev = (read_gcr_rev() & CM_GCR_REV_MAJOR_MSK) >>
CM_GCR_REV_MAJOR_SHF;
if (major_rev < 6)
return;
/* Find a location for the L2 sync region */
addr = mips_cm_l2sync_phys_base();
BUG_ON((addr & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_MSK) != addr);
if (!addr)
return;
/* Set the region base address & enable it */
write_gcr_l2_only_sync_base(addr | CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_MSK);
/* Map the region */
mips_cm_l2sync_base = ioremap_nocache(addr, MIPS_CM_L2SYNC_SIZE);
}
int mips_cm_probe(void)
{
phys_t addr;
u32 base_reg;
addr = mips_cm_phys_base();
BUG_ON((addr & CM_GCR_BASE_GCRBASE_MSK) != addr);
if (!addr)
return -ENODEV;
mips_cm_base = ioremap_nocache(addr, MIPS_CM_GCR_SIZE);
if (!mips_cm_base)
return -ENXIO;
/* sanity check that we're looking at a CM */
base_reg = read_gcr_base();
if ((base_reg & CM_GCR_BASE_GCRBASE_MSK) != addr) {
pr_err("GCRs appear to have been moved (expected them at 0x%08lx)!\n",
(unsigned long)addr);
mips_cm_base = NULL;
return -ENODEV;
}
/* set default target to memory */
base_reg &= ~CM_GCR_BASE_CMDEFTGT_MSK;
base_reg |= CM_GCR_BASE_CMDEFTGT_MEM;
write_gcr_base(base_reg);
/* disable CM regions */
write_gcr_reg0_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
write_gcr_reg0_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
write_gcr_reg1_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
write_gcr_reg1_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
write_gcr_reg2_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
write_gcr_reg2_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
write_gcr_reg3_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
write_gcr_reg3_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
/* probe for an L2-only sync region */
mips_cm_probe_l2sync();
return 0;
}
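/*
 * Hedged usage sketch: platform setup code would typically call
 * mips_cm_probe() once during early boot (e.g. from its plat_mem_setup()
 * path) and simply carry on without CM features when it fails.
 * my_platform_cm_init() is invented for the example; only mips_cm_probe()
 * and mips_cm_base come from the code above.
 */
#include <linux/init.h>
#include <linux/printk.h>
#include <asm/mips-cm.h>

static int __init my_platform_cm_init(void)
{
	if (mips_cm_probe()) {
		pr_info("no Coherence Manager found, continuing without it\n");
		return 0;
	}

	pr_info("Coherence Manager GCRs mapped via mips_cm_base\n");
	return 0;
}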

View file

@ -0,0 +1,80 @@
/*
* Copyright (C) 2013 Imagination Technologies
* Author: Paul Burton <paul.burton@imgtec.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <asm/mips-cm.h>
#include <asm/mips-cpc.h>
void __iomem *mips_cpc_base;
static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
phys_t __weak mips_cpc_phys_base(void)
{
u32 cpc_base;
if (!mips_cm_present())
return 0;
if (!(read_gcr_cpc_status() & CM_GCR_CPC_STATUS_EX_MSK))
return 0;
/* If the CPC is already enabled, leave it so */
cpc_base = read_gcr_cpc_base();
if (cpc_base & CM_GCR_CPC_BASE_CPCEN_MSK)
return cpc_base & CM_GCR_CPC_BASE_CPCBASE_MSK;
/* Otherwise, give it the default address & enable it */
cpc_base = mips_cpc_default_phys_base();
write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN_MSK);
return cpc_base;
}
int mips_cpc_probe(void)
{
phys_t addr;
unsigned cpu;
for_each_possible_cpu(cpu)
spin_lock_init(&per_cpu(cpc_core_lock, cpu));
addr = mips_cpc_phys_base();
if (!addr)
return -ENODEV;
mips_cpc_base = ioremap_nocache(addr, 0x8000);
if (!mips_cpc_base)
return -ENXIO;
return 0;
}
void mips_cpc_lock_other(unsigned int core)
{
unsigned curr_core;
preempt_disable();
curr_core = current_cpu_data.core;
spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
per_cpu(cpc_core_lock_flags, curr_core));
write_cpc_cl_other(core << CPC_Cx_OTHER_CORENUM_SHF);
}
void mips_cpc_unlock_other(void)
{
unsigned curr_core = current_cpu_data.core;
spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
per_cpu(cpc_core_lock_flags, curr_core));
preempt_enable();
}
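/*
 * Hedged usage sketch: every access to the CPC "core-other" register block
 * is bracketed by the lock/unlock pair above so that two VPEs on the same
 * core cannot redirect the window underneath each other.  Assumes the
 * mips_cpc_present() helper, the write_cpc_co_cmd() accessor and the
 * CPC_Cx_CMD_RESET definition from <asm/mips-cpc.h>; reset_remote_core()
 * is invented for the example.
 */
#include <asm/mips-cpc.h>

static void reset_remote_core(unsigned int core)
{
	if (!mips_cpc_present())	/* CPC probed successfully? */
		return;

	mips_cpc_lock_other(core);		/* select the target core */
	write_cpc_co_cmd(CPC_Cx_CMD_RESET);	/* issue a reset command */
	mips_cpc_unlock_other();		/* release the selection */
}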

View file

@ -0,0 +1,214 @@
/*
* General MIPS MT support routines, usable in AP/SP and SMVP.
* Copyright (C) 2005 Mips Technologies, Inc
*/
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/types.h>
#include <asm/uaccess.h>
/*
* CPU mask used to set process affinity for MT VPEs/TCs with FPUs
*/
cpumask_t mt_fpu_cpumask;
static int fpaff_threshold = -1;
unsigned long mt_fpemul_threshold;
/*
* Replacement functions for the sys_sched_setaffinity() and
* sys_sched_getaffinity() system calls, so that we can integrate
* FPU affinity with the user's requested processor affinity.
* This code is 98% identical with the sys_sched_setaffinity()
* and sys_sched_getaffinity() system calls, and should be
* updated when kernel/sched/core.c changes.
*/
/*
* find_process_by_pid - find a process with a matching PID value.
* used in sys_sched_set/getaffinity() in kernel/sched/core.c, so
* cloned here.
*/
static inline struct task_struct *find_process_by_pid(pid_t pid)
{
return pid ? find_task_by_vpid(pid) : current;
}
/*
* check that the target process has a UID that matches the current process's
*/
static bool check_same_owner(struct task_struct *p)
{
const struct cred *cred = current_cred(), *pcred;
bool match;
rcu_read_lock();
pcred = __task_cred(p);
match = (uid_eq(cred->euid, pcred->euid) ||
uid_eq(cred->euid, pcred->uid));
rcu_read_unlock();
return match;
}
/*
* mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
*/
asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
unsigned long __user *user_mask_ptr)
{
cpumask_var_t cpus_allowed, new_mask, effective_mask;
struct thread_info *ti;
struct task_struct *p;
int retval;
if (len < sizeof(new_mask))
return -EINVAL;
if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
return -EFAULT;
get_online_cpus();
rcu_read_lock();
p = find_process_by_pid(pid);
if (!p) {
rcu_read_unlock();
put_online_cpus();
return -ESRCH;
}
/* Prevent p going away */
get_task_struct(p);
rcu_read_unlock();
if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
retval = -ENOMEM;
goto out_put_task;
}
if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
retval = -ENOMEM;
goto out_free_cpus_allowed;
}
if (!alloc_cpumask_var(&effective_mask, GFP_KERNEL)) {
retval = -ENOMEM;
goto out_free_new_mask;
}
retval = -EPERM;
if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
goto out_unlock;
retval = security_task_setscheduler(p);
if (retval)
goto out_unlock;
/* Record new user-specified CPU set for future reference */
cpumask_copy(&p->thread.user_cpus_allowed, new_mask);
again:
/* Compute new global allowed CPU set if necessary */
ti = task_thread_info(p);
if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
cpus_intersects(*new_mask, mt_fpu_cpumask)) {
cpus_and(*effective_mask, *new_mask, mt_fpu_cpumask);
retval = set_cpus_allowed_ptr(p, effective_mask);
} else {
cpumask_copy(effective_mask, new_mask);
clear_ti_thread_flag(ti, TIF_FPUBOUND);
retval = set_cpus_allowed_ptr(p, new_mask);
}
if (!retval) {
cpuset_cpus_allowed(p, cpus_allowed);
if (!cpumask_subset(effective_mask, cpus_allowed)) {
/*
* We must have raced with a concurrent cpuset
* update. Just reset the cpus_allowed to the
* cpuset's cpus_allowed
*/
cpumask_copy(new_mask, cpus_allowed);
goto again;
}
}
out_unlock:
free_cpumask_var(effective_mask);
out_free_new_mask:
free_cpumask_var(new_mask);
out_free_cpus_allowed:
free_cpumask_var(cpus_allowed);
out_put_task:
put_task_struct(p);
put_online_cpus();
return retval;
}
/*
* mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
*/
asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
unsigned long __user *user_mask_ptr)
{
unsigned int real_len;
cpumask_t mask;
int retval;
struct task_struct *p;
real_len = sizeof(mask);
if (len < real_len)
return -EINVAL;
get_online_cpus();
read_lock(&tasklist_lock);
retval = -ESRCH;
p = find_process_by_pid(pid);
if (!p)
goto out_unlock;
retval = security_task_getscheduler(p);
if (retval)
goto out_unlock;
cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
out_unlock:
read_unlock(&tasklist_lock);
put_online_cpus();
if (retval)
return retval;
if (copy_to_user(user_mask_ptr, &mask, real_len))
return -EFAULT;
return real_len;
}
static int __init fpaff_thresh(char *str)
{
get_option(&str, &fpaff_threshold);
return 1;
}
__setup("fpaff=", fpaff_thresh);
/*
* FPU Use Factor empirically derived from experiments on 34K
*/
#define FPUSEFACTOR 2000
static __init int mt_fp_affinity_init(void)
{
if (fpaff_threshold >= 0) {
mt_fpemul_threshold = fpaff_threshold;
} else {
mt_fpemul_threshold =
(FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ;
}
printk(KERN_DEBUG "FPU Affinity set after %ld emulations\n",
mt_fpemul_threshold);
return 0;
}
arch_initcall(mt_fp_affinity_init);
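/*
 * Illustration only: the threshold computed above, evaluated for one
 * assumed configuration (HZ = 100 and loops_per_jiffy = 500000, i.e.
 * roughly a 100 BogoMIPS CPU).  The numbers are invented for the example;
 * only the formula is taken from mt_fp_affinity_init() above.
 */
#include <stdio.h>

int main(void)
{
	unsigned long hz = 100, loops_per_jiffy = 500000, fpusefactor = 2000;
	unsigned long threshold;

	threshold = (fpusefactor * (loops_per_jiffy / (500000 / hz))) / hz;
	printf("FPU Affinity set after %lu emulations\n", threshold); /* 2000 */
	return 0;
}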

315
arch/mips/kernel/mips-mt.c Normal file
View file

@ -0,0 +1,315 @@
/*
* General MIPS MT support routines, usable in AP/SP and SMVP.
* Copyright (C) 2005 Mips Technologies, Inc
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/security.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <linux/atomic.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/mipsmtregs.h>
#include <asm/r4kcache.h>
#include <asm/cacheflush.h>
int vpelimit;
static int __init maxvpes(char *str)
{
get_option(&str, &vpelimit);
return 1;
}
__setup("maxvpes=", maxvpes);
int tclimit;
static int __init maxtcs(char *str)
{
get_option(&str, &tclimit);
return 1;
}
__setup("maxtcs=", maxtcs);
/*
* Dump new MIPS MT state for the core. Does not leave TCs halted.
* Takes an argument which is taken to be a pre-call MVPControl value.
*/
void mips_mt_regdump(unsigned long mvpctl)
{
unsigned long flags;
unsigned long vpflags;
unsigned long mvpconf0;
int nvpe;
int ntc;
int i;
int tc;
unsigned long haltval;
unsigned long tcstatval;
local_irq_save(flags);
vpflags = dvpe();
printk("=== MIPS MT State Dump ===\n");
printk("-- Global State --\n");
printk(" MVPControl Passed: %08lx\n", mvpctl);
printk(" MVPControl Read: %08lx\n", vpflags);
printk(" MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0()));
nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
printk("-- per-VPE State --\n");
for (i = 0; i < nvpe; i++) {
for (tc = 0; tc < ntc; tc++) {
settc(tc);
if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
printk(" VPE %d\n", i);
printk(" VPEControl : %08lx\n",
read_vpe_c0_vpecontrol());
printk(" VPEConf0 : %08lx\n",
read_vpe_c0_vpeconf0());
printk(" VPE%d.Status : %08lx\n",
i, read_vpe_c0_status());
printk(" VPE%d.EPC : %08lx %pS\n",
i, read_vpe_c0_epc(),
(void *) read_vpe_c0_epc());
printk(" VPE%d.Cause : %08lx\n",
i, read_vpe_c0_cause());
printk(" VPE%d.Config7 : %08lx\n",
i, read_vpe_c0_config7());
break; /* Next VPE */
}
}
}
printk("-- per-TC State --\n");
for (tc = 0; tc < ntc; tc++) {
settc(tc);
if (read_tc_c0_tcbind() == read_c0_tcbind()) {
/* Are we dumping ourself? */
haltval = 0; /* Then we're not halted, and mustn't be */
tcstatval = flags; /* And pre-dump TCStatus is flags */
printk(" TC %d (current TC with VPE EPC above)\n", tc);
} else {
haltval = read_tc_c0_tchalt();
write_tc_c0_tchalt(1);
tcstatval = read_tc_c0_tcstatus();
printk(" TC %d\n", tc);
}
printk(" TCStatus : %08lx\n", tcstatval);
printk(" TCBind : %08lx\n", read_tc_c0_tcbind());
printk(" TCRestart : %08lx %pS\n",
read_tc_c0_tcrestart(), (void *) read_tc_c0_tcrestart());
printk(" TCHalt : %08lx\n", haltval);
printk(" TCContext : %08lx\n", read_tc_c0_tccontext());
if (!haltval)
write_tc_c0_tchalt(0);
}
printk("===========================\n");
evpe(vpflags);
local_irq_restore(flags);
}
static int mt_opt_norps;
static int mt_opt_rpsctl = -1;
static int mt_opt_nblsu = -1;
static int mt_opt_forceconfig7;
static int mt_opt_config7 = -1;
static int __init rps_disable(char *s)
{
mt_opt_norps = 1;
return 1;
}
__setup("norps", rps_disable);
static int __init rpsctl_set(char *str)
{
get_option(&str, &mt_opt_rpsctl);
return 1;
}
__setup("rpsctl=", rpsctl_set);
static int __init nblsu_set(char *str)
{
get_option(&str, &mt_opt_nblsu);
return 1;
}
__setup("nblsu=", nblsu_set);
static int __init config7_set(char *str)
{
get_option(&str, &mt_opt_config7);
mt_opt_forceconfig7 = 1;
return 1;
}
__setup("config7=", config7_set);
/* Experimental cache flush control parameters that should go away some day */
int mt_protiflush;
int mt_protdflush;
int mt_n_iflushes = 1;
int mt_n_dflushes = 1;
static int __init set_protiflush(char *s)
{
mt_protiflush = 1;
return 1;
}
__setup("protiflush", set_protiflush);
static int __init set_protdflush(char *s)
{
mt_protdflush = 1;
return 1;
}
__setup("protdflush", set_protdflush);
static int __init niflush(char *s)
{
get_option(&s, &mt_n_iflushes);
return 1;
}
__setup("niflush=", niflush);
static int __init ndflush(char *s)
{
get_option(&s, &mt_n_dflushes);
return 1;
}
__setup("ndflush=", ndflush);
static unsigned int itc_base;
static int __init set_itc_base(char *str)
{
get_option(&str, &itc_base);
return 1;
}
__setup("itcbase=", set_itc_base);
void mips_mt_set_cpuoptions(void)
{
unsigned int oconfig7 = read_c0_config7();
unsigned int nconfig7 = oconfig7;
if (mt_opt_norps) {
printk("\"norps\" option deprecated: use \"rpsctl=\"\n");
}
if (mt_opt_rpsctl >= 0) {
printk("34K return prediction stack override set to %d.\n",
mt_opt_rpsctl);
if (mt_opt_rpsctl)
nconfig7 |= (1 << 2);
else
nconfig7 &= ~(1 << 2);
}
if (mt_opt_nblsu >= 0) {
printk("34K ALU/LSU sync override set to %d.\n", mt_opt_nblsu);
if (mt_opt_nblsu)
nconfig7 |= (1 << 5);
else
nconfig7 &= ~(1 << 5);
}
if (mt_opt_forceconfig7) {
printk("CP0.Config7 forced to 0x%08x.\n", mt_opt_config7);
nconfig7 = mt_opt_config7;
}
if (oconfig7 != nconfig7) {
__asm__ __volatile("sync");
write_c0_config7(nconfig7);
ehb();
printk("Config7: 0x%08x\n", read_c0_config7());
}
/* Report Cache management debug options */
if (mt_protiflush)
printk("I-cache flushes single-threaded\n");
if (mt_protdflush)
printk("D-cache flushes single-threaded\n");
if (mt_n_iflushes != 1)
printk("I-Cache Flushes Repeated %d times\n", mt_n_iflushes);
if (mt_n_dflushes != 1)
printk("D-Cache Flushes Repeated %d times\n", mt_n_dflushes);
if (itc_base != 0) {
/*
* Configure ITC mapping. This code is very
* specific to the 34K core family, which uses
* a special mode bit ("ITC") in the ErrCtl
* register to enable access to ITC control
* registers via cache "tag" operations.
*/
unsigned long ectlval;
unsigned long itcblkgrn;
/* ErrCtl register is known as "ecc" to Linux */
ectlval = read_c0_ecc();
write_c0_ecc(ectlval | (0x1 << 26));
ehb();
#define INDEX_0 (0x80000000)
#define INDEX_8 (0x80000008)
/* Read "cache tag" for Dcache pseudo-index 8 */
cache_op(Index_Load_Tag_D, INDEX_8);
ehb();
itcblkgrn = read_c0_dtaglo();
itcblkgrn &= 0xfffe0000;
/* Set for 128 byte pitch of ITC cells */
itcblkgrn |= 0x00000c00;
/* Stage in Tag register */
write_c0_dtaglo(itcblkgrn);
ehb();
/* Write out to ITU with CACHE op */
cache_op(Index_Store_Tag_D, INDEX_8);
/* Now set base address, and turn ITC on with 0x1 bit */
write_c0_dtaglo((itc_base & 0xfffffc00) | 0x1 );
ehb();
/* Write out to ITU with CACHE op */
cache_op(Index_Store_Tag_D, INDEX_0);
write_c0_ecc(ectlval);
ehb();
printk("Mapped %ld ITC cells starting at 0x%08x\n",
((itcblkgrn & 0x7fe00000) >> 20), itc_base);
}
}
/*
* Function to protect cache flushes from concurrent execution
* depends on MP software model chosen.
*/
void mt_cflush_lockdown(void)
{
/* FILL IN VSMP and AP/SP VERSIONS HERE */
}
void mt_cflush_release(void)
{
/* FILL IN VSMP and AP/SP VERSIONS HERE */
}
struct class *mt_class;
static int __init mt_init(void)
{
struct class *mtc;
mtc = class_create(THIS_MODULE, "mt");
if (IS_ERR(mtc))
return PTR_ERR(mtc);
mt_class = mtc;
return 0;
}
subsys_initcall(mt_init);

View file

@ -0,0 +1,94 @@
/*
* Export MIPS-specific functions needed for loadable modules.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1996, 97, 98, 99, 2000, 01, 03, 04, 05, 12 by Ralf Baechle
* Copyright (C) 1999, 2000, 01 Silicon Graphics, Inc.
*/
#include <linux/interrupt.h>
#include <linux/export.h>
#include <asm/checksum.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/ftrace.h>
#include <asm/fpu.h>
#include <asm/msa.h>
extern void *__bzero(void *__s, size_t __count);
extern long __strncpy_from_kernel_nocheck_asm(char *__to,
const char *__from, long __len);
extern long __strncpy_from_kernel_asm(char *__to, const char *__from,
long __len);
extern long __strncpy_from_user_nocheck_asm(char *__to,
const char *__from, long __len);
extern long __strncpy_from_user_asm(char *__to, const char *__from,
long __len);
extern long __strlen_kernel_nocheck_asm(const char *s);
extern long __strlen_kernel_asm(const char *s);
extern long __strlen_user_nocheck_asm(const char *s);
extern long __strlen_user_asm(const char *s);
extern long __strnlen_kernel_nocheck_asm(const char *s);
extern long __strnlen_kernel_asm(const char *s);
extern long __strnlen_user_nocheck_asm(const char *s);
extern long __strnlen_user_asm(const char *s);
/*
* Core architecture code
*/
EXPORT_SYMBOL_GPL(_save_fp);
#ifdef CONFIG_CPU_HAS_MSA
EXPORT_SYMBOL_GPL(_save_msa);
#endif
/*
* String functions
*/
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
/*
* Functions that operate on entire pages. Mostly used by memory management.
*/
EXPORT_SYMBOL(clear_page);
EXPORT_SYMBOL(copy_page);
/*
* Userspace access stuff.
*/
EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(__copy_user_inatomic);
#ifdef CONFIG_EVA
EXPORT_SYMBOL(__copy_from_user_eva);
EXPORT_SYMBOL(__copy_in_user_eva);
EXPORT_SYMBOL(__copy_to_user_eva);
EXPORT_SYMBOL(__copy_user_inatomic_eva);
#endif
EXPORT_SYMBOL(__bzero);
EXPORT_SYMBOL(__strncpy_from_kernel_nocheck_asm);
EXPORT_SYMBOL(__strncpy_from_kernel_asm);
EXPORT_SYMBOL(__strncpy_from_user_nocheck_asm);
EXPORT_SYMBOL(__strncpy_from_user_asm);
EXPORT_SYMBOL(__strlen_kernel_nocheck_asm);
EXPORT_SYMBOL(__strlen_kernel_asm);
EXPORT_SYMBOL(__strlen_user_nocheck_asm);
EXPORT_SYMBOL(__strlen_user_asm);
EXPORT_SYMBOL(__strnlen_kernel_nocheck_asm);
EXPORT_SYMBOL(__strnlen_kernel_asm);
EXPORT_SYMBOL(__strnlen_user_nocheck_asm);
EXPORT_SYMBOL(__strnlen_user_asm);
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_nocheck);
EXPORT_SYMBOL(__csum_partial_copy_kernel);
EXPORT_SYMBOL(__csum_partial_copy_to_user);
EXPORT_SYMBOL(__csum_partial_copy_from_user);
EXPORT_SYMBOL(invalid_pte_table);
#ifdef CONFIG_FUNCTION_TRACER
/* _mcount is defined in arch/mips/kernel/mcount.S */
EXPORT_SYMBOL(_mcount);
#endif

View file

@ -0,0 +1,66 @@
/*
* Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
*/
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/mips_machine.h>
#include <asm/prom.h>
static struct mips_machine *mips_machine __initdata;
#define for_each_machine(mach) \
for ((mach) = (struct mips_machine *)&__mips_machines_start; \
(mach) && \
(unsigned long)(mach) < (unsigned long)&__mips_machines_end; \
(mach)++)
__init int mips_machtype_setup(char *id)
{
struct mips_machine *mach;
for_each_machine(mach) {
if (mach->mach_id == NULL)
continue;
if (strcmp(mach->mach_id, id) == 0) {
mips_machtype = mach->mach_type;
return 0;
}
}
pr_err("MIPS: no machine found for id '%s', supported machines:\n", id);
pr_err("%-24s %s\n", "id", "name");
for_each_machine(mach)
pr_err("%-24s %s\n", mach->mach_id, mach->mach_name);
return 1;
}
__setup("machtype=", mips_machtype_setup);
__init void mips_machine_setup(void)
{
struct mips_machine *mach;
for_each_machine(mach) {
if (mips_machtype == mach->mach_type) {
mips_machine = mach;
break;
}
}
if (!mips_machine)
return;
mips_set_machine_name(mips_machine->mach_name);
if (mips_machine->mach_setup)
mips_machine->mach_setup();
}
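/*
 * Hedged registration sketch: a board announces itself with the
 * MIPS_MACHINE() macro from <asm/mips_machine.h>, which drops an entry
 * into the __mips_machines section walked by for_each_machine() above so
 * that "machtype=example,board" on the command line selects it.  The
 * machine type constant, id string, name and setup function below are all
 * invented for the example.
 */
#include <linux/init.h>
#include <asm/mips_machine.h>

static void __init example_board_setup(void)
{
	/* board specific fixups would go here */
}

MIPS_MACHINE(MACH_EXAMPLE_BOARD,	/* assumed machine type constant */
	     "example,board",		/* matched against machtype= */
	     "Example Reference Board",
	     example_board_setup);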

View file

@ -0,0 +1,145 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Copyright (C) 2001 Rusty Russell.
* Copyright (C) 2003, 2004 Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 2005 Thiemo Seufer
*/
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/moduleloader.h>
extern int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v);
static int apply_r_mips_32_rela(struct module *me, u32 *location, Elf_Addr v)
{
*location = v;
return 0;
}
static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v)
{
if (v % 4) {
pr_err("module %s: dangerous R_MIPS_26 RELArelocation\n",
me->name);
return -ENOEXEC;
}
if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
printk(KERN_ERR
"module %s: relocation overflow\n",
me->name);
return -ENOEXEC;
}
*location = (*location & ~0x03ffffff) | ((v >> 2) & 0x03ffffff);
return 0;
}
static int apply_r_mips_hi16_rela(struct module *me, u32 *location, Elf_Addr v)
{
*location = (*location & 0xffff0000) |
((((long long) v + 0x8000LL) >> 16) & 0xffff);
return 0;
}
static int apply_r_mips_lo16_rela(struct module *me, u32 *location, Elf_Addr v)
{
*location = (*location & 0xffff0000) | (v & 0xffff);
return 0;
}
static int apply_r_mips_64_rela(struct module *me, u32 *location, Elf_Addr v)
{
*(Elf_Addr *)location = v;
return 0;
}
static int apply_r_mips_higher_rela(struct module *me, u32 *location,
Elf_Addr v)
{
*location = (*location & 0xffff0000) |
((((long long) v + 0x80008000LL) >> 32) & 0xffff);
return 0;
}
static int apply_r_mips_highest_rela(struct module *me, u32 *location,
Elf_Addr v)
{
*location = (*location & 0xffff0000) |
((((long long) v + 0x800080008000LL) >> 48) & 0xffff);
return 0;
}
static int (*reloc_handlers_rela[]) (struct module *me, u32 *location,
Elf_Addr v) = {
[R_MIPS_NONE] = apply_r_mips_none,
[R_MIPS_32] = apply_r_mips_32_rela,
[R_MIPS_26] = apply_r_mips_26_rela,
[R_MIPS_HI16] = apply_r_mips_hi16_rela,
[R_MIPS_LO16] = apply_r_mips_lo16_rela,
[R_MIPS_64] = apply_r_mips_64_rela,
[R_MIPS_HIGHER] = apply_r_mips_higher_rela,
[R_MIPS_HIGHEST] = apply_r_mips_highest_rela
};
int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
struct module *me)
{
Elf_Mips_Rela *rel = (void *) sechdrs[relsec].sh_addr;
Elf_Sym *sym;
u32 *location;
unsigned int i;
Elf_Addr v;
int res;
pr_debug("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[i].r_offset;
/* This is the symbol it is referring to */
sym = (Elf_Sym *)sechdrs[symindex].sh_addr
+ ELF_MIPS_R_SYM(rel[i]);
if (IS_ERR_VALUE(sym->st_value)) {
/* Ignore unresolved weak symbol */
if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
continue;
printk(KERN_WARNING "%s: Unknown symbol %s\n",
me->name, strtab + sym->st_name);
return -ENOENT;
}
v = sym->st_value + rel[i].r_addend;
res = reloc_handlers_rela[ELF_MIPS_R_TYPE(rel[i])](me, location, v);
if (res)
return res;
}
return 0;
}
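/*
 * Illustration only: the R_MIPS_26 checks and field encoding used by
 * apply_r_mips_26_rela() above, as a small runnable user-space snippet.
 * A j/jal target must be word aligned and must lie in the same 256 MB
 * segment as the address of the delay slot; the 26-bit field stores the
 * target in word units.
 */
#include <stdint.h>
#include <stdio.h>

static int encode_r_mips_26(uint32_t location, uint32_t v, uint32_t *insn)
{
	if (v % 4)
		return -1;		/* target is not word aligned */
	if ((v & 0xf0000000) != ((location + 4) & 0xf0000000))
		return -1;		/* target crosses a 256 MB segment */

	*insn = (*insn & ~0x03ffffff) | ((v >> 2) & 0x03ffffff);
	return 0;
}

int main(void)
{
	uint32_t insn = 0x0c000000;	/* jal with an empty target field */

	if (encode_r_mips_26(0x80201000, 0x802f4560, &insn) == 0)
		printf("patched jal: %#010x\n", insn);	/* 0x0c0bd158 */
	return 0;
}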

298
arch/mips/kernel/module.c Normal file
View file

@ -0,0 +1,298 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Copyright (C) 2001 Rusty Russell.
* Copyright (C) 2003, 2004 Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 2005 Thiemo Seufer
*/
#undef DEBUG
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/numa.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/jump_label.h>
#include <asm/pgtable.h> /* MODULE_START */
struct mips_hi16 {
struct mips_hi16 *next;
Elf_Addr *addr;
Elf_Addr value;
};
static LIST_HEAD(dbe_list);
static DEFINE_SPINLOCK(dbe_lock);
#ifdef MODULE_START
void *module_alloc(unsigned long size)
{
return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE,
__builtin_return_address(0));
}
#endif
int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v)
{
return 0;
}
static int apply_r_mips_32_rel(struct module *me, u32 *location, Elf_Addr v)
{
*location += v;
return 0;
}
static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v)
{
if (v % 4) {
pr_err("module %s: dangerous R_MIPS_26 REL relocation\n",
me->name);
return -ENOEXEC;
}
if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
printk(KERN_ERR
"module %s: relocation overflow\n",
me->name);
return -ENOEXEC;
}
*location = (*location & ~0x03ffffff) |
((*location + (v >> 2)) & 0x03ffffff);
return 0;
}
static int apply_r_mips_hi16_rel(struct module *me, u32 *location, Elf_Addr v)
{
struct mips_hi16 *n;
/*
* We cannot relocate this one now because we don't know the value of
* the carry we need to add. Save the information, and let LO16 do the
* actual relocation.
*/
n = kmalloc(sizeof *n, GFP_KERNEL);
if (!n)
return -ENOMEM;
n->addr = (Elf_Addr *)location;
n->value = v;
n->next = me->arch.r_mips_hi16_list;
me->arch.r_mips_hi16_list = n;
return 0;
}
static void free_relocation_chain(struct mips_hi16 *l)
{
struct mips_hi16 *next;
while (l) {
next = l->next;
kfree(l);
l = next;
}
}
static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
{
unsigned long insnlo = *location;
struct mips_hi16 *l;
Elf_Addr val, vallo;
/* Sign extend the addend we extract from the lo insn. */
vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
if (me->arch.r_mips_hi16_list != NULL) {
l = me->arch.r_mips_hi16_list;
while (l != NULL) {
struct mips_hi16 *next;
unsigned long insn;
/*
* The value for the HI16 had best be the same.
*/
if (v != l->value)
goto out_danger;
/*
* Do the HI16 relocation. Note that we actually don't
* need to know anything about the LO16 itself, except
* where to find the low 16 bits of the addend needed
* by the LO16.
*/
insn = *l->addr;
val = ((insn & 0xffff) << 16) + vallo;
val += v;
/*
* Account for the sign extension that will happen in
* the low bits.
*/
val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff;
insn = (insn & ~0xffff) | val;
*l->addr = insn;
next = l->next;
kfree(l);
l = next;
}
me->arch.r_mips_hi16_list = NULL;
}
/*
* Ok, we're done with the HI16 relocs. Now deal with the LO16.
*/
val = v + vallo;
insnlo = (insnlo & ~0xffff) | (val & 0xffff);
*location = insnlo;
return 0;
out_danger:
free_relocation_chain(l);
me->arch.r_mips_hi16_list = NULL;
pr_err("module %s: dangerous R_MIPS_LO16 REL relocation\n", me->name);
return -ENOEXEC;
}
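/*
 * Illustration only: a small runnable user-space check of the HI16/LO16
 * carry arithmetic performed by the handlers above.  Because the LO16
 * half is sign extended when the addiu adds it back, the HI16 half must
 * absorb a carry whenever bit 15 of the low half is set, which is what
 * the ((v + 0x8000) >> 16) form earlier in this commit and the explicit
 * carry in apply_r_mips_lo16_rel() both compute.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t addr = 0x00409234;			/* example target */
	int32_t  lo   = (int16_t)(addr & 0xffff);	/* sign-extended low half */
	uint32_t hi   = (addr - (uint32_t)lo) >> 16;	/* carry-adjusted high half */

	assert(hi == ((addr + 0x8000) >> 16));		/* same as the RELA form */
	assert((uint32_t)((hi << 16) + lo) == addr);	/* lui/addiu reassembles it */

	printf("addr %#x -> hi16 %#x, lo16 %#x (%d)\n",
	       addr, hi, (uint16_t)lo, lo);
	return 0;
}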
static int (*reloc_handlers_rel[]) (struct module *me, u32 *location,
Elf_Addr v) = {
[R_MIPS_NONE] = apply_r_mips_none,
[R_MIPS_32] = apply_r_mips_32_rel,
[R_MIPS_26] = apply_r_mips_26_rel,
[R_MIPS_HI16] = apply_r_mips_hi16_rel,
[R_MIPS_LO16] = apply_r_mips_lo16_rel
};
int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
struct module *me)
{
Elf_Mips_Rel *rel = (void *) sechdrs[relsec].sh_addr;
Elf_Sym *sym;
u32 *location;
unsigned int i;
Elf_Addr v;
int res;
pr_debug("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
me->arch.r_mips_hi16_list = NULL;
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[i].r_offset;
/* This is the symbol it is referring to */
sym = (Elf_Sym *)sechdrs[symindex].sh_addr
+ ELF_MIPS_R_SYM(rel[i]);
if (IS_ERR_VALUE(sym->st_value)) {
/* Ignore unresolved weak symbol */
if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
continue;
printk(KERN_WARNING "%s: Unknown symbol %s\n",
me->name, strtab + sym->st_name);
return -ENOENT;
}
v = sym->st_value;
res = reloc_handlers_rel[ELF_MIPS_R_TYPE(rel[i])](me, location, v);
if (res)
return res;
}
/*
* Normally the hi16 list should be deallocated at this point. A
* malformed binary however could contain a series of R_MIPS_HI16
* relocations not followed by a R_MIPS_LO16 relocation. In that
* case, free up the list and return an error.
*/
if (me->arch.r_mips_hi16_list) {
free_relocation_chain(me->arch.r_mips_hi16_list);
me->arch.r_mips_hi16_list = NULL;
return -ENOEXEC;
}
return 0;
}
/* Given an address, look for it in the module exception tables. */
const struct exception_table_entry *search_module_dbetables(unsigned long addr)
{
unsigned long flags;
const struct exception_table_entry *e = NULL;
struct mod_arch_specific *dbe;
spin_lock_irqsave(&dbe_lock, flags);
list_for_each_entry(dbe, &dbe_list, dbe_list) {
e = search_extable(dbe->dbe_start, dbe->dbe_end - 1, addr);
if (e)
break;
}
spin_unlock_irqrestore(&dbe_lock, flags);
/* If we found one, we are currently running inside it, hence
   we cannot unload the module, hence no refcnt needed. */
return e;
}
/* Put in dbe list if necessary. */
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
const Elf_Shdr *s;
char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
/* Make jump label nops. */
jump_label_apply_nops(me);
INIT_LIST_HEAD(&me->arch.dbe_list);
for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
if (strcmp("__dbe_table", secstrings + s->sh_name) != 0)
continue;
me->arch.dbe_start = (void *)s->sh_addr;
me->arch.dbe_end = (void *)s->sh_addr + s->sh_size;
spin_lock_irq(&dbe_lock);
list_add(&me->arch.dbe_list, &dbe_list);
spin_unlock_irq(&dbe_lock);
}
return 0;
}
void module_arch_cleanup(struct module *mod)
{
spin_lock_irq(&dbe_lock);
list_del(&mod->arch.dbe_list);
spin_unlock_irq(&dbe_lock);
}

View file

@ -0,0 +1,519 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
* Copyright (C) 1994, 1995, 1996, by Andreas Busse
* Copyright (C) 1999 Silicon Graphics, Inc.
* Copyright (C) 2000 MIPS Technologies, Inc.
* written by Carsten Langgaard, carstenl@mips.com
*/
#define USE_ALTERNATE_RESUME_IMPL 1
.set push
.set arch=mips64r2
#include "r4k_switch.S"
.set pop
/*
* task_struct *resume(task_struct *prev, task_struct *next,
* struct thread_info *next_ti, int usedfpu)
*/
.align 7
LEAF(resume)
.set arch=octeon
mfc0 t1, CP0_STATUS
LONG_S t1, THREAD_STATUS(a0)
cpu_save_nonscratch a0
LONG_S ra, THREAD_REG31(a0)
/*
* check if we need to save FPU registers
*/
PTR_L t3, TASK_THREAD_INFO(a0)
LONG_L t0, TI_FLAGS(t3)
li t1, _TIF_USEDFPU
and t2, t0, t1
beqz t2, 1f
nor t1, zero, t1
and t0, t0, t1
LONG_S t0, TI_FLAGS(t3)
/*
* clear saved user stack CU1 bit
*/
LONG_L t0, ST_OFF(t3)
li t1, ~ST0_CU1
and t0, t0, t1
LONG_S t0, ST_OFF(t3)
.set push
.set arch=mips64r2
fpu_save_double a0 t0 t1 # c0_status passed in t0
# clobbers t1
.set pop
1:
/* check if we need to save COP2 registers */
PTR_L t2, TASK_THREAD_INFO(a0)
LONG_L t0, ST_OFF(t2)
bbit0 t0, 30, 1f
/* Disable COP2 in the stored process state */
li t1, ST0_CU2
xor t0, t1
LONG_S t0, ST_OFF(t2)
/* Enable COP2 so we can save it */
mfc0 t0, CP0_STATUS
or t0, t1
mtc0 t0, CP0_STATUS
/* Save COP2 */
daddu a0, THREAD_CP2
jal octeon_cop2_save
dsubu a0, THREAD_CP2
/* Disable COP2 now that we are done */
mfc0 t0, CP0_STATUS
li t1, ST0_CU2
xor t0, t1
mtc0 t0, CP0_STATUS
1:
#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
/* Check if we need to store CVMSEG state */
mfc0 t0, $11,7 /* CvmMemCtl */
bbit0 t0, 6, 3f /* Is user access enabled? */
/* Store the CVMSEG state */
/* Extract the size of CVMSEG */
andi t0, 0x3f
/* Multiply * (cache line size/sizeof(long)/2) */
sll t0, 7-LONGLOG-1
li t1, -32768 /* Base address of CVMSEG */
LONG_ADDI t2, a0, THREAD_CVMSEG /* Where to store CVMSEG to */
synciobdma
2:
.set noreorder
LONG_L t8, 0(t1) /* Load from CVMSEG */
subu t0, 1 /* Decrement loop var */
LONG_L t9, LONGSIZE(t1)/* Load from CVMSEG */
LONG_ADDU t1, LONGSIZE*2 /* Increment loc in CVMSEG */
LONG_S t8, 0(t2) /* Store CVMSEG to thread storage */
LONG_ADDU t2, LONGSIZE*2 /* Increment loc in thread storage */
bnez t0, 2b /* Loop until we've copied it all */
LONG_S t9, -LONGSIZE(t2)/* Store CVMSEG to thread storage */
.set reorder
/* Disable access to CVMSEG */
mfc0 t0, $11,7 /* CvmMemCtl */
xori t0, t0, 0x40 /* Bit 6 is CVMSEG user enable */
mtc0 t0, $11,7 /* CvmMemCtl */
#endif
3:
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
PTR_LA t8, __stack_chk_guard
LONG_L t9, TASK_STACK_CANARY(a1)
LONG_S t9, 0(t8)
#endif
/*
* The order of restoring the registers takes care of the race
* updating $28, $29 and kernelsp without disabling ints.
*/
move $28, a2
cpu_restore_nonscratch a1
PTR_ADDU t0, $28, _THREAD_SIZE - 32
set_saved_sp t0, t1, t2
mfc0 t1, CP0_STATUS /* Do we really need this? */
li a3, 0xff01
and t1, a3
LONG_L a2, THREAD_STATUS(a1)
nor a3, $0, a3
and a2, a3
or a2, t1
mtc0 a2, CP0_STATUS
move v0, a0
jr ra
END(resume)
/*
* void octeon_cop2_save(struct octeon_cop2_state *a0)
*/
.align 7
LEAF(octeon_cop2_save)
dmfc0 t9, $9,7 /* CvmCtl register. */
/* Save the COP2 CRC state */
dmfc2 t0, 0x0201
dmfc2 t1, 0x0202
dmfc2 t2, 0x0200
sd t0, OCTEON_CP2_CRC_IV(a0)
sd t1, OCTEON_CP2_CRC_LENGTH(a0)
sd t2, OCTEON_CP2_CRC_POLY(a0)
/* Skip next instructions if CvmCtl[NODFA_CP2] set */
bbit1 t9, 28, 1f
/* Save the LLM state */
dmfc2 t0, 0x0402
dmfc2 t1, 0x040A
sd t0, OCTEON_CP2_LLM_DAT(a0)
sd t1, OCTEON_CP2_LLM_DAT+8(a0)
1: bbit1 t9, 26, 3f /* done if CvmCtl[NOCRYPTO] set */
/* Save the COP2 crypto state */
/* this part is mostly common to both pass 1 and later revisions */
dmfc2 t0, 0x0084
dmfc2 t1, 0x0080
dmfc2 t2, 0x0081
dmfc2 t3, 0x0082
sd t0, OCTEON_CP2_3DES_IV(a0)
dmfc2 t0, 0x0088
sd t1, OCTEON_CP2_3DES_KEY(a0)
dmfc2 t1, 0x0111 /* only necessary for pass 1 */
sd t2, OCTEON_CP2_3DES_KEY+8(a0)
dmfc2 t2, 0x0102
sd t3, OCTEON_CP2_3DES_KEY+16(a0)
dmfc2 t3, 0x0103
sd t0, OCTEON_CP2_3DES_RESULT(a0)
dmfc2 t0, 0x0104
sd t1, OCTEON_CP2_AES_INP0(a0) /* only necessary for pass 1 */
dmfc2 t1, 0x0105
sd t2, OCTEON_CP2_AES_IV(a0)
dmfc2 t2, 0x0106
sd t3, OCTEON_CP2_AES_IV+8(a0)
dmfc2 t3, 0x0107
sd t0, OCTEON_CP2_AES_KEY(a0)
dmfc2 t0, 0x0110
sd t1, OCTEON_CP2_AES_KEY+8(a0)
dmfc2 t1, 0x0100
sd t2, OCTEON_CP2_AES_KEY+16(a0)
dmfc2 t2, 0x0101
sd t3, OCTEON_CP2_AES_KEY+24(a0)
mfc0 t3, $15,0 /* Get the processor ID register */
sd t0, OCTEON_CP2_AES_KEYLEN(a0)
li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */
sd t1, OCTEON_CP2_AES_RESULT(a0)
sd t2, OCTEON_CP2_AES_RESULT+8(a0)
/* Skip to the Pass1 version of the remainder of the COP2 state */
beq t3, t0, 2f
/* the non-pass1 state when !CvmCtl[NOCRYPTO] */
dmfc2 t1, 0x0240
dmfc2 t2, 0x0241
dmfc2 t3, 0x0242
dmfc2 t0, 0x0243
sd t1, OCTEON_CP2_HSH_DATW(a0)
dmfc2 t1, 0x0244
sd t2, OCTEON_CP2_HSH_DATW+8(a0)
dmfc2 t2, 0x0245
sd t3, OCTEON_CP2_HSH_DATW+16(a0)
dmfc2 t3, 0x0246
sd t0, OCTEON_CP2_HSH_DATW+24(a0)
dmfc2 t0, 0x0247
sd t1, OCTEON_CP2_HSH_DATW+32(a0)
dmfc2 t1, 0x0248
sd t2, OCTEON_CP2_HSH_DATW+40(a0)
dmfc2 t2, 0x0249
sd t3, OCTEON_CP2_HSH_DATW+48(a0)
dmfc2 t3, 0x024A
sd t0, OCTEON_CP2_HSH_DATW+56(a0)
dmfc2 t0, 0x024B
sd t1, OCTEON_CP2_HSH_DATW+64(a0)
dmfc2 t1, 0x024C
sd t2, OCTEON_CP2_HSH_DATW+72(a0)
dmfc2 t2, 0x024D
sd t3, OCTEON_CP2_HSH_DATW+80(a0)
dmfc2 t3, 0x024E
sd t0, OCTEON_CP2_HSH_DATW+88(a0)
dmfc2 t0, 0x0250
sd t1, OCTEON_CP2_HSH_DATW+96(a0)
dmfc2 t1, 0x0251
sd t2, OCTEON_CP2_HSH_DATW+104(a0)
dmfc2 t2, 0x0252
sd t3, OCTEON_CP2_HSH_DATW+112(a0)
dmfc2 t3, 0x0253
sd t0, OCTEON_CP2_HSH_IVW(a0)
dmfc2 t0, 0x0254
sd t1, OCTEON_CP2_HSH_IVW+8(a0)
dmfc2 t1, 0x0255
sd t2, OCTEON_CP2_HSH_IVW+16(a0)
dmfc2 t2, 0x0256
sd t3, OCTEON_CP2_HSH_IVW+24(a0)
dmfc2 t3, 0x0257
sd t0, OCTEON_CP2_HSH_IVW+32(a0)
dmfc2 t0, 0x0258
sd t1, OCTEON_CP2_HSH_IVW+40(a0)
dmfc2 t1, 0x0259
sd t2, OCTEON_CP2_HSH_IVW+48(a0)
dmfc2 t2, 0x025E
sd t3, OCTEON_CP2_HSH_IVW+56(a0)
dmfc2 t3, 0x025A
sd t0, OCTEON_CP2_GFM_MULT(a0)
dmfc2 t0, 0x025B
sd t1, OCTEON_CP2_GFM_MULT+8(a0)
sd t2, OCTEON_CP2_GFM_POLY(a0)
sd t3, OCTEON_CP2_GFM_RESULT(a0)
sd t0, OCTEON_CP2_GFM_RESULT+8(a0)
jr ra
2: /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */
dmfc2 t3, 0x0040
dmfc2 t0, 0x0041
dmfc2 t1, 0x0042
dmfc2 t2, 0x0043
sd t3, OCTEON_CP2_HSH_DATW(a0)
dmfc2 t3, 0x0044
sd t0, OCTEON_CP2_HSH_DATW+8(a0)
dmfc2 t0, 0x0045
sd t1, OCTEON_CP2_HSH_DATW+16(a0)
dmfc2 t1, 0x0046
sd t2, OCTEON_CP2_HSH_DATW+24(a0)
dmfc2 t2, 0x0048
sd t3, OCTEON_CP2_HSH_DATW+32(a0)
dmfc2 t3, 0x0049
sd t0, OCTEON_CP2_HSH_DATW+40(a0)
dmfc2 t0, 0x004A
sd t1, OCTEON_CP2_HSH_DATW+48(a0)
sd t2, OCTEON_CP2_HSH_IVW(a0)
sd t3, OCTEON_CP2_HSH_IVW+8(a0)
sd t0, OCTEON_CP2_HSH_IVW+16(a0)
3: /* pass 1 or CvmCtl[NOCRYPTO] set */
jr ra
END(octeon_cop2_save)
/*
* void octeon_cop2_restore(struct octeon_cop2_state *a0)
*/
.align 7
.set push
.set noreorder
LEAF(octeon_cop2_restore)
/* First cache line was prefetched before the call */
pref 4, 128(a0)
dmfc0 t9, $9,7 /* CvmCtl register. */
pref 4, 256(a0)
ld t0, OCTEON_CP2_CRC_IV(a0)
pref 4, 384(a0)
ld t1, OCTEON_CP2_CRC_LENGTH(a0)
ld t2, OCTEON_CP2_CRC_POLY(a0)
/* Restore the COP2 CRC state */
dmtc2 t0, 0x0201
dmtc2 t1, 0x1202
bbit1 t9, 28, 2f /* Skip LLM if CvmCtl[NODFA_CP2] is set */
dmtc2 t2, 0x4200
/* Restore the LLM state */
ld t0, OCTEON_CP2_LLM_DAT(a0)
ld t1, OCTEON_CP2_LLM_DAT+8(a0)
dmtc2 t0, 0x0402
dmtc2 t1, 0x040A
2:
bbit1 t9, 26, done_restore /* done if CvmCtl[NOCRYPTO] set */
nop
/* Restore the COP2 crypto state common to pass 1 and pass 2 */
ld t0, OCTEON_CP2_3DES_IV(a0)
ld t1, OCTEON_CP2_3DES_KEY(a0)
ld t2, OCTEON_CP2_3DES_KEY+8(a0)
dmtc2 t0, 0x0084
ld t0, OCTEON_CP2_3DES_KEY+16(a0)
dmtc2 t1, 0x0080
ld t1, OCTEON_CP2_3DES_RESULT(a0)
dmtc2 t2, 0x0081
ld t2, OCTEON_CP2_AES_INP0(a0) /* only really needed for pass 1 */
dmtc2 t0, 0x0082
ld t0, OCTEON_CP2_AES_IV(a0)
dmtc2 t1, 0x0098
ld t1, OCTEON_CP2_AES_IV+8(a0)
dmtc2 t2, 0x010A /* only really needed for pass 1 */
ld t2, OCTEON_CP2_AES_KEY(a0)
dmtc2 t0, 0x0102
ld t0, OCTEON_CP2_AES_KEY+8(a0)
dmtc2 t1, 0x0103
ld t1, OCTEON_CP2_AES_KEY+16(a0)
dmtc2 t2, 0x0104
ld t2, OCTEON_CP2_AES_KEY+24(a0)
dmtc2 t0, 0x0105
ld t0, OCTEON_CP2_AES_KEYLEN(a0)
dmtc2 t1, 0x0106
ld t1, OCTEON_CP2_AES_RESULT(a0)
dmtc2 t2, 0x0107
ld t2, OCTEON_CP2_AES_RESULT+8(a0)
mfc0 t3, $15,0 /* Get the processor ID register */
dmtc2 t0, 0x0110
li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */
dmtc2 t1, 0x0100
bne t0, t3, 3f /* Skip the next stuff for non-pass1 */
dmtc2 t2, 0x0101
/* this code is specific for pass 1 */
ld t0, OCTEON_CP2_HSH_DATW(a0)
ld t1, OCTEON_CP2_HSH_DATW+8(a0)
ld t2, OCTEON_CP2_HSH_DATW+16(a0)
dmtc2 t0, 0x0040
ld t0, OCTEON_CP2_HSH_DATW+24(a0)
dmtc2 t1, 0x0041
ld t1, OCTEON_CP2_HSH_DATW+32(a0)
dmtc2 t2, 0x0042
ld t2, OCTEON_CP2_HSH_DATW+40(a0)
dmtc2 t0, 0x0043
ld t0, OCTEON_CP2_HSH_DATW+48(a0)
dmtc2 t1, 0x0044
ld t1, OCTEON_CP2_HSH_IVW(a0)
dmtc2 t2, 0x0045
ld t2, OCTEON_CP2_HSH_IVW+8(a0)
dmtc2 t0, 0x0046
ld t0, OCTEON_CP2_HSH_IVW+16(a0)
dmtc2 t1, 0x0048
dmtc2 t2, 0x0049
b done_restore /* unconditional branch */
dmtc2 t0, 0x004A
3: /* this is post-pass1 code */
ld t2, OCTEON_CP2_HSH_DATW(a0)
ld t0, OCTEON_CP2_HSH_DATW+8(a0)
ld t1, OCTEON_CP2_HSH_DATW+16(a0)
dmtc2 t2, 0x0240
ld t2, OCTEON_CP2_HSH_DATW+24(a0)
dmtc2 t0, 0x0241
ld t0, OCTEON_CP2_HSH_DATW+32(a0)
dmtc2 t1, 0x0242
ld t1, OCTEON_CP2_HSH_DATW+40(a0)
dmtc2 t2, 0x0243
ld t2, OCTEON_CP2_HSH_DATW+48(a0)
dmtc2 t0, 0x0244
ld t0, OCTEON_CP2_HSH_DATW+56(a0)
dmtc2 t1, 0x0245
ld t1, OCTEON_CP2_HSH_DATW+64(a0)
dmtc2 t2, 0x0246
ld t2, OCTEON_CP2_HSH_DATW+72(a0)
dmtc2 t0, 0x0247
ld t0, OCTEON_CP2_HSH_DATW+80(a0)
dmtc2 t1, 0x0248
ld t1, OCTEON_CP2_HSH_DATW+88(a0)
dmtc2 t2, 0x0249
ld t2, OCTEON_CP2_HSH_DATW+96(a0)
dmtc2 t0, 0x024A
ld t0, OCTEON_CP2_HSH_DATW+104(a0)
dmtc2 t1, 0x024B
ld t1, OCTEON_CP2_HSH_DATW+112(a0)
dmtc2 t2, 0x024C
ld t2, OCTEON_CP2_HSH_IVW(a0)
dmtc2 t0, 0x024D
ld t0, OCTEON_CP2_HSH_IVW+8(a0)
dmtc2 t1, 0x024E
ld t1, OCTEON_CP2_HSH_IVW+16(a0)
dmtc2 t2, 0x0250
ld t2, OCTEON_CP2_HSH_IVW+24(a0)
dmtc2 t0, 0x0251
ld t0, OCTEON_CP2_HSH_IVW+32(a0)
dmtc2 t1, 0x0252
ld t1, OCTEON_CP2_HSH_IVW+40(a0)
dmtc2 t2, 0x0253
ld t2, OCTEON_CP2_HSH_IVW+48(a0)
dmtc2 t0, 0x0254
ld t0, OCTEON_CP2_HSH_IVW+56(a0)
dmtc2 t1, 0x0255
ld t1, OCTEON_CP2_GFM_MULT(a0)
dmtc2 t2, 0x0256
ld t2, OCTEON_CP2_GFM_MULT+8(a0)
dmtc2 t0, 0x0257
ld t0, OCTEON_CP2_GFM_POLY(a0)
dmtc2 t1, 0x0258
ld t1, OCTEON_CP2_GFM_RESULT(a0)
dmtc2 t2, 0x0259
ld t2, OCTEON_CP2_GFM_RESULT+8(a0)
dmtc2 t0, 0x025E
dmtc2 t1, 0x025A
dmtc2 t2, 0x025B
done_restore:
jr ra
nop
END(octeon_cop2_restore)
.set pop
/*
* void octeon_mult_save()
* sp is assumed to point to a struct pt_regs
*
* NOTE: This is called in SAVE_SOME in stackframe.h. It can only
* safely modify k0 and k1.
*/
.align 7
.set push
.set noreorder
LEAF(octeon_mult_save)
dmfc0 k0, $9,7 /* CvmCtl register. */
bbit1 k0, 27, 1f /* Skip CvmCtl[NOMUL] */
nop
/* Save the multiplier state */
v3mulu k0, $0, $0
v3mulu k1, $0, $0
sd k0, PT_MTP(sp) /* PT_MTP has P0 */
v3mulu k0, $0, $0
sd k1, PT_MTP+8(sp) /* PT_MTP+8 has P1 */
ori k1, $0, 1
v3mulu k1, k1, $0
sd k0, PT_MTP+16(sp) /* PT_MTP+16 has P2 */
v3mulu k0, $0, $0
sd k1, PT_MPL(sp) /* PT_MPL has MPL0 */
v3mulu k1, $0, $0
sd k0, PT_MPL+8(sp) /* PT_MPL+8 has MPL1 */
jr ra
sd k1, PT_MPL+16(sp) /* PT_MPL+16 has MPL2 */
1: /* Resume here if CvmCtl[NOMUL] */
jr ra
END(octeon_mult_save)
.set pop
/*
* void octeon_mult_restore()
* sp is assumed to point to a struct pt_regs
*
* NOTE: This is called in RESTORE_SOME in stackframe.h.
*/
.align 7
.set push
.set noreorder
LEAF(octeon_mult_restore)
dmfc0 k1, $9,7 /* CvmCtl register. */
ld v0, PT_MPL(sp) /* MPL0 */
ld v1, PT_MPL+8(sp) /* MPL1 */
ld k0, PT_MPL+16(sp) /* MPL2 */
bbit1 k1, 27, 1f /* Skip CvmCtl[NOMUL] */
/* Normally falls through, so no time wasted here */
nop
/* Restore the multiplier state */
ld k1, PT_MTP+16(sp) /* P2 */
MTM0 v0 /* MPL0 */
ld v0, PT_MTP+8(sp) /* P1 */
MTM1 v1 /* MPL1 */
ld v1, PT_MTP(sp) /* P0 */
MTM2 k0 /* MPL2 */
MTP2 k1 /* P2 */
MTP1 v0 /* P1 */
jr ra
MTP0 v1 /* P0 */
1: /* Resume here if CvmCtl[NOMUL] */
jr ra
nop
END(octeon_mult_restore)
.set pop

View file

@ -0,0 +1,69 @@
/*
* Linux performance counter support for MIPS.
*
* Copyright (C) 2010 MIPS Technologies, Inc.
* Author: Deng-Cheng Zhu
*
* This code is based on the implementation for ARM, which is in turn
* based on the sparc64 perf event code and the x86 code. Performance
* counter access is based on the MIPS Oprofile code. And the callchain
* support references the code of MIPS stacktrace.c.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/perf_event.h>
#include <asm/stacktrace.h>
/* Callchain handling code. */
/*
* Leave userspace callchain empty for now. When we find a way to trace
* the user stack callchains, we will add it here.
*/
static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
unsigned long reg29)
{
unsigned long *sp = (unsigned long *)reg29;
unsigned long addr;
while (!kstack_end(sp)) {
addr = *sp++;
if (__kernel_text_address(addr)) {
perf_callchain_store(entry, addr);
if (entry->nr >= PERF_MAX_STACK_DEPTH)
break;
}
}
}
void perf_callchain_kernel(struct perf_callchain_entry *entry,
struct pt_regs *regs)
{
unsigned long sp = regs->regs[29];
#ifdef CONFIG_KALLSYMS
unsigned long ra = regs->regs[31];
unsigned long pc = regs->cp0_epc;
if (raw_show_trace || !__kernel_text_address(pc)) {
unsigned long stack_page =
(unsigned long)task_stack_page(current);
if (stack_page && sp >= stack_page &&
sp <= stack_page + THREAD_SIZE - 32)
save_raw_perf_callchain(entry, sp);
return;
}
do {
perf_callchain_store(entry, pc);
if (entry->nr >= PERF_MAX_STACK_DEPTH)
break;
pc = unwind_stack(current, &sp, pc, &ra);
} while (pc);
#else
save_raw_perf_callchain(entry, sp);
#endif
}

File diff suppressed because it is too large

724
arch/mips/kernel/pm-cps.c Normal file
View file

@ -0,0 +1,724 @@
/*
* Copyright (C) 2014 Imagination Technologies
* Author: Paul Burton <paul.burton@imgtec.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/idle.h>
#include <asm/mips-cm.h>
#include <asm/mips-cpc.h>
#include <asm/mipsmtregs.h>
#include <asm/pm.h>
#include <asm/pm-cps.h>
#include <asm/smp-cps.h>
#include <asm/uasm.h>
/*
* cps_nc_entry_fn - type of a generated non-coherent state entry function
* @online: the count of online coupled VPEs
* @nc_ready_count: pointer to a non-coherent mapping of the core ready_count
*
* The code entering & exiting non-coherent states is generated at runtime
* using uasm, in order to ensure that the compiler cannot insert a stray
* memory access at an unfortunate time and to allow the generation of optimal
* core-specific code particularly for cache routines. If coupled_coherence
* is non-zero and this is the entry function for the CPS_PM_NC_WAIT state,
* returns the number of VPEs that were in the wait state at the point this
* VPE left it. Returns garbage if coupled_coherence is zero or this is not
* the entry function for CPS_PM_NC_WAIT.
*/
typedef unsigned (*cps_nc_entry_fn)(unsigned online, u32 *nc_ready_count);
/*
* The entry point of the generated non-coherent idle state entry/exit
* functions. Actually per-core rather than per-CPU.
*/
static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT],
nc_asm_enter);
/* Bitmap indicating which states are supported by the system */
DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);
/*
* Indicates the number of coupled VPEs ready to operate in a non-coherent
* state. Actually per-core rather than per-CPU.
*/
static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc);
/* Indicates online CPUs coupled with the current CPU */
static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
/*
* Used to synchronize entry to deep idle states. Actually per-core rather
* than per-CPU.
*/
static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier);
/* Saved CPU state across the CPS_PM_POWER_GATED state */
DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state);
/* A somewhat arbitrary number of labels & relocs for uasm */
static struct uasm_label labels[32] __initdata;
static struct uasm_reloc relocs[32] __initdata;
/* CPU dependant sync types */
static unsigned stype_intervention;
static unsigned stype_memory;
static unsigned stype_ordering;
enum mips_reg {
zero, at, v0, v1, a0, a1, a2, a3,
t0, t1, t2, t3, t4, t5, t6, t7,
s0, s1, s2, s3, s4, s5, s6, s7,
t8, t9, k0, k1, gp, sp, fp, ra,
};
bool cps_pm_support_state(enum cps_pm_state state)
{
return test_bit(state, state_support);
}
static void coupled_barrier(atomic_t *a, unsigned online)
{
/*
* This function is effectively the same as
* cpuidle_coupled_parallel_barrier, which can't be used here since
* there's no cpuidle device.
*/
if (!coupled_coherence)
return;
smp_mb__before_atomic();
atomic_inc(a);
while (atomic_read(a) < online)
cpu_relax();
if (atomic_inc_return(a) == online * 2) {
atomic_set(a, 0);
return;
}
while (atomic_read(a) > online)
cpu_relax();
}
int cps_pm_enter_state(enum cps_pm_state state)
{
unsigned cpu = smp_processor_id();
unsigned core = current_cpu_data.core;
unsigned online, left;
cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled);
u32 *core_ready_count, *nc_core_ready_count;
void *nc_addr;
cps_nc_entry_fn entry;
struct core_boot_config *core_cfg;
struct vpe_boot_config *vpe_cfg;
/* Check that there is an entry function for this state */
entry = per_cpu(nc_asm_enter, core)[state];
if (!entry)
return -EINVAL;
/* Calculate which coupled CPUs (VPEs) are online */
#ifdef CONFIG_MIPS_MT
if (cpu_online(cpu)) {
cpumask_and(coupled_mask, cpu_online_mask,
&cpu_sibling_map[cpu]);
online = cpumask_weight(coupled_mask);
cpumask_clear_cpu(cpu, coupled_mask);
} else
#endif
{
cpumask_clear(coupled_mask);
online = 1;
}
/* Setup the VPE to run mips_cps_pm_restore when started again */
if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
/* Power gating relies upon CPS SMP */
if (!mips_cps_smp_in_use())
return -EINVAL;
core_cfg = &mips_cps_core_bootcfg[core];
vpe_cfg = &core_cfg->vpe_config[cpu_vpe_id(&current_cpu_data)];
vpe_cfg->pc = (unsigned long)mips_cps_pm_restore;
vpe_cfg->gp = (unsigned long)current_thread_info();
vpe_cfg->sp = 0;
}
/* Indicate that this CPU might not be coherent */
cpumask_clear_cpu(cpu, &cpu_coherent_mask);
smp_mb__after_atomic();
/* Create a non-coherent mapping of the core ready_count */
core_ready_count = per_cpu(ready_count, core);
nc_addr = kmap_noncoherent(virt_to_page(core_ready_count),
(unsigned long)core_ready_count);
nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK);
nc_core_ready_count = nc_addr;
/* Ensure ready_count is zero-initialised before the assembly runs */
ACCESS_ONCE(*nc_core_ready_count) = 0;
coupled_barrier(&per_cpu(pm_barrier, core), online);
/* Run the generated entry code */
left = entry(online, nc_core_ready_count);
/* Remove the non-coherent mapping of ready_count */
kunmap_noncoherent();
/* Indicate that this CPU is definitely coherent */
cpumask_set_cpu(cpu, &cpu_coherent_mask);
/*
* If this VPE is the first to leave the non-coherent wait state then
* it needs to wake up any coupled VPEs still running their wait
* instruction so that they return to cpuidle, which can then complete
* coordination between the coupled VPEs & provide the governor with
* a chance to reflect on the length of time the VPEs were in the
* idle state.
*/
if (coupled_coherence && (state == CPS_PM_NC_WAIT) && (left == online))
arch_send_call_function_ipi_mask(coupled_mask);
return 0;
}
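/*
 * Hedged usage sketch: an idle/cpuidle path would check
 * cps_pm_support_state() once and then call cps_pm_enter_state() with
 * interrupts already disabled.  enter_nc_wait() is invented for the
 * example and the fallback simply uses the ordinary cpu_wait hook from
 * <asm/idle.h> when the state is unsupported or entry fails.
 */
#include <asm/idle.h>
#include <asm/pm-cps.h>

static void enter_nc_wait(void)
{
	if (cps_pm_support_state(CPS_PM_NC_WAIT) &&
	    !cps_pm_enter_state(CPS_PM_NC_WAIT))
		return;			/* entered and left the NC wait state */

	if (cpu_wait)
		cpu_wait();		/* plain coherent wait as a fallback */
}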
static void __init cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
struct uasm_reloc **pr,
const struct cache_desc *cache,
unsigned op, int lbl)
{
unsigned cache_size = cache->ways << cache->waybit;
unsigned i;
const unsigned unroll_lines = 32;
/* If the cache isn't present this function has it easy */
if (cache->flags & MIPS_CACHE_NOT_PRESENT)
return;
/* Load base address */
UASM_i_LA(pp, t0, (long)CKSEG0);
/* Calculate end address */
if (cache_size < 0x8000)
uasm_i_addiu(pp, t1, t0, cache_size);
else
UASM_i_LA(pp, t1, (long)(CKSEG0 + cache_size));
/* Start of cache op loop */
uasm_build_label(pl, *pp, lbl);
/* Generate the cache ops */
for (i = 0; i < unroll_lines; i++)
uasm_i_cache(pp, op, i * cache->linesz, t0);
/* Update the base address */
uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz);
/* Loop if we haven't reached the end address yet */
uasm_il_bne(pp, pr, t0, t1, lbl);
uasm_i_nop(pp);
}
static int __init cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
struct uasm_reloc **pr,
const struct cpuinfo_mips *cpu_info,
int lbl)
{
unsigned i, fsb_size = 8;
unsigned num_loads = (fsb_size * 3) / 2;
unsigned line_stride = 2;
unsigned line_size = cpu_info->dcache.linesz;
unsigned perf_counter, perf_event;
unsigned revision = cpu_info->processor_id & PRID_REV_MASK;
/*
* Determine whether this CPU requires an FSB flush, and if so which
* performance counter/event reflect stalls due to a full FSB.
*/
switch (__get_cpu_type(cpu_info->cputype)) {
case CPU_INTERAPTIV:
perf_counter = 1;
perf_event = 51;
break;
case CPU_PROAPTIV:
/* Newer proAptiv cores don't require this workaround */
if (revision >= PRID_REV_ENCODE_332(1, 1, 0))
return 0;
/* On older ones it's unavailable */
return -1;
/* CPUs which do not require the workaround */
case CPU_P5600:
return 0;
default:
WARN_ONCE(1, "pm-cps: FSB flush unsupported for this CPU\n");
return -1;
}
/*
* Ensure that the fill/store buffer (FSB) is not holding the results
* of a prefetch, since if it is then the CPC sequencer may become
* stuck in the D3 (ClrBus) state whilst entering a low power state.
*/
/* Preserve perf counter setup */
uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */
/* Setup perf counter to count FSB full pipeline stalls */
uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf);
uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0); /* PerfCtlN */
uasm_i_ehb(pp);
uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1); /* PerfCntN */
uasm_i_ehb(pp);
/* Base address for loads */
UASM_i_LA(pp, t0, (long)CKSEG0);
/* Start of clear loop */
uasm_build_label(pl, *pp, lbl);
/* Perform some loads to fill the FSB */
for (i = 0; i < num_loads; i++)
uasm_i_lw(pp, zero, i * line_size * line_stride, t0);
/*
* Invalidate the new D-cache entries so that the cache will need
* refilling (via the FSB) if the loop is executed again.
*/
for (i = 0; i < num_loads; i++) {
uasm_i_cache(pp, Hit_Invalidate_D,
i * line_size * line_stride, t0);
uasm_i_cache(pp, Hit_Writeback_Inv_SD,
i * line_size * line_stride, t0);
}
/* Completion barrier */
uasm_i_sync(pp, stype_memory);
uasm_i_ehb(pp);
/* Check whether the pipeline stalled due to the FSB being full */
uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1); /* PerfCntN */
/* Loop if it didn't */
uasm_il_beqz(pp, pr, t1, lbl);
uasm_i_nop(pp);
/* Restore perf counter 1. The count may well now be wrong... */
uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
uasm_i_ehb(pp);
uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */
uasm_i_ehb(pp);
return 0;
}
static void __init cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
struct uasm_reloc **pr,
unsigned r_addr, int lbl)
{
uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000));
uasm_build_label(pl, *pp, lbl);
uasm_i_ll(pp, t1, 0, r_addr);
uasm_i_or(pp, t1, t1, t0);
uasm_i_sc(pp, t1, 0, r_addr);
uasm_il_beqz(pp, pr, t1, lbl);
uasm_i_nop(pp);
}
static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
{
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
u32 *buf, *p;
const unsigned r_online = a0;
const unsigned r_nc_count = a1;
const unsigned r_pcohctl = t7;
const unsigned max_instrs = 256;
unsigned cpc_cmd;
int err;
enum {
lbl_incready = 1,
lbl_poll_cont,
lbl_secondary_hang,
lbl_disable_coherence,
lbl_flush_fsb,
lbl_invicache,
lbl_flushdcache,
lbl_hang,
lbl_set_cont,
lbl_secondary_cont,
lbl_decready,
};
/* Allocate a buffer to hold the generated code */
p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL);
if (!buf)
return NULL;
/* Clear labels & relocs ready for (re)use */
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
/* Power gating relies upon CPS SMP */
if (!mips_cps_smp_in_use())
goto out_err;
/*
* Save CPU state. Note the non-standard calling convention
* with the return address placed in v0 to avoid clobbering
* the ra register before it is saved.
*/
UASM_i_LA(&p, t0, (long)mips_cps_pm_save);
uasm_i_jalr(&p, v0, t0);
uasm_i_nop(&p);
}
/*
* Load addresses of required CM & CPC registers. This is done early
* because they're needed in both the enable & disable coherence steps
* but in the coupled case the enable step will only run on one VPE.
*/
UASM_i_LA(&p, r_pcohctl, (long)addr_gcr_cl_coherence());
if (coupled_coherence) {
/* Increment ready_count */
uasm_i_sync(&p, stype_ordering);
uasm_build_label(&l, p, lbl_incready);
uasm_i_ll(&p, t1, 0, r_nc_count);
uasm_i_addiu(&p, t2, t1, 1);
uasm_i_sc(&p, t2, 0, r_nc_count);
uasm_il_beqz(&p, &r, t2, lbl_incready);
uasm_i_addiu(&p, t1, t1, 1);
/* Ordering barrier */
uasm_i_sync(&p, stype_ordering);
/*
* If this is the last VPE to become ready for non-coherence
* then it should branch below.
*/
uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence);
uasm_i_nop(&p);
if (state < CPS_PM_POWER_GATED) {
/*
* Otherwise this is not the last VPE to become ready
* for non-coherence. It needs to wait until coherence
* has been disabled before proceeding, which it will do
* by polling for the top bit of ready_count being set.
*/
uasm_i_addiu(&p, t1, zero, -1);
uasm_build_label(&l, p, lbl_poll_cont);
uasm_i_lw(&p, t0, 0, r_nc_count);
uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
uasm_i_ehb(&p);
uasm_i_yield(&p, zero, t1);
uasm_il_b(&p, &r, lbl_poll_cont);
uasm_i_nop(&p);
} else {
/*
* The core will lose power & this VPE will not continue
* so it can simply halt here.
*/
uasm_i_addiu(&p, t0, zero, TCHALT_H);
uasm_i_mtc0(&p, t0, 2, 4);
uasm_build_label(&l, p, lbl_secondary_hang);
uasm_il_b(&p, &r, lbl_secondary_hang);
uasm_i_nop(&p);
}
}
/*
* This is the point of no return - this VPE will now proceed to
* disable coherence. At this point we *must* be sure that no other
* VPE within the core will interfere with the L1 dcache.
*/
uasm_build_label(&l, p, lbl_disable_coherence);
/* Invalidate the L1 icache */
cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache,
Index_Invalidate_I, lbl_invicache);
/* Writeback & invalidate the L1 dcache */
cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
Index_Writeback_Inv_D, lbl_flushdcache);
/* Completion barrier */
uasm_i_sync(&p, stype_memory);
uasm_i_ehb(&p);
/*
* Disable all but self interventions. The load from COHCTL is defined
* by the interAptiv & proAptiv SUMs as ensuring that the operation
* resulting from the preceding store is complete.
*/
uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
uasm_i_sw(&p, t0, 0, r_pcohctl);
uasm_i_lw(&p, t0, 0, r_pcohctl);
/* Sync to ensure previous interventions are complete */
uasm_i_sync(&p, stype_intervention);
uasm_i_ehb(&p);
/* Disable coherence */
uasm_i_sw(&p, zero, 0, r_pcohctl);
uasm_i_lw(&p, t0, 0, r_pcohctl);
if (state >= CPS_PM_CLOCK_GATED) {
err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu],
lbl_flush_fsb);
if (err)
goto out_err;
/* Determine the CPC command to issue */
switch (state) {
case CPS_PM_CLOCK_GATED:
cpc_cmd = CPC_Cx_CMD_CLOCKOFF;
break;
case CPS_PM_POWER_GATED:
cpc_cmd = CPC_Cx_CMD_PWRDOWN;
break;
default:
BUG();
goto out_err;
}
/* Issue the CPC command */
UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd());
uasm_i_addiu(&p, t1, zero, cpc_cmd);
uasm_i_sw(&p, t1, 0, t0);
if (state == CPS_PM_POWER_GATED) {
/* If anything goes wrong just hang */
uasm_build_label(&l, p, lbl_hang);
uasm_il_b(&p, &r, lbl_hang);
uasm_i_nop(&p);
/*
* There's no point generating more code, the core is
* powered down & if powered back up will run from the
* reset vector not from here.
*/
goto gen_done;
}
/* Completion barrier */
uasm_i_sync(&p, stype_memory);
uasm_i_ehb(&p);
}
if (state == CPS_PM_NC_WAIT) {
/*
* At this point it is safe for all VPEs to proceed with
* execution. This VPE will set the top bit of ready_count
* to indicate to the other VPEs that they may continue.
*/
if (coupled_coherence)
cps_gen_set_top_bit(&p, &l, &r, r_nc_count,
lbl_set_cont);
/*
* VPEs which did not disable coherence will continue
* executing, after coherence has been disabled, from this
* point.
*/
uasm_build_label(&l, p, lbl_secondary_cont);
/* Now perform our wait */
uasm_i_wait(&p, 0);
}
/*
* Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs
* will run this. The first will actually re-enable coherence & the
* rest will just be performing a rather unusual nop.
*/
uasm_i_addiu(&p, t0, zero, CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK);
uasm_i_sw(&p, t0, 0, r_pcohctl);
uasm_i_lw(&p, t0, 0, r_pcohctl);
/* Completion barrier */
uasm_i_sync(&p, stype_memory);
uasm_i_ehb(&p);
if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
/* Decrement ready_count */
uasm_build_label(&l, p, lbl_decready);
uasm_i_sync(&p, stype_ordering);
uasm_i_ll(&p, t1, 0, r_nc_count);
uasm_i_addiu(&p, t2, t1, -1);
uasm_i_sc(&p, t2, 0, r_nc_count);
uasm_il_beqz(&p, &r, t2, lbl_decready);
uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);
/* Ordering barrier */
uasm_i_sync(&p, stype_ordering);
}
if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
/*
* At this point it is safe for all VPEs to proceed with
* execution. This VPE will set the top bit of ready_count
* to indicate to the other VPEs that they may continue.
*/
cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont);
/*
* This core will be reliant upon another core sending a
* power-up command to the CPC in order to resume operation.
* Thus an arbitrary VPE can't trigger the core leaving the
* idle state and the one that disables coherence might as well
* be the one to re-enable it. The rest will continue from here
* after that has been done.
*/
uasm_build_label(&l, p, lbl_secondary_cont);
/* Ordering barrier */
uasm_i_sync(&p, stype_ordering);
}
/* The core is coherent, time to return to C code */
uasm_i_jr(&p, ra);
uasm_i_nop(&p);
gen_done:
/* Ensure the code didn't exceed the resources allocated for it */
BUG_ON((p - buf) > max_instrs);
BUG_ON((l - labels) > ARRAY_SIZE(labels));
BUG_ON((r - relocs) > ARRAY_SIZE(relocs));
/* Patch branch offsets */
uasm_resolve_relocs(relocs, labels);
/* Flush the icache */
local_flush_icache_range((unsigned long)buf, (unsigned long)p);
return buf;
out_err:
kfree(buf);
return NULL;
}
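/*
 * cps_gen_core_entries() - generate entry code for each supported state on
 * the core containing @cpu, and allocate that core's cache-line aligned
 * ready_count word used to coordinate its VPEs.
 */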
static int __init cps_gen_core_entries(unsigned cpu)
{
enum cps_pm_state state;
unsigned core = cpu_data[cpu].core;
unsigned dlinesz = cpu_data[cpu].dcache.linesz;
void *entry_fn, *core_rc;
for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
if (per_cpu(nc_asm_enter, core)[state])
continue;
if (!test_bit(state, state_support))
continue;
entry_fn = cps_gen_entry_code(cpu, state);
if (!entry_fn) {
pr_err("Failed to generate core %u state %u entry\n",
core, state);
clear_bit(state, state_support);
}
per_cpu(nc_asm_enter, core)[state] = entry_fn;
}
if (!per_cpu(ready_count, core)) {
core_rc = kmalloc(dlinesz * 2, GFP_KERNEL);
if (!core_rc) {
pr_err("Failed to allocate core %u ready_count\n", core);
return -ENOMEM;
}
per_cpu(ready_count_alloc, core) = core_rc;
/* Ensure ready_count is aligned to a cacheline boundary */
core_rc += dlinesz - 1;
core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1));
per_cpu(ready_count, core) = core_rc;
}
return 0;
}
static int __init cps_pm_init(void)
{
unsigned cpu;
int err;
/* Detect appropriate sync types for the system */
switch (current_cpu_data.cputype) {
case CPU_INTERAPTIV:
case CPU_PROAPTIV:
case CPU_M5150:
case CPU_P5600:
stype_intervention = 0x2;
stype_memory = 0x3;
stype_ordering = 0x10;
break;
default:
pr_warn("Power management is using heavyweight sync 0\n");
}
/* A CM is required for all non-coherent states */
if (!mips_cm_present()) {
pr_warn("pm-cps: no CM, non-coherent states unavailable\n");
goto out;
}
/*
* If interrupts were enabled whilst running a wait instruction on a
* non-coherent core then the VPE may end up processing interrupts
* whilst non-coherent. That would be bad.
*/
if (cpu_wait == r4k_wait_irqoff)
set_bit(CPS_PM_NC_WAIT, state_support);
else
pr_warn("pm-cps: non-coherent wait unavailable\n");
/* Detect whether a CPC is present */
if (mips_cpc_present()) {
/* Detect whether clock gating is implemented */
if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL_MSK)
set_bit(CPS_PM_CLOCK_GATED, state_support);
else
pr_warn("pm-cps: CPC does not support clock gating\n");
/* Power gating is available with CPS SMP & any CPC */
if (mips_cps_smp_in_use())
set_bit(CPS_PM_POWER_GATED, state_support);
else
pr_warn("pm-cps: CPS SMP not in use, power gating unavailable\n");
} else {
pr_warn("pm-cps: no CPC, clock & power gating unavailable\n");
}
for_each_present_cpu(cpu) {
err = cps_gen_core_entries(cpu);
if (err)
return err;
}
out:
return 0;
}
arch_initcall(cps_pm_init);

99
arch/mips/kernel/pm.c Normal file
View file

@ -0,0 +1,99 @@
/*
* Copyright (C) 2014 Imagination Technologies Ltd.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* CPU PM notifiers for saving/restoring general CPU state.
*/
#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mmu_context.h>
#include <asm/pm.h>
#include <asm/watch.h>
/* Used by PM helper macros in asm/pm.h */
struct mips_static_suspend_state mips_static_suspend_state;
/**
* mips_cpu_save() - Save general CPU state.
* Ensures that general CPU context is saved, notably FPU and DSP.
*/
static int mips_cpu_save(void)
{
/* Save FPU state */
lose_fpu(1);
/* Save DSP state */
save_dsp(current);
return 0;
}
/**
* mips_cpu_restore() - Restore general CPU state.
* Restores important CPU context.
*/
static void mips_cpu_restore(void)
{
unsigned int cpu = smp_processor_id();
/* Restore ASID */
if (current->mm)
write_c0_entryhi(cpu_asid(cpu, current->mm));
/* Restore DSP state */
restore_dsp(current);
/* Restore UserLocal */
if (cpu_has_userlocal)
write_c0_userlocal(current_thread_info()->tp_value);
/* Restore watch registers */
__restore_watch();
}
/**
* mips_pm_notifier() - Notifier for preserving general CPU context.
* @self: Notifier block.
* @cmd: CPU PM event.
* @v: Private data (unused).
*
* This is called when a CPU power management event occurs, and is used to
* ensure that important CPU context is preserved across a CPU power down.
*/
static int mips_pm_notifier(struct notifier_block *self, unsigned long cmd,
void *v)
{
int ret;
switch (cmd) {
case CPU_PM_ENTER:
ret = mips_cpu_save();
if (ret)
return NOTIFY_STOP;
break;
case CPU_PM_ENTER_FAILED:
case CPU_PM_EXIT:
mips_cpu_restore();
break;
}
return NOTIFY_OK;
}
static struct notifier_block mips_pm_notifier_block = {
.notifier_call = mips_pm_notifier,
};
static int __init mips_pm_init(void)
{
return cpu_pm_register_notifier(&mips_pm_notifier_block);
}
arch_initcall(mips_pm_init);

168
arch/mips/kernel/proc.c Normal file
View file

@ -0,0 +1,168 @@
/*
* Copyright (C) 1995, 1996, 2001 Ralf Baechle
* Copyright (C) 2001, 2004 MIPS Technologies, Inc.
* Copyright (C) 2004 Maciej W. Rozycki
*/
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/idle.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/prom.h>
unsigned int vced_count, vcei_count;
/*
* No lock; only written during early bootup by CPU 0.
*/
static RAW_NOTIFIER_HEAD(proc_cpuinfo_chain);
int __ref register_proc_cpuinfo_notifier(struct notifier_block *nb)
{
return raw_notifier_chain_register(&proc_cpuinfo_chain, nb);
}
int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v)
{
return raw_notifier_call_chain(&proc_cpuinfo_chain, val, v);
}
static int show_cpuinfo(struct seq_file *m, void *v)
{
struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args;
unsigned long n = (unsigned long) v - 1;
unsigned int version = cpu_data[n].processor_id;
unsigned int fp_vers = cpu_data[n].fpu_id;
char fmt [64];
int i;
#ifdef CONFIG_SMP
if (!cpu_online(n))
return 0;
#endif
/*
* For the first processor also print the system type
*/
if (n == 0) {
seq_printf(m, "system type\t\t: %s\n", get_system_type());
if (mips_get_machine_name())
seq_printf(m, "machine\t\t\t: %s\n",
mips_get_machine_name());
}
seq_printf(m, "processor\t\t: %ld\n", n);
sprintf(fmt, "cpu model\t\t: %%s V%%d.%%d%s\n",
cpu_data[n].options & MIPS_CPU_FPU ? " FPU V%d.%d" : "");
seq_printf(m, fmt, __cpu_name[n],
(version >> 4) & 0x0f, version & 0x0f,
(fp_vers >> 4) & 0x0f, fp_vers & 0x0f);
seq_printf(m, "BogoMIPS\t\t: %u.%02u\n",
cpu_data[n].udelay_val / (500000/HZ),
(cpu_data[n].udelay_val / (5000/HZ)) % 100);
seq_printf(m, "wait instruction\t: %s\n", cpu_wait ? "yes" : "no");
seq_printf(m, "microsecond timers\t: %s\n",
cpu_has_counter ? "yes" : "no");
seq_printf(m, "tlb_entries\t\t: %d\n", cpu_data[n].tlbsize);
seq_printf(m, "extra interrupt vector\t: %s\n",
cpu_has_divec ? "yes" : "no");
seq_printf(m, "hardware watchpoint\t: %s",
cpu_has_watch ? "yes, " : "no\n");
if (cpu_has_watch) {
seq_printf(m, "count: %d, address/irw mask: [",
cpu_data[n].watch_reg_count);
for (i = 0; i < cpu_data[n].watch_reg_count; i++)
seq_printf(m, "%s0x%04x", i ? ", " : "" ,
cpu_data[n].watch_reg_masks[i]);
seq_printf(m, "]\n");
}
seq_printf(m, "isa\t\t\t: mips1");
if (cpu_has_mips_2)
seq_printf(m, "%s", " mips2");
if (cpu_has_mips_3)
seq_printf(m, "%s", " mips3");
if (cpu_has_mips_4)
seq_printf(m, "%s", " mips4");
if (cpu_has_mips_5)
seq_printf(m, "%s", " mips5");
if (cpu_has_mips32r1)
seq_printf(m, "%s", " mips32r1");
if (cpu_has_mips32r2)
seq_printf(m, "%s", " mips32r2");
if (cpu_has_mips64r1)
seq_printf(m, "%s", " mips64r1");
if (cpu_has_mips64r2)
seq_printf(m, "%s", " mips64r2");
seq_printf(m, "\n");
seq_printf(m, "ASEs implemented\t:");
if (cpu_has_mips16) seq_printf(m, "%s", " mips16");
if (cpu_has_mdmx) seq_printf(m, "%s", " mdmx");
if (cpu_has_mips3d) seq_printf(m, "%s", " mips3d");
if (cpu_has_smartmips) seq_printf(m, "%s", " smartmips");
if (cpu_has_dsp) seq_printf(m, "%s", " dsp");
if (cpu_has_dsp2) seq_printf(m, "%s", " dsp2");
if (cpu_has_mipsmt) seq_printf(m, "%s", " mt");
if (cpu_has_mmips) seq_printf(m, "%s", " micromips");
if (cpu_has_vz) seq_printf(m, "%s", " vz");
if (cpu_has_msa) seq_printf(m, "%s", " msa");
if (cpu_has_eva) seq_printf(m, "%s", " eva");
if (cpu_has_htw) seq_printf(m, "%s", " htw");
seq_printf(m, "\n");
if (cpu_has_mmips) {
seq_printf(m, "micromips kernel\t: %s\n",
(read_c0_config3() & MIPS_CONF3_ISA_OE) ? "yes" : "no");
}
seq_printf(m, "shadow register sets\t: %d\n",
cpu_data[n].srsets);
seq_printf(m, "kscratch registers\t: %d\n",
hweight8(cpu_data[n].kscratch_mask));
seq_printf(m, "package\t\t\t: %d\n", cpu_data[n].package);
seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);
sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
cpu_has_vce ? "%u" : "not available");
seq_printf(m, fmt, 'D', vced_count);
seq_printf(m, fmt, 'I', vcei_count);
proc_cpuinfo_notifier_args.m = m;
proc_cpuinfo_notifier_args.n = n;
raw_notifier_call_chain(&proc_cpuinfo_chain, 0,
&proc_cpuinfo_notifier_args);
seq_printf(m, "\n");
return 0;
}
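/*
 * seq_file iterator for /proc/cpuinfo: positions 1..NR_CPUS map to CPU
 * indices 0..NR_CPUS-1, with show_cpuinfo() emitting one record per
 * online CPU.
 */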
static void *c_start(struct seq_file *m, loff_t *pos)
{
unsigned long i = *pos;
return i < NR_CPUS ? (void *) (i + 1) : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
.start = c_start,
.next = c_next,
.stop = c_stop,
.show = show_cpuinfo,
};

534
arch/mips/kernel/process.c Normal file
View file

@ -0,0 +1,534 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
* Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2004 Thiemo Seufer
* Copyright (C) 2013 Imagination Technologies Ltd.
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>
#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/elf.h>
#include <asm/isadep.h>
#include <asm/inst.h>
#include <asm/stacktrace.h>
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
/* What the heck is this check doing ? */
if (!cpu_isset(smp_processor_id(), cpu_callin_map))
play_dead();
}
#endif
asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);
void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
unsigned long status;
/* New thread loses kernel privileges. */
status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
status |= KU_USER;
regs->cp0_status = status;
clear_used_math();
clear_fpu_owner();
init_dsp();
clear_thread_flag(TIF_USEDMSA);
clear_thread_flag(TIF_MSA_CTX_LIVE);
disable_msa();
regs->cp0_epc = pc;
regs->regs[29] = sp;
}
void exit_thread(void)
{
}
void flush_thread(void)
{
}
int copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long arg, struct task_struct *p)
{
struct thread_info *ti = task_thread_info(p);
struct pt_regs *childregs, *regs = current_pt_regs();
unsigned long childksp;
p->set_child_tid = p->clear_child_tid = NULL;
childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
preempt_disable();
if (is_msa_enabled())
save_msa(p);
else if (is_fpu_owner())
save_fp(p);
if (cpu_has_dsp)
save_dsp(p);
preempt_enable();
/* set up new TSS. */
childregs = (struct pt_regs *) childksp - 1;
/* Put the stack after the struct pt_regs. */
childksp = (unsigned long) childregs;
p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
if (unlikely(p->flags & PF_KTHREAD)) {
unsigned long status = p->thread.cp0_status;
memset(childregs, 0, sizeof(struct pt_regs));
ti->addr_limit = KERNEL_DS;
p->thread.reg16 = usp; /* fn */
p->thread.reg17 = arg;
p->thread.reg29 = childksp;
p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
((status & (ST0_KUC | ST0_IEC)) << 2);
#else
status |= ST0_EXL;
#endif
childregs->cp0_status = status;
return 0;
}
*childregs = *regs;
childregs->regs[7] = 0; /* Clear error flag */
childregs->regs[2] = 0; /* Child gets zero as return value */
if (usp)
childregs->regs[29] = usp;
ti->addr_limit = USER_DS;
p->thread.reg29 = (unsigned long) childregs;
p->thread.reg31 = (unsigned long) ret_from_fork;
/*
* New tasks lose permission to use the fpu. This accelerates context
* switching for most programs since they don't use the fpu.
*/
childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
clear_tsk_thread_flag(p, TIF_USEDFPU);
clear_tsk_thread_flag(p, TIF_USEDMSA);
clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);
#ifdef CONFIG_MIPS_MT_FPAFF
clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */
if (clone_flags & CLONE_SETTLS)
ti->tp_value = regs->regs[7];
return 0;
}
#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
struct mips_frame_info {
void *func;
unsigned long func_size;
int frame_size;
int pc_offset;
};
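/*
 * Reconstruct the absolute target of a j/jal instruction: the upper four
 * bits come from the address of the instruction itself, the lower 28 bits
 * from the 26-bit target field shifted left by two.
 */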
#define J_TARGET(pc,target) \
(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))
static inline int is_ra_save_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
union mips_instruction mmi;
/*
* swsp ra,offset
* swm16 reglist,offset(sp)
* swm32 reglist,offset(sp)
* sw32 ra,offset(sp)
* jraddiusp - NOT SUPPORTED
*
* microMIPS is way more fun...
*/
if (mm_insn_16bit(ip->halfword[0])) {
mmi.word = (ip->halfword[0] << 16);
return ((mmi.mm16_r5_format.opcode == mm_swsp16_op &&
mmi.mm16_r5_format.rt == 31) ||
(mmi.mm16_m_format.opcode == mm_pool16c_op &&
mmi.mm16_m_format.func == mm_swm16_op));
}
else {
mmi.halfword[0] = ip->halfword[1];
mmi.halfword[1] = ip->halfword[0];
return ((mmi.mm_m_format.opcode == mm_pool32b_op &&
mmi.mm_m_format.rd > 9 &&
mmi.mm_m_format.base == 29 &&
mmi.mm_m_format.func == mm_swm32_func) ||
(mmi.i_format.opcode == mm_sw32_op &&
mmi.i_format.rs == 29 &&
mmi.i_format.rt == 31));
}
#else
/* sw / sd $ra, offset($sp) */
return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
ip->i_format.rs == 29 &&
ip->i_format.rt == 31;
#endif
}
static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
/*
* jr16,jrc,jalr16,jalrs16
* jal
* jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
* jraddiusp - NOT SUPPORTED
*
* microMIPS is kind of more fun...
*/
union mips_instruction mmi;
mmi.word = (ip->halfword[0] << 16);
if ((mmi.mm16_r5_format.opcode == mm_pool16c_op &&
(mmi.mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op) ||
ip->j_format.opcode == mm_jal32_op)
return 1;
if (ip->r_format.opcode != mm_pool32a_op ||
ip->r_format.func != mm_pool32axf_op)
return 0;
return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op);
#else
if (ip->j_format.opcode == j_op)
return 1;
if (ip->j_format.opcode == jal_op)
return 1;
if (ip->r_format.opcode != spec_op)
return 0;
return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
#endif
}
static inline int is_sp_move_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
/*
* addiusp -imm
* addius5 sp,-imm
* addiu32 sp,sp,-imm
* jraddiusp - NOT SUPPORTED
*
* microMIPS is not more fun...
*/
if (mm_insn_16bit(ip->halfword[0])) {
union mips_instruction mmi;
mmi.word = (ip->halfword[0] << 16);
return ((mmi.mm16_r3_format.opcode == mm_pool16d_op &&
mmi.mm16_r3_format.simmediate & mm_addiusp_func) ||
(mmi.mm16_r5_format.opcode == mm_pool16d_op &&
mmi.mm16_r5_format.rt == 29));
}
return (ip->mm_i_format.opcode == mm_addiu32_op &&
ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29);
#else
/* addiu/daddiu sp,sp,-imm */
if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
return 0;
if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
return 1;
#endif
return 0;
}
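/*
 * Scan a function prologue for its stack frame size and the save slot of
 * the return address. Returns 0 when both were found (nested function),
 * 1 for a leaf function (no saved $ra), or -1 if the prologue could not
 * be analysed.
 */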
static int get_frame_info(struct mips_frame_info *info)
{
#ifdef CONFIG_CPU_MICROMIPS
union mips_instruction *ip = (void *) (((char *) info->func) - 1);
#else
union mips_instruction *ip = info->func;
#endif
unsigned max_insns = info->func_size / sizeof(union mips_instruction);
unsigned i;
info->pc_offset = -1;
info->frame_size = 0;
if (!ip)
goto err;
if (max_insns == 0)
max_insns = 128U; /* unknown function size */
max_insns = min(128U, max_insns);
for (i = 0; i < max_insns; i++, ip++) {
if (is_jump_ins(ip))
break;
if (!info->frame_size) {
if (is_sp_move_ins(ip))
{
#ifdef CONFIG_CPU_MICROMIPS
if (mm_insn_16bit(ip->halfword[0]))
{
unsigned short tmp;
if (ip->halfword[0] & mm_addiusp_func)
{
tmp = (((ip->halfword[0] >> 1) & 0x1ff) << 2);
info->frame_size = -(signed short)(tmp | ((tmp & 0x100) ? 0xfe00 : 0));
} else {
tmp = (ip->halfword[0] >> 1);
info->frame_size = -(signed short)(tmp & 0xf);
}
ip = (void *) &ip->halfword[1];
ip--;
} else
#endif
info->frame_size = - ip->i_format.simmediate;
}
continue;
}
if (info->pc_offset == -1 && is_ra_save_ins(ip)) {
info->pc_offset =
ip->i_format.simmediate / sizeof(long);
break;
}
}
if (info->frame_size && info->pc_offset >= 0) /* nested */
return 0;
if (info->pc_offset < 0) /* leaf */
return 1;
/* prologue seems bogus... */
err:
return -1;
}
static struct mips_frame_info schedule_mfi __read_mostly;
#ifdef CONFIG_KALLSYMS
static unsigned long get___schedule_addr(void)
{
return kallsyms_lookup_name("__schedule");
}
#else
static unsigned long get___schedule_addr(void)
{
union mips_instruction *ip = (void *)schedule;
int max_insns = 8;
int i;
for (i = 0; i < max_insns; i++, ip++) {
if (ip->j_format.opcode == j_op)
return J_TARGET(ip, ip->j_format.target);
}
return 0;
}
#endif
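/*
 * Locate __schedule() (via kallsyms where available, falling back to
 * following the jump at the start of schedule()) and analyse its prologue
 * so that thread_saved_pc() and get_wchan() can unwind blocked tasks.
 */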
static int __init frame_info_init(void)
{
unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
unsigned long ofs;
#endif
unsigned long addr;
addr = get___schedule_addr();
if (!addr)
addr = (unsigned long)schedule;
#ifdef CONFIG_KALLSYMS
kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif
schedule_mfi.func = (void *)addr;
schedule_mfi.func_size = size;
get_frame_info(&schedule_mfi);
/*
* Without schedule() frame info, result given by
* thread_saved_pc() and get_wchan() are not reliable.
*/
if (schedule_mfi.pc_offset < 0)
printk("Can't analyze schedule() prologue at %p\n", schedule);
return 0;
}
arch_initcall(frame_info_init);
/*
* Return saved PC of a blocked thread.
*/
unsigned long thread_saved_pc(struct task_struct *tsk)
{
struct thread_struct *t = &tsk->thread;
/* New born processes are a special case */
if (t->reg31 == (unsigned long) ret_from_fork)
return t->reg31;
if (schedule_mfi.pc_offset < 0)
return 0;
return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}
#ifdef CONFIG_KALLSYMS
/* generic stack unwinding function */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
unsigned long *sp,
unsigned long pc,
unsigned long *ra)
{
struct mips_frame_info info;
unsigned long size, ofs;
int leaf;
extern void ret_from_irq(void);
extern void ret_from_exception(void);
if (!stack_page)
return 0;
/*
* If we reached the bottom of interrupt context,
* return saved pc in pt_regs.
*/
if (pc == (unsigned long)ret_from_irq ||
pc == (unsigned long)ret_from_exception) {
struct pt_regs *regs;
if (*sp >= stack_page &&
*sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
regs = (struct pt_regs *)*sp;
pc = regs->cp0_epc;
if (__kernel_text_address(pc)) {
*sp = regs->regs[29];
*ra = regs->regs[31];
return pc;
}
}
return 0;
}
if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
return 0;
/*
* Return ra if an exception occurred at the first instruction
*/
if (unlikely(ofs == 0)) {
pc = *ra;
*ra = 0;
return pc;
}
info.func = (void *)(pc - ofs);
info.func_size = ofs; /* analyze from start to ofs */
leaf = get_frame_info(&info);
if (leaf < 0)
return 0;
if (*sp < stack_page ||
*sp + info.frame_size > stack_page + THREAD_SIZE - 32)
return 0;
if (leaf)
/*
* In some extreme cases, get_frame_info() can wrongly
* consider a nested function to be a leaf one. In such
* cases, avoid always returning the same value.
*/
pc = pc != *ra ? *ra : 0;
else
pc = ((unsigned long *)(*sp))[info.pc_offset];
*sp += info.frame_size;
*ra = 0;
return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);
/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
unsigned long pc, unsigned long *ra)
{
unsigned long stack_page = (unsigned long)task_stack_page(task);
return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif
/*
* get_wchan - a maintenance nightmare^W^Wpain in the ass ...
*/
unsigned long get_wchan(struct task_struct *task)
{
unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
unsigned long sp;
unsigned long ra = 0;
#endif
if (!task || task == current || task->state == TASK_RUNNING)
goto out;
if (!task_stack_page(task))
goto out;
pc = thread_saved_pc(task);
#ifdef CONFIG_KALLSYMS
sp = task->thread.reg29 + schedule_mfi.frame_size;
while (in_sched_functions(pc))
pc = unwind_stack(task, &sp, pc, &ra);
#endif
out:
return pc;
}
/*
* Don't forget that the stack pointer must be aligned on a 8 bytes
* boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
*/
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
sp -= get_random_int() & ~PAGE_MASK;
return sp & ALMASK;
}

57
arch/mips/kernel/prom.c Normal file
View file

@ -0,0 +1,57 @@
/*
* MIPS support for CONFIG_OF device tree support
*
* Copyright (C) 2010 Cisco Systems Inc. <dediao@cisco.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <asm/page.h>
#include <asm/prom.h>
static char mips_machine_name[64] = "Unknown";
__init void mips_set_machine_name(const char *name)
{
if (name == NULL)
return;
strlcpy(mips_machine_name, name, sizeof(mips_machine_name));
pr_info("MIPS: machine is %s\n", mips_get_machine_name());
}
char *mips_get_machine_name(void)
{
return mips_machine_name;
}
#ifdef CONFIG_OF
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
return add_memory_region(base, size, BOOT_MEM_RAM);
}
void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));
}
void __init __dt_setup_arch(void *bph)
{
if (!early_init_dt_scan(bph))
return;
mips_set_machine_name(of_flat_dt_get_machine_name());
}
#endif

810
arch/mips/kernel/ptrace.c Normal file
View file

@ -0,0 +1,810 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 Ross Biro
* Copyright (C) Linus Torvalds
* Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
* Copyright (C) 1996 David S. Miller
* Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
* Copyright (C) 1999 MIPS Technologies, Inc.
* Copyright (C) 2000 Ulf Carlsson
*
* At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
* binaries.
*/
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/smp.h>
#include <linux/security.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/ftrace.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/syscall.h>
#include <asm/uaccess.h>
#include <asm/bootinfo.h>
#include <asm/reg.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
/*
* Called by kernel/ptrace.c when detaching..
*
* Make sure single step bits etc are not set.
*/
void ptrace_disable(struct task_struct *child)
{
/* Don't load the watchpoint registers for the ex-child. */
clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
}
/*
* Read a general register set. We always use the 64-bit format, even
* for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
* Registers are sign extended to fill the available space.
*/
int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data)
{
struct pt_regs *regs;
int i;
if (!access_ok(VERIFY_WRITE, data, 38 * 8))
return -EIO;
regs = task_pt_regs(child);
for (i = 0; i < 32; i++)
__put_user((long)regs->regs[i], (__s64 __user *)&data->regs[i]);
__put_user((long)regs->lo, (__s64 __user *)&data->lo);
__put_user((long)regs->hi, (__s64 __user *)&data->hi);
__put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
__put_user((long)regs->cp0_badvaddr, (__s64 __user *)&data->cp0_badvaddr);
__put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status);
__put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause);
return 0;
}
/*
* Write a general register set. As for PTRACE_GETREGS, we always use
* the 64-bit format. On a 32-bit kernel only the lower order half
* (according to endianness) will be used.
*/
int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
{
struct pt_regs *regs;
int i;
if (!access_ok(VERIFY_READ, data, 38 * 8))
return -EIO;
regs = task_pt_regs(child);
for (i = 0; i < 32; i++)
__get_user(regs->regs[i], (__s64 __user *)&data->regs[i]);
__get_user(regs->lo, (__s64 __user *)&data->lo);
__get_user(regs->hi, (__s64 __user *)&data->hi);
__get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
/* badvaddr, status, and cause may not be written. */
return 0;
}
int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
{
int i;
if (!access_ok(VERIFY_WRITE, data, 33 * 8))
return -EIO;
if (tsk_used_math(child)) {
union fpureg *fregs = get_fpu_regs(child);
for (i = 0; i < 32; i++)
__put_user(get_fpr64(&fregs[i], 0),
i + (__u64 __user *)data);
} else {
for (i = 0; i < 32; i++)
__put_user((__u64) -1, i + (__u64 __user *) data);
}
__put_user(child->thread.fpu.fcr31, data + 64);
__put_user(boot_cpu_data.fpu_id, data + 65);
return 0;
}
int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
union fpureg *fregs;
u64 fpr_val;
int i;
if (!access_ok(VERIFY_READ, data, 33 * 8))
return -EIO;
fregs = get_fpu_regs(child);
for (i = 0; i < 32; i++) {
__get_user(fpr_val, i + (__u64 __user *)data);
set_fpr64(&fregs[i], 0, fpr_val);
}
__get_user(child->thread.fpu.fcr31, data + 64);
child->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
/* FIR may not be written. */
return 0;
}
int ptrace_get_watch_regs(struct task_struct *child,
struct pt_watch_regs __user *addr)
{
enum pt_watch_style style;
int i;
if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
return -EIO;
if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs)))
return -EIO;
#ifdef CONFIG_32BIT
style = pt_watch_style_mips32;
#define WATCH_STYLE mips32
#else
style = pt_watch_style_mips64;
#define WATCH_STYLE mips64
#endif
__put_user(style, &addr->style);
__put_user(boot_cpu_data.watch_reg_use_cnt,
&addr->WATCH_STYLE.num_valid);
for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
__put_user(child->thread.watch.mips3264.watchlo[i],
&addr->WATCH_STYLE.watchlo[i]);
__put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff,
&addr->WATCH_STYLE.watchhi[i]);
__put_user(boot_cpu_data.watch_reg_masks[i],
&addr->WATCH_STYLE.watch_masks[i]);
}
for (; i < 8; i++) {
__put_user(0, &addr->WATCH_STYLE.watchlo[i]);
__put_user(0, &addr->WATCH_STYLE.watchhi[i]);
__put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
}
return 0;
}
int ptrace_set_watch_regs(struct task_struct *child,
struct pt_watch_regs __user *addr)
{
int i;
int watch_active = 0;
unsigned long lt[NUM_WATCH_REGS];
u16 ht[NUM_WATCH_REGS];
if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
return -EIO;
if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs)))
return -EIO;
/* Check the values. */
for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
#ifdef CONFIG_32BIT
if (lt[i] & __UA_LIMIT)
return -EINVAL;
#else
if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
if (lt[i] & 0xffffffff80000000UL)
return -EINVAL;
} else {
if (lt[i] & __UA_LIMIT)
return -EINVAL;
}
#endif
__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
if (ht[i] & ~0xff8)
return -EINVAL;
}
/* Install them. */
for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
if (lt[i] & 7)
watch_active = 1;
child->thread.watch.mips3264.watchlo[i] = lt[i];
/* Set the G bit. */
child->thread.watch.mips3264.watchhi[i] = ht[i];
}
if (watch_active)
set_tsk_thread_flag(child, TIF_LOAD_WATCH);
else
clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
return 0;
}
/* regset get/set implementations */
#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
static int gpr32_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
struct pt_regs *regs = task_pt_regs(target);
u32 uregs[ELF_NGREG] = {};
unsigned i;
for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
/* k0/k1 are copied as zero. */
if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
continue;
uregs[i] = regs->regs[i - MIPS32_EF_R0];
}
uregs[MIPS32_EF_LO] = regs->lo;
uregs[MIPS32_EF_HI] = regs->hi;
uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
sizeof(uregs));
}
static int gpr32_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct pt_regs *regs = task_pt_regs(target);
u32 uregs[ELF_NGREG];
unsigned start, num_regs, i;
int err;
start = pos / sizeof(u32);
num_regs = count / sizeof(u32);
if (start + num_regs > ELF_NGREG)
return -EIO;
err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
sizeof(uregs));
if (err)
return err;
for (i = start; i < num_regs; i++) {
/*
* Cast all values to signed here so that if this is a 64-bit
* kernel, the supplied 32-bit values will be sign extended.
*/
switch (i) {
case MIPS32_EF_R1 ... MIPS32_EF_R25:
/* k0/k1 are ignored. */
case MIPS32_EF_R28 ... MIPS32_EF_R31:
regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
break;
case MIPS32_EF_LO:
regs->lo = (s32)uregs[i];
break;
case MIPS32_EF_HI:
regs->hi = (s32)uregs[i];
break;
case MIPS32_EF_CP0_EPC:
regs->cp0_epc = (s32)uregs[i];
break;
}
}
return 0;
}
#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
#ifdef CONFIG_64BIT
static int gpr64_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
struct pt_regs *regs = task_pt_regs(target);
u64 uregs[ELF_NGREG] = {};
unsigned i;
for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
/* k0/k1 are copied as zero. */
if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
continue;
uregs[i] = regs->regs[i - MIPS64_EF_R0];
}
uregs[MIPS64_EF_LO] = regs->lo;
uregs[MIPS64_EF_HI] = regs->hi;
uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
sizeof(uregs));
}
static int gpr64_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct pt_regs *regs = task_pt_regs(target);
u64 uregs[ELF_NGREG];
unsigned start, num_regs, i;
int err;
start = pos / sizeof(u64);
num_regs = count / sizeof(u64);
if (start + num_regs > ELF_NGREG)
return -EIO;
err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
sizeof(uregs));
if (err)
return err;
for (i = start; i < num_regs; i++) {
switch (i) {
case MIPS64_EF_R1 ... MIPS64_EF_R25:
/* k0/k1 are ignored. */
case MIPS64_EF_R28 ... MIPS64_EF_R31:
regs->regs[i - MIPS64_EF_R0] = uregs[i];
break;
case MIPS64_EF_LO:
regs->lo = uregs[i];
break;
case MIPS64_EF_HI:
regs->hi = uregs[i];
break;
case MIPS64_EF_CP0_EPC:
regs->cp0_epc = uregs[i];
break;
}
}
return 0;
}
#endif /* CONFIG_64BIT */
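/*
 * Copy the FPU registers to/from a regset buffer. When the hardware FP
 * register width matches elf_fpreg_t the whole context can be copied
 * directly; otherwise each 64-bit value is extracted/inserted one register
 * at a time via get_fpr64()/set_fpr64(). FCSR handling is still missing
 * (see the XXX notes).
 */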
static int fpr_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
unsigned i;
int err;
u64 fpr_val;
/* XXX fcr31 */
if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.fpu,
0, sizeof(elf_fpregset_t));
for (i = 0; i < NUM_FPU_REGS; i++) {
fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&fpr_val, i * sizeof(elf_fpreg_t),
(i + 1) * sizeof(elf_fpreg_t));
if (err)
return err;
}
return 0;
}
static int fpr_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
unsigned i;
int err;
u64 fpr_val;
/* XXX fcr31 */
if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fpu,
0, sizeof(elf_fpregset_t));
for (i = 0; i < NUM_FPU_REGS; i++) {
err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&fpr_val, i * sizeof(elf_fpreg_t),
(i + 1) * sizeof(elf_fpreg_t));
if (err)
return err;
set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
}
return 0;
}
enum mips_regset {
REGSET_GPR,
REGSET_FPR,
};
#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
static const struct user_regset mips_regsets[] = {
[REGSET_GPR] = {
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(unsigned int),
.align = sizeof(unsigned int),
.get = gpr32_get,
.set = gpr32_set,
},
[REGSET_FPR] = {
.core_note_type = NT_PRFPREG,
.n = ELF_NFPREG,
.size = sizeof(elf_fpreg_t),
.align = sizeof(elf_fpreg_t),
.get = fpr_get,
.set = fpr_set,
},
};
static const struct user_regset_view user_mips_view = {
.name = "mips",
.e_machine = ELF_ARCH,
.ei_osabi = ELF_OSABI,
.regsets = mips_regsets,
.n = ARRAY_SIZE(mips_regsets),
};
#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
#ifdef CONFIG_64BIT
static const struct user_regset mips64_regsets[] = {
[REGSET_GPR] = {
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(unsigned long),
.align = sizeof(unsigned long),
.get = gpr64_get,
.set = gpr64_set,
},
[REGSET_FPR] = {
.core_note_type = NT_PRFPREG,
.n = ELF_NFPREG,
.size = sizeof(elf_fpreg_t),
.align = sizeof(elf_fpreg_t),
.get = fpr_get,
.set = fpr_set,
},
};
static const struct user_regset_view user_mips64_view = {
.name = "mips64",
.e_machine = ELF_ARCH,
.ei_osabi = ELF_OSABI,
.regsets = mips64_regsets,
.n = ARRAY_SIZE(mips64_regsets),
};
#endif /* CONFIG_64BIT */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_32BIT
return &user_mips_view;
#else
#ifdef CONFIG_MIPS32_O32
if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
return &user_mips_view;
#endif
return &user_mips64_view;
#endif
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
int ret;
void __user *addrp = (void __user *) addr;
void __user *datavp = (void __user *) data;
unsigned long __user *datalp = (void __user *) data;
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
case PTRACE_PEEKDATA:
ret = generic_ptrace_peekdata(child, addr, data);
break;
/* Read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
struct pt_regs *regs;
union fpureg *fregs;
unsigned long tmp = 0;
regs = task_pt_regs(child);
ret = 0; /* Default return value. */
switch (addr) {
case 0 ... 31:
tmp = regs->regs[addr];
break;
case FPR_BASE ... FPR_BASE + 31:
if (!tsk_used_math(child)) {
/* FP not yet used */
tmp = -1;
break;
}
fregs = get_fpu_regs(child);
#ifdef CONFIG_32BIT
if (test_thread_flag(TIF_32BIT_FPREGS)) {
/*
* The odd registers are actually the high
* order bits of the values stored in the even
* registers - unless we're using r2k_switch.S.
*/
tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
addr & 1);
break;
}
#endif
tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
break;
case PC:
tmp = regs->cp0_epc;
break;
case CAUSE:
tmp = regs->cp0_cause;
break;
case BADVADDR:
tmp = regs->cp0_badvaddr;
break;
case MMHI:
tmp = regs->hi;
break;
case MMLO:
tmp = regs->lo;
break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
case ACX:
tmp = regs->acx;
break;
#endif
case FPC_CSR:
tmp = child->thread.fpu.fcr31;
break;
case FPC_EIR:
/* implementation / version register */
tmp = boot_cpu_data.fpu_id;
break;
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
if (!cpu_has_dsp) {
tmp = 0;
ret = -EIO;
goto out;
}
dregs = __get_dsp_regs(child);
tmp = (unsigned long) (dregs[addr - DSP_BASE]);
break;
}
case DSP_CONTROL:
if (!cpu_has_dsp) {
tmp = 0;
ret = -EIO;
goto out;
}
tmp = child->thread.dsp.dspcontrol;
break;
default:
tmp = 0;
ret = -EIO;
goto out;
}
ret = put_user(tmp, datalp);
break;
}
/* when I and D space are separate, this will have to be fixed. */
case PTRACE_POKETEXT: /* write the word at location addr. */
case PTRACE_POKEDATA:
ret = generic_ptrace_pokedata(child, addr, data);
break;
case PTRACE_POKEUSR: {
struct pt_regs *regs;
ret = 0;
regs = task_pt_regs(child);
switch (addr) {
case 0 ... 31:
regs->regs[addr] = data;
break;
case FPR_BASE ... FPR_BASE + 31: {
union fpureg *fregs = get_fpu_regs(child);
if (!tsk_used_math(child)) {
/* FP not yet used */
memset(&child->thread.fpu, ~0,
sizeof(child->thread.fpu));
child->thread.fpu.fcr31 = 0;
}
#ifdef CONFIG_32BIT
if (test_thread_flag(TIF_32BIT_FPREGS)) {
/*
* The odd registers are actually the high
* order bits of the values stored in the even
* registers - unless we're using r2k_switch.S.
*/
set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
addr & 1, data);
break;
}
#endif
set_fpr64(&fregs[addr - FPR_BASE], 0, data);
break;
}
case PC:
regs->cp0_epc = data;
break;
case MMHI:
regs->hi = data;
break;
case MMLO:
regs->lo = data;
break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
case ACX:
regs->acx = data;
break;
#endif
case FPC_CSR:
child->thread.fpu.fcr31 = data & ~FPU_CSR_ALL_X;
break;
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
if (!cpu_has_dsp) {
ret = -EIO;
break;
}
dregs = __get_dsp_regs(child);
dregs[addr - DSP_BASE] = data;
break;
}
case DSP_CONTROL:
if (!cpu_has_dsp) {
ret = -EIO;
break;
}
child->thread.dsp.dspcontrol = data;
break;
default:
/* The rest are not allowed. */
ret = -EIO;
break;
}
break;
}
case PTRACE_GETREGS:
ret = ptrace_getregs(child, datavp);
break;
case PTRACE_SETREGS:
ret = ptrace_setregs(child, datavp);
break;
case PTRACE_GETFPREGS:
ret = ptrace_getfpregs(child, datavp);
break;
case PTRACE_SETFPREGS:
ret = ptrace_setfpregs(child, datavp);
break;
case PTRACE_GET_THREAD_AREA:
ret = put_user(task_thread_info(child)->tp_value, datalp);
break;
case PTRACE_GET_WATCH_REGS:
ret = ptrace_get_watch_regs(child, addrp);
break;
case PTRACE_SET_WATCH_REGS:
ret = ptrace_set_watch_regs(child, addrp);
break;
default:
ret = ptrace_request(child, request, addr, data);
break;
}
out:
return ret;
}
/*
* Notification of system call entry/exit
* - triggered by current->work.syscall_trace
*/
asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
{
long ret = 0;
user_exit();
if (secure_computing() == -1)
return -1;
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs))
ret = -1;
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->regs[2]);
audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
regs->regs[6], regs->regs[7]);
return syscall;
}
/*
* Notification of system call entry/exit
* - triggered by current->work.syscall_trace
*/
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
/*
* We may come here right after calling schedule_user()
* or do_notify_resume(), in which case we can be in RCU
* user mode.
*/
user_exit();
audit_syscall_exit(regs);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->regs[2]);
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, 0);
user_enter();
}

302
arch/mips/kernel/ptrace32.c Normal file
View file

@ -0,0 +1,302 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 Ross Biro
* Copyright (C) Linus Torvalds
* Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
* Copyright (C) 1996 David S. Miller
* Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
* Copyright (C) 1999 MIPS Technologies, Inc.
* Copyright (C) 2000 Ulf Carlsson
*
* At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
* binaries.
*/
#include <linux/compiler.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/smp.h>
#include <linux/security.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/reg.h>
#include <asm/uaccess.h>
#include <asm/bootinfo.h>
/*
* Tracing a 32-bit process with a 64-bit strace and vice versa will not
* work. I don't know how to fix this.
*/
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t caddr, compat_ulong_t cdata)
{
int addr = caddr;
int data = cdata;
int ret;
switch (request) {
/*
* Read 4 bytes of the other process' storage
* data is a pointer specifying where the user wants the
* 4 bytes copied into
* addr is a pointer in the user's storage that contains an 8 byte
* address in the other process of the 4 bytes that are to be read
* (this is run in a 32-bit process looking at a 64-bit process)
* when I and D space are separate, these will need to be fixed.
*/
case PTRACE_PEEKTEXT_3264:
case PTRACE_PEEKDATA_3264: {
u32 tmp;
int copied;
u32 __user * addrOthers;
ret = -EIO;
/* Get the addr in the other process that we want to read */
if (get_user(addrOthers, (u32 __user * __user *) (unsigned long) addr) != 0)
break;
copied = access_process_vm(child, (u64)addrOthers, &tmp,
sizeof(tmp), 0);
if (copied != sizeof(tmp))
break;
ret = put_user(tmp, (u32 __user *) (unsigned long) data);
break;
}
/* Read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
struct pt_regs *regs;
union fpureg *fregs;
unsigned int tmp;
regs = task_pt_regs(child);
ret = 0; /* Default return value. */
switch (addr) {
case 0 ... 31:
tmp = regs->regs[addr];
break;
case FPR_BASE ... FPR_BASE + 31:
if (!tsk_used_math(child)) {
/* FP not yet used */
tmp = -1;
break;
}
fregs = get_fpu_regs(child);
if (test_thread_flag(TIF_32BIT_FPREGS)) {
/*
* The odd registers are actually the high
* order bits of the values stored in the even
* registers - unless we're using r2k_switch.S.
*/
tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
addr & 1);
break;
}
tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
break;
case PC:
tmp = regs->cp0_epc;
break;
case CAUSE:
tmp = regs->cp0_cause;
break;
case BADVADDR:
tmp = regs->cp0_badvaddr;
break;
case MMHI:
tmp = regs->hi;
break;
case MMLO:
tmp = regs->lo;
break;
case FPC_CSR:
tmp = child->thread.fpu.fcr31;
break;
case FPC_EIR:
/* implementation / version register */
tmp = boot_cpu_data.fpu_id;
break;
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
if (!cpu_has_dsp) {
tmp = 0;
ret = -EIO;
goto out;
}
dregs = __get_dsp_regs(child);
tmp = (unsigned long) (dregs[addr - DSP_BASE]);
break;
}
case DSP_CONTROL:
if (!cpu_has_dsp) {
tmp = 0;
ret = -EIO;
goto out;
}
tmp = child->thread.dsp.dspcontrol;
break;
default:
tmp = 0;
ret = -EIO;
goto out;
}
ret = put_user(tmp, (unsigned __user *) (unsigned long) data);
break;
}
/*
* Write 4 bytes into the other process' storage
* data is the 4 bytes that the user wants written
* addr is a pointer in the user's storage that contains an
* 8 byte address in the other process where the 4 bytes
* are to be written
* (this is run in a 32-bit process looking at a 64-bit process)
* when I and D space are separate, these will need to be fixed.
*/
case PTRACE_POKETEXT_3264:
case PTRACE_POKEDATA_3264: {
u32 __user * addrOthers;
/* Get the addr in the other process that we want to write into */
ret = -EIO;
if (get_user(addrOthers, (u32 __user * __user *) (unsigned long) addr) != 0)
break;
ret = 0;
if (access_process_vm(child, (u64)addrOthers, &data,
sizeof(data), 1) == sizeof(data))
break;
ret = -EIO;
break;
}
case PTRACE_POKEUSR: {
struct pt_regs *regs;
ret = 0;
regs = task_pt_regs(child);
switch (addr) {
case 0 ... 31:
regs->regs[addr] = data;
break;
case FPR_BASE ... FPR_BASE + 31: {
union fpureg *fregs = get_fpu_regs(child);
if (!tsk_used_math(child)) {
/* FP not yet used */
memset(&child->thread.fpu, ~0,
sizeof(child->thread.fpu));
child->thread.fpu.fcr31 = 0;
}
if (test_thread_flag(TIF_32BIT_FPREGS)) {
/*
* The odd registers are actually the high
* order bits of the values stored in the even
* registers - unless we're using r2k_switch.S.
*/
set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
addr & 1, data);
break;
}
set_fpr64(&fregs[addr - FPR_BASE], 0, data);
break;
}
case PC:
regs->cp0_epc = data;
break;
case MMHI:
regs->hi = data;
break;
case MMLO:
regs->lo = data;
break;
case FPC_CSR:
child->thread.fpu.fcr31 = data;
break;
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
if (!cpu_has_dsp) {
ret = -EIO;
break;
}
dregs = __get_dsp_regs(child);
dregs[addr - DSP_BASE] = data;
break;
}
case DSP_CONTROL:
if (!cpu_has_dsp) {
ret = -EIO;
break;
}
child->thread.dsp.dspcontrol = data;
break;
default:
/* The rest are not allowed. */
ret = -EIO;
break;
}
break;
}
case PTRACE_GETREGS:
ret = ptrace_getregs(child,
(struct user_pt_regs __user *) (__u64) data);
break;
case PTRACE_SETREGS:
ret = ptrace_setregs(child,
(struct user_pt_regs __user *) (__u64) data);
break;
case PTRACE_GETFPREGS:
ret = ptrace_getfpregs(child, (__u32 __user *) (__u64) data);
break;
case PTRACE_SETFPREGS:
ret = ptrace_setfpregs(child, (__u32 __user *) (__u64) data);
break;
case PTRACE_GET_THREAD_AREA:
ret = put_user(task_thread_info(child)->tp_value,
(unsigned int __user *) (unsigned long) data);
break;
case PTRACE_GET_THREAD_AREA_3264:
ret = put_user(task_thread_info(child)->tp_value,
(unsigned long __user *) (unsigned long) data);
break;
case PTRACE_GET_WATCH_REGS:
ret = ptrace_get_watch_regs(child,
(struct pt_watch_regs __user *) (unsigned long) addr);
break;
case PTRACE_SET_WATCH_REGS:
ret = ptrace_set_watch_regs(child,
(struct pt_watch_regs __user *) (unsigned long) addr);
break;
default:
ret = compat_ptrace_request(child, request, addr, data);
break;
}
out:
return ret;
}

132
arch/mips/kernel/r2300_fpu.S Normal file
View file

@ -0,0 +1,132 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1996, 1998 by Ralf Baechle
*
* Multi-arch abstraction and asm macros for easier reading:
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
*
* Further modifications to make this work:
* Copyright (c) 1998 Harald Koerfgen
*/
#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
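/*
 * EX() wraps a user-memory access with an exception table entry so that a
 * fault while saving or restoring the sigcontext branches to bad_stack
 * instead of oopsing.
 */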
#define EX(a,b) \
9: a,##b; \
.section __ex_table,"a"; \
PTR 9b,bad_stack; \
.previous
.set noreorder
.set mips1
/* Save floating point context */
LEAF(_save_fp_context)
.set push
SET_HARDFLOAT
li v0, 0 # assume success
cfc1 t1,fcr31
EX(swc1 $f0,(SC_FPREGS+0)(a0))
EX(swc1 $f1,(SC_FPREGS+8)(a0))
EX(swc1 $f2,(SC_FPREGS+16)(a0))
EX(swc1 $f3,(SC_FPREGS+24)(a0))
EX(swc1 $f4,(SC_FPREGS+32)(a0))
EX(swc1 $f5,(SC_FPREGS+40)(a0))
EX(swc1 $f6,(SC_FPREGS+48)(a0))
EX(swc1 $f7,(SC_FPREGS+56)(a0))
EX(swc1 $f8,(SC_FPREGS+64)(a0))
EX(swc1 $f9,(SC_FPREGS+72)(a0))
EX(swc1 $f10,(SC_FPREGS+80)(a0))
EX(swc1 $f11,(SC_FPREGS+88)(a0))
EX(swc1 $f12,(SC_FPREGS+96)(a0))
EX(swc1 $f13,(SC_FPREGS+104)(a0))
EX(swc1 $f14,(SC_FPREGS+112)(a0))
EX(swc1 $f15,(SC_FPREGS+120)(a0))
EX(swc1 $f16,(SC_FPREGS+128)(a0))
EX(swc1 $f17,(SC_FPREGS+136)(a0))
EX(swc1 $f18,(SC_FPREGS+144)(a0))
EX(swc1 $f19,(SC_FPREGS+152)(a0))
EX(swc1 $f20,(SC_FPREGS+160)(a0))
EX(swc1 $f21,(SC_FPREGS+168)(a0))
EX(swc1 $f22,(SC_FPREGS+176)(a0))
EX(swc1 $f23,(SC_FPREGS+184)(a0))
EX(swc1 $f24,(SC_FPREGS+192)(a0))
EX(swc1 $f25,(SC_FPREGS+200)(a0))
EX(swc1 $f26,(SC_FPREGS+208)(a0))
EX(swc1 $f27,(SC_FPREGS+216)(a0))
EX(swc1 $f28,(SC_FPREGS+224)(a0))
EX(swc1 $f29,(SC_FPREGS+232)(a0))
EX(swc1 $f30,(SC_FPREGS+240)(a0))
EX(swc1 $f31,(SC_FPREGS+248)(a0))
EX(sw t1,(SC_FPC_CSR)(a0))
cfc1 t0,$0 # implementation/version
jr ra
.set pop
.set nomacro
EX(sw t0,(SC_FPC_EIR)(a0))
.set macro
END(_save_fp_context)
/*
* Restore FPU state:
* - fp gp registers
* - cp1 status/control register
*
* We base the decision which registers to restore from the signal stack
* frame on the current content of c0_status, not on the content of the
* stack frame which might have been changed by the user.
*/
LEAF(_restore_fp_context)
.set push
SET_HARDFLOAT
li v0, 0 # assume success
EX(lw t0,(SC_FPC_CSR)(a0))
EX(lwc1 $f0,(SC_FPREGS+0)(a0))
EX(lwc1 $f1,(SC_FPREGS+8)(a0))
EX(lwc1 $f2,(SC_FPREGS+16)(a0))
EX(lwc1 $f3,(SC_FPREGS+24)(a0))
EX(lwc1 $f4,(SC_FPREGS+32)(a0))
EX(lwc1 $f5,(SC_FPREGS+40)(a0))
EX(lwc1 $f6,(SC_FPREGS+48)(a0))
EX(lwc1 $f7,(SC_FPREGS+56)(a0))
EX(lwc1 $f8,(SC_FPREGS+64)(a0))
EX(lwc1 $f9,(SC_FPREGS+72)(a0))
EX(lwc1 $f10,(SC_FPREGS+80)(a0))
EX(lwc1 $f11,(SC_FPREGS+88)(a0))
EX(lwc1 $f12,(SC_FPREGS+96)(a0))
EX(lwc1 $f13,(SC_FPREGS+104)(a0))
EX(lwc1 $f14,(SC_FPREGS+112)(a0))
EX(lwc1 $f15,(SC_FPREGS+120)(a0))
EX(lwc1 $f16,(SC_FPREGS+128)(a0))
EX(lwc1 $f17,(SC_FPREGS+136)(a0))
EX(lwc1 $f18,(SC_FPREGS+144)(a0))
EX(lwc1 $f19,(SC_FPREGS+152)(a0))
EX(lwc1 $f20,(SC_FPREGS+160)(a0))
EX(lwc1 $f21,(SC_FPREGS+168)(a0))
EX(lwc1 $f22,(SC_FPREGS+176)(a0))
EX(lwc1 $f23,(SC_FPREGS+184)(a0))
EX(lwc1 $f24,(SC_FPREGS+192)(a0))
EX(lwc1 $f25,(SC_FPREGS+200)(a0))
EX(lwc1 $f26,(SC_FPREGS+208)(a0))
EX(lwc1 $f27,(SC_FPREGS+216)(a0))
EX(lwc1 $f28,(SC_FPREGS+224)(a0))
EX(lwc1 $f29,(SC_FPREGS+232)(a0))
EX(lwc1 $f30,(SC_FPREGS+240)(a0))
EX(lwc1 $f31,(SC_FPREGS+248)(a0))
jr ra
ctc1 t0,fcr31
.set pop
END(_restore_fp_context)
.set reorder
.type fault@function
.ent fault
fault: li v0, -EFAULT
jr ra
.end fault

172
arch/mips/kernel/r2300_switch.S Normal file
View file

@ -0,0 +1,172 @@
/*
* r2300_switch.S: R2300 specific task switching code.
*
* Copyright (C) 1994, 1995, 1996, 1999 by Ralf Baechle
* Copyright (C) 1994, 1995, 1996 by Andreas Busse
*
* Multi-cpu abstraction and macros for easier reading:
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
*
* Further modifications to make this work:
* Copyright (c) 1998-2000 Harald Koerfgen
*/
#include <asm/asm.h>
#include <asm/cachectl.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/thread_info.h>
#include <asm/asmmacro.h>
.set mips1
.align 5
/*
* Offset to the current process status flags, the first 32 bytes of the
* stack are not used.
*/
#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
/*
* FPU context is saved iff the process has used its FPU in the current
* time slice as indicated by TIF_USEDFPU. In any case, the CU1 bit for user
* space STATUS register should be 0, so that a process *always* starts its
* userland with FPU disabled after each context switch.
*
* FPU will be enabled as soon as the process accesses FPU again, through
* do_cpu() trap.
*/
/*
* task_struct *resume(task_struct *prev, task_struct *next,
* struct thread_info *next_ti, int usedfpu)
*/
LEAF(resume)
mfc0 t1, CP0_STATUS
sw t1, THREAD_STATUS(a0)
cpu_save_nonscratch a0
sw ra, THREAD_REG31(a0)
beqz a3, 1f
PTR_L t3, TASK_THREAD_INFO(a0)
/*
* clear saved user stack CU1 bit
*/
lw t0, ST_OFF(t3)
li t1, ~ST0_CU1
and t0, t0, t1
sw t0, ST_OFF(t3)
fpu_save_single a0, t0 # clobbers t0
1:
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
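/*
 * Propagate the incoming task's stack canary into the global
 * __stack_chk_guard that GCC's stack protector compares against;
 * a single global guard can only track the current task on UP,
 * hence the !CONFIG_SMP condition.
 */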
PTR_LA t8, __stack_chk_guard
LONG_L t9, TASK_STACK_CANARY(a1)
LONG_S t9, 0(t8)
#endif
/*
* The order of restoring the registers takes care of the race when
* updating $28, $29 and kernelsp without disabling interrupts.
*/
move $28, a2
cpu_restore_nonscratch a1
addiu t1, $28, _THREAD_SIZE - 32
sw t1, kernelsp
mfc0 t1, CP0_STATUS /* Do we really need this? */
li a3, 0xff01
and t1, a3
lw a2, THREAD_STATUS(a1)
nor a3, $0, a3
and a2, a3
or a2, t1
mtc0 a2, CP0_STATUS
move v0, a0
jr ra
END(resume)
/*
* Save a thread's fp context.
*/
LEAF(_save_fp)
fpu_save_single a0, t1 # clobbers t1
jr ra
END(_save_fp)
/*
* Restore a thread's fp context.
*/
LEAF(_restore_fp)
fpu_restore_single a0, t1 # clobbers t1
jr ra
END(_restore_fp)
/*
* Load the FPU with signaling NaNs. The bit pattern we're using has the
* property that it represents signaling NaNs no matter whether it is
* considered as single or as double precision.
*
* We initialize fcr31 to rounding to nearest, no exceptions.
*/
#define FPU_DEFAULT 0x00000000
.set push
SET_HARDFLOAT
LEAF(_init_fpu)
mfc0 t0, CP0_STATUS
li t1, ST0_CU1
or t0, t1
mtc0 t0, CP0_STATUS
li t1, FPU_DEFAULT
ctc1 t1, fcr31
li t0, -1
mtc1 t0, $f0
mtc1 t0, $f1
mtc1 t0, $f2
mtc1 t0, $f3
mtc1 t0, $f4
mtc1 t0, $f5
mtc1 t0, $f6
mtc1 t0, $f7
mtc1 t0, $f8
mtc1 t0, $f9
mtc1 t0, $f10
mtc1 t0, $f11
mtc1 t0, $f12
mtc1 t0, $f13
mtc1 t0, $f14
mtc1 t0, $f15
mtc1 t0, $f16
mtc1 t0, $f17
mtc1 t0, $f18
mtc1 t0, $f19
mtc1 t0, $f20
mtc1 t0, $f21
mtc1 t0, $f22
mtc1 t0, $f23
mtc1 t0, $f24
mtc1 t0, $f25
mtc1 t0, $f26
mtc1 t0, $f27
mtc1 t0, $f28
mtc1 t0, $f29
mtc1 t0, $f30
mtc1 t0, $f31
jr ra
END(_init_fpu)
.set pop

277
arch/mips/kernel/r4k_fpu.S Normal file

@ -0,0 +1,277 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1996, 98, 99, 2000, 01 Ralf Baechle
*
* Multi-arch abstraction and asm macros for easier reading:
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
*
* Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000 MIPS Technologies, Inc.
* Copyright (C) 1999, 2001 Silicon Graphics, Inc.
*/
#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
#undef fp
.macro EX insn, reg, src
.set push
SET_HARDFLOAT
.set nomacro
.ex\@: \insn \reg, \src
.set pop
.section __ex_table,"a"
PTR .ex\@, fault
.previous
.endm
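/*
 * If a user access wrapped in EX faults, the __ex_table entry emitted
 * above redirects execution to the 'fault' stub at the end of this file,
 * which returns -EFAULT to the caller.
 */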
.set noreorder
.set arch=r4000
LEAF(_save_fp_context)
.set push
SET_HARDFLOAT
cfc1 t1, fcr31
.set pop
#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
.set push
SET_HARDFLOAT
#ifdef CONFIG_CPU_MIPS32_R2
.set mips32r2
.set fp=64
mfc0 t0, CP0_STATUS
sll t0, t0, 5
bgez t0, 1f # skip storing odd if FR=0
nop
#endif
/* Store the 16 odd double precision registers */
EX sdc1 $f1, SC_FPREGS+8(a0)
EX sdc1 $f3, SC_FPREGS+24(a0)
EX sdc1 $f5, SC_FPREGS+40(a0)
EX sdc1 $f7, SC_FPREGS+56(a0)
EX sdc1 $f9, SC_FPREGS+72(a0)
EX sdc1 $f11, SC_FPREGS+88(a0)
EX sdc1 $f13, SC_FPREGS+104(a0)
EX sdc1 $f15, SC_FPREGS+120(a0)
EX sdc1 $f17, SC_FPREGS+136(a0)
EX sdc1 $f19, SC_FPREGS+152(a0)
EX sdc1 $f21, SC_FPREGS+168(a0)
EX sdc1 $f23, SC_FPREGS+184(a0)
EX sdc1 $f25, SC_FPREGS+200(a0)
EX sdc1 $f27, SC_FPREGS+216(a0)
EX sdc1 $f29, SC_FPREGS+232(a0)
EX sdc1 $f31, SC_FPREGS+248(a0)
1: .set pop
#endif
.set push
SET_HARDFLOAT
/* Store the 16 even double precision registers */
EX sdc1 $f0, SC_FPREGS+0(a0)
EX sdc1 $f2, SC_FPREGS+16(a0)
EX sdc1 $f4, SC_FPREGS+32(a0)
EX sdc1 $f6, SC_FPREGS+48(a0)
EX sdc1 $f8, SC_FPREGS+64(a0)
EX sdc1 $f10, SC_FPREGS+80(a0)
EX sdc1 $f12, SC_FPREGS+96(a0)
EX sdc1 $f14, SC_FPREGS+112(a0)
EX sdc1 $f16, SC_FPREGS+128(a0)
EX sdc1 $f18, SC_FPREGS+144(a0)
EX sdc1 $f20, SC_FPREGS+160(a0)
EX sdc1 $f22, SC_FPREGS+176(a0)
EX sdc1 $f24, SC_FPREGS+192(a0)
EX sdc1 $f26, SC_FPREGS+208(a0)
EX sdc1 $f28, SC_FPREGS+224(a0)
EX sdc1 $f30, SC_FPREGS+240(a0)
EX sw t1, SC_FPC_CSR(a0)
jr ra
li v0, 0 # success
.set pop
END(_save_fp_context)
#ifdef CONFIG_MIPS32_COMPAT
/* Save 32-bit process floating point context */
LEAF(_save_fp_context32)
.set push
SET_HARDFLOAT
cfc1 t1, fcr31
mfc0 t0, CP0_STATUS
sll t0, t0, 5
bgez t0, 1f # skip storing odd if FR=0
nop
/* Store the 16 odd double precision registers */
EX sdc1 $f1, SC32_FPREGS+8(a0)
EX sdc1 $f3, SC32_FPREGS+24(a0)
EX sdc1 $f5, SC32_FPREGS+40(a0)
EX sdc1 $f7, SC32_FPREGS+56(a0)
EX sdc1 $f9, SC32_FPREGS+72(a0)
EX sdc1 $f11, SC32_FPREGS+88(a0)
EX sdc1 $f13, SC32_FPREGS+104(a0)
EX sdc1 $f15, SC32_FPREGS+120(a0)
EX sdc1 $f17, SC32_FPREGS+136(a0)
EX sdc1 $f19, SC32_FPREGS+152(a0)
EX sdc1 $f21, SC32_FPREGS+168(a0)
EX sdc1 $f23, SC32_FPREGS+184(a0)
EX sdc1 $f25, SC32_FPREGS+200(a0)
EX sdc1 $f27, SC32_FPREGS+216(a0)
EX sdc1 $f29, SC32_FPREGS+232(a0)
EX sdc1 $f31, SC32_FPREGS+248(a0)
/* Store the 16 even double precision registers */
1: EX sdc1 $f0, SC32_FPREGS+0(a0)
EX sdc1 $f2, SC32_FPREGS+16(a0)
EX sdc1 $f4, SC32_FPREGS+32(a0)
EX sdc1 $f6, SC32_FPREGS+48(a0)
EX sdc1 $f8, SC32_FPREGS+64(a0)
EX sdc1 $f10, SC32_FPREGS+80(a0)
EX sdc1 $f12, SC32_FPREGS+96(a0)
EX sdc1 $f14, SC32_FPREGS+112(a0)
EX sdc1 $f16, SC32_FPREGS+128(a0)
EX sdc1 $f18, SC32_FPREGS+144(a0)
EX sdc1 $f20, SC32_FPREGS+160(a0)
EX sdc1 $f22, SC32_FPREGS+176(a0)
EX sdc1 $f24, SC32_FPREGS+192(a0)
EX sdc1 $f26, SC32_FPREGS+208(a0)
EX sdc1 $f28, SC32_FPREGS+224(a0)
EX sdc1 $f30, SC32_FPREGS+240(a0)
EX sw t1, SC32_FPC_CSR(a0)
cfc1 t0, $0 # implementation/version
EX sw t0, SC32_FPC_EIR(a0)
.set pop
jr ra
li v0, 0 # success
END(_save_fp_context32)
#endif
/*
* Restore FPU state:
* - fp gp registers
* - cp1 status/control register
*/
LEAF(_restore_fp_context)
EX lw t1, SC_FPC_CSR(a0)
#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
.set push
SET_HARDFLOAT
#ifdef CONFIG_CPU_MIPS32_R2
.set mips32r2
.set fp=64
mfc0 t0, CP0_STATUS
sll t0, t0, 5
bgez t0, 1f # skip loading odd if FR=0
nop
#endif
EX ldc1 $f1, SC_FPREGS+8(a0)
EX ldc1 $f3, SC_FPREGS+24(a0)
EX ldc1 $f5, SC_FPREGS+40(a0)
EX ldc1 $f7, SC_FPREGS+56(a0)
EX ldc1 $f9, SC_FPREGS+72(a0)
EX ldc1 $f11, SC_FPREGS+88(a0)
EX ldc1 $f13, SC_FPREGS+104(a0)
EX ldc1 $f15, SC_FPREGS+120(a0)
EX ldc1 $f17, SC_FPREGS+136(a0)
EX ldc1 $f19, SC_FPREGS+152(a0)
EX ldc1 $f21, SC_FPREGS+168(a0)
EX ldc1 $f23, SC_FPREGS+184(a0)
EX ldc1 $f25, SC_FPREGS+200(a0)
EX ldc1 $f27, SC_FPREGS+216(a0)
EX ldc1 $f29, SC_FPREGS+232(a0)
EX ldc1 $f31, SC_FPREGS+248(a0)
1: .set pop
#endif
.set push
SET_HARDFLOAT
EX ldc1 $f0, SC_FPREGS+0(a0)
EX ldc1 $f2, SC_FPREGS+16(a0)
EX ldc1 $f4, SC_FPREGS+32(a0)
EX ldc1 $f6, SC_FPREGS+48(a0)
EX ldc1 $f8, SC_FPREGS+64(a0)
EX ldc1 $f10, SC_FPREGS+80(a0)
EX ldc1 $f12, SC_FPREGS+96(a0)
EX ldc1 $f14, SC_FPREGS+112(a0)
EX ldc1 $f16, SC_FPREGS+128(a0)
EX ldc1 $f18, SC_FPREGS+144(a0)
EX ldc1 $f20, SC_FPREGS+160(a0)
EX ldc1 $f22, SC_FPREGS+176(a0)
EX ldc1 $f24, SC_FPREGS+192(a0)
EX ldc1 $f26, SC_FPREGS+208(a0)
EX ldc1 $f28, SC_FPREGS+224(a0)
EX ldc1 $f30, SC_FPREGS+240(a0)
ctc1 t1, fcr31
.set pop
jr ra
li v0, 0 # success
END(_restore_fp_context)
#ifdef CONFIG_MIPS32_COMPAT
LEAF(_restore_fp_context32)
/* Restore an o32 sigcontext. */
.set push
SET_HARDFLOAT
EX lw t1, SC32_FPC_CSR(a0)
mfc0 t0, CP0_STATUS
sll t0, t0, 5
bgez t0, 1f # skip loading odd if FR=0
nop
EX ldc1 $f1, SC32_FPREGS+8(a0)
EX ldc1 $f3, SC32_FPREGS+24(a0)
EX ldc1 $f5, SC32_FPREGS+40(a0)
EX ldc1 $f7, SC32_FPREGS+56(a0)
EX ldc1 $f9, SC32_FPREGS+72(a0)
EX ldc1 $f11, SC32_FPREGS+88(a0)
EX ldc1 $f13, SC32_FPREGS+104(a0)
EX ldc1 $f15, SC32_FPREGS+120(a0)
EX ldc1 $f17, SC32_FPREGS+136(a0)
EX ldc1 $f19, SC32_FPREGS+152(a0)
EX ldc1 $f21, SC32_FPREGS+168(a0)
EX ldc1 $f23, SC32_FPREGS+184(a0)
EX ldc1 $f25, SC32_FPREGS+200(a0)
EX ldc1 $f27, SC32_FPREGS+216(a0)
EX ldc1 $f29, SC32_FPREGS+232(a0)
EX ldc1 $f31, SC32_FPREGS+248(a0)
1: EX ldc1 $f0, SC32_FPREGS+0(a0)
EX ldc1 $f2, SC32_FPREGS+16(a0)
EX ldc1 $f4, SC32_FPREGS+32(a0)
EX ldc1 $f6, SC32_FPREGS+48(a0)
EX ldc1 $f8, SC32_FPREGS+64(a0)
EX ldc1 $f10, SC32_FPREGS+80(a0)
EX ldc1 $f12, SC32_FPREGS+96(a0)
EX ldc1 $f14, SC32_FPREGS+112(a0)
EX ldc1 $f16, SC32_FPREGS+128(a0)
EX ldc1 $f18, SC32_FPREGS+144(a0)
EX ldc1 $f20, SC32_FPREGS+160(a0)
EX ldc1 $f22, SC32_FPREGS+176(a0)
EX ldc1 $f24, SC32_FPREGS+192(a0)
EX ldc1 $f26, SC32_FPREGS+208(a0)
EX ldc1 $f28, SC32_FPREGS+224(a0)
EX ldc1 $f30, SC32_FPREGS+240(a0)
ctc1 t1, fcr31
jr ra
li v0, 0 # success
.set pop
END(_restore_fp_context32)
#endif
.set reorder
.type fault@function
.ent fault
fault: li v0, -EFAULT # failure
jr ra
.end fault


@ -0,0 +1,306 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
* Copyright (C) 1994, 1995, 1996, by Andreas Busse
* Copyright (C) 1999 Silicon Graphics, Inc.
* Copyright (C) 2000 MIPS Technologies, Inc.
* written by Carsten Langgaard, carstenl@mips.com
*/
#include <asm/asm.h>
#include <asm/cachectl.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable-bits.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/thread_info.h>
#include <asm/asmmacro.h>
/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
#undef fp
/*
* Offset to the current process status flags; the first 32 bytes of the
* stack are not used.
*/
#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
#ifndef USE_ALTERNATE_RESUME_IMPL
/*
* task_struct *resume(task_struct *prev, task_struct *next,
* struct thread_info *next_ti, s32 fp_save)
*/
.align 5
LEAF(resume)
mfc0 t1, CP0_STATUS
LONG_S t1, THREAD_STATUS(a0)
cpu_save_nonscratch a0
LONG_S ra, THREAD_REG31(a0)
/*
* Check whether we need to save any FP context. FP context is saved
* iff the process has used the context with the scalar FPU or the MSA
* ASE in the current time slice, as indicated by _TIF_USEDFPU and
* _TIF_USEDMSA respectively. switch_to will have set fp_save
* accordingly to an FP_SAVE_ enum value.
*/
beqz a3, 2f
/*
* We do. Clear the saved CU1 bit for prev, such that next time it is
* scheduled it will start in userland with the FPU disabled. If the
* task uses the FPU then it will be enabled again via the do_cpu trap.
* This allows us to lazily restore the FP context.
*/
PTR_L t3, TASK_THREAD_INFO(a0)
LONG_L t0, ST_OFF(t3)
li t1, ~ST0_CU1
and t0, t0, t1
LONG_S t0, ST_OFF(t3)
/* Check whether we're saving scalar or vector context. */
bgtz a3, 1f
/* Save 128b MSA vector context + scalar FP control & status. */
.set push
SET_HARDFLOAT
cfc1 t1, fcr31
msa_save_all a0
.set pop /* SET_HARDFLOAT */
sw t1, THREAD_FCR31(a0)
b 2f
1: /* Save 32b/64b scalar FP context. */
fpu_save_double a0 t0 t1 # c0_status passed in t0
# clobbers t1
2:
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
PTR_LA t8, __stack_chk_guard
LONG_L t9, TASK_STACK_CANARY(a1)
LONG_S t9, 0(t8)
#endif
/*
* The order of restoring the registers takes care of the race when
* updating $28, $29 and kernelsp without disabling interrupts.
*/
move $28, a2
cpu_restore_nonscratch a1
PTR_ADDU t0, $28, _THREAD_SIZE - 32
set_saved_sp t0, t1, t2
mfc0 t1, CP0_STATUS /* Do we really need this? */
li a3, 0xff01
and t1, a3
LONG_L a2, THREAD_STATUS(a1)
nor a3, $0, a3
and a2, a3
or a2, t1
mtc0 a2, CP0_STATUS
move v0, a0
jr ra
END(resume)
#endif /* USE_ALTERNATE_RESUME_IMPL */
/*
* Save a thread's fp context.
*/
LEAF(_save_fp)
#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
mfc0 t0, CP0_STATUS
#endif
fpu_save_double a0 t0 t1 # clobbers t1
jr ra
END(_save_fp)
/*
* Restore a thread's fp context.
*/
LEAF(_restore_fp)
#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
mfc0 t0, CP0_STATUS
#endif
fpu_restore_double a0 t0 t1 # clobbers t1
jr ra
END(_restore_fp)
#ifdef CONFIG_CPU_HAS_MSA
/*
* Save a thread's MSA vector context.
*/
LEAF(_save_msa)
msa_save_all a0
jr ra
END(_save_msa)
/*
* Restore a thread's MSA vector context.
*/
LEAF(_restore_msa)
msa_restore_all a0
jr ra
END(_restore_msa)
LEAF(_init_msa_upper)
msa_init_all_upper
jr ra
END(_init_msa_upper)
#endif
/*
* Load the FPU with signaling NaNs. The bit pattern we're using has the
* property that it represents signaling NaNs no matter whether it is
* considered as single or as double precision.
*
* We initialize fcr31 to rounding to nearest, no exceptions.
*/
#define FPU_DEFAULT 0x00000000
.set push
SET_HARDFLOAT
LEAF(_init_fpu)
mfc0 t0, CP0_STATUS
li t1, ST0_CU1
or t0, t1
mtc0 t0, CP0_STATUS
enable_fpu_hazard
li t1, FPU_DEFAULT
ctc1 t1, fcr31
li t1, -1 # SNaN
#ifdef CONFIG_64BIT
sll t0, t0, 5
bgez t0, 1f # 16 / 32 register mode?
dmtc1 t1, $f1
dmtc1 t1, $f3
dmtc1 t1, $f5
dmtc1 t1, $f7
dmtc1 t1, $f9
dmtc1 t1, $f11
dmtc1 t1, $f13
dmtc1 t1, $f15
dmtc1 t1, $f17
dmtc1 t1, $f19
dmtc1 t1, $f21
dmtc1 t1, $f23
dmtc1 t1, $f25
dmtc1 t1, $f27
dmtc1 t1, $f29
dmtc1 t1, $f31
1:
#endif
#ifdef CONFIG_CPU_MIPS32
mtc1 t1, $f0
mtc1 t1, $f1
mtc1 t1, $f2
mtc1 t1, $f3
mtc1 t1, $f4
mtc1 t1, $f5
mtc1 t1, $f6
mtc1 t1, $f7
mtc1 t1, $f8
mtc1 t1, $f9
mtc1 t1, $f10
mtc1 t1, $f11
mtc1 t1, $f12
mtc1 t1, $f13
mtc1 t1, $f14
mtc1 t1, $f15
mtc1 t1, $f16
mtc1 t1, $f17
mtc1 t1, $f18
mtc1 t1, $f19
mtc1 t1, $f20
mtc1 t1, $f21
mtc1 t1, $f22
mtc1 t1, $f23
mtc1 t1, $f24
mtc1 t1, $f25
mtc1 t1, $f26
mtc1 t1, $f27
mtc1 t1, $f28
mtc1 t1, $f29
mtc1 t1, $f30
mtc1 t1, $f31
#ifdef CONFIG_CPU_MIPS32_R2
.set push
.set mips32r2
.set fp=64
sll t0, t0, 5 # is Status.FR set?
bgez t0, 1f # no: skip setting upper 32b
mthc1 t1, $f0
mthc1 t1, $f1
mthc1 t1, $f2
mthc1 t1, $f3
mthc1 t1, $f4
mthc1 t1, $f5
mthc1 t1, $f6
mthc1 t1, $f7
mthc1 t1, $f8
mthc1 t1, $f9
mthc1 t1, $f10
mthc1 t1, $f11
mthc1 t1, $f12
mthc1 t1, $f13
mthc1 t1, $f14
mthc1 t1, $f15
mthc1 t1, $f16
mthc1 t1, $f17
mthc1 t1, $f18
mthc1 t1, $f19
mthc1 t1, $f20
mthc1 t1, $f21
mthc1 t1, $f22
mthc1 t1, $f23
mthc1 t1, $f24
mthc1 t1, $f25
mthc1 t1, $f26
mthc1 t1, $f27
mthc1 t1, $f28
mthc1 t1, $f29
mthc1 t1, $f30
mthc1 t1, $f31
1: .set pop
#endif /* CONFIG_CPU_MIPS32_R2 */
#else
.set arch=r4000
dmtc1 t1, $f0
dmtc1 t1, $f2
dmtc1 t1, $f4
dmtc1 t1, $f6
dmtc1 t1, $f8
dmtc1 t1, $f10
dmtc1 t1, $f12
dmtc1 t1, $f14
dmtc1 t1, $f16
dmtc1 t1, $f18
dmtc1 t1, $f20
dmtc1 t1, $f22
dmtc1 t1, $f24
dmtc1 t1, $f26
dmtc1 t1, $f28
dmtc1 t1, $f30
#endif
jr ra
END(_init_fpu)
.set pop /* SET_HARDFLOAT */


@ -0,0 +1,92 @@
/*
* r6000_fpu.S: Save/restore floating point context for signal handlers.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1996 by Ralf Baechle
*
* Multi-arch abstraction and asm macros for easier reading:
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
*/
#include <asm/asm.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
.set noreorder
.set mips2
.set push
SET_HARDFLOAT
/* Save floating point context */
LEAF(_save_fp_context)
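/*
 * Shift the CU1 (coprocessor 1 usable) bit of c0_status up into the
 * sign bit and bail out early if the FPU is not enabled.
 */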
mfc0 t0,CP0_STATUS
sll t0,t0,2
bgez t0,1f
nop
cfc1 t1,fcr31
/* Store the 16 double precision registers */
sdc1 $f0,(SC_FPREGS+0)(a0)
sdc1 $f2,(SC_FPREGS+16)(a0)
sdc1 $f4,(SC_FPREGS+32)(a0)
sdc1 $f6,(SC_FPREGS+48)(a0)
sdc1 $f8,(SC_FPREGS+64)(a0)
sdc1 $f10,(SC_FPREGS+80)(a0)
sdc1 $f12,(SC_FPREGS+96)(a0)
sdc1 $f14,(SC_FPREGS+112)(a0)
sdc1 $f16,(SC_FPREGS+128)(a0)
sdc1 $f18,(SC_FPREGS+144)(a0)
sdc1 $f20,(SC_FPREGS+160)(a0)
sdc1 $f22,(SC_FPREGS+176)(a0)
sdc1 $f24,(SC_FPREGS+192)(a0)
sdc1 $f26,(SC_FPREGS+208)(a0)
sdc1 $f28,(SC_FPREGS+224)(a0)
sdc1 $f30,(SC_FPREGS+240)(a0)
jr ra
sw t1,SC_FPC_CSR(a0) # fcr31 was read into t1 above
1: jr ra
nop
END(_save_fp_context)
/* Restore FPU state:
* - fp gp registers
* - cp1 status/control register
*
* We base the decision of which registers to restore from the signal stack
* frame on the current content of c0_status, not on the content of the
* stack frame which might have been changed by the user.
*/
LEAF(_restore_fp_context)
mfc0 t0,CP0_STATUS
sll t0,t0,2
bgez t0,1f
lw t0,SC_FPC_CSR(a0)
/* Restore the 16 double precision registers */
ldc1 $f0,(SC_FPREGS+0)(a0)
ldc1 $f2,(SC_FPREGS+16)(a0)
ldc1 $f4,(SC_FPREGS+32)(a0)
ldc1 $f6,(SC_FPREGS+48)(a0)
ldc1 $f8,(SC_FPREGS+64)(a0)
ldc1 $f10,(SC_FPREGS+80)(a0)
ldc1 $f12,(SC_FPREGS+96)(a0)
ldc1 $f14,(SC_FPREGS+112)(a0)
ldc1 $f16,(SC_FPREGS+128)(a0)
ldc1 $f18,(SC_FPREGS+144)(a0)
ldc1 $f20,(SC_FPREGS+160)(a0)
ldc1 $f22,(SC_FPREGS+176)(a0)
ldc1 $f24,(SC_FPREGS+192)(a0)
ldc1 $f26,(SC_FPREGS+208)(a0)
ldc1 $f28,(SC_FPREGS+224)(a0)
ldc1 $f30,(SC_FPREGS+240)(a0)
jr ra
ctc1 t0,fcr31
1: jr ra
nop
END(_restore_fp_context)
.set pop /* SET_HARDFLOAT */


@ -0,0 +1,192 @@
/*
* relocate_kernel.S for kexec
* Created by <nschichan@corp.free.fr> on Thu Oct 12 17:49:57 2006
*
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/addrspace.h>
LEAF(relocate_new_kernel)
PTR_L a0, arg0
PTR_L a1, arg1
PTR_L a2, arg2
PTR_L a3, arg3
PTR_L s0, kexec_indirection_page
PTR_L s1, kexec_start_address
process_entry:
PTR_L s2, (s0)
PTR_ADD s0, s0, SZREG
/*
* In case of a kdump/crash kernel, the indirection page is not
* populated, as the kernel is copied directly to a reserved location.
*/
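/*
 * Each indirection entry is a page address with the generic kexec flag
 * bits in its low bits: 0x1 destination page, 0x2 indirection page,
 * 0x4 done, 0x8 source page. The bit tests below decode these.
 */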
beqz s2, done
/* destination page */
and s3, s2, 0x1
beq s3, zero, 1f
and s4, s2, ~0x1 /* store destination addr in s4 */
b process_entry
1:
/* indirection page, update s0 */
and s3, s2, 0x2
beq s3, zero, 1f
and s0, s2, ~0x2
b process_entry
1:
/* done page */
and s3, s2, 0x4
beq s3, zero, 1f
b done
1:
/* source page */
and s3, s2, 0x8
beq s3, zero, process_entry
and s2, s2, ~0x8
li s6, (1 << _PAGE_SHIFT) / SZREG
copy_word:
/* copy page word by word */
REG_L s5, (s2)
REG_S s5, (s4)
PTR_ADD s4, s4, SZREG
PTR_ADD s2, s2, SZREG
LONG_SUB s6, s6, 1
beq s6, zero, process_entry
b copy_word
b process_entry
done:
#ifdef CONFIG_SMP
/* Resetting kexec_flag signals the other CPUs that the kernel
has been moved to its location. Note that we need the relocated
address of kexec_flag. */
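/*
 * The bal/PTR_LA pair compares the run-time and link-time addresses of
 * the local label; their difference is the relocation offset, which is
 * then applied to the link-time address of kexec_flag.
 */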
bal 1f
1: move t1,ra;
PTR_LA t2,1b
PTR_LA t0,kexec_flag
PTR_SUB t0,t0,t2;
PTR_ADD t0,t1,t0;
LONG_S zero,(t0)
#endif
#ifdef CONFIG_CPU_CAVIUM_OCTEON
/* We need to flush the I-cache before jumping to the new kernel.
* Unfortunately, this code is CPU-specific.
*/
.set push
.set noreorder
syncw
syncw
synci 0($0)
.set pop
#else
sync
#endif
/* jump to kexec_start_address */
j s1
END(relocate_new_kernel)
#ifdef CONFIG_SMP
/*
* Other CPUs should wait until code is relocated and
* then start at entry (?) point.
*/
LEAF(kexec_smp_wait)
PTR_L a0, s_arg0
PTR_L a1, s_arg1
PTR_L a2, s_arg2
PTR_L a3, s_arg3
PTR_L s1, kexec_start_address
/* The non-relocated addresses work for the args and kexec_start_address
* (the old kernel is not overwritten). But we need the relocated address
* of kexec_flag.
*/
bal 1f
1: move t1,ra;
PTR_LA t2,1b
PTR_LA t0,kexec_flag
PTR_SUB t0,t0,t2;
PTR_ADD t0,t1,t0;
1: LONG_L s0, (t0)
bne s0, zero,1b
#ifdef CONFIG_CPU_CAVIUM_OCTEON
.set push
.set noreorder
synci 0($0)
.set pop
#else
sync
#endif
j s1
END(kexec_smp_wait)
#endif
#ifdef __mips64
/* all PTRs must be aligned to 8 bytes in 64-bit mode */
.align 3
#endif
/* All parameters to the new kernel are passed in registers a0-a3.
* kexec_args[0..3] are used to prepare the register values.
*/
kexec_args:
EXPORT(kexec_args)
arg0: PTR 0x0
arg1: PTR 0x0
arg2: PTR 0x0
arg3: PTR 0x0
.size kexec_args,PTRSIZE*4
#ifdef CONFIG_SMP
/*
* Secondary CPUs may have different kernel parameters in
* their registers a0-a3. secondary_kexec_args[0..3] are used
* to prepare register values.
*/
secondary_kexec_args:
EXPORT(secondary_kexec_args)
s_arg0: PTR 0x0
s_arg1: PTR 0x0
s_arg2: PTR 0x0
s_arg3: PTR 0x0
.size secondary_kexec_args,PTRSIZE*4
kexec_flag:
LONG 0x1
#endif
kexec_start_address:
EXPORT(kexec_start_address)
PTR 0x0
.size kexec_start_address, PTRSIZE
kexec_indirection_page:
EXPORT(kexec_indirection_page)
PTR 0
.size kexec_indirection_page, PTRSIZE
relocate_new_kernel_end:
relocate_new_kernel_size:
EXPORT(relocate_new_kernel_size)
PTR relocate_new_kernel_end - relocate_new_kernel
.size relocate_new_kernel_size, PTRSIZE

44
arch/mips/kernel/reset.c Normal file

@ -0,0 +1,44 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001, 06 by Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 2001 MIPS Technologies, Inc.
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/pm.h>
#include <linux/types.h>
#include <linux/reboot.h>
#include <asm/reboot.h>
/*
* Urgs ... Too many MIPS machines to handle this in a generic way.
* So handle them all using function pointers to machine-specific
* functions.
*/
void (*_machine_restart)(char *command);
void (*_machine_halt)(void);
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
void machine_restart(char *command)
{
if (_machine_restart)
_machine_restart(command);
}
void machine_halt(void)
{
if (_machine_halt)
_machine_halt();
}
void machine_power_off(void)
{
if (pm_power_off)
pm_power_off();
}
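/*
 * A platform hooks into these pointers from its board setup code, for
 * example (illustrative names only, not taken from this tree):
 *
 *	static void myboard_restart(char *command)
 *	{
 *		writel(MYBOARD_RESET_MAGIC, myboard_reset_reg);
 *	}
 *
 *	void __init plat_mem_setup(void)
 *	{
 *		_machine_restart = myboard_restart;
 *	}
 */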

122
arch/mips/kernel/rtlx-cmp.c Normal file

@ -0,0 +1,122 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
* Copyright (C) 2013 Imagination Technologies Ltd.
*/
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/mips_mt.h>
#include <asm/vpe.h>
#include <asm/rtlx.h>
static int major;
static void rtlx_interrupt(void)
{
int i;
struct rtlx_info *info;
struct rtlx_info **p = vpe_get_shared(aprp_cpu_index());
if (p == NULL || *p == NULL)
return;
info = *p;
if (info->ap_int_pending == 1 && smp_processor_id() == 0) {
for (i = 0; i < RTLX_CHANNELS; i++) {
wake_up(&channel_wqs[i].lx_queue);
wake_up(&channel_wqs[i].rt_queue);
}
info->ap_int_pending = 0;
}
}
void _interrupt_sp(void)
{
smp_send_reschedule(aprp_cpu_index());
}
int __init rtlx_module_init(void)
{
struct device *dev;
int i, err;
if (!cpu_has_mipsmt) {
pr_warn("VPE loader: not a MIPS MT capable processor\n");
return -ENODEV;
}
if (num_possible_cpus() - aprp_cpu_index() < 1) {
pr_warn("No TCs reserved for AP/SP, not initializing RTLX.\n"
"Pass maxcpus=<n> argument as kernel argument\n");
return -ENODEV;
}
major = register_chrdev(0, RTLX_MODULE_NAME, &rtlx_fops);
if (major < 0) {
pr_err("rtlx_module_init: unable to register device\n");
return major;
}
/* initialise the wait queues */
for (i = 0; i < RTLX_CHANNELS; i++) {
init_waitqueue_head(&channel_wqs[i].rt_queue);
init_waitqueue_head(&channel_wqs[i].lx_queue);
atomic_set(&channel_wqs[i].in_open, 0);
mutex_init(&channel_wqs[i].mutex);
dev = device_create(mt_class, NULL, MKDEV(major, i), NULL,
"%s%d", RTLX_MODULE_NAME, i);
if (IS_ERR(dev)) {
while (i--)
device_destroy(mt_class, MKDEV(major, i));
err = PTR_ERR(dev);
goto out_chrdev;
}
}
/* set up notifiers */
rtlx_notify.start = rtlx_starting;
rtlx_notify.stop = rtlx_stopping;
vpe_notify(aprp_cpu_index(), &rtlx_notify);
if (cpu_has_vint) {
aprp_hook = rtlx_interrupt;
} else {
pr_err("APRP RTLX init on non-vectored-interrupt processor\n");
err = -ENODEV;
goto out_class;
}
return 0;
out_class:
for (i = 0; i < RTLX_CHANNELS; i++)
device_destroy(mt_class, MKDEV(major, i));
out_chrdev:
unregister_chrdev(major, RTLX_MODULE_NAME);
return err;
}
void __exit rtlx_module_exit(void)
{
int i;
for (i = 0; i < RTLX_CHANNELS; i++)
device_destroy(mt_class, MKDEV(major, i));
unregister_chrdev(major, RTLX_MODULE_NAME);
aprp_hook = NULL;
}

153
arch/mips/kernel/rtlx-mt.c Normal file

@ -0,0 +1,153 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
* Copyright (C) 2013 Imagination Technologies Ltd.
*/
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/mips_mt.h>
#include <asm/vpe.h>
#include <asm/rtlx.h>
static int major;
static void rtlx_dispatch(void)
{
if (read_c0_cause() & read_c0_status() & C_SW0)
do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ);
}
/*
* Interrupt handler may be called before rtlx_init has otherwise had
* a chance to run.
*/
static irqreturn_t rtlx_interrupt(int irq, void *dev_id)
{
unsigned int vpeflags;
unsigned long flags;
int i;
local_irq_save(flags);
vpeflags = dvpe();
set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ);
irq_enable_hazard();
evpe(vpeflags);
local_irq_restore(flags);
for (i = 0; i < RTLX_CHANNELS; i++) {
wake_up(&channel_wqs[i].lx_queue);
wake_up(&channel_wqs[i].rt_queue);
}
return IRQ_HANDLED;
}
static struct irqaction rtlx_irq = {
.handler = rtlx_interrupt,
.name = "RTLX",
};
static int rtlx_irq_num = MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ;
void _interrupt_sp(void)
{
unsigned long flags;
local_irq_save(flags);
dvpe();
settc(1);
write_vpe_c0_cause(read_vpe_c0_cause() | C_SW0);
evpe(EVPE_ENABLE);
local_irq_restore(flags);
}
int __init rtlx_module_init(void)
{
struct device *dev;
int i, err;
if (!cpu_has_mipsmt) {
pr_warn("VPE loader: not a MIPS MT capable processor\n");
return -ENODEV;
}
if (aprp_cpu_index() == 0) {
pr_warn("No TCs reserved for AP/SP, not initializing RTLX.\n"
"Pass maxtcs=<n> argument as kernel argument\n");
return -ENODEV;
}
major = register_chrdev(0, RTLX_MODULE_NAME, &rtlx_fops);
if (major < 0) {
pr_err("rtlx_module_init: unable to register device\n");
return major;
}
/* initialise the wait queues */
for (i = 0; i < RTLX_CHANNELS; i++) {
init_waitqueue_head(&channel_wqs[i].rt_queue);
init_waitqueue_head(&channel_wqs[i].lx_queue);
atomic_set(&channel_wqs[i].in_open, 0);
mutex_init(&channel_wqs[i].mutex);
dev = device_create(mt_class, NULL, MKDEV(major, i), NULL,
"%s%d", RTLX_MODULE_NAME, i);
if (IS_ERR(dev)) {
while (i--)
device_destroy(mt_class, MKDEV(major, i));
err = PTR_ERR(dev);
goto out_chrdev;
}
}
/* set up notifiers */
rtlx_notify.start = rtlx_starting;
rtlx_notify.stop = rtlx_stopping;
vpe_notify(aprp_cpu_index(), &rtlx_notify);
if (cpu_has_vint) {
aprp_hook = rtlx_dispatch;
} else {
pr_err("APRP RTLX init on non-vectored-interrupt processor\n");
err = -ENODEV;
goto out_class;
}
rtlx_irq.dev_id = rtlx;
err = setup_irq(rtlx_irq_num, &rtlx_irq);
if (err)
goto out_class;
return 0;
out_class:
for (i = 0; i < RTLX_CHANNELS; i++)
device_destroy(mt_class, MKDEV(major, i));
out_chrdev:
unregister_chrdev(major, RTLX_MODULE_NAME);
return err;
}
void __exit rtlx_module_exit(void)
{
int i;
for (i = 0; i < RTLX_CHANNELS; i++)
device_destroy(mt_class, MKDEV(major, i));
unregister_chrdev(major, RTLX_MODULE_NAME);
aprp_hook = NULL;
}

407
arch/mips/kernel/rtlx.c Normal file

@ -0,0 +1,407 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
* Copyright (C) 2005, 06 Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 2013 Imagination Technologies Ltd.
*/
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/syscalls.h>
#include <linux/moduleloader.h>
#include <linux/atomic.h>
#include <asm/mipsmtregs.h>
#include <asm/mips_mt.h>
#include <asm/processor.h>
#include <asm/rtlx.h>
#include <asm/setup.h>
#include <asm/vpe.h>
static int sp_stopping;
struct rtlx_info *rtlx;
struct chan_waitqueues channel_wqs[RTLX_CHANNELS];
struct vpe_notifications rtlx_notify;
void (*aprp_hook)(void) = NULL;
EXPORT_SYMBOL(aprp_hook);
static void __used dump_rtlx(void)
{
int i;
pr_info("id 0x%lx state %d\n", rtlx->id, rtlx->state);
for (i = 0; i < RTLX_CHANNELS; i++) {
struct rtlx_channel *chan = &rtlx->channel[i];
pr_info(" rt_state %d lx_state %d buffer_size %d\n",
chan->rt_state, chan->lx_state, chan->buffer_size);
pr_info(" rt_read %d rt_write %d\n",
chan->rt_read, chan->rt_write);
pr_info(" lx_read %d lx_write %d\n",
chan->lx_read, chan->lx_write);
pr_info(" rt_buffer <%s>\n", chan->rt_buffer);
pr_info(" lx_buffer <%s>\n", chan->lx_buffer);
}
}
/* call when we have the address of the shared structure from the SP side. */
static int rtlx_init(struct rtlx_info *rtlxi)
{
if (rtlxi->id != RTLX_ID) {
pr_err("no valid RTLX id at 0x%p 0x%lx\n", rtlxi, rtlxi->id);
return -ENOEXEC;
}
rtlx = rtlxi;
return 0;
}
/* notifications */
void rtlx_starting(int vpe)
{
int i;
sp_stopping = 0;
/* force a reload of rtlx */
rtlx = NULL;
/* wake up any sleeping rtlx_open's */
for (i = 0; i < RTLX_CHANNELS; i++)
wake_up_interruptible(&channel_wqs[i].lx_queue);
}
void rtlx_stopping(int vpe)
{
int i;
sp_stopping = 1;
for (i = 0; i < RTLX_CHANNELS; i++)
wake_up_interruptible(&channel_wqs[i].lx_queue);
}
int rtlx_open(int index, int can_sleep)
{
struct rtlx_info **p;
struct rtlx_channel *chan;
enum rtlx_state state;
int ret = 0;
if (index >= RTLX_CHANNELS) {
pr_debug("rtlx_open index out of range\n");
return -ENOSYS;
}
if (atomic_inc_return(&channel_wqs[index].in_open) > 1) {
pr_debug("rtlx_open channel %d already opened\n", index);
ret = -EBUSY;
goto out_fail;
}
if (rtlx == NULL) {
p = vpe_get_shared(aprp_cpu_index());
if (p == NULL) {
if (can_sleep) {
ret = __wait_event_interruptible(
channel_wqs[index].lx_queue,
(p = vpe_get_shared(aprp_cpu_index())));
if (ret)
goto out_fail;
} else {
pr_debug("No SP program loaded, and device opened with O_NONBLOCK\n");
ret = -ENOSYS;
goto out_fail;
}
}
smp_rmb();
if (*p == NULL) {
if (can_sleep) {
DEFINE_WAIT(wait);
for (;;) {
prepare_to_wait(
&channel_wqs[index].lx_queue,
&wait, TASK_INTERRUPTIBLE);
smp_rmb();
if (*p != NULL)
break;
if (!signal_pending(current)) {
schedule();
continue;
}
ret = -ERESTARTSYS;
goto out_fail;
}
finish_wait(&channel_wqs[index].lx_queue,
&wait);
} else {
pr_err(" *vpe_get_shared is NULL. Has an SP program been loaded?\n");
ret = -ENOSYS;
goto out_fail;
}
}
if ((unsigned int)*p < KSEG0) {
pr_warn("vpe_get_shared returned an invalid pointer maybe an error code %d\n",
(int)*p);
ret = -ENOSYS;
goto out_fail;
}
ret = rtlx_init(*p);
if (ret < 0)
goto out_ret;
}
chan = &rtlx->channel[index];
state = xchg(&chan->lx_state, RTLX_STATE_OPENED);
if (state == RTLX_STATE_OPENED) {
ret = -EBUSY;
goto out_fail;
}
out_fail:
smp_mb();
atomic_dec(&channel_wqs[index].in_open);
smp_mb();
out_ret:
return ret;
}
int rtlx_release(int index)
{
if (rtlx == NULL) {
pr_err("rtlx_release() with null rtlx\n");
return 0;
}
rtlx->channel[index].lx_state = RTLX_STATE_UNUSED;
return 0;
}
unsigned int rtlx_read_poll(int index, int can_sleep)
{
struct rtlx_channel *chan;
if (rtlx == NULL)
return 0;
chan = &rtlx->channel[index];
/* data available to read? */
if (chan->lx_read == chan->lx_write) {
if (can_sleep) {
int ret = __wait_event_interruptible(
channel_wqs[index].lx_queue,
(chan->lx_read != chan->lx_write) ||
sp_stopping);
if (ret)
return ret;
if (sp_stopping)
return 0;
} else
return 0;
}
return (chan->lx_write + chan->buffer_size - chan->lx_read)
% chan->buffer_size;
}
static inline int write_spacefree(int read, int write, int size)
{
if (read == write) {
/*
* The buffer is never filled completely, so the indexes are equal
* only when the buffer is empty, and unequal whenever data is available.
*/
return size - 1;
}
return ((read + size - write) % size) - 1;
}
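/*
 * Worked example: with size == 8, read == 2 and write == 5, three slots
 * hold data and write_spacefree() returns ((2 + 8 - 5) % 8) - 1 == 4;
 * one slot is always left unused so that read == write can only mean
 * "empty".
 */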
unsigned int rtlx_write_poll(int index)
{
struct rtlx_channel *chan = &rtlx->channel[index];
return write_spacefree(chan->rt_read, chan->rt_write,
chan->buffer_size);
}
ssize_t rtlx_read(int index, void __user *buff, size_t count)
{
size_t lx_write, fl = 0L;
struct rtlx_channel *lx;
unsigned long failed;
if (rtlx == NULL)
return -ENOSYS;
lx = &rtlx->channel[index];
mutex_lock(&channel_wqs[index].mutex);
smp_rmb();
lx_write = lx->lx_write;
/* find out how much in total */
count = min(count,
(size_t)(lx_write + lx->buffer_size - lx->lx_read)
% lx->buffer_size);
/* then how much from the read pointer onwards */
fl = min(count, (size_t)lx->buffer_size - lx->lx_read);
failed = copy_to_user(buff, lx->lx_buffer + lx->lx_read, fl);
if (failed)
goto out;
/* and if there is anything left at the beginning of the buffer */
if (count - fl)
failed = copy_to_user(buff + fl, lx->lx_buffer, count - fl);
out:
count -= failed;
smp_wmb();
lx->lx_read = (lx->lx_read + count) % lx->buffer_size;
smp_wmb();
mutex_unlock(&channel_wqs[index].mutex);
return count;
}
ssize_t rtlx_write(int index, const void __user *buffer, size_t count)
{
struct rtlx_channel *rt;
unsigned long failed;
size_t rt_read;
size_t fl;
if (rtlx == NULL)
return -ENOSYS;
rt = &rtlx->channel[index];
mutex_lock(&channel_wqs[index].mutex);
smp_rmb();
rt_read = rt->rt_read;
/* total number of bytes to copy */
count = min_t(size_t, count, write_spacefree(rt_read, rt->rt_write,
rt->buffer_size));
/* first bit from write pointer to the end of the buffer, or count */
fl = min(count, (size_t) rt->buffer_size - rt->rt_write);
failed = copy_from_user(rt->rt_buffer + rt->rt_write, buffer, fl);
if (failed)
goto out;
/* if there's any left copy to the beginning of the buffer */
if (count - fl)
failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl);
out:
count -= failed;
smp_wmb();
rt->rt_write = (rt->rt_write + count) % rt->buffer_size;
smp_wmb();
mutex_unlock(&channel_wqs[index].mutex);
_interrupt_sp();
return count;
}
static int file_open(struct inode *inode, struct file *filp)
{
return rtlx_open(iminor(inode), (filp->f_flags & O_NONBLOCK) ? 0 : 1);
}
static int file_release(struct inode *inode, struct file *filp)
{
return rtlx_release(iminor(inode));
}
static unsigned int file_poll(struct file *file, poll_table *wait)
{
int minor = iminor(file_inode(file));
unsigned int mask = 0;
poll_wait(file, &channel_wqs[minor].rt_queue, wait);
poll_wait(file, &channel_wqs[minor].lx_queue, wait);
if (rtlx == NULL)
return 0;
/* data available to read? */
if (rtlx_read_poll(minor, 0))
mask |= POLLIN | POLLRDNORM;
/* space to write */
if (rtlx_write_poll(minor))
mask |= POLLOUT | POLLWRNORM;
return mask;
}
static ssize_t file_read(struct file *file, char __user *buffer, size_t count,
loff_t *ppos)
{
int minor = iminor(file_inode(file));
/* data available? */
if (!rtlx_read_poll(minor, (file->f_flags & O_NONBLOCK) ? 0 : 1))
return 0; /* -EAGAIN makes 'cat' whine */
return rtlx_read(minor, buffer, count);
}
static ssize_t file_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
int minor = iminor(file_inode(file));
/* any space left... */
if (!rtlx_write_poll(minor)) {
int ret;
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
ret = __wait_event_interruptible(channel_wqs[minor].rt_queue,
rtlx_write_poll(minor));
if (ret)
return ret;
}
return rtlx_write(minor, buffer, count);
}
const struct file_operations rtlx_fops = {
.owner = THIS_MODULE,
.open = file_open,
.release = file_release,
.write = file_write,
.read = file_read,
.poll = file_poll,
.llseek = noop_llseek,
};
module_init(rtlx_module_init);
module_exit(rtlx_module_exit);
MODULE_DESCRIPTION("MIPS RTLX");
MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc.");
MODULE_LICENSE("GPL");


@ -0,0 +1,582 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995-99, 2000- 02, 06 Ralf Baechle <ralf@linux-mips.org>
* Copyright (C) 2001 MIPS Technologies, Inc.
* Copyright (C) 2004 Thiemo Seufer
* Copyright (C) 2014 Imagination Technologies Ltd.
*/
#include <linux/errno.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/irqflags.h>
#include <asm/mipsregs.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/isadep.h>
#include <asm/sysmips.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/war.h>
#include <asm/asm-offsets.h>
/* Highest syscall number used by any syscall flavour */
#define MAX_SYSCALL_NO __NR_O32_Linux + __NR_O32_Linux_syscalls
.align 5
NESTED(handle_sys, PT_SIZE, sp)
.set noat
SAVE_SOME
TRACE_IRQS_ON_RELOAD
STI
.set at
lw t1, PT_EPC(sp) # skip syscall on return
subu v0, v0, __NR_O32_Linux # check syscall number
sltiu t0, v0, __NR_O32_Linux_syscalls + 1
addiu t1, 4 # skip to next instruction
sw t1, PT_EPC(sp)
beqz t0, illegal_syscall
sll t0, v0, 2
la t1, sys_call_table
addu t1, t0
lw t2, (t1) # syscall routine
beqz t2, illegal_syscall
sw a3, PT_R26(sp) # save a3 for syscall restarting
/*
* More than four arguments. Try to deal with it by copying the
* stack arguments from the user stack to the kernel stack.
* This Sucks (TM).
*/
lw t0, PT_R29(sp) # get old user stack pointer
/*
* We intentionally keep the kernel stack a little below the top of
* userspace so we don't have to do a slower byte accurate check here.
*/
lw t5, TI_ADDR_LIMIT($28)
addu t4, t0, 32
and t5, t4
bltz t5, bad_stack # -> sp is bad
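/*
 * With the usual o32 user segment limit of 0x80000000 on a 32-bit kernel,
 * the AND leaves the sign bit set only when usp + 32 would reach into
 * kernel space, so a negative result means the user stack pointer is bad.
 */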
/*
* Ok, copy the args from the luser stack to the kernel stack.
*/
.set push
.set noreorder
.set nomacro
1: user_lw(t5, 16(t0)) # argument #5 from usp
4: user_lw(t6, 20(t0)) # argument #6 from usp
3: user_lw(t7, 24(t0)) # argument #7 from usp
2: user_lw(t8, 28(t0)) # argument #8 from usp
sw t5, 16(sp) # argument #5 to ksp
sw t6, 20(sp) # argument #6 to ksp
sw t7, 24(sp) # argument #7 to ksp
sw t8, 28(sp) # argument #8 to ksp
.set pop
.section __ex_table,"a"
PTR 1b,bad_stack
PTR 2b,bad_stack
PTR 3b,bad_stack
PTR 4b,bad_stack
.previous
lw t0, TI_FLAGS($28) # syscall tracing enabled?
li t1, _TIF_WORK_SYSCALL_ENTRY
and t0, t1
bnez t0, syscall_trace_entry # -> yes
jalr t2 # Do The Real Thing (TM)
li t0, -EMAXERRNO - 1 # error?
sltu t0, t0, v0
sw t0, PT_R7(sp) # set error flag
beqz t0, 1f
lw t1, PT_R2(sp) # syscall number
negu v0 # error
sw t1, PT_R0(sp) # save it for syscall restarting
1: sw v0, PT_R2(sp) # result
o32_syscall_exit:
j syscall_exit_partial
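/*
 * MIPS syscall return convention: PT_R7 (a3) is handed back to user space
 * as the error flag, and on error v0 is negated so PT_R2 holds a positive
 * errno; return values in the range [-EMAXERRNO, -1] are treated as errors
 * by the check above.
 */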
/* ------------------------------------------------------------------------ */
syscall_trace_entry:
SAVE_STATIC
move s0, t2
move a0, sp
/*
* syscall number is in v0 unless we called syscall(__NR_###)
* where the real syscall number is in a0
*/
addiu a1, v0, __NR_O32_Linux
bnez v0, 1f /* __NR_syscall at offset 0 */
lw a1, PT_R4(sp)
1: jal syscall_trace_enter
bltz v0, 2f # seccomp failed? Skip syscall
move t0, s0
RESTORE_STATIC
lw a0, PT_R4(sp) # Restore argument registers
lw a1, PT_R5(sp)
lw a2, PT_R6(sp)
lw a3, PT_R7(sp)
jalr t0
li t0, -EMAXERRNO - 1 # error?
sltu t0, t0, v0
sw t0, PT_R7(sp) # set error flag
beqz t0, 1f
lw t1, PT_R2(sp) # syscall number
negu v0 # error
sw t1, PT_R0(sp) # save it for syscall restarting
1: sw v0, PT_R2(sp) # result
2: j syscall_exit
/* ------------------------------------------------------------------------ */
/*
* The stack pointer for a call with more than 4 arguments is bad.
* We should probably handle this case a bit more drastically.
*/
bad_stack:
li v0, EFAULT
sw v0, PT_R2(sp)
li t0, 1 # set error flag
sw t0, PT_R7(sp)
j o32_syscall_exit
/*
* The system call does not exist in this kernel
*/
illegal_syscall:
li v0, ENOSYS # error
sw v0, PT_R2(sp)
li t0, 1 # set error flag
sw t0, PT_R7(sp)
j o32_syscall_exit
END(handle_sys)
LEAF(sys_syscall)
subu t0, a0, __NR_O32_Linux # check syscall number
sltiu v0, t0, __NR_O32_Linux_syscalls + 1
beqz t0, einval # do not recurse
sll t1, t0, 2
beqz v0, einval
lw t2, sys_call_table(t1) # syscall routine
/* Some syscalls like execve get their arguments from struct pt_regs
and claim zero arguments in the syscall table. Thus we have to
assume the worst case and shuffle around all potential arguments.
If you want performance, don't use indirect syscalls. */
move a0, a1 # shift argument registers
move a1, a2
move a2, a3
lw a3, 16(sp)
lw t4, 20(sp)
lw t5, 24(sp)
lw t6, 28(sp)
sw t4, 16(sp)
sw t5, 20(sp)
sw t6, 24(sp)
sw a0, PT_R4(sp) # .. and push back a0 - a3, some
sw a1, PT_R5(sp) # syscalls expect them there
sw a2, PT_R6(sp)
sw a3, PT_R7(sp)
sw a3, PT_R26(sp) # update a3 for syscall restarting
jr t2
/* Unreached */
einval: li v0, -ENOSYS
jr ra
END(sys_syscall)
.align 2
.type sys_call_table, @object
EXPORT(sys_call_table)
PTR sys_syscall /* 4000 */
PTR sys_exit
PTR __sys_fork
PTR sys_read
PTR sys_write
PTR sys_open /* 4005 */
PTR sys_close
PTR sys_waitpid
PTR sys_creat
PTR sys_link
PTR sys_unlink /* 4010 */
PTR sys_execve
PTR sys_chdir
PTR sys_time
PTR sys_mknod
PTR sys_chmod /* 4015 */
PTR sys_lchown
PTR sys_ni_syscall
PTR sys_ni_syscall /* was sys_stat */
PTR sys_lseek
PTR sys_getpid /* 4020 */
PTR sys_mount
PTR sys_oldumount
PTR sys_setuid
PTR sys_getuid
PTR sys_stime /* 4025 */
PTR sys_ptrace
PTR sys_alarm
PTR sys_ni_syscall /* was sys_fstat */
PTR sys_pause
PTR sys_utime /* 4030 */
PTR sys_ni_syscall
PTR sys_ni_syscall
PTR sys_access
PTR sys_nice
PTR sys_ni_syscall /* 4035 */
PTR sys_sync
PTR sys_kill
PTR sys_rename
PTR sys_mkdir
PTR sys_rmdir /* 4040 */
PTR sys_dup
PTR sysm_pipe
PTR sys_times
PTR sys_ni_syscall
PTR sys_brk /* 4045 */
PTR sys_setgid
PTR sys_getgid
PTR sys_ni_syscall /* was signal(2) */
PTR sys_geteuid
PTR sys_getegid /* 4050 */
PTR sys_acct
PTR sys_umount
PTR sys_ni_syscall
PTR sys_ioctl
PTR sys_fcntl /* 4055 */
PTR sys_ni_syscall
PTR sys_setpgid
PTR sys_ni_syscall
PTR sys_olduname
PTR sys_umask /* 4060 */
PTR sys_chroot
PTR sys_ustat
PTR sys_dup2
PTR sys_getppid
PTR sys_getpgrp /* 4065 */
PTR sys_setsid
PTR sys_sigaction
PTR sys_sgetmask
PTR sys_ssetmask
PTR sys_setreuid /* 4070 */
PTR sys_setregid
PTR sys_sigsuspend
PTR sys_sigpending
PTR sys_sethostname
PTR sys_setrlimit /* 4075 */
PTR sys_getrlimit
PTR sys_getrusage
PTR sys_gettimeofday
PTR sys_settimeofday
PTR sys_getgroups /* 4080 */
PTR sys_setgroups
PTR sys_ni_syscall /* old_select */
PTR sys_symlink
PTR sys_ni_syscall /* was sys_lstat */
PTR sys_readlink /* 4085 */
PTR sys_uselib
PTR sys_swapon
PTR sys_reboot
PTR sys_old_readdir
PTR sys_mips_mmap /* 4090 */
PTR sys_munmap
PTR sys_truncate
PTR sys_ftruncate
PTR sys_fchmod
PTR sys_fchown /* 4095 */
PTR sys_getpriority
PTR sys_setpriority
PTR sys_ni_syscall
PTR sys_statfs
PTR sys_fstatfs /* 4100 */
PTR sys_ni_syscall /* was ioperm(2) */
PTR sys_socketcall
PTR sys_syslog
PTR sys_setitimer
PTR sys_getitimer /* 4105 */
PTR sys_newstat
PTR sys_newlstat
PTR sys_newfstat
PTR sys_uname
PTR sys_ni_syscall /* 4110 was iopl(2) */
PTR sys_vhangup
PTR sys_ni_syscall /* was sys_idle() */
PTR sys_ni_syscall /* was sys_vm86 */
PTR sys_wait4
PTR sys_swapoff /* 4115 */
PTR sys_sysinfo
PTR sys_ipc
PTR sys_fsync
PTR sys_sigreturn
PTR __sys_clone /* 4120 */
PTR sys_setdomainname
PTR sys_newuname
PTR sys_ni_syscall /* sys_modify_ldt */
PTR sys_adjtimex
PTR sys_mprotect /* 4125 */
PTR sys_sigprocmask
PTR sys_ni_syscall /* was create_module */
PTR sys_init_module
PTR sys_delete_module
PTR sys_ni_syscall /* 4130 was get_kernel_syms */
PTR sys_quotactl
PTR sys_getpgid
PTR sys_fchdir
PTR sys_bdflush
PTR sys_sysfs /* 4135 */
PTR sys_personality
PTR sys_ni_syscall /* for afs_syscall */
PTR sys_setfsuid
PTR sys_setfsgid
PTR sys_llseek /* 4140 */
PTR sys_getdents
PTR sys_select
PTR sys_flock
PTR sys_msync
PTR sys_readv /* 4145 */
PTR sys_writev
PTR sys_cacheflush
PTR sys_cachectl
PTR sys_sysmips
PTR sys_ni_syscall /* 4150 */
PTR sys_getsid
PTR sys_fdatasync
PTR sys_sysctl
PTR sys_mlock
PTR sys_munlock /* 4155 */
PTR sys_mlockall
PTR sys_munlockall
PTR sys_sched_setparam
PTR sys_sched_getparam
PTR sys_sched_setscheduler /* 4160 */
PTR sys_sched_getscheduler
PTR sys_sched_yield
PTR sys_sched_get_priority_max
PTR sys_sched_get_priority_min
PTR sys_sched_rr_get_interval /* 4165 */
PTR sys_nanosleep
PTR sys_mremap
PTR sys_accept
PTR sys_bind
PTR sys_connect /* 4170 */
PTR sys_getpeername
PTR sys_getsockname
PTR sys_getsockopt
PTR sys_listen
PTR sys_recv /* 4175 */
PTR sys_recvfrom
PTR sys_recvmsg
PTR sys_send
PTR sys_sendmsg
PTR sys_sendto /* 4180 */
PTR sys_setsockopt
PTR sys_shutdown
PTR sys_socket
PTR sys_socketpair
PTR sys_setresuid /* 4185 */
PTR sys_getresuid
PTR sys_ni_syscall /* was sys_query_module */
PTR sys_poll
PTR sys_ni_syscall /* was nfsservctl */
PTR sys_setresgid /* 4190 */
PTR sys_getresgid
PTR sys_prctl
PTR sys_rt_sigreturn
PTR sys_rt_sigaction
PTR sys_rt_sigprocmask /* 4195 */
PTR sys_rt_sigpending
PTR sys_rt_sigtimedwait
PTR sys_rt_sigqueueinfo
PTR sys_rt_sigsuspend
PTR sys_pread64 /* 4200 */
PTR sys_pwrite64
PTR sys_chown
PTR sys_getcwd
PTR sys_capget
PTR sys_capset /* 4205 */
PTR sys_sigaltstack
PTR sys_sendfile
PTR sys_ni_syscall
PTR sys_ni_syscall
PTR sys_mips_mmap2 /* 4210 */
PTR sys_truncate64
PTR sys_ftruncate64
PTR sys_stat64
PTR sys_lstat64
PTR sys_fstat64 /* 4215 */
PTR sys_pivot_root
PTR sys_mincore
PTR sys_madvise
PTR sys_getdents64
PTR sys_fcntl64 /* 4220 */
PTR sys_ni_syscall
PTR sys_gettid
PTR sys_readahead
PTR sys_setxattr
PTR sys_lsetxattr /* 4225 */
PTR sys_fsetxattr
PTR sys_getxattr
PTR sys_lgetxattr
PTR sys_fgetxattr
PTR sys_listxattr /* 4230 */
PTR sys_llistxattr
PTR sys_flistxattr
PTR sys_removexattr
PTR sys_lremovexattr
PTR sys_fremovexattr /* 4235 */
PTR sys_tkill
PTR sys_sendfile64
PTR sys_futex
#ifdef CONFIG_MIPS_MT_FPAFF
/*
* For FPU affinity scheduling on MIPS MT processors, we need to
* intercept sys_sched_xxxaffinity() calls until we get a proper hook
* in kernel/sched/core.c. Considered only temporary, we only support
* these hooks for the 32-bit kernel - there is no MIPS64 MT processor
* at the moment.
*/
PTR mipsmt_sys_sched_setaffinity
PTR mipsmt_sys_sched_getaffinity
#else
PTR sys_sched_setaffinity
PTR sys_sched_getaffinity /* 4240 */
#endif /* CONFIG_MIPS_MT_FPAFF */
PTR sys_io_setup
PTR sys_io_destroy
PTR sys_io_getevents
PTR sys_io_submit
PTR sys_io_cancel /* 4245 */
PTR sys_exit_group
PTR sys_lookup_dcookie
PTR sys_epoll_create
PTR sys_epoll_ctl
PTR sys_epoll_wait /* 4250 */
PTR sys_remap_file_pages
PTR sys_set_tid_address
PTR sys_restart_syscall
PTR sys_fadvise64_64
PTR sys_statfs64 /* 4255 */
PTR sys_fstatfs64
PTR sys_timer_create
PTR sys_timer_settime
PTR sys_timer_gettime
PTR sys_timer_getoverrun /* 4260 */
PTR sys_timer_delete
PTR sys_clock_settime
PTR sys_clock_gettime
PTR sys_clock_getres
PTR sys_clock_nanosleep /* 4265 */
PTR sys_tgkill
PTR sys_utimes
PTR sys_mbind
PTR sys_get_mempolicy
PTR sys_set_mempolicy /* 4270 */
PTR sys_mq_open
PTR sys_mq_unlink
PTR sys_mq_timedsend
PTR sys_mq_timedreceive
PTR sys_mq_notify /* 4275 */
PTR sys_mq_getsetattr
PTR sys_ni_syscall /* sys_vserver */
PTR sys_waitid
PTR sys_ni_syscall /* available, was setaltroot */
PTR sys_add_key /* 4280 */
PTR sys_request_key
PTR sys_keyctl
PTR sys_set_thread_area
PTR sys_inotify_init
PTR sys_inotify_add_watch /* 4285 */
PTR sys_inotify_rm_watch
PTR sys_migrate_pages
PTR sys_openat
PTR sys_mkdirat
PTR sys_mknodat /* 4290 */
PTR sys_fchownat
PTR sys_futimesat
PTR sys_fstatat64
PTR sys_unlinkat
PTR sys_renameat /* 4295 */
PTR sys_linkat
PTR sys_symlinkat
PTR sys_readlinkat
PTR sys_fchmodat
PTR sys_faccessat /* 4300 */
PTR sys_pselect6
PTR sys_ppoll
PTR sys_unshare
PTR sys_splice
PTR sys_sync_file_range /* 4305 */
PTR sys_tee
PTR sys_vmsplice
PTR sys_move_pages
PTR sys_set_robust_list
PTR sys_get_robust_list /* 4310 */
PTR sys_kexec_load
PTR sys_getcpu
PTR sys_epoll_pwait
PTR sys_ioprio_set
PTR sys_ioprio_get /* 4315 */
PTR sys_utimensat
PTR sys_signalfd
PTR sys_ni_syscall /* was timerfd */
PTR sys_eventfd
PTR sys_fallocate /* 4320 */
PTR sys_timerfd_create
PTR sys_timerfd_gettime
PTR sys_timerfd_settime
PTR sys_signalfd4
PTR sys_eventfd2 /* 4325 */
PTR sys_epoll_create1
PTR sys_dup3
PTR sys_pipe2
PTR sys_inotify_init1
PTR sys_preadv /* 4330 */
PTR sys_pwritev
PTR sys_rt_tgsigqueueinfo
PTR sys_perf_event_open
PTR sys_accept4
PTR sys_recvmmsg /* 4335 */
PTR sys_fanotify_init
PTR sys_fanotify_mark
PTR sys_prlimit64
PTR sys_name_to_handle_at
PTR sys_open_by_handle_at /* 4340 */
PTR sys_clock_adjtime
PTR sys_syncfs
PTR sys_sendmmsg
PTR sys_setns
PTR sys_process_vm_readv /* 4345 */
PTR sys_process_vm_writev
PTR sys_kcmp
PTR sys_finit_module
PTR sys_sched_setattr
PTR sys_sched_getattr /* 4350 */
PTR sys_renameat2
PTR sys_seccomp
PTR sys_getrandom
PTR sys_memfd_create
PTR sys_bpf /* 4355 */


@ -0,0 +1,438 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995, 96, 97, 98, 99, 2000, 01, 02 by Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2001 MIPS Technologies, Inc.
*/
#include <linux/errno.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/irqflags.h>
#include <asm/mipsregs.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/asm-offsets.h>
#include <asm/sysmips.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/war.h>
#ifndef CONFIG_BINFMT_ELF32
/* Neither O32 nor N32, so define handle_sys here */
#define handle_sys64 handle_sys
#endif
.align 5
NESTED(handle_sys64, PT_SIZE, sp)
#if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32)
/*
* When 32-bit compatibility is configured scall_o32.S
* already did this.
*/
.set noat
SAVE_SOME
TRACE_IRQS_ON_RELOAD
STI
.set at
#endif
dsubu t0, v0, __NR_64_Linux # check syscall number
sltiu t0, t0, __NR_64_Linux_syscalls + 1
#if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32)
ld t1, PT_EPC(sp) # skip syscall on return
daddiu t1, 4 # skip to next instruction
sd t1, PT_EPC(sp)
#endif
beqz t0, illegal_syscall
dsll t0, v0, 3 # offset into table
ld t2, (sys_call_table - (__NR_64_Linux * 8))(t0)
# syscall routine
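/*
 * The load above indexes sys_call_table with the raw syscall number
 * scaled by 8; biasing the table base by __NR_64_Linux * 8 avoids
 * having to subtract the n64 syscall offset first.
 */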
sd a3, PT_R26(sp) # save a3 for syscall restarting
li t1, _TIF_WORK_SYSCALL_ENTRY
LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
and t0, t1, t0
bnez t0, syscall_trace_entry
jalr t2 # Do The Real Thing (TM)
li t0, -EMAXERRNO - 1 # error?
sltu t0, t0, v0
sd t0, PT_R7(sp) # set error flag
beqz t0, 1f
ld t1, PT_R2(sp) # syscall number
dnegu v0 # error
sd t1, PT_R0(sp) # save it for syscall restarting
1: sd v0, PT_R2(sp) # result
n64_syscall_exit:
j syscall_exit_partial
/* ------------------------------------------------------------------------ */
syscall_trace_entry:
SAVE_STATIC
move s0, t2
move a0, sp
daddiu a1, v0, __NR_64_Linux
jal syscall_trace_enter
bltz v0, 2f # seccomp failed? Skip syscall
move t0, s0
RESTORE_STATIC
ld a0, PT_R4(sp) # Restore argument registers
ld a1, PT_R5(sp)
ld a2, PT_R6(sp)
ld a3, PT_R7(sp)
ld a4, PT_R8(sp)
ld a5, PT_R9(sp)
jalr t0
li t0, -EMAXERRNO - 1 # error?
sltu t0, t0, v0
sd t0, PT_R7(sp) # set error flag
beqz t0, 1f
ld t1, PT_R2(sp) # syscall number
dnegu v0 # error
sd t1, PT_R0(sp) # save it for syscall restarting
1: sd v0, PT_R2(sp) # result
2: j syscall_exit
illegal_syscall:
/* This also isn't a 64-bit syscall, throw an error. */
li v0, ENOSYS # error
sd v0, PT_R2(sp)
li t0, 1 # set error flag
sd t0, PT_R7(sp)
j n64_syscall_exit
END(handle_sys64)
.align 3
.type sys_call_table, @object
EXPORT(sys_call_table)
PTR sys_read /* 5000 */
PTR sys_write
PTR sys_open
PTR sys_close
PTR sys_newstat
PTR sys_newfstat /* 5005 */
PTR sys_newlstat
PTR sys_poll
PTR sys_lseek
PTR sys_mips_mmap
PTR sys_mprotect /* 5010 */
PTR sys_munmap
PTR sys_brk
PTR sys_rt_sigaction
PTR sys_rt_sigprocmask
PTR sys_ioctl /* 5015 */
PTR sys_pread64
PTR sys_pwrite64
PTR sys_readv
PTR sys_writev
PTR sys_access /* 5020 */
PTR sysm_pipe
PTR sys_select
PTR sys_sched_yield
PTR sys_mremap
PTR sys_msync /* 5025 */
PTR sys_mincore
PTR sys_madvise
PTR sys_shmget
PTR sys_shmat
PTR sys_shmctl /* 5030 */
PTR sys_dup
PTR sys_dup2
PTR sys_pause
PTR sys_nanosleep
PTR sys_getitimer /* 5035 */
PTR sys_setitimer
PTR sys_alarm
PTR sys_getpid
PTR sys_sendfile64
PTR sys_socket /* 5040 */
PTR sys_connect
PTR sys_accept
PTR sys_sendto
PTR sys_recvfrom
PTR sys_sendmsg /* 5045 */
PTR sys_recvmsg
PTR sys_shutdown
PTR sys_bind
PTR sys_listen
PTR sys_getsockname /* 5050 */
PTR sys_getpeername
PTR sys_socketpair
PTR sys_setsockopt
PTR sys_getsockopt
PTR __sys_clone /* 5055 */
PTR __sys_fork
PTR sys_execve
PTR sys_exit
PTR sys_wait4
PTR sys_kill /* 5060 */
PTR sys_newuname
PTR sys_semget
PTR sys_semop
PTR sys_semctl
PTR sys_shmdt /* 5065 */
PTR sys_msgget
PTR sys_msgsnd
PTR sys_msgrcv
PTR sys_msgctl
PTR sys_fcntl /* 5070 */
PTR sys_flock
PTR sys_fsync
PTR sys_fdatasync
PTR sys_truncate
PTR sys_ftruncate /* 5075 */
PTR sys_getdents
PTR sys_getcwd
PTR sys_chdir
PTR sys_fchdir
PTR sys_rename /* 5080 */
PTR sys_mkdir
PTR sys_rmdir
PTR sys_creat
PTR sys_link
PTR sys_unlink /* 5085 */
PTR sys_symlink
PTR sys_readlink
PTR sys_chmod
PTR sys_fchmod
PTR sys_chown /* 5090 */
PTR sys_fchown
PTR sys_lchown
PTR sys_umask
PTR sys_gettimeofday
PTR sys_getrlimit /* 5095 */
PTR sys_getrusage
PTR sys_sysinfo
PTR sys_times
PTR sys_ptrace
PTR sys_getuid /* 5100 */
PTR sys_syslog
PTR sys_getgid
PTR sys_setuid
PTR sys_setgid
PTR sys_geteuid /* 5105 */
PTR sys_getegid
PTR sys_setpgid
PTR sys_getppid
PTR sys_getpgrp
PTR sys_setsid /* 5110 */
PTR sys_setreuid
PTR sys_setregid
PTR sys_getgroups
PTR sys_setgroups
PTR sys_setresuid /* 5115 */
PTR sys_getresuid
PTR sys_setresgid
PTR sys_getresgid
PTR sys_getpgid
PTR sys_setfsuid /* 5120 */
PTR sys_setfsgid
PTR sys_getsid
PTR sys_capget
PTR sys_capset
PTR sys_rt_sigpending /* 5125 */
PTR sys_rt_sigtimedwait
PTR sys_rt_sigqueueinfo
PTR sys_rt_sigsuspend
PTR sys_sigaltstack
PTR sys_utime /* 5130 */
PTR sys_mknod
PTR sys_personality
PTR sys_ustat
PTR sys_statfs
PTR sys_fstatfs /* 5135 */
PTR sys_sysfs
PTR sys_getpriority
PTR sys_setpriority
PTR sys_sched_setparam
PTR sys_sched_getparam /* 5140 */
PTR sys_sched_setscheduler
PTR sys_sched_getscheduler
PTR sys_sched_get_priority_max
PTR sys_sched_get_priority_min
PTR sys_sched_rr_get_interval /* 5145 */
PTR sys_mlock
PTR sys_munlock
PTR sys_mlockall
PTR sys_munlockall
PTR sys_vhangup /* 5150 */
PTR sys_pivot_root
PTR sys_sysctl
PTR sys_prctl
PTR sys_adjtimex
PTR sys_setrlimit /* 5155 */
PTR sys_chroot
PTR sys_sync
PTR sys_acct
PTR sys_settimeofday
PTR sys_mount /* 5160 */
PTR sys_umount
PTR sys_swapon
PTR sys_swapoff
PTR sys_reboot
PTR sys_sethostname /* 5165 */
PTR sys_setdomainname
PTR sys_ni_syscall /* was create_module */
PTR sys_init_module
PTR sys_delete_module
PTR sys_ni_syscall /* 5170, was get_kernel_syms */
PTR sys_ni_syscall /* was query_module */
PTR sys_quotactl
PTR sys_ni_syscall /* was nfsservctl */
PTR sys_ni_syscall /* res. for getpmsg */
PTR sys_ni_syscall /* 5175 for putpmsg */
PTR sys_ni_syscall /* res. for afs_syscall */
PTR sys_ni_syscall /* res. for security */
PTR sys_gettid
PTR sys_readahead
PTR sys_setxattr /* 5180 */
PTR sys_lsetxattr
PTR sys_fsetxattr
PTR sys_getxattr
PTR sys_lgetxattr
PTR sys_fgetxattr /* 5185 */
PTR sys_listxattr
PTR sys_llistxattr
PTR sys_flistxattr
PTR sys_removexattr
PTR sys_lremovexattr /* 5190 */
PTR sys_fremovexattr
PTR sys_tkill
PTR sys_ni_syscall
PTR sys_futex
PTR sys_sched_setaffinity /* 5195 */
PTR sys_sched_getaffinity
PTR sys_cacheflush
PTR sys_cachectl
PTR sys_sysmips
PTR sys_io_setup /* 5200 */
PTR sys_io_destroy
PTR sys_io_getevents
PTR sys_io_submit
PTR sys_io_cancel
PTR sys_exit_group /* 5205 */
PTR sys_lookup_dcookie
PTR sys_epoll_create
PTR sys_epoll_ctl
PTR sys_epoll_wait
PTR sys_remap_file_pages /* 5210 */
PTR sys_rt_sigreturn
PTR sys_set_tid_address
PTR sys_restart_syscall
PTR sys_semtimedop
PTR sys_fadvise64_64 /* 5215 */
PTR sys_timer_create
PTR sys_timer_settime
PTR sys_timer_gettime
PTR sys_timer_getoverrun
PTR sys_timer_delete /* 5220 */
PTR sys_clock_settime
PTR sys_clock_gettime
PTR sys_clock_getres
PTR sys_clock_nanosleep
PTR sys_tgkill /* 5225 */
PTR sys_utimes
PTR sys_mbind
PTR sys_get_mempolicy
PTR sys_set_mempolicy
PTR sys_mq_open /* 5230 */
PTR sys_mq_unlink
PTR sys_mq_timedsend
PTR sys_mq_timedreceive
PTR sys_mq_notify
PTR sys_mq_getsetattr /* 5235 */
PTR sys_ni_syscall /* sys_vserver */
PTR sys_waitid
PTR sys_ni_syscall /* available, was setaltroot */
PTR sys_add_key
PTR sys_request_key /* 5240 */
PTR sys_keyctl
PTR sys_set_thread_area
PTR sys_inotify_init
PTR sys_inotify_add_watch
PTR sys_inotify_rm_watch /* 5245 */
PTR sys_migrate_pages
PTR sys_openat
PTR sys_mkdirat
PTR sys_mknodat
PTR sys_fchownat /* 5250 */
PTR sys_futimesat
PTR sys_newfstatat
PTR sys_unlinkat
PTR sys_renameat
PTR sys_linkat /* 5255 */
PTR sys_symlinkat
PTR sys_readlinkat
PTR sys_fchmodat
PTR sys_faccessat
PTR sys_pselect6 /* 5260 */
PTR sys_ppoll
PTR sys_unshare
PTR sys_splice
PTR sys_sync_file_range
PTR sys_tee /* 5265 */
PTR sys_vmsplice
PTR sys_move_pages
PTR sys_set_robust_list
PTR sys_get_robust_list
PTR sys_kexec_load /* 5270 */
PTR sys_getcpu
PTR sys_epoll_pwait
PTR sys_ioprio_set
PTR sys_ioprio_get
PTR sys_utimensat /* 5275 */
PTR sys_signalfd
PTR sys_ni_syscall /* was timerfd */
PTR sys_eventfd
PTR sys_fallocate
PTR sys_timerfd_create /* 5280 */
PTR sys_timerfd_gettime
PTR sys_timerfd_settime
PTR sys_signalfd4
PTR sys_eventfd2
PTR sys_epoll_create1 /* 5285 */
PTR sys_dup3
PTR sys_pipe2
PTR sys_inotify_init1
PTR sys_preadv
PTR sys_pwritev /* 5290 */
PTR sys_rt_tgsigqueueinfo
PTR sys_perf_event_open
PTR sys_accept4
PTR sys_recvmmsg
PTR sys_fanotify_init /* 5295 */
PTR sys_fanotify_mark
PTR sys_prlimit64
PTR sys_name_to_handle_at
PTR sys_open_by_handle_at
PTR sys_clock_adjtime /* 5300 */
PTR sys_syncfs
PTR sys_sendmmsg
PTR sys_setns
PTR sys_process_vm_readv
PTR sys_process_vm_writev /* 5305 */
PTR sys_kcmp
PTR sys_finit_module
PTR sys_getdents64
PTR sys_sched_setattr
PTR sys_sched_getattr /* 5310 */
PTR sys_renameat2
PTR sys_seccomp
PTR sys_getrandom
PTR sys_memfd_create
PTR sys_bpf /* 5315 */
.size sys_call_table,.-sys_call_table

View file

@ -0,0 +1,431 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995, 96, 97, 98, 99, 2000, 01 by Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2001 MIPS Technologies, Inc.
*/
#include <linux/errno.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/irqflags.h>
#include <asm/mipsregs.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#ifndef CONFIG_MIPS32_O32
/* No O32, so define handle_sys here */
#define handle_sysn32 handle_sys
#endif
.align 5
NESTED(handle_sysn32, PT_SIZE, sp)
#ifndef CONFIG_MIPS32_O32
.set noat
SAVE_SOME
TRACE_IRQS_ON_RELOAD
STI
.set at
#endif
dsubu t0, v0, __NR_N32_Linux # check syscall number
sltiu t0, t0, __NR_N32_Linux_syscalls + 1
#ifndef CONFIG_MIPS32_O32
ld t1, PT_EPC(sp) # skip syscall on return
daddiu t1, 4 # skip to next instruction
sd t1, PT_EPC(sp)
#endif
beqz t0, not_n32_scall
dsll t0, v0, 3 # offset into table
ld t2, (sysn32_call_table - (__NR_N32_Linux * 8))(t0)
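# Note: v0 carries the absolute N32 syscall number (the table comments
# below show it is __NR_N32_Linux = 6000 based); the dsll by 3 above
# scales it to an 8-byte pointer offset, and biasing the table base by
# __NR_N32_Linux * 8 makes entry 0 line up with syscall 6000.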
sd a3, PT_R26(sp) # save a3 for syscall restarting
li t1, _TIF_WORK_SYSCALL_ENTRY
LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
and t0, t1, t0
bnez t0, n32_syscall_trace_entry
jalr t2 # Do The Real Thing (TM)
li t0, -EMAXERRNO - 1 # error?
sltu t0, t0, v0
sd t0, PT_R7(sp) # set error flag
beqz t0, 1f
ld t1, PT_R2(sp) # syscall number
dnegu v0 # error
sd t1, PT_R0(sp) # save it for syscall restarting
1: sd v0, PT_R2(sp) # result
j syscall_exit_partial
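# A word on the return convention implemented above: results in v0 that
# compare (unsigned) above -EMAXERRNO - 1 are errnos, so PT_R7 (a3) is
# written as the error flag userspace tests, v0 is negated into a
# positive errno, and the original syscall number is parked in PT_R0 so
# the signal code can restart the syscall if needed.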
/* ------------------------------------------------------------------------ */
n32_syscall_trace_entry:
SAVE_STATIC
move s0, t2
move a0, sp
daddiu a1, v0, __NR_N32_Linux
jal syscall_trace_enter
bltz v0, 2f # seccomp failed? Skip syscall
move t0, s0
RESTORE_STATIC
ld a0, PT_R4(sp) # Restore argument registers
ld a1, PT_R5(sp)
ld a2, PT_R6(sp)
ld a3, PT_R7(sp)
ld a4, PT_R8(sp)
ld a5, PT_R9(sp)
jalr t0
li t0, -EMAXERRNO - 1 # error?
sltu t0, t0, v0
sd t0, PT_R7(sp) # set error flag
beqz t0, 1f
ld t1, PT_R2(sp) # syscall number
dnegu v0 # error
sd t1, PT_R0(sp) # save it for syscall restarting
1: sd v0, PT_R2(sp) # result
2: j syscall_exit
not_n32_scall:
/*
* This is not an n32 compatibility syscall, pass it on to
* the n64 syscall handlers.
*/
j handle_sys64
END(handle_sysn32)
.type sysn32_call_table, @object
EXPORT(sysn32_call_table)
PTR sys_read /* 6000 */
PTR sys_write
PTR sys_open
PTR sys_close
PTR sys_newstat
PTR sys_newfstat /* 6005 */
PTR sys_newlstat
PTR sys_poll
PTR sys_lseek
PTR sys_mips_mmap
PTR sys_mprotect /* 6010 */
PTR sys_munmap
PTR sys_brk
PTR compat_sys_rt_sigaction
PTR compat_sys_rt_sigprocmask
PTR compat_sys_ioctl /* 6015 */
PTR sys_pread64
PTR sys_pwrite64
PTR compat_sys_readv
PTR compat_sys_writev
PTR sys_access /* 6020 */
PTR sysm_pipe
PTR compat_sys_select
PTR sys_sched_yield
PTR sys_mremap
PTR sys_msync /* 6025 */
PTR sys_mincore
PTR sys_madvise
PTR sys_shmget
PTR sys_shmat
PTR compat_sys_shmctl /* 6030 */
PTR sys_dup
PTR sys_dup2
PTR sys_pause
PTR compat_sys_nanosleep
PTR compat_sys_getitimer /* 6035 */
PTR compat_sys_setitimer
PTR sys_alarm
PTR sys_getpid
PTR compat_sys_sendfile
PTR sys_socket /* 6040 */
PTR sys_connect
PTR sys_accept
PTR sys_sendto
PTR compat_sys_recvfrom
PTR compat_sys_sendmsg /* 6045 */
PTR compat_sys_recvmsg
PTR sys_shutdown
PTR sys_bind
PTR sys_listen
PTR sys_getsockname /* 6050 */
PTR sys_getpeername
PTR sys_socketpair
PTR compat_sys_setsockopt
PTR compat_sys_getsockopt
PTR __sys_clone /* 6055 */
PTR __sys_fork
PTR compat_sys_execve
PTR sys_exit
PTR compat_sys_wait4
PTR sys_kill /* 6060 */
PTR sys_newuname
PTR sys_semget
PTR sys_semop
PTR compat_sys_semctl
PTR sys_shmdt /* 6065 */
PTR sys_msgget
PTR compat_sys_msgsnd
PTR compat_sys_msgrcv
PTR compat_sys_msgctl
PTR compat_sys_fcntl /* 6070 */
PTR sys_flock
PTR sys_fsync
PTR sys_fdatasync
PTR sys_truncate
PTR sys_ftruncate /* 6075 */
PTR compat_sys_getdents
PTR sys_getcwd
PTR sys_chdir
PTR sys_fchdir
PTR sys_rename /* 6080 */
PTR sys_mkdir
PTR sys_rmdir
PTR sys_creat
PTR sys_link
PTR sys_unlink /* 6085 */
PTR sys_symlink
PTR sys_readlink
PTR sys_chmod
PTR sys_fchmod
PTR sys_chown /* 6090 */
PTR sys_fchown
PTR sys_lchown
PTR sys_umask
PTR compat_sys_gettimeofday
PTR compat_sys_getrlimit /* 6095 */
PTR compat_sys_getrusage
PTR compat_sys_sysinfo
PTR compat_sys_times
PTR compat_sys_ptrace
PTR sys_getuid /* 6100 */
PTR sys_syslog
PTR sys_getgid
PTR sys_setuid
PTR sys_setgid
PTR sys_geteuid /* 6105 */
PTR sys_getegid
PTR sys_setpgid
PTR sys_getppid
PTR sys_getpgrp
PTR sys_setsid /* 6110 */
PTR sys_setreuid
PTR sys_setregid
PTR sys_getgroups
PTR sys_setgroups
PTR sys_setresuid /* 6115 */
PTR sys_getresuid
PTR sys_setresgid
PTR sys_getresgid
PTR sys_getpgid
PTR sys_setfsuid /* 6120 */
PTR sys_setfsgid
PTR sys_getsid
PTR sys_capget
PTR sys_capset
PTR compat_sys_rt_sigpending /* 6125 */
PTR compat_sys_rt_sigtimedwait
PTR compat_sys_rt_sigqueueinfo
PTR compat_sys_rt_sigsuspend
PTR compat_sys_sigaltstack
PTR compat_sys_utime /* 6130 */
PTR sys_mknod
PTR sys_32_personality
PTR compat_sys_ustat
PTR compat_sys_statfs
PTR compat_sys_fstatfs /* 6135 */
PTR sys_sysfs
PTR sys_getpriority
PTR sys_setpriority
PTR sys_sched_setparam
PTR sys_sched_getparam /* 6140 */
PTR sys_sched_setscheduler
PTR sys_sched_getscheduler
PTR sys_sched_get_priority_max
PTR sys_sched_get_priority_min
PTR compat_sys_sched_rr_get_interval /* 6145 */
PTR sys_mlock
PTR sys_munlock
PTR sys_mlockall
PTR sys_munlockall
PTR sys_vhangup /* 6150 */
PTR sys_pivot_root
PTR compat_sys_sysctl
PTR sys_prctl
PTR compat_sys_adjtimex
PTR compat_sys_setrlimit /* 6155 */
PTR sys_chroot
PTR sys_sync
PTR sys_acct
PTR compat_sys_settimeofday
PTR compat_sys_mount /* 6160 */
PTR sys_umount
PTR sys_swapon
PTR sys_swapoff
PTR sys_reboot
PTR sys_sethostname /* 6165 */
PTR sys_setdomainname
PTR sys_ni_syscall /* was create_module */
PTR sys_init_module
PTR sys_delete_module
PTR sys_ni_syscall /* 6170, was get_kernel_syms */
PTR sys_ni_syscall /* was query_module */
PTR sys_quotactl
PTR sys_ni_syscall /* was nfsservctl */
PTR sys_ni_syscall /* res. for getpmsg */
PTR sys_ni_syscall /* 6175 for putpmsg */
PTR sys_ni_syscall /* res. for afs_syscall */
PTR sys_ni_syscall /* res. for security */
PTR sys_gettid
PTR sys_readahead
PTR sys_setxattr /* 6180 */
PTR sys_lsetxattr
PTR sys_fsetxattr
PTR sys_getxattr
PTR sys_lgetxattr
PTR sys_fgetxattr /* 6185 */
PTR sys_listxattr
PTR sys_llistxattr
PTR sys_flistxattr
PTR sys_removexattr
PTR sys_lremovexattr /* 6190 */
PTR sys_fremovexattr
PTR sys_tkill
PTR sys_ni_syscall
PTR compat_sys_futex
PTR compat_sys_sched_setaffinity /* 6195 */
PTR compat_sys_sched_getaffinity
PTR sys_cacheflush
PTR sys_cachectl
PTR sys_sysmips
PTR compat_sys_io_setup /* 6200 */
PTR sys_io_destroy
PTR compat_sys_io_getevents
PTR compat_sys_io_submit
PTR sys_io_cancel
PTR sys_exit_group /* 6205 */
PTR sys_lookup_dcookie
PTR sys_epoll_create
PTR sys_epoll_ctl
PTR sys_epoll_wait
PTR sys_remap_file_pages /* 6210 */
PTR sysn32_rt_sigreturn
PTR compat_sys_fcntl64
PTR sys_set_tid_address
PTR sys_restart_syscall
PTR compat_sys_semtimedop /* 6215 */
PTR sys_fadvise64_64
PTR compat_sys_statfs64
PTR compat_sys_fstatfs64
PTR sys_sendfile64
PTR compat_sys_timer_create /* 6220 */
PTR compat_sys_timer_settime
PTR compat_sys_timer_gettime
PTR sys_timer_getoverrun
PTR sys_timer_delete
PTR compat_sys_clock_settime /* 6225 */
PTR compat_sys_clock_gettime
PTR compat_sys_clock_getres
PTR compat_sys_clock_nanosleep
PTR sys_tgkill
PTR compat_sys_utimes /* 6230 */
PTR compat_sys_mbind
PTR compat_sys_get_mempolicy
PTR compat_sys_set_mempolicy
PTR compat_sys_mq_open
PTR sys_mq_unlink /* 6235 */
PTR compat_sys_mq_timedsend
PTR compat_sys_mq_timedreceive
PTR compat_sys_mq_notify
PTR compat_sys_mq_getsetattr
PTR sys_ni_syscall /* 6240, sys_vserver */
PTR compat_sys_waitid
PTR sys_ni_syscall /* available, was setaltroot */
PTR sys_add_key
PTR sys_request_key
PTR sys_keyctl /* 6245 */
PTR sys_set_thread_area
PTR sys_inotify_init
PTR sys_inotify_add_watch
PTR sys_inotify_rm_watch
PTR compat_sys_migrate_pages /* 6250 */
PTR sys_openat
PTR sys_mkdirat
PTR sys_mknodat
PTR sys_fchownat
PTR compat_sys_futimesat /* 6255 */
PTR sys_newfstatat
PTR sys_unlinkat
PTR sys_renameat
PTR sys_linkat
PTR sys_symlinkat /* 6260 */
PTR sys_readlinkat
PTR sys_fchmodat
PTR sys_faccessat
PTR compat_sys_pselect6
PTR compat_sys_ppoll /* 6265 */
PTR sys_unshare
PTR sys_splice
PTR sys_sync_file_range
PTR sys_tee
PTR compat_sys_vmsplice /* 6270 */
PTR compat_sys_move_pages
PTR compat_sys_set_robust_list
PTR compat_sys_get_robust_list
PTR compat_sys_kexec_load
PTR sys_getcpu /* 6275 */
PTR compat_sys_epoll_pwait
PTR sys_ioprio_set
PTR sys_ioprio_get
PTR compat_sys_utimensat
PTR compat_sys_signalfd /* 6280 */
PTR sys_ni_syscall /* was timerfd */
PTR sys_eventfd
PTR sys_fallocate
PTR sys_timerfd_create
PTR compat_sys_timerfd_gettime /* 6285 */
PTR compat_sys_timerfd_settime
PTR compat_sys_signalfd4
PTR sys_eventfd2
PTR sys_epoll_create1
PTR sys_dup3 /* 6290 */
PTR sys_pipe2
PTR sys_inotify_init1
PTR compat_sys_preadv
PTR compat_sys_pwritev
PTR compat_sys_rt_tgsigqueueinfo /* 6295 */
PTR sys_perf_event_open
PTR sys_accept4
PTR compat_sys_recvmmsg
PTR sys_getdents64
PTR sys_fanotify_init /* 6300 */
PTR sys_fanotify_mark
PTR sys_prlimit64
PTR sys_name_to_handle_at
PTR sys_open_by_handle_at
PTR compat_sys_clock_adjtime /* 6305 */
PTR sys_syncfs
PTR compat_sys_sendmmsg
PTR sys_setns
PTR compat_sys_process_vm_readv
PTR compat_sys_process_vm_writev /* 6310 */
PTR sys_kcmp
PTR sys_finit_module
PTR sys_sched_setattr
PTR sys_sched_getattr
PTR sys_renameat2 /* 6315 */
PTR sys_seccomp
PTR sys_getrandom
PTR sys_memfd_create
PTR sys_bpf
.size sysn32_call_table,.-sysn32_call_table

View file

@ -0,0 +1,568 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995 - 2000, 2001 by Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2001 MIPS Technologies, Inc.
* Copyright (C) 2004 Thiemo Seufer
*
* Hairy, the userspace application uses a different argument passing
* convention than the kernel, so we have to translate things from o32
* to ABI64 calling convention. 64-bit syscalls are also processed
* here for now.
*/
#include <linux/errno.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/irqflags.h>
#include <asm/mipsregs.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/sysmips.h>
.align 5
NESTED(handle_sys, PT_SIZE, sp)
.set noat
SAVE_SOME
TRACE_IRQS_ON_RELOAD
STI
.set at
ld t1, PT_EPC(sp) # skip syscall on return
dsubu t0, v0, __NR_O32_Linux # check syscall number
sltiu t0, t0, __NR_O32_Linux_syscalls + 1
daddiu t1, 4 # skip to next instruction
sd t1, PT_EPC(sp)
beqz t0, not_o32_scall
#if 0
SAVE_ALL
move a1, v0
PRINT("Scall %ld\n")
RESTORE_ALL
#endif
/* We don't want to stumble over broken sign extensions from
userland. O32 never uses the upper half. */
sll a0, a0, 0
sll a1, a1, 0
sll a2, a2, 0
sll a3, a3, 0
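# (On MIPS64, "sll reg, reg, 0" sign-extends the low 32 bits into the
# full 64-bit register, which is the canonical form the o32 arguments
# are expected to be in.)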
dsll t0, v0, 3 # offset into table
ld t2, (sys32_call_table - (__NR_O32_Linux * 8))(t0)
sd a3, PT_R26(sp) # save a3 for syscall restarting
/*
* More than four arguments. Try to deal with it by copying the
* stack arguments from the user stack to the kernel stack.
* This Sucks (TM).
*
* We intentionally keep the kernel stack a little below the top of
* userspace so we don't have to do a slower byte accurate check here.
*/
ld t0, PT_R29(sp) # get old user stack pointer
daddu t1, t0, 32
bltz t1, bad_stack
1: lw a4, 16(t0) # argument #5 from usp
2: lw a5, 20(t0) # argument #6 from usp
3: lw a6, 24(t0) # argument #7 from usp
4: lw a7, 28(t0) # argument #8 from usp (for indirect syscalls)
.section __ex_table,"a"
PTR 1b, bad_stack
PTR 2b, bad_stack
PTR 3b, bad_stack
PTR 4b, bad_stack
.previous
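# The __ex_table entries above pair each user-stack load with bad_stack,
# so a fault while fetching arguments 5-8 takes the EFAULT path below
# instead of oopsing the kernel.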
li t1, _TIF_WORK_SYSCALL_ENTRY
LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
and t0, t1, t0
bnez t0, trace_a_syscall
jalr t2 # Do The Real Thing (TM)
li t0, -EMAXERRNO - 1 # error?
sltu t0, t0, v0
sd t0, PT_R7(sp) # set error flag
beqz t0, 1f
ld t1, PT_R2(sp) # syscall number
dnegu v0 # error
sd t1, PT_R0(sp) # save it for syscall restarting
1: sd v0, PT_R2(sp) # result
o32_syscall_exit:
j syscall_exit_partial
/* ------------------------------------------------------------------------ */
trace_a_syscall:
SAVE_STATIC
sd a4, PT_R8(sp) # Save argument registers
sd a5, PT_R9(sp)
sd a6, PT_R10(sp)
sd a7, PT_R11(sp) # For indirect syscalls
move s0, t2 # Save syscall pointer
move a0, sp
/*
* absolute syscall number is in v0 unless we called syscall(__NR_###)
* where the real syscall number is in a0
* note: NR_syscall is the first O32 syscall but the macro is
* only defined when compiling with -mabi=32 (CONFIG_32BIT)
* therefore __NR_O32_Linux is used (4000)
*/
.set push
.set reorder
subu t1, v0, __NR_O32_Linux
move a1, v0
bnez t1, 1f /* __NR_syscall at offset 0 */
lw a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
.set pop
1: jal syscall_trace_enter
bltz v0, 2f # seccomp failed? Skip syscall
move t0, s0
RESTORE_STATIC
ld a0, PT_R4(sp) # Restore argument registers
ld a1, PT_R5(sp)
ld a2, PT_R6(sp)
ld a3, PT_R7(sp)
ld a4, PT_R8(sp)
ld a5, PT_R9(sp)
ld a6, PT_R10(sp)
ld a7, PT_R11(sp) # For indirect syscalls
jalr t0
li t0, -EMAXERRNO - 1 # error?
sltu t0, t0, v0
sd t0, PT_R7(sp) # set error flag
beqz t0, 1f
ld t1, PT_R2(sp) # syscall number
dnegu v0 # error
sd t1, PT_R0(sp) # save it for syscall restarting
1: sd v0, PT_R2(sp) # result
2: j syscall_exit
/* ------------------------------------------------------------------------ */
/*
* The stack pointer for a call with more than 4 arguments is bad.
*/
bad_stack:
li v0, EFAULT
sd v0, PT_R2(sp)
li t0, 1 # set error flag
sd t0, PT_R7(sp)
j o32_syscall_exit
not_o32_scall:
/*
* This is not an o32 compatibility syscall, pass it on
* to the 64-bit syscall handlers.
*/
#ifdef CONFIG_MIPS32_N32
j handle_sysn32
#else
j handle_sys64
#endif
END(handle_sys)
LEAF(sys32_syscall)
subu t0, a0, __NR_O32_Linux # check syscall number
sltiu v0, t0, __NR_O32_Linux_syscalls + 1
beqz t0, einval # do not recurse
dsll t1, t0, 3
beqz v0, einval
ld t2, sys32_call_table(t1) # syscall routine
move a0, a1 # shift argument registers
move a1, a2
move a2, a3
move a3, a4
move a4, a5
move a5, a6
move a6, a7
sd a0, PT_R4(sp) # ... and push back a0 - a3, some
sd a1, PT_R5(sp) # syscalls expect them there
sd a2, PT_R6(sp)
sd a3, PT_R7(sp)
sd a3, PT_R26(sp) # update a3 for syscall restarting
jr t2
/* Unreached */
einval: li v0, -ENOSYS
jr ra
END(sys32_syscall)
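/*
* sys32_syscall() implements the o32 indirect syscall(2): the real
* syscall number arrives in a0, so every argument is shifted down one
* register, a0 - a3 are pushed back into the saved frame for handlers
* that fetch their arguments from pt_regs, and PT_R26 is updated so
* syscall restarting still sees the right a3.
*/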
.align 3
.type sys32_call_table,@object
EXPORT(sys32_call_table)
PTR sys32_syscall /* 4000 */
PTR sys_exit
PTR __sys_fork
PTR sys_read
PTR sys_write
PTR compat_sys_open /* 4005 */
PTR sys_close
PTR sys_waitpid
PTR sys_creat
PTR sys_link
PTR sys_unlink /* 4010 */
PTR compat_sys_execve
PTR sys_chdir
PTR compat_sys_time
PTR sys_mknod
PTR sys_chmod /* 4015 */
PTR sys_lchown
PTR sys_ni_syscall
PTR sys_ni_syscall /* was sys_stat */
PTR sys_lseek
PTR sys_getpid /* 4020 */
PTR compat_sys_mount
PTR sys_oldumount
PTR sys_setuid
PTR sys_getuid
PTR compat_sys_stime /* 4025 */
PTR compat_sys_ptrace
PTR sys_alarm
PTR sys_ni_syscall /* was sys_fstat */
PTR sys_pause
PTR compat_sys_utime /* 4030 */
PTR sys_ni_syscall
PTR sys_ni_syscall
PTR sys_access
PTR sys_nice
PTR sys_ni_syscall /* 4035 */
PTR sys_sync
PTR sys_kill
PTR sys_rename
PTR sys_mkdir
PTR sys_rmdir /* 4040 */
PTR sys_dup
PTR sysm_pipe
PTR compat_sys_times
PTR sys_ni_syscall
PTR sys_brk /* 4045 */
PTR sys_setgid
PTR sys_getgid
PTR sys_ni_syscall /* was signal 2 */
PTR sys_geteuid
PTR sys_getegid /* 4050 */
PTR sys_acct
PTR sys_umount
PTR sys_ni_syscall
PTR compat_sys_ioctl
PTR compat_sys_fcntl /* 4055 */
PTR sys_ni_syscall
PTR sys_setpgid
PTR sys_ni_syscall
PTR sys_olduname
PTR sys_umask /* 4060 */
PTR sys_chroot
PTR compat_sys_ustat
PTR sys_dup2
PTR sys_getppid
PTR sys_getpgrp /* 4065 */
PTR sys_setsid
PTR sys_32_sigaction
PTR sys_sgetmask
PTR sys_ssetmask
PTR sys_setreuid /* 4070 */
PTR sys_setregid
PTR sys32_sigsuspend
PTR compat_sys_sigpending
PTR sys_sethostname
PTR compat_sys_setrlimit /* 4075 */
PTR compat_sys_getrlimit
PTR compat_sys_getrusage
PTR compat_sys_gettimeofday
PTR compat_sys_settimeofday
PTR sys_getgroups /* 4080 */
PTR sys_setgroups
PTR sys_ni_syscall /* old_select */
PTR sys_symlink
PTR sys_ni_syscall /* was sys_lstat */
PTR sys_readlink /* 4085 */
PTR sys_uselib
PTR sys_swapon
PTR sys_reboot
PTR compat_sys_old_readdir
PTR sys_mips_mmap /* 4090 */
PTR sys_munmap
PTR compat_sys_truncate
PTR compat_sys_ftruncate
PTR sys_fchmod
PTR sys_fchown /* 4095 */
PTR sys_getpriority
PTR sys_setpriority
PTR sys_ni_syscall
PTR compat_sys_statfs
PTR compat_sys_fstatfs /* 4100 */
PTR sys_ni_syscall /* sys_ioperm */
PTR compat_sys_socketcall
PTR sys_syslog
PTR compat_sys_setitimer
PTR compat_sys_getitimer /* 4105 */
PTR compat_sys_newstat
PTR compat_sys_newlstat
PTR compat_sys_newfstat
PTR sys_uname
PTR sys_ni_syscall /* sys_ioperm *//* 4110 */
PTR sys_vhangup
PTR sys_ni_syscall /* was sys_idle */
PTR sys_ni_syscall /* sys_vm86 */
PTR compat_sys_wait4
PTR sys_swapoff /* 4115 */
PTR compat_sys_sysinfo
PTR compat_sys_ipc
PTR sys_fsync
PTR sys32_sigreturn
PTR __sys_clone /* 4120 */
PTR sys_setdomainname
PTR sys_newuname
PTR sys_ni_syscall /* sys_modify_ldt */
PTR compat_sys_adjtimex
PTR sys_mprotect /* 4125 */
PTR compat_sys_sigprocmask
PTR sys_ni_syscall /* was creat_module */
PTR sys_init_module
PTR sys_delete_module
PTR sys_ni_syscall /* 4130, get_kernel_syms */
PTR sys_quotactl
PTR sys_getpgid
PTR sys_fchdir
PTR sys_bdflush
PTR sys_sysfs /* 4135 */
PTR sys_32_personality
PTR sys_ni_syscall /* for afs_syscall */
PTR sys_setfsuid
PTR sys_setfsgid
PTR sys_32_llseek /* 4140 */
PTR compat_sys_getdents
PTR compat_sys_select
PTR sys_flock
PTR sys_msync
PTR compat_sys_readv /* 4145 */
PTR compat_sys_writev
PTR sys_cacheflush
PTR sys_cachectl
PTR sys_sysmips
PTR sys_ni_syscall /* 4150 */
PTR sys_getsid
PTR sys_fdatasync
PTR compat_sys_sysctl
PTR sys_mlock
PTR sys_munlock /* 4155 */
PTR sys_mlockall
PTR sys_munlockall
PTR sys_sched_setparam
PTR sys_sched_getparam
PTR sys_sched_setscheduler /* 4160 */
PTR sys_sched_getscheduler
PTR sys_sched_yield
PTR sys_sched_get_priority_max
PTR sys_sched_get_priority_min
PTR compat_sys_sched_rr_get_interval /* 4165 */
PTR compat_sys_nanosleep
PTR sys_mremap
PTR sys_accept
PTR sys_bind
PTR sys_connect /* 4170 */
PTR sys_getpeername
PTR sys_getsockname
PTR sys_getsockopt
PTR sys_listen
PTR compat_sys_recv /* 4175 */
PTR compat_sys_recvfrom
PTR compat_sys_recvmsg
PTR sys_send
PTR compat_sys_sendmsg
PTR sys_sendto /* 4180 */
PTR compat_sys_setsockopt
PTR sys_shutdown
PTR sys_socket
PTR sys_socketpair
PTR sys_setresuid /* 4185 */
PTR sys_getresuid
PTR sys_ni_syscall /* was query_module */
PTR sys_poll
PTR sys_ni_syscall /* was nfsservctl */
PTR sys_setresgid /* 4190 */
PTR sys_getresgid
PTR sys_prctl
PTR sys32_rt_sigreturn
PTR compat_sys_rt_sigaction
PTR compat_sys_rt_sigprocmask /* 4195 */
PTR compat_sys_rt_sigpending
PTR compat_sys_rt_sigtimedwait
PTR compat_sys_rt_sigqueueinfo
PTR compat_sys_rt_sigsuspend
PTR sys_32_pread /* 4200 */
PTR sys_32_pwrite
PTR sys_chown
PTR sys_getcwd
PTR sys_capget
PTR sys_capset /* 4205 */
PTR compat_sys_sigaltstack
PTR compat_sys_sendfile
PTR sys_ni_syscall
PTR sys_ni_syscall
PTR sys_mips_mmap2 /* 4210 */
PTR sys_32_truncate64
PTR sys_32_ftruncate64
PTR sys_newstat
PTR sys_newlstat
PTR sys_newfstat /* 4215 */
PTR sys_pivot_root
PTR sys_mincore
PTR sys_madvise
PTR sys_getdents64
PTR compat_sys_fcntl64 /* 4220 */
PTR sys_ni_syscall
PTR sys_gettid
PTR sys32_readahead
PTR sys_setxattr
PTR sys_lsetxattr /* 4225 */
PTR sys_fsetxattr
PTR sys_getxattr
PTR sys_lgetxattr
PTR sys_fgetxattr
PTR sys_listxattr /* 4230 */
PTR sys_llistxattr
PTR sys_flistxattr
PTR sys_removexattr
PTR sys_lremovexattr
PTR sys_fremovexattr /* 4235 */
PTR sys_tkill
PTR sys_sendfile64
PTR compat_sys_futex
PTR compat_sys_sched_setaffinity
PTR compat_sys_sched_getaffinity /* 4240 */
PTR compat_sys_io_setup
PTR sys_io_destroy
PTR compat_sys_io_getevents
PTR compat_sys_io_submit
PTR sys_io_cancel /* 4245 */
PTR sys_exit_group
PTR compat_sys_lookup_dcookie
PTR sys_epoll_create
PTR sys_epoll_ctl
PTR sys_epoll_wait /* 4250 */
PTR sys_remap_file_pages
PTR sys_set_tid_address
PTR sys_restart_syscall
PTR sys32_fadvise64_64
PTR compat_sys_statfs64 /* 4255 */
PTR compat_sys_fstatfs64
PTR compat_sys_timer_create
PTR compat_sys_timer_settime
PTR compat_sys_timer_gettime
PTR sys_timer_getoverrun /* 4260 */
PTR sys_timer_delete
PTR compat_sys_clock_settime
PTR compat_sys_clock_gettime
PTR compat_sys_clock_getres
PTR compat_sys_clock_nanosleep /* 4265 */
PTR sys_tgkill
PTR compat_sys_utimes
PTR compat_sys_mbind
PTR compat_sys_get_mempolicy
PTR compat_sys_set_mempolicy /* 4270 */
PTR compat_sys_mq_open
PTR sys_mq_unlink
PTR compat_sys_mq_timedsend
PTR compat_sys_mq_timedreceive
PTR compat_sys_mq_notify /* 4275 */
PTR compat_sys_mq_getsetattr
PTR sys_ni_syscall /* sys_vserver */
PTR compat_sys_waitid
PTR sys_ni_syscall /* available, was setaltroot */
PTR sys_add_key /* 4280 */
PTR sys_request_key
PTR sys_keyctl
PTR sys_set_thread_area
PTR sys_inotify_init
PTR sys_inotify_add_watch /* 4285 */
PTR sys_inotify_rm_watch
PTR compat_sys_migrate_pages
PTR compat_sys_openat
PTR sys_mkdirat
PTR sys_mknodat /* 4290 */
PTR sys_fchownat
PTR compat_sys_futimesat
PTR sys_newfstatat
PTR sys_unlinkat
PTR sys_renameat /* 4295 */
PTR sys_linkat
PTR sys_symlinkat
PTR sys_readlinkat
PTR sys_fchmodat
PTR sys_faccessat /* 4300 */
PTR compat_sys_pselect6
PTR compat_sys_ppoll
PTR sys_unshare
PTR sys_splice
PTR sys32_sync_file_range /* 4305 */
PTR sys_tee
PTR compat_sys_vmsplice
PTR compat_sys_move_pages
PTR compat_sys_set_robust_list
PTR compat_sys_get_robust_list /* 4310 */
PTR compat_sys_kexec_load
PTR sys_getcpu
PTR compat_sys_epoll_pwait
PTR sys_ioprio_set
PTR sys_ioprio_get /* 4315 */
PTR compat_sys_utimensat
PTR compat_sys_signalfd
PTR sys_ni_syscall /* was timerfd */
PTR sys_eventfd
PTR sys32_fallocate /* 4320 */
PTR sys_timerfd_create
PTR compat_sys_timerfd_gettime
PTR compat_sys_timerfd_settime
PTR compat_sys_signalfd4
PTR sys_eventfd2 /* 4325 */
PTR sys_epoll_create1
PTR sys_dup3
PTR sys_pipe2
PTR sys_inotify_init1
PTR compat_sys_preadv /* 4330 */
PTR compat_sys_pwritev
PTR compat_sys_rt_tgsigqueueinfo
PTR sys_perf_event_open
PTR sys_accept4
PTR compat_sys_recvmmsg /* 4335 */
PTR sys_fanotify_init
PTR compat_sys_fanotify_mark
PTR sys_prlimit64
PTR sys_name_to_handle_at
PTR compat_sys_open_by_handle_at /* 4340 */
PTR compat_sys_clock_adjtime
PTR sys_syncfs
PTR compat_sys_sendmmsg
PTR sys_setns
PTR compat_sys_process_vm_readv /* 4345 */
PTR compat_sys_process_vm_writev
PTR sys_kcmp
PTR sys_finit_module
PTR sys_sched_setattr
PTR sys_sched_getattr /* 4350 */
PTR sys_renameat2
PTR sys_seccomp
PTR sys_getrandom
PTR sys_memfd_create
PTR sys_bpf /* 4355 */
.size sys32_call_table,.-sys32_call_table

110
arch/mips/kernel/segment.c Normal file
View file

@ -0,0 +1,110 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2013 Imagination Technologies Ltd.
*/
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <asm/cpu.h>
#include <asm/mipsregs.h>
static void build_segment_config(char *str, unsigned int cfg)
{
unsigned int am;
static const char * const am_str[] = {
"UK", "MK", "MSK", "MUSK", "MUSUK", "USK",
"RSRVD", "UUSK"};
/* Segment access mode. */
am = (cfg & MIPS_SEGCFG_AM) >> MIPS_SEGCFG_AM_SHIFT;
str += sprintf(str, "%-5s", am_str[am]);
/*
* Access modes MK, MSK and MUSK are mapped segments. Therefore
* there is no direct physical address mapping.
*/
if ((am == 0) || (am > 3)) {
str += sprintf(str, " %03lx",
((cfg & MIPS_SEGCFG_PA) >> MIPS_SEGCFG_PA_SHIFT));
str += sprintf(str, " %01ld",
((cfg & MIPS_SEGCFG_C) >> MIPS_SEGCFG_C_SHIFT));
} else {
str += sprintf(str, " UND");
str += sprintf(str, " U");
}
/* Exception configuration. */
str += sprintf(str, " %01ld\n",
((cfg & MIPS_SEGCFG_EU) >> MIPS_SEGCFG_EU_SHIFT));
}
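/*
* Each CP0 SegCtl register packs two 16-bit segment configurations:
* show_segments() below decodes the low half for the even-numbered
* segment and then shifts the register right by 16 for the odd one.
*/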
static int show_segments(struct seq_file *m, void *v)
{
unsigned int segcfg;
char str[42];
seq_puts(m, "Segment Virtual Size Access Mode Physical Caching EU\n");
seq_puts(m, "------- ------- ---- ----------- -------- ------- --\n");
segcfg = read_c0_segctl0();
build_segment_config(str, segcfg);
seq_printf(m, " 0 e0000000 512M %s", str);
segcfg >>= 16;
build_segment_config(str, segcfg);
seq_printf(m, " 1 c0000000 512M %s", str);
segcfg = read_c0_segctl1();
build_segment_config(str, segcfg);
seq_printf(m, " 2 a0000000 512M %s", str);
segcfg >>= 16;
build_segment_config(str, segcfg);
seq_printf(m, " 3 80000000 512M %s", str);
segcfg = read_c0_segctl2();
build_segment_config(str, segcfg);
seq_printf(m, " 4 40000000 1G %s", str);
segcfg >>= 16;
build_segment_config(str, segcfg);
seq_printf(m, " 5 00000000 1G %s\n", str);
return 0;
}
static int segments_open(struct inode *inode, struct file *file)
{
return single_open(file, show_segments, NULL);
}
static const struct file_operations segments_fops = {
.open = segments_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int __init segments_info(void)
{
extern struct dentry *mips_debugfs_dir;
struct dentry *segments;
if (cpu_has_segments) {
if (!mips_debugfs_dir)
return -ENODEV;
segments = debugfs_create_file("segments", S_IRUGO,
mips_debugfs_dir, NULL,
&segments_fops);
if (!segments)
return -ENOMEM;
}
return 0;
}
device_initcall(segments_info);

805
arch/mips/kernel/setup.c Normal file
View file

@ -0,0 +1,805 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995 Linus Torvalds
* Copyright (C) 1995 Waldorf Electronics
* Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle
* Copyright (C) 1996 Stoned Elipot
* Copyright (C) 1999 Silicon Graphics, Inc.
* Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
*/
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/screen_info.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kexec.h>
#include <linux/sizes.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/bugs.h>
#include <asm/cache.h>
#include <asm/cpu.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>
#include <asm/prom.h>
struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_data);
#ifdef CONFIG_VT
struct screen_info screen_info;
#endif
/*
* Despite its name, this variable is used even if we don't have PCI
*/
unsigned int PCI_DMA_BUS_IS_PHYS;
EXPORT_SYMBOL(PCI_DMA_BUS_IS_PHYS);
/*
* Setup information
*
* These are initialized so they are in the .data section
*/
unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;
EXPORT_SYMBOL(mips_machtype);
struct boot_mem_map boot_mem_map;
static char __initdata command_line[COMMAND_LINE_SIZE];
char __initdata arcs_cmdline[COMMAND_LINE_SIZE];
#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif
/*
* mips_io_port_base is the beginning of the address space to which x86 style
* I/O ports are mapped.
*/
const unsigned long mips_io_port_base = -1;
EXPORT_SYMBOL(mips_io_port_base);
static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };
static void *detect_magic __initdata = detect_memory_region;
void __init add_memory_region(phys_t start, phys_t size, long type)
{
int x = boot_mem_map.nr_map;
int i;
/* Sanity check */
if (start + size < start) {
pr_warning("Trying to add an invalid memory region, skipped\n");
return;
}
/*
* Try to merge with existing entry, if any.
*/
for (i = 0; i < boot_mem_map.nr_map; i++) {
struct boot_mem_map_entry *entry = boot_mem_map.map + i;
unsigned long top;
if (entry->type != type)
continue;
if (start + size < entry->addr)
continue; /* no overlap */
if (entry->addr + entry->size < start)
continue; /* no overlap */
top = max(entry->addr + entry->size, start + size);
entry->addr = min(entry->addr, start);
entry->size = top - entry->addr;
return;
}
if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) {
pr_err("Ooops! Too many entries in the memory map!\n");
return;
}
boot_mem_map.map[x].addr = start;
boot_mem_map.map[x].size = size;
boot_mem_map.map[x].type = type;
boot_mem_map.nr_map++;
}
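/*
* detect_memory_region() below probes for the amount of installed RAM:
* it keeps doubling 'size' until the detect_magic marker appears
* mirrored at dm + size, i.e. until the address has wrapped past the
* end of memory, and registers the resulting size as BOOT_MEM_RAM.
*/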
void __init detect_memory_region(phys_t start, phys_t sz_min, phys_t sz_max)
{
void *dm = &detect_magic;
phys_t size;
for (size = sz_min; size < sz_max; size <<= 1) {
if (!memcmp(dm, dm + size, sizeof(detect_magic)))
break;
}
pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
((unsigned long long) size) / SZ_1M,
(unsigned long long) start,
((unsigned long long) sz_min) / SZ_1M,
((unsigned long long) sz_max) / SZ_1M);
add_memory_region(start, size, BOOT_MEM_RAM);
}
static void __init print_memory_map(void)
{
int i;
const int field = 2 * sizeof(unsigned long);
for (i = 0; i < boot_mem_map.nr_map; i++) {
printk(KERN_INFO " memory: %0*Lx @ %0*Lx ",
field, (unsigned long long) boot_mem_map.map[i].size,
field, (unsigned long long) boot_mem_map.map[i].addr);
switch (boot_mem_map.map[i].type) {
case BOOT_MEM_RAM:
printk(KERN_CONT "(usable)\n");
break;
case BOOT_MEM_INIT_RAM:
printk(KERN_CONT "(usable after init)\n");
break;
case BOOT_MEM_ROM_DATA:
printk(KERN_CONT "(ROM data)\n");
break;
case BOOT_MEM_RESERVED:
printk(KERN_CONT "(reserved)\n");
break;
default:
printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type);
break;
}
}
}
/*
* Manage initrd
*/
#ifdef CONFIG_BLK_DEV_INITRD
static int __init rd_start_early(char *p)
{
unsigned long start = memparse(p, &p);
#ifdef CONFIG_64BIT
/* Guess if the sign extension was forgotten by bootloader */
if (start < XKPHYS)
start = (int)start;
#endif
initrd_start = start;
initrd_end += start;
return 0;
}
early_param("rd_start", rd_start_early);
static int __init rd_size_early(char *p)
{
initrd_end += memparse(p, &p);
return 0;
}
early_param("rd_size", rd_size_early);
/* it returns the next free pfn after initrd */
static unsigned long __init init_initrd(void)
{
unsigned long end;
/*
* Board specific code or command line parser should have
* already set up initrd_start and initrd_end. In these cases
* perform sanity checks and use them if all looks good.
*/
if (!initrd_start || initrd_end <= initrd_start)
goto disable;
if (initrd_start & ~PAGE_MASK) {
pr_err("initrd start must be page aligned\n");
goto disable;
}
if (initrd_start < PAGE_OFFSET) {
pr_err("initrd start < PAGE_OFFSET\n");
goto disable;
}
/*
* Sanitize initrd addresses. For example, firmware
* can't know whether it needs to pass 64-bit values
* when the kernel has been built as pure 32-bit. We also
* need to switch from KSEG0 to XKPHYS addresses now, so
* the code can safely use __pa().
*/
end = __pa(initrd_end);
initrd_end = (unsigned long)__va(end);
initrd_start = (unsigned long)__va(__pa(initrd_start));
ROOT_DEV = Root_RAM0;
return PFN_UP(end);
disable:
initrd_start = 0;
initrd_end = 0;
return 0;
}
static void __init finalize_initrd(void)
{
unsigned long size = initrd_end - initrd_start;
if (size == 0) {
printk(KERN_INFO "Initrd not found or empty");
goto disable;
}
if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
printk(KERN_ERR "Initrd extends beyond end of memory");
goto disable;
}
reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
initrd_below_start_ok = 1;
pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
initrd_start, size);
return;
disable:
printk(KERN_CONT " - disabling initrd\n");
initrd_start = 0;
initrd_end = 0;
}
#else /* !CONFIG_BLK_DEV_INITRD */
static unsigned long __init init_initrd(void)
{
return 0;
}
#define finalize_initrd() do {} while (0)
#endif
/*
* Initialize the bootmem allocator. It also sets up initrd-related data
* if needed.
*/
#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_NUMA))
static void __init bootmem_init(void)
{
init_initrd();
finalize_initrd();
}
#else /* !CONFIG_SGI_IP27 */
static void __init bootmem_init(void)
{
unsigned long reserved_end;
unsigned long mapstart = ~0UL;
unsigned long bootmap_size;
int i;
/*
* Sanity check any INITRD first. We don't take it into account
* for bootmem setup initially, relying on the end-of-kernel-code
* as our memory range starting point. Once bootmem is inited we
* will reserve the area used for the initrd.
*/
init_initrd();
reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));
/*
* max_low_pfn is not a number of pages. The number of pages
* of the system is given by 'max_low_pfn - min_low_pfn'.
*/
min_low_pfn = ~0UL;
max_low_pfn = 0;
/*
* Find the highest page frame number we have available.
*/
for (i = 0; i < boot_mem_map.nr_map; i++) {
unsigned long start, end;
if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
continue;
start = PFN_UP(boot_mem_map.map[i].addr);
end = PFN_DOWN(boot_mem_map.map[i].addr
+ boot_mem_map.map[i].size);
if (end > max_low_pfn)
max_low_pfn = end;
if (start < min_low_pfn)
min_low_pfn = start;
if (end <= reserved_end)
continue;
if (start >= mapstart)
continue;
mapstart = max(reserved_end, start);
}
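/*
* mapstart now holds the first usable page frame above the kernel image
* (and, further down, above the initrd if there is one); that is where
* init_bootmem_node() will place the bootmem bitmap.
*/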
if (min_low_pfn >= max_low_pfn)
panic("Incorrect memory mapping !!!");
if (min_low_pfn > ARCH_PFN_OFFSET) {
pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
(min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
min_low_pfn - ARCH_PFN_OFFSET);
} else if (min_low_pfn < ARCH_PFN_OFFSET) {
pr_info("%lu free pages won't be used\n",
ARCH_PFN_OFFSET - min_low_pfn);
}
min_low_pfn = ARCH_PFN_OFFSET;
/*
* Determine low and high memory ranges
*/
max_pfn = max_low_pfn;
if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
highstart_pfn = PFN_DOWN(HIGHMEM_START);
highend_pfn = max_low_pfn;
#endif
max_low_pfn = PFN_DOWN(HIGHMEM_START);
}
#ifdef CONFIG_BLK_DEV_INITRD
/*
* mapstart should be after initrd_end
*/
if (initrd_end)
mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
#endif
/*
* Initialize the boot-time allocator with low memory only.
*/
bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart,
min_low_pfn, max_low_pfn);
for (i = 0; i < boot_mem_map.nr_map; i++) {
unsigned long start, end;
start = PFN_UP(boot_mem_map.map[i].addr);
end = PFN_DOWN(boot_mem_map.map[i].addr
+ boot_mem_map.map[i].size);
if (start <= min_low_pfn)
start = min_low_pfn;
if (start >= end)
continue;
#ifndef CONFIG_HIGHMEM
if (end > max_low_pfn)
end = max_low_pfn;
/*
* ... finally, is the area going away?
*/
if (end <= start)
continue;
#endif
memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
}
/*
* Register fully available low RAM pages with the bootmem allocator.
*/
for (i = 0; i < boot_mem_map.nr_map; i++) {
unsigned long start, end, size;
start = PFN_UP(boot_mem_map.map[i].addr);
end = PFN_DOWN(boot_mem_map.map[i].addr
+ boot_mem_map.map[i].size);
/*
* Reserve usable memory.
*/
switch (boot_mem_map.map[i].type) {
case BOOT_MEM_RAM:
break;
case BOOT_MEM_INIT_RAM:
memory_present(0, start, end);
continue;
default:
/* Not usable memory */
continue;
}
/*
* We round the start address of usable memory up and the
* end of the usable range down.
*/
if (start >= max_low_pfn)
continue;
if (start < reserved_end)
start = reserved_end;
if (end > max_low_pfn)
end = max_low_pfn;
/*
* ... finally, is the area going away?
*/
if (end <= start)
continue;
size = end - start;
/* Register lowmem ranges */
free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT);
memory_present(0, start, end);
}
/*
* Reserve the bootmap memory.
*/
reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT);
/*
* Reserve initrd memory if needed.
*/
finalize_initrd();
}
#endif /* CONFIG_SGI_IP27 */
/*
* arch_mem_init - initialize memory management subsystem
*
* o plat_mem_setup() detects the memory configuration and will record detected
* memory areas using add_memory_region.
*
* At this stage the memory configuration of the system is known to the
* kernel, but the generic memory management system is still entirely uninitialized.
*
* o bootmem_init()
* o sparse_init()
* o paging_init()
* o dma_contiguous_reserve()
*
* At this stage the bootmem allocator is ready to use.
*
* NOTE: historically plat_mem_setup did the entire platform initialization.
* This was rather impractical because it meant plat_mem_setup had to
* get away without any kind of memory allocator. To keep old code from
* breaking, plat_setup was just renamed to plat_mem_setup and a second platform
* initialization hook for anything else was introduced.
*/
static int usermem __initdata;
static int __init early_parse_mem(char *p)
{
phys_t start, size;
/*
* If a user specifies memory size, we
* blow away any automatically generated
* size.
*/
if (usermem == 0) {
boot_mem_map.nr_map = 0;
usermem = 1;
}
start = 0;
size = memparse(p, &p);
if (*p == '@')
start = memparse(p + 1, &p);
add_memory_region(start, size, BOOT_MEM_RAM);
return 0;
}
early_param("mem", early_parse_mem);
#ifdef CONFIG_PROC_VMCORE
unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
static int __init early_parse_elfcorehdr(char *p)
{
int i;
setup_elfcorehdr = memparse(p, &p);
for (i = 0; i < boot_mem_map.nr_map; i++) {
unsigned long start = boot_mem_map.map[i].addr;
unsigned long end = (boot_mem_map.map[i].addr +
boot_mem_map.map[i].size);
if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
/*
* Reserve from the elf core header to the end of
* the memory segment, that should all be kdump
* reserved memory.
*/
setup_elfcorehdr_size = end - setup_elfcorehdr;
break;
}
}
/*
* If we don't find it in the memory map, then we shouldn't
* have to worry about it, as the new kernel won't use it.
*/
return 0;
}
early_param("elfcorehdr", early_parse_elfcorehdr);
#endif
static void __init arch_mem_addpart(phys_t mem, phys_t end, int type)
{
phys_t size;
int i;
size = end - mem;
if (!size)
return;
/* Make sure it is in the boot_mem_map */
for (i = 0; i < boot_mem_map.nr_map; i++) {
if (mem >= boot_mem_map.map[i].addr &&
mem < (boot_mem_map.map[i].addr +
boot_mem_map.map[i].size))
return;
}
add_memory_region(mem, size, type);
}
#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
unsigned long long total;
total = max_pfn - min_low_pfn;
return total << PAGE_SHIFT;
}
static void __init mips_parse_crashkernel(void)
{
unsigned long long total_mem;
unsigned long long crash_size, crash_base;
int ret;
total_mem = get_total_mem();
ret = parse_crashkernel(boot_command_line, total_mem,
&crash_size, &crash_base);
if (ret != 0 || crash_size <= 0)
return;
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
}
static void __init request_crashkernel(struct resource *res)
{
int ret;
ret = request_resource(res, &crashk_res);
if (!ret)
pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
(unsigned long)((crashk_res.end -
crashk_res.start + 1) >> 20),
(unsigned long)(crashk_res.start >> 20));
}
#else /* !defined(CONFIG_KEXEC) */
static void __init mips_parse_crashkernel(void)
{
}
static void __init request_crashkernel(struct resource *res)
{
}
#endif /* !defined(CONFIG_KEXEC) */
static void __init arch_mem_init(char **cmdline_p)
{
struct memblock_region *reg;
extern void plat_mem_setup(void);
/* call board setup routine */
plat_mem_setup();
/*
* Make sure all kernel memory is in the maps. The "UP" and
* "DOWN" are opposite for initdata since if it crosses over
* into another memory section you don't want that to be
* freed when the initdata is freed.
*/
arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
BOOT_MEM_RAM);
arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
BOOT_MEM_INIT_RAM);
pr_info("Determined physical RAM map:\n");
print_memory_map();
#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_CMDLINE_OVERRIDE
strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
if (builtin_cmdline[0]) {
strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
strlcat(arcs_cmdline, builtin_cmdline, COMMAND_LINE_SIZE);
}
strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
#endif
#else
strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
#endif
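/*
* Command-line precedence as implemented above: with CONFIG_CMDLINE_OVERRIDE
* the built-in command line replaces whatever the firmware passed in
* arcs_cmdline; otherwise the built-in string is appended to arcs_cmdline,
* and the result becomes boot_command_line.
*/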
strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line;
parse_early_param();
if (usermem) {
pr_info("User-defined physical RAM map:\n");
print_memory_map();
}
bootmem_init();
#ifdef CONFIG_PROC_VMCORE
if (setup_elfcorehdr && setup_elfcorehdr_size) {
printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
setup_elfcorehdr, setup_elfcorehdr_size);
reserve_bootmem(setup_elfcorehdr, setup_elfcorehdr_size,
BOOTMEM_DEFAULT);
}
#endif
mips_parse_crashkernel();
#ifdef CONFIG_KEXEC
if (crashk_res.start != crashk_res.end)
reserve_bootmem(crashk_res.start,
crashk_res.end - crashk_res.start + 1,
BOOTMEM_DEFAULT);
#endif
device_tree_init();
sparse_init();
plat_swiotlb_setup();
paging_init();
dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
/* Tell bootmem about cma reserved memblock section */
for_each_memblock(reserved, reg)
if (reg->size != 0)
reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
}
static void __init resource_init(void)
{
int i;
if (UNCAC_BASE != IO_BASE)
return;
code_resource.start = __pa_symbol(&_text);
code_resource.end = __pa_symbol(&_etext) - 1;
data_resource.start = __pa_symbol(&_etext);
data_resource.end = __pa_symbol(&_edata) - 1;
for (i = 0; i < boot_mem_map.nr_map; i++) {
struct resource *res;
unsigned long start, end;
start = boot_mem_map.map[i].addr;
end = boot_mem_map.map[i].addr + boot_mem_map.map[i].size - 1;
if (start >= HIGHMEM_START)
continue;
if (end >= HIGHMEM_START)
end = HIGHMEM_START - 1;
res = alloc_bootmem(sizeof(struct resource));
switch (boot_mem_map.map[i].type) {
case BOOT_MEM_RAM:
case BOOT_MEM_INIT_RAM:
case BOOT_MEM_ROM_DATA:
res->name = "System RAM";
break;
case BOOT_MEM_RESERVED:
default:
res->name = "reserved";
}
res->start = start;
res->end = end;
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
request_resource(&iomem_resource, res);
/*
* We don't know which RAM region contains kernel data,
* so we try it repeatedly and let the resource manager
* test it.
*/
request_resource(res, &code_resource);
request_resource(res, &data_resource);
request_crashkernel(res);
}
}
#ifdef CONFIG_SMP
static void __init prefill_possible_map(void)
{
int i, possible = num_possible_cpus();
if (possible > nr_cpu_ids)
possible = nr_cpu_ids;
for (i = 0; i < possible; i++)
set_cpu_possible(i, true);
for (; i < NR_CPUS; i++)
set_cpu_possible(i, false);
nr_cpu_ids = possible;
}
#else
static inline void prefill_possible_map(void) {}
#endif
void __init setup_arch(char **cmdline_p)
{
cpu_probe();
prom_init();
#ifdef CONFIG_EARLY_PRINTK
setup_early_printk();
#endif
cpu_report();
check_bugs_early();
#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
conswitchp = &dummy_con;
#endif
#endif
arch_mem_init(cmdline_p);
resource_init();
plat_smp_setup();
prefill_possible_map();
cpu_cache_init();
}
unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
#ifdef CONFIG_DEBUG_FS
struct dentry *mips_debugfs_dir;
static int __init debugfs_mips(void)
{
struct dentry *d;
d = debugfs_create_dir("mips", NULL);
if (!d)
return -ENOMEM;
mips_debugfs_dir = d;
return 0;
}
arch_initcall(debugfs_mips);
#endif

View file

@ -0,0 +1,39 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1994 - 2000 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
#ifndef __SIGNAL_COMMON_H
#define __SIGNAL_COMMON_H
/* #define DEBUG_SIG */
#ifdef DEBUG_SIG
# define DEBUGP(fmt, args...) printk("%s: " fmt, __func__, ##args)
#else
# define DEBUGP(fmt, args...)
#endif
/*
* Determine which stack to use.
*/
extern void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
size_t frame_size);
/* Check and clear pending FPU exceptions in saved CSR */
extern int fpcsr_pending(unsigned int __user *fpcsr);
/* Make sure we will not lose FPU ownership */
#ifdef CONFIG_PREEMPT
#define lock_fpu_owner() preempt_disable()
#define unlock_fpu_owner() preempt_enable()
#else
#define lock_fpu_owner() pagefault_disable()
#define unlock_fpu_owner() pagefault_enable()
#endif
#endif /* __SIGNAL_COMMON_H */

673
arch/mips/kernel/signal.c Normal file
View file

@ -0,0 +1,673 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1994 - 2000 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2014, Imagination Technologies Ltd.
*/
#include <linux/cache.h>
#include <linux/context_tracking.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/compiler.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>
#include <asm/abi.h>
#include <asm/asm.h>
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/fpu.h>
#include <asm/sim.h>
#include <asm/ucontext.h>
#include <asm/cpu-features.h>
#include <asm/war.h>
#include <asm/vdso.h>
#include <asm/dsp.h>
#include <asm/inst.h>
#include "signal-common.h"
static int (*save_fp_context)(struct sigcontext __user *sc);
static int (*restore_fp_context)(struct sigcontext __user *sc);
extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
struct sigframe {
u32 sf_ass[4]; /* argument save space for o32 */
u32 sf_pad[2]; /* Was: signal trampoline */
struct sigcontext sf_sc;
sigset_t sf_mask;
};
struct rt_sigframe {
u32 rs_ass[4]; /* argument save space for o32 */
u32 rs_pad[2]; /* Was: signal trampoline */
struct siginfo rs_info;
struct ucontext rs_uc;
};
/*
* Thread saved context copy to/from a signal context presumed to be on the
* user stack, and therefore accessed with appropriate macros from uaccess.h.
*/
static int copy_fp_to_sigcontext(struct sigcontext __user *sc)
{
int i;
int err = 0;
for (i = 0; i < NUM_FPU_REGS; i++) {
err |=
__put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
&sc->sc_fpregs[i]);
}
err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
return err;
}
static int copy_fp_from_sigcontext(struct sigcontext __user *sc)
{
int i;
int err = 0;
u64 fpr_val;
for (i = 0; i < NUM_FPU_REGS; i++) {
err |= __get_user(fpr_val, &sc->sc_fpregs[i]);
set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
}
err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
return err;
}
/*
* Helper routines
*/
static int protected_save_fp_context(struct sigcontext __user *sc)
{
int err;
#ifndef CONFIG_EVA
while (1) {
lock_fpu_owner();
if (is_fpu_owner()) {
err = save_fp_context(sc);
unlock_fpu_owner();
} else {
unlock_fpu_owner();
err = copy_fp_to_sigcontext(sc);
}
if (likely(!err))
break;
/* touch the sigcontext and try again */
err = __put_user(0, &sc->sc_fpregs[0]) |
__put_user(0, &sc->sc_fpregs[31]) |
__put_user(0, &sc->sc_fpc_csr);
if (err)
break; /* really bad sigcontext */
}
#else
/*
* EVA does not have FPU EVA instructions so saving fpu context directly
* does not work.
*/
lose_fpu(1);
err = save_fp_context(sc); /* this might fail */
#endif
return err;
}
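/*
* The retry loops in protected_save_fp_context() above and
* protected_restore_fp_context() below exist because the FPU registers are
* copied with lock_fpu_owner() held, where a page fault cannot be serviced
* without risking loss of FPU ownership. If the sigcontext page is not
* resident the copy fails, so the loop touches the first and last words of
* the context with plain __put_user()/__get_user() - faulting the page in
* with the lock dropped - and then tries again.
*/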
static int protected_restore_fp_context(struct sigcontext __user *sc)
{
int err, tmp __maybe_unused;
#ifndef CONFIG_EVA
while (1) {
lock_fpu_owner();
if (is_fpu_owner()) {
err = restore_fp_context(sc);
unlock_fpu_owner();
} else {
unlock_fpu_owner();
err = copy_fp_from_sigcontext(sc);
}
if (likely(!err))
break;
/* touch the sigcontext and try again */
err = __get_user(tmp, &sc->sc_fpregs[0]) |
__get_user(tmp, &sc->sc_fpregs[31]) |
__get_user(tmp, &sc->sc_fpc_csr);
if (err)
break; /* really bad sigcontext */
}
#else
/*
* EVA does not have FPU EVA instructions so restoring fpu context
* directly does not work.
*/
lose_fpu(0);
err = restore_fp_context(sc); /* this might fail */
#endif
return err;
}
int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
int err = 0;
int i;
unsigned int used_math;
err |= __put_user(regs->cp0_epc, &sc->sc_pc);
err |= __put_user(0, &sc->sc_regs[0]);
for (i = 1; i < 32; i++)
err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
#ifdef CONFIG_CPU_HAS_SMARTMIPS
err |= __put_user(regs->acx, &sc->sc_acx);
#endif
err |= __put_user(regs->hi, &sc->sc_mdhi);
err |= __put_user(regs->lo, &sc->sc_mdlo);
if (cpu_has_dsp) {
err |= __put_user(mfhi1(), &sc->sc_hi1);
err |= __put_user(mflo1(), &sc->sc_lo1);
err |= __put_user(mfhi2(), &sc->sc_hi2);
err |= __put_user(mflo2(), &sc->sc_lo2);
err |= __put_user(mfhi3(), &sc->sc_hi3);
err |= __put_user(mflo3(), &sc->sc_lo3);
err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
}
used_math = !!used_math();
err |= __put_user(used_math, &sc->sc_used_math);
if (used_math) {
/*
* Save FPU state to signal context. Signal handler
* will "inherit" current FPU state.
*/
err |= protected_save_fp_context(sc);
}
return err;
}
int fpcsr_pending(unsigned int __user *fpcsr)
{
int err, sig = 0;
unsigned int csr, enabled;
err = __get_user(csr, fpcsr);
enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5);
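/*
* In the FPU CSR the Cause bits sit five bits above their matching Enable
* bits, so (csr & FPU_CSR_ALL_E) << 5 picks out the Cause bits whose
* exceptions the handler has enabled; FPU_CSR_UNI_X (unimplemented
* operation) has no enable bit and is therefore checked unconditionally.
*/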
/*
* If the signal handler set some FPU exceptions, clear them and
* send SIGFPE.
*/
if (csr & enabled) {
csr &= ~enabled;
err |= __put_user(csr, fpcsr);
sig = SIGFPE;
}
return err ?: sig;
}
static int
check_and_restore_fp_context(struct sigcontext __user *sc)
{
int err, sig;
err = sig = fpcsr_pending(&sc->sc_fpc_csr);
if (err > 0)
err = 0;
err |= protected_restore_fp_context(sc);
return err ?: sig;
}
int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
unsigned int used_math;
unsigned long treg;
int err = 0;
int i;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
err |= __get_user(regs->cp0_epc, &sc->sc_pc);
#ifdef CONFIG_CPU_HAS_SMARTMIPS
err |= __get_user(regs->acx, &sc->sc_acx);
#endif
err |= __get_user(regs->hi, &sc->sc_mdhi);
err |= __get_user(regs->lo, &sc->sc_mdlo);
if (cpu_has_dsp) {
err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
}
for (i = 1; i < 32; i++)
err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
err |= __get_user(used_math, &sc->sc_used_math);
conditional_used_math(used_math);
if (used_math) {
/* restore fpu context if we have used it before */
if (!err)
err = check_and_restore_fp_context(sc);
} else {
/* signal handler may have used FPU. Give it up. */
lose_fpu(0);
}
return err;
}
void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
size_t frame_size)
{
unsigned long sp;
/* Default to using normal stack */
sp = regs->regs[29];
/*
* The FPU emulator may have its own trampoline active just
* above the user stack, 16 bytes before the next lowest
* 16-byte boundary. Try to avoid trashing it.
*/
sp -= 32;
sp = sigsp(sp, ksig);
return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? ~(cpu_icache_line_size()-1) : ALMASK));
}
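/*
* The final mask in get_sigframe() aligns the frame either to an icache
* line (when the ICACHE_REFILLS_WORKAROUND_WAR is in effect) or to ALMASK,
* the normal ABI stack alignment.
*/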
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
#ifdef CONFIG_TRAD_SIGNALS
SYSCALL_DEFINE1(sigsuspend, sigset_t __user *, uset)
{
return sys_rt_sigsuspend(uset, sizeof(sigset_t));
}
#endif
#ifdef CONFIG_TRAD_SIGNALS
SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
struct sigaction __user *, oact)
{
struct k_sigaction new_ka, old_ka;
int ret;
int err = 0;
if (act) {
old_sigset_t mask;
if (!access_ok(VERIFY_READ, act, sizeof(*act)))
return -EFAULT;
err |= __get_user(new_ka.sa.sa_handler, &act->sa_handler);
err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
err |= __get_user(mask, &act->sa_mask.sig[0]);
if (err)
return -EFAULT;
siginitset(&new_ka.sa.sa_mask, mask);
}
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
return -EFAULT;
err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
err |= __put_user(0, &oact->sa_mask.sig[1]);
err |= __put_user(0, &oact->sa_mask.sig[2]);
err |= __put_user(0, &oact->sa_mask.sig[3]);
if (err)
return -EFAULT;
}
return ret;
}
#endif
#ifdef CONFIG_TRAD_SIGNALS
asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
{
struct sigframe __user *frame;
sigset_t blocked;
int sig;
frame = (struct sigframe __user *) regs.regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
goto badframe;
set_current_blocked(&blocked);
sig = restore_sigcontext(&regs, &frame->sf_sc);
if (sig < 0)
goto badframe;
else if (sig)
force_sig(sig, current);
/*
* Don't let your children do this ...
*/
__asm__ __volatile__(
"move\t$29, %0\n\t"
"j\tsyscall_exit"
:/* no outputs */
:"r" (&regs));
/* Unreached */
badframe:
force_sig(SIGSEGV, current);
}
#endif /* CONFIG_TRAD_SIGNALS */
asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
struct rt_sigframe __user *frame;
sigset_t set;
int sig;
frame = (struct rt_sigframe __user *) regs.regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
goto badframe;
set_current_blocked(&set);
sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
if (sig < 0)
goto badframe;
else if (sig)
force_sig(sig, current);
if (restore_altstack(&frame->rs_uc.uc_stack))
goto badframe;
/*
* Don't let your children do this ...
*/
__asm__ __volatile__(
"move\t$29, %0\n\t"
"j\tsyscall_exit"
:/* no outputs */
:"r" (&regs));
/* Unreached */
badframe:
force_sig(SIGSEGV, current);
}
#ifdef CONFIG_TRAD_SIGNALS
static int setup_frame(void *sig_return, struct ksignal *ksig,
struct pt_regs *regs, sigset_t *set)
{
struct sigframe __user *frame;
int err = 0;
frame = get_sigframe(ksig, regs, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
return -EFAULT;
err |= setup_sigcontext(regs, &frame->sf_sc);
err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
if (err)
return -EFAULT;
/*
* Arguments to signal handler:
*
* a0 = signal number
* a1 = 0 (should be cause)
* a2 = pointer to struct sigcontext
*
* $25 and c0_epc point to the signal handler, $29 points to the
* struct sigframe.
*/
regs->regs[ 4] = ksig->sig;
regs->regs[ 5] = 0;
regs->regs[ 6] = (unsigned long) &frame->sf_sc;
regs->regs[29] = (unsigned long) frame;
regs->regs[31] = (unsigned long) sig_return;
regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
current->comm, current->pid,
frame, regs->cp0_epc, regs->regs[31]);
return 0;
}
#endif
static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
struct pt_regs *regs, sigset_t *set)
{
struct rt_sigframe __user *frame;
int err = 0;
frame = get_sigframe(ksig, regs, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
return -EFAULT;
/* Create siginfo. */
err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info);
/* Create the ucontext. */
err |= __put_user(0, &frame->rs_uc.uc_flags);
err |= __put_user(NULL, &frame->rs_uc.uc_link);
err |= __save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));
if (err)
return -EFAULT;
/*
* Arguments to signal handler:
*
* a0 = signal number
* a1 = 0 (should be cause)
* a2 = pointer to ucontext
*
* $25 and c0_epc point to the signal handler, $29 points to
* the struct rt_sigframe.
*/
regs->regs[ 4] = ksig->sig;
regs->regs[ 5] = (unsigned long) &frame->rs_info;
regs->regs[ 6] = (unsigned long) &frame->rs_uc;
regs->regs[29] = (unsigned long) frame;
regs->regs[31] = (unsigned long) sig_return;
regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
current->comm, current->pid,
frame, regs->cp0_epc, regs->regs[31]);
return 0;
}
struct mips_abi mips_abi = {
#ifdef CONFIG_TRAD_SIGNALS
.setup_frame = setup_frame,
.signal_return_offset = offsetof(struct mips_vdso, signal_trampoline),
#endif
.setup_rt_frame = setup_rt_frame,
.rt_signal_return_offset =
offsetof(struct mips_vdso, rt_signal_trampoline),
.restart = __NR_restart_syscall
};
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
sigset_t *oldset = sigmask_to_save();
int ret;
struct mips_abi *abi = current->thread.abi;
#ifdef CONFIG_CPU_MICROMIPS
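/*
 * The signal trampolines live in the vdso; on microMIPS the address
 * handed back to userspace must have the ISA bit set so the handler
 * returns through the trampoline in microMIPS mode.
 */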
void *vdso;
unsigned int tmp = (unsigned int)current->mm->context.vdso;
set_isa16_mode(tmp);
vdso = (void *)tmp;
#else
void *vdso = current->mm->context.vdso;
#endif
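/*
 * regs[0] is only non-zero when we are returning from a syscall; it
 * holds the original syscall number so an interrupted syscall can be
 * restarted (or failed with EINTR) according to SA_RESTART.
 */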
if (regs->regs[0]) {
switch(regs->regs[2]) {
case ERESTART_RESTARTBLOCK:
case ERESTARTNOHAND:
regs->regs[2] = EINTR;
break;
case ERESTARTSYS:
if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
regs->regs[2] = EINTR;
break;
}
/* fallthrough */
case ERESTARTNOINTR:
regs->regs[7] = regs->regs[26];
regs->regs[2] = regs->regs[0];
regs->cp0_epc -= 4;
}
regs->regs[0] = 0; /* Don't deal with this again. */
}
if (sig_uses_siginfo(&ksig->ka))
ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset,
ksig, regs, oldset);
else
ret = abi->setup_frame(vdso + abi->signal_return_offset, ksig,
regs, oldset);
signal_setup_done(ret, ksig, 0);
}
static void do_signal(struct pt_regs *regs)
{
struct ksignal ksig;
if (get_signal(&ksig)) {
/* Whee! Actually deliver the signal. */
handle_signal(&ksig, regs);
return;
}
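/*
 * No handler was found.  If we interrupted a syscall, restart it
 * transparently by restoring the original v0/a3 values and rewinding
 * the PC to the syscall instruction.
 */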
if (regs->regs[0]) {
switch (regs->regs[2]) {
case ERESTARTNOHAND:
case ERESTARTSYS:
case ERESTARTNOINTR:
regs->regs[2] = regs->regs[0];
regs->regs[7] = regs->regs[26];
regs->cp0_epc -= 4;
break;
case ERESTART_RESTARTBLOCK:
regs->regs[2] = current->thread.abi->restart;
regs->regs[7] = regs->regs[26];
regs->cp0_epc -= 4;
break;
}
regs->regs[0] = 0; /* Don't deal with this again. */
}
/*
* If there's no signal to deliver, we just put the saved sigmask
* back
*/
restore_saved_sigmask();
}
/*
* notification of userspace execution resumption
* - triggered by the TIF_WORK_MASK flags
*/
asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
__u32 thread_info_flags)
{
local_irq_enable();
user_exit();
/* deal with pending signal delivery */
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs);
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
}
user_enter();
}
#ifdef CONFIG_SMP
#ifndef CONFIG_EVA
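/*
 * Under SMP, choose between the hardware FPU path and the emulated
 * context copy on every call rather than once at init, so that
 * raw_cpu_has_fpu reflects the CPU actually doing the save/restore.
 */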
static int smp_save_fp_context(struct sigcontext __user *sc)
{
return raw_cpu_has_fpu
? _save_fp_context(sc)
: copy_fp_to_sigcontext(sc);
}
static int smp_restore_fp_context(struct sigcontext __user *sc)
{
return raw_cpu_has_fpu
? _restore_fp_context(sc)
: copy_fp_from_sigcontext(sc);
}
#endif /* CONFIG_EVA */
#endif
static int signal_setup(void)
{
#ifndef CONFIG_EVA
#ifdef CONFIG_SMP
/* For now just do the cpu_has_fpu check when the functions are invoked */
save_fp_context = smp_save_fp_context;
restore_fp_context = smp_restore_fp_context;
#else
if (cpu_has_fpu) {
save_fp_context = _save_fp_context;
restore_fp_context = _restore_fp_context;
} else {
save_fp_context = copy_fp_to_sigcontext;
restore_fp_context = copy_fp_from_sigcontext;
}
#endif /* CONFIG_SMP */
#else
save_fp_context = copy_fp_to_sigcontext;
restore_fp_context = copy_fp_from_sigcontext;
#endif
return 0;
}
arch_initcall(signal_setup);

606
arch/mips/kernel/signal32.c Normal file
View file

@ -0,0 +1,606 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1994 - 2000, 2006 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/suspend.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <asm/abi.h>
#include <asm/asm.h>
#include <asm/compat-signal.h>
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/sim.h>
#include <asm/ucontext.h>
#include <asm/fpu.h>
#include <asm/war.h>
#include <asm/vdso.h>
#include <asm/dsp.h>
#include "signal-common.h"
static int (*save_fp_context32)(struct sigcontext32 __user *sc);
static int (*restore_fp_context32)(struct sigcontext32 __user *sc);
extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
/*
 * Including <asm/unistd.h> would give us the 64-bit syscall numbers ...
*/
#define __NR_O32_restart_syscall 4253
/* 32-bit compatibility types */
typedef unsigned int __sighandler32_t;
typedef void (*vfptr_t)(void);
struct ucontext32 {
u32 uc_flags;
s32 uc_link;
compat_stack_t uc_stack;
struct sigcontext32 uc_mcontext;
compat_sigset_t uc_sigmask; /* mask last for extensibility */
};
struct sigframe32 {
u32 sf_ass[4]; /* argument save space for o32 */
u32 sf_pad[2]; /* Was: signal trampoline */
struct sigcontext32 sf_sc;
compat_sigset_t sf_mask;
};
struct rt_sigframe32 {
u32 rs_ass[4]; /* argument save space for o32 */
u32 rs_pad[2]; /* Was: signal trampoline */
compat_siginfo_t rs_info;
struct ucontext32 rs_uc;
};
/*
* Thread saved context copy to/from a signal context presumed to be on the
* user stack, and therefore accessed with appropriate macros from uaccess.h.
*/
static int copy_fp_to_sigcontext32(struct sigcontext32 __user *sc)
{
int i;
int err = 0;
int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
for (i = 0; i < NUM_FPU_REGS; i += inc) {
err |=
__put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
&sc->sc_fpregs[i]);
}
err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
return err;
}
static int copy_fp_from_sigcontext32(struct sigcontext32 __user *sc)
{
int i;
int err = 0;
int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
u64 fpr_val;
for (i = 0; i < NUM_FPU_REGS; i += inc) {
err |= __get_user(fpr_val, &sc->sc_fpregs[i]);
set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
}
err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
return err;
}
/*
* sigcontext handlers
*/
static int protected_save_fp_context32(struct sigcontext32 __user *sc)
{
int err;
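/*
 * The FPU save may fault on the user sigcontext; if it does, touch
 * the affected words with __put_user() to fault the page in and retry.
 */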
while (1) {
lock_fpu_owner();
if (is_fpu_owner()) {
err = save_fp_context32(sc);
unlock_fpu_owner();
} else {
unlock_fpu_owner();
err = copy_fp_to_sigcontext32(sc);
}
if (likely(!err))
break;
/* touch the sigcontext and try again */
err = __put_user(0, &sc->sc_fpregs[0]) |
__put_user(0, &sc->sc_fpregs[31]) |
__put_user(0, &sc->sc_fpc_csr);
if (err)
break; /* really bad sigcontext */
}
return err;
}
static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
{
int err, tmp __maybe_unused;
while (1) {
lock_fpu_owner();
if (is_fpu_owner()) {
err = restore_fp_context32(sc);
unlock_fpu_owner();
} else {
unlock_fpu_owner();
err = copy_fp_from_sigcontext32(sc);
}
if (likely(!err))
break;
/* touch the sigcontext and try again */
err = __get_user(tmp, &sc->sc_fpregs[0]) |
__get_user(tmp, &sc->sc_fpregs[31]) |
__get_user(tmp, &sc->sc_fpc_csr);
if (err)
break; /* really bad sigcontext */
}
return err;
}
static int setup_sigcontext32(struct pt_regs *regs,
struct sigcontext32 __user *sc)
{
int err = 0;
int i;
u32 used_math;
err |= __put_user(regs->cp0_epc, &sc->sc_pc);
err |= __put_user(0, &sc->sc_regs[0]);
for (i = 1; i < 32; i++)
err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
err |= __put_user(regs->hi, &sc->sc_mdhi);
err |= __put_user(regs->lo, &sc->sc_mdlo);
if (cpu_has_dsp) {
err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
err |= __put_user(mfhi1(), &sc->sc_hi1);
err |= __put_user(mflo1(), &sc->sc_lo1);
err |= __put_user(mfhi2(), &sc->sc_hi2);
err |= __put_user(mflo2(), &sc->sc_lo2);
err |= __put_user(mfhi3(), &sc->sc_hi3);
err |= __put_user(mflo3(), &sc->sc_lo3);
}
used_math = !!used_math();
err |= __put_user(used_math, &sc->sc_used_math);
if (used_math) {
/*
* Save FPU state to signal context. Signal handler
* will "inherit" current FPU state.
*/
err |= protected_save_fp_context32(sc);
}
return err;
}
static int
check_and_restore_fp_context32(struct sigcontext32 __user *sc)
{
int err, sig;
err = sig = fpcsr_pending(&sc->sc_fpc_csr);
if (err > 0)
err = 0;
err |= protected_restore_fp_context32(sc);
return err ?: sig;
}
static int restore_sigcontext32(struct pt_regs *regs,
struct sigcontext32 __user *sc)
{
u32 used_math;
int err = 0;
s32 treg;
int i;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
err |= __get_user(regs->cp0_epc, &sc->sc_pc);
err |= __get_user(regs->hi, &sc->sc_mdhi);
err |= __get_user(regs->lo, &sc->sc_mdlo);
if (cpu_has_dsp) {
err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
}
for (i = 1; i < 32; i++)
err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
err |= __get_user(used_math, &sc->sc_used_math);
conditional_used_math(used_math);
if (used_math) {
/* restore fpu context if we have used it before */
if (!err)
err = check_and_restore_fp_context32(sc);
} else {
/* signal handler may have used FPU. Give it up. */
lose_fpu(0);
}
return err;
}
/*
 * Helpers for moving a sigset_t between the kernel and the 32-bit
 * compat layout used on the user stack.
 */
extern void __put_sigset_unknown_nsig(void);
extern void __get_sigset_unknown_nsig(void);
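/*
 * These are intentionally left undefined: referencing them from the
 * default case below turns an unexpected _NSIG_WORDS value into a
 * link-time error.
 */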
static inline int put_sigset(const sigset_t *kbuf, compat_sigset_t __user *ubuf)
{
int err = 0;
if (!access_ok(VERIFY_WRITE, ubuf, sizeof(*ubuf)))
return -EFAULT;
switch (_NSIG_WORDS) {
default:
__put_sigset_unknown_nsig();
case 2:
err |= __put_user(kbuf->sig[1] >> 32, &ubuf->sig[3]);
err |= __put_user(kbuf->sig[1] & 0xffffffff, &ubuf->sig[2]);
case 1:
err |= __put_user(kbuf->sig[0] >> 32, &ubuf->sig[1]);
err |= __put_user(kbuf->sig[0] & 0xffffffff, &ubuf->sig[0]);
}
return err;
}
static inline int get_sigset(sigset_t *kbuf, const compat_sigset_t __user *ubuf)
{
int err = 0;
unsigned long sig[4];
if (!access_ok(VERIFY_READ, ubuf, sizeof(*ubuf)))
return -EFAULT;
switch (_NSIG_WORDS) {
default:
__get_sigset_unknown_nsig();
case 2:
err |= __get_user(sig[3], &ubuf->sig[3]);
err |= __get_user(sig[2], &ubuf->sig[2]);
kbuf->sig[1] = sig[2] | (sig[3] << 32);
case 1:
err |= __get_user(sig[1], &ubuf->sig[1]);
err |= __get_user(sig[0], &ubuf->sig[0]);
kbuf->sig[0] = sig[0] | (sig[1] << 32);
}
return err;
}
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
asmlinkage int sys32_sigsuspend(compat_sigset_t __user *uset)
{
return compat_sys_rt_sigsuspend(uset, sizeof(compat_sigset_t));
}
SYSCALL_DEFINE3(32_sigaction, long, sig, const struct compat_sigaction __user *, act,
struct compat_sigaction __user *, oact)
{
struct k_sigaction new_ka, old_ka;
int ret;
int err = 0;
if (act) {
old_sigset_t mask;
s32 handler;
if (!access_ok(VERIFY_READ, act, sizeof(*act)))
return -EFAULT;
err |= __get_user(handler, &act->sa_handler);
new_ka.sa.sa_handler = (void __user *)(s64)handler;
err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
err |= __get_user(mask, &act->sa_mask.sig[0]);
if (err)
return -EFAULT;
siginitset(&new_ka.sa.sa_mask, mask);
}
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
return -EFAULT;
err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
err |= __put_user((u32)(u64)old_ka.sa.sa_handler,
&oact->sa_handler);
err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
err |= __put_user(0, &oact->sa_mask.sig[1]);
err |= __put_user(0, &oact->sa_mask.sig[2]);
err |= __put_user(0, &oact->sa_mask.sig[3]);
if (err)
return -EFAULT;
}
return ret;
}
int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
{
int err;
if (!access_ok (VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
return -EFAULT;
/* If you change siginfo_t structure, please be sure
this code is fixed accordingly.
It should never copy any pad contained in the structure
to avoid security leaks, but must copy the generic
3 ints plus the relevant union member.
This routine must convert siginfo from 64bit to 32bit as well
at the same time. */
err = __put_user(from->si_signo, &to->si_signo);
err |= __put_user(from->si_errno, &to->si_errno);
err |= __put_user((short)from->si_code, &to->si_code);
if (from->si_code < 0)
err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
else {
switch (from->si_code >> 16) {
case __SI_TIMER >> 16:
err |= __put_user(from->si_tid, &to->si_tid);
err |= __put_user(from->si_overrun, &to->si_overrun);
err |= __put_user(from->si_int, &to->si_int);
break;
case __SI_CHLD >> 16:
err |= __put_user(from->si_utime, &to->si_utime);
err |= __put_user(from->si_stime, &to->si_stime);
err |= __put_user(from->si_status, &to->si_status);
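/* fall through: child signals also report si_pid and si_uid */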
default:
err |= __put_user(from->si_pid, &to->si_pid);
err |= __put_user(from->si_uid, &to->si_uid);
break;
case __SI_FAULT >> 16:
err |= __put_user((unsigned long)from->si_addr, &to->si_addr);
break;
case __SI_POLL >> 16:
err |= __put_user(from->si_band, &to->si_band);
err |= __put_user(from->si_fd, &to->si_fd);
break;
case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
case __SI_MESGQ >> 16:
err |= __put_user(from->si_pid, &to->si_pid);
err |= __put_user(from->si_uid, &to->si_uid);
err |= __put_user(from->si_int, &to->si_int);
break;
}
}
return err;
}
int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
{
memset(to, 0, sizeof *to);
if (copy_from_user(to, from, 3*sizeof(int)) ||
copy_from_user(to->_sifields._pad,
from->_sifields._pad, SI_PAD_SIZE32))
return -EFAULT;
return 0;
}
asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
{
struct sigframe32 __user *frame;
sigset_t blocked;
int sig;
frame = (struct sigframe32 __user *) regs.regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask))
goto badframe;
set_current_blocked(&blocked);
sig = restore_sigcontext32(&regs, &frame->sf_sc);
if (sig < 0)
goto badframe;
else if (sig)
force_sig(sig, current);
/*
* Don't let your children do this ...
*/
__asm__ __volatile__(
"move\t$29, %0\n\t"
"j\tsyscall_exit"
:/* no outputs */
:"r" (&regs));
/* Unreached */
badframe:
force_sig(SIGSEGV, current);
}
asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
struct rt_sigframe32 __user *frame;
sigset_t set;
int sig;
frame = (struct rt_sigframe32 __user *) regs.regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
goto badframe;
set_current_blocked(&set);
sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext);
if (sig < 0)
goto badframe;
else if (sig)
force_sig(sig, current);
if (compat_restore_altstack(&frame->rs_uc.uc_stack))
goto badframe;
/*
* Don't let your children do this ...
*/
__asm__ __volatile__(
"move\t$29, %0\n\t"
"j\tsyscall_exit"
:/* no outputs */
:"r" (&regs));
/* Unreached */
badframe:
force_sig(SIGSEGV, current);
}
static int setup_frame_32(void *sig_return, struct ksignal *ksig,
struct pt_regs *regs, sigset_t *set)
{
struct sigframe32 __user *frame;
int err = 0;
frame = get_sigframe(ksig, regs, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
return -EFAULT;
err |= setup_sigcontext32(regs, &frame->sf_sc);
err |= __copy_conv_sigset_to_user(&frame->sf_mask, set);
if (err)
return -EFAULT;
/*
* Arguments to signal handler:
*
* a0 = signal number
* a1 = 0 (should be cause)
* a2 = pointer to struct sigcontext
*
* $25 and c0_epc point to the signal handler, $29 points to the
* struct sigframe.
*/
regs->regs[ 4] = ksig->sig;
regs->regs[ 5] = 0;
regs->regs[ 6] = (unsigned long) &frame->sf_sc;
regs->regs[29] = (unsigned long) frame;
regs->regs[31] = (unsigned long) sig_return;
regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
current->comm, current->pid,
frame, regs->cp0_epc, regs->regs[31]);
return 0;
}
static int setup_rt_frame_32(void *sig_return, struct ksignal *ksig,
struct pt_regs *regs, sigset_t *set)
{
struct rt_sigframe32 __user *frame;
int err = 0;
frame = get_sigframe(ksig, regs, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
return -EFAULT;
/* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */
err |= copy_siginfo_to_user32(&frame->rs_info, &ksig->info);
/* Create the ucontext. */
err |= __put_user(0, &frame->rs_uc.uc_flags);
err |= __put_user(0, &frame->rs_uc.uc_link);
err |= __compat_save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
err |= setup_sigcontext32(regs, &frame->rs_uc.uc_mcontext);
err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set);
if (err)
return -EFAULT;
/*
* Arguments to signal handler:
*
* a0 = signal number
* a1 = 0 (should be cause)
* a2 = pointer to ucontext
*
* $25 and c0_epc point to the signal handler, $29 points to
* the struct rt_sigframe32.
*/
regs->regs[ 4] = ksig->sig;
regs->regs[ 5] = (unsigned long) &frame->rs_info;
regs->regs[ 6] = (unsigned long) &frame->rs_uc;
regs->regs[29] = (unsigned long) frame;
regs->regs[31] = (unsigned long) sig_return;
regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
current->comm, current->pid,
frame, regs->cp0_epc, regs->regs[31]);
return 0;
}
/*
* o32 compatibility on 64-bit kernels, without DSP ASE
*/
struct mips_abi mips_abi_32 = {
.setup_frame = setup_frame_32,
.signal_return_offset =
offsetof(struct mips_vdso, o32_signal_trampoline),
.setup_rt_frame = setup_rt_frame_32,
.rt_signal_return_offset =
offsetof(struct mips_vdso, o32_rt_signal_trampoline),
.restart = __NR_O32_restart_syscall
};
static int signal32_init(void)
{
if (cpu_has_fpu) {
save_fp_context32 = _save_fp_context32;
restore_fp_context32 = _restore_fp_context32;
} else {
save_fp_context32 = copy_fp_to_sigcontext32;
restore_fp_context32 = copy_fp_from_sigcontext32;
}
return 0;
}
arch_initcall(signal32_init);

157
arch/mips/kernel/signal_n32.c Normal file
View file

@ -0,0 +1,157 @@
/*
* Copyright (C) 2003 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/compat.h>
#include <linux/bitops.h>
#include <asm/abi.h>
#include <asm/asm.h>
#include <asm/cacheflush.h>
#include <asm/compat-signal.h>
#include <asm/sim.h>
#include <asm/uaccess.h>
#include <asm/ucontext.h>
#include <asm/fpu.h>
#include <asm/cpu-features.h>
#include <asm/war.h>
#include <asm/vdso.h>
#include "signal-common.h"
/*
 * Including <asm/unistd.h> would give us the 64-bit syscall numbers ...
*/
#define __NR_N32_restart_syscall 6214
extern int setup_sigcontext(struct pt_regs *, struct sigcontext __user *);
extern int restore_sigcontext(struct pt_regs *, struct sigcontext __user *);
struct ucontextn32 {
u32 uc_flags;
s32 uc_link;
compat_stack_t uc_stack;
struct sigcontext uc_mcontext;
compat_sigset_t uc_sigmask; /* mask last for extensibility */
};
struct rt_sigframe_n32 {
u32 rs_ass[4]; /* argument save space for o32 */
u32 rs_pad[2]; /* Was: signal trampoline */
struct compat_siginfo rs_info;
struct ucontextn32 rs_uc;
};
asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
struct rt_sigframe_n32 __user *frame;
sigset_t set;
int sig;
frame = (struct rt_sigframe_n32 __user *) regs.regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
goto badframe;
set_current_blocked(&set);
sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
if (sig < 0)
goto badframe;
else if (sig)
force_sig(sig, current);
if (compat_restore_altstack(&frame->rs_uc.uc_stack))
goto badframe;
/*
* Don't let your children do this ...
*/
__asm__ __volatile__(
"move\t$29, %0\n\t"
"j\tsyscall_exit"
:/* no outputs */
:"r" (&regs));
/* Unreached */
badframe:
force_sig(SIGSEGV, current);
}
static int setup_rt_frame_n32(void *sig_return, struct ksignal *ksig,
struct pt_regs *regs, sigset_t *set)
{
struct rt_sigframe_n32 __user *frame;
int err = 0;
frame = get_sigframe(ksig, regs, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
return -EFAULT;
/* Create siginfo. */
err |= copy_siginfo_to_user32(&frame->rs_info, &ksig->info);
/* Create the ucontext. */
err |= __put_user(0, &frame->rs_uc.uc_flags);
err |= __put_user(0, &frame->rs_uc.uc_link);
err |= __compat_save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set);
if (err)
return -EFAULT;
/*
* Arguments to signal handler:
*
* a0 = signal number
* a1 = 0 (should be cause)
* a2 = pointer to ucontext
*
* $25 and c0_epc point to the signal handler, $29 points to
* the struct rt_sigframe.
*/
regs->regs[ 4] = ksig->sig;
regs->regs[ 5] = (unsigned long) &frame->rs_info;
regs->regs[ 6] = (unsigned long) &frame->rs_uc;
regs->regs[29] = (unsigned long) frame;
regs->regs[31] = (unsigned long) sig_return;
regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
current->comm, current->pid,
frame, regs->cp0_epc, regs->regs[31]);
return 0;
}
struct mips_abi mips_abi_n32 = {
.setup_rt_frame = setup_rt_frame_n32,
.rt_signal_return_offset =
offsetof(struct mips_vdso, n32_rt_signal_trampoline),
.restart = __NR_N32_restart_syscall
};

535
arch/mips/kernel/smp-bmips.c Normal file
View file

@ -0,0 +1,535 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2011 by Kevin Cernekee (cernekee@gmail.com)
*
* SMP support for BMIPS
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/reboot.h>
#include <linux/io.h>
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <asm/time.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/bootinfo.h>
#include <asm/pmon.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mipsregs.h>
#include <asm/bmips.h>
#include <asm/traps.h>
#include <asm/barrier.h>
static int __maybe_unused max_cpus = 1;
/* these may be configured by the platform code */
int bmips_smp_enabled = 1;
int bmips_cpu_offset;
cpumask_t bmips_booted_mask;
#ifdef CONFIG_SMP
/* initial $sp, $gp - used by arch/mips/kernel/bmips_vec.S */
unsigned long bmips_smp_boot_sp;
unsigned long bmips_smp_boot_gp;
static void bmips43xx_send_ipi_single(int cpu, unsigned int action);
static void bmips5000_send_ipi_single(int cpu, unsigned int action);
static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id);
static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id);
/* SW interrupts 0,1 are used for interprocessor signaling */
#define IPI0_IRQ (MIPS_CPU_IRQ_BASE + 0)
#define IPI1_IRQ (MIPS_CPU_IRQ_BASE + 1)
#define CPUNUM(cpu, shift) (((cpu) + bmips_cpu_offset) << (shift))
#define ACTION_CLR_IPI(cpu, ipi) (0x2000 | CPUNUM(cpu, 9) | ((ipi) << 8))
#define ACTION_SET_IPI(cpu, ipi) (0x3000 | CPUNUM(cpu, 9) | ((ipi) << 8))
#define ACTION_BOOT_THREAD(cpu) (0x08 | CPUNUM(cpu, 0))
static void __init bmips_smp_setup(void)
{
int i, cpu = 1, boot_cpu = 0;
int cpu_hw_intr;
switch (current_cpu_type()) {
case CPU_BMIPS4350:
case CPU_BMIPS4380:
/* arbitration priority */
clear_c0_brcm_cmt_ctrl(0x30);
/* NBK and weak order flags */
set_c0_brcm_config_0(0x30000);
/* Find out if we are running on TP0 or TP1 */
boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31));
/*
* MIPS interrupts 0,1 (SW INT 0,1) cross over to the other
* thread
* MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output
* MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output
*/
if (boot_cpu == 0)
cpu_hw_intr = 0x02;
else
cpu_hw_intr = 0x1d;
change_c0_brcm_cmt_intr(0xf8018000,
(cpu_hw_intr << 27) | (0x03 << 15));
/* single core, 2 threads (2 pipelines) */
max_cpus = 2;
break;
case CPU_BMIPS5000:
/* enable raceless SW interrupts */
set_c0_brcm_config(0x03 << 22);
/* route HW interrupt 0 to CPU0, HW interrupt 1 to CPU1 */
change_c0_brcm_mode(0x1f << 27, 0x02 << 27);
/* N cores, 2 threads per core */
max_cpus = (((read_c0_brcm_config() >> 6) & 0x03) + 1) << 1;
/* clear any pending SW interrupts */
for (i = 0; i < max_cpus; i++) {
write_c0_brcm_action(ACTION_CLR_IPI(i, 0));
write_c0_brcm_action(ACTION_CLR_IPI(i, 1));
}
break;
default:
max_cpus = 1;
}
if (!bmips_smp_enabled)
max_cpus = 1;
/* this can be overridden by the BSP */
if (!board_ebase_setup)
board_ebase_setup = &bmips_ebase_setup;
__cpu_number_map[boot_cpu] = 0;
__cpu_logical_map[0] = boot_cpu;
for (i = 0; i < max_cpus; i++) {
if (i != boot_cpu) {
__cpu_number_map[i] = cpu;
__cpu_logical_map[cpu] = i;
cpu++;
}
set_cpu_possible(i, 1);
set_cpu_present(i, 1);
}
}
/*
* IPI IRQ setup - runs on CPU0
*/
static void bmips_prepare_cpus(unsigned int max_cpus)
{
irqreturn_t (*bmips_ipi_interrupt)(int irq, void *dev_id);
switch (current_cpu_type()) {
case CPU_BMIPS4350:
case CPU_BMIPS4380:
bmips_ipi_interrupt = bmips43xx_ipi_interrupt;
break;
case CPU_BMIPS5000:
bmips_ipi_interrupt = bmips5000_ipi_interrupt;
break;
default:
return;
}
if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
"smp_ipi0", NULL))
panic("Can't request IPI0 interrupt");
if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
"smp_ipi1", NULL))
panic("Can't request IPI1 interrupt");
}
/*
* Tell the hardware to boot CPUx - runs on CPU0
*/
static void bmips_boot_secondary(int cpu, struct task_struct *idle)
{
bmips_smp_boot_sp = __KSTK_TOS(idle);
bmips_smp_boot_gp = (unsigned long)task_thread_info(idle);
mb();
/*
* Initial boot sequence for secondary CPU:
* bmips_reset_nmi_vec @ a000_0000 ->
* bmips_smp_entry ->
* plat_wired_tlb_setup (cached function call; optional) ->
* start_secondary (cached jump)
*
* Warm restart sequence:
* play_dead WAIT loop ->
* bmips_smp_int_vec @ BMIPS_WARM_RESTART_VEC ->
* eret to play_dead ->
* bmips_secondary_reentry ->
* start_secondary
*/
pr_info("SMP: Booting CPU%d...\n", cpu);
if (cpumask_test_cpu(cpu, &bmips_booted_mask)) {
switch (current_cpu_type()) {
case CPU_BMIPS4350:
case CPU_BMIPS4380:
bmips43xx_send_ipi_single(cpu, 0);
break;
case CPU_BMIPS5000:
bmips5000_send_ipi_single(cpu, 0);
break;
}
} else {
switch (current_cpu_type()) {
case CPU_BMIPS4350:
case CPU_BMIPS4380:
/* Reset slave TP1 if booting from TP0 */
if (cpu_logical_map(cpu) == 1)
set_c0_brcm_cmt_ctrl(0x01);
break;
case CPU_BMIPS5000:
if (cpu & 0x01)
write_c0_brcm_action(ACTION_BOOT_THREAD(cpu));
else {
/*
* core N thread 0 was already booted; just
* pulse the NMI line
*/
bmips_write_zscm_reg(0x210, 0xc0000000);
udelay(10);
bmips_write_zscm_reg(0x210, 0x00);
}
break;
}
cpumask_set_cpu(cpu, &bmips_booted_mask);
}
}
/*
* Early setup - runs on secondary CPU after cache probe
*/
static void bmips_init_secondary(void)
{
/* move NMI vector to kseg0, in case XKS01 is enabled */
void __iomem *cbr;
unsigned long old_vec;
unsigned long relo_vector;
int boot_cpu;
switch (current_cpu_type()) {
case CPU_BMIPS4350:
case CPU_BMIPS4380:
cbr = BMIPS_GET_CBR();
boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31));
relo_vector = boot_cpu ? BMIPS_RELO_VECTOR_CONTROL_0 :
BMIPS_RELO_VECTOR_CONTROL_1;
old_vec = __raw_readl(cbr + relo_vector);
__raw_writel(old_vec & ~0x20000000, cbr + relo_vector);
clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0);
break;
case CPU_BMIPS5000:
write_c0_brcm_bootvec(read_c0_brcm_bootvec() &
(smp_processor_id() & 0x01 ? ~0x20000000 : ~0x2000));
write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0));
break;
}
}
/*
* Late setup - runs on secondary CPU before entering the idle loop
*/
static void bmips_smp_finish(void)
{
pr_info("SMP: CPU%d is running\n", smp_processor_id());
/* make sure there won't be a timer interrupt for a little while */
write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
irq_enable_hazard();
set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ1 | IE_IRQ5 | ST0_IE);
irq_enable_hazard();
}
/*
* BMIPS5000 raceless IPIs
*
* Each CPU has two inbound SW IRQs which are independent of all other CPUs.
* IPI0 is used for SMP_RESCHEDULE_YOURSELF
* IPI1 is used for SMP_CALL_FUNCTION
*/
static void bmips5000_send_ipi_single(int cpu, unsigned int action)
{
write_c0_brcm_action(ACTION_SET_IPI(cpu, action == SMP_CALL_FUNCTION));
}
static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id)
{
int action = irq - IPI0_IRQ;
write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), action));
if (action == 0)
scheduler_ipi();
else
smp_call_function_interrupt();
return IRQ_HANDLED;
}
static void bmips5000_send_ipi_mask(const struct cpumask *mask,
unsigned int action)
{
unsigned int i;
for_each_cpu(i, mask)
bmips5000_send_ipi_single(i, action);
}
/*
* BMIPS43xx racey IPIs
*
* We use one inbound SW IRQ for each CPU.
*
* A spinlock must be held in order to keep CPUx from accidentally clearing
* an incoming IPI when it writes CP0 CAUSE to raise an IPI on CPUy. The
* same spinlock is used to protect the action masks.
*/
static DEFINE_SPINLOCK(ipi_lock);
static DEFINE_PER_CPU(int, ipi_action_mask);
static void bmips43xx_send_ipi_single(int cpu, unsigned int action)
{
unsigned long flags;
spin_lock_irqsave(&ipi_lock, flags);
set_c0_cause(cpu ? C_SW1 : C_SW0);
per_cpu(ipi_action_mask, cpu) |= action;
irq_enable_hazard();
spin_unlock_irqrestore(&ipi_lock, flags);
}
static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id)
{
unsigned long flags;
int action, cpu = irq - IPI0_IRQ;
spin_lock_irqsave(&ipi_lock, flags);
action = __this_cpu_read(ipi_action_mask);
per_cpu(ipi_action_mask, cpu) = 0;
clear_c0_cause(cpu ? C_SW1 : C_SW0);
spin_unlock_irqrestore(&ipi_lock, flags);
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
if (action & SMP_CALL_FUNCTION)
smp_call_function_interrupt();
return IRQ_HANDLED;
}
static void bmips43xx_send_ipi_mask(const struct cpumask *mask,
unsigned int action)
{
unsigned int i;
for_each_cpu(i, mask)
bmips43xx_send_ipi_single(i, action);
}
#ifdef CONFIG_HOTPLUG_CPU
static int bmips_cpu_disable(void)
{
unsigned int cpu = smp_processor_id();
if (cpu == 0)
return -EBUSY;
pr_info("SMP: CPU%d is offline\n", cpu);
set_cpu_online(cpu, false);
cpu_clear(cpu, cpu_callin_map);
local_flush_tlb_all();
local_flush_icache_range(0, ~0);
return 0;
}
static void bmips_cpu_die(unsigned int cpu)
{
}
void __ref play_dead(void)
{
idle_task_exit();
/* flush data cache */
_dma_cache_wback_inv(0, ~0);
/*
* Wakeup is on SW0 or SW1; disable everything else
* Use BEV !IV (BMIPS_WARM_RESTART_VEC) to avoid the regular Linux
* IRQ handlers; this clears ST0_IE and returns immediately.
*/
clear_c0_cause(CAUSEF_IV | C_SW0 | C_SW1);
change_c0_status(IE_IRQ5 | IE_IRQ1 | IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV,
IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV);
irq_disable_hazard();
/*
* wait for SW interrupt from bmips_boot_secondary(), then jump
* back to start_secondary()
*/
__asm__ __volatile__(
" wait\n"
" j bmips_secondary_reentry\n"
: : : "memory");
}
#endif /* CONFIG_HOTPLUG_CPU */
struct plat_smp_ops bmips43xx_smp_ops = {
.smp_setup = bmips_smp_setup,
.prepare_cpus = bmips_prepare_cpus,
.boot_secondary = bmips_boot_secondary,
.smp_finish = bmips_smp_finish,
.init_secondary = bmips_init_secondary,
.send_ipi_single = bmips43xx_send_ipi_single,
.send_ipi_mask = bmips43xx_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_disable = bmips_cpu_disable,
.cpu_die = bmips_cpu_die,
#endif
};
struct plat_smp_ops bmips5000_smp_ops = {
.smp_setup = bmips_smp_setup,
.prepare_cpus = bmips_prepare_cpus,
.boot_secondary = bmips_boot_secondary,
.smp_finish = bmips_smp_finish,
.init_secondary = bmips_init_secondary,
.send_ipi_single = bmips5000_send_ipi_single,
.send_ipi_mask = bmips5000_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_disable = bmips_cpu_disable,
.cpu_die = bmips_cpu_die,
#endif
};
#endif /* CONFIG_SMP */
/***********************************************************************
* BMIPS vector relocation
* This is primarily used for SMP boot, but it is applicable to some
* UP BMIPS systems as well.
***********************************************************************/
static void bmips_wr_vec(unsigned long dst, char *start, char *end)
{
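/*
 * Copy the exception/NMI stub into place and flush the caches so the
 * new code is visible to instruction fetches.
 */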
memcpy((void *)dst, start, end - start);
dma_cache_wback((unsigned long)start, end - start);
local_flush_icache_range(dst, dst + (end - start));
instruction_hazard();
}
static inline void bmips_nmi_handler_setup(void)
{
bmips_wr_vec(BMIPS_NMI_RESET_VEC, &bmips_reset_nmi_vec,
&bmips_reset_nmi_vec_end);
bmips_wr_vec(BMIPS_WARM_RESTART_VEC, &bmips_smp_int_vec,
&bmips_smp_int_vec_end);
}
void bmips_ebase_setup(void)
{
unsigned long new_ebase = ebase;
void __iomem __maybe_unused *cbr;
BUG_ON(ebase != CKSEG0);
switch (current_cpu_type()) {
case CPU_BMIPS4350:
/*
* BMIPS4350 cannot relocate the normal vectors, but it
* can relocate the BEV=1 vectors. So CPU1 starts up at
* the relocated BEV=1, IV=0 general exception vector @
* 0xa000_0380.
*
* set_uncached_handler() is used here because:
* - CPU1 will run this from uncached space
* - None of the cacheflush functions are set up yet
*/
set_uncached_handler(BMIPS_WARM_RESTART_VEC - CKSEG0,
&bmips_smp_int_vec, 0x80);
__sync();
return;
case CPU_BMIPS4380:
/*
* 0x8000_0000: reset/NMI (initially in kseg1)
* 0x8000_0400: normal vectors
*/
new_ebase = 0x80000400;
cbr = BMIPS_GET_CBR();
__raw_writel(0x80080800, cbr + BMIPS_RELO_VECTOR_CONTROL_0);
__raw_writel(0xa0080800, cbr + BMIPS_RELO_VECTOR_CONTROL_1);
break;
case CPU_BMIPS5000:
/*
* 0x8000_0000: reset/NMI (initially in kseg1)
* 0x8000_1000: normal vectors
*/
new_ebase = 0x80001000;
write_c0_brcm_bootvec(0xa0088008);
write_c0_ebase(new_ebase);
if (max_cpus > 2)
bmips_write_zscm_reg(0xa0, 0xa008a008);
break;
default:
return;
}
board_nmi_handler_setup = &bmips_nmi_handler_setup;
ebase = new_ebase;
}
asmlinkage void __weak plat_wired_tlb_setup(void)
{
/*
* Called when starting/restarting a secondary CPU.
* Kernel stacks and other important data might only be accessible
* once the wired entries are present.
*/
}

159
arch/mips/kernel/smp-cmp.c Normal file
View file

@ -0,0 +1,159 @@
/*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Copyright (C) 2007 MIPS Technologies, Inc.
* Chris Dearman (chris@mips.com)
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/mips_mt.h>
#include <asm/amon.h>
#include <asm/gic.h>
static void cmp_init_secondary(void)
{
struct cpuinfo_mips *c __maybe_unused = &current_cpu_data;
/* Assume GIC is present */
change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 |
STATUSF_IP7);
/* Enable per-cpu interrupts: platform specific */
#ifdef CONFIG_MIPS_MT_SMP
if (cpu_has_mipsmt)
c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) &
TCBIND_CURVPE;
#endif
}
static void cmp_smp_finish(void)
{
pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
/* CDFIXME: remove this? */
write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));
#ifdef CONFIG_MIPS_MT_FPAFF
/* If we have an FPU, enroll ourselves in the FPU-full mask */
if (cpu_has_fpu)
cpu_set(smp_processor_id(), mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
local_irq_enable();
}
/*
* Setup the PC, SP, and GP of a secondary processor and start it running
* smp_bootstrap is the place to resume from
* __KSTK_TOS(idle) is apparently the stack pointer
* (unsigned long)idle->thread_info the gp
*/
static void cmp_boot_secondary(int cpu, struct task_struct *idle)
{
struct thread_info *gp = task_thread_info(idle);
unsigned long sp = __KSTK_TOS(idle);
unsigned long pc = (unsigned long)&smp_bootstrap;
unsigned long a0 = 0;
pr_debug("SMPCMP: CPU%d: %s cpu %d\n", smp_processor_id(),
__func__, cpu);
#if 0
/* Needed? */
flush_icache_range((unsigned long)gp,
(unsigned long)(gp + sizeof(struct thread_info)));
#endif
amon_cpu_start(cpu, pc, sp, (unsigned long)gp, a0);
}
/*
* Common setup before any secondaries are started
*/
void __init cmp_smp_setup(void)
{
int i;
int ncpu = 0;
pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
#ifdef CONFIG_MIPS_MT_FPAFF
/* If we have an FPU, enroll ourselves in the FPU-full mask */
if (cpu_has_fpu)
cpu_set(0, mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
for (i = 1; i < NR_CPUS; i++) {
if (amon_cpu_avail(i)) {
set_cpu_possible(i, true);
__cpu_number_map[i] = ++ncpu;
__cpu_logical_map[ncpu] = i;
}
}
if (cpu_has_mipsmt) {
unsigned int nvpe = 1;
#ifdef CONFIG_MIPS_MT_SMP
unsigned int mvpconf0 = read_c0_mvpconf0();
nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
#endif
smp_num_siblings = nvpe;
}
pr_info("Detected %i available secondary CPU(s)\n", ncpu);
}
void __init cmp_prepare_cpus(unsigned int max_cpus)
{
pr_debug("SMPCMP: CPU%d: %s max_cpus=%d\n",
smp_processor_id(), __func__, max_cpus);
#ifdef CONFIG_MIPS_MT
/*
* FIXME: some of these options are per-system, some per-core and
* some per-cpu
*/
mips_mt_set_cpuoptions();
#endif
}
struct plat_smp_ops cmp_smp_ops = {
.send_ipi_single = gic_send_ipi_single,
.send_ipi_mask = gic_send_ipi_mask,
.init_secondary = cmp_init_secondary,
.smp_finish = cmp_smp_finish,
.boot_secondary = cmp_boot_secondary,
.smp_setup = cmp_smp_setup,
.prepare_cpus = cmp_prepare_cpus,
};

476
arch/mips/kernel/smp-cps.c Normal file
View file

@ -0,0 +1,476 @@
/*
* Copyright (C) 2013 Imagination Technologies
* Author: Paul Burton <paul.burton@imgtec.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/io.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <asm/bcache.h>
#include <asm/gic.h>
#include <asm/mips-cm.h>
#include <asm/mips-cpc.h>
#include <asm/mips_mt.h>
#include <asm/mipsregs.h>
#include <asm/pm-cps.h>
#include <asm/r4kcache.h>
#include <asm/smp-cps.h>
#include <asm/time.h>
#include <asm/uasm.h>
static DECLARE_BITMAP(core_power, NR_CPUS);
struct core_boot_config *mips_cps_core_bootcfg;
static unsigned core_vpe_count(unsigned core)
{
unsigned cfg;
if (!config_enabled(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
return 1;
write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF);
cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;
return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
}
static void __init cps_smp_setup(void)
{
unsigned int ncores, nvpes, core_vpes;
int c, v;
/* Detect & record VPE topology */
ncores = mips_cm_numcores();
pr_info("VPE topology ");
for (c = nvpes = 0; c < ncores; c++) {
core_vpes = core_vpe_count(c);
pr_cont("%c%u", c ? ',' : '{', core_vpes);
/* Use the number of VPEs in core 0 for smp_num_siblings */
if (!c)
smp_num_siblings = core_vpes;
for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
cpu_data[nvpes + v].core = c;
#ifdef CONFIG_MIPS_MT_SMP
cpu_data[nvpes + v].vpe_id = v;
#endif
}
nvpes += core_vpes;
}
pr_cont("} total %u\n", nvpes);
/* Indicate present CPUs (CPU being synonymous with VPE) */
for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
set_cpu_possible(v, true);
set_cpu_present(v, true);
__cpu_number_map[v] = v;
__cpu_logical_map[v] = v;
}
/* Set a coherent default CCA (CWB) */
change_c0_config(CONF_CM_CMASK, 0x5);
/* Core 0 is powered up (we're running on it) */
bitmap_set(core_power, 0, 1);
/* Initialise core 0 */
mips_cps_core_init();
/* Make core 0 coherent with everything */
write_gcr_cl_coherence(0xff);
#ifdef CONFIG_MIPS_MT_FPAFF
/* If we have an FPU, enroll ourselves in the FPU-full mask */
if (cpu_has_fpu)
cpu_set(0, mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
}
static void __init cps_prepare_cpus(unsigned int max_cpus)
{
unsigned ncores, core_vpes, c, cca;
bool cca_unsuitable;
u32 *entry_code;
mips_mt_set_cpuoptions();
/* Detect whether the CCA is unsuited to multi-core SMP */
cca = read_c0_config() & CONF_CM_CMASK;
switch (cca) {
case 0x4: /* CWBE */
case 0x5: /* CWB */
/* The CCA is coherent, multi-core is fine */
cca_unsuitable = false;
break;
default:
/* CCA is not coherent, multi-core is not usable */
cca_unsuitable = true;
}
/* Warn the user if the CCA prevents multi-core */
ncores = mips_cm_numcores();
if (cca_unsuitable && ncores > 1) {
pr_warn("Using only one core due to unsuitable CCA 0x%x\n",
cca);
for_each_present_cpu(c) {
if (cpu_data[c].core)
set_cpu_present(c, false);
}
}
/*
* Patch the start of mips_cps_core_entry to provide:
*
* v0 = CM base address
* s0 = kseg0 CCA
*/
entry_code = (u32 *)&mips_cps_core_entry;
UASM_i_LA(&entry_code, 3, (long)mips_cm_base);
uasm_i_addiu(&entry_code, 16, 0, cca);
blast_dcache_range((unsigned long)&mips_cps_core_entry,
(unsigned long)entry_code);
bc_wback_inv((unsigned long)&mips_cps_core_entry,
(void *)entry_code - (void *)&mips_cps_core_entry);
__sync();
/* Allocate core boot configuration structs */
mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
GFP_KERNEL);
if (!mips_cps_core_bootcfg) {
pr_err("Failed to allocate boot config for %u cores\n", ncores);
goto err_out;
}
/* Allocate VPE boot configuration structs */
for (c = 0; c < ncores; c++) {
core_vpes = core_vpe_count(c);
mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
sizeof(*mips_cps_core_bootcfg[c].vpe_config),
GFP_KERNEL);
if (!mips_cps_core_bootcfg[c].vpe_config) {
pr_err("Failed to allocate %u VPE boot configs\n",
core_vpes);
goto err_out;
}
}
/* Mark this CPU as booted */
atomic_set(&mips_cps_core_bootcfg[current_cpu_data.core].vpe_mask,
1 << cpu_vpe_id(&current_cpu_data));
return;
err_out:
/* Clean up allocations */
if (mips_cps_core_bootcfg) {
for (c = 0; c < ncores; c++)
kfree(mips_cps_core_bootcfg[c].vpe_config);
kfree(mips_cps_core_bootcfg);
mips_cps_core_bootcfg = NULL;
}
/* Effectively disable SMP by declaring CPUs not present */
for_each_possible_cpu(c) {
if (c == 0)
continue;
set_cpu_present(c, false);
}
}
static void boot_core(unsigned core)
{
u32 access;
/* Select the appropriate core */
write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF);
/* Set its reset vector */
write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));
/* Ensure its coherency is disabled */
write_gcr_co_coherence(0);
/* Ensure the core can access the GCRs */
access = read_gcr_access();
access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + core);
write_gcr_access(access);
if (mips_cpc_present()) {
/* Reset the core */
mips_cpc_lock_other(core);
write_cpc_co_cmd(CPC_Cx_CMD_RESET);
mips_cpc_unlock_other();
} else {
/* Take the core out of reset */
write_gcr_co_reset_release(0);
}
/* The core is now powered up */
bitmap_set(core_power, core, 1);
}
static void remote_vpe_boot(void *dummy)
{
mips_cps_boot_vpes();
}
static void cps_boot_secondary(int cpu, struct task_struct *idle)
{
unsigned core = cpu_data[cpu].core;
unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
unsigned int remote;
int err;
vpe_cfg->pc = (unsigned long)&smp_bootstrap;
vpe_cfg->sp = __KSTK_TOS(idle);
vpe_cfg->gp = (unsigned long)task_thread_info(idle);
atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);
preempt_disable();
if (!test_bit(core, core_power)) {
/* Boot a VPE on a powered down core */
boot_core(core);
goto out;
}
if (core != current_cpu_data.core) {
/* Boot a VPE on another powered up core */
for (remote = 0; remote < NR_CPUS; remote++) {
if (cpu_data[remote].core != core)
continue;
if (cpu_online(remote))
break;
}
BUG_ON(remote >= NR_CPUS);
err = smp_call_function_single(remote, remote_vpe_boot,
NULL, 1);
if (err)
panic("Failed to call remote CPU\n");
goto out;
}
BUG_ON(!cpu_has_mipsmt);
/* Boot a VPE on this core */
mips_cps_boot_vpes();
out:
preempt_enable();
}
static void cps_init_secondary(void)
{
/* Disable MT - we only want to run 1 TC per VPE */
if (cpu_has_mipsmt)
dmt();
change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
STATUSF_IP6 | STATUSF_IP7);
}
static void cps_smp_finish(void)
{
write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));
#ifdef CONFIG_MIPS_MT_FPAFF
/* If we have an FPU, enroll ourselves in the FPU-full mask */
if (cpu_has_fpu)
cpu_set(smp_processor_id(), mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
local_irq_enable();
}
#ifdef CONFIG_HOTPLUG_CPU
static int cps_cpu_disable(void)
{
unsigned cpu = smp_processor_id();
struct core_boot_config *core_cfg;
if (!cpu)
return -EBUSY;
if (!cps_pm_support_state(CPS_PM_POWER_GATED))
return -EINVAL;
core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
smp_mb__after_atomic();
set_cpu_online(cpu, false);
cpu_clear(cpu, cpu_callin_map);
return 0;
}
static DECLARE_COMPLETION(cpu_death_chosen);
static unsigned cpu_death_sibling;
static enum {
CPU_DEATH_HALT,
CPU_DEATH_POWER,
} cpu_death;
void play_dead(void)
{
unsigned cpu, core;
local_irq_disable();
idle_task_exit();
cpu = smp_processor_id();
cpu_death = CPU_DEATH_POWER;
if (cpu_has_mipsmt) {
core = cpu_data[cpu].core;
/* Look for another online VPE within the core */
for_each_online_cpu(cpu_death_sibling) {
if (cpu_data[cpu_death_sibling].core != core)
continue;
/*
* There is an online VPE within the core. Just halt
* this TC and leave the core alone.
*/
cpu_death = CPU_DEATH_HALT;
break;
}
}
/* This CPU has chosen its way out */
complete(&cpu_death_chosen);
if (cpu_death == CPU_DEATH_HALT) {
/* Halt this TC */
write_c0_tchalt(TCHALT_H);
instruction_hazard();
} else {
/* Power down the core */
cps_pm_enter_state(CPS_PM_POWER_GATED);
}
/* This should never be reached */
panic("Failed to offline CPU %u", cpu);
}
static void wait_for_sibling_halt(void *ptr_cpu)
{
unsigned cpu = (unsigned long)ptr_cpu;
unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
unsigned halted;
unsigned long flags;
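/*
 * Poll the sibling TC's TCHalt register until the halt bit is set.
 * Interrupts are disabled around settc() since it redirects the
 * per-TC register accessors.
 */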
do {
local_irq_save(flags);
settc(vpe_id);
halted = read_tc_c0_tchalt();
local_irq_restore(flags);
} while (!(halted & TCHALT_H));
}
static void cps_cpu_die(unsigned int cpu)
{
unsigned core = cpu_data[cpu].core;
unsigned stat;
int err;
/* Wait for the cpu to choose its way out */
if (!wait_for_completion_timeout(&cpu_death_chosen,
msecs_to_jiffies(5000))) {
pr_err("CPU%u: didn't offline\n", cpu);
return;
}
/*
* Now wait for the CPU to actually offline. Without doing this that
* offlining may race with one or more of:
*
* - Onlining the CPU again.
* - Powering down the core if another VPE within it is offlined.
* - A sibling VPE entering a non-coherent state.
*
* In the non-MT halt case (ie. infinite loop) the CPU is doing nothing
* with which we could race, so do nothing.
*/
if (cpu_death == CPU_DEATH_POWER) {
/*
* Wait for the core to enter a powered down or clock gated
* state, the latter happening when a JTAG probe is connected
* in which case the CPC will refuse to power down the core.
*/
do {
mips_cpc_lock_other(core);
stat = read_cpc_co_stat_conf();
stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
mips_cpc_unlock_other();
} while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 &&
stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 &&
stat != CPC_Cx_STAT_CONF_SEQSTATE_U2);
/* Indicate the core is powered off */
bitmap_clear(core_power, core, 1);
} else if (cpu_has_mipsmt) {
/*
* Have a CPU with access to the offlined CPUs registers wait
* for its TC to halt.
*/
err = smp_call_function_single(cpu_death_sibling,
wait_for_sibling_halt,
(void *)(unsigned long)cpu, 1);
if (err)
panic("Failed to call remote sibling CPU\n");
}
}
#endif /* CONFIG_HOTPLUG_CPU */
static struct plat_smp_ops cps_smp_ops = {
.smp_setup = cps_smp_setup,
.prepare_cpus = cps_prepare_cpus,
.boot_secondary = cps_boot_secondary,
.init_secondary = cps_init_secondary,
.smp_finish = cps_smp_finish,
.send_ipi_single = gic_send_ipi_single,
.send_ipi_mask = gic_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_disable = cps_cpu_disable,
.cpu_die = cps_cpu_die,
#endif
};
bool mips_cps_smp_in_use(void)
{
extern struct plat_smp_ops *mp_ops;
return mp_ops == &cps_smp_ops;
}
int register_cps_smp_ops(void)
{
if (!mips_cm_present()) {
pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
return -ENODEV;
}
/* check we have a GIC - we need one for IPIs */
if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX_MSK)) {
pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
return -ENODEV;
}
register_smp_ops(&cps_smp_ops);
return 0;
}

64
arch/mips/kernel/smp-gic.c Normal file
View file

@ -0,0 +1,64 @@
/*
* Copyright (C) 2013 Imagination Technologies
* Author: Paul Burton <paul.burton@imgtec.com>
*
* Based on smp-cmp.c:
* Copyright (C) 2007 MIPS Technologies, Inc.
* Author: Chris Dearman (chris@mips.com)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/printk.h>
#include <asm/gic.h>
#include <asm/mips-cpc.h>
#include <asm/smp-ops.h>
void gic_send_ipi_single(int cpu, unsigned int action)
{
unsigned long flags;
unsigned int intr;
unsigned int core = cpu_data[cpu].core;
pr_debug("CPU%d: %s cpu %d action %u status %08x\n",
smp_processor_id(), __func__, cpu, action, read_c0_status());
local_irq_save(flags);
switch (action) {
case SMP_CALL_FUNCTION:
intr = plat_ipi_call_int_xlate(cpu);
break;
case SMP_RESCHEDULE_YOURSELF:
intr = plat_ipi_resched_int_xlate(cpu);
break;
default:
BUG();
}
gic_send_ipi(intr);
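/*
 * If the target core is not currently coherent (it may be clock or
 * power gated), keep issuing a CPC power-up command until it rejoins
 * the coherent domain and can take the IPI.
 */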
if (mips_cpc_present() && (core != current_cpu_data.core)) {
while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
mips_cpc_lock_other(core);
write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
mips_cpc_unlock_other();
}
}
local_irq_restore(flags);
}
void gic_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
unsigned int i;
for_each_cpu(i, mask)
gic_send_ipi_single(i, action);
}

313
arch/mips/kernel/smp-mt.c Normal file
View file

@ -0,0 +1,313 @@
/*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Copyright (C) 2004, 05, 06 MIPS Technologies, Inc.
* Elizabeth Clarke (beth@mips.com)
* Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/smp.h>
#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/mips_mt.h>
#include <asm/gic.h>
static void __init smvp_copy_vpe_config(void)
{
write_vpe_c0_status(
(read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
/* set config to be the same as vpe0, particularly kseg0 coherency alg */
write_vpe_c0_config(read_c0_config());
/* make sure there are no software interrupts pending */
write_vpe_c0_cause(0);
/* Propagate Config7 */
write_vpe_c0_config7(read_c0_config7());
write_vpe_c0_count(read_c0_count());
}
static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
unsigned int ncpu)
{
if (tc > ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT))
return ncpu;
/* Deactivate all but VPE 0 */
if (tc != 0) {
unsigned long tmp = read_vpe_c0_vpeconf0();
tmp &= ~VPECONF0_VPA;
/* master VPE */
tmp |= VPECONF0_MVP;
write_vpe_c0_vpeconf0(tmp);
/* Record this as available CPU */
set_cpu_possible(tc, true);
set_cpu_present(tc, true);
__cpu_number_map[tc] = ++ncpu;
__cpu_logical_map[ncpu] = tc;
}
/* Disable multi-threading with TC's */
write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
if (tc != 0)
smvp_copy_vpe_config();
return ncpu;
}
static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0)
{
unsigned long tmp;
if (!tc)
return;
/* bind a TC to each VPE; may as well put all excess TCs
on the last VPE */
if (tc >= (((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)+1))
write_tc_c0_tcbind(read_tc_c0_tcbind() | ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT));
else {
write_tc_c0_tcbind(read_tc_c0_tcbind() | tc);
/* and set XTC */
write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | (tc << VPECONF0_XTC_SHIFT));
}
tmp = read_tc_c0_tcstatus();
/* mark not allocated and not dynamically allocatable */
tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
tmp |= TCSTATUS_IXMT; /* interrupt exempt */
write_tc_c0_tcstatus(tmp);
write_tc_c0_tchalt(TCHALT_H);
}
static void vsmp_send_ipi_single(int cpu, unsigned int action)
{
int i;
unsigned long flags;
int vpflags;
#ifdef CONFIG_IRQ_GIC
if (gic_present) {
gic_send_ipi_single(cpu, action);
return;
}
#endif
local_irq_save(flags);
vpflags = dvpe(); /* can't access the other CPU's registers whilst MVPE enabled */
switch (action) {
case SMP_CALL_FUNCTION:
i = C_SW1;
break;
case SMP_RESCHEDULE_YOURSELF:
default:
i = C_SW0;
break;
}
/* 1:1 mapping of vpe and tc... */
settc(cpu);
write_vpe_c0_cause(read_vpe_c0_cause() | i);
evpe(vpflags);
local_irq_restore(flags);
}
static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
unsigned int i;
for_each_cpu(i, mask)
vsmp_send_ipi_single(i, action);
}
static void vsmp_init_secondary(void)
{
#ifdef CONFIG_IRQ_GIC
/* This is Malta specific: IPI, performance and timer interrupts */
if (gic_present)
change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
STATUSF_IP6 | STATUSF_IP7);
else
#endif
change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 |
STATUSF_IP6 | STATUSF_IP7);
}
static void vsmp_smp_finish(void)
{
/* CDFIXME: remove this? */
write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
#ifdef CONFIG_MIPS_MT_FPAFF
/* If we have an FPU, enroll ourselves in the FPU-full mask */
if (cpu_has_fpu)
cpu_set(smp_processor_id(), mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
local_irq_enable();
}
/*
* Setup the PC, SP, and GP of a secondary processor and start it
* running!
* smp_bootstrap is the place to resume from
* __KSTK_TOS(idle) is apparently the stack pointer
* (unsigned long)idle->thread_info the gp
* assumes a 1:1 mapping of TC => VPE
*/
static void vsmp_boot_secondary(int cpu, struct task_struct *idle)
{
struct thread_info *gp = task_thread_info(idle);
dvpe();
set_c0_mvpcontrol(MVPCONTROL_VPC);
settc(cpu);
/* restart */
write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
/* enable the tc this vpe/cpu will be running */
write_tc_c0_tcstatus((read_tc_c0_tcstatus() & ~TCSTATUS_IXMT) | TCSTATUS_A);
write_tc_c0_tchalt(0);
/* enable the VPE */
write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
/* stack pointer */
write_tc_gpr_sp( __KSTK_TOS(idle));
/* global pointer */
write_tc_gpr_gp((unsigned long)gp);
flush_icache_range((unsigned long)gp,
(unsigned long)(gp + sizeof(struct thread_info)));
/* finally out of configuration and into chaos */
clear_c0_mvpcontrol(MVPCONTROL_VPC);
evpe(EVPE_ENABLE);
}
/*
* Common setup before any secondaries are started
* Make sure all CPU's are in a sensible state before we boot any of the
* secondaries
*/
static void __init vsmp_smp_setup(void)
{
unsigned int mvpconf0, ntc, tc, ncpu = 0;
unsigned int nvpe;
#ifdef CONFIG_MIPS_MT_FPAFF
/* If we have an FPU, enroll ourselves in the FPU-full mask */
if (cpu_has_fpu)
cpu_set(0, mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
if (!cpu_has_mipsmt)
return;
/* disable MT so we can configure */
dvpe();
dmt();
/* Put MVPE's into 'configuration state' */
set_c0_mvpcontrol(MVPCONTROL_VPC);
mvpconf0 = read_c0_mvpconf0();
ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;
nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
smp_num_siblings = nvpe;
/* we'll always have more TC's than VPE's, so loop setting everything
to a sensible state */
for (tc = 0; tc <= ntc; tc++) {
settc(tc);
smvp_tc_init(tc, mvpconf0);
ncpu = smvp_vpe_init(tc, mvpconf0, ncpu);
}
/* Release config state */
clear_c0_mvpcontrol(MVPCONTROL_VPC);
/* We'll wait until starting the secondaries before starting MVPE */
printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu);
}
static void __init vsmp_prepare_cpus(unsigned int max_cpus)
{
mips_mt_set_cpuoptions();
}
struct plat_smp_ops vsmp_smp_ops = {
.send_ipi_single = vsmp_send_ipi_single,
.send_ipi_mask = vsmp_send_ipi_mask,
.init_secondary = vsmp_init_secondary,
.smp_finish = vsmp_smp_finish,
.boot_secondary = vsmp_boot_secondary,
.smp_setup = vsmp_smp_setup,
.prepare_cpus = vsmp_prepare_cpus,
};
#ifdef CONFIG_PROC_FS
static int proc_cpuinfo_chain_call(struct notifier_block *nfb,
unsigned long action_unused, void *data)
{
struct proc_cpuinfo_notifier_args *pcn = data;
struct seq_file *m = pcn->m;
unsigned long n = pcn->n;
if (!cpu_has_mipsmt)
return NOTIFY_OK;
seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
return NOTIFY_OK;
}
static int __init proc_cpuinfo_notifier_init(void)
{
return proc_cpuinfo_notifier(proc_cpuinfo_chain_call, 0);
}
subsys_initcall(proc_cpuinfo_notifier_init);
#endif
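With CONFIG_PROC_FS and an MT-capable CPU, the notifier above appends a line of the form "VPE			: 0" to each logical CPU's /proc/cpuinfo block, reporting cpu_data[n].vpe_id for that CPU.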

78
arch/mips/kernel/smp-up.c Normal file

@ -0,0 +1,78 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2006, 07 by Ralf Baechle (ralf@linux-mips.org)
*
* Symmetric Uniprocessor (TM) Support
*/
#include <linux/kernel.h>
#include <linux/sched.h>
/*
* Send inter-processor interrupt
*/
static void up_send_ipi_single(int cpu, unsigned int action)
{
panic(KERN_ERR "%s called", __func__);
}
static inline void up_send_ipi_mask(const struct cpumask *mask,
unsigned int action)
{
panic(KERN_ERR "%s called", __func__);
}
/*
* After we've done initial boot, this function is called to allow the
* board code to clean up state, if needed
*/
static void up_init_secondary(void)
{
}
static void up_smp_finish(void)
{
}
/*
* Firmware CPU startup hook
*/
static void up_boot_secondary(int cpu, struct task_struct *idle)
{
}
static void __init up_smp_setup(void)
{
}
static void __init up_prepare_cpus(unsigned int max_cpus)
{
}
#ifdef CONFIG_HOTPLUG_CPU
static int up_cpu_disable(void)
{
return -ENOSYS;
}
static void up_cpu_die(unsigned int cpu)
{
BUG();
}
#endif
struct plat_smp_ops up_smp_ops = {
.send_ipi_single = up_send_ipi_single,
.send_ipi_mask = up_send_ipi_mask,
.init_secondary = up_init_secondary,
.smp_finish = up_smp_finish,
.boot_secondary = up_boot_secondary,
.smp_setup = up_smp_setup,
.prepare_cpus = up_prepare_cpus,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_disable = up_cpu_disable,
.cpu_die = up_cpu_die,
#endif
};
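For illustration, here is a minimal sketch of how a board's early setup code might select between the two ops structures defined above; the prom_init() hook name and the cpu_has_mipsmt probe are assumptions made for the example, and real platforms differ:

#include <linux/init.h>
#include <asm/cpu-features.h>
#include <asm/smp-ops.h>

/* illustrative sketch, not part of this commit */
extern struct plat_smp_ops vsmp_smp_ops;	/* defined in smp-mt.c above */
extern struct plat_smp_ops up_smp_ops;		/* defined in smp-up.c above */

void __init prom_init(void)
{
	/* ... board-specific probing ... */
	if (cpu_has_mipsmt)
		register_smp_ops(&vsmp_smp_ops);
	else
		register_smp_ops(&up_smp_ops);
}

register_smp_ops() (defined in smp.c below) just records the pointer; the core SMP code then calls through it for boot_secondary, send_ipi_* and the other hooks.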

463
arch/mips/kernel/smp.c Normal file

@ -0,0 +1,463 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) 2000, 2001 Kanoj Sarcar
* Copyright (C) 2000, 2001 Ralf Baechle
* Copyright (C) 2000, 2001 Silicon Graphics, Inc.
* Copyright (C) 2000, 2001, 2003 Broadcom Corporation
*/
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>
#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>
volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */
int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);
int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);
/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);
/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);
/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;
/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;
cpumask_t cpu_coherent_mask;
static inline void set_cpu_sibling_map(int cpu)
{
int i;
cpu_set(cpu, cpu_sibling_setup_map);
if (smp_num_siblings > 1) {
for_each_cpu_mask(i, cpu_sibling_setup_map) {
if (cpu_data[cpu].package == cpu_data[i].package &&
cpu_data[cpu].core == cpu_data[i].core) {
cpu_set(i, cpu_sibling_map[cpu]);
cpu_set(cpu, cpu_sibling_map[i]);
}
}
} else
cpu_set(cpu, cpu_sibling_map[cpu]);
}
static inline void set_cpu_core_map(int cpu)
{
int i;
cpu_set(cpu, cpu_core_setup_map);
for_each_cpu_mask(i, cpu_core_setup_map) {
if (cpu_data[cpu].package == cpu_data[i].package) {
cpu_set(i, cpu_core_map[cpu]);
cpu_set(cpu, cpu_core_map[i]);
}
}
}
struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);
void register_smp_ops(struct plat_smp_ops *ops)
{
if (mp_ops)
printk(KERN_WARNING "Overriding previously set SMP ops\n");
mp_ops = ops;
}
/*
* First C code run on the secondary CPUs after being started up by
* the master.
*/
asmlinkage void start_secondary(void)
{
unsigned int cpu;
cpu_probe();
per_cpu_trap_init(false);
mips_clockevent_init();
mp_ops->init_secondary();
cpu_report();
/*
* XXX parity protection should be folded in here when it's converted
* to an option instead of something based on .cputype
*/
calibrate_delay();
preempt_disable();
cpu = smp_processor_id();
cpu_data[cpu].udelay_val = loops_per_jiffy;
cpu_set(cpu, cpu_coherent_mask);
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);
set_cpu_sibling_map(cpu);
set_cpu_core_map(cpu);
cpu_set(cpu, cpu_callin_map);
synchronise_count_slave(cpu);
/*
* irq will be enabled in ->smp_finish(), enabling it too early
* is dangerous.
*/
WARN_ON_ONCE(!irqs_disabled());
mp_ops->smp_finish();
cpu_startup_entry(CPUHP_ONLINE);
}
/*
* Call into both interrupt handlers, as we share the IPI for them
*/
void __irq_entry smp_call_function_interrupt(void)
{
irq_enter();
generic_smp_call_function_interrupt();
irq_exit();
}
static void stop_this_cpu(void *dummy)
{
/*
* Remove this CPU:
*/
set_cpu_online(smp_processor_id(), false);
for (;;) {
if (cpu_wait)
(*cpu_wait)(); /* Wait if available. */
}
}
void smp_send_stop(void)
{
smp_call_function(stop_this_cpu, NULL, 0);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}
/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
init_new_context(current, &init_mm);
current_thread_info()->cpu = 0;
mp_ops->prepare_cpus(max_cpus);
set_cpu_sibling_map(0);
set_cpu_core_map(0);
#ifndef CONFIG_HOTPLUG_CPU
init_cpu_present(cpu_possible_mask);
#endif
cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}
/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
set_cpu_possible(0, true);
set_cpu_online(0, true);
cpu_set(0, cpu_callin_map);
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
mp_ops->boot_secondary(cpu, tidle);
/*
* Trust is futile. We should really have timeouts ...
*/
while (!cpu_isset(cpu, cpu_callin_map))
udelay(100);
synchronise_count_master(cpu);
return 0;
}
/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
return 0;
}
static void flush_tlb_all_ipi(void *info)
{
local_flush_tlb_all();
}
void flush_tlb_all(void)
{
on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}
static void flush_tlb_mm_ipi(void *mm)
{
local_flush_tlb_mm((struct mm_struct *)mm);
}
/*
* Special Variant of smp_call_function for use by TLB functions:
*
* o No return value
* o collapses to normal function call on UP kernels
* o collapses to normal function call on systems with a single shared
* primary cache.
*/
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
smp_call_function(func, info, 1);
}
static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
preempt_disable();
smp_on_other_tlbs(func, info);
func(info);
preempt_enable();
}
/*
* The following tlb flush calls are invoked when old translations are
* being torn down, or pte attributes are changing. For single threaded
* address spaces, a new context is obtained on the current cpu, and tlb
* context on other cpus are invalidated to force a new context allocation
* at switch_mm time, should the mm ever be used on other cpus. For
* multithreaded address spaces, intercpu interrupts have to be sent.
* Another case where intercpu interrupts are required is when the target
* mm might be active on another cpu (eg debuggers doing the flushes on
* behalf of debugees, kswapd stealing pages from another process etc).
* Kanoj 07/00.
*/
void flush_tlb_mm(struct mm_struct *mm)
{
preempt_disable();
if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
} else {
unsigned int cpu;
for_each_online_cpu(cpu) {
if (cpu != smp_processor_id() && cpu_context(cpu, mm))
cpu_context(cpu, mm) = 0;
}
}
local_flush_tlb_mm(mm);
preempt_enable();
}
struct flush_tlb_data {
struct vm_area_struct *vma;
unsigned long addr1;
unsigned long addr2;
};
static void flush_tlb_range_ipi(void *info)
{
struct flush_tlb_data *fd = info;
local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
preempt_disable();
if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
struct flush_tlb_data fd = {
.vma = vma,
.addr1 = start,
.addr2 = end,
};
smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
} else {
unsigned int cpu;
for_each_online_cpu(cpu) {
if (cpu != smp_processor_id() && cpu_context(cpu, mm))
cpu_context(cpu, mm) = 0;
}
}
local_flush_tlb_range(vma, start, end);
preempt_enable();
}
static void flush_tlb_kernel_range_ipi(void *info)
{
struct flush_tlb_data *fd = info;
local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
struct flush_tlb_data fd = {
.addr1 = start,
.addr2 = end,
};
on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}
static void flush_tlb_page_ipi(void *info)
{
struct flush_tlb_data *fd = info;
local_flush_tlb_page(fd->vma, fd->addr1);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
preempt_disable();
if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
struct flush_tlb_data fd = {
.vma = vma,
.addr1 = page,
};
smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
} else {
unsigned int cpu;
for_each_online_cpu(cpu) {
if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
cpu_context(cpu, vma->vm_mm) = 0;
}
}
local_flush_tlb_page(vma, page);
preempt_enable();
}
static void flush_tlb_one_ipi(void *info)
{
unsigned long vaddr = (unsigned long) info;
local_flush_tlb_one(vaddr);
}
void flush_tlb_one(unsigned long vaddr)
{
smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}
EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);
#if defined(CONFIG_KEXEC)
void (*dump_ipi_function_ptr)(void *) = NULL;
void dump_send_ipi(void (*dump_ipi_callback)(void *))
{
int i;
int cpu = smp_processor_id();
dump_ipi_function_ptr = dump_ipi_callback;
smp_mb();
for_each_online_cpu(i)
if (i != cpu)
mp_ops->send_ipi_single(i, SMP_DUMP);
}
EXPORT_SYMBOL(dump_send_ipi);
#endif
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd);
void tick_broadcast(const struct cpumask *mask)
{
atomic_t *count;
struct call_single_data *csd;
int cpu;
for_each_cpu(cpu, mask) {
count = &per_cpu(tick_broadcast_count, cpu);
csd = &per_cpu(tick_broadcast_csd, cpu);
if (atomic_inc_return(count) == 1)
smp_call_function_single_async(cpu, csd);
}
}
static void tick_broadcast_callee(void *info)
{
int cpu = smp_processor_id();
tick_receive_broadcast();
atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
}
static int __init tick_broadcast_init(void)
{
struct call_single_data *csd;
int cpu;
for (cpu = 0; cpu < NR_CPUS; cpu++) {
csd = &per_cpu(tick_broadcast_csd, cpu);
csd->func = tick_broadcast_callee;
}
return 0;
}
early_initcall(tick_broadcast_init);
#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */

141
arch/mips/kernel/spinlock_test.c Normal file

@ -0,0 +1,141 @@
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/spinlock.h>
static int ss_get(void *data, u64 *val)
{
ktime_t start, finish;
int loops;
int cont;
DEFINE_RAW_SPINLOCK(ss_spin);
loops = 1000000;
cont = 1;
start = ktime_get();
while (cont) {
raw_spin_lock(&ss_spin);
loops--;
if (loops == 0)
cont = 0;
raw_spin_unlock(&ss_spin);
}
finish = ktime_get();
*val = ktime_us_delta(finish, start);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_ss, ss_get, NULL, "%llu\n");
struct spin_multi_state {
raw_spinlock_t lock;
atomic_t start_wait;
atomic_t enter_wait;
atomic_t exit_wait;
int loops;
};
struct spin_multi_per_thread {
struct spin_multi_state *state;
ktime_t start;
};
static int multi_other(void *data)
{
int loops;
int cont;
struct spin_multi_per_thread *pt = data;
struct spin_multi_state *s = pt->state;
loops = s->loops;
cont = 1;
atomic_dec(&s->enter_wait);
while (atomic_read(&s->enter_wait))
; /* spin */
pt->start = ktime_get();
atomic_dec(&s->start_wait);
while (atomic_read(&s->start_wait))
; /* spin */
while (cont) {
raw_spin_lock(&s->lock);
loops--;
if (loops == 0)
cont = 0;
raw_spin_unlock(&s->lock);
}
atomic_dec(&s->exit_wait);
while (atomic_read(&s->exit_wait))
; /* spin */
return 0;
}
static int multi_get(void *data, u64 *val)
{
ktime_t finish;
struct spin_multi_state ms;
struct spin_multi_per_thread t1, t2;
ms.lock = __RAW_SPIN_LOCK_UNLOCKED("multi_get");
ms.loops = 1000000;
atomic_set(&ms.start_wait, 2);
atomic_set(&ms.enter_wait, 2);
atomic_set(&ms.exit_wait, 2);
t1.state = &ms;
t2.state = &ms;
kthread_run(multi_other, &t2, "multi_get");
multi_other(&t1);
finish = ktime_get();
*val = ktime_us_delta(finish, t1.start);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_multi, multi_get, NULL, "%llu\n");
extern struct dentry *mips_debugfs_dir;
static int __init spinlock_test(void)
{
struct dentry *d;
if (!mips_debugfs_dir)
return -ENODEV;
d = debugfs_create_file("spin_single", S_IRUGO,
mips_debugfs_dir, NULL,
&fops_ss);
if (!d)
return -ENOMEM;
d = debugfs_create_file("spin_multi", S_IRUGO,
mips_debugfs_dir, NULL,
&fops_multi);
if (!d)
return -ENOMEM;
return 0;
}
device_initcall(spinlock_test);
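With CONFIG_DEBUG_FS enabled and the MIPS debugfs directory present, the two files created above can be read directly; each read re-runs the benchmark and prints the elapsed time in microseconds for one million lock/unlock passes, spin_single on a single CPU and spin_multi with a second kthread contending for the same raw spinlock. The usual location is /sys/kernel/debug/mips/, though that depends on where debugfs is mounted.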

221
arch/mips/kernel/spram.c Normal file
View file

@ -0,0 +1,221 @@
/*
* MIPS SPRAM support
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Copyright (C) 2007, 2008 MIPS Technologies, Inc.
*/
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/stddef.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/r4kcache.h>
#include <asm/hazards.h>
/*
* These definitions are correct for the 24K/34K/74K SPRAM sample
* implementation. The 4KS interpreted the tags differently...
*/
#define SPRAM_TAG0_ENABLE 0x00000080
#define SPRAM_TAG0_PA_MASK 0xfffff000
#define SPRAM_TAG1_SIZE_MASK 0xfffff000
#define SPRAM_TAG_STRIDE 8
#define ERRCTL_SPRAM (1 << 28)
/* errctl access */
#define read_c0_errctl(x) read_c0_ecc(x)
#define write_c0_errctl(x) write_c0_ecc(x)
/*
* Different semantics to the set_c0_* function built by __BUILD_SET_C0
*/
static unsigned int bis_c0_errctl(unsigned int set)
{
unsigned int res;
res = read_c0_errctl();
write_c0_errctl(res | set);
return res;
}
static void ispram_store_tag(unsigned int offset, unsigned int data)
{
unsigned int errctl;
/* enable SPRAM tag access */
errctl = bis_c0_errctl(ERRCTL_SPRAM);
ehb();
write_c0_taglo(data);
ehb();
cache_op(Index_Store_Tag_I, CKSEG0|offset);
ehb();
write_c0_errctl(errctl);
ehb();
}
static unsigned int ispram_load_tag(unsigned int offset)
{
unsigned int data;
unsigned int errctl;
/* enable SPRAM tag access */
errctl = bis_c0_errctl(ERRCTL_SPRAM);
ehb();
cache_op(Index_Load_Tag_I, CKSEG0 | offset);
ehb();
data = read_c0_taglo();
ehb();
write_c0_errctl(errctl);
ehb();
return data;
}
static void dspram_store_tag(unsigned int offset, unsigned int data)
{
unsigned int errctl;
/* enable SPRAM tag access */
errctl = bis_c0_errctl(ERRCTL_SPRAM);
ehb();
write_c0_dtaglo(data);
ehb();
cache_op(Index_Store_Tag_D, CKSEG0 | offset);
ehb();
write_c0_errctl(errctl);
ehb();
}
static unsigned int dspram_load_tag(unsigned int offset)
{
unsigned int data;
unsigned int errctl;
errctl = bis_c0_errctl(ERRCTL_SPRAM);
ehb();
cache_op(Index_Load_Tag_D, CKSEG0 | offset);
ehb();
data = read_c0_dtaglo();
ehb();
write_c0_errctl(errctl);
ehb();
return data;
}
static void probe_spram(char *type,
unsigned int base,
unsigned int (*read)(unsigned int),
void (*write)(unsigned int, unsigned int))
{
unsigned int firstsize = 0, lastsize = 0;
unsigned int firstpa = 0, lastpa = 0, pa = 0;
unsigned int offset = 0;
unsigned int size, tag0, tag1;
unsigned int enabled;
int i;
/*
* The limit is arbitrary but avoids the loop running away if
* the SPRAM tags are implemented differently
*/
for (i = 0; i < 8; i++) {
tag0 = read(offset);
tag1 = read(offset+SPRAM_TAG_STRIDE);
pr_debug("DBG %s%d: tag0=%08x tag1=%08x\n",
type, i, tag0, tag1);
size = tag1 & SPRAM_TAG1_SIZE_MASK;
if (size == 0)
break;
if (i != 0) {
/* tags may repeat... */
if ((pa == firstpa && size == firstsize) ||
(pa == lastpa && size == lastsize))
break;
}
/* Align base with size */
base = (base + size - 1) & ~(size-1);
/* reprogram the base address and enable */
tag0 = (base & SPRAM_TAG0_PA_MASK) | SPRAM_TAG0_ENABLE;
write(offset, tag0);
base += size;
/* reread the tag */
tag0 = read(offset);
pa = tag0 & SPRAM_TAG0_PA_MASK;
enabled = tag0 & SPRAM_TAG0_ENABLE;
if (i == 0) {
firstpa = pa;
firstsize = size;
}
lastpa = pa;
lastsize = size;
if (strcmp(type, "DSPRAM") == 0) {
unsigned int *vp = (unsigned int *)(CKSEG1 | pa);
unsigned int v;
#define TDAT 0x5a5aa5a5
vp[0] = TDAT;
vp[1] = ~TDAT;
mb();
v = vp[0];
if (v != TDAT)
printk(KERN_ERR "vp=%p wrote=%08x got=%08x\n",
vp, TDAT, v);
v = vp[1];
if (v != ~TDAT)
printk(KERN_ERR "vp=%p wrote=%08x got=%08x\n",
vp+1, ~TDAT, v);
}
pr_info("%s%d: PA=%08x,Size=%08x%s\n",
type, i, pa, size, enabled ? ",enabled" : "");
offset += 2 * SPRAM_TAG_STRIDE;
}
}
void spram_config(void)
{
unsigned int config0;
switch (current_cpu_type()) {
case CPU_24K:
case CPU_34K:
case CPU_74K:
case CPU_1004K:
case CPU_1074K:
case CPU_INTERAPTIV:
case CPU_PROAPTIV:
case CPU_P5600:
config0 = read_c0_config();
/* FIXME: addresses are Malta specific */
if (config0 & (1<<24)) {
probe_spram("ISPRAM", 0x1c000000,
&ispram_load_tag, &ispram_store_tag);
}
if (config0 & (1<<23))
probe_spram("DSPRAM", 0x1c100000,
&dspram_load_tag, &dspram_store_tag);
}
}

87
arch/mips/kernel/stacktrace.c Normal file

@ -0,0 +1,87 @@
/*
* Stack trace management functions
*
* Copyright (C) 2006 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
*/
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/export.h>
#include <asm/stacktrace.h>
/*
* Save stack-backtrace addresses into a stack_trace buffer:
*/
static void save_raw_context_stack(struct stack_trace *trace,
unsigned long reg29)
{
unsigned long *sp = (unsigned long *)reg29;
unsigned long addr;
while (!kstack_end(sp)) {
addr = *sp++;
if (__kernel_text_address(addr)) {
if (trace->skip > 0)
trace->skip--;
else
trace->entries[trace->nr_entries++] = addr;
if (trace->nr_entries >= trace->max_entries)
break;
}
}
}
static void save_context_stack(struct stack_trace *trace,
struct task_struct *tsk, struct pt_regs *regs)
{
unsigned long sp = regs->regs[29];
#ifdef CONFIG_KALLSYMS
unsigned long ra = regs->regs[31];
unsigned long pc = regs->cp0_epc;
if (raw_show_trace || !__kernel_text_address(pc)) {
unsigned long stack_page =
(unsigned long)task_stack_page(tsk);
if (stack_page && sp >= stack_page &&
sp <= stack_page + THREAD_SIZE - 32)
save_raw_context_stack(trace, sp);
return;
}
do {
if (trace->skip > 0)
trace->skip--;
else
trace->entries[trace->nr_entries++] = pc;
if (trace->nr_entries >= trace->max_entries)
break;
pc = unwind_stack(tsk, &sp, pc, &ra);
} while (pc);
#else
save_raw_context_stack(trace, sp);
#endif
}
/*
* Save stack-backtrace addresses into a stack_trace buffer.
*/
void save_stack_trace(struct stack_trace *trace)
{
save_stack_trace_tsk(current, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
struct pt_regs dummyregs;
struct pt_regs *regs = &dummyregs;
WARN_ON(trace->nr_entries || !trace->max_entries);
if (tsk != current) {
regs->regs[29] = tsk->thread.reg29;
regs->regs[31] = 0;
regs->cp0_epc = tsk->thread.reg31;
} else
prepare_frametrace(regs);
save_context_stack(trace, tsk, regs);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
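As a usage illustration (a sketch, not part of this file): the caller owns the entries buffer and the counters start at zero; the generic print_stack_trace() helper can then dump whatever was collected. The helper name dump_current_backtrace() is made up for the example.

#include <linux/kernel.h>
#include <linux/stacktrace.h>

static void dump_current_backtrace(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.skip		= 1,	/* drop this helper itself */
	};

	save_stack_trace(&trace);
	print_stack_trace(&trace, 0);	/* 0 = no extra indentation */
}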

132
arch/mips/kernel/sync-r4k.c Normal file

@ -0,0 +1,132 @@
/*
* Count register synchronisation.
*
* All CPUs will have their count registers synchronised to CPU0's next time
* value. This can cause a small timewarp for CPU0. All other CPUs should
* not have done anything significant (but they may have had interrupts
* enabled briefly - prom_smp_finish() should not be responsible for enabling
* interrupts...)
*/
#include <linux/kernel.h>
#include <linux/irqflags.h>
#include <linux/cpumask.h>
#include <asm/r4k-timer.h>
#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/mipsregs.h>
static atomic_t count_start_flag = ATOMIC_INIT(0);
static atomic_t count_count_start = ATOMIC_INIT(0);
static atomic_t count_count_stop = ATOMIC_INIT(0);
static atomic_t count_reference = ATOMIC_INIT(0);
#define COUNTON 100
#define NR_LOOPS 5
void synchronise_count_master(int cpu)
{
int i;
unsigned long flags;
unsigned int initcount;
printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu);
local_irq_save(flags);
/*
* Notify the slaves that it's time to start
*/
atomic_set(&count_reference, read_c0_count());
atomic_set(&count_start_flag, cpu);
smp_wmb();
/* Count will be initialised to current timer for all CPU's */
initcount = read_c0_count();
/*
* We loop a few times to get a primed instruction cache,
* then the last pass is more or less synchronised and
* the master and slaves each set their cycle counters to a known
* value all at once. This reduces the chance of having random offsets
* between the processors, and guarantees that the maximum
* delay between the cycle counters is never bigger than
* the latency of information-passing (cachelines) between
* two CPUs.
*/
for (i = 0; i < NR_LOOPS; i++) {
/* slaves loop on '!= 2' */
while (atomic_read(&count_count_start) != 1)
mb();
atomic_set(&count_count_stop, 0);
smp_wmb();
/* this lets the slaves write their count register */
atomic_inc(&count_count_start);
/*
* Everyone initialises count in the last loop:
*/
if (i == NR_LOOPS-1)
write_c0_count(initcount);
/*
* Wait for all slaves to leave the synchronization point:
*/
while (atomic_read(&count_count_stop) != 1)
mb();
atomic_set(&count_count_start, 0);
smp_wmb();
atomic_inc(&count_count_stop);
}
/* Arrange for an interrupt in a short while */
write_c0_compare(read_c0_count() + COUNTON);
atomic_set(&count_start_flag, 0);
local_irq_restore(flags);
/*
* i386 code reported the skew here, but the
* count registers were almost certainly out of sync
* so no point in alarming people
*/
printk("done.\n");
}
void synchronise_count_slave(int cpu)
{
int i;
unsigned int initcount;
/*
* Not every cpu is online at the time this gets called,
* so we first wait for the master to say everyone is ready
*/
while (atomic_read(&count_start_flag) != cpu)
mb();
/* Count will be initialised to next expire for all CPU's */
initcount = atomic_read(&count_reference);
for (i = 0; i < NR_LOOPS; i++) {
atomic_inc(&count_count_start);
while (atomic_read(&count_count_start) != 2)
mb();
/*
* Everyone initialises count in the last loop:
*/
if (i == NR_LOOPS-1)
write_c0_count(initcount);
atomic_inc(&count_count_stop);
while (atomic_read(&count_count_stop) != 2)
mb();
}
/* Arrange for an interrupt in a short while */
write_c0_compare(read_c0_count() + COUNTON);
}
#undef NR_LOOPS

244
arch/mips/kernel/syscall.c Normal file

@ -0,0 +1,244 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995, 1996, 1997, 2000, 2001, 05 by Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2001 MIPS Technologies, Inc.
*/
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/unistd.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/compiler.h>
#include <linux/ipc.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/elf.h>
#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/cachectl.h>
#include <asm/cacheflush.h>
#include <asm/asm-offsets.h>
#include <asm/signal.h>
#include <asm/sim.h>
#include <asm/shmparam.h>
#include <asm/sysmips.h>
#include <asm/uaccess.h>
#include <asm/switch_to.h>
/*
* For historic reasons the pipe(2) syscall on MIPS has an unusual calling
* convention. It returns results in registers $v0 / $v1 which means there
* is no need for it to verify the validity of a userspace pointer
* argument. Historically that used to be expensive in Linux. These days
* the performance advantage is negligible.
*/
asmlinkage int sysm_pipe(void)
{
int fd[2];
int error = do_pipe_flags(fd, 0);
if (error)
return error;
current_pt_regs()->regs[3] = fd[1];
return fd[0];
}
SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags, unsigned long,
fd, off_t, offset)
{
unsigned long result;
result = -EINVAL;
if (offset & ~PAGE_MASK)
goto out;
result = sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
out:
return result;
}
SYSCALL_DEFINE6(mips_mmap2, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags, unsigned long, fd,
unsigned long, pgoff)
{
if (pgoff & (~PAGE_MASK >> 12))
return -EINVAL;
return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12));
}
save_static_function(sys_fork);
save_static_function(sys_clone);
SYSCALL_DEFINE1(set_thread_area, unsigned long, addr)
{
struct thread_info *ti = task_thread_info(current);
ti->tp_value = addr;
if (cpu_has_userlocal)
write_c0_userlocal(addr);
return 0;
}
static inline int mips_atomic_set(unsigned long addr, unsigned long new)
{
unsigned long old, tmp;
struct pt_regs *regs;
unsigned int err;
if (unlikely(addr & 3))
return -EINVAL;
if (unlikely(!access_ok(VERIFY_WRITE, addr, 4)))
return -EINVAL;
if (cpu_has_llsc && R10000_LLSC_WAR) {
__asm__ __volatile__ (
" .set arch=r4000 \n"
" li %[err], 0 \n"
"1: ll %[old], (%[addr]) \n"
" move %[tmp], %[new] \n"
"2: sc %[tmp], (%[addr]) \n"
" beqzl %[tmp], 1b \n"
"3: \n"
" .section .fixup,\"ax\" \n"
"4: li %[err], %[efault] \n"
" j 3b \n"
" .previous \n"
" .section __ex_table,\"a\" \n"
" "STR(PTR)" 1b, 4b \n"
" "STR(PTR)" 2b, 4b \n"
" .previous \n"
" .set mips0 \n"
: [old] "=&r" (old),
[err] "=&r" (err),
[tmp] "=&r" (tmp)
: [addr] "r" (addr),
[new] "r" (new),
[efault] "i" (-EFAULT)
: "memory");
} else if (cpu_has_llsc) {
__asm__ __volatile__ (
" .set arch=r4000 \n"
" li %[err], 0 \n"
"1: ll %[old], (%[addr]) \n"
" move %[tmp], %[new] \n"
"2: sc %[tmp], (%[addr]) \n"
" bnez %[tmp], 4f \n"
"3: \n"
" .subsection 2 \n"
"4: b 1b \n"
" .previous \n"
" \n"
" .section .fixup,\"ax\" \n"
"5: li %[err], %[efault] \n"
" j 3b \n"
" .previous \n"
" .section __ex_table,\"a\" \n"
" "STR(PTR)" 1b, 5b \n"
" "STR(PTR)" 2b, 5b \n"
" .previous \n"
" .set mips0 \n"
: [old] "=&r" (old),
[err] "=&r" (err),
[tmp] "=&r" (tmp)
: [addr] "r" (addr),
[new] "r" (new),
[efault] "i" (-EFAULT)
: "memory");
} else {
do {
preempt_disable();
ll_bit = 1;
ll_task = current;
preempt_enable();
err = __get_user(old, (unsigned int *) addr);
err |= __put_user(new, (unsigned int *) addr);
if (err)
break;
rmb();
} while (!ll_bit);
}
if (unlikely(err))
return err;
regs = current_pt_regs();
regs->regs[2] = old;
regs->regs[7] = 0; /* No error */
/*
* Don't let your children do this ...
*/
__asm__ __volatile__(
" move $29, %0 \n"
" j syscall_exit \n"
: /* no outputs */
: "r" (regs));
/* unreached. Honestly. */
unreachable();
}
SYSCALL_DEFINE3(sysmips, long, cmd, long, arg1, long, arg2)
{
switch (cmd) {
case MIPS_ATOMIC_SET:
return mips_atomic_set(arg1, arg2);
case MIPS_FIXADE:
if (arg1 & ~3)
return -EINVAL;
if (arg1 & 1)
set_thread_flag(TIF_FIXADE);
else
clear_thread_flag(TIF_FIXADE);
if (arg1 & 2)
set_thread_flag(TIF_LOGADE);
else
clear_thread_flag(TIF_LOGADE);
return 0;
case FLUSH_CACHE:
__flush_cache_all();
return 0;
}
return -EINVAL;
}
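Since sysmips(2) is an ordinary system call, MIPS_ATOMIC_SET is also reachable from userspace. A hedged sketch of using it as a crude test-and-set lock follows; it assumes a MIPS toolchain exposing <asm/sysmips.h> and SYS_sysmips, and relies on the kernel returning the word's previous value (regs[2] above):

#include <unistd.h>
#include <sys/syscall.h>
#include <asm/sysmips.h>

static int lock_word;	/* 0 = free, 1 = held; must be naturally aligned */

static void crude_lock(void)
{
	/* atomically swap in 1; the old value tells us whether we got the lock */
	while (syscall(SYS_sysmips, MIPS_ATOMIC_SET, &lock_word, 1) != 0)
		;
}

static void crude_unlock(void)
{
	syscall(SYS_sysmips, MIPS_ATOMIC_SET, &lock_word, 0);
}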
/*
* Not implemented yet ...
*/
SYSCALL_DEFINE3(cachectl, char *, addr, int, nbytes, int, op)
{
return -ENOSYS;
}
/*
* If we ever come here the user sp is bad. Zap the process right away.
* Due to the bad stack signaling wouldn't work.
*/
asmlinkage void bad_stack(void)
{
do_exit(SIGSEGV);
}

134
arch/mips/kernel/time.c Normal file

@ -0,0 +1,134 @@
/*
* Copyright 2001 MontaVista Software Inc.
* Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
* Copyright (c) 2003, 2004 Maciej W. Rozycki
*
* Common time service routines for MIPS machines.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/bug.h>
#include <linux/clockchips.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/param.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/div64.h>
#include <asm/time.h>
/*
* forward reference
*/
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);
int __weak rtc_mips_set_time(unsigned long sec)
{
return 0;
}
int __weak rtc_mips_set_mmss(unsigned long nowtime)
{
return rtc_mips_set_time(nowtime);
}
int update_persistent_clock(struct timespec now)
{
return rtc_mips_set_mmss(now.tv_sec);
}
static int null_perf_irq(void)
{
return 0;
}
int (*perf_irq)(void) = null_perf_irq;
EXPORT_SYMBOL(perf_irq);
/*
* time_init() - it does the following things.
*
* 1) plat_time_init() -
* a) (optional) set up RTC routines,
* b) (optional) calibrate and set the mips_hpt_frequency
* (only needed if you intended to use cpu counter as timer interrupt
* source)
* 2) calculate a couple of cached variables for later usage
*/
unsigned int mips_hpt_frequency;
/*
* This function exists in order to cause an error due to a duplicate
* definition if platform code should have its own implementation. The hook
* to use instead is plat_time_init. plat_time_init does not receive the
* irqaction pointer argument anymore. This is because any function which
* initializes an interrupt timer now takes care of its own request_irq or
* setup_irq calls and each clock_event_device should use its own
* struct irqaction.
*/
void __init plat_timer_setup(void)
{
BUG();
}
static __init int cpu_has_mfc0_count_bug(void)
{
switch (current_cpu_type()) {
case CPU_R4000PC:
case CPU_R4000SC:
case CPU_R4000MC:
/*
* V3.0 is documented as suffering from the mfc0 from count bug.
* Afaik this is the last version of the R4000. Later versions
* were marketed as R4400.
*/
return 1;
case CPU_R4400PC:
case CPU_R4400SC:
case CPU_R4400MC:
/*
* The published errata for the R4400 up to 3.0 say the CPU
* has the mfc0 from count bug.
*/
if ((current_cpu_data.processor_id & 0xff) <= 0x30)
return 1;
/*
* we assume newer revisions are ok
*/
return 0;
}
return 0;
}
void __init time_init(void)
{
plat_time_init();
/*
* The use of the R4k timer as a clock event takes precedence;
* if reading the Count register might interfere with the timer
* interrupt, then we don't use the timer as a clock source.
* We may still use the timer as a clock source though if the
* timer interrupt isn't reliable; the interference doesn't
* matter then, because we don't use the interrupt.
*/
if (mips_clockevent_init() != 0 || !cpu_has_mfc0_count_bug())
init_mips_clocksource();
}
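To make the plat_time_init() contract described above concrete, a minimal, hypothetical board-side hook might look like the following sketch; the clock ratio is invented for the example:

#include <linux/init.h>
#include <asm/time.h>

void __init plat_time_init(void)
{
	/* hypothetical board: CP0 Count ticks at half of a 400 MHz core clock */
	mips_hpt_frequency = 400000000 / 2;
}

Any RTC hooks (rtc_mips_set_time and friends) would also be wired up here if the board provides them.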

32
arch/mips/kernel/topology.c Normal file

@ -0,0 +1,32 @@
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/percpu.h>
static DEFINE_PER_CPU(struct cpu, cpu_devices);
static int __init topology_init(void)
{
int i, ret;
#ifdef CONFIG_NUMA
for_each_online_node(i)
register_one_node(i);
#endif /* CONFIG_NUMA */
for_each_present_cpu(i) {
struct cpu *c = &per_cpu(cpu_devices, i);
c->hotpluggable = 1;
ret = register_cpu(c, i);
if (ret)
printk(KERN_WARNING "topology_init: register_cpu %d "
"failed (%d)\n", i, ret);
}
return 0;
}
subsys_initcall(topology_init);

2217
arch/mips/kernel/traps.c Normal file

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff